# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Taken from Twisted Python, which is licensed under the MIT license.
# https://github.com/powdahound/twisted/blob/master/twisted/python/dist.py
# https://github.com/powdahound/twisted/blob/master/LICENSE

import os
import fnmatch

# Names that are excluded from globbing results:
EXCLUDE_NAMES = ['{arch}', 'CVS', '.cvsignore', '_darcs', 'RCS', 'SCCS', '.svn']
EXCLUDE_PATTERNS = ['*.py[cdo]', '*.s[ol]', '.#*', '*~', '*.py']


def _filter_names(names):
    """
    Given a list of file names, return those names that should be copied.
    """
    names = [n for n in names if n not in EXCLUDE_NAMES]
    # This is needed when building a distro from a working
    # copy (likely a checkout) rather than a pristine export:
    for pattern in EXCLUDE_PATTERNS:
        names = [n for n in names
                 if not fnmatch.fnmatch(n, pattern) and not n.endswith('.py')]
    return names


def relative_to(base, relativee):
    """
    Gets 'relativee' relative to 'basepath'.

    i.e.,

    >>> relative_to('/home/', '/home/radix/')
    'radix'
    >>> relative_to('.', '/home/radix/Projects/Twisted')
    'Projects/Twisted'

    The 'relativee' must be a child of 'basepath'.
    """
    basepath = os.path.abspath(base)
    relativee = os.path.abspath(relativee)
    if relativee.startswith(basepath):
        relative = relativee[len(basepath):]
        if relative.startswith(os.sep):
            relative = relative[1:]
        return os.path.join(base, relative)
    raise ValueError("%s is not a subpath of %s" % (relativee, basepath))


def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None):
    """
    Get all packages which are under dname. This is necessary for
    Python 2.2's distutils. Pretty similar arguments to getDataFiles,
    including 'parent'.
    """
    parent = parent or ""
    prefix = []
    if parent:
        prefix = [parent]
    bname = os.path.basename(dname)
    ignore = ignore or []
    if bname in ignore:
        return []
    if results is None:
        results = []
    if pkgname is None:
        pkgname = []
    subfiles = os.listdir(dname)
    abssubfiles = [os.path.join(dname, x) for x in subfiles]
    if '__init__.py' in subfiles:
        results.append(prefix + pkgname + [bname])
        for subdir in filter(os.path.isdir, abssubfiles):
            get_packages(subdir, pkgname=pkgname + [bname],
                         results=results, ignore=ignore, parent=parent)
    res = ['.'.join(result) for result in results]
    return res


def get_data_files(dname, ignore=None, parent=None):
    """
    Get all the data files that should be included in this distutils Project.

    'dname' should be the path to the package that you're distributing.

    'ignore' is a list of sub-packages to ignore.  This facilitates
    disparate package hierarchies.  That's a fancy way of saying that
    the 'twisted' package doesn't want to include the 'twisted.conch'
    package, so it will pass ['conch'] as the value.

    'parent' is necessary if you're distributing a subpackage like
    twisted.conch.  'dname' should point to 'twisted/conch' and 'parent'
    should point to 'twisted'.  This ensures that your data_files are
    generated correctly, only using relative paths for the first element
    of the tuple ('twisted/conch/*').  The default 'parent' is the
    current working directory.
    """
    parent = parent or "."
    ignore = ignore or []
    result = []
    for directory, subdirectories, filenames in os.walk(dname):
        resultfiles = []
        for exname in EXCLUDE_NAMES:
            if exname in subdirectories:
                subdirectories.remove(exname)
        for ig in ignore:
            if ig in subdirectories:
                subdirectories.remove(ig)
        for filename in _filter_names(filenames):
            resultfiles.append(filename)
        if resultfiles:
            for filename in resultfiles:
                file_path = os.path.join(directory, filename)
                if parent:
                    file_path = file_path.replace(parent + os.sep, '')
                result.append(file_path)
    return result
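# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module):
# how these helpers might be wired into a setup.py. The package name
# 'mypackage' is a hypothetical placeholder; get_packages() walks the tree
# and returns dotted package names, while get_data_files() returns the
# non-.py files to ship alongside them.
#
#   from setuptools import setup
#   from dist import get_packages, get_data_files
#
#   setup(
#       name='example-project',
#       version='0.1.0',
#       packages=get_packages('mypackage'),
#       package_data={'mypackage': get_data_files('mypackage', parent='mypackage')},
#   )
# ---------------------------------------------------------------------------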
March 6, 2019 (updated April 5, 2019) by Erik K.

In these times, getting the best education for your children is of the utmost importance. The topic of student education is a contentious one, as there is more than a handful of options available to parents arranging the schooling of their children. It does not help that the public educational setting is often full of difficulty and debate over the allocation of school funding, curriculum choices and external influences. These things and others end up affecting a student's education, personal development and belief system.

As time has passed, it has been taken for granted that educating and nurturing the academic growth of children is best accomplished by the institutionalized school systems of our states and cities. For many, however, the public school systems have not met the needs of parents and their children with respect to the academic standards expected by the proactive parent. This has resulted in a growing number of parents taking the education of their children into their own hands. Home schooling continues to grow and to widen as more and more parents recognize its many benefits and advantages.

Home education offers many benefits and advantages over the traditional educational methods and systems offered by our public schools. It allows the parent to select the exact instruction plan or curriculum the student will learn from, based on what the parent thinks is important for the student, not what the district school system thinks. It is often found that many public schools teach students subjects that are academically irrelevant, not challenging, or that are really best left to the parent to teach. Home schooling offers a direct remedy for this and allows a way to tailor the student's education to specific interests and needs while continuing to provide a level of challenge that will keep the student growing in terms of his or her learning abilities.

Home education also offers flexibility in the learning process. Some students excel at some things but not at others. Home schooling a student of this nature allows that student to excel where his or her strengths are while at the same time spending a little more effort on improving the weaker areas. Some students are gifted and do well with every subject they are taught. For them, home schooling allows the education process to be more challenging, since a more academically demanding curriculum can be adopted. Gifted home-schooled students are able to pursue their interests and development path without the time constraints or curriculum limitations that are present in the conventional learning environment.

There are many ways that home schooling can be done today. Some parents opt for a structured curriculum while others use standard textbooks. Some parents combine these things with their own teaching while others teach each lesson entirely from their own resources. Given this, it is clear that this learning process is genuinely flexible, can be specifically tailored to the student's needs and can be changed on the go as the student's educational needs change.

Home education offers other indirect advantages as well. One of these is more free time. Home study can be an efficient way to teach: the time available during the day can be used effectively, thereby reducing the total time the student actually spends at school. This efficient use of time leaves more time to be spent on additional activities, either related or unrelated to the student's education.

Home study allows the parent to become the primary mentor and source of guidance for a child. Since a home-schooled child relates to the parent a great deal more, the child and parent can form a tighter bond than they might otherwise form. This bond can be the foundation of a higher level of trust between the child and parent, where the child is more apt to come to the parent for comfort and guidance instead of turning elsewhere.

Home schooling can also be an outlet in the special circumstance where a child has been a victim of extreme bullying at a conventional public school. School bullying is a serious problem at some schools and a difficult matter to resolve. The home schooling of a child victimized in this way offers the child a chance to re-focus on learning while at the same time receiving the close parental guidance needed to work through how the child handles situations of this nature.

It is now well established that home-schooled students do well when it comes to college preparation. On the whole, home-schooled students have performed equal to or better than public-schooled students on SAT assessments, and they show an achievement factor for doing well in college equal to that of their public-schooled peers.

Home schooling is not for everyone. Each parent should carefully evaluate whether home schooling will benefit their particular situation or not. There are many considerations to be made when choosing to home school, but for many, home education has been a choice that has proven to be thoroughly beneficial to the student's education.
# -*- coding: utf-8 -*-
import urlparse
import logging

from dirtyfields import DirtyFieldsMixin
from django.db import models
from django.utils import timezone
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError

from framework.exceptions import PermissionsError
from osf.models.nodelog import NodeLog
from osf.models.mixins import ReviewableMixin
from osf.models import OSFUser
from osf.utils.fields import NonNaiveDateTimeField
from osf.utils.workflows import ReviewStates
from osf.utils.permissions import ADMIN
from osf.utils.requests import DummyRequest, get_request_and_user_id, get_headers_from_request
from website.notifications.emails import get_user_subscriptions
from website.notifications import utils
from website.preprints.tasks import update_or_enqueue_on_preprint_updated
from website.project.licenses import set_license
from website.util import api_v2_url
from website.identifiers.clients import CrossRefClient, ECSArXivCrossRefClient
from website import settings, mails
from osf.models.base import BaseModel, GuidMixin
from osf.models.identifiers import IdentifierMixin, Identifier
from osf.models.mixins import TaxonomizableMixin
from osf.models.spam import SpamMixin

logger = logging.getLogger(__name__)


class PreprintService(DirtyFieldsMixin, SpamMixin, GuidMixin, IdentifierMixin,
                      ReviewableMixin, TaxonomizableMixin, BaseModel):

    SPAM_CHECK_FIELDS = set()

    provider = models.ForeignKey('osf.PreprintProvider',
                                 on_delete=models.SET_NULL,
                                 related_name='preprint_services',
                                 null=True, blank=True, db_index=True)
    node = models.ForeignKey('osf.AbstractNode', on_delete=models.SET_NULL,
                             related_name='preprints',
                             null=True, blank=True, db_index=True)
    is_published = models.BooleanField(default=False, db_index=True)
    date_published = NonNaiveDateTimeField(null=True, blank=True)
    original_publication_date = NonNaiveDateTimeField(null=True, blank=True)
    license = models.ForeignKey('osf.NodeLicenseRecord',
                                on_delete=models.SET_NULL, null=True, blank=True)

    identifiers = GenericRelation(Identifier, related_query_name='preprintservices')
    preprint_doi_created = NonNaiveDateTimeField(default=None, null=True, blank=True)
    date_withdrawn = NonNaiveDateTimeField(default=None, null=True, blank=True)
    withdrawal_justification = models.TextField(default='', blank=True)
    ever_public = models.BooleanField(default=False, blank=True)

    class Meta:
        unique_together = ('node', 'provider')
        permissions = (
            ('view_preprintservice', 'Can view preprint service details in the admin app.'),
        )

    def __unicode__(self):
        return '{} preprint (guid={}) of {}'.format(
            'published' if self.is_published else 'unpublished',
            self._id,
            self.node.__unicode__() if self.node else None)

    @property
    def verified_publishable(self):
        return self.is_published and \
            self.node.is_preprint and \
            not (self.is_retracted or self.node.is_deleted)

    @property
    def primary_file(self):
        if not self.node:
            return
        return self.node.preprint_file

    @property
    def is_retracted(self):
        return self.date_withdrawn is not None

    @property
    def article_doi(self):
        if not self.node:
            return
        return self.node.preprint_article_doi

    @property
    def preprint_doi(self):
        return self.get_identifier_value('doi')

    @property
    def is_preprint_orphan(self):
        if not self.node:
            return
        return self.node.is_preprint_orphan

    @property
    def deep_url(self):
        # Required for GUID routing
        return '/preprints/{}/'.format(self._primary_key)

    @property
    def url(self):
        if (self.provider.domain_redirect_enabled and self.provider.domain) or \
                self.provider._id == 'osf':
            return '/{}/'.format(self._id)
        return '/preprints/{}/{}/'.format(self.provider._id, self._id)

    @property
    def absolute_url(self):
        return urlparse.urljoin(
            self.provider.domain if self.provider.domain_redirect_enabled else settings.DOMAIN,
            self.url
        )

    @property
    def absolute_api_v2_url(self):
        path = '/preprints/{}/'.format(self._id)
        return api_v2_url(path)

    @property
    def should_request_identifiers(self):
        return not self.node.all_tags.filter(name='qatest').exists()

    @property
    def has_pending_withdrawal_request(self):
        return self.requests.filter(request_type='withdrawal',
                                    machine_state='pending').exists()

    @property
    def has_withdrawal_request(self):
        return self.requests.filter(request_type='withdrawal').exists()

    def has_permission(self, *args, **kwargs):
        return self.node.has_permission(*args, **kwargs)

    def set_primary_file(self, preprint_file, auth, save=False):
        if not self.node.has_permission(auth.user, ADMIN):
            raise PermissionsError('Only admins can change a preprint\'s primary file.')
        if preprint_file.target != self.node or preprint_file.provider != 'osfstorage':
            raise ValueError('This file is not a valid primary file for this preprint.')
        existing_file = self.node.preprint_file
        self.node.preprint_file = preprint_file
        # only log if updating the preprint file, not adding for the first time
        if existing_file:
            self.node.add_log(
                action=NodeLog.PREPRINT_FILE_UPDATED,
                params={
                    'preprint': self._id
                },
                auth=auth,
                save=False
            )
        if save:
            self.save()
            self.node.save()

    def set_published(self, published, auth, save=False):
        if not self.node.has_permission(auth.user, ADMIN):
            raise PermissionsError('Only admins can publish a preprint.')
        if self.is_published and not published:
            raise ValueError('Cannot unpublish preprint.')
        self.is_published = published
        if published:
            if not (self.node.preprint_file and self.node.preprint_file.target == self.node):
                raise ValueError('Preprint node is not a valid preprint; cannot publish.')
            if not self.provider:
                raise ValueError('Preprint provider not specified; cannot publish.')
            if not self.subjects.exists():
                raise ValueError('Preprint must have at least one subject to be published.')
            self.date_published = timezone.now()
            self.node._has_abandoned_preprint = False
            # In case this provider is ever set up to use a reviews workflow,
            # put this preprint in a sensible state
            self.machine_state = ReviewStates.ACCEPTED.value
            self.date_last_transitioned = self.date_published
            # This preprint will have a tombstone page when it's withdrawn.
            self.ever_public = True
            self.node.add_log(
                action=NodeLog.PREPRINT_INITIATED,
                params={
                    'preprint': self._id
                },
                auth=auth,
                save=False,
            )
            if not self.node.is_public:
                self.node.set_privacy(
                    self.node.PUBLIC,
                    auth=None,
                    log=True
                )
            self._send_preprint_confirmation(auth)
        if save:
            self.node.save()
            self.save()

    def set_preprint_license(self, license_detail, auth, save=False):
        license_record, license_changed = set_license(self, license_detail, auth,
                                                      node_type='preprint')
        if license_changed:
            self.node.add_log(
                action=NodeLog.PREPRINT_LICENSE_UPDATED,
                params={
                    'preprint': self._id,
                    'new_license': license_record.node_license.name
                },
                auth=auth,
                save=False
            )
        if save:
            self.save()

    def set_identifier_values(self, doi, save=False):
        self.set_identifier_value('doi', doi)
        self.preprint_doi_created = timezone.now()
        if save:
            self.save()

    def get_doi_client(self):
        if settings.CROSSREF_URL:
            if self.provider._id == 'ecsarxiv':
                return ECSArXivCrossRefClient(base_url=settings.CROSSREF_URL)
            return CrossRefClient(base_url=settings.CROSSREF_URL)
        else:
            return None

    def save(self, *args, **kwargs):
        first_save = not bool(self.pk)
        saved_fields = self.get_dirty_fields() or []
        old_subjects = kwargs.pop('old_subjects', [])
        if saved_fields:
            request, user_id = get_request_and_user_id()
            request_headers = {}
            if not isinstance(request, DummyRequest):
                request_headers = {
                    k: v
                    for k, v in get_headers_from_request(request).items()
                    if isinstance(v, basestring)
                }
            user = OSFUser.load(user_id)
            if user:
                self.check_spam(user, saved_fields, request_headers)
        if not first_save and ('ever_public' in saved_fields and saved_fields['ever_public']):
            raise ValidationError('Cannot set "ever_public" to False')
        ret = super(PreprintService, self).save(*args, **kwargs)
        if (not first_save and 'is_published' in saved_fields) or self.is_published:
            update_or_enqueue_on_preprint_updated(preprint_id=self._id,
                                                  old_subjects=old_subjects,
                                                  saved_fields=saved_fields)
        return ret

    def _get_spam_content(self, saved_fields):
        spam_fields = self.SPAM_CHECK_FIELDS \
            if self.is_published and 'is_published' in saved_fields \
            else self.SPAM_CHECK_FIELDS.intersection(saved_fields)
        content = []
        for field in spam_fields:
            content.append((getattr(self.node, field, None) or '').encode('utf-8'))
        if self.node.all_tags.exists():
            content.extend([name.encode('utf-8')
                            for name in self.node.all_tags.values_list('name', flat=True)])
        if not content:
            return None
        return ' '.join(content)

    def check_spam(self, user, saved_fields, request_headers):
        if not settings.SPAM_CHECK_ENABLED:
            return False
        if settings.SPAM_CHECK_PUBLIC_ONLY and not self.node.is_public:
            return False
        if 'ham_confirmed' in user.system_tags:
            return False
        content = self._get_spam_content(saved_fields)
        if not content:
            return
        is_spam = self.do_check_spam(
            user.fullname,
            user.username,
            content,
            request_headers,
        )
        logger.info("Preprint ({}) '{}' smells like {} (tip: {})".format(
            self._id,
            self.node.title.encode('utf-8'),
            'SPAM' if is_spam else 'HAM',
            self.spam_pro_tip
        ))
        if is_spam:
            self.node._check_spam_user(user)
        return is_spam

    def _check_spam_user(self, user):
        self.node._check_spam_user(user)

    def flag_spam(self):
        """ Overrides SpamMixin#flag_spam. """
        super(PreprintService, self).flag_spam()
        self.node.flag_spam()

    def confirm_spam(self, save=False):
        super(PreprintService, self).confirm_spam(save=save)
        self.node.confirm_spam(save=save)

    def confirm_ham(self, save=False):
        super(PreprintService, self).confirm_ham(save=save)
        self.node.confirm_ham(save=save)

    def _send_preprint_confirmation(self, auth):
        # Send creator confirmation email
        recipient = self.node.creator
        event_type = utils.find_subscription_type('global_reviews')
        user_subscriptions = get_user_subscriptions(recipient, event_type)
        if self.provider._id == 'osf':
            logo = settings.OSF_PREPRINTS_LOGO
        else:
            logo = self.provider._id
        context = {
            'domain': settings.DOMAIN,
            'reviewable': self,
            'workflow': self.provider.reviews_workflow,
            'provider_url': '{domain}preprints/{provider_id}'.format(
                domain=self.provider.domain or settings.DOMAIN,
                provider_id=self.provider._id if not self.provider.domain else '').strip('/'),
            'provider_contact_email': self.provider.email_contact or settings.OSF_CONTACT_EMAIL,
            'provider_support_email': self.provider.email_support or settings.OSF_SUPPORT_EMAIL,
            'no_future_emails': user_subscriptions['none'],
            'is_creator': True,
            'provider_name': 'OSF Preprints' if self.provider.name == 'Open Science Framework'
                             else self.provider.name,
            'logo': logo,
        }
        mails.send_mail(
            recipient.username,
            mails.REVIEWS_SUBMISSION_CONFIRMATION,
            mimetype='html',
            user=recipient,
            **context
        )
All information and specifications shown on this website are based upon the latest available information provided by race organizations, teams and riders. No rights can be derived from the rankings, results or statistics presented on this website.
from __future__ import print_function

import math, numpy, time
from ImageD11 import cImageD11
from fabio.openimage import openimage

print("Using class version")


class fourier_radial(object):
    """ Cache results for re-use where possible on next layer """

    def __init__(self, dims, theta=None):
        self.dims = dims
        self.theta = theta
        if self.theta is not None:
            assert len(self.theta) == dims[1]
            self.theta = numpy.array(theta) * numpy.pi / 180.0
            self.make_indices()

    def set_theta(self, theta):
        """
        theta is the list of angles in degrees of the projections
        assumed 1 degree steps otherwise
        """
        self.theta = theta
        assert len(self.theta) == self.dims[1]
        self.theta = numpy.array(theta) * numpy.pi / 180.0
        self.make_indices()

    def make_indices(self):
        # Integer division so the shapes stay ints on Python 3 as well.
        arshape = self.dims[0] // 2 + 1, self.dims[1]
        nv = (arshape[0] - 1) * 2
        nh = arshape[0]
        print("NV,NH", nv, nh)
        self.ftimshape = (nv, nh)
        self.ftimlen = nv * nh
        n1 = (self.dims[0] // 2 + 1) * self.dims[1]
        xv = numpy.arange(0, self.dims[0] // 2 + 1, 1,
                          dtype=numpy.float32)  # dimensions?
        cth = numpy.cos(self.theta)  # 1D
        sth = numpy.sin(self.theta)  # 1D
        ia = numpy.round(numpy.outer(cth, xv)).astype(int)
        ja = numpy.round(numpy.outer(sth, xv)).astype(int)
        on = numpy.array([1.0], numpy.float32)
        jm = numpy.where(ja < 0, -on, on)
        numpy.multiply(ia, jm, ia)  # if j<0: i=-i
        numpy.multiply(ja, jm, ja)  # if j<0: j=-j
        # if j<0: f=f.conj()
        ia = numpy.where(ia < 0, nv + ia, ia)
        inds = (ia * nh + ja).ravel()
        self.conjer = jm
        self.inds = inds
        nim = numpy.zeros((nv * nh), numpy.float32)
        wons = numpy.ones((len(inds)), dtype=numpy.float32)
        # This is now more dense - bincount?
        cImageD11.put_incr(nim, inds, wons)
        nim = nim.astype(int)
        self.nim_div = nim + (nim == 0)

    def process_sinogram(self, sinogram, do_interpolation=False):
        """
        sinogram is from the data, dimensions [npixels, nangles]
        do_interp - tries to fill in some of the missing data in
        fourier space
        returns the radon transform
        """
        # note: the do_interpolation flag is accepted but not yet used below
        assert sinogram.shape == self.dims
        ar = numpy.fft.rfft(sinogram, axis=0)
        faprojr = (ar.T.real.astype(numpy.float32))
        faprojc = (ar.T.imag.astype(numpy.float32))
        numpy.multiply(faprojc, self.conjer, faprojc)
        fimr = numpy.zeros(self.ftimlen, numpy.float32)
        fimc = numpy.zeros(self.ftimlen, numpy.float32)
        cImageD11.put_incr(fimr, self.inds, faprojr.ravel())
        cImageD11.put_incr(fimc, self.inds, faprojc.ravel())
        fim = numpy.divide(fimr + fimc * 1j, self.nim_div)
        fim.shape = self.ftimshape
        return fim

    def sino2im(self, sinogram, centrepixel):
        # Take out high frequency in mean (some ring artifacts)
        s = sinogram
        cp = centrepixel
        d = numpy.concatenate((s[cp:, :], s[:cp, :]), axis=0)
        im = self.process_sinogram(d, centrepixel)
        # figure out what the scale factor really is
        ret = numpy.fft.irfft2(im) * im.shape[0] * im.shape[1]
        ret = numpy.fft.fftshift(ret)
        return ret


if __name__ == "__main__":
    import sys
    if len(sys.argv) != 5:
        print("Usage: %s sinogram startangle step centrepixel" % sys.argv[0])
        sys.exit()
    fname = sys.argv[1]
    star = time.time()
    sino = openimage(fname)
    na, nx = sino.data.shape
    start = float(sys.argv[2])
    step = float(sys.argv[3])
    centrepixel = int(sys.argv[4])
    end = na * step + start
    print("start, step, end", start, step, end)
    angles = numpy.arange(start, end, step)
    assert len(angles) == na, "%d %d ... %d" % (nx, na, len(angles))
    print("%.2f setup" % (time.time() - star))
    d = sino.data.T[:1200]
    o = fourier_radial(d.shape, angles)
    start = time.time()
    im = o.sino2im(d, centrepixel)
    sino.data = im
    sino.write(fname + "_r", force_type=numpy.float32)
    import pylab
    pylab.imshow(im, interpolation='nearest', aspect='auto')
    pylab.show()
    print("per image", time.time() - start)
Eliminate Ground Contact… Maximize Post-Foundation Longevity! DeKalb County, IL: Homeowner & experienced DIY guy, JW, was kind enough to send pics of his recently completed deck project. He purchased his materials from the friendly, knowledgeable folks at Lowe's #0059 of DeKalb, IL. With materials on-site & just 3 days away from his Saturday start, JW decided to search lowes.com to see if there was a 'product out there' that would rid him of the one concern he just couldn't shake... in-ground post failure.
#!/bin/python
# encoding=utf8
import sys
import platform


def kifu_converter(index_name, user_id, kifu_folder, save_folder):
    f = open(index_name)
    stack = []
    save_index = ""
    for line in f:
        line = line.decode('utf-8')
        line_r = line.split('\t')
        if line_r[2] != user_id and line_r[3] != user_id:
            continue
        save_index += line
        Num = line_r[0]
        fn_sgf = line_r[1][0:4] + "-" + line_r[1][5:7]
        DT = line_r[1]
        PW = line_r[2]
        PB = line_r[3]
        RE = line_r[5]
        RO = line_r[6]
        TM = line_r[7].split(' ')[1][0:-2] + " min"
        OT = line_r[7].split(' ')[0]
        OT = OT.split('/')[1] + "x " + OT.split('/')[0] + " sec"
        # Build the SGF header for this game record.
        SGF = "(;CA[UTF-8]GM[1]FF[4]AP[converter]\nSZ[19]"
        SGF += "GN[" + Num + "]\n"
        SGF += "DT[" + DT + "]\n"
        SGF += "PB[" + PB + "]" + "BR[9d]\n"
        SGF += "PW[" + PW + "]" + "WR[9d]\n"
        SGF += "RE[" + RE + "]"
        SGF += "RO[" + RO + "]"
        SGF += "KM[6.5]"
        SGF += "RU[Japanese]"
        SGF += "TM[" + TM + "]" + "OT[" + OT + "]\n"
        SGF += "PC[Tom]"
        stack.append([SGF, fn_sgf, Num])
    f.close()

    writer = open(user_id + ".index", "w")
    writer.write(save_index.encode('utf-8'))
    writer.close()

    # platform.system() is the correct OS-name check; platform.platform()
    # returns a longer string like 'Windows-10-...' and never equals "Windows".
    if platform.system() == "Windows":
        if kifu_folder[-1] != "\\":
            kifu_folder += "\\"
        if save_folder[-1] != "\\":
            save_folder += "\\"
    else:
        if kifu_folder[-1] != "/":
            kifu_folder += "/"
        if save_folder[-1] != "/":
            save_folder += "/"

    i = 0
    stack_size = len(stack)
    while i < stack_size:
        info = stack[i]
        SGF = info[0]
        fn_sgf = info[1]
        fn_open = fn_sgf
        Num = info[2]
        f_sgf = open(kifu_folder + fn_sgf)
        for line_sgf in f_sgf:
            line_sgf = line_sgf.decode('utf-8')
            split = line_sgf.split('\t')
            if split[0] == Num:
                SGF += split[1] + ")"
                writer = open(save_folder + Num + ".SGF", "w")
                writer.write(SGF.encode('utf-8'))
                writer.close()
                # Keep consuming games from the same kifu file if possible.
                if i + 1 >= stack_size:
                    break
                if fn_open != stack[i + 1][1]:
                    break
                i += 1
                info = stack[i]
                SGF = info[0]
                fn_sgf = info[1]
                Num = info[2]
        f_sgf.close()
        i += 1


if len(sys.argv) != 5:
    print "usage: python Converter_Tom.py Kifu.index user_id kifu_folder save_folder"
    print "example: python Converter_Tom.py Kifu.index 930115 kifu save"
else:
    index_name = sys.argv[1]
    user_id = sys.argv[2]
    kifu_folder = sys.argv[3]
    save_folder = sys.argv[4]
    kifu_converter(index_name, user_id, kifu_folder, save_folder)
As used by Europe's largest internet retailer, these near-A2-size 24 x 16 x 14" double wall cardboard boxes are made to a higher specification than many other manufacturers' standard boxes. They are made to protect your goods through transit, with a BCT rating of 2890N (more than twice as strong as the industry standard). We are the only company to have the FSC environmental logo printed on every box. This double wall cardboard box from Lil packaging is one of our best-selling postal boxes. Looking for strong cardboard boxes? The K30 double wall cardboard box is seriously stiff and strong, with a Box Compression Test (BCT) strength of 2890N. This box is more than twice as strong as the industry standard in the material-strength Box Compression Test; in comparison, a standard single wall box will have a material strength of between 1190 and 1270N. (The Box Compression Test measures the force required to crush a box with a size of 400 x 300 x 200mm.) These are perfect double wall cardboard boxes, ideal for shipping over 95% of all microwave ovens, home hi-fi systems, musical instruments and garden power tools. The Lil cardboard box range boasts variable depth for all sizes deeper than 6 inches. Simply tear down the easy-to-use vari-depth perforation on your K30 cardboard box, fold the flaps and away you go! All of our Lil cardboard boxes are manufactured using FSC® certified material for environmental sustainability. These K30 double wall cardboard boxes from Lil packaging are made to a higher specification than many other manufacturers' standard boxes to protect your goods through the post. Quality matters: don't waste time, money and customers' patience on inferior basic packaging when you can get high-quality cardboard packaging boxes right here that your customers will love. Sturdy double wall, strong cardboard boxes. Flat packed; make them up by taping the flaps. More attractive than a plain unprinted stock box. Large packaging boxes with variable depth, allowing adjustment of the height of your box.
from __future__ import division
import json
import mailchimp3
from mailchimp3 import MailChimp
from user_login_credentials import user_name
from user_login_credentials import api_key


class single_report:
    def __init__(self, report_data):
        self.campaign_id = report_data['id']
        self.subject_line = report_data['subject_line']
        self.list_name = report_data['list_name']
        self.send_time = report_data['send_time']
        self.total_sent = report_data['emails_sent']
        self.total_bounces = (report_data['bounces']['hard_bounces']
                              + report_data['bounces']['soft_bounces']
                              + report_data['bounces']['syntax_errors'])
        self.hard_bounces = report_data['bounces']['hard_bounces']
        self.soft_bounces = report_data['bounces']['soft_bounces']
        self.total_delivered = self.total_sent - self.total_bounces
        self.unsubscribes = report_data['unsubscribed']
        self.total_opens = report_data['opens']['opens_total']
        self.unique_opens = report_data['opens']['unique_opens']
        self.total_clicks = report_data['clicks']['clicks_total']
        self.unique_clicks = report_data['clicks']['unique_clicks']
        self.send_date = self.send_time[0:10]
        self.delivery_rate = str(self.total_delivered / self.total_sent * 100) + "%"
        self.open_rate = str("%.2f" % (report_data['opens']['open_rate'] * 100)) + "%"
        self.click_rate = str("%.2f" % (report_data['clicks']['click_rate'] * 100)) + "%"
        self.clickthru_rate = str("%.2f" % (self.total_clicks / self.total_delivered * 100)) + "%"
        #self.click_report = ""


def reports_result(date_range, campaign_name_search):
    client = MailChimp(user_name, api_key)
    all_json_data = client.reports.all(get_all=True)
    all_reports = all_json_data['reports']
    reports_in_daterange = all_reports  #[0:50]
    # TODO: create new method find_index_for_date_range to handle a simple
    # string date range input and provide the right index number for this filter
    matching_reports = [reports for reports in reports_in_daterange
                        if campaign_name_search in reports["campaign_title"]]
    return matching_reports


"""
def get_click_report(campaign_id):
    client = MailChimp(user_name, api_key)
    json_data = client.reports.click_details.all(campaign_id=campaign_id, get_all=False)
    click_report = json_data['urls_clicked']
    return click_report
"""


class click_report_object():
    def __init__(self, c_id):
        client = MailChimp(user_name, api_key)
        json_data = client.reports.click_details.all(campaign_id=c_id, get_all=False)
        links_clicked = json_data['urls_clicked']

        self.url_1 = links_clicked[0]["url"]
        self.total_clicks_1 = links_clicked[0]["total_clicks"]
        self.total_click_percent_1 = links_clicked[0]["click_percentage"]
        self.unique_clicks_1 = links_clicked[0]["unique_clicks"]
        self.unique_click_percent_1 = links_clicked[0]["unique_click_percentage"]

        self.url_2 = links_clicked[1]["url"]
        self.total_clicks_2 = links_clicked[1]["total_clicks"]
        self.total_click_percent_2 = links_clicked[1]["click_percentage"]
        self.unique_clicks_2 = links_clicked[1]["unique_clicks"]
        self.unique_click_percent_2 = links_clicked[1]["unique_click_percentage"]

        self.url_3 = links_clicked[2]["url"]
        self.total_clicks_3 = links_clicked[2]["total_clicks"]
        self.total_click_percent_3 = links_clicked[2]["click_percentage"]
        self.unique_clicks_3 = links_clicked[2]["unique_clicks"]
        self.unique_click_percent_3 = links_clicked[2]["unique_click_percentage"]

        self.url_4 = links_clicked[3]["url"]
        self.total_clicks_4 = links_clicked[3]["total_clicks"]
        self.total_click_percent_4 = links_clicked[3]["click_percentage"]
        self.unique_clicks_4 = links_clicked[3]["unique_clicks"]
        self.unique_click_percent_4 = links_clicked[3]["unique_click_percentage"]

        self.url_5 = links_clicked[4]["url"]
        self.total_clicks_5 = links_clicked[4]["total_clicks"]
        self.total_click_percent_5 = links_clicked[4]["click_percentage"]
        self.unique_clicks_5 = links_clicked[4]["unique_clicks"]
        self.unique_click_percent_5 = links_clicked[4]["unique_click_percentage"]

        self.url_6 = links_clicked[5]["url"]
        self.total_clicks_6 = links_clicked[5]["total_clicks"]
        self.total_click_percent_6 = links_clicked[5]["click_percentage"]
        self.unique_clicks_6 = links_clicked[5]["unique_clicks"]
        self.unique_click_percent_6 = links_clicked[5]["unique_click_percentage"]
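# ---------------------------------------------------------------------------
# Design note (added for illustration, not part of the original module): the
# numbered url_1 .. url_6 attribute blocks above could be collapsed into a
# list of dicts, which also avoids the IndexError the fixed six-link
# assumption causes on campaigns with fewer clicked links. A sketch:
class click_report_list(object):
    def __init__(self, c_id):
        client = MailChimp(user_name, api_key)
        json_data = client.reports.click_details.all(campaign_id=c_id, get_all=False)
        # One dict per clicked URL, however many the campaign has.
        self.links = [
            {
                'url': link['url'],
                'total_clicks': link['total_clicks'],
                'total_click_percent': link['click_percentage'],
                'unique_clicks': link['unique_clicks'],
                'unique_click_percent': link['unique_click_percentage'],
            }
            for link in json_data['urls_clicked']
        ]
# ---------------------------------------------------------------------------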
As the costs for wind, solar, and battery storage technologies continue to decline, futures in which variable renewable energy (VRE) resources reach percentages as high as 50% of demand are increasingly plausible. In this presentation, ABB Advisors present an analysis of bounds-stretching, what-if scenarios involving high-VRE penetrations of wind and solar capacity in ERCOT, covering:
- Changes to diurnal, seasonal, and annual peak and off-peak marginal clearing prices, including the incidence of negative- or zero-priced hours.
- Pure arbitrage battery storage builds: in what scenarios and at what installation costs are they economic?
- Wind and solar unit profitability over time.
"""Validation functions used by pynetdicom""" from collections import OrderedDict import logging from typing import Union, Dict, Optional, cast, Tuple import unicodedata from pydicom.dataset import Dataset from pydicom.uid import UID LOGGER = logging.getLogger('pynetdicom._validators') def validate_ae(value: str) -> Tuple[bool, str]: """Return ``True`` if `value` is a conformant **AE** value. An **AE** value: * Must be no more than 16 characters * Leading and trailing spaces are not significant * May only use ASCII characters, excluding ``0x5C`` (backslash) and all control characters Parameters ---------- value : str The **AE** value to check. Returns ------- Tuple[bool, str] A tuple of (bool, str), with the first item being ``True`` if the value is conformant to the DICOM Standard and ``False`` otherwise and the second item being a short description of why the validation failed or ``''`` if validation was successful. """ if not isinstance(value, str): return False, "must be str" if len(value) > 16: return False, "must not exceed 16 characters" # All characters use ASCII if not value.isascii(): return False, "must only contain ASCII characters" # Unicode category: 'Cc' is control characters invalid = [c for c in value if unicodedata.category(c)[0] == 'C'] if invalid or '\\' in value: return False, "must not contain control characters or backslashes" return True, '' def validate_ui(value: UID) -> Tuple[bool, str]: from pynetdicom import _config if not isinstance(value, str): return False, "must be pydicom.uid.UID" value = UID(value) if _config.ENFORCE_UID_CONFORMANCE: if value.is_valid: return True, "" return False, "UID is non-conformant" if not 0 < len(value): return False, "must not be an empty str" if not len(value) < 65: return False, "must not exceed 64 characters" return True, ""
Tamil superstar Rajnikanth’s daughter Soundarya Rajnikanth has got herself actively involved in various arenas of film production. She has almost completed shooting for ‘Sultan – The Warrior’, which is slated to hit screens in April 2010. The young producer is spending more money on the production than estimated and, as a result, seems to be running out of funds. Since Soundarya Rajnikanth is not interested in seeking financial help from her dad Rajnikanth, she is seeking help from her close circle.
# -*- coding: utf-8 -*-
# __author__ = chenchiyuan

from __future__ import division, unicode_literals, print_function
from django.db.models.signals import m2m_changed


def commodity_inventory_changed(sender, instance, *args, **kwargs):
    from libs.datetimes import dates_during
    from hawaii.apps.commodity.models import CommodityProduct, CommodityInventory

    inventory = CommodityInventory.objects.select_related().get(pk=instance.pk)
    weekdays = inventory.days.values_list("number", flat=True)
    dates = dates_during(from_date=inventory.begin, to_date=inventory.end,
                         weekdays=weekdays)
    copy_dates = dates[:]

    products = list(inventory.products.all())
    products_will_delete = []
    for product in products:
        # Compare the product's calendar date with the freshly computed valid
        # dates. The original mixed `product.datetime.date` (an unbound
        # accessor) and `product.date`; calling .date() on the datetime field
        # is assumed to be the intent here.
        if product.datetime.date() not in copy_dates:
            products_will_delete.append(product.id)
        else:
            dates.remove(product.datetime.date())

    # delete products that no longer fall on a valid date
    CommodityProduct.objects.filter(id__in=products_will_delete).delete()
    # create products for the remaining (new) dates
    CommodityProduct.bulk_create_products(inventory, dates)


def register_commodity_inventory_changed():
    from hawaii.apps.commodity.models import CommodityInventory
    m2m_changed.connect(commodity_inventory_changed,
                        sender=CommodityInventory.days.through,
                        dispatch_uid='commodity_inventory_changed')


def register_commodity_signals():
    register_commodity_inventory_changed()
    print("commodity signals registered")
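# ---------------------------------------------------------------------------
# Wiring sketch (added for illustration, not part of the original module):
# signal registration like register_commodity_signals() is conventionally
# triggered from the app's AppConfig.ready() so it runs exactly once at
# startup. The `.signals` module path below is a hypothetical assumption.
from django.apps import AppConfig


class CommodityConfig(AppConfig):
    name = 'hawaii.apps.commodity'

    def ready(self):
        # Import inside ready() to avoid touching models at import time.
        from .signals import register_commodity_signals
        register_commodity_signals()
# ---------------------------------------------------------------------------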
Representing nearly half of the total U.S. labor force, but less than a third of the manufacturing workforce, women are an underutilized talent pool that could play a pivotal role in driving economic growth. In 2012, The Manufacturing Institute launched the STEP Ahead initiative to honor and promote the role of women in the manufacturing industry through recognition, research and leadership.

Since World War II, women have played a key role in manufacturing. Women today are developing new technologies, building bridges, shaping infrastructure, creating products and running companies. But in an industry where the skills gap is expected to grow by an estimated two million jobs by 2020, women are absolutely essential to our future success.

There is a clear correlation between companies with more women in top-level positions and stronger financial performance, according to various studies ranging from Catalyst to the Center for Creative Leadership to Mercer Consulting. Companies that want to remain competitive not only need to make it a priority to recruit women into their workforce, but also must have a well-thought-out gender diversity strategy to attract, develop and retain talent.

Earlier this year, I had the opportunity to spend two days with 130 women leaders from across the U.S. who had been recognized for their accomplishments in manufacturing. Their stories ranged from a single mom who started on the shop floor and now leads a team of 75 people, to a young entrepreneur running a multimillion-dollar company. The highlight was seeing all these powerful women on stage, standing before 600+ attendees, yelling in unison, “we are manufacturing.” If our industry can harness this energy and talent, which sometimes goes untapped, we can drive innovation and uncover new opportunities to grow our businesses and economy.

So can #MFGWomen transform the industry? Absolutely. These women are the future engineers designing the next big products, the future machinists testing innovations on the shop floor, or the future CEOs running the company. Women are the future of manufacturing, and initiatives like STEP Ahead are the catalyst for transformation.
import graphics
import driver
import game
import random


class Snake(game.Game):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.reset()

    def reset(self):
        self.sprites.clear()
        self.playing = True
        self.snake = [graphics.Rectangle(1, 1, x=7, y=7)]
        self.direction = (1, 0)
        self.sprites.add(self.snake[0])
        self.food = graphics.Rectangle(1, 1, x=17, y=7)
        self.sprites.add(self.food)
        self.count = 0

    def loop(self):
        if self.playing:
            # Only allow turns perpendicular to the current direction.
            if 'a' in self.keys and not self.direction[0]:
                self.direction = (-1, 0)
            elif 'd' in self.keys and not self.direction[0]:
                self.direction = (1, 0)
            if 's' in self.keys and not self.direction[1]:
                self.direction = (0, 1)
            elif 'w' in self.keys and not self.direction[1]:
                self.direction = (0, -1)
            # Move the snake every second tick.
            self.count = (self.count + 1) % 2
            if not self.count:
                for i in range(len(self.snake) - 1, 0, -1):
                    self.snake[i].x = self.snake[i - 1].x
                    self.snake[i].y = self.snake[i - 1].y
                self.snake[0].x += self.direction[0]
                self.snake[0].y += self.direction[1]
                poses = set((s.x, s.y) for s in self.snake[1:])
                # Game over on hitting a wall or the snake's own body.
                if (self.snake[0].x < 0 or self.snake[0].x >= 112 or
                        self.snake[0].y < 0 or self.snake[0].y >= 15 or
                        (self.snake[0].x, self.snake[0].y) in poses):
                    self.sprites.clear()
                    self.sprites.add(graphics.TextSprite(
                        'GAME OVER. LEN:{}'.format(len(self.snake)),
                        width=5, height=7))
                    self.sprites.add(graphics.TextSprite(
                        'R TO RELOAD', width=5, height=7, y=8))
                    self.playing = False
                if (self.snake[0].x, self.snake[0].y) == (self.food.x, self.food.y):
                    # Eat the food: the food square becomes the new tail.
                    self.snake.append(self.food)
                    poses.add((self.snake[0].x, self.snake[0].y))
                    nx, ny = random.randrange(0, 112), random.randrange(0, 15)
                    while (nx, ny) in poses:
                        nx, ny = random.randrange(0, 112), random.randrange(0, 15)
                    self.food = graphics.Rectangle(1, 1, x=nx, y=ny)
                    self.sprites.add(self.food)
        else:
            if 'r' in self.keys:
                self.reset()
        super().loop()


GAME = Snake
Japanese Soaking Tub Guys in Ericson, NE offers quality soaking tub construction services. Thanks to our latest technology and pool of professionals, we offer several distinct construction methods and can custom-make bathtubs to fit our clients' needs. We offer a range of materials for our tubs, including acrylic, fiberglass, and wood, and our bathtubs also come in a variety of shapes and colours.

In our construction of soaking tubs we look at possible constraints: for example, physical constraints, where we weigh how reliably the tub will fit, as in the remodelling of a bathroom where an existing tub must be replaced. Sometimes the construction does not pose any constraints and allows you to choose virtually any kind of tub you want, as in new homes where the client does not have to conform to any pre-existing tub.

Japanese Soaking Tub Guys is known as the best pool company in Ericson, NE for the timely delivery of our products and services. Our team of professionals has developed a culture of keen attention to detail, and experience has enabled us to work quickly. We meet deadlines without delay or extensions of time, without compromising on the customer's specifications or the quality of our products. We are always available for hire and consultation, and customers in Ericson, NE can get in touch with us on 888-419-8802 for any tub works and services. With our distribution chain, we interact with our clients directly, without middlemen who may cause unnecessary delays and increased costs; we have a direct line of communication between us and our customers.

Japanese Soaking Tub Guys in Ericson, NE ensures that we do our business effectively. Effectiveness to us includes price competitiveness, material quality, design accuracy and timely delivery. Safety and health regulation compliance is top on our list of priorities. Due to our effectiveness (success in producing a desired or intended result) we have been contracted by schools, hotels and home owners.

Japanese Soaking Tub Guys looks at the purpose of a tub in Ericson, NE. There are general-purpose tubs: these are very common and, while not very fancy, serve well for their many purposes. Then there are our special soaking tubs, which are deeper than standard general-purpose bathtubs and allow you to immerse your whole body in the water. The sizes of our tubs vary depending on the amount of water they hold; claw-foot and vintage-style free-standing tubs fit this description because they have taller sides and hold a lot of water.

We have a myriad of designs and materials for our customers in Ericson, NE to choose from, each material with its own advantages in initial installation cost, subsequent maintenance and resistance to scratching and cracking. Acrylic is cheaper and easy to cast, as it is a form of plastic. Wood is low in subsequent maintenance cost and gives your bathroom an antique finish. Fiberglass gel coat is easy to shape and has a shiny finish, making your bathroom look a lot more attractive. Enamel on steel, also known as porcelain-on-steel, is lighter in weight, making it much easier for bathroom remodellers to muscle these tubs into newly remodelled bathrooms. Cultured marble is covered in a clear gel coat to provide a durable, easily cleaned and stain-resistant surface finish, which makes subsequent maintenance much easier, and it can be cast. Composite tubs are very durable.
#------------------------------------------------------------------------------
# Reynolds-Blender | The Blender add-on for Reynolds, an OpenFoam toolbox.
#------------------------------------------------------------------------------
# Copyright
#------------------------------------------------------------------------------
#     Deepak Surti ([email protected])
#     Prabhu R (IIT Bombay, [email protected])
#     Shivasubramanian G (IIT Bombay, [email protected])
#------------------------------------------------------------------------------
# License
#
#     This file is part of reynolds-blender.
#
#     reynolds-blender is free software: you can redistribute it and/or modify
#     it under the terms of the GNU General Public License as published by
#     the Free Software Foundation, either version 3 of the License, or
#     (at your option) any later version.
#
#     reynolds-blender is distributed in the hope that it will be useful, but
#     WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#     Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with reynolds-blender. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------------------------

# -----------
# bpy imports
# -----------
import bpy, bmesh
from bpy.props import (StringProperty,
                       BoolProperty,
                       IntProperty,
                       FloatProperty,
                       EnumProperty,
                       PointerProperty,
                       IntVectorProperty,
                       FloatVectorProperty,
                       CollectionProperty)
from bpy.types import (Panel,
                       Operator,
                       PropertyGroup,
                       UIList)
from bpy.path import abspath
from mathutils import Matrix, Vector

# --------------
# python imports
# --------------
import operator
import os

# ------------------------
# reynolds blender imports
# ------------------------
from reynolds_blender.gui.register import register_classes, unregister_classes
from reynolds_blender.gui.attrs import set_scene_attrs, del_scene_attrs
from reynolds_blender.gui.custom_operator import create_custom_operators
from reynolds_blender.gui.renderer import ReynoldsGUIRenderer
from reynolds_blender.sphere import SearchableSphereAddOperator
from reynolds_blender.add_block import BlockMeshAddOperator

# ----------------
# reynolds imports
# ----------------
from reynolds.dict.parser import ReynoldsFoamDict
from reynolds.foam.cmd_runner import FoamCmdRunner

# ------------------------------------------------------------------------
# operators
# ------------------------------------------------------------------------

def import_stl(self, context):
    scene = context.scene
    bpy.ops.import_mesh.stl(filepath=scene.stl_file_path,
                            axis_forward='Z', axis_up='Y')
    obj = scene.objects.active
    print('active objects after import ', obj)
    # -------------------------------------------------------------
    # TBD : OBJ IS NONE, if multiple objects are added after import
    # -------------------------------------------------------------
    scene.geometries[obj.name] = {'file_path': scene.stl_file_path}
    print('STL IMPORT: ', scene.geometries)
    return {'FINISHED'}


def import_obj(self, context):
    scene = context.scene
    bpy.ops.import_scene.obj(filepath=scene.obj_file_path)
    obj = scene.objects.active
    print('active objects after import ', obj)
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    # -------------------------------------------------------------
    # TBD : OBJ IS NONE, if multiple objects are added after import
    # -------------------------------------------------------------
    scene.geometries[obj.name] = {'file_path': scene.obj_file_path}
    print('OBJ IMPORT: ', scene.geometries)
    return {'FINISHED'}


def add_geometry_block(self, context):
    scene = context.scene
    bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
    obj = scene.objects.active
    # --------------------------
    # Start the console operator
    # --------------------------
    bpy.ops.reynolds.of_console_op()
    if obj is None:
        self.report({'ERROR'}, 'Please select a geometry')
        return {'FINISHED'}
    bpy.ops.mesh.primitive_cube_add()
    bound_box = bpy.context.active_object
    dims = obj.dimensions
    bound_box.dimensions = Vector((dims.x * 1.5, dims.y * 1.5, dims.z * 1.2))
    bound_box.location = obj.location
    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
    return {'FINISHED'}


class ModelsPanel(Panel):
    bl_idname = "of_models_panel"
    bl_label = "Import STL/OBJ Models"
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_category = "Tools"
    bl_context = "objectmode"

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        row = layout.row()
        row.operator(SearchableSphereAddOperator.bl_idname,
                     text='Sphere', icon='MESH_UVSPHERE')
        row.operator(BlockMeshAddOperator.bl_idname,
                     text='Box', icon='META_CUBE')
        # ----------------------------------------
        # Render Models Panel using YAML GUI Spec
        # ----------------------------------------
        gui_renderer = ReynoldsGUIRenderer(scene, layout, 'models.yaml')
        gui_renderer.render()


# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------

def register():
    register_classes(__name__)
    set_scene_attrs('models.yaml')
    create_custom_operators('models.yaml', __name__)


def unregister():
    unregister_classes(__name__)
    del_scene_attrs('models.yaml')


if __name__ == "__main__":
    register()
Thanks to everyone who helped install the green fence barrier on the north fence line. It goes a long way toward hiding all of the miscellaneous junk that Parks & Rec has hidden behind their shed. New gardener, Alex, jumped right in to help the bee team set the hives up for this season. The bees have been moved to the orchard area where they can work their magic pollinating the fruit trees. Alex is ready for the bees.
import httplib
import socket
import json


class UnixHTTPConnection(httplib.HTTPConnection):
    """ HTTPConnection object which connects to a unix socket. """

    def __init__(self, sock_path):
        httplib.HTTPConnection.__init__(self, "localhost")
        self.sock_path = sock_path

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self.sock_path)


class RequestHandler(object):
    def __init__(self):
        self.err_no = 0
        self.msg = ""

    def request(self, http_method, path, data):
        self.err_no = 0
        # Create the connection outside the try block so the finally clause
        # never closes an undefined name.
        conn = UnixHTTPConnection("/var/run/docker.sock")
        try:
            conn.connect()
            conn.request(http_method, path, body=json.dumps(data),
                         headers={"Content-type": "application/json"})
            response = conn.getresponse()
            if response.status != 200:
                self.err_no = response.status
            return response.read()
        except Exception as e:
            self.err_no = -1
            self.msg = str(e)
            return str(e)
        finally:
            conn.close()

    def has_error(self):
        # 200 and 204 both count as success.
        return self.err_no not in (0, 200, 204)


def printjson(jsonstr=None, obj=None):
    if obj is None:
        obj = json.loads(jsonstr)
    print(json.dumps(obj, indent=2, sort_keys=True))


def paramstr_from_dict(params):
    params_str = ""
    for key in params.keys():
        params_str += ("&" + key + "=" + str(params[key]))
    return params_str
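# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module),
# assuming a local Docker daemon is listening on /var/run/docker.sock.
# /containers/json is the Docker Engine API's container-list endpoint; the
# empty dict is serialized into the request body, which Docker ignores on GET.
if __name__ == "__main__":
    handler = RequestHandler()
    body = handler.request("GET", "/containers/json?all=1", {})
    if handler.has_error():
        print("request failed (err_no=%d): %s" % (handler.err_no, body))
    else:
        printjson(jsonstr=body)
# ---------------------------------------------------------------------------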
Oggy Oggy is planning to establish another 15 to 20 shops this year. Currently there are 10 company-owned stores in Devon and Cornwall and 25 franchise operations. The new outlets will be located nationwide and will be either take-away premises or a take-away with additional restaurant facilities. Full support is given to all franchisees, including fitting-out of premises, sourcing of equipment and comprehensive training. The most recent Oggy Oggy outlet to open was in Glasgow. Franchise director Paul Clark told British Baker that Oggy Oggy is currently in discussion with the owners of two bakery outlets looking to convert to the pasty franchise. Frozen pasties baked by Crantock Bakery are delivered to franchise premises weekly for bake-off. Shops also sell made-to-order baguettes, jacket potatoes, soups and coffee.
# Copyright 2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
A :term:`Console` resource represents an HMC.

In a paired setup with primary and alternate HMC, each HMC is represented as
a separate :term:`Console` resource.
"""

from __future__ import absolute_import

import time

from ._manager import BaseManager
from ._resource import BaseResource
from ._logging import logged_api_call
from ._utils import timestamp_from_datetime, divide_filter_args, \
    matches_filters
from ._storage_group import StorageGroupManager
from ._storage_group_template import StorageGroupTemplateManager
from ._user import UserManager
from ._user_role import UserRoleManager
from ._user_pattern import UserPatternManager
from ._password_rule import PasswordRuleManager
from ._task import TaskManager
from ._ldap_server_definition import LdapServerDefinitionManager
from ._unmanaged_cpc import UnmanagedCpcManager

__all__ = ['ConsoleManager', 'Console']


class ConsoleManager(BaseManager):
    """
    Manager providing access to the :term:`Console` representing the HMC this
    client is connected to.

    In a paired setup with primary and alternate HMC, each HMC is represented
    as a separate :term:`Console` resource.

    Derived from :class:`~zhmcclient.BaseManager`; see there for common
    methods and attributes.

    Objects of this class are not directly created by the user; they are
    accessible via the following instance variable of a
    :class:`~zhmcclient.Client` object:

    * :attr:`zhmcclient.Client.consoles`
    """

    def __init__(self, client):
        # This function should not go into the docs.
        # Parameters:
        #   client (:class:`~zhmcclient.Client`):
        #     Client object for the HMC to be used.
        super(ConsoleManager, self).__init__(
            resource_class=Console,
            class_name='console',
            session=client.session,
            parent=None,
            base_uri='/api/console',
            oid_prop='object-id',
            uri_prop='object-uri',
            name_prop='name',
            query_props=None,
            list_has_name=False)
        self._client = client
        self._console = None

    @property
    def client(self):
        """
        :class:`~zhmcclient.Client`: The client defining the scope for this
        manager.
        """
        return self._client

    @property
    def console(self):
        """
        :class:`~zhmcclient.Console`: The :term:`Console` representing the
        HMC this client is connected to.

        The returned object is cached, so it is looked up only upon first
        access to this property.

        The returned object has only the following properties set:

        * 'class'
        * 'parent'
        * 'object-uri'

        Use :meth:`~zhmcclient.BaseResource.get_property` or
        :meth:`~zhmcclient.BaseResource.prop` to access any properties
        regardless of whether they are already set or first need to be
        retrieved.
        """
        if self._console is None:
            self._console = self.resource_object('/api/console')
        return self._console

    @logged_api_call
    def list(self, full_properties=True, filter_args=None):
        """
        List the (one) :term:`Console` representing the HMC this client is
        connected to.

        Authorization requirements:

        * None

        Parameters:

          full_properties (bool):
            Controls whether the full set of resource properties should be
            retrieved, vs. only a short set consisting of 'object-uri'.

          filter_args (dict):
            This parameter exists for consistency with other list() methods
            and will be ignored.

        Returns:

          : A list of :class:`~zhmcclient.Console` objects, containing the
          one :term:`Console` representing the HMC this client is connected
          to.

        Raises:

          :exc:`~zhmcclient.HTTPError`
          :exc:`~zhmcclient.ParseError`
          :exc:`~zhmcclient.AuthError`
          :exc:`~zhmcclient.ConnectionError`
        """
        uri = self._base_uri  # There is only one console object.
        if full_properties:
            props = self.session.get(uri)
        else:
            # Note: The Console resource's Object ID is not part of its URI.
            props = {
                self._uri_prop: uri,
            }
        resource_obj = self.resource_class(
            manager=self,
            uri=props[self._uri_prop],
            name=props.get(self._name_prop, None),
            properties=props)
        return [resource_obj]


class Console(BaseResource):
    """
    Representation of a :term:`Console`.

    Derived from :class:`~zhmcclient.BaseResource`; see there for common
    methods and attributes.

    Objects of this class are not directly created by the user; they are
    returned from creation or list functions on their manager object
    (in this case, :class:`~zhmcclient.ConsoleManager`).
    """

    def __init__(self, manager, uri, name=None, properties=None):
        # This function should not go into the docs.
        #   manager (:class:`~zhmcclient.ConsoleManager`):
        #     Manager object for this resource object.
        #   uri (string):
        #     Canonical URI path of the resource.
        #   name (string):
        #     Name of the resource.
        #   properties (dict):
        #     Properties to be set for this resource object. May be `None` or
        #     empty.
        assert isinstance(manager, ConsoleManager), \
            "Console init: Expected manager type %s, got %s" % \
            (ConsoleManager, type(manager))
        super(Console, self).__init__(manager, uri, name, properties)
        # The manager objects for child resources (with lazy initialization):
        self._storage_groups = None
        self._storage_group_templates = None
        self._users = None
        self._user_roles = None
        self._user_patterns = None
        self._password_rules = None
        self._tasks = None
        self._ldap_server_definitions = None
        self._unmanaged_cpcs = None

    @property
    def storage_groups(self):
        """
        :class:`~zhmcclient.StorageGroupManager`: Manager object for the
        Storage Groups in scope of this Console.
        """
        # We do here some lazy loading.
        if not self._storage_groups:
            self._storage_groups = StorageGroupManager(self)
        return self._storage_groups

    @property
    def storage_group_templates(self):
        """
        :class:`~zhmcclient.StorageGroupTemplateManager`: Manager object for
        the Storage Group Templates in scope of this Console.
        """
        # We do here some lazy loading.
        if not self._storage_group_templates:
            self._storage_group_templates = StorageGroupTemplateManager(self)
        return self._storage_group_templates

    @property
    def users(self):
        """
        :class:`~zhmcclient.UserManager`: Access to the :term:`Users <User>`
        in this Console.
        """
        # We do here some lazy loading.
        if not self._users:
            self._users = UserManager(self)
        return self._users

    @property
    def user_roles(self):
        """
        :class:`~zhmcclient.UserRoleManager`: Access to the
        :term:`User Roles <User Role>` in this Console.
        """
        # We do here some lazy loading.
        if not self._user_roles:
            self._user_roles = UserRoleManager(self)
        return self._user_roles

    @property
    def user_patterns(self):
        """
        :class:`~zhmcclient.UserPatternManager`: Access to the
        :term:`User Patterns <User Pattern>` in this Console.
        """
        # We do here some lazy loading.
        if not self._user_patterns:
            self._user_patterns = UserPatternManager(self)
        return self._user_patterns

    @property
    def password_rules(self):
        """
        :class:`~zhmcclient.PasswordRuleManager`: Access to the
        :term:`Password Rules <Password Rule>` in this Console.
        """
        # We do here some lazy loading.
        if not self._password_rules:
            self._password_rules = PasswordRuleManager(self)
        return self._password_rules

    @property
    def tasks(self):
        """
        :class:`~zhmcclient.TaskManager`: Access to the :term:`Tasks <Task>`
        in this Console.
        """
        # We do here some lazy loading.
        if not self._tasks:
            self._tasks = TaskManager(self)
        return self._tasks

    @property
    def ldap_server_definitions(self):
        """
        :class:`~zhmcclient.LdapServerDefinitionManager`: Access to the
        :term:`LDAP Server Definitions <LDAP Server Definition>` in this
        Console.
        """
        # We do here some lazy loading.
        if not self._ldap_server_definitions:
            self._ldap_server_definitions = LdapServerDefinitionManager(self)
        return self._ldap_server_definitions

    @property
    def unmanaged_cpcs(self):
        """
        :class:`~zhmcclient.UnmanagedCpcManager`: Access to the unmanaged
        :term:`CPCs <CPC>` in this Console.
        """
        # We do here some lazy loading.
        if not self._unmanaged_cpcs:
            self._unmanaged_cpcs = UnmanagedCpcManager(self)
        return self._unmanaged_cpcs

    @logged_api_call
    def restart(self, force=False, wait_for_available=True,
                operation_timeout=None):
        """
        Restart the HMC represented by this Console object.

        Once the HMC is online again, this Console object, as well as any
        other resource objects accessed through this HMC, can continue to be
        used. An automatic re-logon will be performed under the covers,
        because the HMC restart invalidates the currently used HMC session.

        Authorization requirements:

        * Task permission for the "Shutdown/Restart" task.
        * "Remote Restart" must be enabled on the HMC.

        Parameters:

          force (bool):
            Boolean controlling whether the restart operation is processed
            when users are connected (`True`) or not (`False`). Users in
            this sense are local or remote GUI users. HMC WS API clients do
            not count as users for this purpose.

          wait_for_available (bool):
            Boolean controlling whether this method should wait for the HMC
            to become available again after the restart, as follows:

            * If `True`, this method will wait until the HMC has restarted
              and is available again. The
              :meth:`~zhmcclient.Client.query_api_version` method will be
              used to check for availability of the HMC.

            * If `False`, this method will return immediately once the HMC
              has accepted the request to be restarted.

          operation_timeout (:term:`number`):
            Timeout in seconds, for waiting for HMC availability after the
            restart. The special value 0 means that no timeout is set. `None`
            means that the default async operation timeout of the session is
            used. If the timeout expires when `wait_for_available=True`, a
            :exc:`~zhmcclient.OperationTimeout` is raised.

        Raises:

          :exc:`~zhmcclient.HTTPError`
          :exc:`~zhmcclient.ParseError`
          :exc:`~zhmcclient.AuthError`
          :exc:`~zhmcclient.ConnectionError`
          :exc:`~zhmcclient.OperationTimeout`: The timeout expired while
            waiting for the HMC to become available again after the restart.
        """
        body = {'force': force}
        self.manager.session.post(self.uri + '/operations/restart', body=body)
        if wait_for_available:
            time.sleep(10)
            self.manager.client.wait_for_available(
                operation_timeout=operation_timeout)

    @logged_api_call
    def shutdown(self, force=False):
        """
        Shut down and power off the HMC represented by this Console object.
While the HMC is powered off, any Python resource objects retrieved from this HMC may raise exceptions upon further use. In order to continue using Python resource objects retrieved from this HMC, the HMC needs to be started again (e.g. by powering it on locally). Once the HMC is available again, Python resource objects retrieved from that HMC can continue to be used. An automatic re-logon will be performed under the covers, because the HMC startup invalidates the currently used HMC session. Authorization requirements: * Task permission for the "Shutdown/Restart" task. * "Remote Shutdown" must be enabled on the HMC. Parameters: force (bool): Boolean controlling whether the shutdown operation is processed when users are connected (`True`) or not (`False`). Users in this sense are local or remote GUI users. HMC WS API clients do not count as users for this purpose. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = {'force': force} self.manager.session.post(self.uri + '/operations/shutdown', body=body) @logged_api_call def make_primary(self): """ Change the role of the alternate HMC represented by this Console object to become the primary HMC. If that HMC is already the primary HMC, this method does not change its role and succeeds. The HMC represented by this Console object must participate in a {primary, alternate} pairing. Authorization requirements: * Task permission for the "Manage Alternate HMC" task. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ self.manager.session.post(self.uri + '/operations/make-primary') @staticmethod def _time_query_parms(begin_time, end_time): """Return the URI query parameter string for the specified begin time and end time.""" query_parms = [] if begin_time is not None: begin_ts = timestamp_from_datetime(begin_time) qp = 'begin-time={}'.format(begin_ts) query_parms.append(qp) if end_time is not None: end_ts = timestamp_from_datetime(end_time) qp = 'end-time={}'.format(end_ts) query_parms.append(qp) query_parms_str = '&'.join(query_parms) if query_parms_str: query_parms_str = '?' + query_parms_str return query_parms_str @logged_api_call def get_audit_log(self, begin_time=None, end_time=None): """ Return the console audit log entries, optionally filtered by their creation time. Authorization requirements: * Task permission to the "Audit and Log Management" task. Parameters: begin_time (:class:`~py:datetime.datetime`): Begin time for filtering. Log entries with a creation time older than the begin time will be omitted from the results. If `None`, no such filtering is performed (and the oldest available log entries will be included). end_time (:class:`~py:datetime.datetime`): End time for filtering. Log entries with a creation time newer than the end time will be omitted from the results. If `None`, no such filtering is performed (and the newest available log entries will be included). Returns: :term:`json object`: A JSON object with the log entries, as described in section 'Response body contents' of operation 'Get Console Audit Log' in the :term:`HMC API` book.
Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ query_parms = self._time_query_parms(begin_time, end_time) uri = self.uri + '/operations/get-audit-log' + query_parms result = self.manager.session.get(uri) return result @logged_api_call def get_security_log(self, begin_time=None, end_time=None): """ Return the console security log entries, optionally filtered by their creation time. Authorization requirements: * Task permission to the "View Security Logs" task. Parameters: begin_time (:class:`~py:datetime.datetime`): Begin time for filtering. Log entries with a creation time older than the begin time will be omitted from the results. If `None`, no such filtering is performed (and the oldest available log entries will be included). end_time (:class:`~py:datetime.datetime`): End time for filtering. Log entries with a creation time newer than the end time will be omitted from the results. If `None`, no such filtering is performed (and the newest available log entries will be included). Returns: :term:`json object`: A JSON object with the log entries, as described in section 'Response body contents' of operation 'Get Console Security Log' in the :term:`HMC API` book. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ query_parms = self._time_query_parms(begin_time, end_time) uri = self.uri + '/operations/get-security-log' + query_parms result = self.manager.session.get(uri) return result @logged_api_call def list_unmanaged_cpcs(self, name=None): """ List the unmanaged CPCs of this HMC. For details, see :meth:`~zhmcclient.UnmanagedCpc.list`. Authorization requirements: * None Parameters: name (:term:`string`): Regular expression pattern for the CPC name, as a filter that narrows the list of returned CPCs to those whose name property matches the specified pattern. `None` causes no filtering to happen, i.e. all unmanaged CPCs discovered by the HMC are returned. Returns: : A list of :class:`~zhmcclient.UnmanagedCpc` objects. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ filter_args = dict() if name is not None: filter_args['name'] = name cpcs = self.unmanaged_cpcs.list(filter_args=filter_args) return cpcs @logged_api_call def list_permitted_partitions( self, full_properties=False, filter_args=None): """ List the permitted partitions of CPCs in DPM mode managed by this HMC. *Added in version 1.0; requires HMC 2.14.0 or later* Any CPCs in classic mode managed by the HMC will be ignored for this operation. The partitions in the result can be additionally limited by specifying filter arguments. Authorization requirements: * Object permission to the partition objects included in the result. Parameters: full_properties (bool): Controls whether the full set of resource properties for the returned Partition objects should be retrieved, vs. only a short set. filter_args (dict): Filter arguments for limiting the partitions in the result. `None` causes no filtering to happen. The following filter arguments are supported by server-side filtering: * name (string): Limits the result to partitions whose name match the specified regular expression. * type (string): Limits the result to partitions with a matching "type" property value (i.e. "linux", "ssc", "zvm"). * status (string): Limits the result to partitions with a matching "status" property value. 
* has-unacceptable-status (bool): Limits the result to partitions with a matching "has-unacceptable-status" property value. * cpc-name (string): Limits the result to partitions whose CPC has a name that matches the specified regular expression. Any other valid property of partitions is supported by client-side filtering: * <property-name>: Any other property of partitions. Returns: : A list of :class:`~zhmcclient.Partition` objects. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ query_parms, client_filters = divide_filter_args( ['name', 'type', 'status', 'has-unacceptable-status', 'cpc-name'], filter_args) # Perform the operation with the HMC, including any server-side # filtering. uri = self.uri + '/operations/list-permitted-partitions' + query_parms result = self.manager.session.get(uri) partition_objs = [] if result: partition_items = result['partitions'] for partition_item in partition_items: # The partition items have the following partition properties: # * name, object-uri, type, status, has-unacceptable-status # And the following properties for their parent CPC: # * cpc-name (CPC property 'name') # * cpc-object-uri (CPC property 'object-uri') # * se-version (CPC property 'se-version') # Create a 'skeleton' local Cpc object we can hang the # Partition objects off of, even if the user does not have # access permissions to these CPCs. Note that different # partitions can have different parent CPCs. cpc = self.manager.client.cpcs.find_local( partition_item['cpc-name'], partition_item['cpc-object-uri'], { 'se-version': partition_item['se-version'], }, ) partition_obj = cpc.partitions.resource_object( partition_item['object-uri'], { 'name': partition_item['name'], 'type': partition_item['type'], 'status': partition_item['status'], 'has-unacceptable-status': partition_item['has-unacceptable-status'], }, ) # Apply client-side filtering if matches_filters(partition_obj, client_filters): partition_objs.append(partition_obj) if full_properties: partition_obj.pull_full_properties() return partition_objs @logged_api_call def list_permitted_lpars( self, full_properties=False, filter_args=None): """ List the permitted LPARs of CPCs in classic mode managed by this HMC. *Added in version 1.0; requires HMC 2.14.0 or later* Any CPCs in DPM mode managed by the HMC will be ignored for this operation. The LPARs in the result can be additionally limited by specifying filter arguments. Authorization requirements: * Object permission to the LPAR objects included in the result. Parameters: full_properties (bool): Controls whether the full set of resource properties for the returned LPAR objects should be retrieved, vs. only a short set. filter_args (dict): Filter arguments for limiting the LPARs in the result. `None` causes no filtering to happen. The following filter arguments are supported by server-side filtering: * name (string): Limits the result to LPARs whose name match the specified regular expression. * activation-mode (string): Limits the result to LPARs with a matching "activation-mode" property value. * status (string): Limits the result to LPARs with a matching "status" property value. * has-unacceptable-status (bool): Limits the result to LPARs with a matching "has-unacceptable-status" property value. * cpc-name (string): Limits the result to LPARs whose CPC has a name that matches the specified regular expression. 
Any other valid property of LPARs is supported by client-side filtering: * <property-name>: Any other property of LPARs. Returns: : A list of :class:`~zhmcclient.Lpar` objects. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ # Note: 'activation-mode' (not 'type') is the documented server-side # filter property for LPARs, per the parameter description above. query_parms, client_filters = divide_filter_args( ['name', 'activation-mode', 'status', 'has-unacceptable-status', 'cpc-name'], filter_args) # Perform the operation with the HMC, including any server-side # filtering. uri = self.uri + '/operations/list-permitted-logical-partitions' + \ query_parms result = self.manager.session.get(uri) lpar_objs = [] if result: lpar_items = result['logical-partitions'] for lpar_item in lpar_items: # The LPAR items have the following LPAR properties: # * name, object-uri, activation-mode, status, # has-unacceptable-status # And the following properties for their parent CPC: # * cpc-name (CPC property 'name') # * cpc-object-uri (CPC property 'object-uri') # * se-version (CPC property 'se-version') # Create a 'skeleton' local Cpc object we can hang the # Lpar objects off of, even if the user does not have # access permissions to these CPCs. Note that different # LPARs can have different parent CPCs. cpc = self.manager.client.cpcs.find_local( lpar_item['cpc-name'], lpar_item['cpc-object-uri'], { 'se-version': lpar_item['se-version'], }, ) lpar_obj = cpc.lpars.resource_object( lpar_item['object-uri'], { 'name': lpar_item['name'], 'activation-mode': lpar_item['activation-mode'], 'status': lpar_item['status'], 'has-unacceptable-status': lpar_item['has-unacceptable-status'], }, ) # Apply client-side filtering if matches_filters(lpar_obj, client_filters): lpar_objs.append(lpar_obj) if full_properties: lpar_obj.pull_full_properties() return lpar_objs
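To make the Console operations above concrete, here is a brief usage sketch. It is illustrative only: the HMC host name and credentials are placeholders and the name filter is an arbitrary example; the Session, Client, console property, and list_permitted_partitions() calls are the APIs documented above.

import zhmcclient

session = zhmcclient.Session('hmc.example.com', 'myuser', 'mypassword')  # placeholder host/credentials
client = zhmcclient.Client(session)

# The Console object is cached on first access (see ConsoleManager.console).
console = client.consoles.console
print(console.get_property('name'))

# Server-side filtering on the partition name; properties beyond the short
# set are pulled only when full_properties=True.
partitions = console.list_permitted_partitions(filter_args={'name': r'^prod-.*'})
for partition in partitions:
    print(partition.name, partition.get_property('status'))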
In Coffee Bay, on the Indian Ocean coast of South Africa, some 250 kilometres south of Durban, a mother takes her son to be vaccinated at the closest public clinic, in Mapuzi. She has tried three times now, but there are never vaccines available. After these three unsuccessful trips, she gives up and does not return to the clinic again. Her baby has received only the vaccines given to him at birth. When the child becomes ill, his chastened mother decides to take a taxi and visit a private clinic in Mqanduli, 65 kilometres away. The doctor gives them a prescription for the child’s cough and sends them home. The child’s health does not improve, however, and his mother decides to take him to hospital. But it is too late. The baby dies before they arrive. “The mother could not clearly describe the symptoms, although I suspect that her son died from pneumonia”, said Karl le Roux, doctor and researcher from Zithuele hospital. In 2013, during a study that analysed immunisation and the impact of vaccine shortages in the community, le Roux interviewed this mother in the rondavel (a traditional South African hut) where the family lived, near the estuary of the river Mthatha. “It’s more likely that the baby could have survived” if they had gone to hospital straight away, the doctor said, adding that “it is very difficult to know whether this was a pneumonia that was vaccine-preventable”. Due to these stockouts the mother “lost faith in the public health sector”, said le Roux. The lack of vaccines is one of the biggest problems for immunisation in South Africa. The shortage of antigens is influenced by external problems, such as issues with pharmaceutical production, and internal problems, such as poor stock management, poor training and staff shortages. It is a complex subject in which the causes converge and many parties are at fault. Everyone agrees that these shortages are a global problem, because vaccines are developed by just a handful of companies, those with sufficient human and financial capacity. If one of these companies has production problems, countries have few alternatives to satisfy demand. In periods of high demand, the market becomes saturated and the companies are unable to supply every country. Whilst true, this explanation confines liability to uncontrollable, external factors. Internal factors go uncurbed and are reported only by healthcare staff. On a global level, according to World Health Organization (WHO) data, 97 countries reported that they were left without at least one essential vaccine in 2015 (the last year for which data is available), either nationally or in certain areas. In Africa, 66% of countries acknowledged supply disruption. In the Americas and Europe, one in every two countries admitted having suffered this problem. Taking into account the fact that not all countries communicate this information to the WHO, the real number of countries without vaccines at any moment could be greater still. The anti-tuberculosis BCG vaccine is a prime example of a supply problem, caused by production failures at one of the main laboratories producing this vaccine, the Statens Serum Institut, based in Denmark. Because of the global shortage, 54 countries reported stockouts in 2015. The majority of these countries are in Africa (21) and Europe (14). In 2015, UNICEF, which supplies this vaccine to countries where tuberculosis is endemic, reported a global supply shortage of 16.5 million doses.
Beyond the crisis that year, many countries have been lacking the BCG vaccine for over a decade. Currently, stock problems with the tuberculosis vaccine persist, including in countries that had never previously reported a shortage of the antigen to the WHO, such as Spain, Australia and France. In the case of France, where the vaccine has not been obligatory since 2007, Sanofi Pasteur (the French pharmaceutical company specialised in vaccines) diverted stock destined for the Polish market in order to satisfy domestic demand. This diversion was introduced as an “interim measure” in March 2016. In South Africa, this vaccine is especially relevant and its shortage is “a real problem”, according to Doctor Anban Pillay, Deputy Director of the Department of Health. Although its effectiveness is limited against pulmonary tuberculosis, its most common manifestation, the WHO recommends its administration to newborn babies in countries where the incidence (morbidity) of tuberculosis is high. In South Africa, morbidity is huge: with 834 cases for every 100,000 people, the country has the highest rates of this disease, often compounded by the impact of HIV. “By ourselves we could never solve the BCG problem”, acknowledged the Deputy Director of the Department of Health. To tackle it, the last World Health Assembly passed a resolution to deal with the lack of medicines and vaccines. The document advocates the development of a new centralised repository of stockout data. With that, “the WHO is supposed to go to manufacturers and say ‘here is the entire volume of the world, can you please start producing to be sure that you get the business?’”, explains Anban Pillay. Anban Pillay, from the National Department of Health of South Africa. | Video: Serusha Govender. The BCG vaccine supply crisis is worsened by the concentration of production in a few companies. Low profit margins make it an unattractive venture for other laboratories that have the capacity to produce it, Pillay points out. The oligopoly of the vaccine industry is even more concentrated: Sanofi Pasteur, Merck, Pfizer and GlaxoSmithKline take around 80% of the global proceeds from the sales of these products. Bexsero, the only vaccine available in the European Union which protects against Meningitis B, the most common serogroup on the continent, is currently suffering distribution problems. The antigen, produced by GlaxoSmithKline (GSK), is approved in over 35 countries, among them the EU member states, Argentina, Chile, Uruguay, Canada, Brazil and the USA. The USA also relies on another vaccine for the disease, Trumenba. In Spain, where the vaccine is recommended but unsubsidised, pharmacies offer waiting lists of up to six months to obtain a dose (at a retail price of €105). GSK Spain asserts that it will receive new doses in the second quarter of 2017. Other GSK subsidiaries, such as those in Australia and the United Kingdom, have also reported problems with the manufacture of this vaccine, due to “unanticipated global demand”. In South Africa, provinces that default on their payments for vaccines are another of the missing links in the supply chain, according to Shabir Madhi, director of the National Institute for Communicable Diseases (NICD). The South African government negotiates the prices, and Biovac, a public-private partnership, receives the raw materials from the pharmaceutical companies. At its premises in Cape Town, Biovac fills vials and distributes the orders it receives from the provinces.
Both Madhi and Pillay claim that Biovac has frozen supply to certain provinces which have not paid. Morena Makhoana, CEO of The Biovac Institute. | Video: Anne Gonschorek. Once the vaccines have been distributed to South African clinics, the stock-management problems begin. Karl le Roux, doctor and researcher from Zithuele hospital, analysed the causes and effects of vaccine stockouts in the OR Tambo district, in the Eastern Cape province. According to his research, the shortages are due to a lack of space in the clinics, inadequate stock-management systems, overworked staff, poorly trained nurses (who order inadequate quantities), a lack of accountability at the provincial stores, and theft. Moreover, the state of the roads and the distance from storage facilities to healthcare centres matter even more in rural areas. Le Roux notes, for example, that there are 90 kilometres of very poor roads between Zithuele hospital and its medicine depot. For this rural doctor, “it is frustrating not being able to offer a patient what you know they need”. As soon as there is a shortage of vaccines, the centres are swamped, as patients have to return. In his research, Karl le Roux shows that, according to their mothers, the lack of vaccines in clinics meant that 56% of children surveyed did not complete their courses of immunisation. Johann Van der Heever, who managed the South African immunisation programme for 11 years, blames the lack of control squarely on the Department of Health: “You have to staff the chain adequately, you have to provide equipment, supervision and monitoring, which is a function of the National Department”. Johann van der Heever, former manager of the immunisation programme of South Africa. | Video: Serusha Govender. In an article in the South African Medical Journal, which cites Van der Heever’s criticisms of the national immunisation programme and condemns the government’s neglect of the situation, the national manager of the cold chain (a system imperative for the conservation of the antigens) admits that they were only able to inspect between 5% and 10% of facilities. Van der Heever regrets that, despite the fact that controlling the deterioration of vaccines was one of his priorities, the wastage is unknown, as measuring it “requires very good supervision and support”. “One of the big problems that we have is manufacturers have been aware that they have a shortage but they have not been willing to share that information, unlike in the United States or in Europe, where they are obliged by law”, alleged the Deputy Director of the Department of Health, Anban Pillay. Given the time required to produce each vaccine, both internal and external information is imperative in order to avoid stockouts. “You can only be foresighted if you have the information”, stated the healthcare manager. The only way to get close to the real situation is through the Stop Stockouts Project (SSP), a platform that monitors the lack of medicines and vaccines in South Africa. Behind the project are organisations such as Médecins Sans Frontières, the Treatment Action Campaign and the Rural Health Advocacy Project, which created the campaign in light of the persistent lack of antiretroviral drugs in the country with the highest number of HIV cases in the world (over 6.9 million people): one in five adults is seropositive.
At the end of each year, SSP conducts a telephone interview about vaccines and stockouts with over 2,400 of the 3,547 healthcare centres it has identified across the country. Whilst Jacob Zuma’s government only reported its stock issues with the BCG vaccine to the WHO in 2015, Stop Stockouts recorded that 9% of healthcare centres contacted did not have the combined vaccine (composed of the DPT vaccine and the vaccines against polio, Hib and hepatitis B) available; 4% did not have the rotavirus vaccine available and 3% had a shortage of measles vaccines. The data is indispensable: for tracking the stocks of vaccines in storage, for knowing when a clinic should place an order, and for designing effective vaccination campaigns that avert possible outbreaks in communities with low immunisation rates. Coordinated administration and the shouldering of responsibilities are vital to avoid shortages and stockouts of vaccines nationwide. But above all, South Africa cannot allow its people to lose faith in its healthcare system. Shabir Madhi, Executive Director of the National Institute for Communicable Diseases. | Video: Serusha Govender. Edited by: Miguel Ángel Gavilanes. Problems with data in South Africa do not only affect stock levels; even the most basic data, such as immunisation rates, are not reliable. The lack of an up-to-date and detailed survey on vaccination coverage makes it impossible to control and avoid possible outbreaks, according to Shabir Madhi of the National Institute for Communicable Diseases in South Africa, as it is not known in which areas immunisation is lower than recommended. “We do not know the actual coverage against measles,” laments Madhi. In addition, the WHO cuts the coverage levels that South Africa reports to it by more than 20 points. Madhi, who estimates that the real figure must lie between the two values, urges that a new survey be carried out to resolve this situation. This article has been published simultaneously as part of a data journalism initiative called ‘Medicamentalia – Vaccines‘, brought to you by the Civio Foundation. It has been funded by the Journalism Grants programme of the European Journalism Centre and the Bill & Melinda Gates Foundation. None of these organisations had prior access to the content, nor influenced it editorially in any way.
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Delete node pool command.""" import argparse from googlecloudsdk.calliope import actions from googlecloudsdk.calliope import base from googlecloudsdk.core import log from googlecloudsdk.core import properties from googlecloudsdk.core.console import console_io @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class Delete(base.Command): """Delete an existing node pool in a running cluster.""" @staticmethod def Args(parser): """Register flags for this command. Args: parser: An argparse.ArgumentParser-like object. It is mocked out in order to capture some information, but behaves like an ArgumentParser. """ parser.add_argument( 'name', metavar='NAME', help='The name of the node pool to delete.') parser.add_argument( '--timeout', type=int, default=1800, help=argparse.SUPPRESS) parser.add_argument( '--wait', action='store_true', default=True, help='Poll the operation for completion after issuing a delete ' 'request.') parser.add_argument( '--cluster', help='The cluster from which to delete the node pool.', action=actions.StoreProperty(properties.VALUES.container.cluster)) def Run(self, args): """This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later. """ adapter = self.context['api_adapter'] pool_ref = adapter.ParseNodePool(args.name) console_io.PromptContinue( message=('The following node pool will be deleted.\n' '[{name}] in cluster [{clusterId}] in zone [{zone}]') .format(name=pool_ref.nodePoolId, clusterId=pool_ref.clusterId, zone=adapter.Zone(pool_ref)), throw_if_unattended=True, cancel_on_no=True) # Make sure it exists (will raise appropriate error if not) adapter.GetNodePool(pool_ref) op_ref = adapter.DeleteNodePool(pool_ref) if args.wait: adapter.WaitForOperation( op_ref, 'Deleting node pool {0}'.format(pool_ref.clusterId), timeout_s=args.timeout) log.DeletedResource(pool_ref) return op_ref
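For reference, an invocation of this alpha command might look like the line below; the node pool, cluster, and zone names are placeholders, and the exact command group path may differ between SDK releases.

gcloud alpha container node-pools delete my-pool --cluster=my-cluster --zone=us-central1-a

Since --wait defaults to true, the command prompts for confirmation and then polls the delete operation until it completes, mirroring the WaitForOperation call in Run() above.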
Lake Champlain is considered the most historic body of water in North America. It was a key strategic location for troops in the French and Indian War, the American Revolution and the War of 1812. Over the years, it has provided sustenance both winter and summer for trade and recreation. The lake is named for Samuel de Champlain, who, in 1609, was the first European to explore it. A documentary on its history by Caro Thompson, Champlain: The Lake Between, is available from Vermont Public Television: www.vpt.org or (800) 866-1666. According to the Vermont Department of Fish & Wildlife, the lake provides some of the best fishing and the greatest variety of freshwater fish in the Northeast. A survey the U.S. Fish & Wildlife Service conducts every five years reports that anglers spent nearly $62 million in Vermont during 2006. Vermonters — 70,000 of them, or 14% of the population — spend time fishing. Lake Champlain International Inc. organizes several fishing tournaments a year: the LCI Father’s Day Derby; an all-season tournament running from May through September; a Little Anglers Derby in June; and the Lake Champlain Bass Open in September. www.mychamplain.net; 879-3466. There’s a fine for fishing without a license if you’re age 15 or older. The Vermont Fish & Wildlife Department sells them online or through authorized agents — resident, $22; nonresident, $45. Check the website for other types and prices: www.vtfishandwildlife.com/ licenses.cfm. Ice fishing, a popular winter pursuit, begins with the onset of safe ice. The average date is Feb. 12 for the broad lake; shallower bays freeze earlier. By law, ice-fishing shanties must be removed from the ice before the ice becomes unsafe or loses its ability to support the shanty, or before the last Sunday in March at the latest. Ice fishermen and recreation seekers sometimes drive their cars or trucks onto the frozen lake. Under state law, a frozen lake counts as a public highway. The police keep watch. Drivers going over 50 miles per hour or driving recklessly can be ticketed or even arrested. So can driving under the influence of alcohol or drugs. Almost every year, at least one vehicle plunges through the ice. Only the very lucky survive. The Lake Champlain Underwater Historic Preserve, established by Vermont and New York, provides public access for divers to some of the lake’s historic shipwrecks. Registration is required for every dive. Contact the Lake Champlain Maritime Museum for information. www.lcmm.org; 475-2022. The minimum age for operating a boat in Vermont is 12. Operators of personal watercraft (such as Jet Skis) must be 16. In New York, the minimum age for both is 10, with certificate. Without it, the minimum age in New York for operators of personal watercraft is 24. Vermont requires safety education (and a safe-boating certificate) for boat operators born after 1974 who are 10 years of age or older, but a license is not required to operate a boat. The Vermont State Police offer the course, which can be taken online: www.boat-ed.com/vt/vt_state_police.htm. Vermont registers approximately 40,000 boats a year through the department of motor vehicles. Vessels must be registered. The noise of your boat’s engine may not exceed 82 decibels at 50 feet. Water skiing is prohibited from one-half hour after sunset to one-half hour before sunrise. Skiers must wear a personal flotation device. Vermont does not permit self-propelled skis. The lake is 120 miles long and 12 miles across at its widest point. 
It flows north from Whitehall, N.Y., to the Richelieu River in Quebec and eventually to the St. Lawrence River and the Atlantic Ocean. It is divided into five distinct areas, each with its own physical and chemical characteristics and water quality. They are the South Lake, the Main (or Broad) Lake, Malletts Bay, the Inland Sea, and Missisquoi Bay. The lake’s basin (or watershed or drainage area) covers 8,234 square miles, 56% of which lies in Vermont, with 37% in New York, and 7% in the Province of Quebec. About 571,000 people live in the Lake Champlain basin. About 68% live in Vermont, 27% in New York, and 5% in Quebec. Tributaries that drain the basin contribute over 90% of the water that enters the lake. Major tributaries in Vermont are: the Missisquoi, Lamoille, Winooski, and LaPlatte rivers and Otter Creek; in New York: the Great Chazy, Saranac, Ausable, and Bosquet rivers. Approximately 200,000 people, or about a third of the basin’s residents, depend on the lake for drinking water; about 4,150 draw water directly from the lake for individual use. There are more than 40 marinas around the lake. The lake’s shoreline is 587 miles long. It has 435 square miles of surface water and an average depth of 64 feet; 400 feet at its deepest. More than 70 islands dot the lake. The mean annual water level is 95.5 feet above sea level. There are about 54 public or commercial beaches and 10 private beaches on the lake’s shores. Water quality and invasive species are the environmental concerns most often cited by people whose lives and livelihoods depend on the lake. Water quality has improved in the last 20 years as a result of required industrial waste treatment and a large investment of state, federal, municipal, and private funds for sewage treatment facilities. Concerns remain, however, over pollution from urban and agricultural areas, particularly phosphorus from farm fertilizer runoff and mercury poisoning via acid rain. High phosphorus levels have produced algal blooms in parts of the lake, and toxic substances such as PCBs and mercury have resulted in fish consumption advisories for some fish. Other issues include the impact to fish and wildlife from nuisance non-native aquatic species and wetland loss. The Lake Champlain Basin Program has an ongoing Opportunities for Action management plan. Details can be found at the organization’s website, plan.lcbp.org.
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.contrib.auth.models import User

from oluch.models import Submit

# Characters permitted in usernames.
ALLOWED_USERNAME_CHARS = set(
    'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789')


class SubmitForm(forms.Form):
    file = forms.FileField(label=_("Choose a file"))
    # forms.HiddenInput is only a widget; a hidden form *field* is needed
    # for it to take effect on the form.
    user = forms.CharField(required=False, widget=forms.HiddenInput())

    def __init__(self, choices, *args, **kwargs):
        super(SubmitForm, self).__init__(*args, **kwargs)
        # The available problems vary per contest, so the field is added
        # at instantiation time.
        self.fields['problem'] = forms.ChoiceField(choices, label=_("Problem"))


class UserInfoForm(forms.Form):
    username = forms.SlugField(max_length=20, label=_("Login"), widget=forms.TextInput(attrs={'size': '40'}))
    password1 = forms.SlugField(max_length=20, widget=forms.PasswordInput(attrs={'size': '40'}), label=_("Password"))
    password2 = forms.SlugField(max_length=20, widget=forms.PasswordInput(attrs={'size': '40'}), label=_("Password again"))
    lastname = forms.CharField(max_length=100, required=False, label=_("Last name"), widget=forms.TextInput(attrs={'size': '40'}))
    firstname = forms.CharField(max_length=100, required=False, label=_("Given name"), widget=forms.TextInput(attrs={'size': '40'}))
    grade = forms.CharField(max_length=1000, required=True, label=_("Grade"), widget=forms.TextInput(attrs={'size': '40'}))
    school = forms.CharField(max_length=1000, required=True, label=_("School"), widget=forms.TextInput(attrs={'size': '40'}))
    maxgrade = forms.CharField(max_length=1000, required=True, label=_("The last grade at your high school"), widget=forms.TextInput(attrs={'size': '40'}))
    city = forms.CharField(max_length=1000, required=True, label=_("City/settlement"), widget=forms.TextInput(attrs={'size': '40'}))
    country = forms.CharField(max_length=1000, required=True, label=_("Country"), widget=forms.TextInput(attrs={'size': '40'}))

    def clean(self):
        '''Custom cross-field validation for the form.'''
        super(UserInfoForm, self).clean()  # idiomatic super() call (was super(forms.Form, self))
        cleaned = self.cleaned_data
        if 'password1' in cleaned and 'password2' in cleaned:
            if cleaned['password1'] != cleaned['password2']:
                # Attach the errors to the actual password fields so they are
                # displayed (the old 'password'/'password_confirm' keys did
                # not match any field).
                self._errors['password1'] = [_('Passwords must match.')]
                self._errors['password2'] = [_('Passwords must match.')]
        username = cleaned.get('username', '')
        if not username or set(username) - ALLOWED_USERNAME_CHARS:
            self._errors['username'] = [_('Bad login.')]
        elif User.objects.filter(username=username).count() > 0:
            self._errors['username'] = [_('User with such username already exists.')]
        try:
            int(cleaned['grade'])
        except (KeyError, ValueError):
            self._errors['grade'] = [_('Grade must be a number.')]
        try:
            int(cleaned['maxgrade'])
        except (KeyError, ValueError):
            self._errors['maxgrade'] = [_('Grade must be a number.')]
        # Capitalize the names, guarding against empty optional values
        # (the old code raised IndexError on an empty name).
        for field in ('lastname', 'firstname'):
            value = cleaned.get(field, '')
            if value:
                cleaned[field] = value[0].upper() + value[1:]
        return cleaned
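A view might wire SubmitForm up along the following lines. This is a sketch under stated assumptions: the Problem model, the handle_submission helper, and the template path are hypothetical stand-ins; only the (choices, data, files) calling convention comes from the form definition above.

from django.shortcuts import render

def submit_view(request):
    # Problem is a hypothetical model standing in for however oluch
    # stores its contest problems.
    choices = [(p.pk, unicode(p)) for p in Problem.objects.all()]
    if request.method == 'POST':
        form = SubmitForm(choices, request.POST, request.FILES)
        if form.is_valid():
            # handle_submission is a hypothetical helper.
            handle_submission(request.user,
                              form.cleaned_data['problem'],
                              form.cleaned_data['file'])
    else:
        form = SubmitForm(choices)
    return render(request, 'oluch/submit.html', {'form': form})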
import os
import csv

from datetime import timedelta
from dateutil import parser
import exifread
from pymongo import MongoClient

client = MongoClient()
root = '/home/ubuntu/Pictures/GoPro/'

# Start from a clean slate on each run (pymongo 2.x-style API, matching the
# save() calls below; newer pymongo uses delete_many/insert_one).
client.gopro.gpx.remove({})
client.gopro.photos.remove({})

# Shift the track's timestamps by 5 hours, presumably to convert UTC to the
# camera's local time (UTC-5).
UTC_OFFSET = timedelta(hours=5)

with open(root + 'Gopro2.csv') as f:
    first_line = f.readline().strip()
    if first_line == '"Name","Activity type","Description"':
        # Skip the 2nd & 3rd preamble lines as well.
        f.readline()
        f.readline()
    else:
        # No preamble: rewind so DictReader sees the column headers.
        f.seek(0)
    # Read the CSV track points.
    reader = csv.DictReader(f)
    for line in reader:
        dt = parser.parse(line['Time']) - UTC_OFFSET
        store = {
            'lat': line['Latitude (deg)'],
            'lng': line['Longitude (deg)'],
            'dt': dt,
            'bearing': line['Bearing (deg)'],
            'altitude': line['Altitude (m)'],
            'accuracy': line['Accuracy (m)'],
        }
        client.gopro.gpx.save(store)

# Read the photos' EXIF timestamps.
for filename in os.listdir(root):
    if filename.endswith('.JPG'):
        path = root + filename
        # exifread needs the file opened in binary mode.
        with open(path, 'rb') as f:
            tags = exifread.process_file(f)
        dt = parser.parse(str(tags['EXIF DateTimeOriginal']))
        store = {
            'dt': dt,
            'filename': filename,
            'path': path,
        }
        client.gopro.photos.save(store)
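With both collections populated, each photo can be matched to the GPS fix nearest in time via the shared 'dt' field, which is presumably the point of normalising the timestamps above. A sketch under that assumption (the one-minute tolerance is an arbitrary choice, and the pymongo 2.x-style save() mirrors the script):

# Geotag each photo from the nearest earlier GPX fix.
for photo in client.gopro.photos.find():
    fix = client.gopro.gpx.find_one({'dt': {'$lte': photo['dt']}},
                                    sort=[('dt', -1)])
    if fix and photo['dt'] - fix['dt'] < timedelta(minutes=1):
        photo['lat'] = fix['lat']
        photo['lng'] = fix['lng']
        client.gopro.photos.save(photo)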
#! /usr/bin/env python # -*- coding: utf-8 -*- """boids -- Boids implementation using Owyl behavior trees. This module provides example code using the L{owyl} library to implement the Boids flocking algorithm. Requirements ============ Note: this demo requires Pyglet, Rabbyt, cocos2d - B{Pyglet}: U{http://pypi.python.org/pypi/pyglet} - B{Rabbyt}: U{http://pypi.python.org/pypi/Rabbyt} - B{cocos}: U{http://cocos2d.org/} Intent ====== This example demonstrates the basic usage of Owyl, including: - building and running a Behavior Tree, and - developing custom behaviors. Definitions =========== - B{behavior}: Any unit of a Behavior Tree, as represented by a task node, branch, or group of parallel behaviors. - B{task node}: Any atomic Behavior Tree node. - B{parent node}/B{parent task}: Any task node that has child nodes. - B{branch}: A parent node and all its children. - B{node decorator}: A parent node with only one child. Used to add functionality to a child. - B{leaf node}/B{leaf task}/B{leaf}: A task node that has no children. Algorithm ========= The basic Boids flocking algorithm was developed by Craig Reynolds. For more information, see his page at U{http://www.red3d.com/cwr/boids/}. It's a very simple algorithm, with three basic behaviors: - "B{Separation}: steer to avoid crowding local flockmates" - "B{Alignment}: steer towards the average heading of local flockmates" - "B{Cohesion}: steer to move toward the average position of local flockmates" I{(Definitions from C. Reynolds, linked above)} This is actually so simple, we wouldn't really need a behavior tree to model it, but it's a good place to start. Just to spice things up, we've added some extra behavior: boids will accelerate as they steer away from too-close flock mates, and they will seek to match a global speed. This gives the flock more the appearance of a school of fish, rather than a flight of sparrows, but it will let us break out some slightly more advanced behaviors. The boids will also seek after a fixed point (conveniently, the center of the screen), so that we can observe their movement better. Building the Tree ================= See L{Boid.buildTree} below. Core Behaviors ============== The core behaviors are documented below in each task nodes' docstring. They are: - L{Boid.hasCloseNeighbors}: conditional to detect crowding - L{Boid.accelerate}: accelerate at a given rate - L{Boid.matchSpeed}: accelerate to match a given speed - L{Boid.move}: move straight ahead at current speed - L{Boid.seek}: seek a fixed goal position - L{Boid.steerToMatchHeading}: match neighbors' average heading - L{Boid.steerForSeparation}: steer away from close flockmates - L{Boid.steerForCohesion}: steer toward average position of neighbors. Helpers ======= A number of other helper methods clutter up the namespace. Boid also inherits from L{steering.Steerable<examples.steering.Steerable>}, which contains common steering helper methods which will be useful in future examples. Other Stuff =========== Copyright 2008 David Eyk. All rights reserved. $Author$\n $Rev$\n $Date$ @newfield blackboard: Blackboard data """ __author__ = "$Author$"[9:-2] __revision__ = "$Rev$"[6:-2] __date__ = "$Date$"[7:-2] import os import random from math import radians, degrees, sin, cos, pi, atan2 pi_2 = pi*2.0 pi_1_2 = pi/2.0 pi_1_4 = pi/4.0 pi_3_4 = (pi*3)/4 ### Optimized attribute getters for sprites.. from operator import attrgetter getX = attrgetter('x') getY = attrgetter('y') getR = attrgetter('rotation') ### Memojito provides memoization (caching) services. 
import memojito ### Pyglet provides graphics and resource management. import pyglet pyglet.resource.path = [os.path.dirname(os.path.abspath(__file__)),] pyglet.resource.reindex() ## Cocos provides scene direction and composition from cocos.director import director from cocos.scene import Scene from cocos.actions import FadeIn from cocos.layer import ScrollableLayer, ScrollingManager ## Rabbyt provides collision detection from rabbyt.collisions import collide_single ## Owyl provides the wisdom from owyl import blackboard import owyl from steering import Steerable class Boid(Steerable): """Implement a member of a flock. Boid implements its leaf node behaviors as methods, using the L{owyl.taskmethod} decorator. Leaf node behaviors may also be implemented as unbound functions using the L{owyl.task} decorators. The boid's behavior tree is built in the L{Boid.buildTree} method, below. """ _img = pyglet.resource.image('triangle_yellow.png') _img.anchor_x = _img.width / 2 _img.anchor_y = _img.height / 2 boids = [] def __init__(self, blackboard): super(Boid, self).__init__(self._img) self.scale = 0.05 self.schedule(self.update) self.bb = blackboard self.boids.append(self) self.opacity = 0 self.do(FadeIn(2)) self.speed = 200 self.bounding_radius = 5 self.bounding_radius_squared = 25 self.neighborhood_radius = 1000 self.personal_radius = 20 self.tree = self.buildTree() def buildTree(self): """Build the behavior tree. Building the behavior tree is as simple as nesting the behavior constructor calls. Building the Behavior Tree ========================== We'll use a L{parallel<owyl.core.parallel>} parent node as the root of our tree. Parallel is essentially a round-robin scheduler. That is, it will run one step on each its children sequentially, so that the children execute parallel to each other. Parallel is useful as a root behavior when we want multiple behaviors to run at the same time, as with Boids. The first call to a task node constructor returns another function. Calling I{that} function will return an iterable generator. (This behavior is provided by the "@task..." family of python decorators found in L{owyl.core}.) Generally, you won't have to worry about this unless you're writing new parent nodes, but keep it in mind. Also note that keyword arguments can be provided at construction time (call to task constructor) or at run-time (call to visit). The C{blackboard} keyword argument to C{visit} will be available to the entire tree. (This is also why all nodes should accept C{**kwargs}-style keyword arguments, and access. Skipping down to the end of the tree definition, we see the first use of L{visit<owyl.core.visit>}. L{visit<owyl.core.visit>} provides the external iterator interface to the tree. Technically, it's an implementation of the Visitor pattern. It visits each "node" of the behavior tree and iterates over it, descending into children as determined by the logic of the parent nodes. (In AI terminology, this is a depth-first search, but with the search logic embedded in the tree.) L{visit<owyl.core.visit>} is also used internally by several parent behaviors, including L{parallel<owyl.core.parallel>}, L{limit<owyl.decorators.limit>}, and L{repeatAlways<owyl.decorators.repeatAlways>} in order to gain more control over its children. L{limit<owyl.decorators.limit>} =============================== The next parent node we see is L{limit<owyl.decorators.limit>}. 
L{limit<owyl.decorators.limit>} is a decorator node designed to limit how often its child is run (given by the keyword argument C{limit_period} in seconds). This is useful for limiting the execution of expensive tasks. In the example below, we're using L{limit<owyl.decorators.limit>} to clear memoes once every 0.4 seconds. This implementation of Boids uses L{memojito<examples.memojito>} to cache (or "memoize") neighbor data for each Boid. Neighbor data is used by each of the core behaviors, and is fairly expensive to calculate. However, it's constantly changing, so adjusting the limit_period will affect the behavior of the flock (and the frame rate). L{repeatAlways<owyl.decorators.repeatAlways>} ============================================= We next see the L{repeatAlways<owyl.decorators.repeatAlways>} decorator node. This does exactly as you might expect: it takes a behavior that might only run once, and repeats it perpetually, ignoring return values and always yielding None (the special code for "I'm not done yet, give me another chance to run"). L{sequence<owyl.decorators.sequence>} ============================================= Runs a sequence of actions. If any action yields False, then the rest of the sequence is not executed (the sequence is halted). Otherwise, the next sequence item is run. In this example, a boid accelerates away only if it is too close to another boid. Core Behaviors ============== The core behaviors are documented below in each method's docstring. They are: - L{Boid.hasCloseNeighbors}: conditional to detect crowding - L{Boid.accelerate}: accelerate at a given rate - L{Boid.matchSpeed}: accelerate to match a given speed - L{Boid.move}: move straight ahead at current speed - L{Boid.seek}: seek a fixed goal position - L{Boid.steerToMatchHeading}: match neighbors' average heading - L{Boid.steerForSeparation}: steer away from close flockmates - L{Boid.steerForCohesion}: steer toward average position of neighbors. """ tree = owyl.parallel( owyl.limit( owyl.repeatAlways(self.clearMemoes(), debug=True), limit_period=0.4), ### Velocity and Acceleration ############################# owyl.repeatAlways(owyl.sequence(self.hasCloseNeighbors(), self.accelerate(rate=-.01), ), ), self.move(), self.matchSpeed(match_speed=300, rate=.01), ### Steering ############ self.seek(goal=(0, 0), rate=5), self.steerToMatchHeading(rate=2), self.steerForSeparation(rate=5), self.steerForCohesion(rate=2), policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL ) return owyl.visit(tree, blackboard=self.bb) @owyl.taskmethod def hasCloseNeighbors(self, **kwargs): """Check to see if we have close neighbors. """ yield bool(self.closest_neighbors) @owyl.taskmethod def accelerate(self, **kwargs): """accelerate @keyword rate: The rate of acceleration (+ or -) """ bb = kwargs['blackboard'] rate = kwargs['rate'] dt = bb['dt'] self.speed = max(self.speed + rate * dt, 0) yield True @owyl.taskmethod def matchSpeed(self, **kwargs): """Accelerate to match the given speed. @keyword blackboard: A shared blackboard. @keyword match_speed: The speed to match. @keyword rate: The rate of acceleration. """ bb = kwargs['blackboard'] ms = kwargs['match_speed'] rate = kwargs['rate'] while True: if self.speed == ms: yield None dt = bb['dt'] dv_size = ms - self.speed dv = dv_size * rate * dt self.speed += dv yield None @owyl.taskmethod def move(self, **kwargs): """Move the actor forward perpetually. @keyword blackboard: shared blackboard @blackboard: B{dt}: time elapsed since last update. 
""" bb = kwargs['blackboard'] while True: dt = bb['dt'] r = radians(getR(self)) # rotation s = dt * self.speed self.x += sin(r) * s self.y += cos(r) * s yield None @owyl.taskmethod def seek(self, **kwargs): """Perpetually seek a goal position. @keyword rate: steering rate @keyword blackboard: shared blackboard @blackboard: B{dt}: time elapsed since last update. """ bb = kwargs['blackboard'] rate = kwargs['rate'] gx, gy = kwargs.get('goal', (0, 0)) while True: dt = bb['dt'] dx = gx-self.x dy = gy-self.y seek_heading = self.getFacing(dx, dy) my_heading = radians(self.rotation) rsize = degrees(self.findRotationDelta(my_heading, seek_heading)) rchange = rsize * rate * dt self.rotation += rchange yield None @owyl.taskmethod def steerToMatchHeading(self, **kwargs): """Perpetually steer to match actor's heading to neighbors. @keyword blackboard: shared blackboard @keyword rate: steering rate @blackboard: B{dt}: time elapsed since last update. """ bb = kwargs['blackboard'] rate = kwargs['rate'] while True: dt = bb['dt'] or 0.01 n_heading = radians(self.findAverageHeading(*self.neighbors)) if n_heading is None: yield None continue my_heading = radians(self.rotation) rsize = degrees(self.findRotationDelta(my_heading, n_heading)) # Factor in our turning rate and elapsed time. rchange = rsize * rate * dt self.rotation += rchange yield None @owyl.taskmethod def steerForSeparation(self, **kwargs): """Steer to maintain distance between self and neighbors. @keyword blackboard: shared blackboard @keyword rate: steering rate @blackboard: B{dt}: time elapsed since last update. """ bb = kwargs['blackboard'] rate = kwargs['rate'] while True: cn_x, cn_y = self.findAveragePosition(*self.closest_neighbors) dt = bb['dt'] dx = self.x-cn_x dy = self.y-cn_y heading_away_from_neighbors = self.getFacing(dx, dy) flee_heading = heading_away_from_neighbors my_heading = radians(self.rotation) rsize = degrees(self.findRotationDelta(my_heading, flee_heading)) # Factor in our turning rate and elapsed time. rchange = rsize * rate * dt self.rotation += rchange yield None @owyl.taskmethod def steerForCohesion(self, **kwargs): """Steer toward the average position of neighbors. @keyword blackboard: shared blackboard @keyword rate: steering rate @blackboard: B{dt}: time elapsed since last update. """ bb = kwargs['blackboard'] rate = kwargs['rate'] while True: neighbors = self.neighbors np_x, np_y = self.findAveragePosition(*neighbors) dt = bb['dt'] dx = np_x-self.x dy = np_y-self.y seek_heading = self.getFacing(dx, dy) my_heading = radians(self.rotation) # Find the rotation delta rsize = degrees(self.findRotationDelta(my_heading, seek_heading)) # Factor in our turning rate and elapsed time. rchange = rsize * rate * dt self.rotation += rchange yield None def canSee(self, other): """Return True if I can see the other boid. @param other: Another Boid or Sprite. @type other: L{Boid} or C{Sprite}. """ dx = self.x - other.x dy = self.y - other.y return abs(self.getFacing(dx, dy)) < pi_1_2 @memojito.memoizedproperty def others(self): """Find other boids that I can see. @rtype: C{list} of L{Boid}s. """ return [b for b in self.boids if b is not self and self.canSee(b)] @property def neighbors(self): """Find the other boids in my neighborhood. @rtype: C{list} of L{Boid}s. """ hood = (self.x, self.y, self.neighborhood_radius) # neighborhood n = collide_single(hood, self.others) return n @property def closest_neighbors(self): """Find the average position of the closest neighbors. @rtype: C{tuple} of C{(x, y)}. 
""" hood = (self.x, self.y, self.personal_radius) n = collide_single(hood, self.others) return n def findAveragePosition(self, *boids): """Return the average position of the given boids. @rtype: C{tuple} of C{(x, y)}. """ if not boids: return (0, 0) num_n = float(len(boids)) or 1 avg_x = sum((getX(n) for n in boids))/num_n avg_y = sum((getY(n) for n in boids))/num_n return avg_x, avg_y def findAverageHeading(self, *boids): """Return the average heading of the given boids. @rtype: C{float} rotation in degrees. """ if not boids: return 0.0 return sum((getR(b) for b in boids))/len(boids) @owyl.taskmethod def clearMemoes(self, **kwargs): """Clear memoizations. """ self.clear() yield True @memojito.clearbefore def clear(self): """Clear memoizations. """ pass def update(self, dt): """Update this Boid's behavior tree. This gets scheduled in L{Boid.__init__}. @param dt: Change in time since last update. @type dt: C{float} seconds. """ self.bb['dt'] = dt self.tree.next() class BoidLayer(ScrollableLayer): """Where the boids fly. """ is_event_handler = True def __init__(self, how_many): super(BoidLayer, self).__init__() self.how_many = how_many self.manager = ScrollingManager() self.manager.add(self) self.active = None self.blackboard = blackboard.Blackboard("boids") self.boids = None def makeBoids(self): boids = [] for x in xrange(int(self.how_many)): boid = Boid(self.blackboard) boid.position = (random.randint(0, 200), random.randint(0, 200)) boid.rotation = random.randint(1, 360) self.add(boid) boids.append(boid) return boids def on_enter(self): """Code to run when the Layer enters the scene. """ super(BoidLayer, self).on_enter() self.boids = self.makeBoids() # Place flock in the center of the window self.manager.set_focus(-512, -384) if __name__ == "__main__": import sys if len(sys.argv) == 2: how_many = int(sys.argv[1]) else: how_many = 50 director.init(resizable=True, caption="Owyl Behavior Tree Demo: Boids", width=1024, height=768) s = Scene(BoidLayer(how_many)) director.run(s)
The Bunny Egg is an Easter item. It appears only during the ARK: Eggcellent Adventure and ARK: Eggcellent Adventure 2 events.

Bunny Eggs are laid by the special BunnyDodos and BunnyOviraptors. These creatures only appear during the Eggcellent Adventure event. They wear Bunny Ears, which makes them easily distinguishable from normal Dodos and Oviraptors. In Eggcellent Adventure 2 they are laid by mate-boosted wild Dodos. They were also used to summon the DodoRex.

The eggs have a light grayish color. They can be placed somewhere and painted with Dye. In addition to its preset paint regions, you can doodle on the Bunny Egg by "attacking" it with a Paintbrush, which will open the "Apply Paint" menu. Then, click and drag your cursor over the 3D model (like a paintbrush) to draw anything you want. See Painting for more information on doodling.

A promotional image from the Eggcellent Adventure with examples of dyed Bunny Eggs.
# -*- coding: utf-8 -*-
from django.db.utils import IntegrityError, ProgrammingError
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models, connection

from articles.models import Article


class Migration(DataMigration):

    def get_full_raw_content(self, page):
        return u'# {}\n\n> {}\n\n{}\n\n{}'.format(page.title, page.punchline,
                                                  page.description, page.raw_content)

    def forwards(self, orm):
        if 'pages_page' not in connection.introspection.table_names():
            # The table does not exist, which means that we're running on a
            # fresh installation, so we can skip the whole migration
            return

        for page in orm['pages.page'].objects.exclude(author__user_id=308):
            defaults = {'created_at': page.created_at,
                        'deleted_at': page.deleted_at,
                        'hide': page.hide,
                        'is_wiki': page.is_wiki,
                        'published_at': page.published_at,
                        'raw_content': self.get_full_raw_content(page),
                        'slug': page.slug.lower(),
                        'submitted_at': page.submitted_at,
                        'title': page.title,
                        'updated_at': page.updated_at,
                        'views_count': page.views,
                        'received_kudos_count': page.karma,
                        'revisions_count': page.pagerevision_set.count(),
                        # Special treatment required
                        'author_id': page.author.user_id,
                        }

            rendered_content = Article.process_raw_content(defaults['raw_content'])
            defaults['rendered_html'] = rendered_content['rendered_html']
            defaults['description'] = rendered_content['description']
            defaults['punchline'] = rendered_content['punchline']

            a, created = orm['articles.article'].objects.get_or_create(pk=page.pk,
                                                                       defaults=defaults)

            a.tags.clear()
            for tag in page.tags.all():
                apply_tag, tag_created = orm['tags.tag'].objects.get_or_create(
                    pk=tag.pk, defaults={'title': tag.name})
                a.tags.add(apply_tag)

            a.revision_set.all().delete()
            for rev in page.pagerevision_set.all():
                rev_values = {
                    'author': rev.author.user,
                    'created_at': rev.created_at,
                    'raw_content': self.get_full_raw_content(rev),
                    'title': rev.title,
                }
                # Render from the revision's own raw content (the original
                # re-rendered the page's content here, which was a bug).
                rendered_content = Article.process_raw_content(rev_values['raw_content'])
                rev_values['description'] = rendered_content['description']
                rev_values['punchline'] = rendered_content['punchline']
                a.revision_set.create(pk=rev.pk, **rev_values)

            a.kudos_received.all().delete()
            for kudos in page.pagekarma_set.all():
                kudos_values = {
                    'user': kudos.user,
                    'session_id': kudos.session_id,
                    'timestamp': kudos.timestamp,
                }
                a.kudos_received.create(**kudos_values)

            a.articleview_set.all().delete()
            for v in page.pageview_set.all():
                view_values = {
                    'user': v.user,
                    'session_id': v.session_id,
                    'timestamp': v.timestamp,
                }
                a.articleview_set.create(**view_values)

    def backwards(self, orm):
        pass

    models = {
        u'articles.article': {
            'Meta': {'object_name': 'Article'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'editors_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'punchline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'raw_content': ('django.db.models.fields.TextField', [], {}),
            'received_kudos_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'rendered_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'revisions_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tagged_article_set'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['tags.Tag']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'views_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'articles.articlegroup': {
            'Meta': {'object_name': 'ArticleGroup'},
            'articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['articles.Article']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'publish_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'target_block': ('django.db.models.fields.CharField', [], {'default': "'editors_picks'", 'max_length': '255'})
        },
        u'articles.articleview': {
            'Meta': {'object_name': 'ArticleView'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['articles.Article']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'session_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'viewed_pages'", 'null': 'True', 'to': u"orm['auth.User']"})
        },
        u'articles.kudos': {
            'Meta': {'unique_together': "[('article', 'session_id'), ('article', 'user')]", 'object_name': 'Kudos'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'kudos_received'", 'to': u"orm['articles.Article']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'session_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'kudos_given'", 'null': 'True', 'to': u"orm['auth.User']"})
        },
        u'articles.revision': {
            'Meta': {'ordering': "['-pk']", 'object_name': 'Revision'},
            'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['articles.Article']"}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'punchline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'raw_content': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'tags.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'title': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        u'pages.page': {
            'Meta': {'object_name': 'Page'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.PageAuthor']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'karma': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'punchline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'raw_content': ('django.db.models.fields.TextField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['pages.PageTag']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        u'pages.pageauthor': {
            'Meta': {'object_name': 'PageAuthor'},
            'bio': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'karma': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
        },
        u'pages.pagetag': {
            'Meta': {'object_name': 'PageTag'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        u'pages.pageview': {
            'Meta': {'ordering': "['-timestamp']", 'object_name': 'PageView'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.Page']"}),
            'session_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
        },
        u'pages.pagekarma': {
            'Meta': {'ordering': "['-timestamp']", 'object_name': 'PageKarma'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.Page']"}),
            'session_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
        },
        u'pages.pagerevision': {
            'Meta': {'object_name': 'PageRevision'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.PageAuthor']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.Page']"}),
            'punchline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'raw_content': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
    }

    complete_apps = ['articles']
    symmetrical = True
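The whole forwards pass leans on get_or_create keyed by the source table's primary key, which is what makes re-running the migration safe. A minimal sketch of that idempotency pattern outside South, assuming a configured Django project with the Article model above (pk and field values here are made up):

from articles.models import Article

defaults = {'title': 'Example', 'raw_content': '# Example'}
article, created = Article.objects.get_or_create(pk=42, defaults=defaults)
if not created:
    # The row already existed: defaults were ignored, nothing was overwritten.
    pass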
Jobs & Vacancies | Latest-Available & SA . Anglo Platinum Mining Learnerships and Careers 126. Posted by aridhezic on 2 Dec, 2016 In Jobs & Vacancies, Learnerships. mponeng mine rustenburg learnership - . Engineering learnerships at mponeng gold . Technical papers - Association of Mine . Learnership - Mining jobs. ... MPONENG GOLD MINE. Pretoria / Tshwane. 1 weeks ago. Save ad. ... Rustenburg. 4 months ago. Save ad. Bakubung platinum mine. Learnership Engineering Jobs - June 2018 | . Latest Learnership jobs in Rustenburg (page 4) . learnerships in mines around rustenburg - gurusrestaurant.in. learnership in rustenburg mines -, Home » Mining Machine>apply for learnership online at mines . Mponeng Gold Mine, Gauteng - Mining . Implats Learnerships by Impala Platinum . list of mines in north west province south africa . mining learnership jobs - Find a new job today! what? job title, keywords or company. where? city, state or zip. ... Rustenburg Bathopele platinum mine - North West. Lonmin Mining Bursaries 2016 - 2017 available - . MODIKWA PLATINUM MINE (South Africa) - . MODIKWA PLATINUM MINE located in South Africa. MODIKWA PLATINUM MINE Address, Phone number, Email, Reviews and Photos. . Global Ashanti Mponeneng Careltovilles Contact . anglo gold ashanti mponeng mine - . Mponeng Gold Mine, Gauteng - Mining Technology. AngloGold Ashanti's Mponeng mine .
""" MT: 3D: Forward =============== Forward model 3D MT data. Test script to use SimPEG.NSEM platform to forward model impedance and tipper synthetic data. """ import SimPEG as simpeg from SimPEG.EM import NSEM import numpy as np import matplotlib.pyplot as plt try: from pymatsolver import Pardiso as Solver except: from SimPEG import Solver def run(plotIt=True): """ MT: 3D: Forward =============== Forward model 3D MT data. """ # Make a mesh M = simpeg.Mesh.TensorMesh( [ [(100, 9, -1.5), (100., 13), (100, 9, 1.5)], [(100, 9, -1.5), (100., 13), (100, 9, 1.5)], [(50, 10, -1.6), (50., 10), (50, 6, 2)] ], x0=['C', 'C', -14926.8217] ) # Setup the model conds = [1,1e-2] sig = simpeg.Utils.ModelBuilder.defineBlock( M.gridCC, [-100, -100, -350], [100, 100, -150], conds ) sig[M.gridCC[:, 2] > 0] = 1e-8 sig[M.gridCC[:, 2] < -1000] = 1e-1 sigBG = np.zeros(M.nC) + conds[1] sigBG[M.gridCC[:, 2] > 0] = 1e-8 if plotIt: collect_obj, line_obj = M.plotSlice(np.log10(sig), grid=True, normal='X') color_bar = plt.colorbar(collect_obj) # Setup the the survey object # Receiver locations rx_x, rx_y = np.meshgrid(np.arange(-600, 601, 100), np.arange(-600, 601, 100)) rx_loc = np.hstack((simpeg.Utils.mkvc(rx_x, 2), simpeg.Utils.mkvc(rx_y, 2), np.zeros((np.prod(rx_x.shape), 1)))) # Make a receiver list rxList = [] for rx_orientation in ['xx', 'xy', 'yx', 'yy']: rxList.append(NSEM.Rx.Point_impedance3D(rx_loc, rx_orientation, 'real')) rxList.append(NSEM.Rx.Point_impedance3D(rx_loc, rx_orientation, 'imag')) for rx_orientation in ['zx', 'zy']: rxList.append(NSEM.Rx.Point_tipper3D(rx_loc, rx_orientation, 'real')) rxList.append(NSEM.Rx.Point_tipper3D(rx_loc, rx_orientation, 'imag')) # Source list srcList = [ NSEM.Src.Planewave_xy_1Dprimary(rxList, freq) for freq in np.logspace(4, -2, 13) ] # Survey MT survey = NSEM.Survey(srcList) # Setup the problem object problem = NSEM.Problem3D_ePrimSec(M, sigma=sig, sigmaPrimary=sigBG) problem.pair(survey) problem.Solver = Solver # Calculate the data fields = problem.fields() dataVec = survey.eval(fields) # Add uncertainty to the data - 10% standard # devation and 0 floor dataVec.standard_deviation.fromvec( np.ones_like(simpeg.mkvc(dataVec)) * 0.1 ) dataVec.floor.fromvec( np.zeros_like(simpeg.mkvc(dataVec)) ) # Add plots if plotIt: # Plot the data # On and off diagonal (on left and right axis, respectively) fig, axes = plt.subplots(2, 1, figsize=(7, 5)) plt.subplots_adjust(right=0.8) [(ax.invert_xaxis(), ax.set_xscale('log')) for ax in axes] ax_r, ax_p = axes ax_r.set_yscale('log') ax_r.set_ylabel('Apparent resistivity [xy-yx]') ax_r_on = ax_r.twinx() ax_r_on.set_yscale('log') ax_r_on.set_ylabel('Apparent resistivity [xx-yy]') ax_p.set_ylabel('Apparent phase') ax_p.set_xlabel('Frequency [Hz]') # Start plotting ax_r = dataVec.plot_app_res( np.array([-200, 0]), components=['xy', 'yx'], ax=ax_r, errorbars=True) ax_r_on = dataVec.plot_app_res( np.array([-200, 0]), components=['xx', 'yy'], ax=ax_r_on, errorbars=True) ax_p = dataVec.plot_app_phs( np.array([-200, 0]), components=['xx', 'xy', 'yx', 'yy'], ax=ax_p, errorbars=True) ax_p.legend(bbox_to_anchor=(1.05, 1), loc=2) if __name__ == '__main__': do_plots = True run(do_plots) if do_plots: plt.show()
Aerial view of Pennsylvania’s capitol complex. Photo: PA Capitol.

As satellite companies writ large increasingly consider how to interoperate with terrestrial connectivity options, Hughes is bringing this mindset to its customers in the public sector. The company recently secured a contract to support the Commonwealth of Pennsylvania’s telecommunications network (COPANET), for which it will combine fiber, wireless and satellite broadband technologies to provide managed network solutions to government agency sites.

Despite the adversarial narrative sometimes spun between terrestrial networks and satellite, Tony Bardo, Hughes’ assistant vice president of government services, believes a mixed approach is key to achieving proper resiliency. In an interview with Via Satellite, Bardo explained this is particularly important when serving customers in the public sector because the government needs to be able to conduct its operations regardless of extenuating circumstances.

Government agencies that have relied exclusively on T1 lines in the past are now beginning to understand the benefits of complementing their networks with satellite. States like Pennsylvania “have an eye toward understanding the properties of satellite that lend to governing remotely, in an emergency situation and having resiliency in their network,” Bardo said, which means having a diversity of connectivity options at critical sites. “An emergency situation is not the ideal time for a government to shut down its doors” simply because its fiber lines have been knocked offline.

Tony Bardo, Hughes’ assistant vice president of government services. Photo: Hughes.

In the past, Bardo observed that government agencies stuck fast to models based on dedicated access services provided by a single, statewide terrestrial carrier. “They didn’t want to talk about using broadband,” partly because of the security concerns associated with transmitting data over the internet, but also because local Digital Subscriber Line (DSL) lines were not necessarily any faster.

Then 3G and 4G networks emerged, followed by High Throughput Satellites (HTS), and suddenly broadband “had a whole different flavor,” Bardo said. It had all the advantages of a traditional carrier-class network, except that with a managed services provider like Hughes, scattered sites could now leverage different connectivity technologies — DSL in one region, satellite in another, Comcast fiber in yet another, and so on.

Field offices’ migration toward cloud-based services is also driving demand for multi-transport connectivity, Bardo said. Filing records, using Supervisory Control and Data Acquisition (SCADA) software, monitoring remote sites, taking credit card transactions for access to public parks — these applications are more bandwidth-hungry today than ever before.

Hughes took a cue from its old satellite customers in the retail and oil and gas industries, who years ago inquired about adding DSL to their networks. “We said why not? It has many of the same broadband properties as our satellites,” Bardo said. So the company began developing new integrated routers and forming partnerships with major cable companies and wireless providers. “And now our service knows no boundaries,” Bardo said.

Like most end users, customers in the public sector don’t care too much where their connectivity comes from, as long as it is reliable, secure and affordable. Improvements in satellite technology have made it a more viable alternative to wireless and fiber, whereas 10 or 15 years ago its advantages were less defined. Now, some states, including Pennsylvania, are picking up on that. “Hopefully more will in the future,” Bardo said.
#
# A ping service to be 'compiled' into an exe-file with py2exe.
# To install this service, run 'LabtrackerPingService.py install' at a
# command prompt, then 'LabtrackerPingService.py start'.
#

# Need to download pywin32 in order to import these modules
import win32serviceutil
import win32service
import win32event
import win32evtlogutil
import win32api
import win32con

import time
import sys, os
import urllib2
import urllib
import getpass

import servicemanager

DEBUG = True

LABTRACKER_URL = "labtracker.eplt.washington.edu"
if DEBUG:
    LABTRACKER_URL = "web16.eplt.washington.edu:8000"


def get_mac():
    mac = None  # stays None on non-Windows platforms
    # windows
    if sys.platform == 'win32':
        for line in os.popen("ipconfig /all"):
            if line.lstrip().startswith('Physical Address'):
                mac = line.split(':')[1].strip().replace('-', ':')
                break
    return mac


def get_data(status):
    # get user info from machine
    user = getpass.getuser()
    data = urllib.urlencode({'user': user, 'status': status})
    return data


class MyService(win32serviceutil.ServiceFramework):
    _svc_name_ = "LabtrackerService"
    _svc_display_name_ = "Labtracker Service"
    _svc_deps_ = ["EventLog"]

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.isAlive = True

    def SvcStop(self):
        servicemanager.LogInfoMsg("ping service - Stopping")
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.isAlive = False

    def SvcDoRun(self):
        servicemanager.LogInfoMsg("ping service - Start")
        mac = get_mac()
        while self.isAlive:
            servicemanager.LogInfoMsg("ping service - Ping")
            req = urllib2.Request(url="http://%s/tracker/ping/%s/" % (LABTRACKER_URL, mac),
                                  data=get_data('ping'))
            urllib2.urlopen(req)
            win32api.SleepEx(10000, True)
        servicemanager.LogInfoMsg("ping service - Stopped")


def ctrlHandler(ctrlType):
    return True


if __name__ == '__main__':
    # Note that this code will not be run in the 'frozen' exe-file!!!
    win32api.SetConsoleCtrlHandler(ctrlHandler, True)
    win32serviceutil.HandleCommandLine(MyService)
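Outside the Windows service wrapper, the ping itself is just an HTTP POST, which makes it easy to smoke-test from an interactive py2 session. A sketch (the MAC address is a made-up placeholder; the real service reads it from ipconfig):

import urllib
import urllib2

LABTRACKER_URL = "web16.eplt.washington.edu:8000"  # the DEBUG host from above
mac = "00:11:22:33:44:55"  # placeholder; see get_mac() above
data = urllib.urlencode({'user': 'testuser', 'status': 'ping'})
req = urllib2.Request(url="http://%s/tracker/ping/%s/" % (LABTRACKER_URL, mac),
                      data=data)
print(urllib2.urlopen(req).getcode())  # 200 if the tracker accepted the ping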
This is the perfect time of year to talk about pumpkin, ‘cause we pretty much ignore it every month but October, November and December. So while it’s fresh in our minds, how about a little reappraisal of this underappreciated gem of a vegetable?

Let’s start with potassium. A cup of mashed pumpkin gives you a whopping 564 mg of potassium (about 33 percent more than a medium banana), and that’s all for a measly 49 calories. Potassium works with sodium to maintain the body’s water balance, and that, in turn, impacts blood pressure. High blood pressure—unlike high total cholesterol—is a real risk for heart disease. Lots of studies show that people who consume high amounts of potassium have lower blood pressure than people who don’t. In primitive cultures, salt intake is about seven times lower than potassium intake, but in Western industrialized cultures, salt intake is about three times higher than potassium intake. I believe that the “problems” with sodium in our diet are at least as much problems caused by low potassium as they are by high sodium.

Can Pumpkins Decrease Your Risk of Stroke?

Several large epidemiological studies have suggested that increased potassium intake is associated with decreased risk of stroke. A study of more than 43,000 men followed for 8 years found that men in the top 20 percent of potassium intake (averaging 4,300 mg per day) were only 62 percent as likely to have a stroke as those in the lowest 20 percent of potassium intake (averaging 2,400 mg per day). This inverse association was especially strong in men with high blood pressure.

Athletes may need more potassium to replace what’s lost from muscle during exercise. Low potassium can cause muscle cramping (and cardiovascular irregularities). When people tell me they have muscle cramps, the first thing I think of is that they’re low on minerals, especially potassium and magnesium (and sometimes calcium as well).

Four large studies have reported significant positive associations between dietary potassium intake and bone mineral density. This isn’t really surprising if you think about it. When we eat a highly acidic diet, the body has to buffer that acid, and it does this by mobilizing alkaline calcium salts from the bones in order to neutralize the acids consumed in the diet. Increased consumption of high-potassium fruits and vegetables like pumpkin reduces the net acid content of the diet and may help preserve calcium in the bones, where it belongs.

Random note: This is why I tend to roll my eyes when I hear about expensive alkaline waters or alkalizing machines like Kangen. If you want to “alkalize” your system, eat a cup of pumpkin or put some baking soda in your water and save the four grand. People, really!

Pumpkin has more than 2,400 mcg of the carotenoids lutein and zeaxanthin, star nutrients in eye health and vision protection formulas. Pumpkin also has more than 12,000 IUs of vitamin A, plus a little bit of calcium, iron, magnesium, and phosphorus just for good measure. And a cup of the stuff also provides more than 2½ g of fiber. Remember that the carotenoids need fat for absorption, which makes it all the easier to consume some pumpkin on a regular basis. Just cook it with some grass-fed butter or a healthy oil. If you like it sweet, try adding some stevia (I like Pyure brand, available everywhere). Mashed pumpkin—with butter and salt—is a great substitute for mashed white potatoes; way healthier, and in my opinion, way more delicious.

For decades, I’ve railed against the “conventional wisdom” that grains are an important source of fiber. (They’re not—just read the label on any cereal box.) Pumpkin, however, is a whole different story.

An average slice of bread (or average portion of commercial cold cereal) has between 1 and 3 grams of fiber, and comes with a whole host of blood-sugar-raising starch, not to mention gluten and (frequently) high-fructose corn syrup. Pumpkin, on the other hand, has 49 calories per cup and a whopping 7 grams of fiber. Bread is about 100 calories a slice, and, with few exceptions (like sourdough), has virtually nothing of nutritional value to recommend it. And no bread on earth provides the amount of fiber that a cup of mashed pumpkin provides.

Every epidemiological dietary study ever done shows way better health outcomes for people who consume large amounts of fiber in their diet. Dr. Steven Masley and I presented a study at the annual conference of the American College of Nutrition showing that fiber intake was one of the best predictors of success on a weight loss program. Fiber matters: it slows the entrance of sugar into the bloodstream (blunting its glycemic impact), helps with digestion, and provides “food” for bacteria in the gut. When bacteria in the gut dine on fiber, they produce critically important nutrients like butyric acid, which helps support the integrity of the gut wall and may have positive metabolic effects as well. Adding a cup of pumpkin to your daily intake is a great way to get about 20-25% of the fiber you should be getting daily.

On Thanksgiving, we had a vegan guest and Michelle wanted to make something special for her, so she made this amazing dessert, mini pumpkin cheesecakes, which ended up being my favorite!

Preheat oven to 300 degrees. For the crust, pulse the granola, pecans, and brown sugar in a food processor until it has a coarse sand texture. Add butter, and mix until well blended. Press into the bottom of muffin pan cups to form the crust.

Place cashews in food processor bowl. Blend until smooth, stopping to scrape down sides of bowl as needed. Add pumpkin, banana, lemon juice, vanilla, cinnamon, nutmeg and salt to food processor bowl. Blend again until smooth, stopping to scrape down sides of bowl as needed.

Divide batter among muffin cups, placing a scant 1/4 cup in each. Bake 20 minutes. Cheesecakes will still be a bit soft when finished baking. Transfer to cooling rack. Allow to cool completely and then refrigerate several hours, until fully set. Serve with coconut whipped cream and a sprinkle of cinnamon.
import os
import sys
import time
import random
import datetime

from bisect import bisect
from termcolor import colored, cprint
from string import ascii_lowercase, digits, punctuation

import settings

CHARS = ascii_lowercase + digits + punctuation


def relpath(*x):
    return os.path.join(settings.BASE_DIR, *x)


def weighted_choice(choices):
    values, weights = zip(*choices)
    total = 0
    cum_weights = []
    for w in weights:
        total += w
        cum_weights.append(total)
    x = random.random() * total
    i = bisect(cum_weights, x)
    return values[i]


def shoot(line, color=None, output=sys.stdout, attrs=None):
    ln = line.lower()
    try:
        line = line.rstrip().encode('utf-8')
    except UnicodeDecodeError:
        pass
    if color:
        cprint(line, color, attrs=attrs, file=output)
        return
    if ('error' in ln) or ('exception' in ln) or ('err' in ln):
        cprint(line, 'red', file=output)
    elif 'debug' in ln:
        cprint(line, 'white', attrs=['bold'], file=output)
    elif ('warning' in ln) or ('warn' in ln) or ('profile' in ln):
        cprint(line, 'white', attrs=['bold'], file=output)
    elif ('info' in ln) or ('inf' in ln):
        cprint(line, 'white', attrs=['dark'], file=output)
    else:
        cprint(line, 'white', file=output)


def shoot_file(fname=None, color=None):
    exclude_files = ['osceleton.trace']
    if fname is None:
        fname = random.choice([
            f for f in os.listdir(settings.BASE_DIR)
            if os.path.isfile(f) and f not in exclude_files])
    fname = relpath(fname)
    if color is None:
        # do not allow big files to be displayed in color
        statinfo = os.stat(fname)
        if statinfo.st_size <= 10000:
            color = random.choice(['blue', 'white', 'red'])
        else:
            color = 'white'
    with open(fname, 'r') as f:
        count = 0
        for ln in f.readlines():
            count += 1
            ln = "%-3d %s" % (count, ln)
            shoot(ln, color=color)
            time.sleep(0.05)
        shoot('\n')
        f.close()


def spinner():
    while True:
        for cursor in '|/-\\':
            yield cursor


def spinning_cursor(wait=10, output=sys.stdout):
    spinner_ = spinner()
    for _ in range(int(wait/0.1)):
        output.write(spinner_.next())
        output.flush()
        time.sleep(0.1)
        output.write('\b')


def table_row(row, width):
    return "".join(str(word).ljust(width) for word in row)


def get_stat(labels, num_col):
    data = []
    for _ in range(random.randint(5, 20)):
        data.append(
            [random.choice(labels)]
            + [random.randint(0, 2000) for i in range(num_col-2)]
            + [random.random()*random.randint(0, 100)]
        )
    col_width = max(len(str(word)) for row in data for word in row) + 2  # padding
    data = sorted(data, key=lambda x: x[0], reverse=random.choice([True, False]))
    return col_width, [table_row(rw, col_width) for rw in data]


def shoot_table():
    shoot("=" * 80)
    header = ['#', 'LC', 'CCN', 'Dict#4', '-->']
    labels = ['inf', 'err', 'err cri', 'warn', 'generic']
    width, stat = get_stat(labels, num_col=len(header))
    shoot(table_row(header, width), color='white', attrs=['dark'])
    for row in stat:
        shoot(row)
        time.sleep(0.1)
    time.sleep(random.random()*2)
    shoot('\n\n')


def rand_string(size=12):
    """
    Generates a quasi-unique sequence from random digits and letters.
    """
    return ''.join(random.choice(CHARS) for x in range(size))


def wait_key():
    """
    Wait for a key press on the console and return it.
    """
    result = None
    if os.name == 'nt':
        import msvcrt
        result = msvcrt.getch()
    else:
        import termios
        fd = sys.stdin.fileno()

        oldterm = termios.tcgetattr(fd)
        newattr = termios.tcgetattr(fd)
        newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, newattr)

        try:
            result = sys.stdin.read(1)
        except IOError:
            pass
        finally:
            termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)

    return result


def log_post(msg, output=sys.stdout):
    if msg.lower().startswith('debug'):
        symbol = '>'
    elif msg.lower().startswith('error'):
        symbol = 'x'
    elif msg.lower().startswith('warning'):
        symbol = '!'
    else:
        symbol = '.'
    shoot('[%s] %s: %s' % (
        symbol,
        datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        msg), output=output)
Any variation you want is available with revisions. Thanks. Use INBOX for easy and quick communication. All versions and edits are available.
# -*- coding:utf-8 -*-

"""
Quick-start test code for the Evernote API.

Note: the official Evernote Python library targets py2, and its py3
branch has not been updated in a long time, so py2 is used throughout.

In my opinion the best learning resource is the sublime-evernote plugin
https://github.com/bordaigorl/sublime-evernote/blob/master/sublime_evernote.py
because it is a full-featured plugin (create, delete, update and search).

Another good resource is the samples in the official Evernote repository
https://github.com/evernote/evernote-sdk-python/blob/master/sample/client/EDAMTest.py

There are also a few tools written by users in China
https://github.com/littlecodersh/EasierLife

The Evernote API is built on the thrift framework, so the interface
definitions are the same for every language.
"""

from evernote.api.client import EvernoteClient
from evernote.edam import type
import evernote.edam.type.ttypes as Types
import evernote.edam.notestore.NoteStore as NoteStore

# This is a sandbox token for testing; you need to request your own at
# https://sandbox.evernote.com/api/DeveloperToken.action
dev_token = "S=s1:U=92e22:E=15e5ac1167d:C=157030fe988:P=1cd:A=en-devtoken:V=2:H=1ef28ef900ebae2ba1d1385bffbb6635"

client = EvernoteClient(token=dev_token)
userStore = client.get_user_store()
print userStore.token
user = userStore.getUser()

# note_store is the most important data API
note_store = client.get_note_store()

for nb in note_store.listNotebooks():
    print nb.name

n = type.ttypes.Note()
n.title = "First evernote using api"
n.content = u"哈哈wahahahaha"
# It seems Chinese content needs to be encoded first
n.content = "haha"
note_store.createNote(n)

note = Types.Note()
note.title = "Test note from EDAMTest.py"
note.content = '<?xml version="1.0" encoding="UTF-8"?>'
note.content += '<!DOCTYPE en-note SYSTEM ' \
    '"http://xml.evernote.com/pub/enml2.dtd">'
note.content += '<en-note>Here is the Evernote logo:<br/>'
note.content += '</en-note>'
created_note = note_store.createNote(note)

books = note_store.listNotebooks()
bid = books[1].guid  # take the second notebook (the first test notebook has no data)

search = {'notebookGuid': bid}
results = note_store.findNotesMetadata(
    NoteStore.NoteFilter(**search),
    None,
    10,
    NoteStore.NotesMetadataResultSpec(
        includeTitle=True,
        includeNotebookGuid=True)
)
print results.notes[0].title
print results.notes[0].content

haha = results.notes[0].guid
# 'e3570976-3dbd-439e-84fa-98d8d2aae28e'
n = note_store.getNote(haha, True, False, False, False)
print n.created
print n.resources
print n.tagNames
print n.contentHash
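One caveat worth calling out: Evernote rejects note content that is not valid ENML, so the bare n.content = "haha" note above will fail validation against a real sandbox. A small helper that wraps plain text in the minimal ENML envelope (same Types import and note_store as above):

def make_note(title, plain_text):
    # Evernote requires the ENML envelope; plain strings fail validation.
    note = Types.Note()
    note.title = title
    note.content = (
        '<?xml version="1.0" encoding="UTF-8"?>'
        '<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">'
        '<en-note>%s</en-note>' % plain_text
    )
    return note

note_store.createNote(make_note("Wrapped note", "haha"))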
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from enum import Enum


class CachingType(Enum):

    none = "none"
    readonly = "readonly"
    readwrite = "readwrite"


class StorageAccountType(Enum):

    standard_lrs = "Standard_LRS"
    premium_lrs = "Premium_LRS"


class FileServerType(Enum):

    nfs = "nfs"
    glusterfs = "glusterfs"


class FileServerProvisioningState(Enum):

    creating = "creating"
    updating = "updating"
    deleting = "deleting"
    succeeded = "succeeded"
    failed = "failed"


class VmPriority(Enum):

    dedicated = "dedicated"
    lowpriority = "lowpriority"


class DeallocationOption(Enum):

    requeue = "requeue"
    terminate = "terminate"
    waitforjobcompletion = "waitforjobcompletion"
    unknown = "unknown"


class ProvisioningState(Enum):

    creating = "creating"
    succeeded = "succeeded"
    failed = "failed"
    deleting = "deleting"


class AllocationState(Enum):

    steady = "steady"
    resizing = "resizing"


class OutputType(Enum):

    model = "model"
    logs = "logs"
    summary = "summary"
    custom = "custom"


class ToolType(Enum):

    cntk = "cntk"
    tensorflow = "tensorflow"
    caffe = "caffe"
    caffe2 = "caffe2"
    chainer = "chainer"
    custom = "custom"


class ExecutionState(Enum):

    queued = "queued"
    running = "running"
    terminating = "terminating"
    succeeded = "succeeded"
    failed = "failed"
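Since these are standard-library enums, the wire values AutoRest expects are just the .value strings. A quick sanity check (not part of the generated file, just an illustrative session using the classes above):

print(ToolType.tensorflow.value)              # "tensorflow"
print(StorageAccountType.premium_lrs.value)   # "Premium_LRS"
print(CachingType('readwrite') is CachingType.readwrite)  # True: lookup by value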
The Price of Inequality: How Today's Divided Society Endangers Our Future, by Joseph Stiglitz.
Freefall: America, Free Markets, and the Sinking of the World Economy, by Joseph Stiglitz.
The Three Trillion Dollar War: The True Cost of the Iraq Conflict, by Joseph Stiglitz.
Globalization and Its Discontents, by Joseph Stiglitz.
The Roaring Nineties Signed Edition, by Joseph Stiglitz.
Making Globalization Work, by Joseph Stiglitz.
Fair Trade for All: How Trade Can Promote Development, by Joseph Stiglitz.
#!/usr/bin/python

'''
Use subprocess to trigger the say command to do text-to-speech.
Works on OSX out of the box.
On Ubuntu do apt-get install gnustep-gui-runtime
'''

# IMPORTS
import sys
import subprocess
from zocp import ZOCP


def map(value, istart, istop, ostart, ostop):
    return ostart + (ostop - ostart) * ((value - istart) / (istop - istart))


def clamp(n, minn, maxn):
    if n < minn:
        return minn
    elif n > maxn:
        return maxn
    else:
        return n


class SayNode(ZOCP):
    # Constructor
    def __init__(self, nodename=""):
        self.nodename = nodename
        super(SayNode, self).__init__()

        # ZOCP STUFF
        self.set_name(self.nodename)

        # Register everything ..
        print("###########")
        self.register_string("text to say", "bla", 'srw')
        subprocess.call('say "init"', shell=True)
        self.start()

    def on_peer_signaled(self, peer, name, data, *args, **kwargs):
        print("#### on_peer_signaled")
        if self._running and peer:
            for sensor in data[2]:
                if(sensor):
                    self.receive_value(sensor)

    def on_modified(self, peer, name, data, *args, **kwargs):
        print("#### on_modified")
        if self._running and peer:
            for key in data:
                if 'value' in data[key]:
                    self.receive_value(key)

    def receive_value(self, key):
        new_value = self.capability[key]['value']
        if(type(new_value) == str):
            toSay = "say " + new_value
            subprocess.call(toSay, shell=True)


if __name__ == '__main__':
    #zl = logging.getLogger("zocp")
    #zl.setLevel(logging.DEBUG)
    z = SayNode("SAYnode")
    z.run()
    print("FINISH")
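Because receive_value interpolates peer-supplied text into a shell=True command, a peer could inject shell syntax. A sketch of the same call using an argument list instead, which avoids the shell entirely (a drop-in alternative, not part of the original node):

def receive_value_safe(self, key):
    new_value = self.capability[key]['value']
    if isinstance(new_value, str):
        # The list form never touches a shell, so quotes and semicolons
        # in the incoming text are spoken, not executed.
        subprocess.call(['say', new_value])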
Tacoma native Ted J. Fick was picked to succeed Tay Yoshitani, who is retiring. Fick has a manufacturing background but has not worked in the maritime or aviation industries.

Ted J. Fick, the choice for CEO, is a native of Tacoma.

Education: Bachelor’s degree in economics from the University of Washington; master’s degrees in management from Stanford University and in business administration from the University of Puget Sound.

Career highlights: Estimating engineer, Fick Foundry, 1977-1983; senior director, Kenworth division of Paccar, 1983 to 2000; president and CEO, Polar Corp., 2009 to 2013.
# -*- coding: utf-8 -*-
from nbx.models import db


class FiscalData(db.Model):
    __tablename__ = 'fiscal_data'

    FISCAL_CONSUMIDOR_FINAL = 'CONSUMIDOR FINAL'
    FISCAL_RESPONSABLE_INSCRIPTO = 'RESPONSABLE INSCRIPTO'
    FISCAL_EXCENTO = 'EXCENTO'
    FISCAL_MONOTRIBUTO = 'MONOTRIBUTO'

    _fiscal_types = {
        FISCAL_CONSUMIDOR_FINAL: 'Consumidor Final',
        FISCAL_RESPONSABLE_INSCRIPTO: 'Responsable Inscripto',
        FISCAL_EXCENTO: 'Excento',
        FISCAL_MONOTRIBUTO: 'Monotributo',
    }

    id = db.Column(db.Integer, primary_key=True)
    cuit = db.Column(db.Unicode(13))
    fiscal_type = db.Column(db.Enum(*_fiscal_types.keys(), name='fiscal_type'),
                            default=FISCAL_CONSUMIDOR_FINAL)
    iibb = db.Column(db.Unicode, nullable=True)

    @property
    def needs_cuit(self):
        return self.fiscal_type not in (self.FISCAL_CONSUMIDOR_FINAL,)

    @property
    def type(self):
        return self._fiscal_types.get(self.fiscal_type)

    def __repr__(self):
        return "<FiscalData '{} {}' of '{}'>".format(
            self.type,
            self.cuit,
            self.entity.full_name,
        )
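A short sketch of how the two properties behave, assuming the Flask-SQLAlchemy setup in nbx.models is configured (the CUIT below is a made-up value):

fd = FiscalData(cuit=u'30-12345678-9',
                fiscal_type=FiscalData.FISCAL_RESPONSABLE_INSCRIPTO)
print(fd.type)        # 'Responsable Inscripto'
print(fd.needs_cuit)  # True: every type except CONSUMIDOR FINAL requires a CUIT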
Finding a mortgage broker (or mortgage advisor) in Lawton Heath will allow you to analyse your situation and pick the mortgage alternative that best satisfies your requirements. Mortgage brokers in Lawton Heath are educated in the area and are regulated by permits. Brokers offer borrowers distinct plans from numerous lenders. They can be employed by a firm or can work privately. Individuals trying to find a mortgage loan hire these professionals to assist them in finding what they are looking for. They understand their clients' needs and the marketplace, and locate loans accordingly. Mortgage brokers in Lawton Heath are typically in constant contact with various lenders, so they have a range to pick from.

Mortgage brokers in Lawton Heath help you comprehend all the relevant mortgage information. First of all, there are numerous kinds of mortgages available, and each of these has several different parameters and technicalities that can be quite confusing for a lay person. A good mortgage broker in Lawton Heath will help sort out all this information and explain the different kinds of deals available in the industry. Once all the information is sorted out, the broker can help you narrow it down to finally choose which mortgage is best for you. It is hard to understand all the legalities associated with mortgages; this is where the broker steps in to help you deal with the legal aspects of the mortgage.

Another significant factor is that the broker works for you. This means they favour you, not the mortgage businesses. This point reinforces the fact that the broker will focus on getting you the best bargain possible. Dealing directly with businesses can be challenging, and they may not always have your best interests at heart. Instead they are focused on making profits. A mortgage broker in Lawton Heath is compensated to help the borrower get the most out of a deal, and a good broker ensures this is done.

Brokers can help you get a better deal on your mortgage. They are usually well connected within the industry and are aware of the ways of the marketplace. They will help you get a good interest rate on your mortgage that will save you a fortune in the long run. Hiring a mortgage broker in Lawton Heath also saves a lot of time. Searching for the right mortgage deal can be quite time consuming, and brokers can cut this down greatly.

Advisors help you get the mortgage best suited to your unique needs. Everyone has different needs when it comes to mortgages, and mortgage brokers in Lawton Heath help get deals that will benefit the borrower. Hiring a broker also substantially reduces the paperwork. You typically only need to fill out one application and hand it to your broker, who will then show it to different lenders. Other paperwork that can be very confusing is usually organised by the broker. This makes the whole procedure much easier to cope with, as a mortgage broker in Lawton Heath specialises in these processes. Additionally, as a result of their contacts, deals that go through brokers are approved much faster than deals that are handled otherwise.

The mortgage industry is sizeable, with banks, lending institutions and investor groups competing for your business. Add mortgage brokers in Lawton Heath to the mix and it might feel like finding a mortgage is a baffling procedure.

However, a great mortgage broker in Lawton Heath should be somebody who helps you navigate the marketplace, not complicate your view. Here are eight tips on finding a good broker.

1. Shop around. You are entering into a business relationship. Just as you would research other expert providers, do not hesitate to speak to several mortgage brokers in Lawton Heath. This is the best way to get to know them and the packages they offer. Just inform them that you are doing research so it is clear that you are not yet committed.

2. Access your credit records. Pull your credit report before embarking on meetings with different brokers. This ensures that various brokers will not access your credit score many times.

3. Make certain they are certified. Mortgage brokers in Lawton Heath need to pass qualifying tests that, in turn, certify them as brokers. Certification differs across nations and states. Do your research to ensure that any broker you take the time to meet has met the legal requirements to call him/herself a mortgage broker.

4. Learn how the broker earns their money. Up front, ask the broker to show you how they are paid. This way, you have an understanding of all the transactions that they help you with. Be wary of brokers who require you to provide money upfront.

5. Discuss their method. Do not hesitate to discuss the method the broker uses to arrive at a recommendation. Mortgage brokers in Lawton Heath may make commission on certain products and may steer you in that direction. Understanding their formula will enable you to find a broker who is working in your best interest.

6. Put them on the spot. When you are interviewing brokers, do not be scared to ask them what kinds of packages are currently available or will be in the future. A great broker should be up to date on current packages and rates. They should likewise have a finger on the pulse of what institutions might be offering in the foreseeable future.

7. Know both the broker and their firm. Mortgage brokers in Lawton Heath can work independently, or they can represent an organisation. Sometimes, brokers may even represent a bank. Inquire about their background so that you can be certain you are offered a package that is right for you rather than a product their firm or bank is trying to sell.

8. Seek out those who want to find out more about you. Just as a physician would not make a diagnosis until he or she understood your condition and all your symptoms, a mortgage broker in Lawton Heath should have a clear notion of your situation before recommending anything. Be wary of a broker who is eager to offer you loan packages immediately. Ideally, they should be fully apprised of your circumstances and goals before seeking out a mortgage that best fits you.

There are good mortgage brokers and there are poor mortgage brokers. Doing a little research and following these tips can help you find a great mortgage broker in Lawton Heath who will find the best mortgage for you. Please note that this is a general guide and that you are always better off speaking to a specialist mortgage broker in Lawton Heath to discuss your mortgage requirements in detail.
# greenthreads.py -- Utility module for querying an ObjectStore with gevent # Copyright (C) 2013 eNovance SAS <[email protected]> # # Author: Fabien Boucher <[email protected]> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 # of the License or (at your option) any later version of # the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """Utility module for querying an ObjectStore with gevent.""" import gevent from gevent import pool from dulwich.objects import ( Commit, Tag, ) from dulwich.object_store import ( MissingObjectFinder, _collect_filetree_revs, ObjectStoreIterator, ) def _split_commits_and_tags(obj_store, lst, ignore_unknown=False, pool=None): """Split object id list into two list with commit SHA1s and tag SHA1s. Same implementation as object_store._split_commits_and_tags except we use gevent to parallelize object retrieval. """ commits = set() tags = set() def find_commit_type(sha): try: o = obj_store[sha] except KeyError: if not ignore_unknown: raise else: if isinstance(o, Commit): commits.add(sha) elif isinstance(o, Tag): tags.add(sha) commits.add(o.object[1]) else: raise KeyError('Not a commit or a tag: %s' % sha) jobs = [pool.spawn(find_commit_type, s) for s in lst] gevent.joinall(jobs) return (commits, tags) class GreenThreadsMissingObjectFinder(MissingObjectFinder): """Find the objects missing from another object store. Same implementation as object_store.MissingObjectFinder except we use gevent to parallelize object retrieval. """ def __init__(self, object_store, haves, wants, progress=None, get_tagged=None, concurrency=1, get_parents=None): def collect_tree_sha(sha): self.sha_done.add(sha) cmt = object_store[sha] _collect_filetree_revs(object_store, cmt.tree, self.sha_done) self.object_store = object_store p = pool.Pool(size=concurrency) have_commits, have_tags = \ _split_commits_and_tags(object_store, haves, True, p) want_commits, want_tags = \ _split_commits_and_tags(object_store, wants, False, p) all_ancestors = object_store._collect_ancestors(have_commits)[0] missing_commits, common_commits = \ object_store._collect_ancestors(want_commits, all_ancestors) self.sha_done = set() jobs = [p.spawn(collect_tree_sha, c) for c in common_commits] gevent.joinall(jobs) for t in have_tags: self.sha_done.add(t) missing_tags = want_tags.difference(have_tags) wants = missing_commits.union(missing_tags) self.objects_to_send = set([(w, None, False) for w in wants]) if progress is None: self.progress = lambda x: None else: self.progress = progress self._tagged = get_tagged and get_tagged() or {} class GreenThreadsObjectStoreIterator(ObjectStoreIterator): """ObjectIterator that works on top of an ObjectStore. Same implementation as object_store.ObjectStoreIterator except we use gevent to parallelize object retrieval. 
""" def __init__(self, store, shas, finder, concurrency=1): self.finder = finder self.p = pool.Pool(size=concurrency) super(GreenThreadsObjectStoreIterator, self).__init__(store, shas) def retrieve(self, args): sha, path = args return self.store[sha], path def __iter__(self): for sha, path in self.p.imap_unordered(self.retrieve, self.itershas()): yield sha, path def __len__(self): if len(self._shas) > 0: return len(self._shas) while len(self.finder.objects_to_send): jobs = [] for _ in range(0, len(self.finder.objects_to_send)): jobs.append(self.p.spawn(self.finder.next)) gevent.joinall(jobs) for j in jobs: if j.value is not None: self._shas.append(j.value) return len(self._shas)
Image Title: Gigantic Girls Coloring Pages Groovy Free For Kids Pertaining To Teen 3. Post Title: Free Teen Coloring Pages. Filename: gigantic-girls-coloring-pages-groovy-free-for-kids-pertaining-to-teen-3.jpg. Image Dimension: 567 x 794 pixels. Images Format: jpg/jpeg. Publisher/Author: Mervin Bartell. Uploaded Date: Monday - September 24th. 2018 01:46:10 AM. Category: Coloring Pages. Image Source: homedsgn.com. Coloring Pages Teens Free Teen For Bageriet Info Inside 5. Free Color Pages Print Coloring For Teens In Teen 0. Teen Coloring Pages Page Great With Additional Characters Free For Regard To 15. Coloring Pages For Teens Best Kids Pertaining To Free Teen 6. Free Flower Coloring Pages Spring Activities Pertaining To Teen 8. Coloring Page For Teens Save Teen Books Beautiful Free Intended Pages 1. Free Printable Coloring Pages For Teens Beingthere Me Regarding Teen 7. Amazing Free Printable Coloring Pages For Teens About With Regard To Teen 13. Gigantic Girls Coloring Pages Groovy Free For Kids Pertaining To Teen 3. Coloring Pages For Girls Easy Free In Teen 12.
from __future__ import division import sys, os import re import libtbx.load_env boost_python_include_pat = re.compile(r"#include\s*<boost(?:/|_)python"); def run(modules): directory_paths = [ libtbx.env.dist_path(m) for m in modules ] line_counts_in_files_of_type = {} for d in directory_paths: for root, dirs, files in os.walk(d): for f in files: if f.startswith('.'): continue _, ext = os.path.splitext(f) if ext in ('.pyo', '.pyc'): continue boost_python_binding = False n_lines = 0 with open(os.path.join(root,f)) as fo: for li in fo: n_lines += 1 if (not boost_python_binding and boost_python_include_pat.search(li)): boost_python_binding = True if boost_python_binding: file_type = "Boost.Python" elif not ext: file_type = "unknown" else: file_type = ext[1:] line_counts_in_files_of_type.setdefault(file_type, []).append(n_lines) print "Lines of code in %s" % ', '.join(modules) print "%-15s%8s" % ('extension', '#lines') output = [] for file_type, line_counts in line_counts_in_files_of_type.iteritems(): cnt = sum(line_counts) output.append((cnt, "%-15s%8d" % (file_type, cnt))) output.sort(reverse=True) output = [ entry[1] for entry in output ] print '\n'.join(output) if __name__ == '__main__': run(sys.argv[1:])
Suitt & Associates, PA in New Bern, NC. A family owned business established by Floyd L. Suitt, Jr., who became a Licensed Professional Land Surveyor in 1965. After Floyd's retirement his son Chet M. Suitt became the Owner & President of Suitt & Associates, PA. Chet M. Suitt, PLS has been a North Carolina Licensed Professional Land Surveyor since 1986. 3609 M L King, Jr. Blvd. We are located in the Historic Town of New Bern, North Carolina, in a log cabin on Dr. M L King Jr. Blvd. / Hwy. 17 South, next to Moore's BBQ and across the street from New Bern's own Pepsi-Cola plant.
# 459. Repeated Substring Pattern Add to List # DescriptionHintsSubmissionsSolutions # Total Accepted: 28053 # Total Submissions: 73141 # Difficulty: Easy # Contributors: # YuhanXu # Given a non-empty string check if it can be constructed by taking a substring of it and appending multiple copies of the substring together. You may assume the given string consists of lowercase English letters only and its length will not exceed 10000. # # Example 1: # Input: "abab" # # Output: True # # Explanation: It's the substring "ab" twice. # Example 2: # Input: "aba" # # Output: False # Example 3: # Input: "abcabcabcabc" # # Output: True # # Explanation: It's the substring "abc" four times. (And the substring "abcabc" twice.) # 2017.05.24 class Solution(object): def repeatedSubstringPattern(self, s): """ :type s: str :rtype: bool """ n = len(s) for size in xrange(1, n // 2 + 1): if n % size != 0: continue i = 0 while (i + 2 * size) <= n and s[i:(i+size)] == s[(i+size):(i+2*size)]: i = i + size if i + size == n: return True return False
Fundamentalism is not the exclusive domain of one sect or ideology, political, religious or otherwise. When we think of fundamentalism we usually think Christianity, at least in the Western perspective, which is something that we need to be mindful of. Essentially, fundamentalism of any kind is a movement of purity, a returning to the roots. But fundamentalism at its core is a reaction to an ever modernizing culture that is rendering the fundamentalist irrelevant. So a militant, antagonistic hostility ensues against the modernizing culture to preserve and maintain sanctity, to feel relevant, which is largely the case with Christian fundamentalism. But fundamentalism is also control. This caustic preservation of sanctity exhibits itself in a manic control, which extends even to their “god.” The last thing a fundamentalist wants to feel is a loss of control, as even “god” falls under the rigid purview of the fundamentalist. “God” is contained by man’s hands and subject to their whims like a magical genie. You pray harder, “god” will respond. If you don’t, “god” will not. If you just obey a little more, “god” will react. If you fail to obey in the least, “god” will not. Do you not see the manipulation? This is man manipulating “god” for their own benefits and whims. This foolishness is the foolishness of a child because that’s the naiveté of a child and fundamentalism requires a child like nature. Now I say all of this to make a point about the “New Age.” The “New Age” prides itself on being a viable alternative to the more traditional western Christianity. The New Age views itself as the opposite of a fundamentalist Christianity, and in some ways, in dogma and ideology, it is. But in many ways, it shares things in common with fundamentalist Christianity, namely in mentality. The New Age, in many ways, is just as fundamentalist. What the Christians do not get, is that “god” the energy cannot be contained, but the New Age seeks to control this uncontrollable energy for its own gains. Like the fundamentalist Christian who prays harder and harder to make “god” react, there is the New Ager, who will meditate harder, chant more, for the purpose of getting this “god” energy to react and give them their rightly earned “enlightenment.” Again, this is man’s attempt to manipulate and control the uncontrollable energy that is “god” for their ultimate end, as if one can bend the will of this energy by repeating the right words, taking the right postures or thinking the right thoughts. But whatever name it masquerades under, it is all man’s ego and control, again at the forefront.
import copy import os import tarfile import urllib2 import zipfile from calcrepo import index from calcrepo import output from calcrepo import util class CalcRepository: """A class for adding new calcpkg repositories""" def __init__(self, name, url): self.name = name self.url = url self.output = output.CalcpkgOutput(True, False) self.index = index.Index(self) self.searchString = "" self.category = "" self.extension = "" self.math = False self.game = False self.searchFiles = False self.downloadDir = os.path.join(os.path.expanduser("~"), "Downloads", "") self.data = None def __repr__(self): return self.name + " at " + self.url def __str__(self): return self.name + " at " + self.url def setRepoData(self, searchString, category="", extension="", math=False, game=False, searchFiles=False): """Call this function with all the settings to use for future operations on a repository, must be called FIRST""" self.searchString = searchString self.category = category self.math = math self.game = game self.searchFiles = searchFiles self.extension = extension def setOutputObject(self, newOutput=output.CalcpkgOutput(True, True)): """Set an object where all output from calcpkg will be redirected to for this repository""" self.output = newOutput def searchHierarchy(self, fparent='/'): return self.index.searchHierarchy(fparent) def searchIndex(self, printData=True): """Search the index with all the repo's specified parameters""" backupValue = copy.deepcopy(self.output.printData) self.output.printData = printData self.data = self.index.search(self.searchString, self.category, self.math, self.game, self.searchFiles, self.extension) self.output.printData = backupValue return self.data def countIndex(self): """A wrapper for the count function in calcrepo.index; count using specified parameters""" self.data = self.index.count(self.searchString, self.category, self.math, self.game, self.searchFiles, self.extension) def getDownloadUrls(self): """Return a list of the urls to download from""" data = self.searchIndex(False) fileUrls = [] for datum in data: fileUrl = self.formatDownloadUrl(datum[0]) fileUrls.append(fileUrl) return fileUrls def getFileInfos(self): """Return a list of FileInfo objects""" data = self.searchIndex(False) self.data = data self.printd(" ") fileInfos = [] for datum in data: try: fileInfo = self.getFileInfo(datum[0], datum[1]) fileInfos.append(fileInfo) except NotImplementedError: self.printd("Error: the info command is not supported for " + self.name + ".") return [] return fileInfos def downloadFiles(self, prompt=True, extract=False): """Download files from the repository""" #First, get the download urls data = self.data downloadUrls = self.getDownloadUrls() #Then, confirm the user wants to do this if prompt: confirm = raw_input("Download files [Y/N]? 
") if confirm.lower() != 'y': self.printd("Operation aborted by user input") return #Now, if they still do, do all this stuff: counter = -1 for datum in data: counter += 1 try: download = downloadUrls[counter] except: pass # Download the file; fix our user agent self.printd("Downloading " + datum[0] + " from " + download) headers = { 'User-Agent' : 'calcpkg/2.0' } request = urllib2.Request(download, None, headers) fileData = urllib2.urlopen(request).read() # Now, process the downloaded file dowName = datum[0] # Use a helper function to remove /pub, /files dowName = util.removeRootFromName(dowName) dowName = dowName[1:] dowName = dowName.replace('/', '-') dowName = self.downloadDir + dowName try: downloaded = open(dowName, 'wb') except: os.remove(dowName) downloaded.write(fileData) downloaded.close() self.printd("Download complete! Wrote file " + dowName + "\n") #Extract them if told to do so if extract: extractType = "" if '.zip' in dowName: extractType = "zip" elif '.tar' in dowName: extractType = "tar" specType = "" if '.bz2' in dowName: specType = ":bz2" elif ".gz" in dowName: specType = ":gz" elif ".tgz" in dowName: extractType = "tar" specType = ":gz" if extractType != "": self.printd("Extracting file " + dowName + ", creating directory for extracted files") dirName, a, ending = dowName.partition('.') dirName = dirName + '-' + ending try: os.mkdir(dirName) except: pass if extractType == "zip": archive = zipfile.ZipFile(dowName, 'r') elif extractType == "tar": archive = tarfile.open(dowName, "r" + specType) else: self.printd("An unknown error has occured!") return archive.extractall(dirName) self.printd("All files in archive extracted to " + dirName) os.remove(dowName) self.printd("The archive file " + dowName + " has been deleted!\n") def getFileInfo(self): """Return a list of FileInfo objects""" raise NotImplementedError def formatDownloadUrl(self, url): """Format a repository path to be a real, valid download link""" raise NotImplementedError def updateRepoIndexes(self, verbose=False): """Update the local copies of the repository's master index""" raise NotImplementedError def printd(self, message): """Output function for repository to specific output location""" if self.output != None: print >> self.output, message def downloadFileFromUrl(self, url): """Given a URL, download the specified file""" fullurl = self.baseUrl + url try: urlobj = urllib2.urlopen(fullurl) contents = urlobj.read() except urllib2.HTTPError, e: self.printd("HTTP error:", e.code, url) return None except urllib2.URLError, e: self.printd("URL error:", e.code, url) return None self.printd("Fetched '%s' (size %d bytes)" % (fullurl, len(contents))) return contents def openIndex(self, filename, description): """Attempt to delete and recreate an index, returns open file object or None.""" try: os.remove(filename) self.printd(" Deleted old " + description) except: self.printd(" No " + description + " found") # Now, attempt to open a new index try: files = open(filename, 'wt') except: self.printd("Error: Unable to create file " + filename + " in current folder. Quitting.") return None return files
In the "Great Singers" operalogues, through video and audio we will discuss and listen to as many great voices as we can. Narrated by Edward Perretti, New Jersey State Opera. With my vast collection of materials, I will choose some rare and not-so-rare selections from as many operas as possible in the time allotted.
# -*- coding: utf-8 -*- """ *************************************************************************** OTBUtils.py --------------------- Date : August 2012 Copyright : (C) 2012 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'August 2012' __copyright__ = '(C) 2012, Victor Olaya' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import os from qgis.core import QgsApplication import subprocess from sextante.core.SextanteConfig import SextanteConfig from sextante.core.SextanteLog import SextanteLog from sextante.core.SextanteUtils import SextanteUtils class OTBUtils: OTB_FOLDER = "OTB_FOLDER" OTB_LIB_FOLDER = "OTB_LIB_FOLDER" OTB_SRTM_FOLDER = "OTB_SRTM_FOLDER" OTB_GEOID_FILE = "OTB_GEOID_FILE" @staticmethod def otbPath(): folder = SextanteConfig.getSetting(OTBUtils.OTB_FOLDER) if folder == None: folder = "" #try to configure the path automatically if SextanteUtils.isMac(): testfolder = os.path.join(str(QgsApplication.prefixPath()), "bin") if os.path.exists(os.path.join(testfolder, "otbcli")): folder = testfolder else: testfolder = "/usr/local/bin" if os.path.exists(os.path.join(testfolder, "otbcli")): folder = testfolder elif SextanteUtils.isWindows(): testfolder = os.path.dirname(str(QgsApplication.prefixPath())) testfolder = os.path.dirname(testfolder) testfolder = os.path.join(testfolder, "bin") path = os.path.join(testfolder, "otbcli.bat") if os.path.exists(path): folder = testfolder else: testfolder = "/usr/bin" if os.path.exists(os.path.join(testfolder, "otbcli")): folder = testfolder return folder @staticmethod def otbLibPath(): folder = SextanteConfig.getSetting(OTBUtils.OTB_LIB_FOLDER) if folder == None: folder ="" #try to configure the path automatically if SextanteUtils.isMac(): testfolder = os.path.join(str(QgsApplication.prefixPath()), "lib/otb/applications") if os.path.exists(testfolder): folder = testfolder else: testfolder = "/usr/local/lib/otb/applications" if os.path.exists(testfolder): folder = testfolder elif SextanteUtils.isWindows(): testfolder = os.path.dirname(str(QgsApplication.prefixPath())) testfolder = os.path.join(testfolder, "orfeotoolbox") testfolder = os.path.join(testfolder, "applications") if os.path.exists(testfolder): folder = testfolder else: testfolder = "/usr/lib/otb/applications" if os.path.exists(testfolder): folder = testfolder return folder @staticmethod def otbSRTMPath(): folder = SextanteConfig.getSetting(OTBUtils.OTB_SRTM_FOLDER) if folder == None: folder ="" return folder @staticmethod def otbGeoidPath(): filepath = SextanteConfig.getSetting(OTBUtils.OTB_GEOID_FILE) if filepath == None: filepath ="" return filepath @staticmethod def otbDescriptionPath(): return os.path.join(os.path.dirname(__file__), "description") @staticmethod def executeOtb(commands, progress): loglines = [] loglines.append("OTB execution console output") os.putenv('ITK_AUTOLOAD_PATH', OTBUtils.otbLibPath()) fused_command = ''.join(['"%s" ' % c for c in commands]) proc = subprocess.Popen(fused_command, shell=True, stdout=subprocess.PIPE, 
stdin=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True).stdout for line in iter(proc.readline, ""): if "[*" in line: idx = line.find("[*") perc = int(line[idx-4:idx-2].strip(" ")) if perc !=0: progress.setPercentage(perc) else: loglines.append(line) progress.setConsoleInfo(line) SextanteLog.addToLog(SextanteLog.LOG_INFO, loglines)
When you’re injured, there’s more at work than only feeling pain. If you’re in a car accident, for example, your body will instinctively tense up in response to the impending crash. When the impact does happen, your balance may be thrown off due to the tension, the spinal cord and discs might become misaligned, and you can potentially run into issues with tendons, joints, and other parts of the muscular system. Since the effects of an injury can ripple throughout your body, it’s critical to seek smart care. Better Body Massage Therapy has spent more than seven years treating a wide variety of injuries through a number of massage therapy disciplines. Massage therapy can be an effective means of shortening your recovery time, and it’s a treatment method that’s both fully natural and drug-free. Our massage therapy services are fully customized based on your needs. By providing a comprehensive and total body approach, we can help to reduce pain, increase mobility, and help you get back to a healthier life. To schedule a consultation, contact us online or by phone today.
def update(game_map): """Updated the game_map depending on version :param game_map: The currently loaded GameMap object :type game_map: class GameMap :return: True/False depending on if any updates occurred :rtype: boolean """ updated = False if game_map.version == "0.1.1": import locations updated = True game_map.locations = [] game_map.locations.append(locations.Location("First room")) game_map.player.location = game_map.locations[0] game_map.version = "0.2.0" if game_map.version == "0.2.0": import locations first_room = game_map.locations[0] del game_map.locations game_map.locations = {} game_map.locations["first room"] = first_room game_map.locations["second room"] = locations.Location("Second room") game_map.locations["first room"].available_locations["second room"] = game_map.locations["second room"] game_map.locations["second room"].available_locations["first room"] = game_map.locations["first room"] game_map.version = "0.3.0" return updated
◆リアライン・インソール・ライト 販売中 (Realign Insole Light (RIL) on the sell) . insole easily and in short time. shoes especially width close shoes. By supporting cuboid bone, which is important parts of feet, RIL keeps total feet 3D frame. Then feet get used to the insole step by step. fabric mending direction, it shows high grip power and prevents feet slipping in the shoes. RIF was developed for women who are annoyed to foot pain, nee pain, swollen caused by wearing high heels. ・Support feet arch in 3-D, giving any uncomfortable. The foam is made not to give pressure on under feet muscle and sinew. ・“Toe Support” protect top of toe and prevent slipping which happens in high heels. ・The material is very durable but, having proper rebound, it can keep the foam long time. feet and gives you more high performance than general custom insole. scheduled on 1 Mar and 29 Mar. 2014. Above event needs fee, more detail please check GLAB website. Copyright (C) フットテクノ All Rights Reserved.
# -*- coding: utf-8 -*- # # (C) முத்தையா அண்ணாமலை 2013-2015 # # N-gram language model for Tamil letters import tamil import copy from .Corpus import Corpus class Letters: def __init__(self,filename): self.letter = dict() self.letter.update(zip( tamil.utf8.tamil_letters, map(lambda x : 0, tamil.utf8.tamil_letters) ) ) self.corpus = Corpus( filename ) def __del__(self): try: del self.corpus except Exception: pass def __unicode__( self ): op = u"" for lett,freq in self.letter.items(): op = op + u"%s => %d\n"%(lett,freq) print(max(self.letter.values())) return op class Unigram(Letters): def frequency_model( self ): """ build a letter frequency model for Tamil letters from a corpus """ # use a generator in corpus for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus self.letter[next_letter] = self.letter[next_letter] + 1 class Bigram(Unigram): def __init__(self,filename): Unigram.__init__(self,filename) self.letter2 = dict() for k in tamil.utf8.tamil_letters: self.letter2[k] = copy.copy( self.letter ) def language_model(self,verbose=True): """ builds a Tamil bigram letter model """ # use a generator in corpus prev = None for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus if prev: self.letter2[prev][next_letter] += 1 if ( verbose ) : print(prev) print(next_letter) print( self.letter2[prev][next_letter] ) prev = next_letter #update always return
Sometimes I want to bake a cake but for whatever reason baking a layer cake seems daunting. Cakes that bake in 9x13 pans seem much easier because they cool in the same pan they bake in and frosting the cake takes much less time (and requires less skill). This cake is great for celebrations and the sprinkles are a fun addition. 2. Spray a 9X13 pan with nonstick spray. 3. Cream butter and sugar together in a mixer, scrape bowl with a spatula. 4. Add milk and eggs, beat until well combined. 5. Add baking powder, salt, almond and vanilla extract followed by cake flour, mixing until combined. 6. Pour batter into pan and place into oven, immediately reduce temperature to 350 and bake for 40-50 minutes, rotating pan half way through. Remove from oven when cake tester comes out clean when inserted in the center and removed. 7. Allow cake to cool completely before frosting. 1. Place ingredients into the bowl of a mixer on low, when ingredients come together switch speed to high and beat for ten minutes. 2. Spread frosting on cake. Yum!!! Now I'm craving cake, even though I had a week full of junk food!! I must eat cake now.
#!/usr/bin/env python #-*- coding:utf-8 -*- import os, sys, time import socket, struct, select import thread, logging import protocols reload(sys) sys.setdefaultencoding('utf8') logging.basicConfig( # filename ='proxy.log', format = '%(asctime)s %(message)s', datefmt = '%Y-%m-%d %H:%M:%S', level = logging.DEBUG ) class Session: def __init__(self, session=None, host="", port=0 ): self.session = session self.host = host self.port = port def begin(self): try: self.start() except socket.timeout: logging.info('[Session] Session %s:%d timeout.' % (self.host, self.port) ) except (KeyboardInterrupt, SystemExit): self.close() raise KeyboardInterrupt finally: try: self.close() except Exception as e: logging.debug(e) def start(self): buff, protocol = protocols.guess_protocol(self.session) if protocol == "socks": logging.info('[Session] protocol is Socks') handle = protocols.socks.Socks(buff=buff, session=self.session) handle.handle() self.close() elif protocol == "http": logging.info('[Session] protocol is Http') # handle = protocols.http.Http(buff=buff, session=self.session) self.close() elif protocol == "ftp": # handle = protocols.ftp.Ftp(buff=buff, session=self.session) logging.info('[Session] unsupport protocol ') self.close() elif protocol == "ssl": # handle = protocols.ssl.Ssl(buff=buff, session=self.session) logging.info('[Session] unsupport protocol ') self.close() else: logging.info('[Session] unknow protocol ') self.close() def close(self): logging.info('[Session] Session %s:%d close.' % (self.host, self.port) ) return self.session.close() class Proxy: def __init__(self, host="0.0.0.0", port=1070): self.host = host self.port = port def run(self): try: self.server = socket.socket() self.server.bind((self.host, self.port)) self.server.listen(100) except Exception as e: logging.debug("[Server] Can not make proxy server on %s:%d " %(self.host, self.port) ) logging.debug(e) return self.shutdown() logging.info("[Server] Proxy Server running on %s:%d ..." %(self.host, self.port)) # run forever try: self.loop() except (KeyboardInterrupt, SystemExit): pass except Exception as e: logging.info('[Server] Unknow error ...') logging.info(e) finally: self.shutdown() def shutdown(self): logging.info('[Server] Shutdown Proxy server ...') return self.server.close() def loop(self): while True: connection, address = self.server.accept() session = Session(session=connection, host=address[0], port=address[1]) try: thread.start_new_thread(session.start, () ) except Exception as e: logging.debug("[Server] 会话异常...") logging.info(e) session.close() if __name__ == '__main__': host = "0.0.0.0" port = 1070 proxy = Proxy(host=host, port=port) proxy.run()
This question was triggered by my watching the Australian ABC TV program Catalyst: The Heart of the Matter. Part 1. Dietary Villains that aired on 24th October 2013. The show claimed several things that went against what I believed to be a healthy diet to prevent heart disease. One of those claims was that blood cholesterol levels were pre-set for an individual and changing the diet would have no effect. At the time the show aired I was mid-stream through my own change of diet with dietary patterns as recommended on the Australian Heart Foundation website. Remarkably after ten weeks my total cholesterol level and LDL cholesterol levels had dropped remarkably. Thus, at least for me, that claim did not hold true. I decided to investigate whether there was any evidence in the literature of studies that had been done. I will link to any studies here.
""" Methods to update/combine authoritative Land Use/Land Cover Information with OSM Data """ def update_globe_land_cover( original_globe_raster, osm_urban_atlas_raster, osm_globe_raster, epsg, updated_globe_raster, detailed_globe_raster): """ Update the original Glob Land 30 with the result of the conversion of OSM DATA to the Globe Land Cover nomenclature; Also updates he previous updated Glob Land 30 with the result of the conversion of osm data to the Urban Atlas Nomenclature """ import os import numpy as np from gasp.torst.gdal import array_to_raster from gasp.fromrst.gdal import rst_to_array from gasp.gdal.properties.cells import get_cellsize from gasp.gdal.properties.cells import get_nodata # ############################# # # Convert images to numpy array # # ############################# # np_globe_original = rst_to_array(original_globe_raster) np_globe_osm = rst_to_array(osm_globe_raster) np_ua_osm = rst_to_array(osm_urban_atlas_raster) # ################################## # # Check the dimension of both images # # ################################## # if np_globe_original.shape != np_globe_osm.shape: return ( 'The Globe Land 30 raster (original) do not have the same number' ' of columns/lines comparing with the Globe Land 30 derived ' 'from OSM data' ) elif np_globe_original.shape != np_ua_osm.shape: return ( 'The Globe Land 30 raster (original) do not have the same ' 'number of columns/lines comparing with the Urban Atlas raster ' 'derived from OSM data' ) elif np_globe_osm.shape != np_ua_osm.shape: return ( 'The Globe Land 30 derived from OSM data do not have the same ' 'number of columns/lines comparing with the Urban Atlas raster ' 'derived from OSM data' ) # ############## # # Check Cellsize # # ############## # cell_of_rsts = get_cellsize( [original_globe_raster, osm_globe_raster, osm_urban_atlas_raster], xy=True ) cell_globe_original = cell_of_rsts[original_globe_raster] cell_globe_osm = cell_of_rsts[osm_globe_raster] cell_ua_osm = cell_of_rsts[osm_urban_atlas_raster] if cell_globe_original != cell_globe_osm: return ( 'The cellsize of the Globe Land 30 raster (original) is not the ' 'same comparing with the Globe Land 30 derived from OSM data' ) elif cell_globe_original != cell_ua_osm: return ( 'The cellsize of the Globe Land 30 raster (original) is not the ' 'same comparing with the Urban Atlas raster derived from OSM data' ) elif cell_ua_osm != cell_globe_osm: return ( 'The cellsize of the Globe Land 30 derived from OSM data is not ' 'the same comparing with the Urban Atlas raster derived from ' 'OSM data' ) # ############################# # # Get the Value of Nodata Cells # # ############################# # nodata_glob_original = get_nodata(original_globe_raster) nodata_glob_osm = get_nodata(osm_globe_raster) nodata_ua_osm = get_nodata(osm_urban_atlas_raster) # ######################################## # # Create a new map - Globe Land 30 Updated # # ######################################## # """ Create a new array with zeros... 1) The zeros will be replaced by the values in the Globe Land derived from OSM. 2) The zeros will be replaced by the values in the Original Globe Land at the cells with NULL data in the Globe Land derived from OSM. 
The meta array will identify values origins in the updated raster: 1 - Orinal Raster 2 - OSM Derived Raster """ update_array = np.zeros(( np_globe_original.shape[0], np_globe_original.shape[1])) update_meta_array = np.zeros(( np_globe_original.shape[0], np_globe_original.shape[1])) # 1) np.copyto(update_array, np_globe_osm, 'no', np_globe_osm != nodata_glob_osm) # 1) meta np.place(update_meta_array, update_array != 0, 2) # 2) meta np.place(update_meta_array, update_array == 0, 1) # 2) np.copyto(update_array, np_globe_original, 'no', update_array == 0) # 2) meta np.place( update_meta_array, update_array==nodata_glob_original, int(nodata_glob_original) ) # noData to int np.place( update_array, update_array==nodata_glob_original, int(nodata_glob_original) ) updated_meta = os.path.join( os.path.dirname(updated_globe_raster), '{n}_meta{e}'.format( n = os.path.splitext(os.path.basename(updated_globe_raster))[0], e = os.path.splitext(os.path.basename(updated_globe_raster))[1] ) ) # Create Updated Globe Cover 30 array_to_raster( update_array, updated_globe_raster, original_globe_raster, epsg, gdal.GDT_Int32, noData=int(nodata_glob_original) ) # Create Updated Globe Cover 30 meta array_to_raster( update_meta_array, updated_meta, original_globe_raster, epsg, gdal.GDT_Int32, noData=int(nodata_glob_original) ) # ################################################# # # Create a new map - Globe Land 30 Detailed with UA # # ################################################# # np_update = rst_to_array(updated_globe_raster) detailed_array = np.zeros((np_update.shape[0], np_update.shape[1])) detailed_meta_array = np.zeros(( np_update.shape[0], np_update.shape[1] )) """ Replace 80 Globe Land for 11, 12, 13, 14 of Urban Atlas The meta array will identify values origins in the detailed raster: 1 - Updated Raster 2 - UA Derived Raster from OSM """ # Globe - Mantain some classes np.place(detailed_array, np_update==30, 8) np.place(detailed_array, np_update==30, 1) np.place(detailed_array, np_update==40, 9) np.place(detailed_array, np_update==40, 1) np.place(detailed_array, np_update==50, 10) np.place(detailed_array, np_update==50, 1) np.place(detailed_array, np_update==10, 5) np.place(detailed_array, np_update==10, 1) # Water bodies np.place(detailed_array, np_ua_osm==50 or np_update==60, 7) np.place(detailed_meta_array, np_ua_osm==50 or np_update==60, 1) # Urban - Where Urban Atlas IS NOT NULL np.place(detailed_array, np_ua_osm==11, 1) np.place(detailed_meta_array, np_ua_osm==11, 2) np.place(detailed_array, np_ua_osm==12, 2) np.place(detailed_meta_array, np_ua_osm==12, 2) np.place(detailed_array, np_ua_osm==13, 3) np.place(detailed_meta_array, np_ua_osm==13, 2) np.place(detailed_array, np_ua_osm==14, 4) np.place(detailed_meta_array, np_ua_osm==14, 2) # Urban Atlas - Class 30 to 6 np.place(detailed_array, np_ua_osm==30, 6) np.place(detailed_meta_array, np_ua_osm==30, 2) # Create Detailed Globe Cover 30 array_to_raster( detailed_array, detailed_globe_raster, original_globe_raster, epsg, gdal.GDT_Int32, noData=0 ) # Create Detailed Globe Cover 30 meta detailed_meta = os.path.join( os.path.dirname(detailed_globe_raster), '{n}_meta{e}'.format( n = os.path.splitext(os.path.basename(detailed_meta))[0], e = os.path.splitext(os.path.basename(detailed_meta))[1] ) ) array_to_raster( detailed_meta_array, detailed_meta, original_globe_raster, epsg, gdal.GDT_Int32, noData=0 )
Air routes into Russia are from most corners of the globe. Moscow’s Sheremetyevo, Domodedovo and Vnukovo and St Petersburg's Pulkovo International Airport host the bulk of Russia’s international flights. Plenty of other cities have direct international connections, including Arkhangelsk, Irkutsk, Kaliningrad, Kazan, Khabarovsk, Krasnodar, Mineralnye Vody, Murmansk, Nalchik, Nizhny Novgorod, Novosibirsk, Perm, Yekaterinburg and Yuzhno-Sakhalinsk. Departure tax is included in the price of a ticket. Russia borders 14 countries. Popular land approaches include trains and buses from Central European and Baltic countries or on either the trans-Manchurian or trans-Mongolian train routes from China and Mongolia. Russia shares borders with Azerbaijan, Belarus, China, Estonia, Finland, Georgia, Kazakhstan, Latvia, Lithuania, Mongolia, North Korea, Norway, Poland and Ukraine. Before planning a journey into or out of Russia from any of these countries, check the visa situation for your nationality. On trains, border crossings are a straightforward but drawn-out affair, with a steady stream of customs and ticket personnel scrutinising your passport and visa. If you’re arriving by car or motorcycle, you’ll need to show your vehicle registration and insurance papers, and your driving licence, passport and visa. These formalities are usually minimal for Western European citizens. On the Russian side, most cars are subjected to cursory inspection, with only a small percentage getting a thorough check. The main crossing is between Yarag-Kazmalyar in Dagestan and Samur in Azerbaijan. Take a shared taxi from Derbent to Yarag-Kazmalyar. You have to be in a vehicle to cross the border over the Samur River; marshrutky (fixed route minibuses) are the way to go. On the Azeri side take a shared taxi to Baku. The direct Moscow–Baku train (platzkart/kupe or 2nd/3rd class R9920/7080, two days, three hours and 30 minutes, three weekly) goes via Astrakhan, Makhachkala and Derbent. A somewhat relaxed version of border control has been reestablished between Russia and Belarus, despite the two being part of a single Customs Union. You must have visas for both countries; crossing without them is a criminal offence. Don't even consider entering Belarus on a Russian visa or vice versa. There are several daily buses between Minsk and Moscow (12 hours), but be aware of the potential problems with using the Russian–Belarus border. Highway crossings between Russia and Belarus can't be used by the citizens of third countries. Non-Russian and Belarus passport holders travelling from the EU by road should use border crossings with Latvia or Estonia. There are services to/from Kaliningrad, Moscow, Smolensk and St Petersburg, but be aware of the potential problems with using the Russian–Belarus border. There are potentially serious implications for those transiting into Russia via Belarus on an international bus or train as you will not receive a Russian border stamp or an immigration form on entering the country. If you plan to exit Russia via a different route, this will be a problem and you could be fined. We’ve not heard of any travellers running into serious difficulties but it would still be wise to make careful enquiries with visa authorities in both Belarus and Russia before you’ve confirmed your travel arrangements. The road from Manzhouli to Zabaikalsk in the Chita Region is open to traffic; it’s also possible to cross from Heihe to Blagoveshchensk using a ferry across the Amur River. 
A bus runs between Manzhouli and Zabaikalsk, but asking Russians for a ride is usually faster. The classic way into Russia from China is along the trans-Mongolian and trans-Manchurian rail routes. Vladivostok and Khabarovsk have other options for travelling overland to China. There are three border crossings, of which Narva is nearest to Tallinn. Conveniently for motorists, you can avoid queues by booking a time slot for your crossing from (but not into) Estonia for a small fee at www.estonianborder.eu. There are daily trains between Tallinn and Moscow (kupe R7600, 15 hours, 30 minutes) and St Petersburg (R4100, seven hours and 20 minutes). By bus you can connect to/from Tallinn with St Petersburg (from €15, seven hours, seven daily) and Pskov (R1000, six hours, daily). There are many daily buses between Helsinki and St Petersburg and Helsinki and Petrozavodsk, as well as three buses a week from Rovaniemi to Murmansk. Highways cross at the Finnish border posts of Nuijamaa and Vaalimaa (Brusnichnoe and Torfyanovka, respectively, on the Russian side). High-speed Allegro trains (from R5000, 3½ hours, four daily) connect St Petersburg and Helsinki. The daily 31/34 Leo Tolstoy service between Moscow and Helsinki (R5600, 14 hours and 20 minutes) also passes through St Petersburg (R4100, seven hours, 30 minutes). The Georgian Military Highway over the Greater Caucasus mountains provides a connection between Vladikavkaz in Russia and Tblisi in Georgia. It's possible to catch buses from Vladikavkaz to Lars where you'll need to arrange a taxi across the border itself to Kazbegi. As long as your papers are in order you should also be able to drive yourself between Russia and Georgia on this route; no border permit is required. Roads into Kazakhstan head east from Astrakhan and south from Samara, Chelyabinsk, Orenburg and Omsk. There are buses (R1000, 11 hours, two daily) between Omsk and Astana, Kazakhstan's capital. There are direct trains on even days between Moscow and Astana (platzkart/kupe R10,000/14,000, two days and six hours) in addition to services connecting Samara and Novosibirsk with Almaty. Rīga is connected by bus to Moscow (from €50, 15 hours, daily), St Petersburg (from €20, 11 hours, four daily), Pskov (€30, six hours, three daily) and Kaliningrad (from €20, eight hours, two daily). The M9 Rīga–Moscow road crosses the border east of Zilupe (Latvia). Be prepared to lose at least a couple of hours at the border as checks are slow, especially on the Latvian side. The A212 road from Rīga leads to Pskov, crossing a corner of Estonia en route. Overnight trains run between Rīga and Moscow (platzkartny/kupe R6000/10,000, 16 hours, daily) and St Petersburg (platzkart/kupe R3400/6000, 16 hours, daily). From Kaliningrad there are services to Klaipėda (R600, four hours, three daily) and Vilnius (R900, six hours, two daily). The border crossing points from Kaliningrad into Lithuania are Chernyshevskoye–Kibartay, Sovetsk–Panemune, Pogranichny–Ramoniškių and Morskoe–Nida. Services link Vilnius with Kaliningrad (platzkart/kupe R3000/5600, six hours, two to three daily), Moscow (platzkart/kupe R5470/10,170, 14 hours, two daily) and St Petersburg (platzkart/kupe R5400/10,300, 17 hours, daily). The St Petersburg trains cross Latvia and the Moscow ones cross Belarus, for which you’ll need a Belarus visa or transit visa. There are direct buses between Ulaanbaatar and Ulan-Ude (R1100, 10 to 12 hours, daily). 
It’s possible to drive between Mongolia and Russia at the Tsagaanuur–Tashanta and Altanbulag–Kyakhta borders. Getting through these borders can be a very slow process; it helps to have written permission from a Mongolian embassy if you wish to bring a vehicle through. Apart from the trans-Mongolian train connecting Moscow and Beijing, there’s a direct train from Ulaanbaatar to Moscow (kupe R16,370, four days and two hours, twice weekly) as well as a service to and from Irkutsk (kupe R6445, 35 hours, daily). The only crossing of the 17km North Korea–Russia border is via trains going over the Friendship Bridge across the Tumen River. Only Russian and North Korean citizens can use this crossing. That said, back in 2008, a couple of Western tourists did manage to enter North Korea using this route – we do not recommend trying it. There are minibus connections between Murmansk and Kirkenes (R1200, four to six hours, two daily). The border crossing is at Storskog/Borisoglebsk on the Kirkenes–Murmansk road. As this is a sensitive border region, no stopping is allowed along the Russian side of this road. Also non-Russian registered vehicles are barred from the Nikel–Zapolyarnye section of the M18 highway between 11pm and 7am and any time on Tuesday, Thursday or Saturday. On those days you will be diverted via Prirechniy, a longer drive involving a rough, unpaved section. There are several daily buses between both Gdańsk and Olsztyn and Kaliningrad as well as daily buses to/from Warsaw (R1000, nine hours). The main border crossing to/from Kaliningrad is at Bezledy/Bagrationovsk on the A195 highway. Queues here can be very long. Warsaw is connected with Moscow (from R9150, 18 hours, daily). The Moscow trains enter Belarus near Brest, so you’ll need a Belarus visa or transit visa. The two countries were essentially at war with each other at the time of writing, but it was still possible to cross in both directions by vehicle or train, with the exception of rebel-held zones in southeastern Ukraine and the Russian-controlled Crimea. Note that crossing into the rebel-held zones or Crimea from the Russian side is a criminal offence under Ukrainian law. To enter Crimea from Ukraine, you need special permission from the Ukrainian authorities and you must return by the same route. Several daily buses run between Moscow and Kyiv (from R1400, 15 to 17 hours) as well as Kharkiv (from R1100, 14 hours) and other major Ukrainian cities. The main auto route between Kyiv and Moscow starts as the E93 (M20) north of Kyiv, but becomes the M3 when it branches off to the east some 50km south of Chernihiv. Kharkiv is connected to Moscow by the M2 road. There is a train service to Kyiv from St Petersburg, but it is of little use for most travellers as the route goes via Belarus, which requires an extra hard-to-get visa. Travelling overland by train from the UK or Western Europe takes a minimum of two days and nights. There are no direct trains from the UK to Russia. The cheapest route you can take is on the Eurostar (www.eurostar.com) to Brussels, and then via Cologne and Warsaw to Moscow. This journey passes through Minsk (Belarus), which may be problematic. All foreigners visiting Belarus need a visa, including those transiting by train – sort this out before arriving in Belarus. There may also be an issue crossing into Russia as you're unlikely to receive a visa stamp into the country or an immigration card. 
To avoid such hassles consider taking the train to St Petersburg from Vilnius in Lithuania, which runs several times a week via Latvia. There are daily connections between Vilnius and Warsaw. From Moscow and St Petersburg there are also regular direct international services to Berlin, Nice, Paris, Prague and Vienna (note all these services go via Belarus). For European rail timetables check www.railfaneurope.net, which has links to all of Europe’s national railways.
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg _DEFAULT_AUTH_METHODS = ['external', 'password', 'token'] FILE_OPTIONS = { '': [ cfg.StrOpt('admin_token', secret=True, default='ADMIN'), cfg.StrOpt('public_bind_host', default='0.0.0.0', deprecated_opts=[cfg.DeprecatedOpt('bind_host', group='DEFAULT')]), cfg.StrOpt('admin_bind_host', default='0.0.0.0', deprecated_opts=[cfg.DeprecatedOpt('bind_host', group='DEFAULT')]), cfg.IntOpt('compute_port', default=8774), cfg.IntOpt('admin_port', default=35357), cfg.IntOpt('public_port', default=5000), cfg.StrOpt('public_endpoint', default='http://localhost:%(public_port)s/'), cfg.StrOpt('admin_endpoint', default='http://localhost:%(admin_port)s/'), cfg.StrOpt('onready'), # default max request size is 112k cfg.IntOpt('max_request_body_size', default=114688), cfg.IntOpt('max_param_size', default=64), # we allow tokens to be a bit larger to accommodate PKI cfg.IntOpt('max_token_size', default=8192), cfg.StrOpt('member_role_id', default='9fe2ff9ee4384b1894a90878d3e92bab'), cfg.StrOpt('member_role_name', default='_member_'), cfg.IntOpt('crypt_strength', default=40000)], 'identity': [ cfg.StrOpt('default_domain_id', default='default'), cfg.BoolOpt('domain_specific_drivers_enabled', default=False), cfg.StrOpt('domain_config_dir', default='/etc/keystone/domains'), cfg.StrOpt('driver', default=('keystone.identity.backends' '.sql.Identity')), cfg.IntOpt('max_password_length', default=4096)], 'trust': [ cfg.BoolOpt('enabled', default=True), cfg.StrOpt('driver', default='keystone.trust.backends.sql.Trust')], 'os_inherit': [ cfg.BoolOpt('enabled', default=False)], 'token': [ cfg.ListOpt('bind', default=[]), cfg.StrOpt('enforce_token_bind', default='permissive'), cfg.IntOpt('expiration', default=3600), cfg.StrOpt('provider', default=None), cfg.StrOpt('driver', default='keystone.token.backends.sql.Token'), cfg.BoolOpt('caching', default=True), cfg.IntOpt('revocation_cache_time', default=3600), cfg.IntOpt('cache_time', default=None)], 'cache': [ cfg.StrOpt('config_prefix', default='cache.keystone'), cfg.IntOpt('expiration_time', default=600), # NOTE(morganfainberg): the dogpile.cache.memory acceptable in devstack # and other such single-process/thread deployments. Running # dogpile.cache.memory in any other configuration has the same pitfalls # as the KVS token backend. It is recommended that either Redis or # Memcached are used as the dogpile backend for real workloads. To # prevent issues with the memory cache ending up in "production" # unintentionally, we register a no-op as the keystone default caching # backend. cfg.StrOpt('backend', default='keystone.common.cache.noop'), cfg.BoolOpt('use_key_mangler', default=True), cfg.MultiStrOpt('backend_argument', default=[]), cfg.ListOpt('proxies', default=[]), # Global toggle for all caching using the should_cache_fn mechanism. cfg.BoolOpt('enabled', default=False), # caching backend specific debugging. 
cfg.BoolOpt('debug_cache_backend', default=False)], 'ssl': [ cfg.BoolOpt('enable', default=False), cfg.StrOpt('certfile', default="/etc/keystone/ssl/certs/keystone.pem"), cfg.StrOpt('keyfile', default="/etc/keystone/ssl/private/keystonekey.pem"), cfg.StrOpt('ca_certs', default="/etc/keystone/ssl/certs/ca.pem"), cfg.StrOpt('ca_key', default="/etc/keystone/ssl/private/cakey.pem"), cfg.BoolOpt('cert_required', default=False), cfg.IntOpt('key_size', default=1024), cfg.IntOpt('valid_days', default=3650), cfg.StrOpt('cert_subject', default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost')], 'signing': [ cfg.StrOpt('token_format', default=None), cfg.StrOpt('certfile', default="/etc/keystone/ssl/certs/signing_cert.pem"), cfg.StrOpt('keyfile', default="/etc/keystone/ssl/private/signing_key.pem"), cfg.StrOpt('ca_certs', default="/etc/keystone/ssl/certs/ca.pem"), cfg.StrOpt('ca_key', default="/etc/keystone/ssl/private/cakey.pem"), cfg.IntOpt('key_size', default=2048), cfg.IntOpt('valid_days', default=3650), cfg.StrOpt('cert_subject', default=('/C=US/ST=Unset/L=Unset/O=Unset/' 'CN=www.example.com'))], 'assignment': [ # assignment has no default for backward compatibility reasons. # If assignment driver is not specified, the identity driver chooses # the backend cfg.StrOpt('driver', default=None), cfg.BoolOpt('caching', default=True), cfg.IntOpt('cache_time', default=None)], 'credential': [ cfg.StrOpt('driver', default=('keystone.credential.backends' '.sql.Credential'))], 'oauth1': [ cfg.StrOpt('driver', default='keystone.contrib.oauth1.backends.sql.OAuth1'), cfg.IntOpt('request_token_duration', default=28800), cfg.IntOpt('access_token_duration', default=86400)], 'federation': [ cfg.StrOpt('driver', default='keystone.contrib.federation.' 'backends.sql.Federation')], 'policy': [ cfg.StrOpt('driver', default='keystone.policy.backends.sql.Policy')], 'ec2': [ cfg.StrOpt('driver', default='keystone.contrib.ec2.backends.kvs.Ec2')], 'endpoint_filter': [ cfg.StrOpt('driver', default='keystone.contrib.endpoint_filter.backends' '.sql.EndpointFilter'), cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True)], 'stats': [ cfg.StrOpt('driver', default=('keystone.contrib.stats.backends' '.kvs.Stats'))], 'ldap': [ cfg.StrOpt('url', default='ldap://localhost'), cfg.StrOpt('user', default=None), cfg.StrOpt('password', secret=True, default=None), cfg.StrOpt('suffix', default='cn=example,cn=com'), cfg.BoolOpt('use_dumb_member', default=False), cfg.StrOpt('dumb_member', default='cn=dumb,dc=nonexistent'), cfg.BoolOpt('allow_subtree_delete', default=False), cfg.StrOpt('query_scope', default='one'), cfg.IntOpt('page_size', default=0), cfg.StrOpt('alias_dereferencing', default='default'), cfg.StrOpt('user_tree_dn', default=None), cfg.StrOpt('user_filter', default=None), cfg.StrOpt('user_objectclass', default='inetOrgPerson'), cfg.StrOpt('user_id_attribute', default='cn'), cfg.StrOpt('user_name_attribute', default='sn'), cfg.StrOpt('user_mail_attribute', default='email'), cfg.StrOpt('user_pass_attribute', default='userPassword'), cfg.StrOpt('user_enabled_attribute', default='enabled'), cfg.IntOpt('user_enabled_mask', default=0), cfg.StrOpt('user_enabled_default', default='True'), cfg.ListOpt('user_attribute_ignore', default=['default_project_id', 'tenants']), cfg.StrOpt('user_default_project_id_attribute', default=None), cfg.BoolOpt('user_allow_create', default=True), cfg.BoolOpt('user_allow_update', default=True), cfg.BoolOpt('user_allow_delete', default=True), cfg.BoolOpt('user_enabled_emulation', default=False), 
cfg.StrOpt('user_enabled_emulation_dn', default=None), cfg.ListOpt('user_additional_attribute_mapping', default=[]), cfg.StrOpt('tenant_tree_dn', default=None), cfg.StrOpt('tenant_filter', default=None), cfg.StrOpt('tenant_objectclass', default='groupOfNames'), cfg.StrOpt('tenant_id_attribute', default='cn'), cfg.StrOpt('tenant_member_attribute', default='member'), cfg.StrOpt('tenant_name_attribute', default='ou'), cfg.StrOpt('tenant_desc_attribute', default='description'), cfg.StrOpt('tenant_enabled_attribute', default='enabled'), cfg.StrOpt('tenant_domain_id_attribute', default='businessCategory'), cfg.ListOpt('tenant_attribute_ignore', default=[]), cfg.BoolOpt('tenant_allow_create', default=True), cfg.BoolOpt('tenant_allow_update', default=True), cfg.BoolOpt('tenant_allow_delete', default=True), cfg.BoolOpt('tenant_enabled_emulation', default=False), cfg.StrOpt('tenant_enabled_emulation_dn', default=None), cfg.ListOpt('tenant_additional_attribute_mapping', default=[]), cfg.StrOpt('role_tree_dn', default=None), cfg.StrOpt('role_filter', default=None), cfg.StrOpt('role_objectclass', default='organizationalRole'), cfg.StrOpt('role_id_attribute', default='cn'), cfg.StrOpt('role_name_attribute', default='ou'), cfg.StrOpt('role_member_attribute', default='roleOccupant'), cfg.ListOpt('role_attribute_ignore', default=[]), cfg.BoolOpt('role_allow_create', default=True), cfg.BoolOpt('role_allow_update', default=True), cfg.BoolOpt('role_allow_delete', default=True), cfg.ListOpt('role_additional_attribute_mapping', default=[]), cfg.StrOpt('group_tree_dn', default=None), cfg.StrOpt('group_filter', default=None), cfg.StrOpt('group_objectclass', default='groupOfNames'), cfg.StrOpt('group_id_attribute', default='cn'), cfg.StrOpt('group_name_attribute', default='ou'), cfg.StrOpt('group_member_attribute', default='member'), cfg.StrOpt('group_desc_attribute', default='description'), cfg.ListOpt('group_attribute_ignore', default=[]), cfg.BoolOpt('group_allow_create', default=True), cfg.BoolOpt('group_allow_update', default=True), cfg.BoolOpt('group_allow_delete', default=True), cfg.ListOpt('group_additional_attribute_mapping', default=[]), cfg.StrOpt('tls_cacertfile', default=None), cfg.StrOpt('tls_cacertdir', default=None), cfg.BoolOpt('use_tls', default=False), cfg.StrOpt('tls_req_cert', default='demand')], 'pam': [ cfg.StrOpt('userid', default=None), cfg.StrOpt('password', default=None)], 'auth': [ cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS), cfg.StrOpt('password', default='keystone.auth.plugins.password.Password'), cfg.StrOpt('token', default='keystone.auth.plugins.token.Token'), #deals with REMOTE_USER authentication cfg.StrOpt('external', default='keystone.auth.plugins.external.DefaultDomain')], 'paste_deploy': [ cfg.StrOpt('config_file', default=None)], 'memcache': [ cfg.ListOpt('servers', default=['localhost:11211']), cfg.IntOpt('max_compare_and_set_retry', default=16)], 'catalog': [ cfg.StrOpt('template_file', default='default_catalog.templates'), cfg.StrOpt('driver', default='keystone.catalog.backends.sql.Catalog')], 'kvs': [ cfg.ListOpt('backends', default=[]), cfg.StrOpt('config_prefix', default='keystone.kvs'), cfg.BoolOpt('enable_key_mangler', default=True), cfg.IntOpt('default_lock_timeout', default=5)]} CONF = cfg.CONF def setup_authentication(conf=None): # register any non-default auth methods here (used by extensions, etc) if conf is None: conf = CONF for method_name in conf.auth.methods: if method_name not in _DEFAULT_AUTH_METHODS: conf.register_opt(cfg.StrOpt(method_name), 
group='auth') def configure(conf=None): if conf is None: conf = CONF conf.register_cli_opt( cfg.BoolOpt('standard-threads', default=False, help='Do not monkey-patch threading system modules.')) conf.register_cli_opt( cfg.StrOpt('pydev-debug-host', default=None, help='Host to connect to for remote debugger.')) conf.register_cli_opt( cfg.IntOpt('pydev-debug-port', default=None, help='Port to connect to for remote debugger.')) for section in FILE_OPTIONS: for option in FILE_OPTIONS[section]: if section: conf.register_opt(option, group=section) else: conf.register_opt(option) # register any non-default auth methods here (used by extensions, etc) setup_authentication(conf)
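To see how these registrations are meant to be consumed, here is a minimal usage sketch; the keystone.common.config import path is an assumption about where this module lives (the conventional location for this file), and the values printed are simply the defaults registered in FILE_OPTIONS above.

# A minimal usage sketch, assuming this module is keystone.common.config.
from keystone.common import config

config.configure()                        # register CLI and file options
config.CONF(args=[], project='keystone')  # parse configuration sources

# Ungrouped options live on the root of CONF...
print(config.CONF.admin_port)             # 35357 unless overridden
# ...while grouped options are reached through their section.
print(config.CONF.token.expiration)       # 3600
print(config.CONF.ldap.url)               # ldap://localhost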
Your wishes will come true with the Wish Master slot! Wish Master will make your dreams come true in the blink of an eye! Spin the reels now; you never know, maybe one spin will be enough to make you the happiest casino player around. This slot machine is made by NetEnt and has great graphics with nice, valuable symbols. The beautifully carved symbols will give you the highest winnings. What's more, there is a magic lamp in the right corner, waiting for you to wish for anything! Wish Master is a 5-reel, 20-betline online video slot, which means you will have great chances to win. Rub the lamp to meet the genie, who can give you extra kinds of Wilds, multipliers, and other cool features. There is a Scatter symbol on the reels. When it appears anywhere on the reels, the genie escapes from the lamp. A random feature is then activated and you get 10 free spins. This is a great way to collect good prizes and, furthermore, if a new Scatter symbol appears on the reels during the feature, the new feature is added to the feature inventory and you receive 5 additional free spins. However, a maximum of 3 features can be active at the same time. If you are lucky enough to land a Scatter symbol when you already have three features, the first one is replaced by the new feature, and you are still awarded 5 extra spins. But wait, there's even more! There are purple and orange Extra Wilds: either feature in the inventory transforms the corresponding symbol into a Wild symbol. Keep on reading, it's not over yet! Wild symbols substitute for other symbols to complete the highest possible winning combination on a winning line. They can appear on any reel, and they substitute for all symbols except Scatter symbols. When a flaming Wild symbol appears on the same reel as a Scatter symbol, the entire reel is covered with flaming Wilds and the bonus feature is activated. Speaking of these flaming Wilds, when any of them appears on the reels, it turns into an Expanding Wild that covers the whole reel. The Wish Master online slot has Random Wilds, too: on each spin, 2 Floating Wilds can appear on the reels at random. What about the multipliers? Wins are multiplied by 2 or 3 and, of course, both multipliers can be active at once, in which case all winnings are multiplied by 6. It's worth playing the Wish Master online slot, so give it a try now! Pink, orange, green and blue ornaments with shiny gems can be seen alongside the nicely carved classic A, K, Q and J symbols. For all combinations, see the Paytable by clicking on the information button in the left corner of the slot machine. Try the Wish Master slot machine now with real money or for fun. You will enjoy this game for sure. Playing for real money makes the whole game more exciting. Get ready to meet the genie who will make your wishes come true. Good luck and play Wish Master now!
'''
Copyright 2013 Cosnita Radu Viorel

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

.. codeauthor:: Radu Viorel Cosnita <[email protected]>
.. py:module:: fantastico.mvc.models.model_filter_compound
'''

from fantastico.exceptions import FantasticoNotSupportedError, FantasticoError
from fantastico.mvc.models.model_filter import ModelFilterAbstract
from sqlalchemy.sql.expression import and_, or_


class ModelFilterCompound(ModelFilterAbstract):
    '''This class provides the api for compounding ModelFilter objects into a specified sql alchemy operation.'''

    @property
    def model_filters(self):
        '''This property returns all ModelFilter instances being compound.'''
        return self._model_filters

    def __init__(self, operation, *args):
        if len(args) < 2:
            raise FantasticoNotSupportedError("Compound filter takes at least 2 simple model filters.")

        for arg in args:
            if not isinstance(arg, ModelFilterAbstract):
                raise FantasticoNotSupportedError("Compound filters accept only arguments of type ModelFilterAbstract.")

        self._operation = operation
        self._model_filters = args

    def build(self, query):
        '''This method transforms the current compound statement into an sql alchemy filter.'''

        try:
            for model_filter in self._model_filters:
                # pylint: disable=W0212
                if hasattr(query, "_primary_entity") and model_filter.column.table != query._primary_entity.selectable \
                        and hasattr(query, "_joinpoint") and not (model_filter.column.table in query._joinpoint.values()):
                    query = query.join(model_filter.column.table)

            return query.filter(self.get_expression())
        except Exception as ex:
            raise FantasticoError(ex)

    def get_expression(self):
        '''This method calculates the sqlalchemy expression held by this filter.'''

        return self._operation(*[model_filter.get_expression() for model_filter in self._model_filters])

    def __eq__(self, obj):
        '''This method is overridden in order to correctly evaluate equality of two compound model filters.'''

        if type(self) != type(obj):
            return False

        if len(obj.model_filters) != len(self.model_filters):
            return False

        for idx in range(0, len(self.model_filters)):
            if self.model_filters[idx] != obj.model_filters[idx]:
                return False

        return True

    def __hash__(self):
        '''This method generates a hash code for compound model filters.'''

        result = hash(self.model_filters[0])

        for idx in range(1, len(self.model_filters)):
            result ^= hash(self.model_filters[idx])

        return result


class ModelFilterAnd(ModelFilterCompound):
    '''This class provides a compound filter that allows **and** conditions against models. Below you can find
    a simple example:

    .. code-block:: python

        id_gt_filter = ModelFilter(PersonModel.id, 1, ModelFilter.GT)
        id_lt_filter = ModelFilter(PersonModel.id, 5, ModelFilter.LT)
        name_like_filter = ModelFilter(PersonModel.name, '%%john%%', ModelFilter.LIKE)

        complex_condition = ModelFilterAnd(id_gt_filter, id_lt_filter, name_like_filter)
    '''

    def __init__(self, *args):
        super(ModelFilterAnd, self).__init__(and_, *args)


class ModelFilterOr(ModelFilterCompound):
    '''This class provides a compound filter that allows **or** conditions against models. Below you can find
    a simple example:

    .. code-block:: python

        id_gt_filter = ModelFilter(PersonModel.id, 1, ModelFilter.GT)
        id_lt_filter = ModelFilter(PersonModel.id, 5, ModelFilter.LT)
        name_like_filter = ModelFilter(PersonModel.name, '%%john%%', ModelFilter.LIKE)

        complex_condition = ModelFilterOr(id_gt_filter, id_lt_filter, name_like_filter)
    '''

    def __init__(self, *args):
        super(ModelFilterOr, self).__init__(or_, *args)
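Since compound filters are themselves ModelFilterAbstract instances, they nest. Here is a usage sketch; `session` and PersonModel are assumed to exist (PersonModel is the same hypothetical model used by the docstring examples above), and ModelFilter comes from fantastico.mvc.models.model_filter.

from fantastico.mvc.models.model_filter import ModelFilter

id_range = ModelFilterAnd(
    ModelFilter(PersonModel.id, 1, ModelFilter.GT),
    ModelFilter(PersonModel.id, 5, ModelFilter.LT))
by_name = ModelFilter(PersonModel.name, '%john%', ModelFilter.LIKE)

# Nested compound: (id > 1 AND id < 5) OR name LIKE '%john%'
condition = ModelFilterOr(id_range, by_name)

# build() adds any required joins and applies .filter(...) to the query.
query = condition.build(session.query(PersonModel))
results = query.all()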
I work closely with Paula Barnes on complex spinal clinical negligence claims. I have a growing depth of experience in relation to complex spinal claims including Cauda Equina Syndrome. I also have experience of community care law. Spinal injuries are life changing injuries and I am passionate about ensuring that my clients receive all of the support that they need throughout the legal process. I enjoy regularly meeting my clients to ensure that their voice is heard throughout the progression of their case. I qualified as a solicitor in 2016. I graduated from the University of Surrey in 2013 and I then went on to complete the LPC course at the Guildford College of Law.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numap import NuMap
from time import sleep


def printer(element):
    print element
    return element

LEFT_INPUT = ('L0', 'L1', 'L2', 'L3')
RIGHT_INPUT = ('R0', 'R1', 'R2', 'R3')

# LEFT_INPUT    RIGHT_INPUT
#     |             |
#     |(printer)    |(printer)
#     |             |
# left_iter     right_iter

numap = NuMap(stride=2, buffer=6)
left_iter = numap.add_task(printer, LEFT_INPUT)
right_iter = numap.add_task(printer, RIGHT_INPUT)

# BUFFER   6                       6 ...
#          ----------------------  ------
# STRIDES  2       2       2       2
#          ------  ------  ------  ------
# order of input: L0, L1, R0, R1, L2, L3, R2, R3

print "first 6:"
numap.start()
sleep(1)
# should print:
# L0, L1, R0, R1, L2, L3

print "last 2:"
L0 = left_iter.next()
L1 = left_iter.next()
# should print:
# R2, R3
R0 = right_iter.next()
R1 = right_iter.next()

L2 = left_iter.next()
L3 = left_iter.next()
R2 = right_iter.next()
R3 = right_iter.next()

assert (L0, L1, L2, L3) == LEFT_INPUT
assert (R0, R1, R2, R3) == RIGHT_INPUT
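To make the roles of `stride` and `buffer` concrete, here is a small follow-up sketch reusing the `printer` task above; the ordering in the comments is inferred from the stride rule illustrated by the diagram, not captured from a run.

numap2 = NuMap(stride=1, buffer=4)
a_iter = numap2.add_task(printer, LEFT_INPUT)
b_iter = numap2.add_task(printer, RIGHT_INPUT)
numap2.start()
# With stride=1 the scheduler switches tasks after every element, so the
# first 4 buffered results should print as: L0, R0, L1, R1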
A few of the common questions asked by those just getting familiar with blackjack concern the value of sitting at third base as opposed to another seat, and whether the number of players affects your win/loss rate. Numerous computer simulations of games with varying numbers of players at the table have been run, as well as simulations which look at players' win rates according to their seating positions. This exact scenario has been examined numerous times and analyzed by a number of experts at the game. Let us look at the second question first. The main effect that the number of players at the table has on the win/loss rate, whether you count cards or not, is that the more players at the table, the fewer bets you will be able to make per hour. If you are not a card counter this can be a benefit: the more players at the table, the slower your hourly rate of losing. If you happen to count cards, slowing the game down slows down your hourly expectation proportionately. Most blackjack experts advise that card-counters play at tables with the fewest number of players possible. This has the effect of increasing their hourly action and win rate. These same experts recommend that players who are playing for comps play in the most crowded (i.e., the slowest) games. If you're a big bettor trying to get the most comps for your gambling dollar, slow play will keep your hourly rate of losing to a minimum. There are other effects that having other players at the table has on the game, but these will only affect those who count cards. If the game is a face-down game, card-counters will be adversely affected if they cannot see, and therefore cannot count, all of the other players' cards prior to making their decisions. A second effect arises if the game is a single-deck game. If the number of players at the table causes the dealer to shuffle the deck sooner, this will have a negative impact on the card counter. Note that it's not the extra players that hurt the card counter's potential win rate, but the early shuffle point. Also note that even though this effect was described for single-deck games, an early shuffle point also hurts a card-counter's win potential in shoe games; in shoe games, however, the shuffle point is not affected by having extra players at the table, since dealers simply shuffle when they come to the cut card. Because each card in play represents a larger proportion of the deck in a single-deck game than in a shoe game, card-counters will lose betting accuracy in single-deck games if other players are at the table. This happens because the card counter has to place his bets before seeing all the cards dealt. There is only one other minor effect that extra players at the table have on your win rate whether you are a card-counter or not, and this also applies only in single-deck games. If the table is full, so that you are always getting the same number of rounds (2), then you will have a slightly more advantageous game. On the other hand, if the dealer is shuffling up based on the number of cards he has left in his hand, this is slightly less advantageous to the players. The reason for this strange effect is that low cards, 2-7, use up more cards when they are played. If the dealer is basing his shuffle decisions on how many cards remain to be dealt, he will more often shuffle away the high-card packs, and high cards favor the players. This affects all players at the table by about .17%.
By the way, if you are in a single-deck game where only one round is dealt, this is generally an unprofitable situation for card-counters, but it will be slightly more advantageous, or at least less disadvantageous, for non-counters, based on the same principle. As for seating positions, there is no effect whatsoever for non-card-counters based on which seat you are sitting in. It is true, however, that card-counters often prefer the third-base side of the table. There is nothing fundamentally more profitable about that seating position; it's the fact that a card-counter will be able to see (and count) more cards before playing his hand that draws him to that side of the table. In face-down games (one and two decks), many card counters prefer a central seat at the table, as this affords the best chance of seeing more of the other players' cards to their right and left. So, the effect of seating position for a card-counter is based solely upon his ability (or inability) to see and count more cards. There is no seating position at a blackjack table that is better for the players in and of itself.
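The hands-per-hour effect described above is easy to put numbers on. In the sketch below, the rounds-per-hour figures are illustrative assumptions (the real numbers vary by casino and dealer); only the proportionality argument comes from the text.

ROUNDS_PER_HOUR = {1: 200, 3: 120, 5: 80, 7: 60}  # heads-up ... full table

def hourly_expectation(edge, avg_bet, players):
    """Expected hourly result = edge * average bet * hands per hour."""
    return edge * avg_bet * ROUNDS_PER_HOUR[players]

# A counter with a +1% edge betting $25 average wants a fast, empty table:
for n in (1, 3, 5, 7):
    print("%d player(s): $%+.0f/hour" % (n, hourly_expectation(0.01, 25, n)))

# A basic-strategy player giving up 0.5% loses more slowly at a full one:
for n in (1, 7):
    print("%d player(s): $%+.0f/hour" % (n, hourly_expectation(-0.005, 25, n)))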
# -*- coding: utf-8 -*- from __future__ import absolute_import import logging from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.http import Http404 from django.shortcuts import get_object_or_404 from rest_framework.settings import api_settings from ..mixin import BaseElasticsearchAPIViewMixin from lib import ValidationHelper, ConfigManager, get_export_type_wrapper_map from rest.lib import PaginationSerializer from .exception import TooManyEsResultsError config = ConfigManager.instance() logger = logging.getLogger(__name__) class BaseElasticsearchMappedAPIViewMixin(BaseElasticsearchAPIViewMixin): """ This is a base mixin for all Elasticsearch APIView classes that query Elasticsearch models which are mapped to database model instances. """ # Class Members _db_object = None _filter_by_parent_db_object = True # Instantiation # Static Methods # Class Methods @classmethod def get_db_model_class(cls): """ Get the database model class that this APIView is meant to query against. :return: The database model class that this APIView is meant to query against. """ raise NotImplementedError("Subclasses must implement this!") # Public Methods # Protected Methods def _check_permissions(self): return self._check_db_object_permissions() def _check_db_object_permissions(self): """ Check to see if the requesting user has sufficient permissions to be querying self.db_object. :return: True if the requesting user has sufficient permissions to be querying self.db_object, False otherwise. """ raise NotImplementedError("Subclasses must implement this!") # Private Methods def __get_db_object(self): """ Get the database object that the queried Elasticsearch data should be tied to. :return: The database object that the queried Elasticsearch data should be tied to. """ to_return = get_object_or_404(self.db_model_class, pk=self.kwargs["pk"]) return to_return # Properties @property def db_object(self): """ Get the database object that the queried Elasticsearch data should be tied to. :return: the database object that the queried Elasticsearch data should be tied to. """ if self._db_object is None: self._db_object = self.__get_db_object() return self._db_object @property def db_model_class(self): """ Get the database model class that this APIView is meant to query against. :return: The database model class that this APIView is meant to query against. """ return self.__class__.get_db_model_class() @property def filter_by_parent_db_object(self): """ Get whether or not Elasticsearch results should be filtered upon based on the mapped database object. :return: whether or not Elasticsearch results should be filtered upon based on the mapped database object. """ return self._filter_by_parent_db_object # Representation and Comparison class BaseElasticsearchSingleMappedAPIViewMixin(BaseElasticsearchMappedAPIViewMixin): """ This is a base mixin class for all Web Sight APIView classes that query single instances of Elasticsearch models that are in turn paired with database models. """ # Class Members # Instantiation # Static Methods # Class Methods # Public Methods def _extract_contents_from_response(self, response): if response.results_count > 1: raise TooManyEsResultsError( "Total of %s results retrieved in call to %s." 
% (response.results_count, self.__class__.__name__) ) elif response.results_count == 0: raise Http404 else: return self._get_object_from_result(response.results[0]) # Protected Methods # Private Methods # Properties # Representation and Comparison class BaseElasticsearchFilterableMappedAPIViewMixin(BaseElasticsearchMappedAPIViewMixin): """ This is a base mixin class for Elasticsearch query classes that enable clients to filter results of the Elasticsearch query. """ # Class Members # Instantiation # Static Methods # Class Methods # Public Methods def get(self, *args, **kwargs): to_return = super(BaseElasticsearchFilterableMappedAPIViewMixin, self).get(*args, **kwargs) to_return.data["filter_fields"] = self.filter_fields return to_return # Protected Methods def _apply_filters_to_query(self, query): query = super(BaseElasticsearchFilterableMappedAPIViewMixin, self)._apply_filters_to_query(query) query = self.__apply_query_string_filters(query) return query # Private Methods def __apply_query_string_filters(self, query): """ Apply filters to the given query based on the contents of the query string in self.request. :param query: The query to add filters to. :return: The query with filters added. """ for filter_key in self.hard_filterable_fields: if filter_key in self.request.query_params: filter_value = self.request.query_params.get(filter_key) query.must_by_term(key=filter_key, value=filter_value, verify_key=True, include=True) elif "-%s" % (filter_key,) in self.request.query_params: filter_value = self.request.query_params.get("-%s" % (filter_key,)) query.must_by_term(key=filter_key, value=filter_value, verify_key=True, include=False) for filter_key in self.soft_filterable_fields: if filter_key in self.request.query_params: filter_value = self.request.query_params.get(filter_key) query.must_by_term(key=filter_key, value=filter_value, verify_key=False, include=True) elif "-%s" % (filter_key,) in self.request.query_params: filter_value = self.request.query_params.get("-%s" % (filter_key,)) query.must_by_term(key=filter_key, value=filter_value, verify_key=False, include=False) if self.has_search_argument: query.set_search_term(term=self.search_argument, field="_all") return query # Properties @property def filter_fields(self): """ Get a list of the fields that the Elasticsearch model referenced by this view can be filtered on. :return: a list of the fields that the Elasticsearch model referenced by this view can be filtered on. """ return self.soft_filterable_fields + self.hard_filterable_fields @property def hard_filterable_fields(self): """ Get a list of strings representing the fields that are explicitly declared on the queried Elasticsearch model that can be filtered against. :return: a list of strings representing the fields that are explicitly declared on the queried Elasticsearch model that can be filtered against. """ return self.queryable_model_fields @property def has_search_argument(self): """ Get whether or not the request has a search argument. :return: whether or not the request has a search argument. """ return settings.SEARCH_PARAM in self.request.query_params @property def search_argument(self): """ Get the search argument from the request query string. :return: the search argument from the request query string. 
""" if self._search_argument is None: self._search_argument = self.request.query_params.get(settings.SEARCH_PARAM, "") return self._search_argument @property def soft_filterable_fields(self): """ Get a list of strings representing the fields that are not explicitly declared on the queried Elasticsearch model that can be filtered against. :return: A list of strings representing the fields that are not explicitly declared on the queried Elasticsearch model that can be filtered against. """ return [] # Representation and Comparison class BaseElasticsearchAnalyticsAPIViewMixin(BaseElasticsearchFilterableMappedAPIViewMixin): """ This is a base mixin class for all Web Sight APIView classes that query Elasticsearch to retrieve analytical data about models. """ # Class Members # Instantiation # Static Methods # Class Methods # Public Methods # Protected Methods def _apply_aggregates_to_query(self, query): """ Apply the necessary aggregates to the given query and return it. :param query: The query to add aggregates to. :return: The query with the added aggregates. """ raise NotImplementedError("Subclasses must implement this!") def _extract_contents_from_response(self, response): to_return = {} for aggregate_name, aggregate in self.query.aggregates.iteritems(): to_return[aggregate_name] = aggregate.unpack_response(response.aggregations[aggregate_name]) return to_return def _query_elasticsearch(self): es_index = self._get_elasticsearch_index() self._query = self.es_query_class(suppress_source=True) self._query = self._apply_filters_to_query(self._query) self._query = self._apply_aggregates_to_query(self._query) return self._query.search(index=es_index) # Private Methods # Properties # Representation and Comparison class BaseElasticsearchManyMappedAPIViewMixin(BaseElasticsearchFilterableMappedAPIViewMixin): """ This is a base mixin class for all Web Sight APIView classes that query Elasticsearch models that are paired with database models and that return multiple instances of the queried model. """ # Class Members _current_page = None _page_offset = None _sort_argument = None _export_argument = None _exporter_map = None # Instantiation # Static Methods # Class Methods # Public Methods def get(self, *args, **kwargs): """ Handle the HTTP GET request to this APIView. :param args: Positional arguments. :param kwargs: Keyword arguments. :return: A Django rest framework response object. 
""" if not self.has_export_argument: to_return = super(BaseElasticsearchManyMappedAPIViewMixin, self).get(*args, **kwargs) to_return.data["sortable_fields"] = self.sortable_fields return to_return else: self.check_ws_permissions() query_results = self._query_elasticsearch() return self.exporter_map[self.export_argument].get_django_response_from_elasticsearch_response(query_results) # Protected Methods def _get_elasticsearch_query(self): to_return = super(BaseElasticsearchManyMappedAPIViewMixin, self)._get_elasticsearch_query() if self.has_export_argument: self.__validate_export_value() to_return.offset = 0 to_return.size = config.es_max_query_size else: to_return.offset = self.page_offset to_return.size = self.page_size if self.has_sort_argument: self.__validate_sort_field() to_return.add_sort_field(field_name=self.sort_field, direction=self.sort_direction) return to_return def _extract_contents_from_response(self, response): results = self._extract_objects_from_response(response) to_return = PaginationSerializer( results=results, count=response.results_count, current_page=self.current_page, ) return to_return.to_response_dict() # Private Methods def __get_current_page(self): """ Get an integer representing the current page if a page number was supplied in the request. :return: An integer representing the current page if a page number was supplied in the request. """ page_number = self.request.query_params.get(settings.PAGINATION_PARAM, 1) to_return = int(page_number) if ValidationHelper.is_int(page_number) else 1 return max(to_return, 1) def __validate_export_value(self): """ Ensure that the value in self.export_argument is a valid string to export via. :return: None """ ValidationHelper.validate_in(to_check=self.export_argument, contained_by=self.exporter_map_keys) def __validate_sort_field(self): """ Ensure that the field in self.sort_field is a valid field to be sorted upon. :return: None """ ValidationHelper.validate_in(to_check=self.sort_field, contained_by=self.sortable_fields) # Properties @property def current_page(self): """ Get the current requested page number :return: the current requested page number """ if self._current_page is None: self._current_page = self.__get_current_page() return self._current_page @property def export_argument(self): """ Get the export argument from the request's query string. :return: the export argument from the request's query string. """ if self._export_argument is None: self._export_argument = self.request.query_params.get(settings.EXPORT_PARAM, "") return self._export_argument @property def exporter_map(self): """ Get a dictionary that maps export types to the classes that can handle exporting data to a file of the given type. :return: A dictionary that maps export types to the classes that can handle exporting data to a file of the given type. """ if self._exporter_map is None: self._exporter_map = get_export_type_wrapper_map() return self._exporter_map @property def exporter_map_keys(self): """ Get a list of strings representing the valid export types supported by Web Sight. :return: a list of strings representing the valid export types supported by Web Sight. """ return self.exporter_map.keys() @property def has_export_argument(self): """ Get whether or not the request has an export argument. :return: whether or not the request has an export argument. """ return settings.EXPORT_PARAM in self.request.query_params @property def has_sort_argument(self): """ Get whether or not the request has a sorting argument. 
:return: whether or not the request has a sorting argument. """ return api_settings.ORDERING_PARAM in self.request.query_params @property def page_offset(self): """ Get the page offset to use when querying Elasticsearch. :return: the page offset to use when querying Elasticsearch. """ if self._page_offset is None: self._page_offset = (self.current_page - 1) * self.page_size return self._page_offset @property def page_size(self): """ Get the page size to use. :return: the page size to use. """ return api_settings.PAGE_SIZE @property def sortable_fields(self): """ Get a list of the fields that this query allows sorting on. :return: a list of the fields that this query allows sorting on. """ return self.queryable_model_fields @property def sort_argument(self): """ Get the sort argument from the request query string. :return: the sort argument from the request query string. """ if self._sort_argument is None: self._sort_argument = self.request.query_params.get(api_settings.ORDERING_PARAM, "") return self._sort_argument @property def sort_direction(self): """ Get a string representing the direction that results should be ordered in. :return: a string representing the direction that results should be ordered in. """ return "desc" if self.sort_argument.startswith("-") else "asc" @property def sort_field(self): """ Get the field to sort query results on. :return: The field to sort query results on. """ return self.sort_argument[1:] if self.sort_argument.startswith("-") else self.sort_argument # Representation and Comparison class BaseElasticsearchRelatedAPIViewMixin(BaseElasticsearchManyMappedAPIViewMixin): """ This is a base Elasticsearch APIView mixin that allows users to query data based on multidoc queries that span multiple document types. """ # Class Members _filter_by_parent_db_object = False # Instantiation # Static Methods # Class Methods # Public Methods def _apply_filters_to_query(self, query): query = super(BaseElasticsearchRelatedAPIViewMixin, self)._apply_filters_to_query(query) filter_value = self._get_related_filter_value() if filter_value is None: raise ObjectDoesNotExist() query.must_by_term(key=self.related_filter_key, value=filter_value) return query def _get_related_filter_value(self): """ Get the value that the Elasticsearch query should filter on to ensure results are related to the relevant document. :return: The value that the Elasticsearch query should filter on to ensure results are related to the relevant document. If this method returns None, then a 404 will be raised. """ raise NotImplementedError("Subclasses must implement this!") # Protected Methods # Private Methods # Properties @property def queryable_model_fields(self): return self.es_query_class.get_queryable_fields() @property def related_filter_key(self): """ Get the key that the Elasticsearch query should be filtered on to ensure results are related to the relevant document. :return: the key that the Elasticsearch query should be filtered on to ensure results are related to the relevant document. """ raise NotImplementedError("Subclasses must implement this!") # Representation and Comparison class BaseEsMixin(object): """ This is a base class for Elasticsearch mixin classes. """ # Class Members # Instantiation # Static Methods # Class Methods @classmethod def get_es_query_class(cls): """ Get the Elasticsearch query class that this APIView is meant to query. :return: The Elasticsearch query class that this APIView is meant to query.
""" raise NotImplementedError("Subclasses must implement this!") # Public Methods # Protected Methods # Private Methods # Properties # Representation and Comparison class BaseRelatedEsMixin(BaseEsMixin): """ This is a base class for Elasticsearch mixin classes that rely on multidoc queries. """ # Class Members # Instantiation # Static Methods # Class Methods @classmethod def get_related_es_query_class(cls): """ Get the Elasticsearch query class that this related Elasticsearch mixin will retrieve data from. :return: The Elasticsearch query class that this related Elasticsearch mixin will retrieve data from. """ raise NotImplementedError("Subclasses must implement this!") # Public Methods # Protected Methods def _apply_related_elasticsearch_query_filters(self, query): """ Apply filters to the given query to restrict results to only those that match the Elasticsearch document that is related to data returned by this APIView. :param query: The query to apply filters to. :return: The query with filters applied. """ query.must_by_term(key=self.mapped_elasticsearch_key, value=self.mapped_elasticsearch_value) return query def _get_related_filter_value(self): """ Get the value that the Elasticsearch query should filter on to ensure results are related to the relevant document. :return: The value that the Elasticsearch query should filter on to ensure results are related to the relevant document. If this method returns None, then a 404 will be raised. """ query = self.related_elasticsearch_query_class() query = self._apply_related_elasticsearch_query_filters(query) query.queryable_model_fields = [self.parent_related_value_key] result = query.search(self._get_elasticsearch_index()) if result.results_count == 0: return None if result.results_count > 1: logger.warning( "Too many results returned in APIView %s (%s returned)." % (self.__class__.__name__, result.results_count) ) return result.results[0]["_source"][self.parent_related_value_key] # Private Methods # Properties @property def parent_related_value_key(self): """ Get a string representing the key contained in the parent Elasticsearch document that the relationship should be based upon. :return: a string representing the key contained in the parent Elasticsearch document that the relationship should be based upon. """ raise NotImplementedError("Subclasses must implement this!") @property def related_filter_key(self): """ Get the key that the Elasticsearch query should be filtered on to ensure results are related to the relevant document. :return: the key that the Elasticsearch query should be filtered on to ensure results are related to the relevant document. """ raise NotImplementedError("Subclasses must implement this!") @property def related_elasticsearch_query_class(self): """ Get the Elasticsearch query class that this related Elasticsearch mixin will retrieve data from. :return: The Elasticsearch query class that this related Elasticsearch mixin will retrieve data from. """ return self.__class__.get_related_es_query_class() # Representation and Comparison class BaseDbMixin(object): """ This is a base class for database mixin classes. """ # Class Members # Instantiation # Static Methods # Class Methods @classmethod def get_db_model_class(cls): """ Get the database model class that this APIView is meant to query against. :return: The database model class that this APIView is meant to query against.
""" raise NotImplementedError("Subclasses must implement this!") # Public Methods # Protected Methods def _apply_filters_to_query(self, query): query = super(BaseDbMixin, self)._apply_filters_to_query(query) if self.filter_by_parent_db_object: return self._apply_parent_db_object_filter(query) else: return query def _apply_parent_db_object_filter(self, query): """ Apply a filter to the given Elasticsearch query that restricts results to only those objects that are related to the parent database object. :param query: The query to apply filters to. :return: The query with filters applied. """ query.must_by_term(key=self.mapped_elasticsearch_key, value=self.mapped_elasticsearch_value) return query def _check_db_object_permissions(self): """ Check to see if the requesting user has sufficient permissions to be querying self.db_object. :return: True if the requesting user has sufficient permissions to be querying self.db_object, False otherwise. """ raise NotImplementedError("Subclasses must implement this!") def _get_elasticsearch_index(self): """ Get the Elasticsearch index that the resulting Elasticsearch query should be restricted to. :return: The Elasticsearch index that the resulting Elasticsearch query should be restricted to. """ raise NotImplementedError("Subclasses must implement this!") # Private Methods # Properties @property def mapped_elasticsearch_key(self): """ Get a string representing the key that the Elasticsearch query should be filtered by when filtering upon a parent database object. :return: a string representing the key that the Elasticsearch query should be filtered by when filtering upon a parent database object. """ raise NotImplementedError("Subclasses must implement this!") @property def mapped_elasticsearch_value(self): """ Get a string representing the value that the Elasticsearch query should be filtered upon when filtering upon a parent database object. :return: a string representing the value that the Elasticsearch query should be filtered upon when filtering upon a parent database object. """ return self.db_object.uuid # Representation and Comparison
We'll set up your device with your custom alerts. When it arrives, just plug it in and log on. 24/7 real-time tracking. We operate using live tracking, not on 5+ minute intervals like many other companies. Tow alerts, crash alerts, overspeed alerts, unplug detection, jamming detection, geofences & more. Plus, we set up all of your custom alerts for you. You can customise these yourself at any point. 2-year warranty. No hidden fees. "Very impressed by the simplicity of the service. Previous devices we've used have been overly complicated & expensive; that was not the case here. Immediately Luca was in contact with us - he set everything up for us and it all worked perfectly. We just plugged the tracker in and done!" "I've been using the FM3001 to track my personal vehicle for several months now and it's been great. I've never had any issues; it's very accurate and reliable." "This was the first car tracker I've purchased so I was a bit nervous, but Luca made everything so simple, so there were no worries!" In the interest of our customers we wanted to keep our pricing simple and highly competitive: $18/month or $180/year for unlimited live tracking. No hidden fees. No lock-in contracts. Ever. Receive TWO MONTHS FREE tracking! We get this question a lot, and we want to be 100% transparent with you. Like all tracking devices, our device uses a SIM card to transfer the GPS info to our platform - the same type of SIM found in your mobile phone. Whenever the tracking device is in use, it uses the SIM's data to transfer the GPS info. As such, we need to keep the data topped up for you, which costs us. Furthermore, we provide high-quality, user-friendly tracking apps for the Web, Android, & iPhone. These apps have monthly expenses, which we need to cover. Even so, we've still managed to price ourselves very competitively compared to many other companies. As a final note, don't be fooled by companies offering tracking without monthly fees. They'll require you to purchase your own SIM card and keep the data topped up yourself - which will cost you monthly. Instead, we handle everything for you. All of the alerts below are available on our FM3001 tracking device. These alerts can be configured for each account depending on your needs. You can receive these alerts either as an email or as a notification on your phone via our free app. Have you ever finished work, come outside, and found your car's been towed? Next time, get an instant notification straight to your phone so you can react immediately. As soon as your vehicle has been in a crash you'll receive an instant notification. This alert helped my friend find his wife when she hit a kangaroo with her car. Receive an alert every time your car goes over a certain speed, totally customizable by you. Perfect for monitoring a crazy teenage driver or your employees. Stop thieves and troublesome employees as soon as they tamper with your device. Receive an instant alert when your device has been unplugged or tampered with. In the unlikely event that the GPS signal is jammed, you'll receive an immediate alert. Stay on the ball and keep your vehicle out of the hands of thieves. If you find yourself outside of cell phone tower range, don't worry. The GPS tracker will still log all the location data and transfer it when you're back in range. Every time your vehicle enters or leaves these custom zones you receive an alert. Great for businesses monitoring their delivery trucks or preventing theft.
Save on fuel costs by monitoring driver behaviour. You'll receive an alert if your employee (or teenage driver) is accelerating, braking or cornering too hard. Track the amount of fuel your vehicle is using on various trips. Use this data to make educated decisions about your vehicles and save on fuel costs. Our devices plug straight into the OBD-II port on your vehicle, usually located under the steering wheel. There are no wires and no fuss. Plug it in, log on, and you're tracking. See where your vehicle is travelling turn-by-turn, live. When you sign up with Car Trackers Club you'll get unlimited live tracking, all inclusive, for $18/month. Receive automatic alerts straight to your smartphone via our app, or set up automatic summary reports sent directly to your email for review. Our devices are built to industry-leading standards and are quality tested in Europe. To give you peace of mind we're offering a 2-year warranty on every device sold. Don't worry about the hassle of purchasing your own SIM card and data plan. Everything, including unlimited tracking, is part of the monthly service fee. We're here to help you get up and tracking with your device no matter what. We understand that you're busy, so we've made everything as simple as possible. Our platform recognises when your vehicle has finished its trip and will automatically save this data. View the history of your trips & all associated data for up to 1 year. Our devices change network depending on which is providing the strongest signal. We support signals in 70+ countries, including Australia, the U.S., Canada, and the U.K. Tracking devices: why choose us? Save time & your temper. Many of our customers have tried the cheap Chinese tracker approach - they gave up and switched to us instead. Although those devices are cheaper, you'll waste time trying to set them up, become frustrated, attempt to contact their support to no avail, and did I mention... waste time? Save yourself a lot of heartache with our reliable, quality-assured GPS tracker. We set everything up to work perfectly out of the box. All you need to do is plug it straight into your car and log in to the app - that's seriously it! And if you are having any issues or trouble, just shoot us an email and we'll help out. We're a small, family-run business, able to provide unparalleled support. Most of our competition are large companies that outsource their support offshore - which negatively affects you. We're the opposite. We thrive on word of mouth, and the only way we achieve that is by providing exceptional support to our exceptional users. We understand that not everyone is tech savvy, and most people simply don't have time to learn the ins and outs of a new GPS tracker or platform. We created our tracking platform & app hand-in-hand with several of our key users. This gave us true insight into how our customers interact with the software, allowing us to create a simple, intuitive tracking platform. From individuals to small businesses, we're here to help you get the most out of our tracking devices. The majority of our users sign up for theft protection, but 95% stay because our device has become part of their everyday life. Once you've placed your order, I'll get in contact with you to find out which alerts you'll need activated on your account. I'll then configure your GPS tracker, set up your account, email you the login details, and ship the tracking device off to you.
Once the tracking device arrives, simply take it out of the box and plug it straight into the OBD-II port in your car. To find where your OBD port is located, type your vehicle's make and model into this tool here. All that's left for you is to log into the app. Now that everything is set up, just log in to the app or web platform using the login details I emailed you. And that's seriously everything. If you ever want to change the configuration of your alerts, we can easily do this for you. Every ten minutes a vehicle is stolen in Australia. In the U.S., it's roughly 13 times that rate - to be fair, they have roughly 13 times the population. More than 50% of all vehicles were stolen from a residence, the majority of which occurred between 4pm & midnight over the weekend. What's worse, 35% of all stolen vehicles are never recovered. However, 90% of cars with a GPS tracker installed were recovered within a week, and 80% of those were recovered within 24 hours. As you can see, installing a tracking device is one of the simplest and most effective methods of protecting your car from theft. In fact, many insurance companies will give discounts on your vehicle premiums just for having a GPS tracking device installed. That's because they understand that with a tracking device the likelihood of vehicle recovery in the event of theft is much higher, saving them money. But I thought they were expensive and only for large companies? Tracking devices have advanced a lot in the last few years, and now they're widely available and very affordable. A large portion of our customer base is actually made up of individuals tracking their personal vehicles, whether that be for theft protection, monitoring their teenage kids, saving on fuel, tracking their vehicle's travels, or any number of reasons. So, how do GPS trackers actually work? Nearly all tracking devices use a SIM card to transmit their data to their respective online tracking platform. This data is then translated on the platform into an understandable format. In terms of location, it's shown as a vehicle on Google Maps. In terms of alerts, they're shown as notifications on the online platform and app. You can see the time, date and location of the alert. This is why there are always monthly costs associated with tracking devices: you need to pay for the data costs of the SIM, along with signing up for an online tracking platform - unless you resort to using a free one, which I do not recommend. For more information on car tracking, please visit our resources page or read the review of our most popular tracking device here.
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import jsonpatch from oslo_db import exception as db_exc import pecan from pecan import rest import six import wsme from wsme.rest import json as wjson from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from solum.api.controllers.camp.v1_1.datamodel import plans as model from solum.api.controllers.camp.v1_1 import uris from solum.api.controllers import common_types from solum.api.handlers.camp import plan_handler as plan_handler from solum.common import exception from solum.common import yamlutils from solum.openstack.common.gettextutils import _ from solum.openstack.common import log as logging LOG = logging.getLogger(__name__) MAL_PATCH_ERR = 'JSON Patch request missing one or more required components' UNSUP_VER_ERR = 'camp_version \'%s\' is not supported by this implementation' def clean_plan(plan_dict): del plan_dict['camp_version'] return plan_dict def fluff_plan(plan_dict, pid): """Fluff the plan with a camp_version and uri.""" plan_dict['camp_version'] = "CAMP 1.1" plan_dict['uri'] = uris.PLAN_URI_STR % (pecan.request.host_url, pid) return plan_dict class JsonPatchProcessingException(exception.SolumException): msg_fmt = _("Error while processing the JSON Patch document: %(reason)s") code = 500 class PlansController(rest.RestController): """CAMP v1.1 plans controller.""" _custom_actions = { 'patch': ['PATCH'] } @exception.wrap_wsme_controller_exception @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, uuid): """Delete this plan.""" handler = (plan_handler. PlanHandler(pecan.request.security_context)) try: handler.delete(uuid) except (db_exc.DBReferenceError, db_exc.DBError): raise exception.PlanStillReferenced(name=uuid) @exception.wrap_wsme_controller_exception @wsme_pecan.wsexpose(model.Plan, wtypes.text) def get_one(self, uuid): """Return the appropriate CAMP-style plan resource.""" handler = (plan_handler. PlanHandler(pecan.request.security_context)) db_obj = handler.get(uuid) plan_dict = fluff_plan(db_obj.refined_content(), db_obj.uuid) return model.Plan(**plan_dict) @exception.wrap_wsme_controller_exception @wsme_pecan.wsexpose(model.Plans) def get(self): puri = uris.PLANS_URI_STR % pecan.request.host_url pdef_uri = uris.DEPLOY_PARAMS_URI % pecan.request.host_url desc = "Solum CAMP API plans collection resource." 
handler = plan_handler.PlanHandler(pecan.request.security_context) plan_objs = handler.get_all() p_links = [] for m in plan_objs: p_links.append(common_types.Link(href=uris.PLAN_URI_STR % (pecan.request.host_url, m.uuid), target_name=m.name)) # if there aren't any plans, avoid returning a resource with an # empty plan_links array if len(p_links) > 0: res = model.Plans(uri=puri, name='Solum_CAMP_plans', type='plans', description=desc, parameter_definitions_uri=pdef_uri, plan_links=p_links) else: res = model.Plans(uri=puri, name='Solum_CAMP_plans', type='plans', description=desc, parameter_definitions_uri=pdef_uri) return res @exception.wrap_pecan_controller_exception @pecan.expose('json', content_type='application/json-patch+json') def patch(self, uuid): """Patch an existing CAMP-style plan.""" handler = (plan_handler. PlanHandler(pecan.request.security_context)) plan_obj = handler.get(uuid) # TODO([email protected]) check if there are any assemblies that # refer to this plan and raise an PlanStillReferenced exception if # there are. if not pecan.request.body or len(pecan.request.body) < 1: raise exception.BadRequest(reason='empty request body') # check to make sure the request has the right Content-Type if (pecan.request.content_type is None or pecan.request.content_type != 'application/json-patch+json'): raise exception.UnsupportedMediaType( name=pecan.request.content_type, method='PATCH') try: patch = jsonpatch.JsonPatch.from_string(pecan.request.body) patched_obj = patch.apply(plan_obj.refined_content()) db_obj = handler.update(uuid, patched_obj) except KeyError: # a key error indicates one of the patch operations is missing a # component raise exception.BadRequest(reason=MAL_PATCH_ERR) except jsonpatch.JsonPatchConflict: raise exception.Unprocessable except jsonpatch.JsonPatchException as jpe: raise JsonPatchProcessingException(reason=six.text_type(jpe)) return fluff_plan(db_obj.refined_content(), db_obj.uuid) @exception.wrap_pecan_controller_exception @pecan.expose('json', content_type='application/x-yaml') def post(self): """Create a new CAMP-style plan.""" if not pecan.request.body or len(pecan.request.body) < 1: raise exception.BadRequest # check to make sure the request has the right Content-Type if (pecan.request.content_type is None or pecan.request.content_type != 'application/x-yaml'): raise exception.UnsupportedMediaType( name=pecan.request.content_type, method='POST') try: yaml_input_plan = yamlutils.load(pecan.request.body) except ValueError as excp: raise exception.BadRequest(reason='Plan is invalid. ' + six.text_type(excp)) camp_version = yaml_input_plan.get('camp_version') if camp_version is None: raise exception.BadRequest( reason='camp_version attribute is missing from submitted Plan') elif camp_version != 'CAMP 1.1': raise exception.BadRequest(reason=UNSUP_VER_ERR % camp_version) # Use Solum's handler as the point of commonality. We can do this # because Solum stores plans in the DB in their JSON form. handler = (plan_handler. PlanHandler(pecan.request.security_context)) model_plan = model.Plan(**yaml_input_plan) # Move any inline Service Specifications to the "services" section. # This avoids an issue where WSME can't properly handle multi-typed # attributes (e.g. 'fulfillment'). It also smoothes out the primary # difference between CAMP plans and Solum plans, namely that Solum # plans don't have inline Service Specifications. 
for art in model_plan.artifacts: if art.requirements != wsme.Unset: for req in art.requirements: if (req.fulfillment != wsme.Unset and isinstance(req.fulfillment, model.ServiceSpecification)): s_spec = req.fulfillment # if the inline service spec doesn't have an id # generate one if s_spec.id == wsme.Unset: s_spec.id = str(uuid.uuid4()) # move the inline service spec to the 'services' # section if model_plan.services == wsme.Unset: model_plan.services = [s_spec] else: model_plan.services.append(s_spec) # set the fulfillment to the service spec id req.fulfillment = "id:%s" % s_spec.id db_obj = handler.create(clean_plan(wjson.tojson(model.Plan, model_plan))) plan_dict = fluff_plan(db_obj.refined_content(), db_obj.uuid) pecan.response.status = 201 pecan.response.location = plan_dict['uri'] return plan_dict
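From the client side, the two content-type contracts enforced by this controller look roughly like the sketch below; the host/port and the plan body are illustrative assumptions, while the media types and the 'CAMP 1.1' camp_version value come from the code above.

import requests

PLAN_YAML = """
camp_version: CAMP 1.1
name: example-plan
artifacts: []
"""

# POST must be application/x-yaml and the plan must carry camp_version.
resp = requests.post('http://localhost:9777/camp/v1_1/plans/',
                     data=PLAN_YAML,
                     headers={'Content-Type': 'application/x-yaml'})
assert resp.status_code == 201
plan_uri = resp.headers['Location']

# PATCH must be an RFC 6902 document sent as application/json-patch+json.
patch_doc = '[{"op": "replace", "path": "/name", "value": "renamed-plan"}]'
requests.patch(plan_uri,
               data=patch_doc,
               headers={'Content-Type': 'application/json-patch+json'})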
Seriously, why do I have a cold? I sound just like one of the cold medicine commercials. Too bad most medicines aren't good at clearing all my symptoms. *SNEEZE* I went to Claim Jumpers with *SNEEZE* Karen, Yeh, David, and Michelle. Earlier, Karen had requested *SNEEZE* to go to *SNEEZE* Claim Jumpers before we finished finals. *SNEEZE* The appetizer combo *SNEEZE* was basically a waste, not least because the presentation was bad *SNEEZE* (just a bunch of onion *SNEEZE* rings piled in the middle *SNEEZE* of the plate surrounded by assorted *SNEEZE* appetizers). The three dishes we ordered were good *SNEEZE* and *SNEEZE* plentiful. No *SNEEZE* dessert though. We also went to *SNEEZE* Safeway. Then hung out *SNEEZE* at *SNEEZE* Karen's *SNEEZE* place *SNEEZE*. As you can see, I had *SNEEZE* *SNEEZE* *SNEEZE* *SNEEZE* hor-*SNEEZE*-ble "allergies" *SNEEZE* and that *SNEEZE* *SNEEZE* was quite *SNEEZE* bad *SNEEZE* since *SNEEZE* I was dri-*SNEEZE*-vi-*SNEEZE*-ng. I admit though… the YODA scene was one of the best. Tell me again why I was selected as an RCC (residential computing consultant) for the fall? I spent about two hours trying to open my case (and ironically, it opened when I "accidentally" dropped the entire computer), and in effect unhooked the power supply. Then I spent 30 minutes trying to put an ethernet card in… unsuccessfully. It just wouldn't go all the way in. BAAAH. I want a mini network at home!
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main.py
#
# Copyright 2014 Yang <yang@Leo-FamilyGuy>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#

import matplotlib.pyplot as plt

import classes as TSs


def main():
    obj1 = TSs.Bond()
    obj2 = TSs.Stock()
    for i in range(10):
        obj1.data_add(obj1._datasize, i + 1)
        obj2.data_add(obj2._datasize, i - 10)
    print(obj1._datadict)
    print(obj2._datadict)
    # The first argument to plt.bar() is the bar positions, not a second
    # data series; use the key indices so the bars line up with the xticks
    # set below.
    plt.bar(range(len(obj2._datadict)), list(obj2._datadict.values()),
            align='center')
    plt.xticks(range(len(obj1._datadict)), list(obj1._datadict.keys()))
    plt.show()
    return 0


if __name__ == '__main__':
    main()
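
# The imported classes module (TSs) is not shown in this file. Below is a
# minimal, hypothetical stand-in, inferred purely from how main() uses it;
# the real Bond/Stock implementations may differ.
class _TimeSeries(object):
    """Toy container mapping integer keys to observed values."""

    def __init__(self):
        self._datadict = {}

    @property
    def _datasize(self):
        # main() calls data_add(obj._datasize, ...), i.e. the next free key
        return len(self._datadict)

    def data_add(self, key, value):
        self._datadict[key] = value


class Bond(_TimeSeries):
    pass


class Stock(_TimeSeries):
    pass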
The Red Sox have had a copious amount of injuries this season. Because of all the casualties, many minor league prospects, and some veterans, have been given the chance to show what they can do. Had Jacoby Ellsbury, Mike Cameron, and Jeremy Hermida maintained a relatively healthy season, there is no way that the Red Sox would have seen Darnell McDonald, Daniel Nava, or Ryan Kalish. Sometimes I wonder if these guys–in the back of their minds–hope for injuries so that they can have a shot. To be honest, I never expected Ryan Kalish to be up this year at all. Not because he is a bad athlete or anything, but because of how meticulous the Red Sox are when it comes to development. He started the year in Double-A Portland, and he was performing at a very high level. No doubt that he was going to be moved up to Pawtucket, right? Kalish transitioned seamlessly from Portland to Pawtucket–considered by some to be the toughest jump. I think Kalish was called up because the Red Sox were unsure of what they had in Reddick. Believe me, I think that he is full of potential; he just hasn’t had the at-bats to prove it yet. He has been producing exponentially better since he changed his mechanics after the All-Star Break. The point I’m trying to make is that Ryan Kalish started the season in Double-A, and now he is in the big leagues. I like to think that I have taken a similar path over the past couple of months. As you know, I worked in both Pawtucket and Portland this past summer. I was afforded unbelievable opportunities that gave me incredible access. I never expected to have that kind of access in the major leagues for a really long time. Those of you who have seen my pictures on Twitter and Facebook may be wondering how I got that kind of access. Basically, Subway is sponsoring this webcast that is going to be an app on Facebook and on YouTube called “High School Heroes” (that might just be the working title). I think what they are trying to do is find kids around the country who are just really passionate about something. So they wanted to follow me around at a baseball game and kind of see what I normally do. Stalking a stalker, right? Here is the catch, though. Somehow, Subway was able to get me an all-access (minus the clubhouse) media pass for before the game, and even an interview with a player to be named later (my favorite expression…). I was allowed on the field during batting practice. I think the objective was for me to have easier access to the players to ask for pictures and whatnot. The only thing is that when I get a press pass, I switch into professional mode, but this was kind of different. This press pass wasn’t to get me the kind of access that I got when I was at Pawtucket/Portland. It was to get me the kind of access I had at, say, the minor league complex, but with the major league players. The first thing I did with this access was finally show Dustin Pedroia my Dustin Pedroia salsa. I didn’t have him sign it, though, because I was still kind of figuring out exactly how I was supposed to behave (for lack of a better word) with this pass. It was mainly an opportunity to discuss it with him. I decided to ask Big Papi for a picture. Never hurts to ask, right? There were some fans with pre-game access badges behind home plate, and he was over there as well, so I thought it would be an appropriate time to ask. Then I asked Jacoby Ellsbury for a picture. Obviously, he wasn’t playing in the game, but he was still taking batting practice.
He was one of the nicest guys I met that day. It seemed like he cared about who I was; he wasn’t as dismissive as some of the other guys were (understandably so). I also got a picture with Victor Martinez. It was absolutely surreal to be less than a foot away from these guys. I wasn’t separated by a fence, and security could not do anything to me. There were tons of fans around hoping for autographs too. Because I was where the players were, I now know that yes, they can hear you, but they choose to ignore you. It’s understandable because they have a job; it’s just annoying to realize that some of my past efforts have been futile. Luckily, if you’re on the field, they don’t ignore you as much. Emperor Felix was also kind enough to pose for a picture on his way back from shagging balls in the outfield. Unfortunately, Michael Bowden was sent down that very same day, which was really frustrating because I had been really looking forward to talking to him. I wanted to tell him that I plan on writing my college essay about my first interview with him. The prompt is to describe a significant experience and its impact on you. I didn’t realize how big of an impact it had had on me until I was writing the essay. As Daniel Nava and Ryan Kalish were jogging in, I asked them for a picture, and they said they would do it after batting practice. Before Nava went to batting practice, though, I was able to tell him how I was at his Double-A debut. I was even able to show him the notes that I had from the game. We were talking about the first hit he got on that level and he said, “the ball found [him],” which I thought was really cool. The interview with the “player to be named later” was Darnell McDonald. I was so excited to interview him, but at the same time, I was really nervous because I had no time to prepare the questions. I had found out about it only an hour and a half before. Luckily, I had my notebook filled with various questions from my interviews in Portland. I asked him about his favorite major league experience. I assumed it would be either Opening Day with the Cincinnati Reds in 2009 or his debut with the Red Sox, so I listed those two options, but I obviously left it open for something else. He said his favorite moment was at one of the San Francisco games this past summer. In fact, I was at the game. Before the game, a young boy with cancer had given him a blue band, which he was still wearing. In his very first at-bat that day, he hit a home run. I remember being there for that home run, but I never realized it had that much significance to him. That was certainly beyond baseball. I had access to the press box during the game as well, so that was incredible. I had never been in a major league press box, and I didn’t expect to be in one until after college. This was a nice taste. In the press dining area, I had the chance to speak with Amalie Benjamin, a writer for the Boston Globe. She was very genial, and she told me that she went to Northwestern (currently in my top two choices). Although she didn’t go to the Medill School of Journalism, she used all of its resources. I really enjoyed talking to her because I admire her writing, and she is someone I look up to, considering she is a successful female sports journalist. I did not feel all that lost in the press box considering I had been in one a couple of times before. The only thing was that I didn’t have my laptop, but I was fine. I tried to keep track of all of the pitches in my notebook, and I kept score as well.
I am definitely getting used to this. There was only one bad part of the night: Scott Atchison giving up a walk-off home run to Dan Johnson. My father and I had driven four hours to see the Red Sox lose, and then we had to drive all the way back after a pretty devastating loss. It was such a great baseball game to watch, though. It was a great pitcher’s duel between Garza and Buchholz, and just back-and-forth baseball that kept me on the edge of my seat (even though I had to maintain some level of objectivity in the press box). I think the pros outweighed the cons in this case. The kinds of opportunities that I have been getting for the past few months have been out of this world. I can’t thank the people of the various media relations departments enough for trusting that I would be responsible with this kind of access. I don’t know if it all has set in yet. It’s really hard for me to believe that all this is happening, but I just try to go with the flow. I really think that it’s all a matter of taking every opportunity that you can get.
import tensorflow as tf custom_shuffle_module = tf.load_op_library('src/shuffle_op.so') shuffle = custom_shuffle_module.shuffle # ################### # TENSORBOARD HELPERS # ################### def comprehensive_variable_summaries(var): """ Attach a lot of summaries to a Tensor (for TensorBoard visualization). """ with tf.name_scope('summaries'): mean = tf.reduce_mean(var) tf.summary.scalar('mean', mean) with tf.name_scope('stddev'): stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var) def histogram_variable_summaries(var): """ Attach a histogram summary to a Tensor (for TensorBoard visualization). """ with tf.name_scope('summaries'): tf.summary.histogram('histogram', var) # ################### # ################### # ###################### # LAYER HELPER FUNCTIONS # ###################### def subpixel_reshuffle_1D_impl(X, m): """ performs a 1-D subpixel reshuffle of the input 2-D tensor assumes the last dimension of X is the filter dimension ref: https://github.com/Tetrachrome/subpixel """ return tf.transpose(tf.stack([tf.reshape(x, (-1,)) for x in tf.split(X, m, axis=1)])) def subpixel_reshuffle_1D(X, m, name=None): """ maps over the batch dimension """ return tf.map_fn(lambda x: subpixel_reshuffle_1D_impl(x, m), X, name=name) def subpixel_restack_impl(X, n_prime, m_prime, name=None): """ performs a subpixel restacking such that it restacks columns of a 2-D tensor onto the rows """ bsize = tf.shape(X)[0] r_n = n_prime - X.get_shape().as_list()[1] total_new_space = r_n*m_prime to_stack = tf.slice(X, [0, 0, m_prime], [-1, -1, -1]) to_stack = tf.slice(tf.reshape(to_stack, (bsize, -1)), [0, 0], [-1, total_new_space]) to_stack = tf.reshape(to_stack, (bsize, -1, m_prime)) to_stack = tf.slice(to_stack, [0, 0, 0], [-1, r_n, -1]) return tf.concat((tf.slice(X, [0, 0, 0], [-1, -1, m_prime]), to_stack), axis=1, name=name) def subpixel_restack(X, n_prime, m_prime=None, name=None): n = X.get_shape().as_list()[1] m = X.get_shape().as_list()[2] r_n = n_prime - n if m_prime is None: for i in range(1, m): r_m = i m_prime = m - r_m if r_m*n >= m_prime*r_n: break return subpixel_restack_impl(X, n_prime, m_prime, name=name) def batch_norm(T, is_training, scope): # tf.cond takes nullary functions as its first and second arguments return tf.cond(is_training, lambda: tf.contrib.layers.batch_norm(T, decay=0.99, # zero_debias_moving_mean=True, is_training=is_training, center=True, scale=True, updates_collections=None, scope=scope, reuse=False), lambda: tf.contrib.layers.batch_norm(T, decay=0.99, is_training=is_training, center=True, scale=True, updates_collections=None, scope=scope, reuse=True)) def weight_variable(shape, name=None): initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1) return tf.Variable(initial, name=name) def bias_variable(shape, name=None): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial, name=name) def conv1d(x, W, stride=1, padding='SAME', name=None): return tf.nn.conv1d(x, W, stride=stride, padding=padding, name=name) def build_1d_conv_layer(prev_tensor, prev_conv_depth, conv_window, conv_depth, act, layer_number, stride=1, padding='SAME', tensorboard_output=False, name=None): with tf.name_scope('{}_layer_weights'.format(layer_number)): W = weight_variable([conv_window, prev_conv_depth, conv_depth]) if tensorboard_output: histogram_variable_summaries(W) with 
tf.name_scope('{}_layer_biases'.format(layer_number)): b = bias_variable([conv_depth]) if tensorboard_output: histogram_variable_summaries(b) with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)): conv = conv1d(prev_tensor, W, stride=stride, padding=padding) + b if tensorboard_output: histogram_variable_summaries(conv) with tf.name_scope('{}_layer_conv_activation'.format(layer_number)): h = act(conv, name=name) if tensorboard_output: histogram_variable_summaries(h) return h def build_1d_conv_layer_with_res(prev_tensor, prev_conv_depth, conv_window, conv_depth, res, act, layer_number, tensorboard_output=False, name=None): with tf.name_scope('{}_layer_weights'.format(layer_number)): W = weight_variable([conv_window, prev_conv_depth, conv_depth]) if tensorboard_output: histogram_variable_summaries(W) with tf.name_scope('{}_layer_biases'.format(layer_number)): b = bias_variable([conv_depth]) if tensorboard_output: histogram_variable_summaries(b) with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)): conv = conv1d(prev_tensor, W) + b if tensorboard_output: histogram_variable_summaries(conv) with tf.name_scope('{}_layer_conv_activation'.format(layer_number)): h = act(tf.add(conv, res), name=name) if tensorboard_output: histogram_variable_summaries(h) return h def build_downsampling_block(input_tensor, filter_size, stride, layer_number, act=tf.nn.relu, is_training=True, depth=None, padding='VALID', tensorboard_output=False, name=None): # assume this layer is twice the depth of the previous layer if no depth # information is given if depth is None: depth = 2*input_tensor.get_shape().as_list()[-1] with tf.name_scope('{}_layer_weights'.format(layer_number)): W = weight_variable([filter_size, input_tensor.get_shape().as_list()[-1], depth]) if tensorboard_output: histogram_variable_summaries(W) with tf.name_scope('{}_layer_biases'.format(layer_number)): b = bias_variable([depth]) if tensorboard_output: histogram_variable_summaries(b) with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)): l = tf.nn.conv1d(input_tensor, W, stride=stride, padding=padding, name=name) + b if tensorboard_output: histogram_variable_summaries(l) with tf.name_scope('{}_layer_batch_norm'.format(layer_number)) as scope: # l = tf.nn.dropout(l, keep_prob=0.25) l = batch_norm(l, is_training, scope) with tf.name_scope('{}_layer_conv_activation'.format(layer_number)): l = act(l, name=name) if tensorboard_output: histogram_variable_summaries(l) return l def build_upsampling_block(input_tensor, residual_tensor, filter_size, layer_number, act=tf.nn.relu, is_training=True, depth=None, padding='VALID', tensorboard_output=False, name=None): # assume this layer is half the depth of the previous layer if no depth # information is given if depth is None: depth = int(input_tensor.get_shape().as_list()[-1]/2) with tf.name_scope('{}_layer_weights'.format(layer_number)): W = weight_variable([filter_size, input_tensor.get_shape().as_list()[-1], depth]) if tensorboard_output: histogram_variable_summaries(W) with tf.name_scope('{}_layer_biases'.format(layer_number)): b = bias_variable([depth]) if tensorboard_output: histogram_variable_summaries(b) with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)): l = tf.nn.conv1d(input_tensor, W, stride=1, padding=padding, name=name) + b if tensorboard_output: histogram_variable_summaries(l) with tf.name_scope('{}_layer_batch_norm'.format(layer_number)) as scope: # l = tf.nn.dropout(l, keep_prob=0.25) l = batch_norm(l, is_training, scope) # l = 
tf.nn.l2_normalize(l, dim=2) with tf.name_scope('{}_layer_conv_activation'.format(layer_number)): l = act(l, name=name) if tensorboard_output: histogram_variable_summaries(l) with tf.name_scope('{}_layer_subpixel_reshuffle'.format(layer_number)): l = subpixel_reshuffle_1D(l, residual_tensor.get_shape().as_list()[-1], name=name) if tensorboard_output: histogram_variable_summaries(l) with tf.name_scope('{}_layer_stacking'.format(layer_number)): sliced = tf.slice(residual_tensor, begin=[0, 0, 0], size=[-1, l.get_shape().as_list()[1], -1]) l = tf.concat((l, sliced), axis=2, name=name) if tensorboard_output: histogram_variable_summaries(l) return l # ###################### # ###################### # ################# # MODEL DEFINITIONS # ################# def single_fully_connected_model(input_type, input_shape, n_inputs, n_weights, tensorboard_output=True, scope_name='single_fully_connected_layer'): with tf.name_scope(scope_name): # input of the model (examples) s = [None] shape_prod = 1 for i in input_shape: s.append(i) shape_prod *= i x = tf.placeholder(input_type, shape=s) x_ = tf.reshape(x, [-1, shape_prod]) # first conv layer with tf.name_scope('first_layer_weights'): s = [] s.append(shape_prod) s.append(n_weights) W = weight_variable(s) if tensorboard_output: histogram_variable_summaries(W) with tf.name_scope('first_layer_biases'): b = bias_variable([n_weights]) if tensorboard_output: histogram_variable_summaries(b) with tf.name_scope('first_layer_preactivation'): preact = tf.matmul(x_, W) + b if tensorboard_output: histogram_variable_summaries(preact) with tf.name_scope('first_layer_activation'): y = tf.identity(preact, name=scope_name) if tensorboard_output: histogram_variable_summaries(y) return x, y def three_layer_conv_model(input_type, input_shape, first_conv_window=30, first_conv_depth=128, second_conv_window=10, second_conv_depth=64, third_conv_window=15, tensorboard_output=False, scope_name='3-layer_conv'): with tf.name_scope(scope_name): # input of the model (examples) s = [None] for i in input_shape: s.append(i) x = tf.placeholder(input_type, shape=s) # first conv layer h1 = build_1d_conv_layer(x, 1, first_conv_window, first_conv_depth, tf.nn.elu, 1, tensorboard_output) # second conv layer h2 = build_1d_conv_layer(h1, first_conv_depth, second_conv_window, second_conv_depth, tf.nn.elu, 2, tensorboard_output) # third (last) conv layer y = build_1d_conv_layer(h2, second_conv_depth, third_conv_window, 1, tf.identity, 3, tensorboard_output, scope_name) return x, y def five_layer_conv_model(input_type, input_shape, first_conv_window=30, first_conv_depth=256, second_conv_window=20, second_conv_depth=128, third_conv_window=10, third_conv_depth=64, fourth_conv_window=5, fourth_conv_depth=32, fifth_conv_window=5, tensorboard_output=False, scope_name='5-layer_conv'): with tf.name_scope(scope_name): # input of the model (examples) s = [None] for i in input_shape: s.append(i) x = tf.placeholder(input_type, shape=s) # first conv layer h1 = build_1d_conv_layer(x, 1, first_conv_window, first_conv_depth, tf.nn.elu, 1, tensorboard_output) # second conv layer h2 = build_1d_conv_layer(h1, first_conv_depth, second_conv_window, second_conv_depth, tf.nn.elu, 2, tensorboard_output) # third conv layer h3 = build_1d_conv_layer(h2, second_conv_depth, third_conv_window, third_conv_depth, tf.nn.elu, 3, tensorboard_output) # fourth conv layer h4 = build_1d_conv_layer(h3, third_conv_depth, fourth_conv_window, fourth_conv_depth, tf.nn.elu, 4, tensorboard_output) # fifth (last) conv layer y = 
build_1d_conv_layer(h4, fourth_conv_depth, fifth_conv_window, 1, tf.identity, 5, tensorboard_output, scope_name) return x, y def deep_residual_network(input_type, input_shape, number_of_downsample_layers=8, channel_multiple=8, initial_filter_window=5, initial_stride=2, downsample_filter_window=3, downsample_stride=2, bottleneck_filter_window=4, bottleneck_stride=2, upsample_filter_window=3, tensorboard_output=False, scope_name='deep_residual'): print('layer summary for {} network'.format(scope_name)) downsample_layers = [] upsample_layers = [] with tf.name_scope(scope_name): # training flag train_flag = tf.placeholder(tf.bool) # input of the model (examples) s = [None] for i in input_shape: s.append(i) x = tf.placeholder(input_type, shape=s) input_size = s[-2] num_of_channels = s[-1] print('input: {}'.format(x.get_shape().as_list()[1:])) d1 = build_downsampling_block(x, filter_size=initial_filter_window, stride=initial_stride, tensorboard_output=tensorboard_output, depth=channel_multiple*num_of_channels, is_training=train_flag, layer_number=1) print('downsample layer: {}'.format(d1.get_shape().as_list()[1:])) downsample_layers.append(d1) layer_count = 2 for i in range(number_of_downsample_layers - 1): d = build_downsampling_block( downsample_layers[-1], filter_size=downsample_filter_window, stride=downsample_stride, tensorboard_output=tensorboard_output, is_training=train_flag, layer_number=layer_count) print('downsample layer: {}'.format(d.get_shape().as_list()[1:])) downsample_layers.append(d) layer_count += 1 bn = build_downsampling_block(downsample_layers[-1], filter_size=bottleneck_filter_window, stride=bottleneck_stride, tensorboard_output=tensorboard_output, is_training=train_flag, layer_number=layer_count) print('bottleneck layer: {}'.format(bn.get_shape().as_list()[1:])) layer_count += 1 u1 = build_upsampling_block(bn, downsample_layers[-1], depth=bn.get_shape().as_list()[-1], filter_size=upsample_filter_window, tensorboard_output=tensorboard_output, is_training=train_flag, layer_number=layer_count) print('upsample layer: {}'.format(u1.get_shape().as_list()[1:])) upsample_layers.append(u1) layer_count += 1 for i in range(number_of_downsample_layers - 2, -1, -1): u = build_upsampling_block(upsample_layers[-1], downsample_layers[i], filter_size=upsample_filter_window, tensorboard_output=tensorboard_output, is_training=train_flag, layer_number=layer_count) print('upsample layer: {}'.format(u.get_shape().as_list()[1:])) upsample_layers.append(u) layer_count += 1 target_size = int(input_size/initial_stride) restack = subpixel_restack(upsample_layers[-1], target_size + (upsample_filter_window - 1)) print('restack layer: {}'.format(restack.get_shape().as_list()[1:])) conv = build_1d_conv_layer(restack, restack.get_shape().as_list()[-1], upsample_filter_window, initial_stride, tf.nn.elu, layer_count, padding='VALID', tensorboard_output=tensorboard_output) print('final conv layer: {}'.format(conv.get_shape().as_list()[1:])) # NOTE this effectively is a linear activation on the last conv layer y = subpixel_reshuffle_1D(conv, num_of_channels) y = tf.add(y, x, name=scope_name) print('output: {}'.format(y.get_shape().as_list()[1:])) return train_flag, x, y # ################# # #################
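
# A minimal usage sketch for the model builders above. The window length,
# batch size, and random input here are illustrative assumptions, not part
# of the original file: build the three-layer model on windows of 1024 mono
# samples and run a single forward pass.
import numpy as np

x, y = three_layer_conv_model(tf.float32, (1024, 1))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.randn(4, 1024, 1).astype(np.float32)
    out = sess.run(y, feed_dict={x: batch})
    print(out.shape)  # (4, 1024, 1): stride-1 'SAME' convolutions keep the length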
The Prize was awarded during this year's PANE Annual Meeting at the Museum of Science. Boston Mayor Thomas Menino presented the awards. The Princeton Prize in Race Relations was founded in 2003 by Henry Von Kohorn ’66. Boston was one of the first cities to pilot the Prize in 2003. Eight years later, the Princeton Prize has expanded to 23 regions across the United States. Project entries are judged by the Princeton Prize Committee, which consists of alumni, administrators, and former student winners. Each year, Boston’s Princeton Prize winner is awarded a cash grant of $1,000 and a trip to Princeton University for the Symposium on Race held each May. The Symposium on Race is sponsored by the Class of 1966 and co-sponsored by the Alumni Association and the Carl A. Fields Center. A celebration gathering is also planned for the winners, their families, and the local Princeton alumni community.
### Copyright (C) 2002-2006 Stephen Kennedy <[email protected]> ### Copyright (C) 2009-2010 Kai Willadsen <[email protected]> ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### You should have received a copy of the GNU General Public License ### along with this program; if not, write to the Free Software ### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import codecs import copy import os from gettext import gettext as _ import sys import time import pango import glib import gobject import gtk import gtk.keysyms import diffutil from ui import findbar from ui import gnomeglade import matchers import misc import melddoc import patchdialog import paths import merge from meldapp import app from util.sourceviewer import srcviewer class CachedSequenceMatcher(object): """Simple class for caching diff results, with LRU-based eviction Results from the SequenceMatcher are cached and timestamped, and subsequently evicted based on least-recent generation/usage. The LRU-based eviction is overly simplistic, but is okay for our usage pattern. """ def __init__(self): self.cache = {} def __call__(self, text1, textn): try: self.cache[(text1, textn)][1] = time.time() return self.cache[(text1, textn)][0] except KeyError: matcher = matchers.MyersSequenceMatcher(None, text1, textn) opcodes = matcher.get_opcodes() self.cache[(text1, textn)] = [opcodes, time.time()] return opcodes def clean(self, size_hint): """Clean the cache if necessary @param size_hint: the recommended minimum number of cache entries """ if len(self.cache) < size_hint * 3: return items = self.cache.items() items.sort(key=lambda it: it[1][1]) for item in items[:-size_hint * 2]: del self.cache[item[0]] class BufferLines(object): """gtk.TextBuffer shim with line-based access and optional filtering This class allows a gtk.TextBuffer to be treated as a list of lines of possibly-filtered text. If no filter is given, the raw output from the gtk.TextBuffer is used. The logic here (and in places in FileDiff) requires that Python's unicode splitlines() implementation and gtk.TextBuffer agree on where linebreaks occur. Happily, this is usually the case. """ def __init__(self, buf, textfilter=None): self.buf = buf if textfilter is not None: self.textfilter = textfilter else: self.textfilter = lambda x: x def __getslice__(self, lo, hi): # FIXME: If we ask for arbitrary slices past the end of the buffer, # this will return the last line. start = get_iter_at_line_or_eof(self.buf, lo) end = get_iter_at_line_or_eof(self.buf, hi) txt = unicode(self.buf.get_text(start, end, False), 'utf8') filter_txt = self.textfilter(txt) lines = filter_txt.splitlines() ends = filter_txt.splitlines(True) # The last line in a gtk.TextBuffer is guaranteed never to end in a # newline. As splitlines() discards an empty line at the end, we need # to artificially add a line if the requested slice is past the end of # the buffer, and the last line in the slice ended in a newline. 
if hi >= self.buf.get_line_count() and \ (len(lines) == 0 or len(lines[-1]) != len(ends[-1])): lines.append(u"") ends.append(u"") hi = self.buf.get_line_count() if hi == sys.maxint else hi if hi - lo != len(lines): # These codepoints are considered line breaks by Python, but not # by GtkTextBuffer. additional_breaks = set((u'\x0c', u'\x85')) i = 0 while i < len(ends): line, end = lines[i], ends[i] # It's possible that the last line in a file would end in a # line break character, which requires no joining. if end and end[-1] in additional_breaks and \ (not line or line[-1] not in additional_breaks): assert len(ends) >= i + 1 lines[i:i + 2] = [line + end[-1] + lines[i + 1]] ends[i:i + 2] = [end + ends[i + 1]] i += 1 return lines def __getitem__(self, i): if i > len(self): raise IndexError line_start = get_iter_at_line_or_eof(self.buf, i) line_end = line_start.copy() if not line_end.ends_line(): line_end.forward_to_line_end() txt = self.buf.get_text(line_start, line_end, False) return unicode(self.textfilter(txt), 'utf8') def __len__(self): return self.buf.get_line_count() ################################################################################ # # FileDiff # ################################################################################ MASK_SHIFT, MASK_CTRL = 1, 2 MODE_REPLACE, MODE_DELETE, MODE_INSERT = 0, 1, 2 def get_iter_at_line_or_eof(buf, line): if line >= buf.get_line_count(): return buf.get_end_iter() return buf.get_iter_at_line(line) def buffer_insert(buf, line, text): if line >= buf.get_line_count(): # TODO: We need to insert a linebreak here, but there is no # way to be certain what kind of linebreak to use. text = "\n" + text it = get_iter_at_line_or_eof(buf, line) buf.insert(it, text) return it class CursorDetails(object): __slots__ = ("pane", "pos", "line", "offset", "chunk", "prev", "next", "prev_conflict", "next_conflict") def __init__(self): for var in self.__slots__: setattr(self, var, None) class TaskEntry(object): __slots__ = ("filename", "file", "buf", "codec", "pane", "was_cr") def __init__(self, *args): for var, val in zip(self.__slots__, args): setattr(self, var, val) class TextviewLineAnimation(object): __slots__ = ("start_mark", "end_mark", "start_rgba", "end_rgba", "start_time", "duration") def __init__(self, mark0, mark1, rgba0, rgba1, duration): self.start_mark = mark0 self.end_mark = mark1 self.start_rgba = rgba0 self.end_rgba = rgba1 self.start_time = glib.get_current_time() self.duration = duration class FileDiff(melddoc.MeldDoc, gnomeglade.Component): """Two or three way diff of text files. """ differ = diffutil.Differ keylookup = {gtk.keysyms.Shift_L : MASK_SHIFT, gtk.keysyms.Control_L : MASK_CTRL, gtk.keysyms.Shift_R : MASK_SHIFT, gtk.keysyms.Control_R : MASK_CTRL} # Identifiers for MsgArea messages (MSG_SAME,) = range(1) __gsignals__ = { 'next-conflict-changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (bool, bool)), 'action-mode-changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (int,)), } def __init__(self, prefs, num_panes): """Start up a filediff with num_panes empty contents. 
""" melddoc.MeldDoc.__init__(self, prefs) gnomeglade.Component.__init__(self, paths.ui_dir("filediff.ui"), "filediff") self.map_widgets_into_lists(["textview", "fileentry", "diffmap", "scrolledwindow", "linkmap", "statusimage", "msgarea_mgr", "vbox"]) self.warned_bad_comparison = False # Some sourceviews bind their own undo mechanism, which we replace gtk.binding_entry_remove(srcviewer.GtkTextView, gtk.keysyms.z, gtk.gdk.CONTROL_MASK) gtk.binding_entry_remove(srcviewer.GtkTextView, gtk.keysyms.z, gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK) for v in self.textview: v.set_buffer(srcviewer.GtkTextBuffer()) v.set_show_line_numbers(self.prefs.show_line_numbers) v.set_insert_spaces_instead_of_tabs(self.prefs.spaces_instead_of_tabs) v.set_wrap_mode(self.prefs.edit_wrap_lines) if self.prefs.show_whitespace: v.set_draw_spaces(srcviewer.spaces_flag) srcviewer.set_tab_width(v, self.prefs.tab_size) self._keymask = 0 self.load_font() self.deleted_lines_pending = -1 self.textview_overwrite = 0 self.textview_focussed = None self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ] self.textbuffer = [v.get_buffer() for v in self.textview] self.bufferdata = [MeldBufferData() for b in self.textbuffer] self.buffer_texts = [BufferLines(b) for b in self.textbuffer] self.text_filters = [] self.create_text_filters() app.connect("text-filters-changed", self.on_text_filters_changed) self.buffer_filtered = [BufferLines(b, self._filter_text) for b in self.textbuffer] for (i, w) in enumerate(self.scrolledwindow): w.get_vadjustment().connect("value-changed", self._sync_vscroll, i) w.get_hadjustment().connect("value-changed", self._sync_hscroll) self._connect_buffer_handlers() self._sync_vscroll_lock = False self._sync_hscroll_lock = False self._scroll_lock = False self.linediffer = self.differ() self.linediffer.ignore_blanks = self.prefs.ignore_blank_lines self.in_nested_textview_gutter_expose = False self._inline_cache = set() self._cached_match = CachedSequenceMatcher() self.anim_source_id = [] self.animating_chunks = [] for buf in self.textbuffer: buf.create_tag("inline", background=self.prefs.color_inline_bg, foreground=self.prefs.color_inline_fg) self.anim_source_id.append(None) self.animating_chunks.append([]) def parse_to_cairo(color_spec): c = gtk.gdk.color_parse(color_spec) return tuple([x / 65535. 
for x in (c.red, c.green, c.blue)]) self.fill_colors = {"insert" : parse_to_cairo(self.prefs.color_delete_bg), "delete" : parse_to_cairo(self.prefs.color_delete_bg), "conflict" : parse_to_cairo(self.prefs.color_conflict_bg), "replace" : parse_to_cairo(self.prefs.color_replace_bg)} darken = lambda color: tuple([x * 0.8 for x in color]) self.line_colors = {"insert" : darken(self.fill_colors["insert"]), "delete" : darken(self.fill_colors["delete"]), "conflict" : darken(self.fill_colors["conflict"]), "replace" : darken(self.fill_colors["replace"])} actions = ( ("MakePatch", None, _("Format as patch..."), None, _("Create a patch using differences between files"), self.make_patch), ("PrevConflict", None, _("Previous conflict"), "<Ctrl>I", _("Go to the previous conflict"), lambda x: self.on_next_conflict(gtk.gdk.SCROLL_UP)), ("NextConflict", None, _("Next conflict"), "<Ctrl>K", _("Go to the next conflict"), lambda x: self.on_next_conflict(gtk.gdk.SCROLL_DOWN)), ("PushLeft", gtk.STOCK_GO_BACK, _("Push to left"), "<Alt>Left", _("Push current change to the left"), lambda x: self.push_change(-1)), ("PushRight", gtk.STOCK_GO_FORWARD, _("Push to right"), "<Alt>Right", _("Push current change to the right"), lambda x: self.push_change(1)), # FIXME: using LAST and FIRST is terrible and unreliable icon abuse ("PullLeft", gtk.STOCK_GOTO_LAST, _("Pull from left"), "<Alt><Shift>Right", _("Pull change from the left"), lambda x: self.pull_change(-1)), ("PullRight", gtk.STOCK_GOTO_FIRST, _("Pull from right"), "<Alt><Shift>Left", _("Pull change from the right"), lambda x: self.pull_change(1)), ("CopyLeftUp", None, _("Copy above left"), "<Alt>bracketleft", _("Copy change above the left chunk"), lambda x: self.copy_change(-1, -1)), ("CopyLeftDown", None, _("Copy below left"), "<Alt>semicolon", _("Copy change below the left chunk"), lambda x: self.copy_change(-1, 1)), ("CopyRightUp", None, _("Copy above right"), "<Alt>bracketright", _("Copy change above the right chunk"), lambda x: self.copy_change(1, -1)), ("CopyRightDown", None, _("Copy below right"), "<Alt>quoteright", _("Copy change below the right chunk"), lambda x: self.copy_change(1, 1)), ("Delete", gtk.STOCK_DELETE, _("Delete"), "<Alt>Delete", _("Delete change"), self.delete_change), ("MergeFromLeft", None, _("Merge all changes from left"), None, _("Merge all non-conflicting changes from the left"), lambda x: self.pull_all_non_conflicting_changes(-1)), ("MergeFromRight", None, _("Merge all changes from right"), None, _("Merge all non-conflicting changes from the right"), lambda x: self.pull_all_non_conflicting_changes(1)), ("MergeAll", None, _("Merge all non-conflicting"), None, _("Merge all non-conflicting changes from left and right panes"), lambda x: self.merge_all_non_conflicting_changes()), ("CycleDocuments", None, _("Cycle through documents"), "<control>Escape", _("Move keyboard focus to the next document in this comparison"), self.action_cycle_documents), ) toggle_actions = ( ("LockScrolling", None, _("Lock scrolling"), None, _("Lock scrolling of all panes"), self.on_action_lock_scrolling_toggled, True), ) self.ui_file = paths.ui_dir("filediff-ui.xml") self.actiongroup = gtk.ActionGroup('FilediffPopupActions') self.actiongroup.set_translation_domain("meld") self.actiongroup.add_actions(actions) self.actiongroup.add_toggle_actions(toggle_actions) self.set_num_panes(num_panes) gobject.idle_add( lambda *args: self.load_font()) # hack around Bug 316730 gnomeglade.connect_signal_handlers(self) self.findbar = findbar.FindBar() 
self.filediff.pack_end(self.findbar.widget, False) self.cursor = CursorDetails() self.connect("current-diff-changed", self.on_current_diff_changed) for t in self.textview: t.connect("focus-in-event", self.on_current_diff_changed) t.connect("focus-out-event", self.on_current_diff_changed) self.linediffer.connect("diffs-changed", self.on_diffs_changed) self.undosequence.connect("checkpointed", self.on_undo_checkpointed) self.connect("next-conflict-changed", self.on_next_conflict_changed) def get_keymask(self): return self._keymask def set_keymask(self, value): if value & MASK_SHIFT: mode = MODE_DELETE elif value & MASK_CTRL: mode = MODE_INSERT else: mode = MODE_REPLACE self._keymask = value self.emit("action-mode-changed", mode) keymask = property(get_keymask, set_keymask) def on_focus_change(self): self.keymask = 0 def on_container_switch_in_event(self, ui): melddoc.MeldDoc.on_container_switch_in_event(self, ui) # FIXME: If no focussed textview, action sensitivity will be unset if self.textview_focussed: self.scheduler.add_task(self.textview_focussed.grab_focus) def on_text_filters_changed(self, app): relevant_change = self.create_text_filters() if relevant_change: self.refresh_comparison() def create_text_filters(self): # In contrast to file filters, ordering of text filters can matter old_active = [f.filter_string for f in self.text_filters if f.active] new_active = [f.filter_string for f in app.text_filters if f.active] active_filters_changed = old_active != new_active self.text_filters = [copy.copy(f) for f in app.text_filters] return active_filters_changed def _disconnect_buffer_handlers(self): for textview in self.textview: textview.set_editable(0) for buf in self.textbuffer: assert hasattr(buf,"handlers") for h in buf.handlers: buf.disconnect(h) def _connect_buffer_handlers(self): for textview in self.textview: textview.set_editable(1) for buf in self.textbuffer: id0 = buf.connect("insert-text", self.on_text_insert_text) id1 = buf.connect("delete-range", self.on_text_delete_range) id2 = buf.connect_after("insert-text", self.after_text_insert_text) id3 = buf.connect_after("delete-range", self.after_text_delete_range) id4 = buf.connect("notify::cursor-position", self.on_cursor_position_changed) buf.handlers = id0, id1, id2, id3, id4 # Abbreviations for insert and overwrite that fit in the status bar _insert_overwrite_text = (_("INS"), _("OVR")) # Abbreviation for line, column so that it will fit in the status bar _line_column_text = _("Ln %i, Col %i") def on_cursor_position_changed(self, buf, pspec, force=False): pane = self.textbuffer.index(buf) pos = buf.props.cursor_position if pane == self.cursor.pane and pos == self.cursor.pos and not force: return self.cursor.pane, self.cursor.pos = pane, pos cursor_it = buf.get_iter_at_offset(pos) offset = cursor_it.get_line_offset() line = cursor_it.get_line() insert_overwrite = self._insert_overwrite_text[self.textview_overwrite] line_column = self._line_column_text % (line + 1, offset + 1) status = "%s : %s" % (insert_overwrite, line_column) self.emit("status-changed", status) if line != self.cursor.line or force: chunk, prev, next = self.linediffer.locate_chunk(pane, line) if chunk != self.cursor.chunk or force: self.cursor.chunk = chunk self.emit("current-diff-changed") if prev != self.cursor.prev or next != self.cursor.next or force: self.emit("next-diff-changed", prev is not None, next is not None) prev_conflict, next_conflict = None, None for conflict in self.linediffer.conflicts: if prev is not None and conflict <= prev: prev_conflict = 
conflict if next is not None and conflict >= next: next_conflict = conflict break if prev_conflict != self.cursor.prev_conflict or \ next_conflict != self.cursor.next_conflict or force: self.emit("next-conflict-changed", prev_conflict is not None, next_conflict is not None) self.cursor.prev, self.cursor.next = prev, next self.cursor.prev_conflict = prev_conflict self.cursor.next_conflict = next_conflict self.cursor.line, self.cursor.offset = line, offset def on_current_diff_changed(self, widget, *args): pane = self.cursor.pane chunk_id = self.cursor.chunk push_left, push_right, pull_left, pull_right, delete, \ copy_left, copy_right = (True,) * 7 if pane == -1 or chunk_id is None: push_left, push_right, pull_left, pull_right, delete, \ copy_left, copy_right = (False,) * 7 else: # Push and Delete are active if the current pane has something to # act on, and the target pane exists and is editable. Pull is # sensitive if the source pane has something to get, and the # current pane is editable. Copy actions are sensitive if the # conditions for push are met, *and* there is some content in the # target pane. editable = self.textview[pane].get_editable() editable_left = pane > 0 and self.textview[pane - 1].get_editable() editable_right = pane < self.num_panes - 1 and \ self.textview[pane + 1].get_editable() if pane == 0 or pane == 2: chunk = self.linediffer.get_chunk(chunk_id, pane) insert_chunk = chunk[1] == chunk[2] delete_chunk = chunk[3] == chunk[4] push_left = editable_left and not insert_chunk push_right = editable_right and not insert_chunk pull_left = pane == 2 and editable and not delete_chunk pull_right = pane == 0 and editable and not delete_chunk delete = editable and not insert_chunk copy_left = push_left and not delete_chunk copy_right = push_right and not delete_chunk elif pane == 1: chunk0 = self.linediffer.get_chunk(chunk_id, 1, 0) chunk2 = None if self.num_panes == 3: chunk2 = self.linediffer.get_chunk(chunk_id, 1, 2) left_mid_exists = chunk0 is not None and chunk0[1] != chunk0[2] left_exists = chunk0 is not None and chunk0[3] != chunk0[4] right_mid_exists = chunk2 is not None and chunk2[1] != chunk2[2] right_exists = chunk2 is not None and chunk2[3] != chunk2[4] push_left = editable_left and left_mid_exists push_right = editable_right and right_mid_exists pull_left = editable and left_exists pull_right = editable and right_exists delete = editable and (left_mid_exists or right_mid_exists) copy_left = push_left and left_exists copy_right = push_right and right_exists self.actiongroup.get_action("PushLeft").set_sensitive(push_left) self.actiongroup.get_action("PushRight").set_sensitive(push_right) self.actiongroup.get_action("PullLeft").set_sensitive(pull_left) self.actiongroup.get_action("PullRight").set_sensitive(pull_right) self.actiongroup.get_action("Delete").set_sensitive(delete) self.actiongroup.get_action("CopyLeftUp").set_sensitive(copy_left) self.actiongroup.get_action("CopyLeftDown").set_sensitive(copy_left) self.actiongroup.get_action("CopyRightUp").set_sensitive(copy_right) self.actiongroup.get_action("CopyRightDown").set_sensitive(copy_right) # FIXME: don't queue_draw() on everything... 
just on what changed self.queue_draw() def on_next_conflict_changed(self, doc, have_prev, have_next): self.actiongroup.get_action("PrevConflict").set_sensitive(have_prev) self.actiongroup.get_action("NextConflict").set_sensitive(have_next) def on_next_conflict(self, direction): if direction == gtk.gdk.SCROLL_DOWN: target = self.cursor.next_conflict else: # direction == gtk.gdk.SCROLL_UP target = self.cursor.prev_conflict if target is None: return buf = self.textbuffer[self.cursor.pane] chunk = self.linediffer.get_chunk(target, self.cursor.pane) buf.place_cursor(buf.get_iter_at_line(chunk[1])) self.textview[self.cursor.pane].scroll_to_mark(buf.get_insert(), 0.1) def push_change(self, direction): src = self._get_focused_pane() dst = src + direction chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst) assert(src != -1 and self.cursor.chunk is not None) assert(dst in (0, 1, 2)) assert(chunk is not None) self.replace_chunk(src, dst, chunk) def pull_change(self, direction): dst = self._get_focused_pane() src = dst + direction chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst) assert(dst != -1 and self.cursor.chunk is not None) assert(src in (0, 1, 2)) assert(chunk is not None) self.replace_chunk(src, dst, chunk) def copy_change(self, direction, copy_direction): src = self._get_focused_pane() dst = src + direction chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst) assert(src != -1 and self.cursor.chunk is not None) assert(dst in (0, 1, 2)) assert(chunk is not None) copy_up = True if copy_direction < 0 else False self.copy_chunk(src, dst, chunk, copy_up) def pull_all_non_conflicting_changes(self, direction): assert direction in (-1, 1) dst = self._get_focused_pane() src = dst + direction assert src in range(self.num_panes) merger = merge.Merger() merger.differ = self.linediffer merger.texts = self.buffer_texts for mergedfile in merger.merge_2_files(src, dst): pass self._sync_vscroll_lock = True self.on_textbuffer__begin_user_action() self.textbuffer[dst].set_text(mergedfile) self.on_textbuffer__end_user_action() def resync(): self._sync_vscroll_lock = False self._sync_vscroll(self.scrolledwindow[src].get_vadjustment(), src) self.scheduler.add_task(resync) def merge_all_non_conflicting_changes(self): dst = 1 merger = merge.Merger() merger.differ = self.linediffer merger.texts = self.buffer_texts for mergedfile in merger.merge_3_files(False): pass self._sync_vscroll_lock = True self.on_textbuffer__begin_user_action() self.textbuffer[dst].set_text(mergedfile) self.on_textbuffer__end_user_action() def resync(): self._sync_vscroll_lock = False self._sync_vscroll(self.scrolledwindow[0].get_vadjustment(), 0) self.scheduler.add_task(resync) def delete_change(self, widget): pane = self._get_focused_pane() chunk = self.linediffer.get_chunk(self.cursor.chunk, pane) assert(pane != -1 and self.cursor.chunk is not None) assert(chunk is not None) self.delete_chunk(pane, chunk) def _synth_chunk(self, pane0, pane1, line): """Returns the Same chunk that would exist at the given location if we didn't remove Same chunks""" # This method is a hack around our existing diffutil data structures; # getting rid of the Same chunk removal is difficult, as several places # have baked in the assumption of only being given changed blocks. buf0, buf1 = self.textbuffer[pane0], self.textbuffer[pane1] start0, end0 = 0, buf0.get_line_count() - 1 start1, end1 = 0, buf1.get_line_count() - 1 # This hack is required when pane0's prev/next chunk doesn't exist # (i.e., is Same) between pane0 and pane1. 
prev_chunk0, prev_chunk1, next_chunk0, next_chunk1 = (None,) * 4 _, prev, next = self.linediffer.locate_chunk(pane0, line) if prev is not None: while prev >= 0: prev_chunk0 = self.linediffer.get_chunk(prev, pane0, pane1) prev_chunk1 = self.linediffer.get_chunk(prev, pane1, pane0) if None not in (prev_chunk0, prev_chunk1): start0 = prev_chunk0[2] start1 = prev_chunk1[2] break prev -= 1 if next is not None: while next < self.linediffer.diff_count(): next_chunk0 = self.linediffer.get_chunk(next, pane0, pane1) next_chunk1 = self.linediffer.get_chunk(next, pane1, pane0) if None not in (next_chunk0, next_chunk1): end0 = next_chunk0[1] end1 = next_chunk1[1] break next += 1 return "Same", start0, end0, start1, end1 def _corresponding_chunk_line(self, chunk, line, pane, new_pane): """Approximates the corresponding line between panes""" old_buf, new_buf = self.textbuffer[pane], self.textbuffer[new_pane] # Special-case cross-pane jumps if (pane == 0 and new_pane == 2) or (pane == 2 and new_pane == 0): proxy = self._corresponding_chunk_line(chunk, line, pane, 1) return self._corresponding_chunk_line(chunk, proxy, 1, new_pane) # Either we are currently in an identifiable chunk, or we are in a Same # chunk; if we establish the start/end of that chunk in both panes, we # can figure out what our new offset should be. cur_chunk = None if chunk is not None: cur_chunk = self.linediffer.get_chunk(chunk, pane, new_pane) if cur_chunk is None: cur_chunk = self._synth_chunk(pane, new_pane, line) cur_start, cur_end, new_start, new_end = cur_chunk[1:5] # If the new buffer's current cursor is already in the correct chunk, # assume that we have in-progress editing, and don't move it. cursor_it = new_buf.get_iter_at_mark(new_buf.get_insert()) cursor_line = cursor_it.get_line() cursor_chunk, _, _ = self.linediffer.locate_chunk(new_pane, cursor_line) if cursor_chunk is not None: already_in_chunk = cursor_chunk == chunk else: cursor_chunk = self._synth_chunk(pane, new_pane, cursor_line) already_in_chunk = cursor_chunk[3] == new_start and \ cursor_chunk[4] == new_end if already_in_chunk: new_line = cursor_line else: # Guess where to put the cursor: in the same chunk, at about the # same place within the chunk, calculated proportionally by line. # Insert chunks and one-line chunks are placed at the top. 
if cur_end == cur_start: chunk_offset = 0.0 else: chunk_offset = (line - cur_start) / float(cur_end - cur_start) new_line = new_start + int(chunk_offset * (new_end - new_start)) return new_line def action_cycle_documents(self, widget): pane = self._get_focused_pane() new_pane = (pane + 1) % self.num_panes chunk, line = self.cursor.chunk, self.cursor.line new_line = self._corresponding_chunk_line(chunk, line, pane, new_pane) new_buf = self.textbuffer[new_pane] self.textview[new_pane].grab_focus() new_buf.place_cursor(new_buf.get_iter_at_line(new_line)) self.textview[new_pane].scroll_to_mark(new_buf.get_insert(), 0.1) def on_textview_focus_in_event(self, view, event): self.textview_focussed = view self.findbar.textview = view self.on_cursor_position_changed(view.get_buffer(), None, True) self._set_merge_action_sensitivity() def _after_text_modified(self, buffer, startline, sizechange): if self.num_panes > 1: pane = self.textbuffer.index(buffer) self.linediffer.change_sequence(pane, startline, sizechange, self.buffer_filtered) # FIXME: diff-changed signal for the current buffer would be cleaner focused_pane = self._get_focused_pane() if focused_pane != -1: self.on_cursor_position_changed(self.textbuffer[focused_pane], None, True) self.update_highlighting() self.queue_draw() def _filter_text(self, txt): def killit(m): assert m.group().count("\n") == 0 if len(m.groups()): s = m.group() for g in m.groups(): if g: s = s.replace(g,"") return s else: return "" try: for filt in self.text_filters: if filt.active: txt = filt.filter.sub(killit, txt) except AssertionError: if not self.warned_bad_comparison: misc.run_dialog(_("Filter '%s' changed the number of lines in the file. " "Comparison will be incorrect. See the user manual for more details.") % filt.label) self.warned_bad_comparison = True return txt def after_text_insert_text(self, buf, it, newtext, textlen): start_mark = buf.get_mark("insertion-start") starting_at = buf.get_iter_at_mark(start_mark).get_line() buf.delete_mark(start_mark) lines_added = it.get_line() - starting_at self._after_text_modified(buf, starting_at, lines_added) def after_text_delete_range(self, buffer, it0, it1): starting_at = it0.get_line() assert self.deleted_lines_pending != -1 self._after_text_modified(buffer, starting_at, -self.deleted_lines_pending) self.deleted_lines_pending = -1 def load_font(self): fontdesc = pango.FontDescription(self.prefs.get_current_font()) context = self.textview0.get_pango_context() metrics = context.get_metrics( fontdesc, context.get_language() ) self.pixels_per_line = (metrics.get_ascent() + metrics.get_descent()) / 1024 self.pango_char_width = metrics.get_approximate_char_width() tabs = pango.TabArray(10, 0) tab_size = self.prefs.tab_size for i in range(10): tabs.set_tab(i, pango.TAB_LEFT, i*tab_size*self.pango_char_width) for i in range(3): self.textview[i].modify_font(fontdesc) self.textview[i].set_tabs(tabs) for i in range(2): self.linkmap[i].queue_draw() def on_preference_changed(self, key, value): if key == "tab_size": tabs = pango.TabArray(10, 0) for i in range(10): tabs.set_tab(i, pango.TAB_LEFT, i*value*self.pango_char_width) for i in range(3): self.textview[i].set_tabs(tabs) for t in self.textview: srcviewer.set_tab_width(t, value) elif key == "use_custom_font" or key == "custom_font": self.load_font() elif key == "show_line_numbers": for t in self.textview: t.set_show_line_numbers( value ) elif key == "show_whitespace": spaces_flag = srcviewer.spaces_flag if value else 0 for v in self.textview: v.set_draw_spaces(spaces_flag) 
        elif key == "use_syntax_highlighting":
            for i in range(self.num_panes):
                srcviewer.set_highlight_syntax(self.textbuffer[i], value)
        elif key == "edit_wrap_lines":
            for t in self.textview:
                t.set_wrap_mode(self.prefs.edit_wrap_lines)
            # FIXME: On changing wrap mode, we get one redraw using cached
            # coordinates, followed by a second redraw (e.g., on refocus) with
            # correct coordinates. Overly-aggressive textview lazy calculation?
            self.diffmap0.queue_draw()
            self.diffmap1.queue_draw()
        elif key == "spaces_instead_of_tabs":
            for t in self.textview:
                t.set_insert_spaces_instead_of_tabs(value)
        elif key == "ignore_blank_lines":
            self.linediffer.ignore_blanks = self.prefs.ignore_blank_lines
            self.refresh_comparison()

    def on_key_press_event(self, object, event):
        x = self.keylookup.get(event.keyval, 0)
        if self.keymask | x != self.keymask:
            self.keymask |= x
        elif event.keyval == gtk.keysyms.Escape:
            self.findbar.hide()

    def on_key_release_event(self, object, event):
        x = self.keylookup.get(event.keyval, 0)
        if self.keymask & ~x != self.keymask:
            self.keymask &= ~x
        # Ugly workaround for bgo#584342
        elif event.keyval == gtk.keysyms.ISO_Prev_Group:
            self.keymask = 0

    def _get_pane_label(self, i):
        #TRANSLATORS: this is the name of a new file which has not yet been saved
        return self.bufferdata[i].label or _("<unnamed>")

    def on_delete_event(self, appquit=0):
        response = gtk.RESPONSE_OK
        modified = [b.modified for b in self.bufferdata]
        if 1 in modified:
            dialog = gnomeglade.Component(paths.ui_dir("filediff.ui"), "closedialog")
            dialog.widget.set_transient_for(self.widget.get_toplevel())
            buttons = []
            for i in range(self.num_panes):
                b = gtk.CheckButton(self._get_pane_label(i))
                b.set_use_underline(False)
                buttons.append(b)
                dialog.box.pack_start(b, 1, 1)
                if not modified[i]:
                    b.set_sensitive(0)
                else:
                    b.set_active(1)
            dialog.box.show_all()
            response = dialog.widget.run()
            try_save = [b.get_active() for b in buttons]
            dialog.widget.destroy()
            if response == gtk.RESPONSE_OK:
                for i in range(self.num_panes):
                    if try_save[i]:
                        if not self.save_file(i):
                            return gtk.RESPONSE_CANCEL
            elif response == gtk.RESPONSE_DELETE_EVENT:
                response = gtk.RESPONSE_CANCEL
        return response

    #
    # text buffer undo/redo
    #

    def on_textbuffer__begin_user_action(self, *buffer):
        self.undosequence.begin_group()

    def on_textbuffer__end_user_action(self, *buffer):
        self.undosequence.end_group()
        self.update_highlighting()

    def on_text_insert_text(self, buf, it, text, textlen):
        text = unicode(text, 'utf8')
        self.undosequence.add_action(
            BufferInsertionAction(buf, it.get_offset(), text))
        buf.create_mark("insertion-start", it, True)

    def on_text_delete_range(self, buf, it0, it1):
        text = unicode(buf.get_text(it0, it1, False), 'utf8')
        assert self.deleted_lines_pending == -1
        self.deleted_lines_pending = it1.get_line() - it0.get_line()
        self.undosequence.add_action(
            BufferDeletionAction(buf, it0.get_offset(), text))

    def on_undo_checkpointed(self, undosequence, buf, checkpointed):
        self.set_buffer_modified(buf, not checkpointed)

    #
    #
    #

    def open_external(self):
        pane = self._get_focused_pane()
        if pane >= 0:
            if self.bufferdata[pane].filename:
                self._open_files([self.bufferdata[pane].filename])

    def get_selected_text(self):
        """Returns selected text of active pane"""
        pane = self._get_focused_pane()
        if pane != -1:
            buf = self.textbuffer[pane]
            sel = buf.get_selection_bounds()
            if sel:
                return unicode(buf.get_text(sel[0], sel[1], False), 'utf8')
        return None

    def on_find_activate(self, *args):
        self.findbar.start_find(self.textview_focussed)
        self.keymask = 0

    def on_replace_activate(self, *args):
        self.findbar.start_replace(self.textview_focussed)
        self.keymask = 0

    def on_find_next_activate(self, *args):
        self.findbar.start_find_next(self.textview_focussed)

    def on_find_previous_activate(self, *args):
        self.findbar.start_find_previous(self.textview_focussed)

    def on_filediff__key_press_event(self, entry, event):
        if event.keyval == gtk.keysyms.Escape:
            self.findbar.hide()

    def on_scrolledwindow__size_allocate(self, scrolledwindow, allocation):
        index = self.scrolledwindow.index(scrolledwindow)
        if index == 0 or index == 1:
            self.linkmap[0].queue_draw()
        if index == 1 or index == 2:
            self.linkmap[1].queue_draw()

    def on_textview_popup_menu(self, textview):
        self.popup_menu.popup(None, None, None, 0,
                              gtk.get_current_event_time())
        return True

    def on_textview_button_press_event(self, textview, event):
        if event.button == 3:
            textview.grab_focus()
            self.popup_menu.popup(None, None, None, event.button, event.time)
            return True
        return False

    def on_textview_toggle_overwrite(self, view):
        self.textview_overwrite = not self.textview_overwrite
        for v, h in zip(self.textview, self.textview_overwrite_handlers):
            v.disconnect(h)
            if v != view:
                v.emit("toggle-overwrite")
        self.textview_overwrite_handlers = [
            t.connect("toggle-overwrite", self.on_textview_toggle_overwrite)
            for t in self.textview]
        self.on_cursor_position_changed(view.get_buffer(), None, True)

    #
    # text buffer loading/saving
    #

    def set_labels(self, lst):
        assert len(lst) <= len(self.bufferdata)
        for l, d in zip(lst, self.bufferdata):
            if len(l):
                d.label = l

    def set_merge_output_file(self, filename):
        if len(self.bufferdata) < 2:
            return
        self.bufferdata[1].savefile = os.path.abspath(filename)

    def recompute_label(self):
        filenames = []
        for i in range(self.num_panes):
            filenames.append(self._get_pane_label(i))
        shortnames = misc.shorten_names(*filenames)
        for i in range(self.num_panes):
            stock = None
            if self.bufferdata[i].modified == 1:
                shortnames[i] += "*"
                if self.bufferdata[i].writable == 1:
                    stock = gtk.STOCK_SAVE
                else:
                    stock = gtk.STOCK_SAVE_AS
            elif self.bufferdata[i].writable == 0:
                stock = gtk.STOCK_NO
            if stock:
                self.statusimage[i].show()
                self.statusimage[i].set_from_stock(stock, gtk.ICON_SIZE_BUTTON)
                self.statusimage[i].set_size_request(self.diffmap[0].size_request()[0], -1)
            else:
                self.statusimage[i].hide()
        self.label_text = " : ".join(shortnames)
        self.tooltip_text = self.label_text
        self.label_changed()

    def set_files(self, files):
        """Set num panes to len(files) and load each file given.
           If an element is None, the text of a pane is left as is.
        """
        self._disconnect_buffer_handlers()
        self._inline_cache = set()
        for i, f in enumerate(files):
            if f:
                self.textbuffer[i].delete(*self.textbuffer[i].get_bounds())
                absfile = os.path.abspath(f)
                self.fileentry[i].set_filename(absfile)
                self.fileentry[i].prepend_history(absfile)
                bold, bnew = self.bufferdata[i], MeldBufferData(absfile)
                if bold.filename == bnew.filename:
                    bnew.label = bold.label
                self.bufferdata[i] = bnew
                self.msgarea_mgr[i].clear()
        self.recompute_label()
        self.textview[len(files) >= 2].grab_focus()
        self._connect_buffer_handlers()
        self.scheduler.add_task(self._set_files_internal(files).next)

    def _load_files(self, files, textbuffers):
        self.undosequence.clear()
        yield _("[%s] Set num panes") % self.label_text
        self.set_num_panes(len(files))
        self._disconnect_buffer_handlers()
        self.linediffer.clear()
        self.queue_draw()
        try_codecs = self.prefs.text_codecs.split() or ['utf_8', 'utf_16']
        yield _("[%s] Opening files") % self.label_text
        tasks = []

        def add_dismissable_msg(pane, icon, primary, secondary):
            msgarea = self.msgarea_mgr[pane].new_from_text_and_icon(
                            icon, primary, secondary)
            button = msgarea.add_stock_button_with_text(_("Hi_de"),
                            gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
            msgarea.connect("response",
                            lambda *args: self.msgarea_mgr[pane].clear())
            msgarea.show_all()
            return msgarea

        for pane, filename in enumerate(files):
            buf = textbuffers[pane]
            if filename:
                try:
                    handle = codecs.open(filename, "rU", try_codecs[0])
                    task = TaskEntry(filename, handle, buf, try_codecs[:],
                                     pane, False)
                    tasks.append(task)
                except (IOError, LookupError), e:
                    buf.delete(*buf.get_bounds())
                    add_dismissable_msg(pane, gtk.STOCK_DIALOG_ERROR,
                                        _("Could not read file"), str(e))
        yield _("[%s] Reading files") % self.label_text
        while len(tasks):
            for t in tasks[:]:
                try:
                    nextbit = t.file.read(4096)
                    if nextbit.find("\x00") != -1:
                        t.buf.delete(*t.buf.get_bounds())
                        add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
                                    _("Could not read file"),
                                    _("%s appears to be a binary file.") % t.filename)
                        tasks.remove(t)
                except ValueError, err:
                    t.codec.pop(0)
                    if len(t.codec):
                        t.file = codecs.open(t.filename, "rU", t.codec[0])
                        t.buf.delete(t.buf.get_start_iter(), t.buf.get_end_iter())
                    else:
                        print "codec error fallback", err
                        t.buf.delete(*t.buf.get_bounds())
                        add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
                                    _("Could not read file"),
                                    _("%s is not in encodings: %s") %
                                    (t.filename, try_codecs))
                        tasks.remove(t)
                except IOError, ioerr:
                    add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
                                    _("Could not read file"), str(ioerr))
                    tasks.remove(t)
                else:
                    # The handling here avoids inserting split CR/LF pairs into
                    # GtkTextBuffers; this is relevant only when universal
                    # newline support is unavailable or broken.
                    if t.was_cr:
                        nextbit = "\r" + nextbit
                        t.was_cr = False
                    if len(nextbit):
                        if nextbit[-1] == "\r" and len(nextbit) > 1:
                            t.was_cr = True
                            nextbit = nextbit[0:-1]
                        t.buf.insert(t.buf.get_end_iter(), nextbit)
                    else:
                        self.set_buffer_writable(t.buf, os.access(t.filename, os.W_OK))
                        self.bufferdata[t.pane].encoding = t.codec[0]
                        if hasattr(t.file, "newlines"):
                            self.bufferdata[t.pane].newlines = t.file.newlines
                        tasks.remove(t)
            yield 1
        for b in self.textbuffer:
            self.undosequence.checkpoint(b)

    def _diff_files(self):
        yield _("[%s] Computing differences") % self.label_text
        texts = self.buffer_filtered[:self.num_panes]
        step = self.linediffer.set_sequences_iter(texts)
        while step.next() is None:
            yield 1

        chunk, prev, next = self.linediffer.locate_chunk(1, 0)
        self.cursor.next = chunk
        if self.cursor.next is None:
            self.cursor.next = next
        for buf in self.textbuffer:
            buf.place_cursor(buf.get_start_iter())
        self.scheduler.add_task(lambda: self.next_diff(gtk.gdk.SCROLL_DOWN), True)
        self.queue_draw()
        self.update_highlighting()
        self._connect_buffer_handlers()
        self._set_merge_action_sensitivity()

        langs = []
        for i in range(self.num_panes):
            filename = self.bufferdata[i].filename
            if filename:
                langs.append(srcviewer.get_language_from_file(filename))
            else:
                langs.append(None)

        # If we have only one identified language then we assume that all of
        # the files are actually of that type.
        real_langs = [l for l in langs if l]
        if real_langs and real_langs.count(real_langs[0]) == len(real_langs):
            langs = (real_langs[0],) * len(langs)

        for i in range(self.num_panes):
            srcviewer.set_language(self.textbuffer[i], langs[i])
            srcviewer.set_highlight_syntax(self.textbuffer[i],
                                           self.prefs.use_syntax_highlighting)
        yield 0

    def _set_files_internal(self, files):
        for i in self._load_files(files, self.textbuffer):
            yield i
        for i in self._diff_files():
            yield i

    def refresh_comparison(self):
        """Refresh the view by clearing and redoing all comparisons"""
        self._disconnect_buffer_handlers()
        self._inline_cache = set()
        self.linediffer.clear()
        self.queue_draw()
        self.scheduler.add_task(self._diff_files().next)

    def _set_merge_action_sensitivity(self):
        pane = self._get_focused_pane()
        editable = self.textview[pane].get_editable()
        mergeable = self.linediffer.has_mergeable_changes(pane)
        self.actiongroup.get_action("MergeFromLeft").set_sensitive(mergeable[0] and editable)
        self.actiongroup.get_action("MergeFromRight").set_sensitive(mergeable[1] and editable)
        if self.num_panes == 3 and self.textview[1].get_editable():
            mergeable = self.linediffer.has_mergeable_changes(1)
        else:
            mergeable = (False, False)
        self.actiongroup.get_action("MergeAll").set_sensitive(mergeable[0] or mergeable[1])

    def on_diffs_changed(self, linediffer):
        self._set_merge_action_sensitivity()
        if self.linediffer.sequences_identical():
            error_message = True in [m.has_message() for m in self.msgarea_mgr]
            if self.num_panes == 1 or error_message:
                return
            for index, mgr in enumerate(self.msgarea_mgr):
                secondary_text = None
                # TODO: Currently this only checks to see whether text filters
                # are active, and may be altering the comparison. It would be
                # better if we only showed this message if the filters *did*
                # change the text in question.
                active_filters = any([f.active for f in self.text_filters])
                if active_filters:
                    secondary_text = _("Text filters are being used, and may "
                                       "be masking differences between files. "
                                       "Would you like to compare the "
                                       "unfiltered files?")
                msgarea = mgr.new_from_text_and_icon(gtk.STOCK_INFO,
                                _("Files are identical"),
                                secondary_text)
                mgr.set_msg_id(FileDiff.MSG_SAME)
                button = msgarea.add_stock_button_with_text(_("Hide"),
                                gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
                if index == 0:
                    button.props.label = _("Hi_de")
                if active_filters:
                    msgarea.add_button(_("Show without filters"),
                                       gtk.RESPONSE_OK)
                msgarea.connect("response",
                                self.on_msgarea_identical_response)
                msgarea.show_all()
        else:
            for m in self.msgarea_mgr:
                if m.get_msg_id() == FileDiff.MSG_SAME:
                    m.clear()

    def on_msgarea_identical_response(self, msgarea, respid):
        for mgr in self.msgarea_mgr:
            mgr.clear()
        if respid == gtk.RESPONSE_OK:
            self.text_filters = []
            self.refresh_comparison()

    def update_highlighting(self):
        if not self.undosequence.in_grouped_action():
            self.scheduler.add_task(self._update_highlighting().next)

    def _update_highlighting(self):
        alltexts = self.buffer_texts
        alltags = [b.get_tag_table().lookup("inline") for b in self.textbuffer]
        progress = [b.create_mark("progress", b.get_start_iter())
                    for b in self.textbuffer]
        newcache = set()
        for chunk in self.linediffer.all_changes():
            for i, c in enumerate(chunk):
                if c and c[0] == "replace":
                    bufs = self.textbuffer[1], self.textbuffer[i*2]
                    tags = alltags[1], alltags[i*2]
                    cacheitem = (i, c, tuple(alltexts[1][c[1]:c[2]]),
                                 tuple(alltexts[i*2][c[3]:c[4]]))
                    newcache.add(cacheitem)

                    # Clean interim chunks
                    starts = [get_iter_at_line_or_eof(b, l)
                              for b, l in zip(bufs, (c[1], c[3]))]
                    prog_it0 = bufs[0].get_iter_at_mark(progress[1])
                    prog_it1 = bufs[1].get_iter_at_mark(progress[i * 2])
                    bufs[0].remove_tag(tags[0], prog_it0, starts[0])
                    bufs[1].remove_tag(tags[1], prog_it1, starts[1])
                    bufs[0].move_mark(progress[1],
                                      get_iter_at_line_or_eof(bufs[0], c[2]))
                    bufs[1].move_mark(progress[i * 2],
                                      get_iter_at_line_or_eof(bufs[1], c[4]))

                    if cacheitem in self._inline_cache:
                        continue

                    ends = [get_iter_at_line_or_eof(b, l)
                            for b, l in zip(bufs, (c[2], c[4]))]
                    bufs[0].remove_tag(tags[0], starts[0], ends[0])
                    bufs[1].remove_tag(tags[1], starts[1], ends[1])

                    # We don't use self.buffer_texts here, as removing line
                    # breaks messes with inline highlighting in CRLF cases
                    text1 = bufs[0].get_text(starts[0], ends[0], False)
                    text1 = unicode(text1, 'utf8')
                    textn = bufs[1].get_text(starts[1], ends[1], False)
                    textn = unicode(textn, 'utf8')

                    # For very long sequences, bail rather than trying a very
                    # slow comparison
                    inline_limit = 8000 # arbitrary constant
                    if len(text1) + len(textn) > inline_limit:
                        for i in range(2):
                            bufs[i].apply_tag(tags[i], starts[i], ends[i])
                        continue

                    #print "<<<\n%s\n---\n%s\n>>>" % (text1, textn)
                    back = (0, 0)
                    for o in self._cached_match(text1, textn):
                        if o[0] == "equal":
                            if (o[2]-o[1] < 3) or (o[4]-o[3] < 3):
                                back = o[4]-o[3], o[2]-o[1]
                            continue
                        for i in range(2):
                            s, e = starts[i].copy(), starts[i].copy()
                            s.forward_chars(o[1+2*i] - back[i])
                            e.forward_chars(o[2+2*i])
                            bufs[i].apply_tag(tags[i], s, e)
                        back = (0, 0)
                    yield 1

        # Clean up trailing lines
        prog_it = [b.get_iter_at_mark(p)
                   for b, p in zip(self.textbuffer, progress)]
        for b, tag, start in zip(self.textbuffer, alltags, prog_it):
            b.remove_tag(tag, start, b.get_end_iter())
        self._inline_cache = newcache
        self._cached_match.clean(len(self._inline_cache))

    def on_textview_expose_event(self, textview, event):
        if self.num_panes == 1:
            return
        if event.window != textview.get_window(gtk.TEXT_WINDOW_TEXT) \
                and event.window != textview.get_window(gtk.TEXT_WINDOW_LEFT):
            return
        # Hack to redraw the line number gutter used by post-2.10 GtkSourceView
        if event.window == textview.get_window(gtk.TEXT_WINDOW_LEFT) and \
                self.in_nested_textview_gutter_expose:
            self.in_nested_textview_gutter_expose = False
            return

        visible = textview.get_visible_rect()
        pane = self.textview.index(textview)
        area = event.area
        x, y = textview.window_to_buffer_coords(gtk.TEXT_WINDOW_WIDGET,
                                                area.x, area.y)
        bounds = (textview.get_line_num_for_y(y),
                  textview.get_line_num_for_y(y + area.height + 1))

        width, height = textview.allocation.width, textview.allocation.height
        context = event.window.cairo_create()
        context.rectangle(area.x, area.y, area.width, area.height)
        context.clip()
        context.set_line_width(1.0)

        for change in self.linediffer.single_changes(pane, bounds):
            ypos0 = textview.get_y_for_line_num(change[1]) - visible.y
            ypos1 = textview.get_y_for_line_num(change[2]) - visible.y

            context.rectangle(-0.5, ypos0 - 0.5, width + 1, ypos1 - ypos0)
            if change[1] != change[2]:
                context.set_source_rgb(*self.fill_colors[change[0]])
                context.fill_preserve()
                if self.linediffer.locate_chunk(pane, change[1])[0] == self.cursor.chunk:
                    context.set_source_rgba(1.0, 1.0, 1.0, 0.5)
                    context.fill_preserve()

            context.set_source_rgb(*self.line_colors[change[0]])
            context.stroke()

        if textview.is_focus() and self.cursor.line is not None:
            it = self.textbuffer[pane].get_iter_at_line(self.cursor.line)
            ypos, line_height = self.textview[pane].get_line_yrange(it)
            context.set_source_rgba(1, 1, 0, .25)
            context.rectangle(0, ypos - visible.y, width, line_height)
            context.fill()

        current_time = glib.get_current_time()
        new_anim_chunks = []
        for c in self.animating_chunks[pane]:
            percent = min(1.0, (current_time - c.start_time) / c.duration)
            rgba_pairs = zip(c.start_rgba, c.end_rgba)
            rgba = [s + (e - s) * percent for s, e in rgba_pairs]

            it = self.textbuffer[pane].get_iter_at_mark(c.start_mark)
            ystart, _ = self.textview[pane].get_line_yrange(it)
            it = self.textbuffer[pane].get_iter_at_mark(c.end_mark)
            yend, _ = self.textview[pane].get_line_yrange(it)
            if ystart == yend:
                ystart -= 1

            context.set_source_rgba(*rgba)
            context.rectangle(0, ystart - visible.y, width, yend - ystart)
            context.fill()

            if current_time <= c.start_time + c.duration:
                new_anim_chunks.append(c)
            else:
                self.textbuffer[pane].delete_mark(c.start_mark)
                self.textbuffer[pane].delete_mark(c.end_mark)
        self.animating_chunks[pane] = new_anim_chunks

        if self.animating_chunks[pane] and self.anim_source_id[pane] is None:
            def anim_cb():
                textview.queue_draw()
                return True
            # Using timeout_add interferes with recalculation of inline
            # highlighting; this mechanism could be improved.
            self.anim_source_id[pane] = gobject.idle_add(anim_cb)
        elif not self.animating_chunks[pane] and self.anim_source_id[pane]:
            gobject.source_remove(self.anim_source_id[pane])
            self.anim_source_id[pane] = None

        if event.window == textview.get_window(gtk.TEXT_WINDOW_LEFT):
            self.in_nested_textview_gutter_expose = True
            textview.emit("expose-event", event)

    def _get_filename_for_saving(self, title):
        dialog = gtk.FileChooserDialog(title,
            parent=self.widget.get_toplevel(),
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                     gtk.STOCK_OK, gtk.RESPONSE_OK))
        dialog.set_default_response(gtk.RESPONSE_OK)
        response = dialog.run()
        filename = None
        if response == gtk.RESPONSE_OK:
            filename = dialog.get_filename()
        dialog.destroy()
        if filename:
            if os.path.exists(filename):
                response = misc.run_dialog(
                    _('"%s" exists!\nOverwrite?') % os.path.basename(filename),
                    parent=self,
                    buttonstype=gtk.BUTTONS_YES_NO)
                if response == gtk.RESPONSE_NO:
                    return None
            return filename
        return None

    def _save_text_to_filename(self, filename, text):
        try:
            open(filename, "wb").write(text)
        except IOError, e:
            misc.run_dialog(
                _("Error writing to %s\n\n%s.") % (filename, e),
                self, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK)
            return False
        return True

    def save_file(self, pane, saveas=0):
        buf = self.textbuffer[pane]
        bufdata = self.bufferdata[pane]
        if saveas or not bufdata.filename:
            filename = self._get_filename_for_saving(
                _("Choose a name for buffer %i.") % (pane+1))
            if filename:
                bufdata.filename = bufdata.label = os.path.abspath(filename)
                self.fileentry[pane].set_filename(bufdata.filename)
                self.fileentry[pane].prepend_history(bufdata.filename)
            else:
                return False
        start, end = buf.get_bounds()
        text = unicode(buf.get_text(start, end, False), 'utf8')
        if bufdata.newlines:
            if type(bufdata.newlines) == type(""):
                if bufdata.newlines != '\n':
                    text = text.replace("\n", bufdata.newlines)
            elif type(bufdata.newlines) == type(()):
                buttons = {'\n': ("UNIX (LF)", 0),
                           '\r\n': ("DOS (CR-LF)", 1),
                           '\r': ("MAC (CR)", 2)}
                newline = misc.run_dialog(
                    _("This file '%s' contains a mixture of line endings.\n\nWhich format would you like to use?") % bufdata.label,
                    self, gtk.MESSAGE_WARNING,
                    buttonstype=gtk.BUTTONS_CANCEL,
                    extrabuttons=[buttons[b] for b in bufdata.newlines])
                if newline < 0:
                    return
                for k, v in buttons.items():
                    if v[1] == newline:
                        bufdata.newlines = k
                        if k != '\n':
                            text = text.replace('\n', k)
                        break
        if bufdata.encoding:
            try:
                text = text.encode(bufdata.encoding)
            except UnicodeEncodeError:
                if misc.run_dialog(
                    _("'%s' contains characters not encodable with '%s'\nWould you like to save as UTF-8?") % (bufdata.label, bufdata.encoding),
                    self, gtk.MESSAGE_ERROR, gtk.BUTTONS_YES_NO) != gtk.RESPONSE_YES:
                    return False
        save_to = bufdata.savefile or bufdata.filename
        if self._save_text_to_filename(save_to, text):
            self.emit("file-changed", save_to)
            self.undosequence.checkpoint(buf)
            return True
        else:
            return False

    def make_patch(self, *extra):
        dialog = patchdialog.PatchDialog(self)
        dialog.run()

    def set_buffer_writable(self, buf, yesno):
        pane = self.textbuffer.index(buf)
        self.bufferdata[pane].writable = yesno
        self.recompute_label()

    def set_buffer_modified(self, buf, yesno):
        pane = self.textbuffer.index(buf)
        self.bufferdata[pane].modified = yesno
        self.recompute_label()

    def save(self):
        pane = self._get_focused_pane()
        if pane >= 0:
            self.save_file(pane)

    def save_as(self):
        pane = self._get_focused_pane()
        if pane >= 0:
            self.save_file(pane, True)

    def save_all(self):
        for i in range(self.num_panes):
            if self.bufferdata[i].modified:
                self.save_file(i)

    def on_fileentry_activate(self, entry):
        if self.on_delete_event() != gtk.RESPONSE_CANCEL:
            files = [e.get_full_path() for e in self.fileentry[:self.num_panes]]
            self.set_files(files)
        return 1

    def _get_focused_pane(self):
        for i in range(self.num_panes):
            if self.textview[i].is_focus():
                return i
        return -1

    #
    # refresh and reload
    #

    def on_reload_activate(self, *extra):
        modified = [os.path.basename(b.label) for b in self.bufferdata if b.modified]
        if len(modified):
            message = _("Reloading will discard changes in:\n%s\n\nYou cannot undo this operation.") % "\n".join(modified)
            response = misc.run_dialog(message,
                parent=self,
                messagetype=gtk.MESSAGE_WARNING,
                buttonstype=gtk.BUTTONS_OK_CANCEL)
            if response != gtk.RESPONSE_OK:
                return
        files = [b.filename for b in self.bufferdata[:self.num_panes]]
        self.set_files(files)

    def on_refresh_activate(self, *extra):
        self.refresh_comparison()

    def queue_draw(self, junk=None):
        for t in self.textview:
            t.queue_draw()
        for i in range(self.num_panes-1):
            self.linkmap[i].queue_draw()
        self.diffmap0.queue_draw()
        self.diffmap1.queue_draw()

    def on_action_lock_scrolling_toggled(self, action):
        self.toggle_scroll_lock(action.get_active())

    def on_lock_button_toggled(self, button):
        self.toggle_scroll_lock(not button.get_active())

    def toggle_scroll_lock(self, locked):
        icon_name = "meld-locked" if locked else "meld-unlocked"
        self.lock_button_image.props.icon_name = icon_name
        self.lock_button.set_active(not locked)
        self.actiongroup.get_action("LockScrolling").set_active(locked)
        self._scroll_lock = not locked

    #
    # scrollbars
    #

    def _sync_hscroll(self, adjustment):
        if self._sync_hscroll_lock or self._scroll_lock:
            return
        self._sync_hscroll_lock = True
        val = adjustment.get_value()
        for sw in self.scrolledwindow[:self.num_panes]:
            adj = sw.get_hadjustment()
            if adj is not adjustment:
                adj.set_value(val)
        self._sync_hscroll_lock = False

    def _sync_vscroll(self, adjustment, master):
        # only allow one scrollbar to be here at a time
        if self._sync_vscroll_lock:
            return
        if not self._scroll_lock and (self.keymask & MASK_SHIFT) == 0:
            self._sync_vscroll_lock = True
            syncpoint = 0.5

            # the line to search for in the 'master' text
            master_y = adjustment.value + adjustment.page_size * syncpoint
            it = self.textview[master].get_line_at_y(int(master_y))[0]
            line_y, height = self.textview[master].get_line_yrange(it)
            line = it.get_line() + ((master_y-line_y)/height)

            # scrollbar influence 0->1->2 or 0<-1->2 or 0<-1<-2
            scrollbar_influence = ((1, 2), (0, 2), (1, 0))

            for i in scrollbar_influence[master][:self.num_panes - 1]:
                adj = self.scrolledwindow[i].get_vadjustment()
                mbegin, mend = 0, self.textbuffer[master].get_line_count()
                obegin, oend = 0, self.textbuffer[i].get_line_count()
                # look for the chunk containing 'line'
                for c in self.linediffer.pair_changes(master, i):
                    if c[1] >= line:
                        mend = c[1]
                        oend = c[3]
                        break
                    elif c[2] >= line:
                        mbegin, mend = c[1], c[2]
                        obegin, oend = c[3], c[4]
                        break
                    else:
                        mbegin = c[2]
                        obegin = c[4]
                fraction = (line - mbegin) / ((mend - mbegin) or 1)
                other_line = (obegin + fraction * (oend - obegin))
                it = self.textbuffer[i].get_iter_at_line(int(other_line))
                val, height = self.textview[i].get_line_yrange(it)
                val -= (adj.page_size) * syncpoint
                val += (other_line-int(other_line)) * height
                val = min(max(val, adj.lower), adj.upper - adj.page_size)
                adj.set_value(val)

                # If we just changed the central bar, make it the master
                if i == 1:
                    master, line = 1, other_line
            self._sync_vscroll_lock = False

        for lm in self.linkmap:
            if lm.window:
                lm.window.invalidate_rect(None, True)
                lm.window.process_updates(True)

    def set_num_panes(self, n):
        if n != self.num_panes and n in (1, 2, 3):
            self.num_panes = n
            toshow = self.scrolledwindow[:n] + self.fileentry[:n]
            toshow += self.vbox[:n] + self.msgarea_mgr[:n]
            toshow += self.linkmap[:n-1] + self.diffmap[:n]
            map(lambda x: x.show(), toshow)

            tohide = self.statusimage + self.scrolledwindow[n:] + self.fileentry[n:]
            tohide += self.vbox[n:] + self.msgarea_mgr[n:]
            tohide += self.linkmap[n-1:] + self.diffmap[n:]
            map(lambda x: x.hide(), tohide)

            self.actiongroup.get_action("MakePatch").set_sensitive(n > 1)
            self.actiongroup.get_action("CycleDocuments").set_sensitive(n > 1)

            def coords_iter(i):
                buf_index = 2 if i == 1 and self.num_panes == 3 else i
                get_end_iter = self.textbuffer[buf_index].get_end_iter
                get_iter_at_line = self.textbuffer[buf_index].get_iter_at_line
                get_line_yrange = self.textview[buf_index].get_line_yrange

                def coords_by_chunk():
                    y, h = get_line_yrange(get_end_iter())
                    max_y = float(y + h)
                    for c in self.linediffer.single_changes(i):
                        y0, _ = get_line_yrange(get_iter_at_line(c[1]))
                        if c[1] == c[2]:
                            y, h = y0, 0
                        else:
                            y, h = get_line_yrange(get_iter_at_line(c[2] - 1))
                        yield c[0], y0 / max_y, (y + h) / max_y
                return coords_by_chunk

            colour_map = {
                "conflict": (1.0, 0.75294117647058822, 0.79607843137254897),
                "insert": (0.75686274509803919, 1.0, 0.75686274509803919),
                "replace": (0.8666666666666667, 0.93333333333333335, 1.0),
                "delete": (0.75686274509803919, 1.0, 0.75686274509803919)
            }

            for (w, i) in zip(self.diffmap, (0, self.num_panes - 1)):
                scroll = self.scrolledwindow[i].get_vscrollbar()
                w.setup(scroll, coords_iter(i), colour_map)

            for (w, i) in zip(self.linkmap, (0, self.num_panes - 2)):
                w.associate(self, self.textview[i], self.textview[i + 1])

            for i in range(self.num_panes):
                if self.bufferdata[i].modified:
                    self.statusimage[i].show()
            self.queue_draw()
            self.recompute_label()

    def next_diff(self, direction):
        pane = self._get_focused_pane()
        if pane == -1:
            if len(self.textview) > 1:
                pane = 1
            else:
                pane = 0
        buf = self.textbuffer[pane]

        if direction == gtk.gdk.SCROLL_DOWN:
            target = self.cursor.next
        else: # direction == gtk.gdk.SCROLL_UP
            target = self.cursor.prev

        if target is None:
            return

        c = self.linediffer.get_chunk(target, pane)
        if c:
            # Warp the cursor to the first line of next chunk
            if self.cursor.line != c[1]:
                buf.place_cursor(buf.get_iter_at_line(c[1]))
            self.textview[pane].scroll_to_mark(buf.get_insert(), 0.1)

    def copy_chunk(self, src, dst, chunk, copy_up):
        b0, b1 = self.textbuffer[src], self.textbuffer[dst]
        start = get_iter_at_line_or_eof(b0, chunk[1])
        end = get_iter_at_line_or_eof(b0, chunk[2])
        t0 = unicode(b0.get_text(start, end, False), 'utf8')

        if copy_up:
            if chunk[2] >= b0.get_line_count() and \
               chunk[3] < b1.get_line_count():
                # TODO: We need to insert a linebreak here, but there is no
                # way to be certain what kind of linebreak to use.
                t0 = t0 + "\n"
            dst_start = get_iter_at_line_or_eof(b1, chunk[3])
            mark0 = b1.create_mark(None, dst_start, True)
            new_end = buffer_insert(b1, chunk[3], t0)
        else: # copy down
            dst_start = get_iter_at_line_or_eof(b1, chunk[4])
            mark0 = b1.create_mark(None, dst_start, True)
            new_end = buffer_insert(b1, chunk[4], t0)

        mark1 = b1.create_mark(None, new_end, True)
        # FIXME: If the inserted chunk ends up being an insert chunk, then
        # this animation is not visible; this happens often in three-way diffs
        rgba0 = self.fill_colors['insert'] + (1.0,)
        rgba1 = self.fill_colors['insert'] + (0.0,)
        anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
        self.animating_chunks[dst].append(anim)

    def replace_chunk(self, src, dst, chunk):
        b0, b1 = self.textbuffer[src], self.textbuffer[dst]
        src_start = get_iter_at_line_or_eof(b0, chunk[1])
        src_end = get_iter_at_line_or_eof(b0, chunk[2])
        dst_start = get_iter_at_line_or_eof(b1, chunk[3])
        dst_end = get_iter_at_line_or_eof(b1, chunk[4])
        t0 = unicode(b0.get_text(src_start, src_end, False), 'utf8')
        mark0 = b1.create_mark(None, dst_start, True)
        self.on_textbuffer__begin_user_action()
        b1.delete(dst_start, dst_end)
        new_end = buffer_insert(b1, chunk[3], t0)
        self.on_textbuffer__end_user_action()
        mark1 = b1.create_mark(None, new_end, True)
        # FIXME: If the inserted chunk ends up being an insert chunk, then
        # this animation is not visible; this happens often in three-way diffs
        rgba0 = self.fill_colors['insert'] + (1.0,)
        rgba1 = self.fill_colors['insert'] + (0.0,)
        anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
        self.animating_chunks[dst].append(anim)

    def delete_chunk(self, src, chunk):
        b0 = self.textbuffer[src]
        it = get_iter_at_line_or_eof(b0, chunk[1])
        if chunk[2] >= b0.get_line_count():
            it.backward_char()
        b0.delete(it, get_iter_at_line_or_eof(b0, chunk[2]))
        mark0 = b0.create_mark(None, it, True)
        mark1 = b0.create_mark(None, it, True)
        # TODO: Need a more specific colour here; conflict is wrong
        rgba0 = self.fill_colors['conflict'] + (1.0,)
        rgba1 = self.fill_colors['conflict'] + (0.0,)
        anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
        self.animating_chunks[src].append(anim)

################################################################################
#
# Local Functions
#
################################################################################

class MeldBufferData(object):
    __slots__ = ("modified", "writable", "filename", "savefile", "label",
                 "encoding", "newlines")

    def __init__(self, filename=None):
        self.modified = 0
        self.writable = 1
        self.filename = filename
        self.savefile = None
        self.label = filename
        self.encoding = None
        self.newlines = None

class BufferAction(object):
    """A helper to undo/redo text insertion/deletion into/from a text buffer"""
    def __init__(self, buf, offset, text):
        self.buffer = buf
        self.offset = offset
        self.text = text

    def delete(self):
        start = self.buffer.get_iter_at_offset(self.offset)
        end = self.buffer.get_iter_at_offset(self.offset + len(self.text))
        self.buffer.delete(start, end)

    def insert(self):
        start = self.buffer.get_iter_at_offset(self.offset)
        self.buffer.insert(start, self.text)

class BufferInsertionAction(BufferAction):
    undo = BufferAction.delete
    redo = BufferAction.insert

class BufferDeletionAction(BufferAction):
    undo = BufferAction.insert
    redo = BufferAction.delete
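The BufferAction classes above implement undo/redo by replaying offset-based edits against whatever buffer object they are given. A minimal sketch of that round trip follows; FakeBuffer is purely illustrative (the real object is a gtk.TextBuffer) and only mimics the three methods the actions call:

# Stand-in for gtk.TextBuffer; FakeBuffer is an assumption for illustration.
class FakeBuffer(object):
    def __init__(self, text=u""):
        self.text = text
    def get_iter_at_offset(self, offset):
        # Real iters are opaque; plain integer offsets suffice here.
        return offset
    def delete(self, start, end):
        self.text = self.text[:start] + self.text[end:]
    def insert(self, start, text):
        self.text = self.text[:start] + text + self.text[start:]

buf = FakeBuffer(u"hello world")
action = BufferInsertionAction(buf, 5, u", brave")
action.redo()                           # replays the insertion
assert buf.text == u"hello, brave world"
action.undo()                           # deletes the same span again
assert buf.text == u"hello world"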
In metropolitan cities, there are many nightclubs where fun lovers indulge and have a good time. For many, the concern is not what to do on a night in town but how to pick from the large selection a city like Port Harcourt or Abuja has to offer. Jovago.com, Africa's No 1 hotel booking portal, rounds up some tips on how to choose a groovy nightclub.

Finding out information about the club is important for choosing a venue where you can indulge. One of the best places to go searching for information is the club's website. From the website, you can gather hints on how activities are conducted there, as well as make budget plans for the evening.

Where is the club located? How accessible is the club by road? These are pertinent questions you must ask. Before heading out for the night, ensure that the road to the venue is not a route where you are liable to be attacked or run into other dangerous incidents. Preferably, go to a club that is in close proximity to your home, at most 30 minutes to an hour away.

Music is a key feature of every pub, so it would be a huge mistake to go to a club where the disc jockey dishes out songs you cannot dance to. The DJ must be able to play great songs in the genres you enjoy, which will definitely attract people to the club. Also, the bar must be very classy, replete with various drink brands.

In choosing a nightclub, the security of lives must be guaranteed. No nightclub visitor should be susceptible to any attack or harassment.

If you have friends who love spending nights out in town, they can give you first-hand information which you can use to decide which nightclub to pick. Friends who have visited these clubs before can authoritatively tell you about the bar, the music and other things you feel are important. However, no matter what anyone says, don't go alone. Go with a friend and one other person who doesn't drink alcohol. He or she will be the 'wingman' who drives you and your friends home if you get tipsy.
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections

#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
#File for the MAD of the difference in flux of duplicates in each line
maddatapath = '/Users/blorenz/COSMOS/COSMOSData/linemad.txt'
#Merge our data with the UVISTA catalog
#The location of the muzzin et al data:
mdatapath = '/Users/blorenz/COSMOS/muzzin_data/UVISTA_final_colors_sfrs_v4.1.dat'

#Read in the mad of the lines
mad_df = ascii.read(maddatapath).to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()

#Fontsizes for plotting
axisfont = 18
ticksize = 16
titlefont = 24
legendfont = 16
textfont = 16

#Division function that returns 0 where the denominator is 0
def divz(X, Y):
    return X/np.where(Y, Y, Y+1)*np.not_equal(Y, 0)

#Plot of Haflux vs Hgflux of duplicates
l1 = sys.argv[1]
l2 = sys.argv[2]
sig = int(sys.argv[3])

minlim = 0.005
maxlim = 1000

lines = [l1, l2]

def findgoodfits(pd_df=fluxdata, lines=lines, sig=sig):
    #Objects whose fits are unflagged in every requested line
    goodidxs = [(pd_df[line+'_flag'] == 0) for line in lines]
    #Objects whose scaled flux falls below sig times the line's MAD
    lowidxs = [(divz(pd_df[line+'_flux'], pd_df[line+'_scale']) < sig*mad_df[line+'_mad'][0]) for line in lines]
    goodidx = np.logical_and.reduce(goodidxs)
    lowidx = np.logical_or.reduce(lowidxs)
    badflux = pd_df[np.logical_not(goodidx)]
    lowflux = pd_df[np.logical_and(goodidx, lowidx)]
    goodflux = pd_df[np.logical_and(goodidx, np.logical_not(lowidx))]
    return goodflux, lowflux, badflux

goodflux, lowflux, badflux = findgoodfits()

yerr = mad_df[l2+'_mad'][0]
xerr = mad_df[l1+'_mad'][0]
xdata = divz(goodflux[l1+'_flux'], goodflux[l1+'_scale'])
ydata = divz(goodflux[l2+'_flux'], goodflux[l2+'_scale'])
xdatalow = divz(lowflux[l1+'_flux'], lowflux[l1+'_scale'])
ydatalow = divz(lowflux[l2+'_flux'], lowflux[l2+'_scale'])

lw = 0.25
mark = '.'
ms = 6

fig, ax = plt.subplots(figsize=(8, 7))
ax.errorbar(xdatalow, ydatalow, xerr=xerr, yerr=yerr, ls='None', lw=lw, ms=ms, color='grey', marker=mark)
ax.errorbar(xdata, ydata, xerr=xerr, yerr=yerr, ls='None', lw=lw, color='blue', ms=ms, marker=mark)
ax.plot((0, 1000), (0, 1000), color='black', ls='--')

#Titles, axes, legends
ax.set_title(l2 + ' Flux vs ' + l1 + ' Flux', fontsize=titlefont)
ax.legend(fontsize=legendfont)
ax.set_xlabel(l1 + ' Flux ($10^{-17}$ erg/s/$cm^2$)', fontsize=axisfont)
ax.set_ylabel(l2 + ' Flux ($10^{-17}$ erg/s/$cm^2$)', fontsize=axisfont)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlim(minlim, maxlim)
ax.set_ylim(minlim, maxlim)
ax.tick_params(labelsize=ticksize)
plt.show()
fig.savefig(figout + 'Flux_' + l2 + '_' + l1 + '.pdf')
plt.close(fig)
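The divz helper above guards against division by zero by swapping in a dummy denominator and then zeroing the affected entries. A standalone check of that behavior (numpy only; the input values are illustrative):

import numpy as np

def divz(X, Y):
    # Divide X by Y, returning 0 wherever Y == 0 instead of inf/nan.
    return X / np.where(Y, Y, Y + 1) * np.not_equal(Y, 0)

x = np.array([1.0, 2.0, 3.0])
y = np.array([2.0, 0.0, 4.0])
print(divz(x, y))   # [0.5  0.   0.75] -- the zero denominator maps to 0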
Passenger car drivers are required to follow the rules of the road to protect us and make sure we arrive safely. Semi truck drivers are in a different situation because they are driving trucks that can weigh up to 80,000 pounds and can devastate anything in their path. Because of the serious injury potential big rigs pose to the public, a commercial or professional truck driver has certain obligations under the safety rules that may not apply to you or me.

One of the areas that can be of critical importance to safety on the highway is a situation where one is confronted with adverse weather. Whether it comes from fog, rain, sleet or snow, adverse weather can be critically dangerous, and specific rules apply to truck drivers confronted with the situation. Recently, on Interstate 10 outside of Beaumont, Texas, over 100 vehicles piled up due to foggy weather conditions early in the morning. While it is a good idea for anyone confronted with adverse weather to pull over and wait for the situation to improve, truck drivers are required to discontinue the trip if they are confronted with dangerous conditions. Any truck driver who is subject to the Federal Motor Carrier Safety Regulations has the obligation to reduce speed when confronted with adverse weather and to pull off the roadway and discontinue travel when the situation is sufficiently dangerous.

Such a condition obviously existed when the recent 100-car pileup occurred outside of Beaumont, Texas. Weather conditions, specifically the fog, had to be very dangerous for that many vehicles to have crashed. There are reports indicating that over 1 mile of highway was covered with wrecked vehicles. Over 80 people were injured and at least 2 people were killed in the deadly accident. While the initial reports do not indicate any cause of the crash other than the fog, it is highly likely that some of the crashes could have been prevented if the professional truck drivers had followed the safety regulations.

If you have lost a family member in a truck accident, take advantage of a free consultation with a Houston semi truck wreck attorney.
#
# Deltaic - an efficient backup system supporting multiple data sources
#
# Copyright (c) 2014 Carnegie Mellon University
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

from datetime import datetime, date
import os
import Queue
import subprocess
import sys
from threading import Thread

from ..command import get_cmdline_for_subcommand
from ..util import make_dir_path

class Unit(object):
    def __init__(self):
        self.root = None
        self.backup_args = None

    def __str__(self):
        return self.root

class Task(object):
    DATE_FMT = '%Y%m%d'
    LOG_EXCERPT_INPUT_BYTES = 8192
    LOG_EXCERPT_MAX_BYTES = 4096
    LOG_EXCERPT_MAX_LINES = 10

    def __init__(self, thread_count, units):
        self._queue = Queue.Queue()
        for unit in units:
            self._queue.put(unit)
        self._success = True
        self._threads = [Thread(target=self._worker)
                for i in range(thread_count)]

    def start(self):
        for thread in self._threads:
            thread.start()

    def _worker(self):
        while True:
            try:
                unit = self._queue.get_nowait()
            except Queue.Empty:
                return
            try:
                if not self._execute(unit):
                    self._success = False
            except:
                self._success = False
                raise

    def _execute(self, unit):
        raise NotImplementedError

    def wait(self):
        for thread in self._threads:
            thread.join()
        return self._success

    def _run_subcommand(self, name, args, log_dir):
        log_base = os.path.join(log_dir, date.today().strftime(self.DATE_FMT))
        timestamp = lambda: datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        sys.stdout.write('Starting %s\n' % name)
        command = get_cmdline_for_subcommand(args)
        with open('/dev/null', 'r+') as null:
            with open(log_base + '.err', 'a') as err:
                with open(log_base + '.out', 'a') as out:
                    for fh in out, err:
                        fh.write('# Starting task at %s\n' % timestamp())
                        fh.write('# %s\n' % ' '.join(command))
                        fh.flush()
                    ret = subprocess.call(command, stdin=null, stdout=out,
                            stderr=err, close_fds=True)
                    for fh in out, err:
                        if ret < 0:
                            fh.write('# Task died on signal %d\n' % -ret)
                        else:
                            fh.write('# Task exited with status %d\n' % ret)
                        fh.write('# Ending task at %s\n\n' % timestamp())
        if ret:
            with open(log_base + '.err') as err:
                # Read LOG_EXCERPT_INPUT_BYTES
                err.seek(0, 2)
                start = max(0, err.tell() - self.LOG_EXCERPT_INPUT_BYTES)
                err.seek(start)
                excerpt = err.read(self.LOG_EXCERPT_INPUT_BYTES).strip()
                truncated = start > 0
            # Drop exception backtraces
            accept = True
            excerpt_lines = []
            for line in excerpt.split('\n'):
                if accept:
                    if line == 'Traceback (most recent call last):':
                        accept = False
                    else:
                        excerpt_lines.append(line)
                elif not line.startswith(' '):
                    accept = True
                    excerpt_lines.append(line)
            # Reduce to LOG_EXCERPT_MAX_BYTES
            excerpt = '\n'.join(excerpt_lines)
            if len(excerpt) > self.LOG_EXCERPT_MAX_BYTES:
                excerpt = excerpt[-self.LOG_EXCERPT_MAX_BYTES:]
                truncated = True
            # Reduce to LOG_EXCERPT_MAX_LINES
            excerpt_lines = excerpt.split('\n')
            if len(excerpt_lines) > self.LOG_EXCERPT_MAX_LINES:
                excerpt_lines = excerpt_lines[-self.LOG_EXCERPT_MAX_LINES:]
                truncated = True
            # Add truncation indicator
            if truncated:
                excerpt_lines.insert(0, '[...]')
            # Serialize
            excerpt = '\n'.join(' ' * 3 + l for l in excerpt_lines)
            sys.stderr.write('Failed: %s\n   %s\n%s\n' % (name,
                    ' '.join(command), excerpt))
        sys.stdout.write('Ending %s\n' % name)
        return ret == 0

class _SourceBackupTask(Task):
    def __init__(self, settings, thread_count, units):
        Task.__init__(self, thread_count, units)
        self._settings = settings

    def _execute(self, unit):
        log_dir = make_dir_path(self._settings['root'], 'Logs', unit.root)
        return self._run_subcommand(unit.root, unit.backup_args, log_dir)

class Source(object):
    def __init__(self, config):
        self._settings = config.get('settings', {})
        self._manifest = config.get(self.LABEL, {})

    @classmethod
    def get_sources(cls):
        sources = {}
        for subclass in cls.__subclasses__():
            if hasattr(subclass, 'LABEL'):
                sources[subclass.LABEL] = subclass
        return sources

    def get_units(self):
        raise NotImplementedError

    def get_backup_task(self):
        thread_count = self._settings.get('%s-workers' % self.LABEL, 1)
        return _SourceBackupTask(self._settings, thread_count,
                self.get_units())

# Now import submodules that need these definitions
from . import coda, github, rbd, rgw, rsync
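For context, a minimal sketch of how the Task pool above is meant to be driven: subclass it, implement _execute, then start() and wait(). EchoTask and its unit list are assumptions for illustration, not part of Deltaic:

import sys

# Hypothetical Task subclass; assumes the Task class above is importable.
class EchoTask(Task):
    def _execute(self, unit):
        # Pretend to back up one unit; returning True reports success.
        sys.stdout.write('backing up %s\n' % unit)
        return True

task = EchoTask(2, ['alpha', 'beta', 'gamma'])  # two worker threads
task.start()
print task.wait()  # True only if every unit's _execute returned True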
Last night during our birthday dinner date at Le Coq Au Vin, Ryan and I talked a lot about our upcoming wedding and the planning process. We discussed the things that have surprised us most, and both admitted that we thought I would be a little more stressed throughout the process. To be honest, I kind of surprised myself with my laid-back attitude toward our big day, since I am such a planner and really like to have all the details of any event completely drawn out. I will say that I feel like once we chose our venue and major vendors (florist, photographer), we kind of coasted for several months. About a month ago – two months out from our big day – the momentum started picking up again. Now I still feel rather relaxed, but definitely have an ongoing list of wedding-related to-dos each and every single day.

Today's task was our wedding welcome gifts! We want to have a little treat waiting for our guests when they arrive at the hotel where our wedding is being held. Our wedding is taking place on Halloween weekend, but our actual wedding day will not feature any orange and black. We figured we'd use the welcome gifts as a way to recognize the holiday, with treat bags full of candy corn and Reese's Pieces. I made the gifts by stamping papaya-colored cardstock (about the size of a business card) with a Happy Halloween stamp. I then punched a hole in the top of the card with an adorable Martha Stewart heart-shaped hole puncher and tied the card on with orange and yellow ribbon. Ryan was a great help and filled lots of bags with candy and punched lots of heart-shaped holes.

I'm not gonna lie, lunch was candy corn and Reese's Pieces. I intended to eat a real lunch, but in the process of crafting, I think I ate my entire body weight in candy and started to feel ill from all of the sugar. Around 3 p.m. I still didn't feel hungry but figured I should get something nutritious into my body and made myself a smoothie. I drank about 3/4 of it before feeling a little ill again. Apparently my body is still in sugar overload mode! The rest of the day will include a pit stop at the craft store for more ribbon so I can finish off the welcome gifts. We will also be getting Ryan fitted for his tux!

Such a cute idea!! I'd have difficulty controlling myself around the candy too 🙂 Hope you have a great end to your weekend!

Love the welcome gift ideas – super cute!! I CAN'T believe how close your wedding is, I'm BEYOND excited for you!!

I would do the exact same thing! You put candy in front of me, I'll eat it, no questions. haha. you're so cute! I love the candy corn and Reese's Pieces combination! i bought some reeses pieces yesterday! Next stop: candy corn!

I would definitely eat a ton of them as well. It's good to know I'm not the only one who accidentally keeps eating too much sugar and then BAM: ehhh. Sometimes i feel like some people have perfect eating habits: but it's just not true. and that's a good thing! Those welcome bags look super super cute! P.S. Isn't it weird how girls can see guys in their tux before the wedding day, but guys can't see girls in their dress?!

It will be harder to resist this year because I am on a (mostly) clean eating/fitness lifestyle change.

Such a great idea! You can keep the candy corn, but as for the Reese's... they're all mine! 🙂

haha you just made me smile! i live in Tampa and i am thinking of moving to Orlando in december. which do you prefer? do you like Orlando better than st pete??

i love st. pete, but i think at this point in my life i do prefer orlando. there is ALWAYS something fun to do, and it is really close to winter park, a really nice area with cute shops/restaurants, and the downtown area is also close to the amusement parks & some more touristy attractions (about 25 min. away). i think they're both wonderful places to live, but for me, orlando is a better fit right now.

Really cute gift idea! Love fall weddings!

Love the welcome gifts! Such a great idea! I know that feeling…my fave is candy corn and peanuts…yum yum!!!

Super cute welcome gifts! I think it's a great idea to do the Halloween theme as the welcome gift! It's seasonal and tasteful! You are great at this Julie!

That is such a cute idea! I love Reese's Pieces! I love the peanut butter cups more, but only the large ones. The small ones just don't do it for me!
from pen import Pen
from math import sin, cos, pi, sqrt
from random import random

class Shrapnel(Pen):
    def __init__(self, matrix, motion_cycles, huedelta=0.001, saturation=1,
                 radius=0, decelerate=False):
        self.centerx = matrix.width/2.0
        self.centery = matrix.height/2.0
        self.cycles = motion_cycles
        self.decelerate = decelerate

        # we will reset some params to sensible values in a minute, so let's
        # not fuss with x, y, dx, dy now
        super(Shrapnel, self).__init__(
            matrix.width, matrix.height,
            0, 0, 0, 0,
            huedelta=huedelta,
            saturation=saturation,
            radius=radius
        )
        super(Shrapnel, self).setBumpStrategy(self._pause, x=True, y=True)
        self.reset(matrix)

    def _pause(self, x=None, y=None):
        self.paused = True

    def reset(self, matrix):
        # the furthest distance any pen will have to travel is on the diagonal
        w, h = matrix.width, matrix.height
        maxDimension = sqrt(w*w + h*h)

        # slowest pens need to cover the distance in cycles time, but there may
        # be some that go faster
        velocity = maxDimension/(2.0*self.cycles) + 0.05*random()*maxDimension
        angle = random()*2*pi
        self.dx = velocity * sin(angle)
        self.dy = velocity * cos(angle)
        self.x = self.centerx
        self.y = self.centery
        self.paused = False

    def clock(self, matrix):
        super(Shrapnel, self).clock(matrix)

        # optionally slow over time
        # XXX: this may cause problems for larger spans?
        if self.decelerate:
            self.dx *= 0.99
            self.dy *= 0.99

        return self.paused
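The reset() above converts a random speed/angle pair into cartesian velocity components. A standalone check (stdlib only; the speed value is illustrative) that the polar-to-cartesian decomposition preserves the pen's speed for any angle:

from math import sin, cos, pi, sqrt
from random import random

velocity = 1.5
angle = random() * 2 * pi
dx = velocity * sin(angle)
dy = velocity * cos(angle)
# sin^2 + cos^2 == 1, so the components recombine to the original speed.
assert abs(sqrt(dx * dx + dy * dy) - velocity) < 1e-9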
I joined the Minnesota Population Center in 2010 as a research assistant and became full-time research staff in 2014. I previously worked on the 1960 Data Restoration Project, and currently work on the Complete Count Census Data 1790-1940 projects and administer the restricted versions of the Complete Count Census Data 1790-1940 for approved users. My research focuses on agricultural labor dynamics and kinship relations between families since the nineteenth century. Specifically, I use the restricted Complete Count Census Data 1790-1940 to measure kin propinquity: describing its long-run decline, exploring explanations for the decline, and improving methods for measuring it. I also use the complete count data to describe enumerator bias in occupational reporting for women and children on farms.
# -*- coding: utf-8 -*-
"""
Page sizes and various mechanisms for manipulating them.
"""
import math
import collections

from .units import mm, inch

# ----------------------------------------------------------------------------
# Page size tuple.
# ----------------------------------------------------------------------------

class PaperSize(collections.namedtuple('PaperSize', 'width height')):
    """The size of a piece of paper.

    This class inherits from a named tuple and has an empty ``__slots__``
    property, so it is immutable and inextensible. It is used, rather
    than a raw (width, height) tuple, to allow additional methods to be
    defined."""
    __slots__ = ()

    @classmethod
    def from_mm(Class, width_in_mm, height_in_mm):
        """Convert from width and height in mm into standard pts."""
        return Class(width_in_mm*mm, height_in_mm*mm)

    @classmethod
    def from_inch(Class, width_in_inch, height_in_inch):
        """Convert from width and height in inches into standard pts."""
        return Class(width_in_inch*inch, height_in_inch*inch)

    @classmethod
    def from_ratio(Class, width=None, height=None, ratio=1.0):
        """Create a new paper size from the given ratio and one dimension.

        Arguments:

        ``ratio``
            The ratio of the height to the width of the resulting page.
            So a ratio of 1.5 (i.e. 3:2) will be 1.5x as tall as it is
            wide. Note that the ``ratio`` property returns the ratio of
            long to short side, not height to width. For the same ratio,
            therefore, this function will generate a paper in portrait
            orientation. The ``papersizes.ratios`` module provides a
            series of common ratios.
        """
        if width is None:
            if height is None:
                raise ValueError('width or height must be given')
            else:
                return Class(height / ratio, height)
        else:
            if height is None:
                return Class(width, width * ratio)
            else:
                raise ValueError('only one of width or height may be given')

    @property
    def area_in_sq_pts(self):
        """The area of this paper."""
        return self.width * self.height

    @property
    def ratio(self):
        """The ratio of long to short side."""
        if self.width > self.height:
            return self.width / self.height
        else:
            return self.height / self.width

    def landscape(self):
        """Return a version of this paper size in landscape orientation."""
        if self.width >= self.height:
            return self
        else:
            return self.flip()

    def portrait(self):
        """Return a version of this paper size in portrait orientation."""
        if self.width <= self.height:
            return self
        else:
            return self.flip()

    def flip(self):
        """Return a version of this paper size with dimensions reversed."""
        return PaperSize(self.height, self.width)

    def half(self):
        """Paper half the size of this, cut parallel to the short edge.

        If the original paper is portrait, the returned paper will also be
        portrait, and vice versa.
        """
        if self.height < self.width:
            if self.height > self.width / 2:
                return PaperSize(self.height, self.width / 2)
            else:
                return PaperSize(self.width / 2, self.height)
        else:
            if self.width > self.height / 2:
                return PaperSize(self.height / 2, self.width)
            else:
                return PaperSize(self.width, self.height / 2)

    def small_square(self):
        """Return a square paper size using the smaller dimension."""
        if self.height < self.width:
            return PaperSize(self.height, self.height)
        elif self.height == self.width:
            return self
        else:
            return PaperSize(self.width, self.width)

    def large_square(self):
        """Return a square paper size using the larger dimension."""
        if self.height > self.width:
            return PaperSize(self.height, self.height)
        elif self.height == self.width:
            return self
        else:
            return PaperSize(self.width, self.width)

    def round_to_mm(self):
        """Return a paper size with dimensions rounded to the nearest mm."""
        return PaperSize(round(self.width / mm)*mm, round(self.height / mm)*mm)

    def is_landscape(self):
        """Check if this paper is landscape oriented.

        Square paper is neither landscape nor portrait."""
        return self.width > self.height

    def is_portrait(self):
        """Check if this paper is portrait oriented.

        Square paper is neither landscape nor portrait."""
        return self.width < self.height

    def is_square(self):
        """Check if this paper is square."""
        return self.width == self.height

    def is_approximately(self, other, tolerance=0.1*mm):
        """Check if the given paper size is roughly the same as this one.

        Arguments:

        ``other``
            The paper size to compare against. This can be given as any
            (width, height) tuple, it doesn't have to be a ``PaperSize``
            instance.
        """
        return abs(self.width - other[0]) <= tolerance and \
            abs(self.height - other[1]) <= tolerance

    def add_bleed(self, bleed):
        """Return a paper size with the given bleed added.

        Standard bleeds are 3mm internationally and 1/8" US. Large
        images and die cuts have a larger bleed."""
        if bleed != 0.0:
            return PaperSize(self.width + bleed*2.0, self.height + bleed*2.0)
        else:
            return self

    def as_pt_str(self):
        """Printable description of the size, to the nearest point."""
        return '{0:.0f}x{1:.0f}pt'.format(self.width, self.height)

    def as_mm_str(self):
        """Printable description of the size, to the nearest mm."""
        return '{0:.0f}x{1:.0f}mm'.format(self.width / mm, self.height / mm)

    def as_inch_str(self, unit='"'):
        """Printable description of the size, to the nearest ⅛ of an inch."""
        EIGHTHS = ('', '⅛', '¼', '⅜', '½', '⅝', '¾', '⅞')
        def _to_eight(val):
            val /= inch
            whole = math.floor(val)
            eighth = round(val * 8) % 8
            return '{0:.0f}{1}'.format(whole, EIGHTHS[eighth])
        return '{0}x{1}{2}'.format(
            _to_eight(self.width), _to_eight(self.height), unit)

    def __repr__(self):
        return 'PaperSize({0:f}, {1:f})'.format(self.width, self.height)

    def __str__(self):
        return '{0} ({1}, {2})'.format(
            self.as_pt_str(), self.as_mm_str(), self.as_inch_str())

# ----------------------------------------------------------------------------
# Page size generator.
# ----------------------------------------------------------------------------

class ISO269Series(object):
    """
    A set of paper sizes conforming to ISO 269.

    ISO 269 specifies tolerances of at least 1mm in page sizes and these
    are often used to make sure that each page size is an integer number
    of mm in each direction. So A4 is of width 210mm, although A0 is
    841mm wide. This breaks the normal halving rule, but is a widespread
    standard.

    Instances of this class can be used to retrieve paper sizes by using
    subscript notation: ``A[5]``, for example. There is no limit to the
    large (lower numbered) sizes that can be calculated in this way, but
    because this class always rounds to the nearest millimeter, very
    small paper sizes (high numbered) will be meaningless.

    Paper sizes returned by this class are portrait oriented.

    Arguments:

    ``initial_size``
        The 'reference' paper size for this series. This is usually a
        large size, most commonly the 0-size. This can be given as any
        (width, height) tuple, it doesn't have to be a ``PaperSize``
        instance.

    ``initial_number``
        The size number of the initial paper size given in the first
        argument.
    """
    def __init__(self, initial_size, initial_number=0):
        # We might be given a plain tuple, so don't use PaperSize.portrait
        if initial_size[0] > initial_size[1]:
            initial_size = initial_size[1], initial_size[0]

        # Store the size internally in mm, so we can do the simplification.
        initial_in_mm = round(initial_size[0] / mm), round(initial_size[1] / mm)
        self.cache = {initial_number: initial_in_mm}
        self.min_cached = initial_number
        self.max_cached = initial_number
        self.initial_size = initial_size
        self.initial_number = initial_number

    def __repr__(self):
        return "ISO 269 Series, {0} at size {1}".format(
            repr(self.initial_size), repr(self.initial_number))

    def __getitem__(self, size):
        if size not in self.cache:
            if size > self.max_cached:
                # We're smaller than the last value cached.
                last = self.cache[self.max_cached]
                for s in range(self.max_cached+1, size+1):
                    next = last[1] // 2, last[0]
                    self.cache[s] = next
                    last = next
                self.max_cached = size
            else:
                # We're larger than the initial.
                last = self.cache[self.min_cached]
                for s in range(self.min_cached-1, size-1, -1):
                    next = last[1], last[0] * 2
                    self.cache[s] = next
                    last = next
                self.min_cached = size
        # Cached data is in mm, so convert to pts.
        return PaperSize.from_mm(*self.cache[size])
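A short usage sketch of the classes above. The A0 reference size of 841 x 1189 mm is the standard ISO value; the printed results follow from the rounded-halving rule exactly as implemented:

# Build the ISO A series from a standard A0 sheet (841 x 1189 mm).
A = ISO269Series(PaperSize.from_mm(841, 1189))
print(A[4].as_mm_str())                  # 210x297mm, via repeated rounded halving
print(round(A[4].ratio, 3))              # 1.414, i.e. sqrt(2) within rounding
print(A[2].landscape().is_landscape())   # True

Because sizes are cached in whole millimeters, asking for a large size after a small one reuses the cached intermediate sizes rather than recomputing them.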
Over the 8 years we have been working in the lighting industry, we have consistently provided quality products and reliable solutions to our customers. In 2012 we expanded into the LED industry to offer modern and environmentally friendly products and solutions. Nowadays, we are a rapidly growing company, and we have been able to establish a showroom and shop front in Sydney. This caters to retail customers while continuing to serve growing customer, electrician and design company demands.

We strive for excellence in all avenues of our business. This excellence requires our accountability and accessibility for our products and services. We understand that customer service includes engineering, design, performance, reliability, delivery, communication, packaging, support and sustainability, and we are always looking for ways to improve the simplicity and ergonomics of our products.

All of our premium products are tested thoroughly for 2 days before leaving the factory, then tested again in our office in China before being sent to Australia. Our confidence in our testing policy is the reason we can provide 2- and 3-year warranties. The products on our shelves are all thoroughly tested to meet customer requirements, and we refuse to sell the ones that fail. We have tested over 5000 LED lights to make sure they match our high expectations, so we have the confidence to sell them to you.

LED Lighting Designs are proud distributors for Domus lighting, Sunny lighting, Martec, CLA lighting, Atom Lighting and Verbatim LED. If you would like a quote on any product from these companies, contact us and we will provide a competitive price.
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models

verbose_log = False

def log(msg):
    print(" %s" % msg)

def logv(msg):
    if verbose_log:
        log(msg)
    else:
        return

class Migration(DataMigration):

    def forwards(self, orm):
        """ This migration will make sure that every neuron that exists has
        exactly one skeleton linked to it. Neurons that are linked to no
        skeleton will be deleted. Neurons that are linked to multiple
        skeletons will be duplicated and each skeleton will be linked to one
        copy.
        """
        # Get IDs of 'model_of' relation and 'skeleton' class for every
        # project. Ignore the project, if these don't exist.
        for p in orm.Project.objects.all():
            log("Looking at project #%s" % p.id)
            try:
                model_of_rel = orm.Relation.objects.get(project=p,
                        relation_name='model_of')
                skeleton_cls = orm.Class.objects.get(project=p,
                        class_name='skeleton')
                neuron_cls = orm.Class.objects.get(project=p,
                        class_name='neuron')
            except orm.Relation.DoesNotExist:
                log("Project #%s doesn't have a 'model_of' relation. " \
                        "Ignoring it." % p.id)
                continue
            except orm.Class.DoesNotExist:
                log("Project #%s doesn't have a 'skeleton' or 'neuron' class. " \
                        "Ignoring it." % p.id)
                continue

            # Prefetching the 'cici_via_a' and 'cici_via_b' takes unfortunately
            # too much memory.
            neurons = orm.ClassInstance.objects.filter(project_id=p.id,
                    class_column_id=neuron_cls)
            log("Processing %s neurons" % len(neurons))

            # Calculate percentage output marks for output every 10%
            output_count = {}
            for i in range(10):
                output_count[i*(len(neurons)/10)] = i * 10
            output_count[len(neurons) - 1] = 100

            deleted_neurons = 0
            cloned_neurons = 0
            for i, n in enumerate(neurons):
                # Give some rough estimate where we are
                if i in output_count:
                    log("%s%%" % output_count[i])
                # To copy all CICI links, the originals are needed
                from_links = n.cici_via_a.all()
                to_links = n.cici_via_b.all()
                # Skeleton links are 'to links': skeleton model_of neuron. Go
                # through them, delete all but the first and push deleted ones
                # to work list.
                skeleton_links = []
                other_to_links = []
                for l in to_links:
                    has_sk_rel = l.relation_id == model_of_rel.id
                    has_sk_cls = l.class_instance_a.class_column_id == skeleton_cls.id
                    if has_sk_rel and has_sk_cls:
                        skeleton_links.append(l)
                    else:
                        other_to_links.append(l)
                # Get number of linked skeletons
                nr_skeleton_links = len(skeleton_links)
                # Delete neurons that don't have any skeleton link
                if not nr_skeleton_links:
                    n.delete()
                    deleted_neurons = deleted_neurons + 1
                    continue
                # Skip all neurons that have exactly one skeleton linked
                elif nr_skeleton_links == 1:
                    continue
                # Clone all neurons that have more than one skeleton linked
                # and link one copy to one skeleton.
                logv("Expanding neuron #%s into %s clones." % (n.id, nr_skeleton_links))
                logv("  Original CICI (via a) link IDs: %s" % \
                        str([l.id for l in from_links]))
                logv("  Original CICI (via b) link IDs: %s" % \
                        str([l.id for l in to_links]))
                # Create skeleton_links - 1 clones
                for k in range(nr_skeleton_links - 1):
                    # Django will create a new copy of the object if the
                    # primary key is set to None.
                    n.pk = None
                    n.save()
                    # Explicitly re-create links
                    for l in from_links:
                        # Clone CICI link
                        l.pk = None
                        l.class_instance_a = n
                        l.save()
                    for l in other_to_links:
                        # Clone CICI link
                        l.pk = None
                        l.class_instance_b = n
                        l.save()
                    # Get a skeleton link, delete it from the original neuron
                    # and link it to the new neuron.
                    skeleton_l = skeleton_links.pop()
                    skeleton_l.delete()
                    skeleton_l.pk = None
                    skeleton_l.class_instance_b = n
                    skeleton_l.save()
                    # Output to compare IDs
                    logv("  Clone #%s CICI (via a) link IDs: %s" % \
                            (n.id, str([l.id for l in n.cici_via_a.all()])))
                    logv("  Clone #%s CICI (via b) link IDs: %s" % \
                            (n.id, str([l.id for l in n.cici_via_b.all()])))
                # Keep track of cloned neuron count
                cloned_neurons = cloned_neurons + 1

            action = "%s have been deleted." % deleted_neurons \
                    if deleted_neurons else "Nothing to delete."
            log("Found %s neuron(s) that had no skeleton linked. %s" % \
                    (deleted_neurons, action))
            action = "%s have been cloned." % cloned_neurons \
                    if cloned_neurons else "Nothing to do."
            log("Found %s neuron(s) that had multiple skeletons linked. %s" % \
                    (cloned_neurons, action))

            # Check if there are now only neurons with exactly one skeleton
            # linked.
            nr_neurons = orm.ClassInstance.objects.filter(project_id=p.id,
                    class_column_id=neuron_cls.id).count()
            skeleton_links = orm.ClassInstanceClassInstance.objects.filter(
                    project_id=p.id,
                    class_instance_a__class_column_id=skeleton_cls.id,
                    class_instance_b__class_column_id=neuron_cls.id,
                    relation_id=model_of_rel).values('class_instance_b').annotate(
                            sk_count=models.Count('class_instance_a'))
            for l in skeleton_links:
                if l['sk_count'] != 1:
                    raise RuntimeError("Number of skeleton links for neuron %s "
                            "is %s instead of 1. Aborting." %
                            (l['class_instance_b'], l['sk_count']))
            log("Each neuron of project #%s has now exactly one skeleton "
                    "linked" % p.id)
            if (nr_neurons == len(skeleton_links)):
                log("Number of neurons is now equal to number of skeleton links")
            else:
                raise RuntimeError("Number of neurons (%s) is not equal to "
                        "number of skeleton links (%s) after this migration. "
                        "Aborting." % (nr_neurons, len(skeleton_links)))

        log("Done with data migration")

    def backwards(self, orm):
        print("This data migration cannot be reverted!")

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'catmaid.apikey': {
            'Meta': {'object_name': 'ApiKey'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'catmaid.brokenslice': {
            'Meta': {'object_name': 'BrokenSlice', 'db_table': "'broken_slice'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.IntegerField', [], {}),
            'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"})
        },
        'catmaid.cardinalityrestriction': {
            'Meta': {'object_name': 'CardinalityRestriction', 'db_table': "'cardinality_restriction'"},
            'cardinality_type': ('django.db.models.fields.IntegerField', [], {}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassClass']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'catmaid.changerequest': {
            'Meta': {'object_name': 'ChangeRequest', 'db_table': "'change_request'"},
            'approve_action': ('django.db.models.fields.TextField', [], {}),
            'completion_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('catmaid.fields.Double3DField', [], {}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_recipient'", 'db_column': "'recipient_id'", 'to': "orm['auth.User']"}),
            'reject_action': ('django.db.models.fields.TextField', [], {}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'validate_action': ('django.db.models.fields.TextField', [], {})
        },
        'catmaid.class': {
            'Meta': {'object_name': 'Class', 'db_table': "'class'"},
            'class_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.classclass': {
            'Meta': {'object_name': 'ClassClass', 'db_table': "'class_class'"},
            'class_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_a'", 'db_column': "'class_a'", 'to': "orm['catmaid.Class']"}),
            'class_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_b'", 'db_column': "'class_b'", 'to': "orm['catmaid.Class']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.classinstance': {
            'Meta': {'object_name': 'ClassInstance', 'db_table': "'class_instance'"},
            'class_column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Class']", 'db_column': "'class_id'"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.classinstanceclassinstance': {
            'Meta': {'object_name': 'ClassInstanceClassInstance', 'db_table': "'class_instance_class_instance'"},
            'class_instance_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_a'", 'db_column': "'class_instance_a'", 'to': "orm['catmaid.ClassInstance']"}),
            'class_instance_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_b'", 'db_column': "'class_instance_b'", 'to': "orm['catmaid.ClassInstance']"}),
            'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
            'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'catmaid.concept': {
            'Meta': {'object_name': 'Concept', 'db_table': "'concept'"},
            'creation_time':
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.connector': { 'Meta': {'object_name': 'Connector', 'db_table': "'connector'"}, 'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connector_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('catmaid.fields.Double3DField', [], {}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'review_time': ('django.db.models.fields.DateTimeField', [], {}), 'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.connectorclassinstance': { 'Meta': {'object_name': 'ConnectorClassInstance', 'db_table': "'connector_class_instance'"}, 'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}), 'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.constraintstosegmentmap': { 'Meta': {'object_name': 'ConstraintsToSegmentMap'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'segments': ('catmaid.fields.IntegerArrayField', [], {}), 'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}), 'target_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}) }, 'catmaid.dataview': { 'Meta': {'ordering': "('position',)", 'object_name': 'DataView', 'db_table': "'data_view'"}, 'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}), 'config': ('django.db.models.fields.TextField', [], {'default': "'{}'"}), 'data_view_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.DataViewType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'position': ('django.db.models.fields.IntegerField', [], {'default': 
'0'}), 'title': ('django.db.models.fields.TextField', [], {}) }, 'catmaid.dataviewtype': { 'Meta': {'object_name': 'DataViewType', 'db_table': "'data_view_type'"}, 'code_type': ('django.db.models.fields.TextField', [], {}), 'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.TextField', [], {}) }, 'catmaid.deprecatedappliedmigrations': { 'Meta': {'object_name': 'DeprecatedAppliedMigrations', 'db_table': "'applied_migrations'"}, 'id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'}) }, 'catmaid.deprecatedsession': { 'Meta': {'object_name': 'DeprecatedSession', 'db_table': "'sessions'"}, 'data': ('django.db.models.fields.TextField', [], {'default': "''"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_accessed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'session_id': ('django.db.models.fields.CharField', [], {'max_length': '26'}) }, 'catmaid.drawing': { 'Meta': {'object_name': 'Drawing', 'db_table': "'drawing'"}, 'component_id': ('django.db.models.fields.IntegerField', [], {}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_x': ('django.db.models.fields.IntegerField', [], {}), 'max_y': ('django.db.models.fields.IntegerField', [], {}), 'min_x': ('django.db.models.fields.IntegerField', [], {}), 'min_y': ('django.db.models.fields.IntegerField', [], {}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'skeleton_id': ('django.db.models.fields.IntegerField', [], {}), 'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'svg': ('django.db.models.fields.TextField', [], {}), 'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'z': ('django.db.models.fields.IntegerField', [], {}) }, 'catmaid.location': { 'Meta': {'object_name': 'Location', 'db_table': "'location'"}, 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('catmaid.fields.Double3DField', [], {}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'review_time': ('django.db.models.fields.DateTimeField', [], {}), 'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.log': { 'Meta': {'object_name': 'Log', 'db_table': "'log'"}, 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'freetext': 
('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('catmaid.fields.Double3DField', [], {}), 'operation_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.message': { 'Meta': {'object_name': 'Message', 'db_table': "'message'"}, 'action': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'text': ('django.db.models.fields.TextField', [], {'default': "'New message'", 'null': 'True', 'blank': 'True'}), 'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'title': ('django.db.models.fields.TextField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.overlay': { 'Meta': {'object_name': 'Overlay', 'db_table': "'overlay'"}, 'default_opacity': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'file_extension': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image_base': ('django.db.models.fields.TextField', [], {}), 'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}), 'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '512'}), 'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '512'}), 'title': ('django.db.models.fields.TextField', [], {}) }, 'catmaid.project': { 'Meta': {'object_name': 'Project', 'db_table': "'project'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'stacks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catmaid.Stack']", 'through': "orm['catmaid.ProjectStack']", 'symmetrical': 'False'}), 'title': ('django.db.models.fields.TextField', [], {}) }, 'catmaid.projectstack': { 'Meta': {'object_name': 'ProjectStack', 'db_table': "'project_stack'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'orientation': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}), 'translation': ('catmaid.fields.Double3DField', [], {'default': '(0, 0, 0)'}) }, 'catmaid.regionofinterest': { 'Meta': {'object_name': 'RegionOfInterest', 'db_table': "'region_of_interest'"}, 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'height': ('django.db.models.fields.FloatField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('catmaid.fields.Double3DField', [], {}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'rotation_cw': ('django.db.models.fields.FloatField', [], {}), 'stack': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'width': ('django.db.models.fields.FloatField', [], {}), 'zoom_level': ('django.db.models.fields.IntegerField', [], {}) }, 'catmaid.regionofinterestclassinstance': { 'Meta': {'object_name': 'RegionOfInterestClassInstance', 'db_table': "'region_of_interest_class_instance'"}, 'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'region_of_interest': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.RegionOfInterest']"}), 'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.relation': { 'Meta': {'object_name': 'Relation', 'db_table': "'relation'"}, 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'description': ('django.db.models.fields.TextField', [], {}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'isreciprocal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'relation_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'uri': ('django.db.models.fields.TextField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.relationinstance': { 'Meta': {'object_name': 'RelationInstance', 'db_table': "'relation_instance'"}, 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.restriction': { 'Meta': {'object_name': 'Restriction', 'db_table': "'restriction'"}, 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassClass']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.segments': { 'Meta': {'object_name': 'Segments'}, 
'assembly': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']", 'null': 'True'}), 'cost': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'direction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'origin_slice_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'randomforest_cost': ('django.db.models.fields.FloatField', [], {}), 'segmentation_cost': ('django.db.models.fields.FloatField', [], {}), 'segmentid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'segmenttype': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}), 'target1_slice_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'target2_slice_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'target_section': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.segmenttoconstraintmap': { 'Meta': {'object_name': 'SegmentToConstraintMap'}, 'constraint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ConstraintsToSegmentMap']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'segment_node_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'segmentid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}), 'target_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}) }, 'catmaid.settings': { 'Meta': {'object_name': 'Settings', 'db_table': "'settings'"}, 'key': ('django.db.models.fields.TextField', [], {'primary_key': 'True'}), 'value': ('django.db.models.fields.TextField', [], {'null': 'True'}) }, 'catmaid.slices': { 'Meta': {'object_name': 'Slices'}, 'assembly': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']", 'null': 'True'}), 'center_x': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}), 'center_y': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'flag_left': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'flag_right': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_x': 
('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'max_y': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'min_x': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'min_y': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'node_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'sectionindex': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'size': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'slice_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}), 'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}), 'threshold': ('django.db.models.fields.FloatField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.stack': { 'Meta': {'object_name': 'Stack', 'db_table': "'stack'"}, 'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'dimension': ('catmaid.fields.Integer3DField', [], {}), 'file_extension': ('django.db.models.fields.TextField', [], {'default': "'jpg'", 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image_base': ('django.db.models.fields.TextField', [], {}), 'metadata': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'num_zoom_levels': ('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'resolution': ('catmaid.fields.Double3DField', [], {}), 'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '256'}), 'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '256'}), 'title': ('django.db.models.fields.TextField', [], {}), 'trakem2_project': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'catmaid.stacksliceinfo': { 'Meta': {'object_name': 'StackSliceInfo'}, 'file_extension': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slice_base_path': ('django.db.models.fields.TextField', [], {}), 'slice_base_url': ('django.db.models.fields.TextField', [], {}), 'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}) }, 'catmaid.textlabel': { 'Meta': {'object_name': 'Textlabel', 'db_table': "'textlabel'"}, 'colour': ('catmaid.fields.RGBAField', [], {'default': '(1, 0.5, 0, 1)'}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'font_name': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'font_size': ('django.db.models.fields.FloatField', [], {'default': '32'}), 'font_style': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'scaling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'text': 
('django.db.models.fields.TextField', [], {'default': "'Edit this text ...'"}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}) }, 'catmaid.textlabellocation': { 'Meta': {'object_name': 'TextlabelLocation', 'db_table': "'textlabel_location'"}, 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('catmaid.fields.Double3DField', [], {}), 'textlabel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Textlabel']"}) }, 'catmaid.treenode': { 'Meta': {'object_name': 'Treenode', 'db_table': "'treenode'"}, 'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'treenode_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('catmaid.fields.Double3DField', [], {}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['catmaid.Treenode']"}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'radius': ('django.db.models.fields.FloatField', [], {}), 'review_time': ('django.db.models.fields.DateTimeField', [], {}), 'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.treenodeclassinstance': { 'Meta': {'object_name': 'TreenodeClassInstance', 'db_table': "'treenode_class_instance'"}, 'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}), 'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.treenodeconnector': { 'Meta': {'object_name': 'TreenodeConnector', 'db_table': "'treenode_connector'"}, 'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}), 'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}), 'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}), 'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}), 'skeleton': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}), 'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'catmaid.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'color': ('catmaid.fields.RGBAField', [], {'default': '(0.9239775287034722, 1.0, 0.9894981201919059, 1)'}), 'display_stack_reference_lines': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'independent_ontology_workspace_is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'inverse_mouse_wheel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_cropping_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_ontology_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_segmentation_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_tagging_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_text_label_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_tracing_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'taggit.tag': { 'Meta': {'object_name': 'Tag'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}) }, 'taggit.taggeditem': { 'Meta': {'object_name': 'TaggedItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"}) } } complete_apps = ['catmaid']
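# Note on the cloning step in the migration above: it relies on a standard
# Django ORM idiom in which clearing an instance's primary key before save()
# forces an INSERT, so a copy of the row is created. A minimal sketch of that
# idiom follows; the `Note` model is hypothetical and only illustrates the
# pattern.

from django.db import models

class Note(models.Model):
    text = models.TextField()

def clone_note(note):
    """Return a saved copy of `note` with a fresh primary key."""
    note.pk = None  # save() will now INSERT a new row instead of updating
    note.save()
    return note     # the in-memory object now refers to the newly created row

# Because the in-memory object refers to the clone after save(), the migration
# above re-creates the CICI links explicitly for each copy rather than reusing
# the loaded link objects.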
#!/usr/bin/env python # -*- coding: utf-8 -*- from datetime import datetime from django.core.urlresolvers import reverse from photologue.tests import helpers from photologue.tests.helpers import RequestTest YEAR = datetime.now().year MONTH = datetime.now().ctime().split(' ')[1].lower() DAY = datetime.now().day class RequestGalleryTest(RequestTest): def setUp(self): super(RequestGalleryTest, self).setUp() self.gallery = helpers._create_new_gallery( name='Fake Gallery', slug='fake-gallery') def tearDown(self): super(RequestGalleryTest, self).tearDown() self.gallery.delete() def test_archive_gallery_url_works(self): self.assertUrl( reverse('pl-gallery-archive') ) def test_paginated_gallery_url_works(self): self.assertUrl( reverse('pl-gallery-list', kwargs={'page': 1}) ) def test_gallery_works(self): self.assertUrl( reverse('pl-gallery', kwargs={'slug': 'fake-gallery'}) ) def test_archive_year_gallery_works(self): self.assertUrl( reverse('pl-gallery-archive-year', kwargs={'year': YEAR} ) ) def test_archive_month_gallery_works(self): self.assertUrl( reverse('pl-gallery-archive-month', kwargs={'year': YEAR, 'month':MONTH} ) ) def test_archive_day_gallery_works(self): self.assertUrl( reverse('pl-gallery-archive-day', kwargs={'year': YEAR, 'month':MONTH, 'day': DAY} ) ) def test_detail_gallery_works(self): self.assertUrl( reverse('pl-gallery-detail', kwargs={'year': YEAR, 'month':MONTH, 'day': DAY, 'slug': 'fake-gallery'} ) )
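# Note on the MONTH constant above: ctime() renders dates in a fixed layout
# like 'Sat Mar  7 00:00:00 2015', so field 1 of the split is always the month
# abbreviation. A small sanity check (standard library only; the strftime('%b')
# equivalence assumes the C locale):

from datetime import datetime

d = datetime(2015, 3, 7)
assert d.ctime().split(' ')[1].lower() == 'mar'
assert d.strftime('%b').lower() == 'mar'  # equivalent, and more direct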
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


import logging

from openfisca_survey_manager.survey_collections import SurveyCollection

log = logging.getLogger(__name__)

from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.temporary import TemporaryStore

temporary_store = TemporaryStore.create(file_name = "indirect_taxation_tmp")

#**************************************************************************************************************************
#* Step 0-2: HOMOGENIZATION OF THE VEHICLE DATA
#**************************************************************************************************************************
#
#
# * DATA ON FUEL TYPES

def build_homogeneisation_vehicules(year = None):
    """Compute vehicle numbers by fuel type"""

    assert year is not None
    # Load data
    bdf_survey_collection = SurveyCollection.load(
        collection = 'budget_des_familles', config_files_directory = config_files_directory)
    survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))

    if year == 1995:
        vehicule = None
        # * The BdF 1995 survey does not contain any information on the fuel
        #   type used by the vehicles.
    if year == 2000:
        vehicule = survey.get_values(table = "depmen")
        kept_variables = ['ident', 'carbu01', 'carbu02']
        vehicule = vehicule[kept_variables]
        vehicule.rename(
            columns = {'ident': 'ident_men', 'carbu01': 'carbu1', 'carbu02': 'carbu2'},
            inplace = True,
            )
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = 1 * (vehicule['carbu1'] == 1) + 1 * (vehicule['carbu2'] == 1)
        vehicule["veh_diesel"] = 1 * (vehicule['carbu1'] == 2) + 1 * (vehicule['carbu2'] == 2)

    if year == 2005:
        vehicule = survey.get_values(table = "automobile")
        kept_variables = ['ident_men', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)

    if year == 2011:
        try:
            vehicule = survey.get_values(table = "AUTOMOBILE")
        except Exception:
            vehicule = survey.get_values(table = "automobile")
        kept_variables = ['ident_me', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns = {'ident_me': 'ident_men'}, inplace = True)
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)

    # Compute the number of cars by category and the share of petrol vehicles
    if year != 1995:
        vehicule = vehicule.groupby(by = 'ident_men')[
            ['veh_tot', 'veh_essence', 'veh_diesel']].sum()
        vehicule["pourcentage_vehicule_essence"] = 0
        vehicule.loc[vehicule.veh_tot != 0, "pourcentage_vehicule_essence"] = \
            vehicule.veh_essence / vehicule.veh_tot
        # Save in temporary store
        temporary_store['automobile_{}'.format(year)] = vehicule

    temporary_store.close()


if __name__ == '__main__':
    import sys
    import time
    logging.basicConfig(level = logging.INFO, stream = sys.stdout)
    deb = time.clock()
    year = 1995
    build_homogeneisation_vehicules(year = year)
    log.info("step 0_2_homogeneisation_vehicules duration is {}".format(time.clock() - deb))
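# To make the aggregation step above concrete, the following is a small,
# self-contained rerun of the same groupby logic on fabricated data (pandas is
# assumed, as in the script itself; the household IDs and fuel codes are made
# up for illustration).

import pandas as pd

# Two households; 'carbu' codes: 1 = petrol (essence), 2 = diesel
toy = pd.DataFrame({'ident_men': [1, 1, 2], 'carbu': [1, 2, 1]})
toy['veh_tot'] = 1
toy['veh_essence'] = (toy['carbu'] == 1).astype(int)
toy['veh_diesel'] = (toy['carbu'] == 2).astype(int)

# One row per household with summed vehicle counts, as in the 2005/2011 branches
par_menage = toy.groupby('ident_men')[['veh_tot', 'veh_essence', 'veh_diesel']].sum()
par_menage['pourcentage_vehicule_essence'] = 0.0
par_menage.loc[par_menage.veh_tot != 0, 'pourcentage_vehicule_essence'] = \
    par_menage.veh_essence / par_menage.veh_tot
# household 1 -> petrol share 0.5; household 2 -> petrol share 1.0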
"""Representation of a collection of items.""" import os from itertools import chain from collections import OrderedDict from doorstop import common from doorstop.common import DoorstopError, DoorstopWarning from doorstop.core.base import (add_document, edit_document, delete_document, auto_load, auto_save, BaseValidatable, BaseFileObject) from doorstop.core.types import Prefix, UID, Level from doorstop.core.item import Item from doorstop import settings log = common.logger(__name__) class Document(BaseValidatable, BaseFileObject): # pylint: disable=R0902 """Represents a document directory containing an outline of items.""" CONFIG = '.doorstop.yml' SKIP = '.doorstop.skip' # indicates this document should be skipped INDEX = 'index.yml' DEFAULT_PREFIX = Prefix('REQ') DEFAULT_SEP = '' DEFAULT_DIGITS = 3 def __init__(self, path, root=os.getcwd(), **kwargs): """Initialize a document from an exiting directory. :param path: path to document directory :param root: path to root of project """ super().__init__() # Ensure the directory is valid if not os.path.isfile(os.path.join(path, Document.CONFIG)): relpath = os.path.relpath(path, root) msg = "no {} in {}".format(Document.CONFIG, relpath) raise DoorstopError(msg) # Initialize the document self.path = path self.root = root self.tree = kwargs.get('tree') self.auto = kwargs.get('auto', Document.auto) # Set default values self._data['prefix'] = Document.DEFAULT_PREFIX self._data['sep'] = Document.DEFAULT_SEP self._data['digits'] = Document.DEFAULT_DIGITS self._data['parent'] = None # the root document does not have a parent self._items = [] self._itered = False def __repr__(self): return "Document('{}')".format(self.path) def __str__(self): if common.verbosity < common.STR_VERBOSITY: return self.prefix else: return "{} ({})".format(self.prefix, self.relpath) def __iter__(self): yield from self._iter() def __len__(self): return len(list(self._iter())) def __bool__(self): # override `__len__` behavior, pylint: disable=R0201 return True @staticmethod @add_document def new(tree, path, root, prefix, sep=None, digits=None, parent=None, auto=None): # pylint: disable=R0913,C0301 """Internal method to create a new document. :param tree: reference to tree that contains this document :param path: path to directory for the new document :param root: path to root of the project :param prefix: prefix for the new document :param sep: separator between prefix and numbers :param digits: number of digits for the new document :param parent: parent UID for the new document :param auto: automatically save the document :raises: :class:`~doorstop.common.DoorstopError` if the document already exists :return: new :class:`~doorstop.core.document.Document` """ # TODO: raise a specific exception for invalid separator characters? 
        assert not sep or sep in settings.SEP_CHARS
        config = os.path.join(path, Document.CONFIG)
        # Check for an existing document
        if os.path.exists(config):
            raise DoorstopError("document already exists: {}".format(path))
        # Create the document directory
        Document._create(config, name='document')
        # Initialize the document
        document = Document(path, root=root, tree=tree, auto=False)
        document.prefix = prefix if prefix is not None else document.prefix
        document.sep = sep if sep is not None else document.sep
        document.digits = digits if digits is not None else document.digits
        document.parent = parent if parent is not None else document.parent
        if auto or (auto is None and Document.auto):
            document.save()
        # Return the document
        return document

    def load(self, reload=False):
        """Load the document's properties from its file."""
        if self._loaded and not reload:
            return
        log.debug("loading {}...".format(repr(self)))
        # Read text from file
        text = self._read(self.config)
        # Parse YAML data from text
        data = self._load(text, self.config)
        # Store parsed data
        sets = data.get('settings', {})
        for key, value in sets.items():
            if key == 'prefix':
                self._data['prefix'] = Prefix(value)
            elif key == 'sep':
                self._data['sep'] = value.strip()
            elif key == 'parent':
                self._data['parent'] = value.strip()
            elif key == 'digits':
                self._data['digits'] = int(value)
        # Set meta attributes
        self._loaded = True
        if reload:
            list(self._iter(reload=reload))

    @edit_document
    def save(self):
        """Save the document's properties to its file."""
        log.debug("saving {}...".format(repr(self)))
        # Format the data items
        data = {}
        sets = {}
        for key, value in self._data.items():
            if key == 'prefix':
                sets['prefix'] = str(value)
            elif key == 'sep':
                sets['sep'] = value
            elif key == 'digits':
                sets['digits'] = value
            elif key == 'parent':
                if value:
                    sets['parent'] = value
            else:
                data[key] = value
        data['settings'] = sets
        # Dump the data to YAML
        text = self._dump(data)
        # Save the YAML to file
        self._write(text, self.config)
        # Set meta attributes
        self._loaded = False
        self.auto = True

    def _iter(self, reload=False):
        """Yield the document's items."""
        if self._itered and not reload:
            msg = "iterating document {}'s loaded items...".format(self)
            log.debug(msg)
            yield from list(self._items)
            return
        log.info("loading document {}'s items...".format(self))
        # Reload the document's items
        self._items = []
        for dirpath, dirnames, filenames in os.walk(self.path):
            for dirname in list(dirnames):
                path = os.path.join(dirpath, dirname, Document.CONFIG)
                if os.path.exists(path):
                    path = os.path.dirname(path)
                    dirnames.remove(dirname)
                    log.trace("skipped embedded document: {}".format(path))
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                try:
                    item = Item(path, root=self.root,
                                document=self, tree=self.tree)
                except DoorstopError:
                    pass  # skip non-item files
                else:
                    self._items.append(item)
                    if reload:
                        item.load(reload=reload)
                    if settings.CACHE_ITEMS and self.tree:
                        self.tree._item_cache[item.uid] = item  # pylint: disable=W0212
                        log.trace("cached item: {}".format(item))
        # Set meta attributes
        self._itered = True
        # Yield items
        yield from list(self._items)

    # properties #############################################################

    @property
    def config(self):
        """Get the path to the document's file."""
        return os.path.join(self.path, Document.CONFIG)

    @property
    @auto_load
    def prefix(self):
        """Get the document's prefix."""
        return self._data['prefix']

    @prefix.setter
    @auto_save
    @auto_load
    def prefix(self, value):
        """Set the document's prefix."""
        self._data['prefix'] = Prefix(value)
        # TODO: should the new prefix be applied to all items?

    @property
    @auto_load
    def sep(self):
        """Get the prefix-number separator to use for new item UIDs."""
        return self._data['sep']

    @sep.setter
    @auto_save
    @auto_load
    def sep(self, value):
        """Set the prefix-number separator to use for new item UIDs."""
        # TODO: raise a specific exception for invalid separator characters?
        assert not value or value in settings.SEP_CHARS
        self._data['sep'] = value.strip()
        # TODO: should the new separator be applied to all items?

    @property
    @auto_load
    def digits(self):
        """Get the number of digits to use for new item UIDs."""
        return self._data['digits']

    @digits.setter
    @auto_save
    @auto_load
    def digits(self, value):
        """Set the number of digits to use for new item UIDs."""
        self._data['digits'] = value
        # TODO: should the new digits be applied to all items?

    @property
    @auto_load
    def parent(self):
        """Get the document's parent document prefix."""
        return self._data['parent']

    @parent.setter
    @auto_save
    @auto_load
    def parent(self, value):
        """Set the document's parent document prefix."""
        self._data['parent'] = str(value) if value else ""

    @property
    def items(self):
        """Get an ordered list of items in the document."""
        return sorted(self._iter())

    @property
    def depth(self):
        """Return the maximum item level depth."""
        return max(item.depth for item in self)

    @property
    def next_number(self):
        """Get the next item number for the document."""
        try:
            number = max(item.number for item in self) + 1
        except ValueError:
            number = 1
        log.debug("next number (local): {}".format(number))
        if self.tree and self.tree.request_next_number:
            remote_number = 0
            while remote_number is not None and remote_number < number:
                if remote_number:
                    log.warn("server is behind, requesting next number...")
                remote_number = self.tree.request_next_number(self.prefix)
                log.debug("next number (remote): {}".format(remote_number))
            if remote_number:
                number = remote_number
        return number

    @property
    def skip(self):
        """Indicate the document should be skipped."""
        return os.path.isfile(os.path.join(self.path, Document.SKIP))

    @property
    def index(self):
        """Get the path to the document's index if it exists else `None`."""
        path = os.path.join(self.path, Document.INDEX)
        if os.path.exists(path):
            return path

    @index.setter
    def index(self, value):
        """Create or update the document's index."""
        if value:
            path = os.path.join(self.path, Document.INDEX)
            log.info("creating {} index...".format(self))
            common.write_lines(self._lines_index(self.items), path)

    @index.deleter
    def index(self):
        """Delete the document's index if it exists."""
        log.info("deleting {} index...".format(self))
        common.delete(self.index)

    # actions ################################################################

    # decorators are applied to methods in the associated classes
    def add_item(self, number=None, level=None, reorder=True):
        """Create a new item for the document and return it.

:param number: desired item number :param level: desired item level :param reorder: update levels of document items :return: added :class:`~doorstop.core.item.Item` """ number = max(number or 0, self.next_number) log.debug("next number: {}".format(number)) try: last = self.items[-1] except IndexError: next_level = level else: if level: next_level = level elif last.level.heading: next_level = last.level >> 1 next_level.heading = False else: next_level = last.level + 1 log.debug("next level: {}".format(next_level)) uid = UID(self.prefix, self.sep, number, self.digits) item = Item.new(self.tree, self, self.path, self.root, uid, level=next_level) if level and reorder: self.reorder(keep=item) return item # decorators are applied to methods in the associated classes def remove_item(self, value, reorder=True): """Remove an item by its UID. :param value: item or UID :param reorder: update levels of document items :raises: :class:`~doorstop.common.DoorstopError` if the item cannot be found :return: removed :class:`~doorstop.core.item.Item` """ uid = UID(value) item = self.find_item(uid) item.delete() if reorder: self.reorder() return item # decorators are applied to methods in the associated classes def reorder(self, manual=True, automatic=True, start=None, keep=None, _items=None): """Reorder a document's items. Two methods are using to create the outline order: - manual: specify the order using an updated index file - automatic: shift duplicate levels and compress gaps :param manual: enable manual ordering using the index (if one exists) :param automatic: enable automatic ordering (after manual ordering) :param start: level to start numbering (None = use current start) :param keep: item or UID to keep over duplicates """ # Reorder manually if manual and self.index: log.info("reordering {} from index...".format(self)) self._reorder_from_index(self, self.index) del self.index # Reorder automatically if automatic: log.info("reordering {} automatically...".format(self)) items = _items or self.items keep = self.find_item(keep) if keep else None self._reorder_automatic(items, start=start, keep=keep) @staticmethod def _lines_index(items): """Generate (pseudo) YAML lines for the document index.""" yield '#' * settings.MAX_LINE_LENGTH yield '# THIS TEMPORARY FILE WILL BE DELETED AFTER DOCUMENT REORDERING' yield '# MANUALLY INDENT, DEDENT, & MOVE ITEMS TO THEIR DESIRED LEVEL' yield '# CHANGES ARE BE REFLECTED IN THE ITEM FILES AFTER CONFIRMATION' yield '#' * settings.MAX_LINE_LENGTH yield '' yield "initial: {}".format(items[0].level if items else 1.0) yield "outline:" for item in items: space = " " * item.depth comment = item.text.replace('\n', ' ') or item.ref line = space + "- {u}: # {c}".format(u=item.uid, c=comment) if len(line) > settings.MAX_LINE_LENGTH: line = line[:settings.MAX_LINE_LENGTH - 3] + '...' yield line @staticmethod def _reorder_from_index(document, path): """Reorder a document's item from the index.""" # Load and parse index text = common.read_text(path) data = common.load_yaml(text, path) # Read updated values initial = data.get('initial', 1.0) outline = data.get('outline', []) # Update levels level = Level(initial) Document._reorder_section(outline, level, document) @staticmethod def _reorder_section(section, level, document): """Recursive function to reorder a section of an outline. 
:param section: recursive `list` of `dict` loaded from document index :param level: current :class:`~doorstop.core.types.Level` :param document: :class:`~doorstop.core.document.Document` to order """ if isinstance(section, dict): # a section # Get the item and subsection uid = list(section.keys())[0] try: item = document.find_item(uid) except DoorstopError as exc: log.debug(exc) item = None subsection = section[uid] # An item is a header if it has a subsection level.heading = bool(subsection) # Apply the new level if item is None: log.info("({}): {}".format(uid, level)) elif item.level == level: log.info("{}: {}".format(item, level)) else: log.info("{}: {} to {}".format(item, item.level, level)) if item: item.level = level # Process the heading's subsection if subsection: Document._reorder_section(subsection, level >> 1, document) elif isinstance(section, list): # a list of sections # Process each subsection for index, subsection in enumerate(section): Document._reorder_section(subsection, level + index, document) @staticmethod def _reorder_automatic(items, start=None, keep=None): """Reorder a document's items automatically. :param items: items to reorder :param start: level to start numbering (None = use current start) :param keep: item to keep over duplicates """ nlevel = plevel = None for clevel, item in Document._items_by_level(items, keep=keep): log.debug("current level: {}".format(clevel)) # Determine the next level if not nlevel: # Use the specified or current starting level nlevel = Level(start) if start else clevel nlevel.heading = clevel.heading log.debug("next level (start): {}".format(nlevel)) else: # Adjust the next level to be the same depth if len(clevel) > len(nlevel): nlevel >>= len(clevel) - len(nlevel) log.debug("matched current indent: {}".format(nlevel)) elif len(clevel) < len(nlevel): nlevel <<= len(nlevel) - len(clevel) # nlevel += 1 log.debug("matched current dedent: {}".format(nlevel)) nlevel.heading = clevel.heading # Check for a level jump _size = min(len(clevel.value), len(plevel.value)) for index in range(max(_size - 1, 1)): if clevel.value[index] > plevel.value[index]: nlevel <<= len(nlevel) - 1 - index nlevel += 1 nlevel >>= len(clevel) - len(nlevel) msg = "next level (jump): {}".format(nlevel) log.debug(msg) break # Check for a normal increment else: if len(nlevel) <= len(plevel): nlevel += 1 msg = "next level (increment): {}".format(nlevel) log.debug(msg) else: msg = "next level (indent/dedent): {}".format(nlevel) log.debug(msg) # Apply the next level if clevel == nlevel: log.info("{}: {}".format(item, clevel)) else: log.info("{}: {} to {}".format(item, clevel, nlevel)) item.level = nlevel.copy() # Save the current level as the previous level plevel = clevel.copy() @staticmethod def _items_by_level(items, keep=None): """Iterate through items by level with the kept item first.""" # Collect levels levels = OrderedDict() for item in items: if item.level in levels: levels[item.level].append(item) else: levels[item.level] = [item] # Reorder levels for level, items in levels.items(): # Reorder items at this level if keep in items: # move the kept item to the front of the list log.debug("keeping {} level over duplicates".format(keep)) items = [items.pop(items.index(keep))] + items for item in items: yield level, item def find_item(self, value, _kind=''): """Return an item by its UID. 
:param value: item or UID :raises: :class:`~doorstop.common.DoorstopError` if the item cannot be found :return: matching :class:`~doorstop.core.item.Item` """ uid = UID(value) for item in self: if item.uid == uid: return item raise DoorstopError("no matching{} UID: {}".format(_kind, uid)) def get_issues(self, item_hook=None, **kwargs): """Yield all the document's issues. :param item_hook: function to call for custom item validation :return: generator of :class:`~doorstop.common.DoorstopError`, :class:`~doorstop.common.DoorstopWarning`, :class:`~doorstop.common.DoorstopInfo` """ assert kwargs.get('document_hook') is None hook = item_hook if item_hook else lambda **kwargs: [] log.info("checking document {}...".format(self)) # Check for items items = self.items if not items: yield DoorstopWarning("no items") return # Reorder or check item levels if settings.REORDER: self.reorder(_items=items) elif settings.CHECK_LEVELS: yield from self._get_issues_level(items) # Check each item for item in items: # Check item for issue in chain(hook(item=item, document=self, tree=self.tree), item.get_issues()): # Prepend the item's UID to yielded exceptions if isinstance(issue, Exception): yield type(issue)("{}: {}".format(item.uid, issue)) @staticmethod def _get_issues_level(items): """Yield all the document's issues related to item level.""" prev = items[0] if items else None for item in items[1:]: puid = prev.uid plev = prev.level nuid = item.uid nlev = item.level log.debug("checking level {} to {}...".format(plev, nlev)) # Duplicate level if plev == nlev: uids = sorted((puid, nuid)) msg = "duplicate level: {} ({}, {})".format(plev, *uids) yield DoorstopWarning(msg) # Skipped level length = min(len(plev.value), len(nlev.value)) for index in range(length): # Types of skipped levels: # 1. over: 1.0 --> 1.2 # 2. out: 1.1 --> 3.0 if (nlev.value[index] - plev.value[index] > 1 or # 3. over and out: 1.1 --> 2.2 (plev.value[index] != nlev.value[index] and index + 1 < length and nlev.value[index + 1] not in (0, 1))): msg = "skipped level: {} ({}), {} ({})".format(plev, puid, nlev, nuid) yield DoorstopWarning(msg) break prev = item @delete_document def delete(self, path=None): """Delete the document and its items.""" for item in self: item.delete() # the document is deleted in the decorated method
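
# To make the index round-trip above concrete, here is a minimal sketch of
# the parsed outline structure that _reorder_section walks once
# common.load_yaml has read the temporary index file. The UIDs and comments
# are hypothetical placeholders, not taken from any real document.

outline = [
    {'REQ001': None},        # plain item: empty subsection, so not a heading
    {'REQ002': [             # truthy subsection: the item becomes a heading
        {'REQ003': None},    # children are reordered one level deeper
        {'REQ004': None},    # siblings in a list are numbered level + index
    ]},
]
# With an initial level of 1.0, a call along the lines of
# Document._reorder_section(outline, Level(1.0), document) would renumber
# REQ001, mark REQ002 as a heading, and then recurse into its two children.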
I do not find your FAQ answer and your article on security very reassuring. This grossly overstates the case. So... it is guaranteed safe, but don't use it if the network is not itself safe? This is like saying a phone is waterproof when you actually mean "well, it won't get wet as long as you keep it in a plastic bag". The phone is not waterproof, the bag is... if the bag is sealed properly, not punctured, and not already maliciously filled with water by some sneaky person (oh, and a goldfish while they're at it). This gets around the problem of claiming it is guaranteed secure when it is absolutely nothing of the sort, as you say in the surrounding sentences. The current wording seems all too like the stuff we see from companies all over the web: "We care about your privacy. Your privacy means more to us than a stadium full of puppies. We would rather sell our own daughters to unknown strangers just escaped from supermax prisons than share even the tiniest bit of your private data." when they don't actually say they won't share it, because they can't; their whole business plan relies on selling your secrets to everyone they can. In other words, it just seems like you are avoiding the truth by saying happy things. (OK, you aren't really as bad as them, but this is the interhighway, the superwebs, the hypertoobs, and if I don't make outrageous exaggerations you clearly won't understand that I care.) The actual situation seems fine, so just state it as it is.

Thanks for the suggestion. I've modified the help document.

Coffee wrote: Thanks for the suggestion. I've modified the help document.

I've always had my PC and my Android device on the same WiFi network, and I had never had problems connecting until recently, which was the first time I had heard of a Lite Mode. After reading several explanations about what it is and how to choose this mode, I still have no understanding of either. Let's start at the beginning.

I have the AirDroid site (web.airdroid.com) bookmarked on my PC. When I clicked on the bookmark tonight, my PC automatically tried to connect with my device (presumably because I have my email address and password saved and the "Stay connected" option checked). But the attempt to connect failed. There was a "Failed to connect" message on my PC accompanied by a suggestion to use Lite Mode with instructions I can't remember.

I went to my device, opened the AirDroid app, and looked for a "Lite mode" option. Not seeing one, I clicked on settings. I saw a "Lite mode" category and an "HTTPS connection" option within it. Because the PC instructions had mentioned something about HTTPS, I turned this option on. Then I exited AirDroid and restarted it.

The very top of the AirDroid app read "Open web address" with an "i" in a circle next to it (presumably for "information"). Nothing happened when I clicked on either "Open web address" or the circled "i". (Is something supposed to happen?) Below these apparent (but not actual) options, it read "http://web.airdroid.com", and below that in lighter, smaller font, it read "Or https://192.168.1.71:8890". Nothing happened when I clicked on either. (Is something supposed to happen?)

I clicked "Reconnect" on my PC, and it worked. The top of my app reads "Local Connection Mode." That's where I am now. Presumably I'm connected via "http://web.android.com" since that was the bigger, bolder address on my app before I reconnected and it's the web address that displays in the AirDroid tab of my PC browser. But as I said, I turned on "HTTPS Connection" in settings.
Shouldn't the app have reflected this change before I connected? (And shouldn't my PC browser now reflect this change?) If not, what exactly changed (if anything) when I turned on "HTTPS Connection"?

The title of this thread refers to "LAN Connection and Lite (non-HTTPS) modes." What is the difference between an HTTPS mode and a non-HTTPS mode? (Not all of us are technology experts.) And what is the effect of turning on the "HTTPS connection" option under "Lite mode" in the settings? Apparently it doesn't mean that I'm in an HTTPS mode now. As I said earlier, the app reads "Local Connection Mode." Does that mean I'm not in Lite Mode, or are these two modes not related? If the latter, what is the alternative to Local Connection Mode? Is Local Connection Mode the same as LAN Connection Mode? If so, why don't you use one term consistently? (So many different AirDroid features are referred to as a "mode" that the word means nothing to me.)

I've connected only one way as far as I know. My AirDroid tab on my PC looks the same as it always has. The only reference to a mode is "Local Connection Mode" in the bottom right corner, which matches what the app on my device displays. I don't see any way on either my PC or my app to change to another mode. I suspect I'm not in Lite mode and that my PC connected to my device anyway, but I'm not certain. Assuming I'm right, why was I able to connect during the second attempt but not the first? I've since disconnected and reconnected several times without a hitch.

Call me dumb, but I still have no idea what the purpose of the Lite mode is or how to get into that mode. (Or if I'm in fact in Lite mode, why isn't that evident?) Why isn't there a simple option to put my device (and my PC?) into Lite Mode and/or whatever the alternative to Local Connection mode is? AirDroid is a great app, but the instructions and help are awful IMO.

Last edited by yeltommo on Mon Jul 27, 2015 6:31 am, edited 1 time in total.
# Copyright (c) 2010 Reza Lotun http://reza.lotun.name # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.resultset import ResultSet class AppCookieStickinessPolicy(object): def __init__(self, connection=None): self.cookie_name = None self.policy_name = None def __repr__(self): return 'AppCookieStickiness(%s, %s)' % (self.policy_name, self.cookie_name) def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'CookieName': self.cookie_name = value elif name == 'PolicyName': self.policy_name = value class LBCookieStickinessPolicy(object): def __init__(self, connection=None): self.policy_name = None self.cookie_expiration_period = None def __repr__(self): return 'LBCookieStickiness(%s, %s)' % (self.policy_name, self.cookie_expiration_period) def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'CookieExpirationPeriod': self.cookie_expiration_period = value elif name == 'PolicyName': self.policy_name = value class OtherPolicy(object): def __init__(self, connection=None): self.policy_name = None def __repr__(self): return 'OtherPolicy(%s)' % (self.policy_name) def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): self.policy_name = value class Policies(object): """ ELB Policies """ def __init__(self, connection=None): self.connection = connection self.app_cookie_stickiness_policies = None self.lb_cookie_stickiness_policies = None self.other_policies = None def __repr__(self): app = 'AppCookieStickiness%s' % self.app_cookie_stickiness_policies lb = 'LBCookieStickiness%s' % self.lb_cookie_stickiness_policies other = 'Other%s' % self.other_policies return 'Policies(%s,%s,%s)' % (app, lb, other) def startElement(self, name, attrs, connection): if name == 'AppCookieStickinessPolicies': rs = ResultSet([('member', AppCookieStickinessPolicy)]) self.app_cookie_stickiness_policies = rs return rs elif name == 'LBCookieStickinessPolicies': rs = ResultSet([('member', LBCookieStickinessPolicy)]) self.lb_cookie_stickiness_policies = rs return rs elif name == 'OtherPolicies': rs = ResultSet([('member', OtherPolicy)]) self.other_policies = rs return rs def endElement(self, name, value, connection): return
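
# For context, a hand-driven sketch of the startElement/endElement contract
# the classes above implement. boto's actual XML handler generates these
# calls while walking an ELB API response; the element values below are
# invented for illustration only.
policies = Policies()

# Opening <LBCookieStickinessPolicies> hands parsing off to a ResultSet
# that builds one LBCookieStickinessPolicy per <member> element.
rs = policies.startElement('LBCookieStickinessPolicies', {}, None)

member = LBCookieStickinessPolicy()
member.endElement('PolicyName', 'my-lb-policy', None)
member.endElement('CookieExpirationPeriod', '300', None)
rs.append(member)  # ResultSet subclasses list, so append works directly

print(policies)  # the repr shows which of the three policy lists were filled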
duplicating messages on every try to recover from the exception. Can someone shed some light on this issue for me? Thanks.
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for the TF implementations of auto-batched VM variables.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import hypothesis as hp from hypothesis import strategies as hps from hypothesis.extra import numpy as hpnp import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python.experimental.auto_batching import backend_test_lib as backend_test from tensorflow_probability.python.experimental.auto_batching import instructions as inst from tensorflow_probability.python.experimental.auto_batching import tf_backend from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps from tensorflow_probability.python.internal import test_util # TODO(b/127689162): Restore testing complex dtypes. # TF_NP_DTYPES = [np.float32, np.float64, np.int32, np.complex64, np.bool_] TF_NP_DTYPES = [np.float32, np.float64, np.int32, np.bool_] TF_BACKEND = tf_backend.TensorFlowBackend() def var_init(max_stack_depth, initial_value): type_ = inst.TensorType(initial_value.dtype, initial_value.shape[1:]) var = TF_BACKEND.create_variable( None, inst.VariableAllocation.FULL, type_, max_stack_depth, batch_size=initial_value.shape[0]) return var.update( initial_value, TF_BACKEND.full_mask(initial_value.shape[0])) @test_util.test_all_tf_execution_regimes class TFVariableTest(test_util.TestCase, backend_test.VariableTestCase): def testTFSmoke(self): """Test the property on specific example, without relying on Hypothesis.""" init = (12, np.random.randn(3, 2, 2).astype(np.float32)) ops = [('pop', [False, False, True]), ('push', [True, False, True]), ('update', np.ones((3, 2, 2), dtype=np.float32), [True, True, False]), ('pop', [True, False, True])] self.check_same_results( init, ops, var_init, to_numpy_arrays=self.evaluate, exception_types=(ValueError, tf.errors.InvalidArgumentError)) @hp.given(hps.data()) @tfp_hps.tfp_hp_settings() def testTFVariableRandomOps(self, data): # Hypothesis strategy: # Generate a random max stack depth and value shape # Deduce the batch size from the value shape # Make a random dtype # Generate a random initial value of that dtype and shape # Generate ops, some of which write random values of that dtype and shape max_stack_depth = data.draw(hps.integers(min_value=1, max_value=100)) value_shape = data.draw(hpnp.array_shapes(min_dims=1)) batch_size = value_shape[0] dtype = data.draw(hps.one_of(*map(hps.just, TF_NP_DTYPES))) masks = hpnp.arrays(dtype=np.bool_, shape=[batch_size]) values = hpnp.arrays(dtype, value_shape) init_val = data.draw(values) ops = data.draw( hps.lists( hps.one_of( hps.tuples(hps.just('update'), values, masks), hps.tuples(hps.just('push'), masks), hps.tuples(hps.just('pop'), masks), # preserve line break hps.tuples(hps.just('read'))))) init = (max_stack_depth, init_val) 
self.check_same_results( init, ops, var_init, to_numpy_arrays=self.evaluate, exception_types=(ValueError, tf.errors.InvalidArgumentError)) def testClosingOverTensorDoesntRaise(self): x = tf.constant(0.) def f(y): return y * x arg_types = [inst.Type([inst.TensorType(shape=[], dtype=np.float32)])] TF_BACKEND.run_on_dummies(f, arg_types) def testDtypeMergingBoolsDoesntRaise(self): TF_BACKEND.merge_dtypes(np.bool_, np.bool_) if __name__ == '__main__': tf.test.main()
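
# Outside the Hypothesis harness, var_init can be exercised directly. This
# is a rough usage sketch, assuming (consistent with the op names used in
# the tests above and the var.update call in var_init) that the returned
# variable exposes functional push/pop/update/read operations; the shapes
# and values are made up, and TF2 eager execution is assumed.
init_val = np.zeros((2, 3), dtype=np.float32)   # batch of 2, vectors of 3
var = var_init(max_stack_depth=4, initial_value=init_val)

mask = np.array([True, False])       # operate on the first batch member only
var = var.push(mask)                 # save its current value on the stack
var = var.update(np.ones((2, 3), dtype=np.float32), mask)
print(var.read())                    # masked-in row updated, other unchanged
var = var.pop(mask)                  # restore the saved value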