import cherrypy
import glob
import os.path
from base import IdString
import view
def request_tuple():
mimeType = 'html'
try:
accept = cherrypy.request.headers['Accept']
if 'application/json' in accept or accept == '*/*':
mimeType = 'json'
except KeyError:
pass
return cherrypy.request.method, mimeType
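# request_tuple() yields e.g. ('GET', 'json') for an AJAX client that sends an
# Accept: application/json header, and ('GET', 'html') for a typical browser hit.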
class Response(object):
'_GET etc. methods can return this to pass back HTML output'
def __init__(self, content):
self.content = content
def __call__(self):
return self.content
class Redirect(Response):
'_GET etc. methods can return this to force redirection to a URL'
def __call__(self):
return view.redirect(self.content)
class Collection(object):
'''subclass this by adding the following kinds of methods:
1. HTTP verbs, e.g. GET, POST, DELETE, as follows
_POST(self, docID, **kwargs): create the specified document.
_search(self, **kwargs): search the collection based on kwargs.
2. representation generators for a specific verb and mimeType, e.g.
get_html(self, doc, **kwargs): for a GET request,
return HTML representation of the doc object.
This will typically be a renderer of a Jinja2 template.
'''
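    # A minimal illustrative subclass (hypothetical names, not part of this module):
    #
    #     class PaperCollection(Collection):
    #         def _POST(self, docID, **kwargs):
    #             return Redirect('/papers/' + docID)
    #         def search_json(self, results, **kwargs):
    #             return json.dumps([str(r) for r in results])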
def __init__(self, name, klass, templateEnv=None, templateDir='_templates',
docArgs=None, collectionArgs=None, **templateArgs):
self.name = name
self.klass = klass
if docArgs is None:
docArgs = {}
self.docArgs = docArgs
self.collectionArgs = collectionArgs
if templateEnv: # load our template files
self.bind_templates(templateEnv, templateDir, **templateArgs)
def default(self, docID=None, *args, **kwargs):
'process all requests for this collection'
try:
method, mimeType = request_tuple()
if docID: # a specific document from this collection
docID = IdString(docID) # implements proper cmp() vs. ObjectId
invalidResponse = self.check_permission(method, docID, *args,
**kwargs)
if invalidResponse:
return invalidResponse
if not args: # perform the request
return self._request(method, mimeType, docID, **kwargs)
else: # pass request on to subcollection
try:
subcoll = getattr(self, args[0])
except AttributeError:
return view.report_error('no such subcollection: %s.%s'
% (self.name, args[0]), 404)
try:
parents = kwargs['parents'].copy()
except KeyError:
parents = {}
try:
parents[self.name] = self._GET(docID, parents=parents)
except KeyError:
return view.report_error('invalid ID: %s' % docID, 404,
"""Sorry, the data ID %s that
you requested does not exist in the database.
Please check whether you have the correct ID.""" % docID)
kwargs['parents'] = parents # pass dict of parents
return subcoll.default(*args[1:], **kwargs)
elif method == 'GET': # search the collection
return self._request('search', mimeType, **kwargs)
else:
return view.report_error('REST does not permit collection-%s'
% method, 405)
except Exception:
return view.report_error('REST collection error', 500)
default.exposed = True
def _request(self, method, mimeType, *args, **kwargs):
'dispatch to proper handler method, or return appropriate error'
try: # do we support this method?
action = getattr(self, '_' + method)
except AttributeError:
return view.report_error('%s objects do not allow %s'
% (self.name, method), 405)
try: # execute the request
o = action(*args, **kwargs)
except KeyError:
return view.report_error('Not found: %s: args=%s, kwargs=%s'
% (self.name, str(args), str(kwargs)), status=404,
webMsg="""Sorry, the data that
you requested does not exist in the database.
Please check whether you have the correct ID or spelling.""")
if isinstance(o, Response):
return o() # send the redirect
try: # do we support this mimeType?
viewFunc = getattr(self, method.lower() + '_' + mimeType)
except AttributeError:
return view.report_error('%s objects cannot return %s'
% (self.name, mimeType), 406)
try:
return viewFunc(o, **kwargs)
except Exception:
return view.report_error('view function error', 500)
    def _GET(self, docID, parents=None, **kwargs):  # None, not {}: avoids a shared mutable default
'default GET method'
kwargs.update(self.docArgs)
if not parents: # works with documents with unique ID
return self.klass(docID, **kwargs)
elif len(parents) == 1: # works with ArrayDocument
return self.klass((parents.values()[0]._id, docID), **kwargs)
else: # multiple parents
return self.klass(docID, parents=parents, **kwargs)
def bind_templates(self, env, dirpath, **kwargs):
'''load template files of the form get_paper.html, bind as
attrs of the form get_html'''
for fname in glob.glob(os.path.join(dirpath,
'*_%s.html' % self.name)):
basename = os.path.basename(fname)
template = env.get_template(basename)
methodName = basename.split('_')[0] + '_html'
v = view.TemplateView(template, self.name, **kwargs)
setattr(self, methodName, v)
def check_permission(self, method, docID, *args, **kwargs):
'this authentication stub allows all requests'
return False
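    # Override check_permission in subclasses to enforce authentication; any
    # truthy return value (e.g. view.report_error('unauthorized', 401)) is sent
    # to the client instead of performing the request.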
|
A teddy bear for Cecily in London; a guitar for Shanna in Jamaica.
Jett brings a fireman's hat to Stefan in Germany; Jett brings a miner's helmet to Bolivia.
Jett goes to Peru with pins; dance shoes for a theater troupe in New York.
Jett delivers binoculars to Ella on the Galapagos Islands; Jett brings a toy castle to Ayesha in The Maldives.
Ice skates for Canada; a unicycle for Russia.
Jett brings an archaeology kit to Akiiki, a boy in Egypt; Karl from Norway asks for a Viking helmet.
Jett takes brushes to Willem in The Netherlands; Jett brings a kite to a boy named Akas in the Himalayas who wants to fly it in a kite festival.
Mongolian Stars; Lights, Camera, Action!
Nambayar from Mongolia asks for glowing stars to decorate the inside of her tent; Riley from Hollywood directs his first action film.
Jett brings colorful paper to Yuki in Japan; Jett delivers a camera to Sam in Papua New Guinea.
Callum from Scotland wants bagpipes that blow bubbles so he can play them for Nessie; Jett has a feathered headdress for Camila from Brazil.
Jett takes a pair of running shoes that light up to Lorna; Jett travels to Hawaii to bring Keilani a hula skirt.
Jett goes to Venice, Italy with a mask for Luca to wear at his masquerade party; Martine in Paris, France needs a cake-decorating machine.
Mei from Beijing, China, requests a shadow puppet to use in a shadow play; Brigita from Romania hosts a spooky party and wants a flashlight that projects scary shapes.
Satomi is the girl who wants her cat's attention; Jett delivers a cat toy to her in Yanesen, Tokyo.
Jett visits Iulia in Romania to deliver a large mechanical key for her grandfather's Blockosaurus Park, a theme park with wonderful block dinosaurs.
Martha in Barcelona ordered some broken pieces of tiles and dishes to create a mosaic birdhouse.
Jett visits Ivan and his Dalmatian Mondo in Dubrovnik, Croatia.
Super Wings on YouTube TV.
|
# coding: utf-8
from __future__ import unicode_literals
import datetime
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class GooglePlusIE(InfoExtractor):
IE_DESC = 'Google Plus'
_VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
IE_NAME = 'plus.google'
_TEST = {
'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
'info_dict': {
'id': 'ZButuJc6CtH',
'ext': 'flv',
'upload_date': '20120613',
'uploader': '井上ヨシマサ',
'title': '嘆きの天使 降臨',
}
}
def _real_extract(self, url):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
# Step 1, Retrieve post webpage to extract further information
webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
self.report_extraction(video_id)
# Extract update date
upload_date = self._html_search_regex(
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
webpage, 'upload date', fatal=False, flags=re.VERBOSE)
if upload_date:
# Convert timestring to a format suitable for filename
upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
upload_date = upload_date.strftime('%Y%m%d')
# Extract uploader
uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
webpage, 'uploader', fatal=False)
# Extract title
# Get the first line for title
video_title = self._og_search_description(webpage).splitlines()[0]
# Step 2, Simulate clicking the image box to launch video
DOMAIN = 'https://plus.google.com/'
video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
webpage, 'video page URL')
if not video_page.startswith(DOMAIN):
video_page = DOMAIN + video_page
webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
# Extract video links all sizes
pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
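        # Each match is a (resolution, url) tuple pulled from the inline player data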
mobj = re.findall(pattern, webpage)
if len(mobj) == 0:
raise ExtractorError('Unable to extract video links')
        # Sort by the resolution field
        links = sorted(mobj)
        # Take the last entry of the sort, i.e. the highest resolution
        video_url = links[-1]
        # Keep only the URL; the resolution part of the tuple is no longer needed
        video_url = video_url[-1]
        # Decode escaped "\u0026"-style sequences in the URL
try:
video_url = video_url.decode("unicode_escape")
except AttributeError: # Python 3
video_url = bytes(video_url, 'ascii').decode('unicode-escape')
return {
'id': video_id,
'url': video_url,
'uploader': uploader,
'upload_date': upload_date,
'title': video_title,
'ext': 'flv',
}
|
Leadership, service and progress - The motto of Georgia Tech.
Explain why you are interested in your first-choice major, Mathematics?
Biochem and Bio-med Engineering - Why Michigan?
Personal Statement: Why I didn't choose to drop out.
Students have a background, talent or passion that they feel they must write about.
In addition to the generally known facts, what fascinates you the most about Georgia Tech?
Pursuit of a ribbon. Common App essay revision/quality?
Briefly describe how your past activities or work experiences will enhance our community. USD app.
My ambition to become a storyteller.
UT Austin Topic C, "Growing up as the oldest of three brothers"
Falling head over heels for science. Why have I chosen chemical engineering as a career option?
Motivation letter for BSc Economics. Can someone check whether it's good or not?
Reasons for applying to and attending Swarthmore - both entail the exciting concept of social science.
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'MetaData', fields ['xform', 'data_type', 'data_value']
        M = orm['main.metadata']
        for x in M.objects.all():
            # Full duplicates would violate the new constraint: keep the first
            # row, delete the rest.
            dupes = M.objects.filter(xform_id=x.xform_id, data_type=x.data_type, data_value=x.data_value)
            if dupes.count() > 1:
                for dupe in dupes[1:]:
                    print 'Deleting duplicate MetaData', dupe.xform_id, dupe.data_type, dupe.data_value
                    dupe.delete()
            # Rows sharing xform/data_type but differing in data_value remain
            # legal; report them for visibility only.
            partial_dupes = M.objects.filter(xform_id=x.xform_id, data_type=x.data_type)
            if partial_dupes.count() > 1:
                print 'Partially duplicate MetaData\n\t{}'.format('\n\t'.join(map(str, partial_dupes.values_list('xform_id', 'data_type', 'data_value'))))
db.create_unique(u'main_metadata', ['xform_id', 'data_type', 'data_value'])
def backwards(self, orm):
# Removing unique constraint on 'MetaData', fields ['xform', 'data_type', 'data_value']
db.delete_unique(u'main_metadata', ['xform_id', 'data_type', 'data_value'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'), ('user', 'sms_id_string'))", 'object_name': 'XForm'},
'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'instances_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'last_submission_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'main.metadata': {
'Meta': {'unique_together': "(('xform', 'data_type', 'data_value'),)", 'object_name': 'MetaData'},
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_file_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.XForm']"})
},
'main.tokenstoragemodel': {
'Meta': {'object_name': 'TokenStorageModel'},
'id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'google_id'", 'primary_key': 'True', 'to': u"orm['auth.User']"}),
'token': ('django.db.models.fields.TextField', [], {})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'home_page': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['main']
|
I'm trying to add an AdWords Destination in order to track payments. I can see that Mobile App data are required (from the Event Delivery feature): context.device.advertisingId, context.device.type, context.app.namespace, context.app.version and context.os.version.
However, I do not have a mobile app, only a website. The track call is made when a Stripe notification hits my API.
I chose server-side tracking over client-side, as recommended in the Segment docs for this kind of event (more reliable, etc.).
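For context, the call is roughly the following (a simplified sketch using Segment's analytics-python library; the write key, user ID and properties are illustrative):

import analytics  # Segment's analytics-python library

analytics.write_key = 'YOUR_WRITE_KEY'

# Fired from the Stripe webhook handler once the payment notification arrives.
analytics.track(
    user_id='user_123',
    event='Order Completed',
    properties={'revenue': 49.99, 'currency': 'USD'},
)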
What is the recommended way to track AdWords conversions in this case?
Hello, I know it is an old post but I have the same question. Did you find a way to track these "non-client-side" conversions?
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict, namedtuple
import time
import urlparse
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
class NodeNotFound(Exception):
pass
ESInstanceConfig = namedtuple(
'ESInstanceConfig', [
'pshard_stats',
'cluster_stats',
'password',
'service_check_tags',
'tags',
'timeout',
'url',
'username',
'pending_task_stats',
'ssl_verify',
'ssl_cert',
'ssl_key',
])
class ESCheck(AgentCheck):
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
DEFAULT_TIMEOUT = 5
    # Cluster-wide metrics, pre-aggregated by ES, compatible with all ES versions
PRIMARY_SHARD_METRICS = {
"elasticsearch.primaries.docs.count": ("gauge", "_all.primaries.docs.count"),
"elasticsearch.primaries.docs.deleted": ("gauge", "_all.primaries.docs.deleted"),
"elasticsearch.primaries.store.size": ("gauge", "_all.primaries.store.size_in_bytes"),
"elasticsearch.primaries.indexing.index.total": ("gauge", "_all.primaries.indexing.index_total"),
"elasticsearch.primaries.indexing.index.time": ("gauge", "_all.primaries.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.index.current": ("gauge", "_all.primaries.indexing.index_current"),
"elasticsearch.primaries.indexing.delete.total": ("gauge", "_all.primaries.indexing.delete_total"),
"elasticsearch.primaries.indexing.delete.time": ("gauge", "_all.primaries.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.delete.current": ("gauge", "_all.primaries.indexing.delete_current"),
"elasticsearch.primaries.get.total": ("gauge", "_all.primaries.get.total"),
"elasticsearch.primaries.get.time": ("gauge", "_all.primaries.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.current": ("gauge", "_all.primaries.get.current"),
"elasticsearch.primaries.get.exists.total": ("gauge", "_all.primaries.get.exists_total"),
"elasticsearch.primaries.get.exists.time": ("gauge", "_all.primaries.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.missing.total": ("gauge", "_all.primaries.get.missing_total"),
"elasticsearch.primaries.get.missing.time": ("gauge", "_all.primaries.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.total": ("gauge", "_all.primaries.search.query_total"),
"elasticsearch.primaries.search.query.time": ("gauge", "_all.primaries.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.current": ("gauge", "_all.primaries.search.query_current"),
"elasticsearch.primaries.search.fetch.total": ("gauge", "_all.primaries.search.fetch_total"),
"elasticsearch.primaries.search.fetch.time": ("gauge", "_all.primaries.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.fetch.current": ("gauge", "_all.primaries.search.fetch_current")
}
PRIMARY_SHARD_METRICS_POST_1_0 = {
"elasticsearch.primaries.merges.current": ("gauge", "_all.primaries.merges.current"),
"elasticsearch.primaries.merges.current.docs": ("gauge", "_all.primaries.merges.current_docs"),
"elasticsearch.primaries.merges.current.size": ("gauge", "_all.primaries.merges.current_size_in_bytes"),
"elasticsearch.primaries.merges.total": ("gauge", "_all.primaries.merges.total"),
"elasticsearch.primaries.merges.total.time": ("gauge", "_all.primaries.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.merges.total.docs": ("gauge", "_all.primaries.merges.total_docs"),
"elasticsearch.primaries.merges.total.size": ("gauge", "_all.primaries.merges.total_size_in_bytes"),
"elasticsearch.primaries.refresh.total": ("gauge", "_all.primaries.refresh.total"),
"elasticsearch.primaries.refresh.total.time": ("gauge", "_all.primaries.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.flush.total": ("gauge", "_all.primaries.flush.total"),
"elasticsearch.primaries.flush.total.time": ("gauge", "_all.primaries.flush.total_time_in_millis", lambda v: float(v)/1000)
}
STATS_METRICS = { # Metrics that are common to all Elasticsearch versions
"elasticsearch.docs.count": ("gauge", "indices.docs.count"),
"elasticsearch.docs.deleted": ("gauge", "indices.docs.deleted"),
"elasticsearch.store.size": ("gauge", "indices.store.size_in_bytes"),
"elasticsearch.indexing.index.total": ("gauge", "indices.indexing.index_total"),
"elasticsearch.indexing.index.time": ("gauge", "indices.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.index.current": ("gauge", "indices.indexing.index_current"),
"elasticsearch.indexing.delete.total": ("gauge", "indices.indexing.delete_total"),
"elasticsearch.indexing.delete.time": ("gauge", "indices.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.delete.current": ("gauge", "indices.indexing.delete_current"),
"elasticsearch.get.total": ("gauge", "indices.get.total"),
"elasticsearch.get.time": ("gauge", "indices.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.current": ("gauge", "indices.get.current"),
"elasticsearch.get.exists.total": ("gauge", "indices.get.exists_total"),
"elasticsearch.get.exists.time": ("gauge", "indices.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.missing.total": ("gauge", "indices.get.missing_total"),
"elasticsearch.get.missing.time": ("gauge", "indices.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.total": ("gauge", "indices.search.query_total"),
"elasticsearch.search.query.time": ("gauge", "indices.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.current": ("gauge", "indices.search.query_current"),
"elasticsearch.search.fetch.total": ("gauge", "indices.search.fetch_total"),
"elasticsearch.search.fetch.time": ("gauge", "indices.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.fetch.current": ("gauge", "indices.search.fetch_current"),
"elasticsearch.indices.segments.count": ("gauge", "indices.segments.count"),
"elasticsearch.indices.segments.memory_in_bytes": ("gauge", "indices.segments.memory_in_bytes"),
"elasticsearch.merges.current": ("gauge", "indices.merges.current"),
"elasticsearch.merges.current.docs": ("gauge", "indices.merges.current_docs"),
"elasticsearch.merges.current.size": ("gauge", "indices.merges.current_size_in_bytes"),
"elasticsearch.merges.total": ("gauge", "indices.merges.total"),
"elasticsearch.merges.total.time": ("gauge", "indices.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.merges.total.docs": ("gauge", "indices.merges.total_docs"),
"elasticsearch.merges.total.size": ("gauge", "indices.merges.total_size_in_bytes"),
"elasticsearch.refresh.total": ("gauge", "indices.refresh.total"),
"elasticsearch.refresh.total.time": ("gauge", "indices.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.flush.total": ("gauge", "indices.flush.total"),
"elasticsearch.flush.total.time": ("gauge", "indices.flush.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.process.open_fd": ("gauge", "process.open_file_descriptors"),
"elasticsearch.transport.rx_count": ("gauge", "transport.rx_count"),
"elasticsearch.transport.tx_count": ("gauge", "transport.tx_count"),
"elasticsearch.transport.rx_size": ("gauge", "transport.rx_size_in_bytes"),
"elasticsearch.transport.tx_size": ("gauge", "transport.tx_size_in_bytes"),
"elasticsearch.transport.server_open": ("gauge", "transport.server_open"),
"elasticsearch.thread_pool.bulk.active": ("gauge", "thread_pool.bulk.active"),
"elasticsearch.thread_pool.bulk.threads": ("gauge", "thread_pool.bulk.threads"),
"elasticsearch.thread_pool.bulk.queue": ("gauge", "thread_pool.bulk.queue"),
"elasticsearch.thread_pool.bulk.rejected": ("gauge", "thread_pool.bulk.rejected"),
"elasticsearch.thread_pool.flush.active": ("gauge", "thread_pool.flush.active"),
"elasticsearch.thread_pool.flush.threads": ("gauge", "thread_pool.flush.threads"),
"elasticsearch.thread_pool.flush.queue": ("gauge", "thread_pool.flush.queue"),
"elasticsearch.thread_pool.generic.active": ("gauge", "thread_pool.generic.active"),
"elasticsearch.thread_pool.generic.threads": ("gauge", "thread_pool.generic.threads"),
"elasticsearch.thread_pool.generic.queue": ("gauge", "thread_pool.generic.queue"),
"elasticsearch.thread_pool.get.active": ("gauge", "thread_pool.get.active"),
"elasticsearch.thread_pool.get.threads": ("gauge", "thread_pool.get.threads"),
"elasticsearch.thread_pool.get.queue": ("gauge", "thread_pool.get.queue"),
"elasticsearch.thread_pool.index.active": ("gauge", "thread_pool.index.active"),
"elasticsearch.thread_pool.index.threads": ("gauge", "thread_pool.index.threads"),
"elasticsearch.thread_pool.index.queue": ("gauge", "thread_pool.index.queue"),
"elasticsearch.thread_pool.management.active": ("gauge", "thread_pool.management.active"),
"elasticsearch.thread_pool.management.threads": ("gauge", "thread_pool.management.threads"),
"elasticsearch.thread_pool.management.queue": ("gauge", "thread_pool.management.queue"),
"elasticsearch.thread_pool.percolate.active": ("gauge", "thread_pool.percolate.active"),
"elasticsearch.thread_pool.percolate.threads": ("gauge", "thread_pool.percolate.threads"),
"elasticsearch.thread_pool.percolate.queue": ("gauge", "thread_pool.percolate.queue"),
"elasticsearch.thread_pool.refresh.active": ("gauge", "thread_pool.refresh.active"),
"elasticsearch.thread_pool.refresh.threads": ("gauge", "thread_pool.refresh.threads"),
"elasticsearch.thread_pool.refresh.queue": ("gauge", "thread_pool.refresh.queue"),
"elasticsearch.thread_pool.search.active": ("gauge", "thread_pool.search.active"),
"elasticsearch.thread_pool.search.threads": ("gauge", "thread_pool.search.threads"),
"elasticsearch.thread_pool.search.queue": ("gauge", "thread_pool.search.queue"),
"elasticsearch.thread_pool.snapshot.active": ("gauge", "thread_pool.snapshot.active"),
"elasticsearch.thread_pool.snapshot.threads": ("gauge", "thread_pool.snapshot.threads"),
"elasticsearch.thread_pool.snapshot.queue": ("gauge", "thread_pool.snapshot.queue"),
"elasticsearch.http.current_open": ("gauge", "http.current_open"),
"elasticsearch.http.total_opened": ("gauge", "http.total_opened"),
"jvm.mem.heap_committed": ("gauge", "jvm.mem.heap_committed_in_bytes"),
"jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes"),
"jvm.mem.heap_in_use": ("gauge", "jvm.mem.heap_used_percent"),
"jvm.mem.heap_max": ("gauge", "jvm.mem.heap_max_in_bytes"),
"jvm.mem.non_heap_committed": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
"jvm.mem.non_heap_used": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
"jvm.threads.count": ("gauge", "jvm.threads.count"),
"jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
"elasticsearch.fs.total.total_in_bytes": ("gauge", "fs.total.total_in_bytes"),
"elasticsearch.fs.total.free_in_bytes": ("gauge", "fs.total.free_in_bytes"),
"elasticsearch.fs.total.available_in_bytes": ("gauge", "fs.total.available_in_bytes"),
}
JVM_METRICS_POST_0_90_10 = {
"jvm.gc.collectors.young.count": ("gauge", "jvm.gc.collectors.young.collection_count"),
"jvm.gc.collectors.young.collection_time": ("gauge", "jvm.gc.collectors.young.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collectors.old.count": ("gauge", "jvm.gc.collectors.old.collection_count"),
"jvm.gc.collectors.old.collection_time": ("gauge", "jvm.gc.collectors.old.collection_time_in_millis", lambda v: float(v)/1000)
}
JVM_METRICS_PRE_0_90_10 = {
"jvm.gc.concurrent_mark_sweep.count": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_count"),
"jvm.gc.concurrent_mark_sweep.collection_time": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.par_new.count": ("gauge", "jvm.gc.collectors.ParNew.collection_count"),
"jvm.gc.par_new.collection_time": ("gauge", "jvm.gc.collectors.ParNew.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collection_count": ("gauge", "jvm.gc.collection_count"),
"jvm.gc.collection_time": ("gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v)/1000),
}
ADDITIONAL_METRICS_POST_0_90_5 = {
"elasticsearch.search.fetch.open_contexts": ("gauge", "indices.search.open_contexts"),
"elasticsearch.fielddata.size": ("gauge", "indices.fielddata.memory_size_in_bytes"),
"elasticsearch.fielddata.evictions": ("gauge", "indices.fielddata.evictions"),
}
ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0 = {
"elasticsearch.cache.filter.evictions": ("gauge", "indices.filter_cache.evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.filter_cache.memory_size_in_bytes"),
"elasticsearch.id_cache.size": ("gauge", "indices.id_cache.memory_size_in_bytes"),
}
ADDITIONAL_METRICS_PRE_0_90_5 = {
"elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
"elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),
"elasticsearch.cache.filter.count": ("gauge", "indices.cache.filter_count"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.cache.filter_evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.cache.filter_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_0_0 = {
"elasticsearch.indices.translog.size_in_bytes": ("gauge", "indices.translog.size_in_bytes"),
"elasticsearch.indices.translog.operations": ("gauge", "indices.translog.operations"),
}
ADDITIONAL_METRICS_1_x = { # Stats are only valid for v1.x
"elasticsearch.fs.total.disk_reads": ("rate", "fs.total.disk_reads"),
"elasticsearch.fs.total.disk_writes": ("rate", "fs.total.disk_writes"),
"elasticsearch.fs.total.disk_io_op": ("rate", "fs.total.disk_io_op"),
"elasticsearch.fs.total.disk_read_size_in_bytes": ("gauge", "fs.total.disk_read_size_in_bytes"),
"elasticsearch.fs.total.disk_write_size_in_bytes": ("gauge", "fs.total.disk_write_size_in_bytes"),
"elasticsearch.fs.total.disk_io_size_in_bytes": ("gauge", "fs.total.disk_io_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_3_0 = {
"elasticsearch.indices.segments.index_writer_memory_in_bytes": ("gauge", "indices.segments.index_writer_memory_in_bytes"),
"elasticsearch.indices.segments.version_map_memory_in_bytes": ("gauge", "indices.segments.version_map_memory_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_4_0 = {
"elasticsearch.indices.segments.index_writer_max_memory_in_bytes": ("gauge", "indices.segments.index_writer_max_memory_in_bytes"),
"elasticsearch.indices.segments.fixed_bit_set_memory_in_bytes": ("gauge", "indices.segments.fixed_bit_set_memory_in_bytes"),
}
ADDITIONAL_METRICS_PRE_2_0 = {
"elasticsearch.thread_pool.merge.active": ("gauge", "thread_pool.merge.active"),
"elasticsearch.thread_pool.merge.threads": ("gauge", "thread_pool.merge.threads"),
"elasticsearch.thread_pool.merge.queue": ("gauge", "thread_pool.merge.queue"),
}
CLUSTER_HEALTH_METRICS = {
"elasticsearch.number_of_nodes": ("gauge", "number_of_nodes"),
"elasticsearch.number_of_data_nodes": ("gauge", "number_of_data_nodes"),
"elasticsearch.active_primary_shards": ("gauge", "active_primary_shards"),
"elasticsearch.active_shards": ("gauge", "active_shards"),
"elasticsearch.relocating_shards": ("gauge", "relocating_shards"),
"elasticsearch.initializing_shards": ("gauge", "initializing_shards"),
"elasticsearch.unassigned_shards": ("gauge", "unassigned_shards"),
"elasticsearch.cluster_status": ("gauge", "status", lambda v: {"red": 0, "yellow": 1, "green": 2}.get(v, -1)),
}
CLUSTER_PENDING_TASKS = {
"elasticsearch.pending_tasks_total": ("gauge", "pending_task_total"),
"elasticsearch.pending_tasks_priority_high": ("gauge", "pending_tasks_priority_high"),
"elasticsearch.pending_tasks_priority_urgent": ("gauge", "pending_tasks_priority_urgent")
}
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.cluster_status = {}
def get_instance_config(self, instance):
url = instance.get('url')
if url is None:
            raise Exception("A URL must be specified in the instance")
pshard_stats = _is_affirmative(instance.get('pshard_stats', False))
cluster_stats = _is_affirmative(instance.get('cluster_stats', False))
if 'is_external' in instance:
cluster_stats = _is_affirmative(instance.get('is_external', False))
pending_task_stats = _is_affirmative(instance.get('pending_task_stats', True))
# Support URLs that have a path in them from the config, for
# backwards-compatibility.
parsed = urlparse.urlparse(url)
if parsed[2] != "":
url = "%s://%s" % (parsed[0], parsed[1])
port = parsed.port
host = parsed.hostname
custom_tags = instance.get('tags', [])
service_check_tags = [
'host:%s' % host,
'port:%s' % port
]
service_check_tags.extend(custom_tags)
# Tag by URL so we can differentiate the metrics
# from multiple instances
tags = ['url:%s' % url]
tags.extend(custom_tags)
timeout = instance.get('timeout') or self.DEFAULT_TIMEOUT
config = ESInstanceConfig(
pshard_stats=pshard_stats,
cluster_stats=cluster_stats,
password=instance.get('password'),
service_check_tags=service_check_tags,
ssl_cert=instance.get('ssl_cert'),
ssl_key=instance.get('ssl_key'),
ssl_verify=instance.get('ssl_verify'),
tags=tags,
timeout=timeout,
url=url,
username=instance.get('username'),
pending_task_stats=pending_task_stats
)
return config
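    # For reference, an illustrative instance entry (YAML) consumed by
    # get_instance_config; all keys except url are optional:
    #
    #   - url: http://localhost:9200
    #     username: es_user
    #     password: es_pass
    #     cluster_stats: false
    #     pshard_stats: false
    #     pending_task_stats: true
    #     ssl_verify: true
    #     timeout: 5
    #     tags:
    #       - env:staging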
def check(self, instance):
config = self.get_instance_config(instance)
# Check ES version for this instance and define parameters
# (URLs and metrics) accordingly
version = self._get_es_version(config)
health_url, nodes_url, stats_url, pshard_stats_url, pending_tasks_url, stats_metrics, \
pshard_stats_metrics = self._define_params(version, config.cluster_stats)
        # Load cluster-wide data
if config.pshard_stats:
pshard_stats_url = urlparse.urljoin(config.url, pshard_stats_url)
pshard_stats_data = self._get_data(pshard_stats_url, config)
self._process_pshard_stats_data(pshard_stats_data, config, pshard_stats_metrics)
# Load stats data.
stats_url = urlparse.urljoin(config.url, stats_url)
stats_data = self._get_data(stats_url, config)
self._process_stats_data(nodes_url, stats_data, stats_metrics, config)
# Load the health data.
health_url = urlparse.urljoin(config.url, health_url)
health_data = self._get_data(health_url, config)
self._process_health_data(health_data, config)
if config.pending_task_stats:
# Load the pending_tasks data.
pending_tasks_url = urlparse.urljoin(config.url, pending_tasks_url)
pending_tasks_data = self._get_data(pending_tasks_url, config)
self._process_pending_tasks_data(pending_tasks_data, config)
        # If we're here, we did not hit any ES connection issues
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.OK,
tags=config.service_check_tags
)
def _get_es_version(self, config):
""" Get the running version of elasticsearch.
"""
try:
data = self._get_data(config.url, config, send_sc=False)
version = map(int, data['version']['number'].split('.')[0:3])
except Exception as e:
self.warning(
"Error while trying to get Elasticsearch version "
"from %s %s"
% (config.url, str(e))
)
version = [1, 0, 0]
self.service_metadata('version', version)
self.log.debug("Elasticsearch version is %s" % version)
return version
def _define_params(self, version, cluster_stats):
""" Define the set of URLs and METRICS to use depending on the
running ES version.
"""
pshard_stats_url = "/_stats"
if version >= [0, 90, 10]:
# ES versions 0.90.10 and above
health_url = "/_cluster/health?pretty=true"
nodes_url = "/_nodes?network=true"
pending_tasks_url = "/_cluster/pending_tasks?pretty=true"
# For "external" clusters, we want to collect from all nodes.
if cluster_stats:
stats_url = "/_nodes/stats?all=true"
else:
stats_url = "/_nodes/_local/stats?all=true"
additional_metrics = self.JVM_METRICS_POST_0_90_10
else:
health_url = "/_cluster/health?pretty=true"
nodes_url = "/_cluster/nodes?network=true"
pending_tasks_url = None
if cluster_stats:
stats_url = "/_cluster/nodes/stats?all=true"
else:
stats_url = "/_cluster/nodes/_local/stats?all=true"
additional_metrics = self.JVM_METRICS_PRE_0_90_10
stats_metrics = dict(self.STATS_METRICS)
stats_metrics.update(additional_metrics)
### Additional Stats metrics ###
if version >= [0, 90, 5]:
# ES versions 0.90.5 and above
additional_metrics = self.ADDITIONAL_METRICS_POST_0_90_5
else:
# ES version 0.90.4 and below
additional_metrics = self.ADDITIONAL_METRICS_PRE_0_90_5
stats_metrics.update(additional_metrics)
if version >= [1, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_0_0)
if version < [2, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_PRE_2_0)
if version >= [0, 90, 5]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0)
if version >= [1, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_1_x)
if version >= [1, 3, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_3_0)
if version >= [1, 4, 0]:
# ES versions 1.4 and above
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_4_0)
# Version specific stats metrics about the primary shards
pshard_stats_metrics = dict(self.PRIMARY_SHARD_METRICS)
if version >= [1, 0, 0]:
additional_metrics = self.PRIMARY_SHARD_METRICS_POST_1_0
pshard_stats_metrics.update(additional_metrics)
return health_url, nodes_url, stats_url, pshard_stats_url, pending_tasks_url, \
stats_metrics, pshard_stats_metrics
def _get_data(self, url, config, send_sc=True):
""" Hit a given URL and return the parsed json
"""
# Load basic authentication configuration, if available.
if config.username and config.password:
auth = (config.username, config.password)
else:
auth = None
# Load SSL configuration, if available.
# ssl_verify can be a bool or a string (http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification)
if isinstance(config.ssl_verify, bool) or isinstance(config.ssl_verify, str):
verify = config.ssl_verify
else:
verify = None
if config.ssl_cert and config.ssl_key:
cert = (config.ssl_cert, config.ssl_key)
elif config.ssl_cert:
cert = config.ssl_cert
else:
cert = None
try:
resp = requests.get(
url,
timeout=config.timeout,
headers=headers(self.agentConfig),
auth=auth,
verify=verify,
cert=cert
)
resp.raise_for_status()
except Exception as e:
if send_sc:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {0} when hitting {1}".format(e, url),
tags=config.service_check_tags
)
raise
return resp.json()
def _process_pending_tasks_data(self, data, config):
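        # 'data' is the parsed /_cluster/pending_tasks response; roughly:
        #   {"tasks": [{"priority": "urgent", ...}, {"priority": "high", ...}]}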
p_tasks = defaultdict(int)
for task in data.get('tasks', []):
p_tasks[task.get('priority')] += 1
node_data = {
'pending_task_total': sum(p_tasks.values()),
'pending_tasks_priority_high': p_tasks['high'],
'pending_tasks_priority_urgent': p_tasks['urgent'],
}
for metric in self.CLUSTER_PENDING_TASKS:
# metric description
desc = self.CLUSTER_PENDING_TASKS[metric]
self._process_metric(node_data, metric, *desc, tags=config.tags)
def _process_stats_data(self, nodes_url, data, stats_metrics, config):
cluster_stats = config.cluster_stats
for node_data in data['nodes'].itervalues():
metric_hostname = None
metrics_tags = list(config.tags)
# Resolve the node's name
node_name = node_data.get('name')
if node_name:
metrics_tags.append(
u"node_name:{}".format(node_name)
)
# Resolve the node's hostname
if cluster_stats:
for k in ['hostname', 'host']:
if k in node_data:
metric_hostname = node_data[k]
break
for metric, desc in stats_metrics.iteritems():
self._process_metric(
node_data, metric, *desc,
tags=metrics_tags, hostname=metric_hostname
)
def _process_pshard_stats_data(self, data, config, pshard_stats_metrics):
for metric, desc in pshard_stats_metrics.iteritems():
self._process_metric(data, metric, *desc, tags=config.tags)
    def _process_metric(self, data, metric, xtype, path, xform=None,
                        tags=None, hostname=None):
        """data: dictionary containing all the stats
        metric: datadog metric
        xtype: submission type, "gauge" or "rate"
        path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
            (so data['thread_pool']['bulk']['queue'] is the value submitted)
        xform: a lambda to apply to the numerical value
        """
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key, None)
else:
break
if value is not None:
if xform:
value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self._metric_not_found(metric, path)
def _process_health_data(self, data, config):
if self.cluster_status.get(config.url) is None:
self.cluster_status[config.url] = data['status']
if data['status'] in ["yellow", "red"]:
event = self._create_event(data['status'], tags=config.tags)
self.event(event)
if data['status'] != self.cluster_status.get(config.url):
self.cluster_status[config.url] = data['status']
event = self._create_event(data['status'], tags=config.tags)
self.event(event)
for metric, desc in self.CLUSTER_HEALTH_METRICS.iteritems():
self._process_metric(data, metric, *desc, tags=config.tags)
# Process the service check
cluster_status = data['status']
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = "{tag} on cluster \"{cluster_name}\" "\
"| active_shards={active_shards} "\
"| initializing_shards={initializing_shards} "\
"| relocating_shards={relocating_shards} "\
"| unassigned_shards={unassigned_shards} "\
"| timed_out={timed_out}" \
.format(**data)
self.service_check(
self.SERVICE_CHECK_CLUSTER_STATUS,
status,
message=msg,
tags=config.service_check_tags
)
def _metric_not_found(self, metric, path):
self.log.debug("Metric not found: %s -> %s", path, metric)
def _create_event(self, status, tags=None):
hostname = self.hostname.decode('utf-8')
if status == "red":
alert_type = "error"
msg_title = "%s is %s" % (hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "%s is %s" % (hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "%s recovered as %s" % (hostname, status)
msg = "ElasticSearch: %s just reported as %s" % (hostname, status)
return {
'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text': msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname,
'tags': tags
}
|
This page displays the latest Hostingcon promo codes in April 2019. With these updated valid Hostingcon promo codes, you can save more money when buying at Hostingcon online stores. We are adding new Hostingcon promo codes daily. You can find the top Hostingcon promo codes, deals and coupons now and later.
Hostingcon Sign Up & Discount On Hostingcon Global 2019 In San Diego, Ca.
Sign Up & Take Save On Hostingcon Global 2019 In San Diego, Ca.
Hostingcon Extra $60 Discount Full Conference Pass.
Extra $60 On Full Conference Pass.
Hostingcon Get An Extra On Hostingcon Global 2019 San Diego, Ca.
Get The Advantage Of A Save On Hostingcon Global 2019 San Diego, Ca.
Additional Save On Early Bird Registration For Hostingcon Europe 2019 Conference At Passenger Terminal In Amsterdam, Nl.
Hostingcon Take Up To €25 Discount Registration For Hostingcon Europe.
Extra €25 On Registration For Hostingcon Europe In Amsterdam.
Extra €25 On Early Bird Registration For Hostingcon Europe At Passenger Terminal Amsterdam.
Hostingcon €25 Discount Hostingcon Europe Pass In Amsterdam.
Save Up To €25 On Hostingcon Europe Pass In Amsterdam.
Hostingcon Save An Extra 50% Discount Registration For Hostingcon In Amsterdam, Netherlands.
Get Extra 50% On Registration For Hostingcon In Amsterdam, Netherlands.
Hostingcon Additional $60 Discount Or More Saving $100 Discount Pass For Hostingcon Global 2019.
Take An Extra $60 Discount Or More Saving $100 Discount Pass For Hostingcon Global 2019.
Hostingcon Enjoy On 2019 Registration.
Take Up To On 2019 Registration.
Hostingcon Get On Registration To Hostingcon 2019 With Code.
Save Up To On Registration To Hostingcon 2019 With Code.
Hostingcon Additional $60 Discount Invoice.
Save Up To $60 Discount Invoice.
Hostingcon Save $60 Discount A Full Conference Pass.
Extra $60 Discount A Full Conference Pass. Expires 6/18/2019.
Hostingcon Plus $5 Discount Hostingcon 2019 Registration.
Get Up To $5 Discount Hostingcon 2019 Registration With Each Pass Type. Expires On 06/18/2019.
Hostingcon Save An Extra $60 On Registration For Hosting Con 2019.
Get $60 On Registration For Hosting Con 2019.
Hostingcon For Hostingcon 2019 In San Diego $60 Discount Full Conference With Lunch Pass.
Hostingcon Up To 30% Off.
Take The Advantage Of 30% Discount A Day Pass Or 60% Discount A Full Conference Pass.
Hostingcon Up To An Extra $200 Off.
Extra $200 On Hostingcon 2019 Registration.
Hostingcon 2019 Early Bird Discount Expires After Monday. Use Promo Code Webhostingtalk2019 & Save An Extra $60.
Extra $60 On 2019 Event Registration. Expires 6/19/2019.
Hostingcon Receive An Extra $260 Off.
Save $260 Discount Registration To Hostingcon 2019 On July 16-18, 2019 In Boston, Massachusetts.
Hostingcon Save Up To $60 Discount Full Registration.
Get The Advantage Of $60 Discount Full Registration, Or $30 Discount A Single Day Pass For Hostingcon 2019. Expires 06/19/2019.
Hostingcon Get An Extra $60 Off.
Extra $60 Discount Full Pass.
Hostingcon Save An Additional A Save When You Signup.
Receive A Save When You Signup.
|
#!/usr/bin/env python2.7
#######
## bindump_to_list.py
## (c) 2016 David Gschwend
##
## Usage: python2.7 bindump_to_list.py file.bin
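## Example: python2.7 bindump_to_list.py file.bin -x -c 8 -p 2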
#######
from __future__ import print_function # print without newline
import argparse
import struct
import math
# Parse arguments
parser = argparse.ArgumentParser(description='Print a binary float32 memory dump in human-readable format')
### Positional arguments
parser.add_argument('bin_file', help='Binary dump of float32 memory region')
### Optional arguments
parser.add_argument('-x', '--hex', action='store_true', help='Use Hex Address')
parser.add_argument('-c', '--cols', type=int, help='Number of Columns', default=1)
parser.add_argument('-p', '--precision', type=int, help='Number of Places after Comma', default=3)
parser.add_argument('-i', '--intwidth', type=int, help='Number of Places before Comma', default=3)
args = vars(parser.parse_args())
filename = args["bin_file"]
addrformat = "X" if args["hex"] else "d"
cols = args["cols"]
precision = args["precision"]
intwidth = args["intwidth"]
# Read Binary Contents
print("Using input file: {}".format(filename))
binary = ""
try:
with open(filename, "rb") as f:
        binary = f.read()
except IOError:
print("Could not open file {}".format(filename))
raise
# Interpret Binary File as List of Floats
num_floats = len(binary)/struct.calcsize('f')
print("Interpreting as {} float32 values".format(num_floats))
# Convert to List of Floats
floats = []
try:
floats = struct.unpack('f'*num_floats, binary)
except struct.error:
print("Could not convert to floats!")
raise
# Print to stdout
printbase = (16 if addrformat=="X" else 10)
addrwidth = max(int(math.ceil(math.log(4*num_floats, printbase))), 6)
datawidth = intwidth + precision + 1
hdrformat = "{:%ds} {:>%ds}"%(addrwidth, datawidth)
addrformat = "\n{:0%d%s} "%(addrwidth, addrformat)
dataformat = "{:%d.%df} "%(datawidth, precision)
print("\nValue Dump:\n")
print(hdrformat.format("ADDR", "VALUES"))
addr = 0
for f in floats:
if (addr % cols == 0):
print(addrformat.format(addr*4), end="")
print(dataformat.format(f), end="")
addr += 1
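# Example output with the defaults (illustrative values):
#
#   ADDR    VALUES
#
#   000000   1.000
#   000004  -0.500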
|
Arezou first came to the UK in September 2017 after living in Iran, having also spent time studying in Germany. When she first arrived she was keen to find a job, so she attended job fairs and handed out CVs to numerous companies, with no luck. Arezou then attended the London Job Show in Westfield, where she discovered Love London Working; after talking to staff she decided to register, as she knew we could help her!
Arezou was then matched with a local advisor, Hexagon's Lionne, who talked her through the support we could offer. Her first task was to help Arezou reformat her CV so it was easy for employers to read. Lionne showed Arezou how to write a cover letter so she could really sell herself to potential employers! Lionne also ran mock interviews with Arezou so she could prepare, and gave her constant feedback on how to improve her answers. When Arezou wasn't with Lionne at face-to-face appointments, there was ongoing support via email and regular check-ins to see how her job search was going.
After 3-4 months Lionne told Arezou about a temporary admin position that had arisen within Hexagon, working in the stock improvement department. Arezou applied successfully and started the admin position, and after only two months gained a full-time position working in responsive repairs. Arezou said, 'I was only given this chance in Hexagon'.
Having joined Hexagon as a full-time employee, Arezou was told by Lionne about an upcoming online business admin course with Learning Curve, an opportunity she seized, stating 'I was really keen to learn'. She was then given the opportunity to attend a one-day workshop on dealing with challenging complaints, as her line manager thought this would be relevant, displaying her determination for the role.
Seven months on, Arezou is still in her position and loving her job, learning new things every day: 'I am learning to deal with all different kinds of people'.
Congratulations and well done Arezou!
|
MODE_ARM = 0
MODE_THUMB = 1
MODE_JAZELLE = 2
#IFLAGS - keep bottom 8-bits for cross-platform flags like envi.IF_NOFALL and envi.IF_BRFALL
IF_PSR_S = 1<<32 # This DP instruction can update CPSR
IF_B = 1<<33 # Byte
IF_H = 1<<35 # HalfWord
IF_S = 1<<36 # Signed
IF_D = 1<<37 # Dword
IF_L = 1<<38 # Long-store (eg. Dblword Precision) for STC
IF_T = 1<<39 # Translate for strCCbt
IF_W = 1<<40 # Write Back for STM/LDM (!)
IF_UM = 1<<41 # User Mode Registers for STM/LDM (^) (obviously no R15)
IF_DAIB_SHFT = 56 # number of bits to shift the DAIB bits down to 0; this chops off the "is DAIB present" bit that the following flags store.
IF_DAIB_MASK = 7<<(IF_DAIB_SHFT-1)
IF_DA = 1<<(IF_DAIB_SHFT-1) # Decrement After
IF_IA = 3<<(IF_DAIB_SHFT-1) # Increment After
IF_DB = 5<<(IF_DAIB_SHFT-1) # Decrement Before
IF_IB = 7<<(IF_DAIB_SHFT-1) # Increment Before
IF_DAIB_B = 5<<(IF_DAIB_SHFT-1) # Before mask
IF_DAIB_I = 3<<(IF_DAIB_SHFT-1) # Increment mask
IF_THUMB32 = 1<<50 # thumb32
IF_VQ = 1<<51 # Adv SIMD: operation uses saturating arithmetic
IF_VR = 1<<52 # Adv SIMD: operation performs rounding
IF_VD = 1<<53 # Adv SIMD: operation doubles the result
IF_VH = 1<<54 # Adv SIMD: operation halves the result
IF_SYS_MODE = 1<<58 # instruction is encoded to be executed in SYSTEM mode, not USER mode
OF_W = 1<<8 # Write back
OF_UM = 1<<9 # Usermode, or if r15 included set current SPSR -> CPSR
OSZFMT_BYTE = "B"
OSZFMT_HWORD = "<H" # Introduced in ARMv4
OSZFMT_WORD = "<I"
OSZ_BYTE = 1
OSZ_HWORD = 2
OSZ_WORD = 4
fmts = [None, OSZFMT_BYTE, OSZFMT_HWORD, None, OSZFMT_WORD] # struct format string for a given operand size in bytes
COND_EQ = 0x0 # z==1 (equal)
COND_NE = 0x1 # z==0 (not equal)
COND_CS = 0x2 # c==1 (carry set/unsigned higher or same)
COND_CC = 0x3 # c==0 (carry clear/unsigned lower)
COND_MI = 0x4 # n==1 (minus/negative)
COND_PL = 0x5 # n==0 (plus/positive or zero)
COND_VS = 0x6 # v==1 (overflow)
COND_VC = 0x7 # v==0 (no overflow)
COND_HI = 0x8 # c==1 and z==0 (unsigned higher)
COND_LO = 0x9 # c==0 or z==1 (unsigned lower or same)
COND_GE = 0xA # n==v (signed greater than or equal) (n==1 and v==1) or (n==0 and v==0)
COND_LT = 0xB # n!=v (signed less than) (n==1 and v==0) or (n==0 and v==1)
COND_GT = 0xC # z==0 and n==v (signed greater than)
COND_LE = 0xD # z==1 and n!=v (signed less than or equal)
COND_AL = 0xE # always
COND_EXTENDED = 0xF # special case - see conditional 0b1111
cond_codes = {
COND_EQ:"eq", # Equal Z set
COND_NE:"ne", # Not equal Z clear
COND_CS:"cs", #/HS Carry set/unsigned higher or same C set
COND_CC:"cc", #/LO Carry clear/unsigned lower C clear
COND_MI:"mi", # Minus/negative N set
COND_PL:"pl", # Plus/positive or zero N clear
COND_VS:"vs", # Overflow V set
COND_VC:"vc", # No overflow V clear
COND_HI:"hi", # Unsigned higher C set and Z clear
COND_LO:"lo", # Unsigned lower or same C clear or Z set
COND_GE:"ge", # Signed greater than or equal N set and V set, or N clear and V clear (N == V)
COND_LT:"lt", # Signed less than N set and V clear, or N clear and V set (N!= V)
COND_GT:"gt", # Signed greater than Z clear, and either N set and V set, or N clear and V clear (Z == 0,N == V)
COND_LE:"le", # Signed less than or equal Z set, or N set and V clear, or N clear and V set (Z == 1 or N!= V)
COND_AL:"", # Always (unconditional) - could be "al" but "" seems better...
COND_EXTENDED:"2", # See extended opcode table
}
cond_map = {
COND_EQ:0, # Equal Z set
COND_NE:1, # Not equal Z clear
COND_CS:2, #/HS Carry set/unsigned higher or same C set
COND_CC:3, #/LO Carry clear/unsigned lower C clear
COND_MI:4, # Minus/negative N set
COND_PL:5, # Plus/positive or zero N clear
COND_VS:6, # Overflow V set
COND_VC:7, # No overflow V clear
COND_HI:8, # Unsigned higher C set and Z clear
COND_LO:9, # Unsigned lower or same C clear or Z set
COND_GE:10, # Signed greater than or equal N set and V set, or N clear and V clear (N == V)
COND_LT:11, # Signed less than N set and V clear, or N clear and V set (N!= V)
COND_GT:12, # Signed greater than Z clear, and either N set and V set, or N clear and V clear (Z == 0,N == V)
COND_LE:13, # Signed less than or equal Z set, or N set and V clear, or N clear and V set (Z == 1 or N!= V)
COND_AL:"", # Always (unconditional) - could be "al" but "" seems better...
COND_EXTENDED:"2", # See extended opcode table
}
PM_usr = 0b10000
PM_fiq = 0b10001
PM_irq = 0b10010
PM_svc = 0b10011
PM_mon = 0b10110
PM_abt = 0b10111
PM_hyp = 0b11010
PM_und = 0b11011
PM_sys = 0b11111
# reg stuff stolen from regs.py to support proc_modes
# these are in context of reg_table, not reg_data.
# ie. these are indexes into the lookup table.
REG_OFFSET_USR = 17 * (PM_usr&0xf)
REG_OFFSET_FIQ = 17 * (PM_fiq&0xf)
REG_OFFSET_IRQ = 17 * (PM_irq&0xf)
REG_OFFSET_SVC = 17 * (PM_svc&0xf)
REG_OFFSET_MON = 17 * (PM_mon&0xf)
REG_OFFSET_ABT = 17 * (PM_abt&0xf)
REG_OFFSET_HYP = 17 * (PM_hyp&0xf)
REG_OFFSET_UND = 17 * (PM_und&0xf)
REG_OFFSET_SYS = 17 * (PM_sys&0xf)
#REG_OFFSET_CPSR = 17 * 16
REG_OFFSET_CPSR = 16 # CPSR is available in every mode, and PM_usr and PM_sys don't have an SPSR.
REG_SPSR_usr = REG_OFFSET_USR + 17
REG_SPSR_fiq = REG_OFFSET_FIQ + 17
REG_SPSR_irq = REG_OFFSET_IRQ + 17
REG_SPSR_svc = REG_OFFSET_SVC + 17
REG_SPSR_mon = REG_OFFSET_MON + 17
REG_SPSR_abt = REG_OFFSET_ABT + 17
REG_SPSR_hyp = REG_OFFSET_HYP + 17
REG_SPSR_und = REG_OFFSET_UND + 17
REG_SPSR_sys = REG_OFFSET_SYS + 17
REG_PC = 0xf
REG_SP = 0xd
REG_BP = None
REG_CPSR = REG_OFFSET_CPSR
REG_FLAGS = REG_OFFSET_CPSR #same location, backward-compat name
proc_modes = { # mode_name, short_name, description, offset, mode_reg_count, PSR_offset, privilege_level
PM_usr: ("User Processor Mode", "usr", "Normal program execution mode", REG_OFFSET_USR, 15, REG_SPSR_usr, 0),
PM_fiq: ("FIQ Processor Mode", "fiq", "Supports a high-speed data transfer or channel process", REG_OFFSET_FIQ, 8, REG_SPSR_fiq, 1),
PM_irq: ("IRQ Processor Mode", "irq", "Used for general-purpose interrupt handling", REG_OFFSET_IRQ, 13, REG_SPSR_irq, 1),
PM_svc: ("Supervisor Processor Mode", "svc", "A protected mode for the operating system", REG_OFFSET_SVC, 13, REG_SPSR_svc, 1),
PM_mon: ("Monitor Processor Mode", "mon", "Secure Monitor Call exception", REG_OFFSET_MON, 13, REG_SPSR_mon, 1),
PM_abt: ("Abort Processor Mode", "abt", "Implements virtual memory and/or memory protection", REG_OFFSET_ABT, 13, REG_SPSR_abt, 1),
PM_hyp: ("Hyp Processor Mode", "hyp", "Hypervisor Mode", REG_OFFSET_HYP, 13, REG_SPSR_hyp, 2),
PM_und: ("Undefined Processor Mode", "und", "Supports software emulation of hardware coprocessor", REG_OFFSET_UND, 13, REG_SPSR_und, 1),
PM_sys: ("System Processor Mode", "sys", "Runs privileged operating system tasks (ARMv4 and above)", REG_OFFSET_SYS, 15, REG_SPSR_sys, 1),
}
PM_LNAME = 0
PM_SNAME = 1
PM_DESC = 2
PM_REGOFF = 3
PM_BANKED = 4
PM_SPSR = 5
INST_ENC_DP_IMM = 0 # Data Processing Immediate Shift
INST_ENC_MISC = 1 # Misc Instructions
# Instruction encodings in arm v5
IENC_DP_IMM_SHIFT = 0 # Data processing immediate shift
IENC_MISC = 1 # Miscellaneous instructions
IENC_MISC1 = 2 # Miscellaneous instructions again
IENC_DP_REG_SHIFT = 3 # Data processing register shift
IENC_MULT = 4 # Multiplies & Extra load/stores
IENC_UNDEF = 5 # Undefined instruction
IENC_MOV_IMM_STAT = 6 # Move immediate to status register
IENC_DP_IMM = 7 # Data processing immediate
IENC_LOAD_IMM_OFF = 8 # Load/Store immediate offset
IENC_LOAD_REG_OFF = 9 # Load/Store register offset
IENC_ARCH_UNDEF = 10 # Architecturally undefined
IENC_MEDIA = 11 # Media instructions
IENC_LOAD_MULT = 12 # Load/Store Multiple
IENC_BRANCH = 13 # Branch
IENC_COPROC_RREG_XFER = 14 # mrrc/mcrr
IENC_COPROC_LOAD = 15 # Coprocessor load/store and double reg xfers
IENC_COPROC_DP = 16 # Coprocessor data processing
IENC_COPROC_REG_XFER = 17 # Coprocessor register transfers
IENC_SWINT = 18 # Software interrupts
IENC_UNCOND = 19 # unconditional wacko instructions
IENC_EXTRA_LOAD = 20 # extra load/store (swp)
IENC_DP_MOVW = 21 #
IENC_DP_MOVT = 22 #
IENC_DP_MSR_IMM = 23 #
# offchutes
IENC_MEDIA_PARALLEL = ((IENC_MEDIA << 8) + 1) << 8
IENC_MEDIA_SAT = ((IENC_MEDIA << 8) + 2) << 8
IENC_MEDIA_REV = ((IENC_MEDIA << 8) + 3) << 8
IENC_MEDIA_SEL = ((IENC_MEDIA << 8) + 4) << 8
IENC_MEDIA_USAD8 = ((IENC_MEDIA << 8) + 5) << 8
IENC_MEDIA_USADA8 = ((IENC_MEDIA << 8) + 6) << 8
IENC_MEDIA_EXTEND = ((IENC_MEDIA << 8) + 7) << 8
IENC_MEDIA_PACK = ((IENC_MEDIA << 8) + 8) << 8
IENC_UNCOND_CPS = ((IENC_UNCOND << 8) + 1) << 8
IENC_UNCOND_SETEND = ((IENC_UNCOND << 8) + 2) << 8
IENC_UNCOND_PLD = ((IENC_UNCOND << 8) + 3) << 8
IENC_UNCOND_BLX = ((IENC_UNCOND << 8) + 4) << 8
IENC_UNCOND_RFE = ((IENC_UNCOND << 8) + 5) << 8
# The supported types of operand shifts (by the 2 bit field)
S_LSL = 0
S_LSR = 1
S_ASR = 2
S_ROR = 3
S_RRX = 4 # FIXME HACK XXX add this
shift_names = ("lsl", "lsr", "asr", "ror", "rrx")
SOT_REG = 0
SOT_IMM = 1
daib = ("da", "ia", "db", "ib")
def instrenc(encoding, index):
return (encoding << 16) + index
INS_AND = IENC_DP_IMM_SHIFT << 16
INS_EOR = (IENC_DP_IMM_SHIFT << 16) + 1
INS_SUB = (IENC_DP_IMM_SHIFT << 16) + 2
INS_RSB = (IENC_DP_IMM_SHIFT << 16) + 3
INS_ADD = (IENC_DP_IMM_SHIFT << 16) + 4
INS_ADC = (IENC_DP_IMM_SHIFT << 16) + 5
INS_SBC = (IENC_DP_IMM_SHIFT << 16) + 6
INS_RSC = (IENC_DP_IMM_SHIFT << 16) + 7
INS_TST = (IENC_DP_IMM_SHIFT << 16) + 8
INS_TEQ = (IENC_DP_IMM_SHIFT << 16) + 9
INS_CMP = (IENC_DP_IMM_SHIFT << 16) + 10
INS_CMN = (IENC_DP_IMM_SHIFT << 16) + 11
INS_ORR = (IENC_DP_IMM_SHIFT << 16) + 12
INS_MOV = (IENC_DP_IMM_SHIFT << 16) + 13
INS_BIC = (IENC_DP_IMM_SHIFT << 16) + 14
INS_MVN = (IENC_DP_IMM_SHIFT << 16) + 15
INS_ORN = (IENC_DP_IMM_SHIFT << 16) + 12  # FIXME: same index as INS_ORR; also redefined as 85 below
INS_ADR = (IENC_DP_IMM_SHIFT << 16) + 16
INS_B = instrenc(IENC_BRANCH, 0)
INS_BL = instrenc(IENC_BRANCH, 1)
INS_BCC = instrenc(IENC_BRANCH, 2)
INS_BX = instrenc(IENC_MISC, 3)
INS_BXJ = instrenc(IENC_MISC, 5)
INS_BLX = IENC_UNCOND_BLX
INS_SWI = IENC_SWINT
# FIXME: must fit these into the numbering scheme
INS_TB = 85
INS_LDREX = 85
INS_ORN = 85
INS_PKH = 85
INS_LSL = 85
INS_LSR = 85
INS_ASR = 85
INS_ROR = 85
INS_RRX = 85
INS_LDR = instrenc(IENC_LOAD_IMM_OFF, 0)
INS_STR = instrenc(IENC_LOAD_IMM_OFF, 1)
no_update_Rd = (INS_TST, INS_TEQ, INS_CMP, INS_CMN, )
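# --- usage sketch (added for illustration; not part of the original file) ---
# Decoding the LDM/STM addressing-mode suffix from an opcode's iflags: the
# bit at IF_DAIB_SHFT-1 marks "a DAIB mode is present", and the two bits
# above it index the daib name table ("da", "ia", "db", "ib").
def daib_suffix(iflags):
    if iflags & (1 << (IF_DAIB_SHFT - 1)):  # is a DAIB mode encoded?
        return daib[(iflags >> IF_DAIB_SHFT) & 3]  # 0..3 -> da/ia/db/ib
    return ''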
|
The recall includes some batches of regular and extra-strength Tylenol, children's Tylenol, eight-hour Tylenol, Tylenol arthritis, Tylenol PM, children's Motrin, Motrin IB, Benadryl, Rolaids, Simply Sleep, and St. Joseph's aspirin.
The FDA said about 70 people have been either sickened by the odor - including nausea, stomach pain, vomiting and diarrhea - or noticed it.
The smell is caused by small amounts of a chemical associated with the treatment of wooden pallets, Johnson & Johnson said. The FDA said the chemical can leach into the air, and traced it to a facility in Las Piedras, Puerto Rico.
The New Brunswick, N.J., company said it is investigating the issue and will stop shipping products with the same materials on wooden pallets. It has asked suppliers to do so as well.
It was the second such recall in less than a month because of the smell, which regulators said was first reported to Johnson & Johnson's McNeil Consumer Healthcare Products division in 2008. Federal regulators criticized the company, saying it didn't respond to the complaints quickly enough, wasn't thorough in how it handled the problem and didn't inform the Food and Drug Administration quickly.
The FDA said McNeil knew of the problem in early 2008 but made only a limited investigation.
"McNeil should have acted faster," said Deborah Autor, the director of the FDA's Office of Compliance of the Center for Drug Evaluation and Research. "When something smells bad, literally or figuratively, companies must aggressively investigate and take all necessary action to solve the problem."
What do I do if I have recalled products?
You should stop using the product and contact McNeil Consumer Healthcare for instructions on a refund or replacement, the company said.
For these instructions or information regarding how to return or dispose of the product, go to www.mcneilproductrecall.com or call 1-888-222-6036 (Monday-Friday 8 a.m. to 10 p.m. and Saturday-Sunday 9 a.m. to 5 p.m.).
|
# -*- coding: utf-8 -*-
import os
import re
import socket
from stat import S_ISDIR
from datetime import datetime
import logging
import paramiko
MTIME_TOLERANCE = 3
logger = logging.getLogger(__name__)
class AuthenticationError(Exception):
pass
class TimeoutError(Exception):
pass
class SshError(Exception):
pass
class Sftp(object):
def __init__(self, host, username, password=None, port=22, timeout=10,
max_attempts=3, **kwargs):
self.host = host
self.port = port
self.username = username
self.password = password
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.sftp = None
for i in range(max_attempts):
try:
self.client.connect(host, port=port, username=username,
password=password, timeout=timeout,
**kwargs)
self.sftp = self.client.open_sftp()
return
            except (paramiko.BadHostKeyException,
                    paramiko.AuthenticationException) as e:
                raise AuthenticationError(str(e))
            except socket.timeout as e:
                raise TimeoutError(str(e))
            except Exception as e:
if i == max_attempts - 1:
raise SshError(str(e))
def _walk_remote(self, path, topdown=True):
try:
res = self.sftp.listdir_attr(path)
except IOError:
res = []
for stat in res:
file = os.path.join(path, stat.filename)
if not S_ISDIR(stat.st_mode):
yield 'file', file, stat
else:
if topdown:
yield 'dir', file, stat
for res in self._walk_remote(file, topdown=topdown):
yield res
else:
for res in self._walk_remote(file, topdown=topdown):
yield res
yield 'dir', file, None
def _walk_local(self, path, topdown=True):
for path, dirs, files in os.walk(path, topdown=topdown):
for file in files:
file = os.path.join(path, file)
yield 'file', file, os.stat(file)
for dir in dirs:
dir = os.path.join(path, dir)
yield 'dir', dir, os.stat(dir)
def _walk(self, *args, **kwargs):
remote = kwargs.pop('remote', False)
if remote:
return self._walk_remote(*args, **kwargs)
else:
return self._walk_local(*args, **kwargs)
def _makedirs_dst(self, path, remote=True, dry=False):
if remote:
paths = []
while path not in ('/', ''):
paths.insert(0, path)
path = os.path.dirname(path)
for path in paths:
try:
self.sftp.lstat(path)
except Exception:
if not dry:
self.sftp.mkdir(path)
logger.debug('created destination directory %s', path)
else:
if not os.path.exists(path):
if not dry:
os.makedirs(path)
logger.debug('created destination directory %s', path)
def _validate_src(self, file, include, exclude):
for re_ in include:
if not re_.search(file):
return False
for re_ in exclude:
if re_.search(file):
return False
return True
def _validate_dst(self, file, src_stat, remote=True):
if remote:
try:
dst_stat = self.sftp.lstat(file)
except Exception:
return
else:
if not os.path.exists(file):
return
dst_stat = os.stat(file)
if abs(dst_stat.st_mtime - src_stat.st_mtime) > MTIME_TOLERANCE:
debug_string = '%s modified time mismatch '
debug_string += '(source: %s, destination: %s)'
logger.debug(debug_string,
file, datetime.utcfromtimestamp(src_stat.st_mtime),
datetime.utcfromtimestamp(dst_stat.st_mtime))
return
if dst_stat.st_size != src_stat.st_size:
return
return True
def _save(self, src, dst, src_stat, remote=True):
if remote:
logger.info('copying %s to %s@%s:%s', src, self.username,
self.host, dst)
self.sftp.put(src, dst)
self.sftp.utime(dst, (int(src_stat.st_atime),
int(src_stat.st_mtime)))
else:
logger.info('copying %s@%s:%s to %s', self.username, self.host,
src, dst)
self.sftp.get(src, dst)
os.utime(dst, (int(src_stat.st_atime), int(src_stat.st_mtime)))
def _delete_dst(self, path, files, remote=True, dry=False):
if remote:
callables = {'file': self.sftp.remove, 'dir': self.sftp.rmdir}
else:
callables = {'file': os.remove, 'dir': os.rmdir}
for type, file, stat in self._walk(path, topdown=False, remote=remote):
if file not in files[type]:
if not dry:
try:
callables[type](file)
                    except Exception as e:
logger.debug('failed to remove %s: %s', file, str(e))
continue
logger.debug('removed %s', file)
def _get_filters(self, filters):
if not filters:
return []
return [re.compile(f) for f in filters]
def sync(self, src, dst, download=True, include=None, exclude=None,
delete=False, dry=False):
'''Sync files and directories.
:param src: source directory
:param dst: destination directory
:param download: True to sync from a remote source to a local
destination, else sync from a local source to a remote destination
:param include: list of regex patterns the source files must match
:param exclude: list of regex patterns the source files must not match
:param delete: remove destination files and directories not present
        at source or filtered by the include/exclude patterns
'''
include = self._get_filters(include)
exclude = self._get_filters(exclude)
if src.endswith('/') != dst.endswith('/'):
dst = os.path.join(dst, os.path.basename(src.rstrip('/')))
src = src.rstrip('/')
re_base = re.compile(r'^%s/' % re.escape(src))
if not src:
src = '/'
self._makedirs_dst(dst, remote=not download, dry=dry)
started = datetime.utcnow()
total_size = 0
dst_list = {'file': [], 'dir': []}
for type, file, stat in self._walk(src, remote=download):
file_ = re_base.sub('', file)
if not self._validate_src(file_, include, exclude):
logger.debug('filtered %s', file)
continue
dst_file = os.path.join(dst, file_)
dst_list[type].append(dst_file)
if type == 'dir':
self._makedirs_dst(dst_file, remote=not download, dry=dry)
elif type == 'file':
if not self._validate_dst(dst_file, stat, remote=not download):
if not dry:
self._save(file, dst_file, stat, remote=not download)
total_size += stat.st_size
logger.debug('copied %s to %s', file, dst_file)
if delete:
self._delete_dst(dst, dst_list, remote=not download, dry=dry)
logger.debug('transferred %s bytes in %s', total_size,
datetime.utcnow() - started)
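# --- usage sketch (added for illustration; not part of the original module) ---
# Mirror a remote directory locally: only files that are new or changed are
# downloaded, and local files no longer present remotely are pruned. The
# host, credentials and paths below are hypothetical.
if __name__ == '__main__':
    sftp = Sftp('sftp.example.com', 'user', password='secret')
    sftp.sync('/remote/data/', '/tmp/data/', download=True,
              include=[r'\.csv$'], delete=True)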
|
Chapter 7 bankruptcy is a legal process under which a person whose bills and debts are greater than his or her income surrenders non-exempt property in exchange for eliminating the obligation to pay his or her debts.
In a chapter 7 bankruptcy, the consumer files a petition asking the court to wipe out (discharge is the technical term) his or her debts. In addition to the petition, the consumer also files (i) schedules of assets and liabilities; (ii) a schedule of current income and expenditures; (iii) a statement of financial affairs; and (iv) a schedule of executory contracts and unexpired leases.
The consumer must also provide a detailed list of his or her monthly living expenses (food, clothing, shelter, utilities, taxes, transportation, medicine, etc.).
Married individuals must gather this information for their spouse even if the spouse is not filing for bankruptcy. In a situation where only one spouse files, the income and expenses of the non-filing spouse are required so that the household’s entire financial position can be evaluated.
In order to file a chapter 7 bankruptcy the consumer must first qualify. Higher-income consumers whose household income is above the state median must complete a “means test”. The means test is designed to keep debtors with higher income from filing for Chapter 7 bankruptcy. If the test reveals that the debtor has a certain amount of discretionary income that could be paid to unsecured creditors, the bankruptcy court may decide that he or she cannot file a chapter 7 case, unless there are special extenuating circumstances. Taking the means test doesn’t mean that a debtor must be in poverty in order to file a Chapter 7 bankruptcy. Debtors can earn significant monthly income and still qualify for Chapter 7 bankruptcy if they have a lot of expenses, such as high mortgage and car loan payments and other expenses.
When a chapter 7 petition is filed, an impartial case trustee is assigned to administer the case and liquidate the debtor’s nonexempt assets. If all the debtor’s assets are exempt or subject to valid liens, the trustee will normally file a “no asset” report with the court, and there will be no distribution to unsecured creditors. Most chapter 7 cases involving individual debtors are no asset cases.
Commencement of a bankruptcy case creates an “estate.” The estate technically becomes the temporary legal owner of all the debtor’s property. The estate includes all legal or equitable interests of the debtor in property as of the date the petition is filed.
If you have questions or concerns about filing for chapter 7 bankruptcy, we urge you to contact The Law Offices of Robert J. Nahoum, P.C. today by calling 845-232-0202.
This Law Offices of Robert J. Nahoum, P.C. is a Debt Relief Agency. We assist individuals to become debt free through Bankruptcy.
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Isescan(Package):
"""A python pipeline to identify IS (Insertion Sequence) elements in
genome and metagenome"""
homepage = "https://github.com/xiezhq/ISEScan"
url = "https://github.com/xiezhq/ISEScan/archive/v1.7.2.1.tar.gz"
version('1.7.2.1', sha256='b971a3e86a8cddaa4bcd520ba9e75425bbe93190466f81a3791ae0cb4baf5e5d')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('py-fastcluster', type='run')
depends_on('py-argparse', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix)
env.prepend_path('LD_LIBRARY_PATH',
join_path(self.prefix, 'ssw201507'))
def install(self, spec, prefix):
# build bundled SSW library
with working_dir('ssw201507'):
Executable(spack_cc)(
'-O3', '-pipe', self.compiler.cc_pic_flag, '-shared',
'-rdynamic', '-o', 'libssw.' + dso_suffix, 'ssw.c', 'ssw.h',
)
# set paths to required programs
blast_pfx = self.spec['blast-plus'].prefix.bin
blastn_path = blast_pfx.blastn
blastp_path = blast_pfx.blastp
makeblastdb_path = blast_pfx.makeblastdb
hmmer_pfx = self.spec['hmmer'].prefix.bin
phmmer_path = hmmer_pfx.phmmer
hmmsearch_path = hmmer_pfx.hmmsearch
fgs_pfx = self.spec['fraggenescan'].prefix.bin
fgs_path = join_path(fgs_pfx, 'run_FragGeneScan.pl')
constants = FileFilter('constants.py')
constants.filter('/apps/inst/FragGeneScan1.30/run_FragGeneScan.pl',
fgs_path, string=True)
constants.filter('/apps/inst/hmmer-3.3/bin/phmmer',
phmmer_path, string=True)
constants.filter('/apps/inst/hmmer-3.3/bin/hmmsearch',
hmmsearch_path, string=True)
constants.filter('/apps/inst/ncbi-blast-2.10.0+/bin/blastn',
blastn_path, string=True)
constants.filter('/apps/inst/ncbi-blast-2.10.0+/bin/blastp',
blastp_path, string=True)
constants.filter('/apps/inst/ncbi-blast-2.10.0+/bin/makeblastdb',
makeblastdb_path, string=True)
# install the whole tree
install_tree('.', prefix)
set_executable(join_path(prefix, 'isescan.py'))
|
Why is Lantana a garden rock star? Here’s just a few enticing answers: non-stop blooming time; easy-care flowers; and the color combinations are ah-MAZ-zing! Pull up a chair and let me tell you more . . .
Lantana is a must-have for your flower garden. This plant — which can be grown as an annual or as a perennial depending on which USDA Zone you’re in — blooms non-stop from Summer well into the Fall months, as I’ll show you in just a moment.
The plant in the image above was planted in June 2013 in our front garden. When you first plant Lantana, water it regularly until it gets established. Once it begins growing, you’ll notice that its water needs are minimal.
Amazing, right? And this is just one plant!
If you look at the image above, you can see the Lantana flowers in all stages of bloom, from just the green buds, to the larger yellow-colored buds that look almost like tiny yellow rectangular pillows, to the fully opened blooms!
This variety of Lantana is called Luscious Berry Blend Lantana. The multi-colored blooms are stunning!
The Lantana was still going strong, right along with those yellow Mums nearby. That’s when we knew we would always be planting Lantana in our gardens every year!
Lantana can be invasive in areas where frost doesn’t occur. Check with your local garden nursery if unsure.
You can see in these two images (above and below) how the individual tiny petals resemble rectangular pillows before they open.
I never tire of looking at the Lantana flowers up close, nor do I ever tire of watching the butterflies and hummingbirds enjoy the flowers!
Here in Australia Lantana is illegal as it is classed as a noxious weed. That hasn’t however stopped me growing it in my back yard. I love it! The yellow version is such a lovely colour and you have colour all year around.
Best wishes, love your blog.
I’ve heard that Lantana can be invasive in some parts of the US too. I’d do the same thing as you: I’d happily grow it and have the prettiest “weeds” of all! 🙂 So glad you stopped by and love hearing your gardening perspective from Australia! Thank you!
Okay, I’m highly irritated! Why, you ask? Because for the life of me I cannot get MY Lantana near as beautiful as yours!!! I have two in separate containers and I’m lucky if I get three flowers on them at once! They appear to be healthy. Green leaves, and gorgeous color when the few bloom! I live in zone 7 generally speaking in Northern California. What am I doing wrong?.. I dutifully clip off the expired “balls”, the occasional food, check the water levels with a gauge, they’re in the full sun approx. 4-6 hours due to trees. If you could give me any advice, I’d gladly receive it! I too, love the incredible God given array of colors that come on one flower! Safe to say they’re one of my favorites, so I’m really disappointed. How you got the shrub, I’ll never know! Maybe my little green thumb is yellowing??!!
I think Lantana would grow best if planted in the ground rather than a container.
My neighbor has one planted in her front flower bed that has grown into a beautiful shrub.
We live in Middle Tennessee…..
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
from .TextParser import TextParser
from .utils import namespace
class SlideParser(object):
def __init__(self, XMLSlideObject, number, presentationParser):
self.presentation = presentationParser
self.text_parsers = self.parseText(XMLSlideObject)
self.title_parsers = self.parseTitle(XMLSlideObject)
self.number = number
self.layout = None
def get_style_by_id(self, style_id):
"""
Return a |StyleParser| matching the given id.
:param style_id:
:return: |StyleParser| object.
"""
return self.presentation.get_style_by_id(style_id)
def parseText(self, XMLSlideObject):
"""
Create |TextParser| object for each text of the given XML slide object.
:param XMLSlideObject: LXML slide object
:return: List of |TextParser| object.
"""
text = []
for frame in XMLSlideObject.findall(".//draw:frame", XMLSlideObject.nsmap):
if frame not in XMLSlideObject.findall(".//draw:frame[@presentation:class='title']", XMLSlideObject.nsmap):
for textF in frame.findall(".//text:p", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF)+"style-name") is not None:
style_id = textF.get(namespace(textF)+"style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
text.append(TextParser(textF, style, self))
for textF in frame.findall(".//text:span", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF)+"style-name") is not None:
style_id = textF.get(namespace(textF)+"style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
text.append(TextParser(textF, style, self))
for textF in frame.findall(".//text:text", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF)+"style-name") is not None:
style_id = textF.get(namespace(textF)+"style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
text.append(TextParser(textF, style, self))
return text
def parseTitle(self, XMLSlideObject):
"""
        Look up the XML title object within the given XML slide object and create a list of |TextParser| objects for each text within the title.
:param XMLSlideObject:
:return:
"""
title = []
        # Find the text frame corresponding to the slide title
titleFrame = XMLSlideObject.find(".//draw:frame[@presentation:class='title']", XMLSlideObject.nsmap)
if titleFrame is not None:
            # Look for the paragraph that would contain the title
for textF in titleFrame.findall(".//text:p", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF) + "style-name") is not None:
style_id = textF.get(namespace(textF) + "style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
title.append(TextParser(textF, style, self))
            # Look for the span that would contain the title
for textF in titleFrame.findall(".//text:span", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF)+"style-name") is not None:
style_id = textF.get(namespace(textF)+"style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
title.append(TextParser(textF, style, self))
return title
    # Extract the texts that have a given style
def getTextsByStyleId(self, styleID):
"""
Return a list of |TextParser| objects whose style matches the given style ID.
:param styleID: ID of a |StyleParser|
:return: List of |TextParser| objects.
"""
texts = []
        # Iterate over the texts and check whether each one has the requested style
for text in self.text_parsers:
if text.style_id == styleID:
texts.append(text)
return texts
@property
def text(self):
"""
Return a string containing all the body text of the slide.
        :return: String
"""
text=""
for tp in self.text_parsers:
text+="\n"+tp.text
return text
@property
def title(self):
"""
        Return a string containing the title of the slide.
:return: String
"""
if len(self.title_parsers) > 0:
text=""
for tp in self.title_parsers:
text+=tp.text
return text
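# --- usage sketch (added for illustration; not part of the original module) ---
# A SlideParser is constructed by its presentation parser from an LXML slide
# node. Given such an object, its content is exposed via properties; the
# style id below is a hypothetical value:
#
#     print(slide.number, slide.title)
#     print(slide.text)
#     highlighted = slide.getTextsByStyleId('T1')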
|
If you are looking for information on the topic Home cinema 8100, our member Allan has found and posted qualified resources on the theme Home cinema 8100 to help you in your research.
epson powerlite home cinema 8100 projector specs, projector reviews and current street prices.
30 juil. 2010 - the epson powerlite home cinema 8100 offers an extraordinary level of capability for the price.
buy epson powerlite home cinema 8100 projector housing w/ genuine original oem bulb at walmart.com.
view full epson powerlite home cinema 8100 specs on cnet.
epson home cinema 8100 projector, home theater, lcd, hd (1920 x 1080) resolution, 1800 lumens. click for our best price.
epson powerlite home cinema 8100 projector light bulbs from batteries plus bulbs. top quality video projector bulbs and projection screen tv lamps.
buy epson powerlite home cinema 8100 1080p 1920x1080 1800 lumens home theater 3lcd projector with fast shipping and top-rated customer service.
|
import codecs,urllib,urllib2,re,xbmc,xbmcplugin,xbmcaddon,xbmcgui,os,sys,commands,HTMLParser,jsunpack,time
website = 'http://www.trilulilu.ro/';
__version__ = "1.0.4"
__plugin__ = "trilulilu.ro" + __version__
__url__ = "www.xbmc.com"
settings = xbmcaddon.Addon( id = 'plugin.video.triluliluro' )
search_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'search.png' )
movies_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'movies.png' )
movies_hd_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'movies-hd.png' )
tv_series_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'tv.png' )
next_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'next.png' )
def ROOT():
#addDir('Filme','http://www.trilulilu.ro/',1,movies_thumb)
addDir('Cauta','http://www.trilulilu.ro/',3,search_thumb)
addDir('Cauta ... dublat','http://www.trilulilu.ro/',31,search_thumb)
xbmc.executebuiltin("Container.SetViewMode(500)")
def CAUTA_LIST(url):
link = get_search(url)
match=re.compile('<a href="(http://www.trilulilu.ro/video-.+?)#ref=cauta" .+?title="(.+?)" .+?>\n.+?<div.+?>(\d+:\d+)</div><img (src|data-src)="(.+?)" width="', re.IGNORECASE|re.MULTILINE).findall(link)
if len(match) > 0:
print match
for legatura, name, length, s, img in match:
#name = HTMLParser.HTMLParser().unescape( codecs.decode(name, "unicode_escape") ) + " " + length
name = name + " " + length
the_link = legatura
image = img
sxaddLink(name,the_link,image,name,10)
match=re.compile('<link rel="next" href="\?offset=(\d+)" />', re.IGNORECASE).findall(link)
if len(match) > 0:
nexturl = re.sub('\?offset=(\d+)', '?offset='+match[0], url)
if nexturl.find("offset=") == -1:
nexturl += '?offset='+match[0]
print "NEXT " + nexturl
addNext('Next', nexturl, 2, next_thumb)
xbmc.executebuiltin("Container.SetViewMode(500)")
def CAUTA(url, autoSearch = None):
keyboard = xbmc.Keyboard( '' )
keyboard.doModal()
if ( keyboard.isConfirmed() == False ):
return
search_string = keyboard.getText()
if len( search_string ) == 0:
return
if autoSearch is None:
autoSearch = ""
	CAUTA_LIST(get_search_url(search_string + autoSearch))
def SXVIDEO_GENERIC_PLAY(sxurl, seltitle, linksource="source1"):
listitem = xbmcgui.ListItem(seltitle)
listitem.setInfo('video', {'Title': seltitle})
selurl = sxurl
SXVIDEO_PLAY_THIS(selurl, listitem, None)
return
def SXVIDEO_PLAY_THIS(selurl, listitem, source):
movie_formats = {'flv': 'flv-vp6', 'mp4': 'mp4-360p'}
sformat = ''
player = xbmc.Player( xbmc.PLAYER_CORE_MPLAYER )
for (mfn, mf) in movie_formats.items():
if SX_checkUrl(selurl + mf):
player.play(selurl + mf, listitem)
time.sleep(1)
			break
#if player.isPlaying():
# break;
try:
print "-"
#player.setSubtitles(source['subtitle'])
except:
pass
#while player.isPlaying:
# xbmc.sleep(100);
return player.isPlaying()
def SXSHOWINFO(text):
#progress = xbmcgui.DialogProgress()
#progress.create("kml browser", "downloading playlist...", "please wait.")
print ""
def SXVIDEO_FILM_PLAY(url):
SXSHOWINFO("Playing movie...")
#print url
sxurli = sxGetMovieLink(url)
#print sxurli
#return
#print sxurls
SXVIDEO_GENERIC_PLAY(sxurli['url'], sxurli['title'])
def SX_checkUrl(url):
content_range=None
try:
req = urllib2.Request(url)
#
        # Here we request that bytes 100--200 be downloaded.
# The range is inclusive, and starts at 0.
#
req.headers['Range']='bytes=%s-%s' % (100, 200)
f = urllib2.urlopen(req)
# This shows you the actual bytes that have been downloaded.
content_range=f.headers.get('Content-Range')
except:
pass
print "URL costel " + url
#print(content_range)
return content_range != None
def sxGetMovieLink(url):
print 'url video '+url
#print 'nume video '+ name
# thumbnail
src = get_url(urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]"))
#print src
thumbnail = ''
title = ''
link_video_trilu = ""
#title
match = re.compile('<title>(.+?)<', re.IGNORECASE).findall(src)
title = HTMLParser.HTMLParser().unescape(match[0])
title = re.sub('\s+-\s*Video\s*-\s*Trilulilu', '', title);
#print "MATCH SERCH " + match[0]
#video link --- # block_flash_vars = {"userid":"andreea_popa","hash":"edee1b51b240c9","server":"65","autoplay":"true","hasAds":"true","viewf
match = re.compile('block_flash_vars = {"userid":"(.+?)","hash":"(.+?)","server":"(.+?)",', re.IGNORECASE).findall(src)
if not match:
#addLink('Could NOT generate video link ', " ", thumbnail, title)
xbmc.executebuiltin('Notification(Error,Could NOT generate video link,5000,/script.hellow.world.png)')
return False
ids = match[0]
username = ids[0]
hash = ids[1]
server = ids[2]
#print "hash = " + hash + "; username = " + username + "; server=" + server
# video id
link_video_trilu = "http://fs"+server+".trilulilu.ro/stream.php?type=video&source=site&hash=" + hash + "&username=" + username + "&format="
return {'url':link_video_trilu, 'title': title}
def get_url(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
try:
response = urllib2.urlopen(req)
link=response.read()
response.close()
return link
except:
return False
def get_search_url(keyword, offset = None):
url = 'http://cauta.trilulilu.ro/video/' + urllib.quote_plus(keyword)
if offset != None:
url += "?offset="+offset
return url
def get_search(url):
params = {}
req = urllib2.Request(url, urllib.urlencode(params))
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
req.add_header('Content-type', 'application/x-www-form-urlencoded')
try:
response = urllib2.urlopen(req)
link=response.read()
response.close()
return link
except:
return False
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
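# Illustration (added; not part of the original plugin): XBMC invokes this
# script with argv such as
#   ['plugin://plugin.video.triluliluro/', '1',
#    '?url=http%3A%2F%2Fwww.trilulilu.ro%2F&mode=3&name=Cauta']
# in which case get_params() returns
#   {'url': 'http%3A%2F%2Fwww.trilulilu.ro%2F', 'mode': '3', 'name': 'Cauta'}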
def yt_get_all_url_maps_name(url):
conn = urllib2.urlopen(url)
encoding = conn.headers.getparam('charset')
content = conn.read().decode(encoding)
s = re.findall(r'"url_encoded_fmt_stream_map": "([^"]+)"', content)
if s:
s = s[0].split(',')
s = [a.replace('\\u0026', '&') for a in s]
s = [urllib2.parse_keqv_list(a.split('&')) for a in s]
n = re.findall(r'<title>(.+) - YouTube</title>', content)
return (s or [],
HTMLParser.HTMLParser().unescape(n[0]))
def yt_get_url(z):
#return urllib.unquote(z['url'] + '&signature=%s' % z['sig'])
return urllib.unquote(z['url'])
def youtube_video_link(url):
# 18 - mp4
fmt = '18'
s, n = yt_get_all_url_maps_name(url)
for z in s:
if z['itag'] == fmt:
if 'mp4' in z['type']:
ext = '.mp4'
elif 'flv' in z['type']:
ext = '.flv'
found = True
link = yt_get_url(z)
return link
def sxaddLink(name,url,iconimage,movie_name,mode=4):
ok=True
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": movie_name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz)
return ok
def addLink(name,url,iconimage,movie_name):
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": movie_name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
return ok
def addNext(name,page,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(page)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addDir(name,url,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
params=get_params()
url=None
name=None
mode=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
#print "Mode: "+str(mode)
#print "URL: "+str(url)
#print "Name: "+str(name)
if mode==None or url==None or len(url)<1:
ROOT()
elif mode==2:
CAUTA_LIST(url)
elif mode==3:
CAUTA(url)
elif mode==31:
CAUTA(url, " dublat")
elif mode==4:
    VIDEO(url,name)  # NOTE: VIDEO is not defined in this excerpt
elif mode==9:
    SXVIDEO_EPISOD_PLAY(url)  # NOTE: SXVIDEO_EPISOD_PLAY is not defined in this excerpt
elif mode==10:
SXVIDEO_FILM_PLAY(url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
|
Major League Fishing pro Keith Poche took home the Challenge Select Shield in his last MLF Select competition. Now he’s moving up to the Cups with MLF pro Ott DeFoe. Find out how he felt after the win, where he found the fish, and what he was catching them on.
|
# -*- coding: utf-8 -*-
# store.py
# This file is part of gedit Session Saver Plugin
#
# Copyright (C) 2006-2007 - Steve Frécinaux <[email protected]>
# Copyright (C) 2010 - Kenny Meyer <[email protected]>
#
# gedit Session Saver Plugin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# gedit Session Saver Plugin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gedit Session Saver Plugin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
from gi.repository import Gio
class Session(object):
def __init__(self, name, files = None):
super(Session, self).__init__()
self.name = name
if files is None:
files = []
self.files = files
def __lt__(self, session):
return (self.name.lower() < session.name.lower())
def __eq__(self, session):
return (self.name.lower() == session.name.lower())
def add_file(self, filename):
self.files.append(Gio.file_new_for_uri(filename))
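# Usage sketch (added for illustration; not part of the original plugin).
# Sessions compare case-insensitively by name, so they sort and deduplicate
# naturally; the URI below is hypothetical:
#
#     session = Session('Work')
#     session.add_file('file:///home/me/notes.txt')
#     assert Session('work') == session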
# ex:ts=4:et:
|
Pem Dorjee (Nepali: पेम्दोर्जी शेर्पा) is a Nepalese Sherpa mountaineer born in 1982 in Chyangba, a small, remote village south of Mount Everest in Khumbu, Nepal.
Pem Dorjee has climbed Mt. Everest two times, the second time with his girlfriend Moni Mulepati, with whom he exchanged wedding vows on 30 May 2005; they thus became the first couple to be married on top of Mt. Everest, while on the Rotary Centennial Everest Expedition. They also hoisted the flag of Rotary International, a club with which Dorjee has frequently been involved, in honor of its centennial year.
Aside from his mountaineering achievements, Dorjee has worked on improving the quality of life in his home village of Chayangba. Dorjee has organized service projects such as dental, eye, and other health projects, as well as funding to build libraries, schools, and drinking water systems in his village and other remote villages in Nepal.
Dorjee is certified as a Trekking and Mountaineering Guide by the Nepalese government. He is also an active member of the American Mountain Guides Association (AMGA), American Alpine Club, Nepal Mountaineering Association, and Everest Summiteers Association. Dorjee recently hiked 3,100 miles along the Continental Divide Trail from the Mexico–US border to the Canada–US border as part of the Rotary CDT Challenge, a fundraising effort by Rotary International to build a continuous trail. Dorjee and his wife are co-owners of a gift store called The Himalayan Bazaar and are travel guides for the adventure travel company Of Global Interest — both were started by Heather O'Neal and are located in Ann Arbor, Michigan.
^ "Wedding on top of Mount Everest". BBC. 3 June 2005. Retrieved 4 February 2012.
^ "Wedding held on top of the world". Associated Press. MSNBC. 3 June 2005. Retrieved 7 September 2012.
The American Alpine Club is a non-profit member organization whose goal is a united community of competent climbers and healthy climbing landscapes. The Club is housed in the American Mountaineering Center in Golden, and it maintains regional sections—with both regional staff and volunteers—throughout the United States. The AAC publishes two journals, The American Alpine Journal and Accidents in North American Mountaineering, and a Guidebook to Membership annually. Collections of these journals, along with tens of thousands of other climbing-related publications and mountaineering literature, can be found in the Henry S. Hall, American Alpine Club Library, located in the AMC. The AAC is a 501(c)(3) organization supported by gifts and grants from individuals and foundations, as well as member dues. Founded by Arctic zoologist and geographer Angelo Heilprin, the American Alpine Club was established in 1902 and had 45 founding members. These original members were primarily from the East Coast, although a handful resided in the Midwest; among them were Annie Smith Peck and the AAC's first president, Charles Ernest Fay, who was a founding member of the Appalachian Mountain Club.
The Club is housed in the American Mountaineering Center, whose tenants include the Colorado Mountain Club. The AAC is historically and contemporarily associated with a number of other American organizations; it was a founding member of the Union Internationale des Associations d'Alpinisme in 1930 and the Arctic Institute of North America in 1948. The AAC Library was established in 1916 by a gift from American mountaineer Henry Montagnier; the Library was initially focused primarily on the Alps. Beginning in 1929, the Library was housed in the New York Public Library; during this time, it grew to include contributions from many members, as well as cultural artifacts from their various expeditions to the Himalaya and elsewhere. In 1941, the AAC purchased a firehouse in Manhattan to house the growing Library. When the AAC moved its permanent headquarters to Golden in 1993, the Library moved to its current location in the basement of the American Mountaineering Center; many items are autographed by the expedition members who wrote them.
The Continental Divide National Scenic Trail is a United States National Scenic Trail running 3,100 miles between Mexico and Canada. It follows the Continental Divide of the Americas along the Rocky Mountains and traverses five U.S. states: Montana, Idaho, Wyoming, Colorado, and New Mexico. In Montana it crosses Triple Divide Peak, which separates the Hudson Bay, Atlantic Ocean, and Pacific Ocean drainages. The trail is a combination of dedicated trails and small roads; portions designated as uncompleted must be traveled by roadwalking on dirt or paved roads. The Continental Divide Trail, along with the Appalachian Trail and the Pacific Crest Trail, forms what thru-hiker enthusiasts have termed the Triple Crown of long-distance hiking in the United States. Only about two hundred people a year attempt to hike the trail, taking about six months to complete it. Dave Odell thru-hiked in 1977, and in the same year Dan Torpey hiked from the NM/CO border to Mt Robson. German long-distance rider Günter Wamser and Austrian Sonja Endlweber managed to complete the tour with four Bureau of Land Management mustangs in three summers, 2007–09.
This seven-month journey spanned over 5,600 miles. Tapon took the most circuitous, high, difficult route north and, while returning south, took the more expedient route. Andrew Skurka completed the trail as part of the 6,875-mile Great Western Loop in 2007. The youngest person to hike the trail is Reed Gjonnes, who hiked it with her father Eric Gjonnes from April 15 to September 6, 2013. The CDT in New Mexico is about 700 miles long, and some portions have very limited water. Local volunteer groups place water caches at strategic points along the trail. All three are located within New Mexico's boot heel. The terminus near Columbus is not on the Continental Divide but rather in the vicinity of Columbus; Columbus is listed as a National Historic Landmark due to the invasion in 1916 by Pancho Villa and his Villistas. From the Crazy Cook Monument, the trail begins as a desire path. From Columbus, the route is a roadwalk to Lordsburg. In most areas the trail is well marked. It is concurrent with the Colorado Trail for approximately 200 miles; the CDT itself meanders in Colorado some 650 miles at higher altitudes.
It is a non-political and non-sectarian organization open to all people regardless of race, creed, gender, or political preference. There are 34,282 member clubs worldwide, and 1.2 million individuals, called Rotarians, have joined these clubs. Rotarians usually gather weekly for breakfast, lunch, or dinner to fulfill their first guiding principle: to develop friendships as an opportunity for service. The Rotarians' primary motto is Service Above Self; its secondary motto is One profits most who serves best. This objective is set against the Rotary 4-Way Test, used to see if an action is compatible with the Rotarian spirit. It is still seen as a standard for ethics in business management. The 4-Way Test considers the following questions in respect to thinking, saying, or doing: Is it the truth? Is it fair to all concerned? Will it build goodwill and better friendships? Will it be beneficial to all concerned? The first Rotary Club was formed in Chicago in 1905, when attorney Paul P. Harris called together a group of acquaintances. In addition to Harris and Gustave Loehr, Silvester Schiele and Hiram E. Shorey were the other two who attended this first meeting.
The next four Rotary Clubs were organized in cities in the western United States, beginning with San Francisco and Los Angeles. The National Association of Rotary Clubs in America was formed in 1910. On November 3, 1910, a Rotary club began meeting in Winnipeg, Canada; on 22 February 1911, the first meeting of the Rotary Club Dublin was held in Dublin, Ireland. This was the first club established outside of North America. In April 1912, Rotary chartered the Winnipeg club, marking the first establishment of an American-style service club outside the United States. To reflect the addition of a club outside of the United States, the organization was renamed the International Association of Rotary Clubs in 1912. In August 1912, the Rotary Club of London received its charter from the Association, marking the first acknowledged Rotary club outside North America. It became known that the Dublin club in Ireland was organized before the London club, but the Dublin club did not receive its charter until after the London club was chartered.
There are travel agencies that serve as general sales agents for foreign travel companies, allowing them to have offices in countries other than where their headquarters are located. The modern travel agency first appeared in the second half of the 19th century, with its roots in 1758 and the establishment of Cox & Kings Ltd. In 1970, Cox & Kings, the longest established travel company, centered its focus on its travel business. Earlier, Thomas Cook had established a chain of agencies in the last quarter of the 19th century, in association with the Midland Railway. They not only sold their own tours to the public but also acted as agents for other operators. Other British pioneer travel agencies were Dean & Dawson, the Polytechnic Touring Association, and the Co-operative Wholesale Society. The oldest travel agency in the United States is Brownell Travel; on 4 July 1887, Walter T. Brownell led ten travelers on a European tour. Travel agencies became more commonplace with the development of commercial aviation, starting in the 1920s. A travel agency's main function is to act as an agent, selling travel products; a package holiday or a ticket is not purchased from a supplier unless a customer requests that purchase.
The holiday or ticket is supplied to the agency at a discount; the profit is therefore the difference between the advertised price which the customer pays and the discounted price at which it is supplied to the agent. This is known as the commission. In many countries, all individuals or companies that sell tickets are required to be licensed as travel agents. In some countries, airlines have stopped giving commissions to travel agencies, and travel agencies in those markets are forced to charge a percentage premium or a standard flat fee per sale. However, some companies pay travel agencies a set percentage for selling their product. Major tour companies can afford to do this because it is cheaper to offer commissions to travel agents than to engage in advertising and distribution campaigns without using agents. Other commercial operations are undertaken, especially by the larger chains. A travel agent is supposed to offer impartial travel advice to the customer, as well as coordinate travel details and assist the customer in booking travel.
|
from __future__ import unicode_literals, division, absolute_import
import logging
from builtins import * # pylint: disable=unused-import, redefined-builtin
from datetime import datetime
from sqlalchemy import Column, Unicode, Integer, DateTime, func, Boolean
from sqlalchemy.orm import relationship
from sqlalchemy.sql.elements import and_
from sqlalchemy.sql.schema import ForeignKey
from flexget import db_schema
from flexget.db_schema import versioned_base
from flexget.utils.database import entry_synonym, with_session
plugin_name = 'pending_list'
log = logging.getLogger(plugin_name)
Base = versioned_base(plugin_name, 0)
@db_schema.upgrade(plugin_name)
def upgrade(ver, session):
ver = 0
return ver
class PendingListList(Base):
__tablename__ = 'pending_list_lists'
id = Column(Integer, primary_key=True)
name = Column(Unicode, unique=True)
added = Column(DateTime, default=datetime.now)
entries = relationship(
'PendingListEntry', backref='list', cascade='all, delete, delete-orphan', lazy='dynamic'
)
def to_dict(self):
return {'id': self.id, 'name': self.name, 'added_on': self.added}
class PendingListEntry(Base):
__tablename__ = 'wait_list_entries'
id = Column(Integer, primary_key=True)
list_id = Column(Integer, ForeignKey(PendingListList.id), nullable=False)
added = Column(DateTime, default=datetime.now)
title = Column(Unicode)
original_url = Column(Unicode)
_json = Column('json', Unicode)
entry = entry_synonym('_json')
approved = Column(Boolean)
def __init__(self, entry, pending_list_id):
self.title = entry['title']
self.original_url = entry.get('original_url') or entry['url']
self.entry = entry
self.list_id = pending_list_id
self.approved = False
def __repr__(self):
return '<PendingListEntry,title=%s,original_url=%s,approved=%s>' % (
self.title,
self.original_url,
self.approved,
)
def to_dict(self):
return {
'id': self.id,
'list_id': self.list_id,
'added_on': self.added,
'title': self.title,
'original_url': self.original_url,
'entry': dict(self.entry),
'approved': self.approved,
}
@with_session
def get_pending_lists(name=None, session=None):
log.debug('retrieving pending lists')
query = session.query(PendingListList)
if name:
log.debug('searching for pending lists with name %s', name)
query = query.filter(PendingListList.name.contains(name))
return query.all()
@with_session
def get_list_by_exact_name(name, session=None):
log.debug('returning pending list with name %s', name)
return (
session.query(PendingListList)
.filter(func.lower(PendingListList.name) == name.lower())
.one()
)
@with_session
def get_list_by_id(list_id, session=None):
log.debug('returning pending list with id %d', list_id)
return session.query(PendingListList).filter(PendingListList.id == list_id).one()
@with_session
def delete_list_by_id(list_id, session=None):
entry_list = get_list_by_id(list_id=list_id, session=session)
if entry_list:
log.debug('deleting pending list with id %d', list_id)
session.delete(entry_list)
@with_session
def get_entries_by_list_id(
list_id,
start=None,
stop=None,
order_by='title',
descending=False,
approved=False,
filter=None,
session=None,
):
log.debug('querying entries from pending list with id %d', list_id)
query = session.query(PendingListEntry).filter(PendingListEntry.list_id == list_id)
if filter:
query = query.filter(func.lower(PendingListEntry.title).contains(filter.lower()))
if approved:
        query = query.filter(PendingListEntry.approved == approved)
if descending:
query = query.order_by(getattr(PendingListEntry, order_by).desc())
else:
query = query.order_by(getattr(PendingListEntry, order_by))
return query.slice(start, stop).all()
@with_session
def get_entry_by_title(list_id, title, session=None):
entry_list = get_list_by_id(list_id=list_id, session=session)
if entry_list:
log.debug('fetching entry with title `%s` from list id %d', title, list_id)
return (
session.query(PendingListEntry)
.filter(and_(PendingListEntry.title == title, PendingListEntry.list_id == list_id))
.first()
)
@with_session
def get_entry_by_id(list_id, entry_id, session=None):
log.debug('fetching entry with id %d from list id %d', entry_id, list_id)
return (
session.query(PendingListEntry)
.filter(and_(PendingListEntry.id == entry_id, PendingListEntry.list_id == list_id))
.one()
)
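# --- usage sketch (added for illustration; not part of the original plugin) ---
# @with_session opens a SQLAlchemy session automatically when none is passed,
# so once FlexGet's database is initialized the helpers above can be called
# directly. The list name below is hypothetical.
def _dump_approved(list_name='pending movies'):
    pending = get_list_by_exact_name(list_name)
    for entry in get_entries_by_list_id(pending.id, order_by='added',
                                        descending=True, approved=True):
        log.info('%s (added %s)', entry.title, entry.added)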
|
Rajhesh Vaidhya (Tamil: ராஜேஷ் வைத்யா) (or Vaidya) is an Indian veena player hailing from Tamil Nadu, known for renditions such as "Raghuvamsa Sudha" and for fusion collaborations with accompanists on mridangam and ghatam. His recordings include The Tradition Of Carnatic Music On Veena.
|
from collections import OrderedDict
from datetime import date, datetime
import pytest
from lima import fields, schema
# model -----------------------------------------------------------------------
class Knight:
'''A knight.'''
def __init__(self, title, name, number, born):
self.title = title
self.name = name
self.number = number
self.born = born
class King(Knight):
'''A king is a knight with subjects.'''
def __init__(self, title, name, number, born, subjects=None):
super().__init__(title, name, number, born)
self.subjects = subjects if subjects is not None else []
# schemas ---------------------------------------------------------------------
class KnightSchema(schema.Schema):
title = fields.String()
name = fields.String()
number = fields.Integer()
born = fields.Date()
class KnightDictSchema(schema.Schema):
title = fields.String(key='title')
name = fields.String(key='name')
number = fields.Integer(key='number')
born = fields.Date(key='born')
class KnightListSchema(schema.Schema):
title = fields.String(key=0)
name = fields.String(key=1)
number = fields.Integer(key=2)
born = fields.Date(key=3)
class FieldWithAttrArgSchema(schema.Schema):
date_of_birth = fields.Date(attr='born')
class FieldWithGetterArgSchema(schema.Schema):
full_name = fields.String(
get=lambda obj: '{} {}'.format(obj.title, obj.name)
)
class FieldWithValArgSchema(schema.Schema):
constant_date = fields.Date(val=date(2014, 10, 20))
class KingWithEmbeddedSubjectsObjSchema(KnightSchema):
subjects = fields.Embed(schema=KnightSchema(many=True))
class KingWithEmbeddedSubjectsClassSchema(KnightSchema):
subjects = fields.Embed(schema=KnightSchema, many=True)
class KingWithEmbeddedSubjectsStrSchema(KnightSchema):
subjects = fields.Embed(schema=__name__ + '.KnightSchema', many=True)
class KingWithReferencedSubjectsObjSchema(KnightSchema):
subjects = fields.Reference(schema=KnightSchema(many=True), field='name')
class KingWithReferencedSubjectsClassSchema(KnightSchema):
subjects = fields.Reference(schema=KnightSchema, field='name', many=True)
class KingWithReferencedSubjectsStrSchema(KnightSchema):
subjects = fields.Reference(schema=__name__ + '.KnightSchema',
field='name', many=True)
class KingSchemaEmbedSelf(KnightSchema):
boss = fields.Embed(schema=__name__ + '.KingSchemaEmbedSelf',
exclude='boss')
class KingSchemaReferenceSelf(KnightSchema):
boss = fields.Reference(schema=__name__ + '.KingSchemaEmbedSelf',
field='name')
# fixtures --------------------------------------------------------------------
@pytest.fixture
def bedevere():
return Knight('Sir', 'Bedevere', 2, date(502, 2, 2))
@pytest.fixture
def lancelot():
return Knight('Sir', 'Lancelot', 3, date(503, 3, 3))
@pytest.fixture
def galahad():
return Knight('Sir', 'Galahad', 4, date(504, 4, 4))
@pytest.fixture
def knights(bedevere, lancelot, galahad):
return [bedevere, lancelot, galahad]
@pytest.fixture
def arthur(knights):
return King('King', 'Arthur', 1, date(501, 1, 1), knights)
@pytest.fixture
def lancelot_dict():
return {
'title': 'Sir',
'name': 'Lancelot',
'number': 3,
'born': date(503, 3, 3),
}
@pytest.fixture
def lancelot_list():
return [
'Sir',
'Lancelot',
3,
date(503, 3, 3),
]
# tests -----------------------------------------------------------------------
def test_dump_single_dict_unordered(lancelot_dict):
knight_dict_schema = KnightDictSchema(many=False, ordered=False)
result = knight_dict_schema.dump(lancelot_dict)
expected = {
'title': 'Sir',
'name': 'Lancelot',
'number': 3,
'born': '0503-03-03'
}
assert type(result) == dict
assert result == expected
def test_dump_single_list_unordered(lancelot_list):
knight_list_schema = KnightListSchema(many=False, ordered=False)
result = knight_list_schema.dump(lancelot_list)
expected = {
'title': 'Sir',
'name': 'Lancelot',
'number': 3,
'born': '0503-03-03'
}
assert type(result) == dict
assert result == expected
def test_dump_single_unordered(lancelot):
knight_schema = KnightSchema(many=False, ordered=False)
result = knight_schema.dump(lancelot)
expected = {
'title': 'Sir',
'name': 'Lancelot',
'number': 3,
'born': '0503-03-03'
}
assert type(result) == dict
assert result == expected
def test_dump_single_ordered(lancelot):
knight_schema = KnightSchema(many=False, ordered=True)
result = knight_schema.dump(lancelot)
expected = OrderedDict([
('title', 'Sir'),
('name', 'Lancelot'),
('number', 3),
('born', '0503-03-03'),
])
assert type(result) == OrderedDict
assert result == expected
def test_dump_many_unordered(knights):
knight_schema = KnightSchema(many=True, ordered=False)
result = knight_schema.dump(knights)
expected = [
dict(title='Sir', name='Bedevere', number=2, born='0502-02-02'),
dict(title='Sir', name='Lancelot', number=3, born='0503-03-03'),
dict(title='Sir', name='Galahad', number=4, born='0504-04-04'),
]
assert all(type(x) == dict for x in result)
assert result == expected
def test_dump_many_ordered(knights):
knight_schema = KnightSchema(many=True, ordered=True)
result = knight_schema.dump(knights)
expected = [
OrderedDict([('title', 'Sir'), ('name', 'Bedevere'),
('number', 2), ('born', '0502-02-02')]),
OrderedDict([('title', 'Sir'), ('name', 'Lancelot'),
('number', 3), ('born', '0503-03-03')]),
OrderedDict([('title', 'Sir'), ('name', 'Galahad'),
('number', 4), ('born', '0504-04-04')]),
]
assert all(type(x) == OrderedDict for x in result)
assert result == expected
def test_field_exclude_dump(lancelot):
knight_schema = KnightSchema(exclude=['born', 'number'])
result = knight_schema.dump(lancelot)
expected = {
'title': 'Sir',
'name': 'Lancelot',
}
assert result == expected
def test_field_only_dump(lancelot):
knight_schema = KnightSchema(only=['name', 'number'])
result = knight_schema.dump(lancelot)
expected = {
'name': 'Lancelot',
'number': 3,
}
assert result == expected
def test_dump_field_with_attr_arg(lancelot):
attr_schema = FieldWithAttrArgSchema()
result = attr_schema.dump(lancelot)
expected = {
'date_of_birth': '0503-03-03'
}
assert result == expected
def test_dump_field_with_getter_arg(lancelot):
getter_schema = FieldWithGetterArgSchema()
result = getter_schema.dump(lancelot)
expected = {
'full_name': 'Sir Lancelot'
}
assert result == expected
def test_dump_field_with_val_arg(lancelot):
val_schema = FieldWithValArgSchema()
result = val_schema.dump(lancelot)
expected = {
'constant_date': '2014-10-20'
}
assert result == expected
def test_fail_on_unexpected_collection(knights):
knight_schema = KnightSchema(many=False)
with pytest.raises(AttributeError):
knight_schema.dump(knights)
@pytest.mark.parametrize(
'king_schema_cls',
[KingWithEmbeddedSubjectsObjSchema,
KingWithEmbeddedSubjectsClassSchema,
KingWithEmbeddedSubjectsStrSchema]
)
def test_dump_embedding_schema(king_schema_cls, arthur):
king_schema = king_schema_cls()
expected = {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
'subjects': [
dict(title='Sir', name='Bedevere', number=2, born='0502-02-02'),
dict(title='Sir', name='Lancelot', number=3, born='0503-03-03'),
dict(title='Sir', name='Galahad', number=4, born='0504-04-04'),
]
}
assert king_schema.dump(arthur) == expected
@pytest.mark.parametrize(
'king_schema_cls',
[KingWithReferencedSubjectsObjSchema,
KingWithReferencedSubjectsClassSchema,
KingWithReferencedSubjectsStrSchema]
)
def test_dump_referencing_schema(king_schema_cls, arthur):
king_schema = king_schema_cls()
expected = {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
'subjects': ['Bedevere', 'Lancelot', 'Galahad']
}
assert king_schema.dump(arthur) == expected
def test_embed_self_schema(arthur):
# a king is his own boss
arthur.boss = arthur
king_schema = KingSchemaEmbedSelf()
result = king_schema.dump(arthur)
expected = {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
'boss': {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
}
}
assert result == expected
def test_reference_self_schema(arthur):
# a king is his own boss
arthur.boss = arthur
king_schema = KingSchemaReferenceSelf()
result = king_schema.dump(arthur)
expected = {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
'boss': 'Arthur',
}
assert result == expected
def test_fail_on_unnecessary_keywords():
class EmbedSchema(schema.Schema):
some_field = fields.String()
embed_schema = EmbedSchema(many=True)
class EmbeddingSchema(schema.Schema):
another_field = fields.String()
# here we provide a schema _instance_. the kwarg "many" is unnecessary
incorrect_embed_field = fields.Embed(schema=embed_schema, many=True)
# the incorrect field is constructed lazily. we'll have to access it
with pytest.raises(ValueError):
EmbeddingSchema.__fields__['incorrect_embed_field']._schema_inst
def test_dump_exotic_field_names():
exotic_names = [
'', # empty string
'"', # single quote
"'", # double quote
'\u2665', # unicode heart symbol
'print(123)', # valid python code
'print("123\'', # invalid python code
]
class ExoticFieldNamesSchema(schema.Schema):
__lima_args__ = {
'include': {name: fields.String(attr='foo')
for name in exotic_names}
}
class Foo:
def __init__(self):
self.foo = 'foobar'
obj = Foo()
exotic_field_names_schema = ExoticFieldNamesSchema()
result = exotic_field_names_schema.dump(obj)
expected = {name: 'foobar' for name in exotic_names}
assert result == expected
for name in exotic_names:
dump_field_func = exotic_field_names_schema._dump_field_func(name)
result = dump_field_func(obj)
expected = 'foobar'
assert result == expected
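# Quick demonstration outside pytest -- a minimal sketch mirroring
# test_dump_single_unordered above (not part of the original test module):
if __name__ == '__main__':
    demo_knight = Knight('Sir', 'Lancelot', 3, date(503, 3, 3))
    print(KnightSchema().dump(demo_knight))
    # -> {'title': 'Sir', 'name': 'Lancelot', 'number': 3, 'born': '0503-03-03'}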
|
Digitalisation and globalisation are causing major changes in all areas of our lives. They are changing communication, mobility and our social interaction – even our democracy. It is therefore important that state and administration do not fall behind, and that the digital gap between developments in the private and public sectors is closed. The Congress not only looks at the digital offerings of the economy, but also meets the administration where it is today, with its questions and problems, so that the future can be mastered together.
This year’s 7th Future Congress State & Administration will take place in the Congress Center in Berlin from May 27th to 29th, 2019. Since 2013, the Future Congress State & Administration has been the leading event of the Public Sector for Digital Transformation, under the patronage of the Federal Minister of the Interior, Building and Community. Federal Minister Horst Seehofer himself will speak at the opening of the 2nd Congress day (May 28th 2019, 9-10:15am).
|
from __future__ import unicode_literals
from django.template import Library, Node, TemplateSyntaxError
from django.conf import settings
from django.template.loader import select_template
from django.template.base import token_kwargs, compile_string
register = Library()
@register.tag
def multi_include(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
"%r tag takes at least one argument: the name of the template to be included." % bits[0])
template_expressions, extra_index = _template_expressions(bits)
template_expressions = [compile_quote_string(path) for path in template_expressions]
options = {}
remaining_bits = bits[extra_index:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=False)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'only':
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
isolated_context = options.get('only', False)
namemap = options.get('with', {})
return MultiIncludeNode(template_expressions, extra_context=namemap,
isolated_context=isolated_context)
class MultiIncludeNode(Node):
def __init__(self, template_name_list, *args, **kwargs):
self.template_name_list = template_name_list
self.extra_context = kwargs.pop('extra_context', {})
self.isolated_context = kwargs.pop('isolated_context', False)
super(MultiIncludeNode, self).__init__(*args, **kwargs)
def render_template(self, template, context):
        values = dict([(name, var.resolve(context)) for name, var
                       in self.extra_context.items()])
if self.isolated_context:
return template.render(context.new(values))
context.update(values)
output = template.render(context)
context.pop()
return output
def render(self, context):
try:
template_names = [exp.render(context) for exp in self.template_name_list]
template = select_template(template_names)
return self.render_template(template, context)
        except Exception:
if settings.TEMPLATE_DEBUG:
raise
return ''
def _template_expressions(bits):
extra_index = len(bits)
keyword_indexes = []
for keyword in ['with', 'only']:
try:
keyword_indexes.append(bits.index(keyword))
except ValueError:
pass
if keyword_indexes:
extra_index = min(keyword_indexes)
return bits[1:extra_index], extra_index
def compile_quote_string(path):
if path[0] in ('"', "'") and path[-1] == path[0]:
return compile_string(path[1:-1], "")
else:
raise TemplateSyntaxError('String must contain quotes')
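# A minimal usage sketch (hypothetical template names, not part of the
# original module): the tag renders the first template that exists, so a
# theme override can fall back to a default:
#
#   {% multi_include "theme/header.html" "default/header.html" with title=page.title only %}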
|
Technology is changing rapidly, and there is an essential need to keep pace with digital trends. Healthcare analytics and hospital ERP software are essential for staying current in a digital world. Patient health and patient care are the top priorities of any healthcare organization worldwide, so it is important to have software that manages and improves healthcare systems. Cloudpital offers a unique Hospital EMR Software in Mexico that handles all hospital activity in a paperless environment. The full-featured software developed by Cloudpital has transformed the administrative, staffing and clinical operations of healthcare organizations.
Cloudpital’s Clinic Management Software offers fully integrated, enhanced features capable of managing every clinical task, with the modern technology the healthcare industry needs to improve its services.
With Cloudpital’s Best Hospital ERP Software in Gabon, patient management and all administrative tasks become easier through advanced features.
Cloudpital’s full-featured software enables hospital staff to keep records of in-patients, out-patients and the total number of beds offered by a particular hospital facility.
The software manages patient registrations and admissions.
It provides exclusive features for appointment scheduling, patient screening, consultation and accurate diagnosis.
Cloudpital’s Hospital Management Software in Mexico offers components that together deliver a best-in-class Hospital ERP system: a paperless environment and fast administration.
|
import os, glob
import golly as g
def convbellman (text, stx, sty):
    # Parse a Bellman solution file and paste it into the grid at (stx, sty).
    # Returns (accept generation, glider count); -1 where not present.
    textln = text.split ('\n')
    gen = -1
    glcnt = -1
    y = sty
    # Bellman pattern characters mapped to LifeBellman cell states.
    states = {'.': 0, '?': 5, '*': 3, '@': 1}
    for ln in textln:
        if not ln:
            break
        if ln [0] == '#':
            if ln [0:35] == "#C Solution accepted at generation ":
                gen = int (ln [35:])
            elif ln [0:26] == "#C Glider count at accept ":
                glcnt = int (ln [26:])
        else:
            x = stx
            for c in ln:
                if c in states:
                    g.setcell (x, y, states [c])
                    x += 1
            y += 1
    return (gen, glcnt)
def clean (rect):
for y in xrange (rect [1], rect [1] + rect [3]):
for x in xrange (rect [0], rect [0] + rect [2]):
if g.getcell (x, y) != 1:
g.setcell (x, y, 0)
def addmarkers (rect):
g.setcell (rect [0], rect [1], 1)
g.setcell (rect [0] + rect [2] - 1, rect [1], 1)
g.setcell (rect [0], rect [1] + rect [3] - 1, 1)
g.setcell (rect [0] + rect [2] - 1, rect [1] + rect [3] - 1, 1)
def analyse (gogen, glcnt, minpop, maxpop, mingl):
if glcnt < mingl:
return (False, 0)
g.run (gogen)
inrect = g.getrect ()
clean (inrect)
endpop = int (g.getpop ())
if endpop < minpop or endpop > maxpop:
return (False, 0)
rect = g.getrect ()
if rect == []:
return (True, 0)
else:
addmarkers (inrect)
return (True, g.hash (inrect))
def main ():
g.update ()
g.check (False)
path = g.getstring ("Output directory:")
files = glob.glob (os.path.join (path, "*.out"))
mingls = g.getstring ("Min number of gliders at accept:")
if mingls == "":
mingl = 0
minpop = 0
maxpop = 1024
else:
mingl = int (mingls)
minpops = g.getstring ("Min population except catalyzers:")
if minpops == "":
minpop = 0
maxpop = 1024
else:
minpop = int (minpops)
maxpop = int (g.getstring ("Max population except catalyzers:"))
if g.getname () != "catbellman_temp":
g.addlayer ()
hashdir = {}
catlist = []
catix = 0
g.new ("catbellman_temp")
g.setrule ("LifeBellman")
for fix, filename in enumerate (files):
patt = g.getrect ()
if patt != []:
g.select (patt)
g.clear (0)
g.setgen ("0")
with open(filename, 'r') as f:
filetext = f.read ()
if fix % 16 == 0:
g.show ("Analysing " + str (fix) + "/" + str (len (files)))
(gogen, glcnt) = convbellman (filetext, 0, 0)
if gogen == -1:
gogen = 128
(use, hash) = analyse (gogen, glcnt, minpop, maxpop, mingl)
if use:
if not hash in hashdir:
catlist.append ([])
hashdir [hash] = catix
catix += 1
cat = hashdir [hash]
catlist [cat].append (filetext)
g.new ("catbellman_temp")
g.setrule ("LifeBellman")
fix = 0
y = 0
for cat in catlist:
x = 96 * (len (cat) - 1)
for filetext in cat:
convbellman (filetext, x, y)
x -= 96
fix += 1
if fix % 32 == 0:
g.show ("Rendering " + str (fix) + "/" + str (len (files)))
g.fit ()
g.check (True)
g.update ()
g.check (False)
y += 96
g.show ("Done")
g.fit ()
g.setstep (-1)
g.check (True)
main ()
|
Brick homes have stood the test of time as durable, attractive exterior options for homes, businesses and other buildings. The Triangle area has a lot of brick structures, from historic homes like Ayr Mount in Hillsborough to new construction like the Durham Bulls Athletic Park in Durham.
A new player on the home building material scene is threatening to unseat this traditional material, though. It provides many of the same benefits, plus some new ones, at a more affordable price. Here are just a few reasons people are choosing fiber cement siding over brick.
Brick is a supremely durable material, and fiber cement, which is made of sand, cement and cellulose, provides a similar level of strength. Unlike brick, fiber cement siding usually comes with a 50-year warranty. Beyond strength, both insulate well against sound and weather when compared to lighter sidings like aluminum and vinyl.
Fiber cement, like James Hardie siding, is far more versatile when it comes to style. Brick may have some variability in color, but most of it is red or painted brick. Fiber cement can be made in virtually any color, giving the homeowner many more possibilities, especially on the lighter end of the color palette. There are also textures and shapes that mimic various types of wood siding, which you cannot achieve with brick.
While brick itself lasts a long time, the mortar between the bricks does not last nearly as long. Mortar joints can crumble from weathering or simple age and will require upkeep over the years. This repair process, called "repointing," can quickly become expensive.
It comes as no surprise: bricks are heavy building materials. The unwieldy nature of brick makes installation a time-consuming and expensive process for the builder, and for the consumer, too. It can take quite a long time to put up a full wall or facade brick by brick.
Fiber cement, on the other hand, is as easy to install as wood siding and nowhere near as heavy as brick. The cost savings in manual labor are then passed down to the consumer without losing any quality or any of the benefits of fiber cement siding.
Both brick and fiber cement are durable, attractive options for homeowners in the Triangle. But, there are some real advantages that fiber cement sidings have over brick. Robert Gordon Services installs fiber cement siding, specifically James Hardie products, and encourages Triangle area home and business owners to choose fiber cement as a “best of all worlds” siding solution.
Give Robert Gordon Services a call at 919-250-8038 to have fiber cement installed in the greater Triangle area, including Raleigh, Durham, Chapel Hill, Cary, and Morrisville.
|
import json
from wirecloud.oauth2provider import pyoauth2_utils as utils
class Provider(object):
"""Base provider class for different types of OAuth 2.0 providers."""
def _handle_exception(self, exc):
"""Handle an internal exception that was caught and suppressed.
:param exc: Exception to process.
:type exc: Exception
"""
pass
def _make_response(self, body='', headers=None, status_code=200):
"""Return a response object from the given parameters.
:param body: Buffer/string containing the response body.
:type body: str
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
raise NotImplementedError('Subclasses must implement ' \
'_make_response.')
def _make_redirect_error_response(self, redirect_uri, err):
"""Return a HTTP 302 redirect response object containing the error.
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
params = {
'error': err,
'response_type': None,
'client_id': None,
'redirect_uri': None
}
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={'Location': redirect},
status_code=302)
def _make_json_response(self, data, headers=None, status_code=200):
"""Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
response_headers = {}
if headers is not None:
response_headers.update(headers)
response_headers['Content-Type'] = 'application/json;charset=UTF-8'
response_headers['Cache-Control'] = 'no-store'
response_headers['Pragma'] = 'no-cache'
return self._make_response(json.dumps(data),
response_headers,
status_code)
class AuthorizationProvider(Provider):
"""OAuth 2.0 authorization provider. This class manages authorization
codes and access tokens. Certain methods MUST be overridden in a
subclass, thus this class cannot be directly used as a provider.
These are the methods that must be implemented in a subclass:
get_client(self, client_id)
# Return a Client instance. Exception if not found
validate_client_secret(self, client, client_secret)
# Return True or False
validate_scope(self, client, scope)
# Return True or False
    validate_redirect_uri(self, client, redirect_uri)
# Return True or False
validate_access(self) # Use this to validate your app session user
# Return True or False
from_authorization_code(self, client_id, code, scope)
# Return mixed data or None on invalid
from_refresh_token(self, client_id, refresh_token, scope)
# Return mixed data or None on invalid
persist_authorization_code(self, user, client, code, scope)
# Return value ignored
persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data)
# Return value ignored
discard_authorization_code(self, client_id, code)
# Return value ignored
discard_refresh_token(self, client_id, refresh_token)
# Return value ignored
    Optionally, the following may be overridden to achieve desired behavior:
@property
token_length(self)
@property
token_type(self)
@property
token_expires_in(self)
generate_authorization_code(self)
generate_access_token(self)
generate_refresh_token(self)
"""
@property
def token_length(self):
"""Property method to get the length used to generate tokens.
:rtype: int
"""
return 40
@property
def token_type(self):
"""Property method to get the access token type.
:rtype: str
"""
return 'Bearer'
@property
def token_expires_in(self):
"""Property method to get the token expiration time in seconds.
:rtype: int
"""
return 3600
def generate_authorization_code(self):
"""Generate a random authorization code.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_access_token(self):
"""Generate a random access token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_refresh_token(self):
"""Generate a random refresh token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def validate_authorization_code_request(self, request, user, response_type, client, redirect_uri, scope='', **params):
# Check client
if client is None:
return self._make_error_response(request, 'unauthorized_client')
# Check redirect URI
if not self.validate_redirect_uri(client, redirect_uri):
return self._make_error_response(request, 'invalid_request')
# Ensure proper response_type
if response_type != 'code':
return self._make_redirect_error_response(redirect_uri, 'unsupported_response_type')
# Check conditions
# Return proper error responses on invalid conditions
if not self.validate_access():
err = 'access_denied'
return self._make_redirect_error_response(redirect_uri, err)
if not self.validate_scope(client, scope):
err = 'invalid_scope'
return self._make_redirect_error_response(redirect_uri, err)
def get_authorization_code(self, request, user, response_type, client_id, redirect_uri, **params):
"""Generate authorization code HTTP response.
:param response_type: Desired response type. Must be exactly "code".
:type response_type: str
:param client_id: Client ID.
:type client_id: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:rtype: requests.Response
"""
scope = params.get('scope', '')
client = self.get_client(client_id)
error_response = self.validate_authorization_code_request(request, user, response_type, client, redirect_uri, scope)
if error_response is not None:
return error_response
# Generate authorization code
code = self.generate_authorization_code()
# Save information to be used to validate later requests
self.persist_authorization_code(user=user, client=client, code=code, scope=scope)
# Return redirection response
params.update({
'code': code,
'response_type': None,
'client_id': None,
'redirect_uri': None
})
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={'Location': redirect}, status_code=302)
def refresh_token(self, request, client_id, client_secret, refresh_token, **params):
"""Generate access token HTTP response from a refresh token.
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param refresh_token: Refresh token.
:type refresh_token: str
:rtype: requests.Response
"""
scope = params.get('scope', '')
# Check conditions
try:
client = self.get_client(client_id)
        except Exception:
return self._make_error_response(request, 'invalid_client')
# Validate grant info
is_valid_client_secret = self.validate_client_secret(client, client_secret)
data = self.from_refresh_token(client_id, refresh_token, scope)
is_valid_grant = data is not None
if not is_valid_client_secret or not is_valid_grant:
return self._make_error_response(request, 'invalid_grant')
# Validate scope
if not self.validate_scope(client, scope):
return self._make_error_response(request, 'invalid_scope')
# Discard original refresh token
self.discard_refresh_token(client_id, refresh_token)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data)
# Return json response
return self._make_json_response({
'access_token': access_token,
'token_type': token_type,
'expires_in': expires_in,
'refresh_token': refresh_token
})
def get_token(self, request, client_id, client_secret, redirect_uri, code, **params):
"""Generate access token HTTP response.
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param code: Authorization code.
:type code: str
:rtype: requests.Response
"""
scope = params.get('scope', '')
# Check conditions
try:
client = self.get_client(client_id)
        except Exception:
return self._make_error_response(request, 'invalid_client')
# Validate grant info
is_valid_redirect_uri = self.validate_redirect_uri(client, redirect_uri)
is_valid_client_secret = self.validate_client_secret(client, client_secret)
data = self.from_authorization_code(client_id, code, scope)
is_valid_grant = data is not None
if not is_valid_client_secret or not is_valid_grant or not is_valid_redirect_uri:
return self._make_error_response(request, 'invalid_grant')
# Validate scope
if not self.validate_scope(client, scope):
return self._make_error_response(request, 'invalid_scope')
# Discard original authorization code
self.discard_authorization_code(client_id, code)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data)
# Return json response
return self._make_json_response({
'access_token': access_token,
'token_type': token_type,
'expires_in': expires_in,
'refresh_token': refresh_token
})
def get_token_from_post_data(self, request, data):
"""Get a token response from POST data.
:param data: POST data containing authorization information.
:type data: dict
:rtype: requests.Response
"""
try:
# Verify OAuth 2.0 Parameters
for x in ['grant_type', 'client_id', 'client_secret']:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {}".format(x))
# Handle get token from refresh_token
if data['grant_type'] == 'refresh_token':
if 'refresh_token' not in data:
raise TypeError("Missing required OAuth 2.0 POST param: refresh_token")
return self.refresh_token(request, **data)
elif data['grant_type'] == 'authorization_code':
# Handle get token from authorization code
for x in ['redirect_uri', 'code']:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {}".format(x))
return self.get_token(request, **data)
else:
return self._make_error_response(request, 'unsupported_grant_type')
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
return self._make_error_response(request, 'invalid_request')
except StandardError as exc:
self._handle_exception(exc)
# Catch all other server errors
return self._make_error_response(request, 'server_error')
def get_client(self, client_id): # pragma: no cover
raise NotImplementedError('Subclasses must implement get_client.')
def validate_client_secret(self, client, client_secret): # pragma: no cover
raise NotImplementedError('Subclasses must implement validate_client_secret.')
def validate_redirect_uri(self, client, redirect_uri): # pragma: no cover
raise NotImplementedError('Subclasses must implement validate_redirect_uri.')
def validate_scope(self, client, scope): # pragma: no cover
raise NotImplementedError('Subclasses must implement validate_scope.')
def validate_access(self): # pragma: no cover
raise NotImplementedError('Subclasses must implement validate_access.')
def from_authorization_code(self, client_id, code, scope): # pragma: no cover
raise NotImplementedError('Subclasses must implement from_authorization_code.')
def from_refresh_token(self, client_id, refresh_token, scope): # pragma: no cover
raise NotImplementedError('Subclasses must implement from_refresh_token.')
    def persist_authorization_code(self, user, client, code, scope):  # pragma: no cover
raise NotImplementedError('Subclasses must implement persist_authorization_code.')
def persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data): # pragma: no cover
raise NotImplementedError('Subclasses must implement persist_token_information.')
def discard_authorization_code(self, client_id, code): # pragma: no cover
raise NotImplementedError('Subclasses must implement discard_authorization_code.')
def discard_refresh_token(self, client_id, refresh_token): # pragma: no cover
raise NotImplementedError('Subclasses must implement discard_refresh_token.')
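# ---------------------------------------------------------------------------
# Example only -- not part of the WireCloud module above. A minimal sketch of
# a concrete provider backed by an in-memory store, with a trivial response
# object standing in for requests.Response. All names and values here are
# illustrative assumptions, not the real WireCloud implementation.
class _SketchResponse(object):
    def __init__(self, body, headers, status_code):
        self.body = body
        self.headers = headers or {}
        self.status_code = status_code

class InMemoryAuthorizationProvider(AuthorizationProvider):
    # One hard-coded client; a real provider would query a database.
    CLIENTS = {'demo-client': {'secret': 'demo-secret',
                               'redirect_uri': 'https://example.org/cb'}}

    def __init__(self):
        self._codes = {}   # (client_id, code) -> {'user': ..., 'scope': ...}
        self._tokens = {}  # (client_id, refresh_token) -> {'scope': ..., 'data': ...}

    def _make_response(self, body='', headers=None, status_code=200):
        return _SketchResponse(body, headers, status_code)

    def _make_error_response(self, request, err):
        # The base class expects this hook (it is defined elsewhere in
        # WireCloud); a JSON error body is one reasonable choice here.
        return self._make_json_response({'error': err}, status_code=400)

    def get_client(self, client_id):
        client = dict(self.CLIENTS[client_id])  # raises KeyError if unknown
        client['client_id'] = client_id
        return client

    def validate_client_secret(self, client, client_secret):
        return client['secret'] == client_secret

    def validate_redirect_uri(self, client, redirect_uri):
        return client['redirect_uri'] == redirect_uri

    def validate_scope(self, client, scope):
        return scope in ('', 'read')

    def validate_access(self):
        return True  # hook for validating the app session user

    def persist_authorization_code(self, user, client, code, scope):
        self._codes[(client['client_id'], code)] = {'user': user, 'scope': scope}

    def from_authorization_code(self, client_id, code, scope):
        info = self._codes.get((client_id, code))
        return info if info is not None and info['scope'] == scope else None

    def discard_authorization_code(self, client_id, code):
        self._codes.pop((client_id, code), None)

    def persist_token_information(self, client_id, scope, access_token,
                                  token_type, expires_in, refresh_token, data):
        self._tokens[(client_id, refresh_token)] = {'scope': scope, 'data': data}

    def from_refresh_token(self, client_id, refresh_token, scope):
        info = self._tokens.get((client_id, refresh_token))
        return info['data'] if info is not None and info['scope'] == scope else None

    def discard_refresh_token(self, client_id, refresh_token):
        self._tokens.pop((client_id, refresh_token), None)
# ---------------------------------------------------------------------------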
|
The Napali coast is a 17-mile expanse of rocky, steep, but oh-so-gorgeous series of cliffs that stretch along Kauai's northwest shore. Travelers agree there's no better way to experience Kauai's natural beauty and eye-catching terrain than at Napali, so you should make plans to marvel at it – either on foot, from the water or from the air (there are no roads that go along the Napali Coast).
Hiking the Kalalau Trail is the least expensive way of surveying this coast, but it's also an incredibly strenuous excursion that only experienced hikers should attempt. Other popular ways of taking in Napali are by boat or helicopter, which let travelers see parts of the coast inaccessible on foot, such as untouched beaches, sea caves and sky-high waterfalls tucked between the fluted cliffs. Visitors say those prone to seasickness might want to reconsider a boat tour, as the north shore's waves tend to be very rough. If you can handle the exorbitant fees, many agree a one-hour helicopter tour is the best way to view Napali; there will be no achy muscles or queasy stomach, plus you'll get to see more of the area in a shorter period of time. Whatever you choose to do, travelers say the extra effort – whether it's a challenging hike, a rocky boat ride or an expensive helicopter tour – is completely worth it.
If you want to hike the Kalalau Trail, you'll need to obtain a permit. Keep in mind if you're planning on just hiking to Hanakapi'ai Valley (2 miles from the trailhead), you won't need a permit. Go farther beyond that and you'll need to get a $20 camping permit, which is valid for a maximum of five nights, even if you aren't planning on camping. Visit the Hawaii State Park's website for more information. You can take a boat tour or helicopter tour year-round. Prices vary according to excursion; visit the Hawaii State Park's website for information on boat trips and Hawaii's tourism website for details on helicopter tours.
|
class Course:
def __init__(self, name, hashcode, code):
self.subjects = set()
self.name = name
self.hashcode = hashcode
self.code = code
@classmethod
def from_db(cls, row):
name = row[0]
hashcode = row[1]
return cls(name, hashcode, "")
def add_subject(self, subject_code):
self.subjects.add(subject_code)
def response_json(self):
return {"name": self.name, "code": self.hashcode, "subjects": list(self.subjects)}
class DataRow:
def __init__(self):
self.week = ""
self.day = ""
self.date = ""
self.start = ""
self.end = ""
self.code = ""
self.type = ""
self.info = ""
self.campus = ""
self.rooms = ""
def get_data_tuple(self):
return (self.week, self.day, self.date, self.start, self.end, self.code, self.type, self.info, self.campus, self.rooms)
    def get_csv_data(self):
        return ";".join(self.get_data_tuple()) + ";\n"
|
Peace may not be as difficult to find as we think. Though world peace can seem like an impossible dream at times, we can all find inner peace and serenity in our own lives, if we look for it.
HuffPost Religion asked, "What does peace look like to you?"
Here, our readers show us what #mypeaceis. If you would like to share an image of peace you can use #MyPeaceIs on Twitter and Instagram, or submit in the slideshow below.
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from flask import request
from eve.validation import ValidationError
from eve.utils import config
import logging
import superdesk
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.utc import utcnow
from superdesk import get_backend
from superdesk import get_resource_service
from superdesk.workflow import get_privileged_actions
_preferences_key = 'preferences'
_user_preferences_key = 'user_preferences'
_session_preferences_key = 'session_preferences'
_privileges_key = 'active_privileges'
_action_key = 'allowed_actions'
logger = logging.getLogger(__name__)
def init_app(app):
endpoint_name = 'preferences'
service = PreferencesService(endpoint_name, backend=get_backend())
PreferencesResource(endpoint_name, app=app, service=service)
app.on_session_end -= service.on_session_end
app.on_session_end += service.on_session_end
app.on_role_privileges_revoked -= service.on_role_privileges_revoked
app.on_role_privileges_revoked += service.on_role_privileges_revoked
superdesk.intrinsic_privilege(resource_name=endpoint_name, method=['PATCH'])
def enhance_document_with_default_prefs(doc):
user_prefs = doc.get(_user_preferences_key, {})
available = dict(superdesk.default_user_preferences)
available.update(user_prefs)
def sync_field(field, dest, default):
if not isinstance(dest, dict) or not isinstance(default, dict):
return
if default.get(field):
dest[field] = default[field]
elif dest.get(field):
dest.pop(field, None)
# make sure label and category are up-to-date
for k, v in available.items():
default = superdesk.default_user_preferences.get(k)
if default:
sync_field('label', v, default)
sync_field('category', v, default)
doc[_user_preferences_key] = available
class PreferencesResource(Resource):
datasource = {
'source': 'users',
'projection': {
_session_preferences_key: 1,
_user_preferences_key: 1,
_privileges_key: 1,
_action_key: 1,
'_etag': 1
}
}
schema = {
_session_preferences_key: {'type': 'dict', 'required': True},
_user_preferences_key: {'type': 'dict', 'required': True},
_privileges_key: {'type': 'dict'},
_action_key: {'type': 'list'}
}
resource_methods = []
item_methods = ['GET', 'PATCH']
superdesk.register_default_user_preference('feature:preview', {
'type': 'bool',
'enabled': False,
'default': False,
'label': 'Enable Feature Preview',
'category': 'feature',
'privileges': ['feature_preview']
})
superdesk.register_default_user_preference('archive:view', {
'type': 'string',
'allowed': ['mgrid', 'compact'],
'view': 'mgrid',
'default': 'mgrid',
'label': 'Users archive view format',
'category': 'archive'
})
superdesk.register_default_user_preference('singleline:view', {
'type': 'bool',
'enabled': None,
'default': False,
'label': 'Enable Single Line View',
'category': 'rows'
})
superdesk.register_default_user_preference('editor:theme', {
'type': 'string',
'theme': '',
})
superdesk.register_default_user_preference('workqueue:items', {
'items': []
})
superdesk.register_default_user_preference('dashboard:ingest', {
'providers': []
})
superdesk.register_default_user_preference('agg:view', {
'active': {},
})
superdesk.register_default_user_preference('templates:recent', {})
superdesk.register_default_user_preference('dateline:located', {
'type': 'dict',
'label': 'Located',
'category': 'article_defaults'
})
superdesk.register_default_user_preference('categories:preferred', {
'type': 'dict',
'category': 'categories',
'label': 'Preferred Categories',
'selected': {},
})
superdesk.register_default_user_preference('desks:preferred', {
'type': 'dict',
'category': 'desks',
'label': 'Preferred Desks',
'selected': {},
})
superdesk.register_default_user_preference('article:default:place', {
'type': 'list',
'label': 'Place',
'category': 'article_defaults',
'place': []
})
superdesk.register_default_user_preference('spellchecker:status', {
'type': 'bool',
'enabled': True,
'default': True
})
superdesk.register_default_user_preference('destination:active', {})
superdesk.register_default_session_preference('scratchpad:items', [])
superdesk.register_default_session_preference('desk:last_worked', '')
superdesk.register_default_session_preference('desk:items', [])
superdesk.register_default_session_preference('stage:items', [])
superdesk.register_default_session_preference('pinned:items', [])
class PreferencesService(BaseService):
def on_session_end(self, user_id, session_id):
service = get_resource_service('users')
user_doc = service.find_one(req=None, _id=user_id)
session_prefs = user_doc.get(_session_preferences_key, {}).copy()
if not isinstance(session_id, str):
session_id = str(session_id)
if session_id in session_prefs:
del session_prefs[session_id]
service.system_update(user_id, {_session_preferences_key: session_prefs}, user_doc)
def set_session_based_prefs(self, session_id, user_id):
service = get_resource_service('users')
user_doc = service.find_one(req=None, _id=user_id)
session_prefs = user_doc.get(_session_preferences_key, {})
available = dict(superdesk.default_session_preferences)
if available.get('desk:last_worked') == '' and user_doc.get('desk'):
available['desk:last_worked'] = user_doc.get('desk')
session_prefs.setdefault(str(session_id), available)
service.system_update(user_id, {_session_preferences_key: session_prefs}, user_doc)
def set_user_initial_prefs(self, user_doc):
if _user_preferences_key not in user_doc:
orig_user_prefs = user_doc.get(_preferences_key, {})
available = dict(superdesk.default_user_preferences)
available.update(orig_user_prefs)
user_doc[_user_preferences_key] = available
def find_one(self, req, **lookup):
session = get_resource_service('sessions').find_one(req=None, _id=lookup['_id'])
_id = session['user'] if session else lookup['_id']
doc = get_resource_service('users').find_one(req, _id=_id)
if doc:
doc['_id'] = session['_id'] if session else _id
return doc
def on_fetched_item(self, doc):
session_id = request.view_args['_id']
session_prefs = doc.get(_session_preferences_key, {}).get(session_id, {})
doc[_session_preferences_key] = session_prefs
self.enhance_document_with_user_privileges(doc)
enhance_document_with_default_prefs(doc)
self._filter_preferences_by_privileges(doc)
def on_update(self, updates, original):
existing_user_preferences = original.get(_user_preferences_key, {}).copy()
existing_session_preferences = original.get(_session_preferences_key, {}).copy()
self.update_user_prefs(updates, existing_user_preferences)
session_id = request.view_args['_id']
self.update_session_prefs(updates, existing_session_preferences, session_id)
def update_session_prefs(self, updates, existing_session_preferences, session_id):
session_prefs = updates.get(_session_preferences_key)
if session_prefs is not None:
for k in (k for k, v in session_prefs.items() if k not in superdesk.default_session_preferences):
raise ValidationError('Invalid preference: %s' % k)
existing = existing_session_preferences.get(session_id, {})
existing.update(session_prefs)
existing_session_preferences[session_id] = existing
updates[_session_preferences_key] = existing_session_preferences
def update_user_prefs(self, updates, existing_user_preferences):
user_prefs = updates.get(_user_preferences_key)
if user_prefs is not None:
# check if the input is validated against the default values
for k in ((k for k, v in user_prefs.items() if k not in superdesk.default_user_preferences)):
raise ValidationError('Invalid preference: %s' % k)
existing_user_preferences.update(user_prefs)
updates[_user_preferences_key] = existing_user_preferences
def update(self, id, updates, original):
session = get_resource_service('sessions').find_one(req=None, _id=original['_id'])
original_unpatched = self.backend.find_one(self.datasource, req=None, _id=session['user'])
updated = original_unpatched.copy()
updated.update(updates)
del updated['_id']
res = self.backend.update(self.datasource, original_unpatched['_id'], updated, original_unpatched)
updates.update(updated)
# Return only the patched session prefs
session_prefs = updates.get(_session_preferences_key, {}).get(str(original['_id']), {})
updates[_session_preferences_key] = session_prefs
self.enhance_document_with_user_privileges(updates)
enhance_document_with_default_prefs(updates)
return res
def enhance_document_with_user_privileges(self, user_doc):
role_doc = get_resource_service('users').get_role(user_doc)
get_resource_service('users').set_privileges(user_doc, role_doc)
user_doc[_action_key] = get_privileged_actions(user_doc[_privileges_key])
def get_user_preference(self, user_id):
"""
This function returns preferences for the user.
"""
doc = get_resource_service('users').find_one(req=None, _id=user_id)
prefs = doc.get(_user_preferences_key, {})
return prefs
def email_notification_is_enabled(self, user_id=None, preferences=None):
"""
This function checks if email notification is enabled or not based on the preferences.
"""
if user_id:
preferences = self.get_user_preference(user_id)
send_email = preferences.get('email:notification', {}) if isinstance(preferences, dict) else {}
return send_email and send_email.get('enabled', False)
def is_authorized(self, **kwargs):
"""
Returns False if logged-in user is trying to update other user's or session's privileges.
:param kwargs:
:return: True if authorized, False otherwise
"""
if not kwargs.get('_id') or not kwargs.get('user_id'):
return False
session = get_resource_service('sessions').find_one(req=None, _id=kwargs.get('_id'))
if not session:
return False
return str(kwargs.get('user_id')) == str(session.get('user'))
def on_role_privileges_revoked(self, role, role_users):
"""Runs when user privilage has been revoked.
Update the session for active user so that preferences can be reloaded.
:param dict role: role getting updated
:param list role_users: list of user belonging to the role.
"""
if not role_users or not role:
return
logger.info('On_Role_Privileges_Revoked: Updating Users for Role:{}.'.format(role.get(config.ID_FIELD)))
for user in role_users:
try:
self.system_update(user[config.ID_FIELD], {config.LAST_UPDATED: utcnow()}, user)
            except Exception:
                logger.warning('On_Role_Privileges_Revoked: Failed to update user:{} with role:{}.'.
                               format(user.get(config.ID_FIELD), role.get(config.ID_FIELD)), exc_info=True)
def _filter_preferences_by_privileges(self, doc):
privileges = doc[_privileges_key]
preferences = doc[_user_preferences_key]
def has_missing_privileges(prefs):
prefs = prefs if isinstance(prefs, list) else [prefs]
return [priv for pref in prefs for priv in pref.get('privileges', []) if not privileges.get(priv)]
doc[_user_preferences_key] = {k: v for k, v in preferences.items() if not has_missing_privileges(v)}
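# Sketch (illustrative, not part of the original module): other Superdesk
# components register their own defaults the same way the preferences above
# are registered, and can read them back through this service, e.g.:
#
# superdesk.register_default_user_preference('myapp:compact_mode', {
#     'type': 'bool',
#     'enabled': False,
#     'default': False,
#     'label': 'Enable Compact Mode',
#     'category': 'myapp',
# })
# prefs = get_resource_service('preferences').get_user_preference(user_id)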
|
Stephanie Haymes has been promoted to Manager of A&R at Warner Music Nashville. Haymes has worked in a support role for each artist on the roster in her four years.
She was previously with William Morris Endeavor.
Crazy Pitches will host the inaugural Dress Up For St. Jude, which will be held Thursday, Sept. 10 at Still Working Music Group (1625 Broadway). Held from 5:30 p.m.-8 p.m., the event will feature an array of high-end new and gently used designer dresses, jewelry, purses and other accessories, ranging from $10-$50, with 100 percent of proceeds being donated to St. Jude Children’s Research Hospital to benefit their music therapy program.
The fundraiser idea came after Crazy Pitches members Chelsea Kent (Still Working Music), Hannah Showmaker (Parallel Entertainment), Ciara Shortridge (Disney Music Publishing) and Sarah Feldman (Writer’s Den Music Group), visited St. Jude in Memphis.
Sponsors of this debut occasion have donated event space, food, beverage and raffle items and include 5/3 Bank, Style Kitchen Hair Salon, Little Cesar’s Pizza, Still Working Music and SNG Music Publishing. Campbell Entertainment Group is handling publicity for the event.
Singer-songwriter Jason Fowler has signed with Elite Talent Agency of Nashville. Jason has an amazing story of rise, fall and redemption. He went from rising star, to being homeless on the streets of Atlanta, to finding redemption through grace, and now he shares that story through music.
Jason is currently finalizing his latest recording in Nashville at Darkhorse Studios with producer Billy Smiley (Whiteheart, Johnny Cash, Newsboys, Bebe and Cece Winans) for an upcoming new studio album and tour. Fowler has recruited a heavy hitter list of talented artists on his new album including Leigh Nash (Six Pence None the Richer), Will Turpin (Collective Soul), Peter Furler (Newsboys) and more.
|
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
import dj_database_url
from configurations import Configuration, values
class BaseConfiguration(Configuration):
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mnj9$1z4d$yllwa^6(&*&@*_ksz&$!ya-7-!*-hu^419be=+&a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# THIRD PARTY APPS
'rest_framework',
'rest_framework_swagger',
'corsheaders',
'storages',
'django_extensions',
# LOCAL APPS
'main.bitfinex',
'main.coins',
'main.rates',
'main.stocks',
'core',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
)
}
CORS_ORIGIN_ALLOW_ALL = True
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASE_ENV = values.Value(environ_prefix=None, environ_name='DATABASE_ENV', default='DATABASE_URL')
DATABASE_URL = values.SecretValue(environ_prefix=None, environ_name=str(DATABASE_ENV))
DATABASES = {
'default': dj_database_url.parse(str(DATABASE_URL)),
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Storages
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_STORAGE_BUCKET_NAME = 'www.dustapp.io'
AWS_S3_REGION_NAME = 'eu-west-2'
AWS_S3_HOST = 's3.eu-west-2.amazonaws.com'
AWS_S3_SIGNATURE_VERSION = 's3v4'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
]
OPEN_EXCHANGE_APP_ID = values.SecretValue(environ_prefix=None)
ALPHA_VANTAGE_API_KEY = values.SecretValue(environ_prefix=None)
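# Sketch (hypothetical values): django-configurations reads the secrets above
# from the environment, so a local run might be bootstrapped like this:
#
#   export DJANGO_CONFIGURATION=BaseConfiguration
#   export DATABASE_URL='postgres://user:pass@localhost:5432/dust'
#   export OPEN_EXCHANGE_APP_ID='...'
#   export ALPHA_VANTAGE_API_KEY='...'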
|
Below is a short guide on editing content through Siteleaf, a content editing tool connected to Github. Content on Siteleaf is organized into two types: collections of content, and pages. The guide below outlines how to edit both types of content.
One type of collection is blog posts, called News Posts.
When you click on a collection, you’ll be taken to a view that shows the content within that collection. So for News Posts, each document is a blog post.
Within each blog post, you have a number of views: both the content of the post and its settings.
You can view in Markdown or Visual Editor mode.
Settings views: right side bar and metadata below the blog post.
On the right are options to Save, create tags and set the date of the post.
Below the post is extra metadata. For example, the feature image (large image at the top of the blog post) is a field for you to upload an image. You can also set the author.
The What We Do page is an example of an individual page, with content about what HOT is.
Follow the format to edit the blocks of content.
|
from flask import Blueprint
from flask import redirect, session, url_for
from functools import partial, wraps
from oi.model import User
from oi.util import timestamp, TIME_MINUTES
def get_user(db_session, user_id):
user = db_session.query(User).filter(User.id == user_id).first()
return user
def get_user_by_google_id(db_session, google_id):
user = db_session.query(User).filter(User.google_id == google_id).first()
return user
def get_user_in_session(db_session):
return get_user(db_session, session['user_id'])
def set_expire():
session['expires'] = timestamp() + 30 * TIME_MINUTES
def sign_in(user):
session['user_id'] = user.id
session['github_access_token'] = user.github_access_token
set_expire()
def sign_out():
session.pop('user_id', None)
session.pop('github_access_token', None)
session.pop('expires', None)
def check_sign_in(need_github=False):
if 'expires' not in session:
return False
if need_github and session['github_access_token'] is None:
return False
if session['expires'] < timestamp():
sign_out()
return False
set_expire()
return True
def require_sign_in(func=None, need_github=False):
if func is None:
return partial(require_sign_in, need_github=need_github)
@wraps(func)
def new_function(*args, **kwargs):
        if check_sign_in(need_github=need_github):
return func(*args, **kwargs)
else:
return redirect(url_for('index'))
return new_function
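# Usage sketch (illustrative wiring, not part of the original module): the
# partial trick above lets require_sign_in be used bare or with arguments.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.secret_key = 'dev-only'  # hypothetical; sessions need a secret key

    @app.route('/')
    def index():  # require_sign_in redirects here when the check fails
        return 'home'

    @app.route('/private')
    @require_sign_in
    def private():
        return 'signed in'

    @app.route('/repos')
    @require_sign_in(need_github=True)  # also requires a GitHub token in session
    def repos():
        return 'signed in with GitHub'

    app.run()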
|
Dark Chocolate Leaves (L: 60mm) are thin with a sharp finish. These decorations can be used on choux, éclairs and ice creams. This pack contains 175 pieces; we also offer this decoration in a pack of 550 pieces.
|
import pygtk
pygtk.require('2.0')
import gtk
import sys
import threading
import time
from SimpleCV import VirtualCamera, DrawingLayer, Color, Camera
binarizationValue = 30
class gui:
def __init__(self):
self.gladefile = "binControl.glade"
self.glade = gtk.Builder()
self.glade.add_from_file(self.gladefile)
self.glade.connect_signals(self)
self.glade.get_object("windowMain").show_all()
self.scale = self.glade.get_object("binValue")
#self.scale.connect("value-changed", self.on_binValue_value_changed)
def on_MainWindow_delete_event(self, widget, event):
gtk.main_quit()
def on_binValue_value_changed(self, widget):
print "At change value"
try:
global binarizationValue
binarizationValue = self.glade.get_object("binValue").get_value()
print binarizationValue
except ValueError:
return 0
def on_windowMain_destroy(self, widget):
sys.exit(0)
def startGUI():
gui()
gtk.main()
def startCAM():
global binarizationValue
cam = Camera()
#cam = VirtualCamera("pupilTest.mp4", "video", 300)
while True:
img = cam.getImage().binarize(binarizationValue)
blobs = img.findBlobs()
if blobs is None:
img.show()
else:
blobs[-1].draw(color=(0, 0, 0))
img.drawCircle((blobs[-1].x,blobs[-1].y),6, thickness=-1,color=Color.RED)
img.drawCircle((blobs[-1].centroid()),5, thickness=-1,color=Color.GREEN)
sTmp = "Center of Mass: "+str(blobs[-1].x)+", "+str(blobs[-1].y)
img.drawText(sTmp, x=10, y=30, color=Color.RED, fontsize=20)
sTmp = blobs[-1].centroid()
sTmp = " Bounding Box: "+str(int(sTmp[0]))+", "+ str(int(sTmp[1]))
img.drawText(sTmp, x=10, y=10, color=Color.GREEN, fontsize=20)
img.show()
#time.sleep(10)
def main():
print "First Thread"
guiThread = threading.Thread(target=startGUI)
guiThread.start()
startCAM()
print "Got Here!"
if __name__ == "__main__":
main()
|
How to develop confidence in fluency?
You are working in an MNC, and your hard work has paid off with a promotion to the next level. You have to go abroad for the next two years. With all the excitement of success you start preparing yourself, and suddenly you realize how difficult it will be to survive in an English-speaking country. US English brings its own challenges of accent and fluency. You question yourself: will I be able to learn the accent? Will I be able to communicate with clients in proper English? Are these questions crossing your mind? Then we have an answer for you: follow these tricks and you will develop English fluency in no time.
One thing you must always remember is to be confident when you learn something new. Confidence makes you achieve everything that you desire for.
Fluency is simply the ability to read a book or text aloud clearly. Reading corrects your pronunciation mistakes, and the more you practice fluency, the more confident you will be. It not only increases your reading speed, but also helps you to process words faster. If you are nervous about speaking in front of others, start by reading aloud to yourself at home. You can ask a friend or a mentor to give you feedback on advanced vocabulary.
Joining an online forum gives you a way to interact with native English speakers, and a chance to practice your English in the comfort of your own home. You can find people who share your interests, for instance photography or travelling, which will keep you motivated to participate. You can even post your own questions in the forums and respond to the answers you get.
An excellent way to practice English is to make friends with people who only speak English. You are then forced to speak in English with them, and the more you practice, the more confidence you will gain in your English speaking abilities. You can also join a professional networking group, where you will find language exchange sessions. You can teach the other members your native language, and they will be able to help you improve your English.
Language exchange sessions should become a daily habit, not a twice-a-week class. You should use English in your life every day. You can create routines that let you learn English with music, TV shows, and podcasts, so you pick it up in a relaxed way.
For any language, confidence is the fundamental tool for communication. The way you communicate with and perceive others is extremely important. Confidence is the most tangible expression of who you are. So, it is important to be confident while you are learning English skills.
Hence, being confident while speaking English will help you develop your fluency.
|
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import misc
import STR2CTC
import os
import codecs
def read_image_list(pathToList):
"""Reads a .txt file containing paths to the images
Args:
image_list_file: a .txt file with one /path/to/image per line
label: optionally, if set label will be pasted after each line
Returns:
List with all filenames in file image_list_file
"""
f = open(pathToList, 'r')
filenames = []
for line in f:
if line[-1] == '\n':
filenames.append(line[:-1])
else:
filenames.append(line)
f.close()
return filenames
def get_batch_labels(bList, cm):
u_labels = []
for path in bList:
labelFile = path[:] + ".txt"
tmp = codecs.open(labelFile, 'r', encoding='utf-8')
u_str = tmp.readline()
u_labels.append(u_str)
# print(str)
if tmp is not None:
tmp.close()
idx, val, shape = STR2CTC.target_string_list_to_ctc_tensor_repr(u_labels, cm)
return idx, val, shape
def get_batch_imgs(bList, imgW, mvn):
imgs = []
seqL = []
# print("\n")
for path in bList:
# print(path)
aImg = misc.imread(path)
width = aImg.shape[1]
hei = aImg.shape[0]
# aSeqL = min(width, imgW)
# aSeqL = max(aSeqL, imgW / 2)
aSeqL = width
seqL.append(aSeqL)
# aImg = aImg.astype('float32')
aImg = aImg / 255.0
if mvn:
std = np.std(aImg)
mean = np.mean(aImg)
tImg = (aImg - mean) / std
aImg = tImg
if width < imgW:
padW = imgW - width
npad = ((0, 0), (0, padW))
tImg = np.pad(aImg, npad, mode='constant', constant_values=0)
aImg = tImg
# if width > imgW:
# tImg = aImg[:, :imgW]
# aImg = tImg
# plt.imshow(aImg, cmap=plt.cm.gray)
# plt.show()
imgs.append(aImg)
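    # Note: all images in the batch are assumed to share the same height;
    # `hei` holds the height of the last image read and is used to size
    # the whole batch tensor below.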
bSize = len(bList)
imgBatched = np.zeros((bSize, hei, imgW, 1), dtype='float32')
# batch the image list
for idx, img in enumerate(imgs):
imgBatched[idx, :, :, 0] = img
return imgBatched, seqL
def get_list_vals(bList, cm, imgW, mvn=False):
tgtIdx, tgtVal, tgtShape = get_batch_labels(bList, cm)
inpBatch, inpSeqL = get_batch_imgs(bList, imgW, mvn)
return inpBatch, inpSeqL, tgtIdx, tgtVal, tgtShape
def clean_list(img_list, imgW, cm, subsampling=-1):
res = []
# Count the skipped Images (width too big)
countW = 0
# Count the skipped Images (char not in charMap)
countC = 0
# Count the skipped Images (subsampling too much)
countS = 0
    for path in img_list:
aImg = misc.imread(path)
width = aImg.shape[1]
# Skip image if image width is bigger than imgW
if width > imgW:
countW += 1
continue
# Skip image if a character is not in charMap
skip = False
labelFile = path[:] + ".txt"
tmp = codecs.open(labelFile, 'r', encoding='utf-8')
u_str = tmp.readline()
if subsampling > 0:
if subsampling * len(u_str) > width:
countS += 1
continue
if tmp is not None:
tmp.close()
count = 0
lastCh = -1
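        # Count adjacent repeated characters: CTC needs a blank between
        # repeats, so each repeat adds one timestep to the minimum sequence
        # length that is checked against the image width below.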
for c in u_str:
try:
ch = cm.get_channel(c)
if lastCh == ch:
count += 1
lastCh = ch
except KeyError:
# print('Character \'{}\' not in charMap, skipping Image...'.format(c))
skip = True
countC += 1
break
if not skip:
if subsampling * (count + len(u_str)) > width:
countS += 1
continue
res.append(path)
print("Skipped {} out of {} images...".format(countC + countW + countS, len(list)))
print("...{} too big images, {} images where subsampling is too much and additionally {} images with unknown characters.".format(countW, countS, countC))
return res
if __name__ == '__main__':
os.chdir("..")
    img_list = read_image_list('./resources/lp_only_train.lst')
    imgBatches, seqL, tgtIdx, tgtVal, tgtShape = get_list_vals(img_list, STR2CTC.get_charmap_lp(), 100)
# print(seqL)
print(imgBatches.shape)
print(imgBatches.dtype)
    plt.imshow(imgBatches[129, :, :, 0], cmap=plt.cm.gray)
plt.show()
|
GODFATHER PAWN HAS 3 ORLANDO LOCATIONS & 1 COCOA LOCATION TO SERVE YOU!
WE ARE OPEN FROM 10AM - 7PM MON-SAT and 11AM - 4PM on SUNDAY!
Pawn or Sell something from the comfort of your own home with our Online Appraisal. Simply click here to start!
Godfather Pawn has some of the best deals on Notebooks, Laptops, IPADS, Desktop Computers, Monitors and more!
We are open Monday through Saturday from 10AM - 7PM, and we are one of the few Pawn shops open on Sunday from 11AM - 4PM!
Check out some of our photos from our Orlando Pawn Shop locations! Click here!
We want to hear from you! Please send us feedback on how your Godfather Pawn experience was. Contact us today!
Do you have some broken gold and do not know what to do with it? Bring it over to any of the four Central Florida Godfather Pawn locations. We buy broken gold, and we will give you a free evaluation on how much we will provide you for yours. Don't just get rid of broken gold because you think it is not worth anything. If you want to get an estimate online without coming to our pawn shop, try using our Online Appraisal form here on the website. You can take a quick picture of your broken gold with your phone and upload it to the site. We can check it out and give you an estimate.
A Pawn Shop should show respect and concern for your needs. If you have questions about rates or values you should feel comfortable enough to speak up and get an answer, not an excuse. You should feel comfortable when pawning and not judged.
It goes without saying that people pawning for the first time are vulnerable. Changes in the economy, a lack of jobs and unstable markets only add to this vulnerability. They will pick a shop out of convenience and not consider the loan rate or understand the contract they are signing. Take your time and pick a pawn shop with the experience, knowledge and respect to get you the most money for your items.
Most customers use a pawn as a short-term loan until payday. We understand this and made our loans conveniently lower interest than the leading pawn shops. Godfather Pawn loans are based on 15 days for 10%. Most pawn shops are 30 days for 25%. We also prorate our loans after the 30th day; most others charge you for the whole month if you're one day late.
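To make the comparison concrete, here is a minimal Python sketch (illustrative numbers only; it assumes simple, non-compounding rates as described above):
principal = 100.0
# Godfather Pawn: 10% per 15-day period -> $10
godfather_cost_15_days = principal * 0.10
# Typical shop: 25% per 30-day period -> $25
typical_cost_30_days = principal * 0.25
# A full 30 days at Godfather's rate is two 15-day periods -> $20
godfather_cost_30_days = 2 * godfather_cost_15_days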
|
import numpy as np
from ..algo.multicorr_funcs import *
def multicorr(g1, g2, method='cross', upsample_factor=1, verbose=False):
"""Align a reference to an image by cross correlation. The template
and the image must have the same size.
The function takes in FFTs so that any FFT algorithm can be used to
transform the image and template (fft2, mkl, scipack, etc.)
Parameters
----------
g1 : complex ndarray
Fourier transform of reference image.
g2 : complex ndarray
Fourier transform of the image to register (the kernel).
method : str, optional
The correlation method to use. Must be 'phase' or 'cross' or 'hybrid' (default = 'cross')
upsample_factor : int
Upsample factor for subpixel precision of cross correlation. (default = 1)
verbose : bool, default is False
Print output.
Returns
-------
xyShift : list of floats
The shift between G1 and G2 in pixels.
Example
-------
Cross correlate two images already stored as ndarrays. You must input the FFT
of the images.
>>> import ncempy.algo as neval
>>> import numpy as np
>>> im0FFT = np.fft.fft2(im0)
>>> im1FFT = np.fft.fft2(im1)
>>> shifts = neval.multicorr(im0FFT, im1FFT)
"""
# Check to make sure both G1 and G2 are arrays
if type(g1) is not np.ndarray:
raise TypeError('G1 must be an ndarray')
elif type(g2) is not np.ndarray:
raise TypeError('G2 must be an ndarray')
# Check that the inputs are complex FFTs (common error)
if not np.iscomplexobj(g1) or not np.iscomplexobj(g2):
raise TypeError('G1 and G2 must be complex FFTs.')
# Check to make sure method and upsample factor are the correct values
if method not in ['phase', 'cross', 'hybrid']:
print('Unknown method used, setting to cross.')
method = 'cross'
if type(upsample_factor) is not int and type(upsample_factor) is not float:
print('Upsample factor is not an integer or float, setting to 1')
upsample_factor = 1
elif type(upsample_factor) is not int:
print('Upsample factor is not an integer, rounding down')
upsample_factor = int(upsample_factor)
    if upsample_factor < 1:
        print('Upsample factor is < 1, setting to 1')
        upsample_factor = 1
if verbose:
print('upsample factor = {}'.format(upsample_factor))
# Verify images are the same size.
if g1.shape != g2.shape:
raise TypeError('G1 and G2 are not the same size, G1 is {0} and G2 is {1}'.format(g1.shape, g2.shape))
imageCorr = initial_correlation_image(g1, g2, method, verbose=verbose)
xyShift = upsampled_correlation(imageCorr, upsample_factor, verbose=verbose)
return xyShift
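A minimal usage sketch with synthetic data (a sketch only: it assumes the module's relative imports resolve, and the sign convention of the returned shift is fixed by upsampled_correlation, so it is worth checking against a known shift like the one below):
# Build a test image and a copy shifted by a known integer amount.
im0 = np.zeros((64, 64))
im0[24:40, 24:40] = 1.0
im1 = np.roll(im0, (3, -2), axis=(0, 1))
shift = multicorr(np.fft.fft2(im0), np.fft.fft2(im1),
                  method='cross', upsample_factor=4)
print('recovered shift: {}'.format(shift))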
|
See the note to the preceding discourse.
goes down into the thick of battle.
There in the battle he strives and makes effort.
his opponents strike him down and finish him off.
He gets carried out and taken to his relatives.
he dies along the way.
but he dies of that injury.
and he recovers from his injury.
who dwells in dependence on a certain village or town.
with his sense faculties unguarded.
of the monk who dwells in dependence on a certain village or town.
he burns in body and mind.
I can't continue in the holy life.
before he has reached them he dies along the way.
This is the second type of warrior-like individual who can be found existing among the monks.
of much stress, much despair, and greater drawbacks.
— of much stress, much despair, and greater drawbacks.
Find delight, friend, in the holy life.
still I can't continue in the holy life.
"And further, there is the case of the monk who dwells in dependence on a certain village or town.
I will find delight in the holy life.
with his sense faculties guarded.
the faculty of the eye.
He guards the faculty of the ear.
the faculty of the ear.
He guards the faculty of the nose.
the faculty of the nose.
He guards the faculty of the tongue.
the faculty of the tongue.
He guards the faculty of the body.
the faculty of the body.
the faculty of the intellect.
The first seven of these comparisons are treated in detail in MN 54. The simile of the butcher's ax and chopping block is mentioned in MN 23, the simile of swords and spears in SN 5:1, and the simile of the snake’s head in Sn 4:1 and Thig 13:5.
Following the Thai edition. The Burmese and PTS editions here read, "I will make an effort."
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Scrub Service
"""
import os
import sys
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo.config import cfg
from glance.common import config
from glance.openstack.common import log
import glance.store
import glance.store.scrubber
CONF = cfg.CONF
def main():
CONF.register_cli_opt(
cfg.BoolOpt('daemon',
short='D',
default=False,
                    help='Run as a long-running process. When not '
                         'specified (the default) run the scrub operation '
                         'once and then exit. When specified, do not exit '
                         'and run scrub on the wakeup_time interval as '
                         'specified in the config.'))
CONF.register_opt(cfg.IntOpt('wakeup_time', default=300))
try:
config.parse_args()
log.setup('glance')
glance.store.create_stores()
glance.store.verify_default_store()
app = glance.store.scrubber.Scrubber(glance.store)
if CONF.daemon:
server = glance.store.scrubber.Daemon(CONF.wakeup_time)
server.start(app)
server.wait()
else:
import eventlet
pool = eventlet.greenpool.GreenPool(1000)
scrubber = app.run(pool)
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()
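Invocation sketch (the installed console-script name is an assumption; note that wakeup_time is registered as a regular option, so it is read from the config file rather than the command line):
# run the scrub operation once and exit:
#     glance-scrubber
# run as a daemon, scrubbing every wakeup_time seconds:
#     glance-scrubber --daemon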
|
RC4Z1-K mica (AS+) cables are suitable for permanent installations where fire resistance is required, as well as electromagnetic protection to avoid parasitic currents. They are useful in applications for the control and command of inverters, solenoid valves, machine and logic controller start-up, power switches, and temperature, current or voltage regulation in motorised valves, as well as for installation in computing facilities, airports, road tunnels, railway networks and wherever a low emission of corrosive fumes and gases is required due to fire hazards, such as public premises, hospitals, schools and shopping centres.
Suitable for facilities requiring an increased fire protection and a guaranteed functioning of facilities directly subjected to fire for a period of 90 minutes at 400 ºC.
|
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="aiohttp-spyne",
version="1.2.0",
description="Aiohttp transport for Spyne RPC library",
long_description=long_description,
url="https://github.com/katajakasa/aiohttp-spyne",
author="Tuomas Virtanen",
author_email="[email protected]",
license="LGPLv2.1",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: HTTP Servers",
"Operating System :: OS Independent",
"Framework :: AsyncIO",
],
packages=["aiohttp_spyne"],
install_requires=["aiohttp>=3.0.0,<4.0.0", "spyne>=2.13.16"],
)
|
My world is a little darker now, my father – Jerry Collins Evans, Sr. – has passed on from this life and is in heaven with his parents now.
So instead, I will simply share this that I wrote several years ago.
When I was in 6th grade, I attended a small Christian school in Coral Gables Florida. For part of the year, we had a substitute math teacher whom I remember absolutely nothing about except for the fact that he was a University of Oklahoma football fan. As luck would have it, that year, Oklahoma was coming to Miami to play the Hurricanes, and this particular teacher was beside himself with delight.
One day he spent almost half of class regaling us with stories of that year's Oklahoma team, how good they were and what an exciting game it would be. He usually followed these oratories with comments like, "While Miami doesn't have much of a team this year, if you get a chance to go to this game it will be great just to see Oklahoma play." To him, just the exhibition of them running out on the field was enough to warrant the price of the ticket. It was positively mesmerizing just listening to him talk. Of course, being good 6th grade students, we did everything we could to keep him going, under the misguided assumption that the less we covered in class, the less we would be tested on.
Time passed, maybe a day or two. I suffered the public humiliation of being in the 95% of the class that would not be able to attend the game. It was a nightmare of 6th grade epic proportions. Then one night, again at the dinner table, Dad looked at me and told me that he had managed to arrange for a substitute director for rehearsal and while he had scolded his choir members for missing rehearsal for better reasons, we were going to the game! My mind reeled. I was floating on cloud nine. I couldn’t believe it. I was actually going to my first football game. I couldn’t wait to tell the class at school the next day. I was now part of the elite 5% who were actually going to the game solely on the teacher’s recommendation. It was wonderful, life was good.
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import re
import unittest
from collections import namedtuple
from unittest import mock
import pytest
import sqlalchemy
from cryptography.fernet import Fernet
from parameterized import parameterized
from airflow import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection, crypto
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
from tests.test_utils.config import conf_vars
ConnectionParts = namedtuple("ConnectionParts", ["conn_type", "login", "password", "host", "port", "schema"])
class UriTestCaseConfig:
def __init__(
self,
test_conn_uri: str,
test_conn_attributes: dict,
description: str,
):
"""
:param test_conn_uri: URI that we use to create connection
:param test_conn_attributes: we expect a connection object created with `test_uri` to have these
attributes
:param description: human-friendly name appended to parameterized test
"""
self.test_uri = test_conn_uri
self.test_conn_attributes = test_conn_attributes
self.description = description
@staticmethod
def uri_test_name(func, num, param):
return f"{func.__name__}_{num}_{param.args[0].description.replace(' ', '_')}"
class TestConnection(unittest.TestCase):
def setUp(self):
crypto._fernet = None
patcher = mock.patch('airflow.models.connection.mask_secret', autospec=True)
self.mask_secret = patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
crypto._fernet = None
@conf_vars({('core', 'fernet_key'): ''})
def test_connection_extra_no_encryption(self):
"""
Tests extras on a new connection without encryption. The fernet key
is set to a non-base64-encoded string and the extra is stored without
encryption.
"""
test_connection = Connection(extra='testextra')
assert not test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
@conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
def test_connection_extra_with_encryption(self):
"""
Tests extras on a new connection with encryption.
"""
test_connection = Connection(extra='testextra')
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
def test_connection_extra_with_encryption_rotate_fernet_key(self):
"""
Tests rotating encrypted extras.
"""
key1 = Fernet.generate_key()
key2 = Fernet.generate_key()
with conf_vars({('core', 'fernet_key'): key1.decode()}):
test_connection = Connection(extra='testextra')
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
assert Fernet(key1).decrypt(test_connection._extra.encode()) == b'testextra'
# Test decrypt of old value with new key
with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
crypto._fernet = None
assert test_connection.extra == 'testextra'
# Test decrypt of new value with new key
test_connection.rotate_fernet_key()
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
assert Fernet(key2).decrypt(test_connection._extra.encode()) == b'testextra'
test_from_uri_params = [
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra=None,
),
description='without extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'extra1=a%20value&extra2=%2Fpath%2F',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
),
description='with extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?' '__extra__=single+value',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra='single value',
),
description='with extras single value',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=arbitrary+string+%2A%29%2A%24',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra='arbitrary string *)*$',
),
description='with extra non-json',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=%5B%22list%22%2C+%22of%22%2C+%22values%22%5D',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson=['list', 'of', 'values'],
),
description='with extras list',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=%7B%22my_val%22%3A+%5B%22list%22%2C+%22of%22%2C+%22values%22%5D%2C+%22extra%22%3A+%7B%22nested%22%3A+%7B%22json%22%3A+%22val%22%7D%7D%7D', # noqa: E501
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'my_val': ['list', 'of', 'values'], 'extra': {'nested': {'json': 'val'}}},
),
description='with nested json',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?extra1=a%20value&extra2=',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': ''},
),
description='with empty extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation%3Ax%3Ay:1234/schema?'
'extra1=a%20value&extra2=%2Fpath%2F',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
),
description='with colon in hostname',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host%2Flocation%3Ax%3Ay:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='user',
password='password with space',
port=1234,
),
description='with encoded password',
),
UriTestCaseConfig(
test_conn_uri='scheme://domain%2Fuser:password@host%2Flocation%3Ax%3Ay:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='domain/user',
password='password',
port=1234,
),
description='with encoded user',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host:1234/schema%2Ftest',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='schema/test',
login='user',
password='password with space',
port=1234,
),
description='with encoded schema',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='',
login='user',
password='password with space',
port=1234,
),
description='no schema',
),
UriTestCaseConfig(
test_conn_uri='google-cloud-platform://?extra__google_cloud_platform__key_'
'path=%2Fkeys%2Fkey.json&extra__google_cloud_platform__scope='
'https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform&extra'
'__google_cloud_platform__project=airflow',
test_conn_attributes=dict(
conn_type='google_cloud_platform',
host='',
schema='',
login=None,
password=None,
port=None,
extra_dejson=dict(
extra__google_cloud_platform__key_path='/keys/key.json',
extra__google_cloud_platform__scope='https://www.googleapis.com/auth/cloud-platform',
extra__google_cloud_platform__project='airflow',
),
),
description='with underscore',
),
UriTestCaseConfig(
test_conn_uri='scheme://host:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='',
login=None,
password=None,
port=1234,
),
description='without auth info',
),
UriTestCaseConfig(
test_conn_uri='scheme://%2FTmP%2F:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='/TmP/',
schema='',
login=None,
password=None,
port=1234,
),
description='with path',
),
UriTestCaseConfig(
test_conn_uri='scheme:///airflow',
test_conn_attributes=dict(
conn_type='scheme',
schema='airflow',
),
description='schema only',
),
UriTestCaseConfig(
test_conn_uri='scheme://@:1234',
test_conn_attributes=dict(
conn_type='scheme',
port=1234,
),
description='port only',
),
UriTestCaseConfig(
test_conn_uri='scheme://:password%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
test_conn_attributes=dict(
conn_type='scheme',
password='password/!@#$%^&*(){}',
),
description='password only',
),
UriTestCaseConfig(
test_conn_uri='scheme://login%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
test_conn_attributes=dict(
conn_type='scheme',
login='login/!@#$%^&*(){}',
),
description='login only',
),
]
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_from_uri(self, test_config: UriTestCaseConfig):
connection = Connection(uri=test_config.test_uri)
for conn_attr, expected_val in test_config.test_conn_attributes.items():
actual_val = getattr(connection, conn_attr)
            if expected_val is None:
                assert actual_val is None
            else:
                assert expected_val == actual_val
expected_calls = []
if test_config.test_conn_attributes.get('password'):
expected_calls.append(mock.call(test_config.test_conn_attributes['password']))
if test_config.test_conn_attributes.get('extra_dejson'):
expected_calls.append(mock.call(test_config.test_conn_attributes['extra_dejson']))
self.mask_secret.assert_has_calls(expected_calls)
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_get_uri_from_uri(self, test_config: UriTestCaseConfig):
"""
This test verifies that when we create a conn_1 from URI, and we generate a URI from that conn, that
when we create a conn_2 from the generated URI, we get an equivalent conn.
1. Parse URI to create `Connection` object, `connection`.
        2. Using this connection, generate URI `generated_uri`.
        3. Using this `generated_uri`, parse and create new Connection `new_conn`.
4. Verify that `new_conn` has same attributes as `connection`.
"""
connection = Connection(uri=test_config.test_uri)
generated_uri = connection.get_uri()
new_conn = Connection(uri=generated_uri)
assert connection.conn_type == new_conn.conn_type
assert connection.login == new_conn.login
assert connection.password == new_conn.password
assert connection.host == new_conn.host
assert connection.port == new_conn.port
assert connection.schema == new_conn.schema
assert connection.extra_dejson == new_conn.extra_dejson
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_get_uri_from_conn(self, test_config: UriTestCaseConfig):
"""
This test verifies that if we create conn_1 from attributes (rather than from URI), and we generate a
URI, that when we create conn_2 from this URI, we get an equivalent conn.
1. Build conn init params using `test_conn_attributes` and store in `conn_kwargs`
2. Instantiate conn `connection` from `conn_kwargs`.
3. Generate uri `get_uri` from this conn.
4. Create conn `new_conn` from this uri.
5. Verify `new_conn` has same attributes as `connection`.
"""
conn_kwargs = {}
for k, v in test_config.test_conn_attributes.items():
if k == 'extra_dejson':
conn_kwargs.update({'extra': json.dumps(v)})
else:
conn_kwargs.update({k: v})
connection = Connection(conn_id='test_conn', **conn_kwargs) # type: ignore
gen_uri = connection.get_uri()
new_conn = Connection(conn_id='test_conn', uri=gen_uri)
for conn_attr, expected_val in test_config.test_conn_attributes.items():
actual_val = getattr(new_conn, conn_attr)
if expected_val is None:
assert actual_val is None
else:
assert actual_val == expected_val
@parameterized.expand(
[
(
"http://:password@host:80/database",
ConnectionParts(
conn_type="http", login='', password="password", host="host", port=80, schema="database"
),
),
(
"http://user:@host:80/database",
ConnectionParts(
conn_type="http", login="user", password=None, host="host", port=80, schema="database"
),
),
(
"http://user:password@/database",
ConnectionParts(
conn_type="http", login="user", password="password", host="", port=None, schema="database"
),
),
(
"http://user:password@host:80/",
ConnectionParts(
conn_type="http", login="user", password="password", host="host", port=80, schema=""
),
),
(
"http://user:password@/",
ConnectionParts(
conn_type="http", login="user", password="password", host="", port=None, schema=""
),
),
(
"postgresql://user:password@%2Ftmp%2Fz6rqdzqh%2Fexample%3Awest1%3Atestdb/testdb",
ConnectionParts(
conn_type="postgres",
login="user",
password="password",
host="/tmp/z6rqdzqh/example:west1:testdb",
port=None,
schema="testdb",
),
),
(
"postgresql://user@%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb/testdb",
ConnectionParts(
conn_type="postgres",
login="user",
password=None,
host="/tmp/z6rqdzqh/example:europe-west1:testdb",
port=None,
schema="testdb",
),
),
(
"postgresql://%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb",
ConnectionParts(
conn_type="postgres",
login=None,
password=None,
host="/tmp/z6rqdzqh/example:europe-west1:testdb",
port=None,
schema="",
),
),
]
)
def test_connection_from_with_auth_info(self, uri, uri_parts):
connection = Connection(uri=uri)
assert connection.conn_type == uri_parts.conn_type
assert connection.login == uri_parts.login
assert connection.password == uri_parts.password
assert connection.host == uri_parts.host
assert connection.port == uri_parts.port
assert connection.schema == uri_parts.schema
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
},
)
def test_using_env_var(self):
conn = SqliteHook.get_connection(conn_id='test_uri')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert 'username' == conn.login
assert 'password' == conn.password
assert 5432 == conn.port
self.mask_secret.assert_called_once_with('password')
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
},
)
def test_using_unix_socket_env_var(self):
conn = SqliteHook.get_connection(conn_id='test_uri_no_creds')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert conn.login is None
assert conn.password is None
assert conn.port is None
def test_param_setup(self):
conn = Connection(
conn_id='local_mysql',
conn_type='mysql',
host='localhost',
login='airflow',
password='airflow',
schema='airflow',
)
assert 'localhost' == conn.host
assert 'airflow' == conn.schema
assert 'airflow' == conn.login
assert 'airflow' == conn.password
assert conn.port is None
def test_env_var_priority(self):
conn = SqliteHook.get_connection(conn_id='airflow_db')
assert 'ec2.compute.com' != conn.host
with mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_AIRFLOW_DB': 'postgres://username:[email protected]:5432/the_database',
},
):
conn = SqliteHook.get_connection(conn_id='airflow_db')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert 'username' == conn.login
assert 'password' == conn.password
assert 5432 == conn.port
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
},
)
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
assert 'postgres://username:[email protected]:5432/the_database' == hook.get_uri()
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
assert 'postgres://ec2.compute.com/the_database' == hook2.get_uri()
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
},
)
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
assert isinstance(engine, sqlalchemy.engine.Engine)
assert 'postgres://username:[email protected]:5432/the_database' == str(engine.url)
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
},
)
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_connection_mixed(self):
with pytest.raises(
AirflowException,
match=re.escape(
"You must create an object using the URI or individual values (conn_type, host, login, "
"password, schema, port or extra).You can't mix these two ways to create this object."
),
):
Connection(conn_id="TEST_ID", uri="mysql://", schema="AAA")
def test_masking_from_db(self):
"""Test secrets are masked when loaded directly from the DB"""
from airflow.settings import Session
session = Session()
try:
conn = Connection(
conn_id=f"test-{os.getpid()}",
conn_type="http",
password="s3cr3t",
extra='{"apikey":"masked too"}',
)
session.add(conn)
session.flush()
# Make sure we re-load it, not just get the cached object back
session.expunge(conn)
self.mask_secret.reset_mock()
from_db = session.query(Connection).get(conn.id)
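            # Accessing extra_dejson decrypts and parses the extra field,
            # which is what should re-trigger secret masking below.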
from_db.extra_dejson
assert self.mask_secret.mock_calls == [
# We should have called it _again_ when loading from the DB
mock.call("s3cr3t"),
mock.call({"apikey": "masked too"}),
]
finally:
session.rollback()
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'sqlite://',
},
)
def test_connection_test_success(self):
conn = Connection(conn_id='test_uri', conn_type='sqlite')
res = conn.test_connection()
assert res[0] is True
assert res[1] == 'Connection successfully tested'
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_NO_HOOK': 'fs://',
},
)
def test_connection_test_no_hook(self):
conn = Connection(conn_id='test_uri_no_hook', conn_type='fs')
res = conn.test_connection()
assert res[0] is False
assert res[1] == 'Unknown hook type "fs"'
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_HOOK_METHOD_MISSING': 'ftp://',
},
)
def test_connection_test_hook_method_missing(self):
        conn = Connection(conn_id='test_uri_hook_method_missing', conn_type='ftp')
res = conn.test_connection()
assert res[0] is False
assert res[1] == "Hook FTPHook doesn't implement or inherit test_connection method"
|
Did you know that corporate business presentations which are expressive are likely to receive more attention from clients than those which are overloaded with information? Well, yes, it's a fact. You must be wondering where this expressiveness comes from! Don't worry, we are here to help. Listed below are some easy-to-use tips to make your clients go head over heels for your service or product.
Let's face it: clients are not there to see how much content each of your slides holds. Rather, they would be happy to know what it is that you offer them, how you work and how that work stands out from the rest of the presentation design companies. So, skip the unnecessary information and focus on the services you offer.
Supplement your content with an appropriate title. Keep the title short, usually between 1-3 words. Review it to see if it matches your content. Remember, within a few minutes you must explain everything the clients need to know. A good title, in that case, can be of great help. It will grab their attention and make them remember the point that your slide makes.
Proper use of fonts can make your presentation worth your client's attention. The human mind tends to remember facts that are pleasing to the visual senses. So, choose your font wisely. Also, don't forget that the size of the font on your laptop and on the projector may vary slightly. Therefore, make sure that your font size is large enough that the person at the other end of the room can read it without any extra effort.
To make your slides easily readable, your text and the background should be highly contrasting. Sometimes the best presentation designs lose the client's attention just because of inappropriate contrast. So, choose a contrast that is visually appealing and impressive, and that makes the slides compelling to read.
Say goodbye to those cheap stock images! Instead, use professional-quality images. Remember not to use cheesy images that would create a negative impression. Multiple images in a single slide might pull all the focus to themselves, so try using single, relevant images. Many corporate presentation firms lose clients despite their good content. So, research images well on Google, buy them and use them. Some websites even provide free images. Use them to your advantage and you are done with a more professional-looking presentation.
In their anxiety to impress clients, many business presentation firms use too much of everything. It makes the presentation cluttered. For example, colors look good, but too much color can turn a well-organized presentation into a blunder. So, it is best to keep things simple: a clear font, a visible font size, a contrasting background, fewer numbers and bullets, a suitable template, and there you go!
Let's dive into the realm of psychology! The way things end gets engraved in the human mind better than the way things start. A wise conclusion can prove to be a boon for a corporate presentation. Carve out your conclusion so that the clients go home with your presentation's concept imprinted in their minds. End with a short summary of all your slides, give a positive outlook, and the client is all yours!
These are some of the best presentation design practices followed in corporate presentations. Use them to add polish to your presentation, and keep exploring more! The more you explore, the better you learn. Be in tune with the clients. Be conversational and casual, yet not so much that you lose the professional shell of your presentation. All the best!
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from gaepermission import facade
from routes import account
import settings
from tekton.gae.middleware.redirect import RedirectResponse
class AccountTests(GAETestCase):
def test_index(self):
email = '[email protected]'
user = facade.save_user_cmd(email)()
response = account.index(user)
self.assert_can_render(response)
def test_edit(self):
email = '[email protected]'
initial_name = 'initial_name'
user = facade.save_user_cmd(email, initial_name)()
self.assertEqual(initial_name, user.name)
self.assertEqual(settings.DEFAULT_LOCALE, user.locale)
self.assertEqual(settings.DEFAULT_TIMEZONE, user.timezone)
edited_name = 'edited_name'
locale = 'pt_BR'
timezone = 'America/Sao_Paulo'
response = account.edit(user, edited_name, locale, timezone)
user = user.key.get()
self.assertIsInstance(response, RedirectResponse)
self.assertEqual(edited_name, user.name)
self.assertEqual(locale, user.locale)
self.assertEqual(timezone, user.timezone)
|
The Nordic Africa Research Network held a workshop on "How to communicate research on Africa" in conjunction with the Nordic Africa Days, which were organized by the Nordic Africa Institute on 23-24 September 2016. The workshop took place on Friday 23 September, Blåsenhus 12:128, at 16:00-18:00 in Uppsala. The workshop was well attended, as demonstrated by the participant list.
The Nordic Africa Research Network (NARN) will hold a Round Table on "Nordic Africa Research Co-operation: Options and Obstacles" in conjunction with the Nordic Conference on Development Research that will be organized by the School of Global Studies (SGS), University of Gothenburg, at the Conference Center Wallenberg in Gothenburg 5-6 November 2015.
|
"""Analysis Category - the category of the analysis service
"""
from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.interfaces import IAnalysisCategory
from plone.indexer import indexer
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
from zope.interface import implements
import sys
import transaction
@indexer(IAnalysisCategory)
def sortable_title_with_sort_key(instance):
sort_key = instance.getSortKey()
if sort_key:
return "{:010.3f}{}".format(sort_key, instance.Title())
return instance.Title()
schema = BikaSchema.copy() + Schema((
TextField('Comments',
default_output_type = 'text/plain',
allowable_content_types = ('text/plain',),
widget=TextAreaWidget (
description = _("To be displayed below each Analysis "
"Category section on results reports."),
label = _("Comments")),
),
ReferenceField('Department',
required=1,
vocabulary='getDepartments',
vocabulary_display_path_bound=sys.maxsize,
allowed_types=('Department',),
relationship='AnalysisCategoryDepartment',
referenceClass=HoldingReference,
widget=ReferenceWidget(
checkbox_bound=0,
label = _("Department"),
description = _("The laboratory department"),
),
),
ComputedField('DepartmentTitle',
expression="context.getDepartment() and context.getDepartment().Title() or ''",
widget=ComputedWidget(
visible=False,
),
),
FloatField('SortKey',
validators=('SortKeyValidator',),
widget=DecimalWidget(
label = _("Sort Key"),
description = _("Float value from 0.0 - 1000.0 indicating the sort order. Duplicate values are ordered alphabetically."),
),
),
))
schema['description'].widget.visible = True
schema['description'].schemata = 'default'
class AnalysisCategory(BaseContent):
implements(IAnalysisCategory)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def getDepartments(self):
bsc = getToolByName(self, 'bika_setup_catalog')
deps = []
for d in bsc(portal_type='Department',
inactive_state='active'):
deps.append((d.UID, d.Title))
return DisplayList(deps)
def workflow_script_deactivat(self):
        # An instance cannot be deactivated if it contains services
pu = getToolByName(self, 'plone_utils')
bsc = getToolByName(self, 'bika_setup_catalog')
ars = bsc(portal_type='AnalysisService', getCategoryUID=self.UID())
if ars:
message = _("Category cannot be deactivated because "
"it contains Analysis Services")
pu.addPortalMessage(message, 'error')
transaction.get().abort()
raise WorkflowException
registerType(AnalysisCategory, PROJECTNAME)
|
Kim, Backyoung, "Autonomous elementary English learning in Korea using mediated structures" (2004). Theses Digitization Project. 2449.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-12 05:45
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0016_coursemember_dri'),
]
operations = [
migrations.AddField(
model_name='course',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='variants', to='courses.Course'),
),
migrations.AlterField(
model_name='course',
name='num_audio',
field=models.PositiveIntegerField(default=2, validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='course',
name='num_dri',
field=models.PositiveIntegerField(default=2),
),
migrations.AlterField(
model_name='course',
name='num_graphics',
field=models.PositiveIntegerField(default=2, validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='course',
name='num_presentation',
field=models.PositiveIntegerField(default=2, validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='course',
name='num_scripting',
field=models.PositiveIntegerField(default=2, validators=[django.core.validators.MinValueValidator(1)]),
),
]
|
Bobby Lyle has never forgotten his humble beginnings and the importance of kindness, his colleagues say.
The businessman, startup investor, educator and philanthropist has given back to his community for more than 50 years.
For his commitment to Dallas, Lyle has been chosen to receive the 2019 Linz Award, an annual honor recognizing enduring civic or humanitarian efforts benefiting the city.
The award, one of the city's oldest and most respected civic honors, is given by The Dallas Morning News, Communities Foundation of Texas and The Dallas Foundation. Lyle will receive the award at the 90th annual Linz Award Luncheon in Dallas on April 17 at the Hilton Anatole.
Lyle said in an interview that he’s grateful for the award.
“The selection committee this year unanimously chose Bobby as the winner due to the breadth of his contributions and his unique ability to bring people together towards a common cause,” Moise said.
Lyle’s family came from modest means. His father died when he was only a few months old. With the help of his extended family, his mother raised him and his brother in East Texas, Arkansas and Louisiana.
After he graduated from Louisiana Tech University, Lyle moved to Dallas in 1963 — just months before President John F. Kennedy’s assassination. Lyle has said he saw Kennedy shortly before he was killed Nov. 22 of that year.
Lyle, who earned a master’s degree from Southern Methodist University in 1967, witnessed the city try to overcome that historical stain in the years that followed JFK's death.
Throughout his career, Lyle has been a prominent figure in the oil and gas industry and played a major role in development in the city, including the Dallas Galleria and InterFirst Bank-Galleria.
After he sold his oil company in 2005, he established Lyco Holdings Inc., a private investment firm.
Randall Stephenson, AT&T’s chief executive and former president of Boy Scouts of America, praised Lyle for his volunteer work and philanthropic efforts. He said Lyle, who has been active with the Scouts' Circle Ten Council, exhibits a true Boy Scout’s attributes, including integrity and selflessness.
In his nomination of Lyle for the award, Stephenson wrote that he and Lyle “share a passion for Scouting and the critical role it plays in creating leaders of character."
“Through Bobby’s work in our community, he has consistently lived these same values,” Stephenson said.
Lyle’s name and work run deep through numerous North Texas organizations, especially SMU, which named its school of engineering after him in 2008. He was a trustee at SMU for 30 years and served as a professor and executive dean in what is now the Cox School of Business. He has a doctorate from the University of Massachusetts at Amherst.
At 78, Lyle said he isn’t ready to slow down or even consider retirement. There’s too much work to get done, he said, and he loves it too much to say goodbye.
He said he wouldn’t even “know how to do” retirement.
|
#_PYTHON_INSERT_SAO_COPYRIGHT_HERE_(2008)_
#_PYTHON_INSERT_GPL_LICENSE_HERE_
"""
Tools for creating, storing, inspecting, and manipulating data sets
"""
import sys
import inspect
from itertools import izip
import numpy
from sherpa.utils.err import DataErr, NotImplementedErr
from sherpa.utils import SherpaFloat, NoNewAttributesAfterInit, \
print_fields, create_expr, calc_total_error, bool_cast, \
filter_bins
__all__ = ('Data', 'DataSimulFit', 'Data1D', 'Data1DInt', 'Data2D', 'Data2DInt')
class BaseData(NoNewAttributesAfterInit):
"Base class for all data set types"
def _get_filter(self):
return self._filter
def _set_filter(self, val):
self._filter = val
self._mask = True
filter = property(_get_filter, _set_filter,
doc='Filter for dependent variable')
def _get_mask(self):
return self._mask
def _set_mask(self, val):
if (val is True) or (val is False):
self._mask = val
elif (val is None) or numpy.isscalar(val):
raise DataErr('ismask')
else:
self._mask = numpy.asarray(val, numpy.bool_)
self._filter = None
mask = property(_get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self):
"""
Initialize a data object. This method can only be called from
a derived class constructor. Attempts to create a BaseData
instance will raise NotImplementedErr.
Derived class constructors must call this method directly (and
not indirectly through a superclass constructor). When thus
invoked, this method will extract the argument names and
values from the derived class constructor invocation and set
corresponding attributes on the instance (thereby eliminating
the need for the derived class constructor to do its own
attribute setting). If the name of an argument matches the
name of a DataProperty of the derived class, then the
corresponding attribute name will have an underscore prepended
(meaning the property will use the value directly instead of
relying on _get_*/_set_* methods).
"""
if type(self) is BaseData:
raise NotImplementedErr('noinstanceallowed', 'BaseData')
frame = sys._getframe().f_back
cond = (frame.f_code is self.__init__.im_func.func_code)
assert cond, (('%s constructor must call BaseData constructor ' +
'directly') % type(self).__name__)
args = inspect.getargvalues(frame)
self._fields = tuple(args[0][1:])
for f in self._fields:
cond = (f not in vars(self))
assert cond, (("'%s' object already has attribute '%s'") %
(type(self).__name__, f))
setattr(self, f, args[3][f])
self.filter = None
self.mask = True
NoNewAttributesAfterInit.__init__(self)
def __str__(self):
"""
Return a listing of the attributes listed in self._fields and,
if present, self._extra_fields.
"""
fields = self._fields + getattr(self, '_extra_fields', ())
fdict = dict(izip(fields, [getattr(self, f) for f in fields]))
return print_fields(fields, fdict)
def apply_filter(self, data):
if data is not None:
if self.filter is not None:
if callable(self.filter):
data = self.filter(data)
else:
data = data[self.filter]
elif self.mask is not True:
if self.mask is False:
raise DataErr('notmask')
data = numpy.asarray(data)
if data.shape != self.mask.shape:
raise DataErr('mismatch', 'mask', 'data array')
data = data[self.mask]
return data
def ignore(self, *args, **kwargs):
kwargs['ignore'] = True
self.notice(*args, **kwargs)
def notice(self, mins, maxes, axislist, ignore=False):
ignore = bool_cast(ignore)
        if str in [type(min) for min in mins]:
            raise DataErr('typecheck', 'lower bound')
        elif str in [type(max) for max in maxes]:
            raise DataErr('typecheck', 'upper bound')
        elif str in [type(axis) for axis in axislist]:
            raise DataErr('typecheck', 'grid')
mask = filter_bins(mins, maxes, axislist)
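        # filter_bins returns None when no bounds were given. Otherwise the
        # new region is OR-ed into the mask when noticing and its complement
        # is AND-ed in when ignoring, so repeated calls accumulate filters.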
if mask is None:
self.mask = not ignore
elif not ignore:
if self.mask is True:
self.mask = mask
else:
self.mask |= mask
else:
mask = ~mask
if self.mask is False:
self.mask = mask
else:
self.mask &= mask
class Data(BaseData):
"Generic data set"
def __init__(self, name, indep, dep, staterror=None, syserror=None):
"""
Initialize a Data instance. indep should be a tuple of
independent axis arrays, dep should be an array of dependent
variable values, and staterror and syserror should be arrays
of statistical and systematic errors, respectively, in the
dependent variable (or None).
"""
BaseData.__init__(self)
def __repr__(self):
r = '<%s data set instance' % type(self).__name__
if hasattr(self, 'name'):
r += " '%s'" % self.name
r += '>'
return r
def eval_model(self, modelfunc):
return modelfunc(*self.get_indep())
def eval_model_to_fit(self, modelfunc):
return modelfunc(*self.get_indep(filter=True))
#
# Primary properties. These can depend only on normal attributes (and not
# other properties).
#
def get_indep(self, filter=False):
"Return a tuple containing the independent variables/axes"
indep = getattr(self, 'indep', None)
filter=bool_cast(filter)
if filter:
indep = tuple([self.apply_filter(x) for x in indep])
return indep
def get_dep(self, filter=False):
"Return an array of dependent variable values"
dep = getattr(self, 'dep', None)
filter=bool_cast(filter)
if filter:
dep = self.apply_filter(dep)
return dep
def get_staterror(self, filter=False, staterrfunc=None):
"Return the statistical error array"
staterror = getattr(self, 'staterror', None)
filter=bool_cast(filter)
if filter:
staterror = self.apply_filter(staterror)
if (staterror is None) and (staterrfunc is not None):
dep = self.get_dep()
if filter:
dep = self.apply_filter(dep)
staterror = staterrfunc(dep)
return staterror
def get_syserror(self, filter=False):
"Return the systematic error array"
syserr = getattr(self, 'syserror', None)
filter=bool_cast(filter)
if filter:
syserr = self.apply_filter(syserr)
return syserr
#
# Utility methods
#
def _wrong_dim_error(self, baddim):
raise DataErr('wrongdim', self.name, baddim)
def _no_image_error(self):
raise DataErr('notimage', self.name)
def _no_dim_error(self):
raise DataErr('nodim', self.name)
#
# Secondary properties. To best support subclasses, these should depend
# only on the primary properties whenever possible, though there may be
# instances when they depend on normal attributes.
#
def get_dims(self):
self._no_dim_error()
def get_error(self, filter=False, staterrfunc=None):
"Return total error in dependent variable"
return calc_total_error(self.get_staterror(filter, staterrfunc),
self.get_syserror(filter))
def get_x(self, filter=False):
"Return linear view of independent axis/axes"
self._wrong_dim_error(1)
def get_xerr(self, filter=False):
"Return linear view of bin size in independent axis/axes"
return None
def get_xlabel(self):
"Return label for linear view ofindependent axis/axes"
return 'x'
def get_y(self, filter=False, yfunc=None):
"Return dependent axis in N-D view of dependent variable"
y = self.get_dep(filter)
if yfunc is not None:
if filter:
yfunc = self.eval_model_to_fit(yfunc)
else:
yfunc = self.eval_model(yfunc)
y = (y, yfunc)
return y
def get_yerr(self, filter=False, staterrfunc=None):
"Return errors in dependent axis in N-D view of dependent variable"
return self.get_error(filter, staterrfunc)
def get_ylabel(self, yfunc=None):
"Return label for dependent axis in N-D view of dependent variable"
return 'y'
def get_x0(self, filter=False):
"Return first dimension in 2-D view of independent axis/axes"
self._wrong_dim_error(2)
def get_x0label(self):
"Return label for first dimension in 2-D view of independent axis/axes"
return 'x0'
def get_x1(self, filter=False):
"Return second dimension in 2-D view of independent axis/axes"
self._wrong_dim_error(2)
def get_x1label(self):
"""
Return label for second dimension in 2-D view of independent axis/axes
"""
return 'x1'
# For images, only need y-array
# Also, we do not filter, as imager needs M x N (or
# L x M x N) array
def get_img(self, yfunc=None):
"Return dependent variable as an image"
self._no_image_error()
def get_imgerr(self, yfunc=None):
"Return total error in dependent variable as an image"
self._no_image_error()
def to_guess(self):
arrays = [self.get_y(True)]
arrays.extend(self.get_indep(True))
return tuple(arrays)
def to_fit(self, staterrfunc=None):
return (self.get_dep(True),
self.get_staterror(True, staterrfunc),
self.get_syserror(True))
def to_plot(self, yfunc=None, staterrfunc=None):
return (self.get_x(True),
self.get_y(True, yfunc),
self.get_yerr(True, staterrfunc),
self.get_xerr(True),
self.get_xlabel(),
self.get_ylabel())
def to_contour(self, yfunc=None):
return (self.get_x0(True),
self.get_x1(True),
self.get_y(True, yfunc),
self.get_x0label(),
self.get_x1label())
class DataSimulFit(Data):
def __init__(self, name, datasets):
if len(datasets) == 0:
raise DataErr('zerodatasimulfit', type(self).__name__)
datasets = tuple(datasets)
BaseData.__init__(self)
def eval_model_to_fit(self, modelfuncs):
total_model = []
for func, data in izip(modelfuncs, self.datasets):
total_model.append(data.eval_model_to_fit(func))
return numpy.concatenate(total_model)
def to_fit(self, staterrfunc=None):
total_dep = []
total_staterror = []
total_syserror = []
no_staterror = True
no_syserror = True
for data in self.datasets:
dep, staterror, syserror = data.to_fit(staterrfunc)
total_dep.append(dep)
if staterror is not None:
no_staterror = False
total_staterror.append(staterror)
if syserror is not None:
no_syserror = False
else:
syserror = numpy.zeros_like(dep)
total_syserror.append(syserror)
total_dep = numpy.concatenate(total_dep)
if no_staterror:
total_staterror = None
elif None in total_staterror:
raise DataErr('staterrsimulfit')
else:
total_staterror = numpy.concatenate(total_staterror)
if no_syserror:
total_syserror = None
else:
total_syserror = numpy.concatenate(total_syserror)
return (total_dep, total_staterror, total_syserror)
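# Illustrative sketch (not part of the original class): if the member
# datasets' to_fit() calls return dependent arrays [1., 2.] and [3.] with no
# statistical or systematic errors, to_fit() above yields
# (array([ 1.,  2.,  3.]), None, None): the dependent values are
# concatenated and both error slots stay None.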
def to_plot(self, yfunc=None, staterrfunc=None):
return self.datasets[0].to_plot(yfunc.parts[0], staterrfunc)
class DataND(Data):
"Base class for Data1D, Data2D, etc."
def get_dep(self, filter=False):
y = self.y
filter = bool_cast(filter)
if filter:
y = self.apply_filter(y)
return y
class Data1D(DataND):
"1-D data set"
def _set_mask(self, val):
DataND._set_mask(self, val)
try:
self._x = self.apply_filter(self.x)
except DataErr:
self._x = self.x
mask = property(DataND._get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self, name, x, y, staterror=None, syserror=None):
self._x = x
BaseData.__init__(self)
def get_indep(self, filter=False):
filter = bool_cast(filter)
if filter:
return (self._x,)
return (self.x,)
def get_x(self, filter=False):
return self.get_indep(filter)[0]
def get_dims(self, filter=False):
return (len(self.get_x(filter)),)
def get_filter(self, format='%.4f', delim=':'):
# for derived integrated classes, this will return values at the center of
# each bin.
x = self.get_x(filter=True)
mask = numpy.ones(len(x), dtype=bool)
if numpy.iterable(self.mask):
mask = self.mask
return create_expr(x, mask, format, delim)
def get_filter_expr(self):
return (self.get_filter(delim='-') + ' ' + self.get_xlabel())
def get_bounding_mask(self):
mask = self.mask
size = None
if numpy.iterable(self.mask):
# create bounding box around noticed image regions
mask = numpy.array(self.mask)
# xi = numpy.where(mask == True)[0]
# xlo = xi.min()
# xhi = xi.max()
# size = (mask[xlo:xhi+1].size,)
# mask = mask[xlo:xhi+1]
size = (mask.size,)
return mask, size
def get_img(self, yfunc=None):
"Return 1D dependent variable as a 1 x N image"
y_img = self.get_y(False, yfunc)
if yfunc is not None:
y_img = (y_img[0].reshape(1, y_img[0].size),
y_img[1].reshape(1, y_img[1].size))
else:
y_img = y_img.reshape(1, y_img.size)
return y_img
def get_imgerr(self):
err = self.get_error()
if err is not None:
err = err.reshape(1, err.size)
return err
def notice(self, xlo=None, xhi=None, ignore=False):
BaseData.notice(self, (xlo,), (xhi,), self.get_indep(), ignore)
class Data1DInt(Data1D):
"1-D integrated data set"
def _set_mask(self, val):
DataND._set_mask(self, val)
try:
self._lo = self.apply_filter(self.xlo)
self._hi = self.apply_filter(self.xhi)
except DataErr:
self._lo = self.xlo
self._hi = self.xhi
mask = property(DataND._get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self, name, xlo, xhi, y, staterror=None, syserror=None):
self._lo = xlo
self._hi = xhi
BaseData.__init__(self)
def get_indep(self, filter=False):
filter = bool_cast(filter)
if filter:
return (self._lo, self._hi)
return (self.xlo, self.xhi)
def get_x(self, filter=False):
indep = self.get_indep(filter)
return (indep[0] + indep[1]) / 2.0
def get_xerr(self, filter=False):
xlo, xhi = self.get_indep(filter)
return xhi - xlo
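# Illustrative values (not part of the original class): for bins with
# xlo = [0., 1.] and xhi = [1., 3.], get_x() returns the bin centres
# [0.5, 2.0] and get_xerr() returns the bin widths [1.0, 2.0].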
def notice(self, xlo=None, xhi=None, ignore=False):
BaseData.notice(self, (None, xlo), (xhi, None), self.get_indep(),
ignore)
class Data2D(DataND):
"2-D data set"
def _set_mask(self, val):
DataND._set_mask(self, val)
try:
self._x0 = self.apply_filter(self.x0)
self._x1 = self.apply_filter(self.x1)
except DataErr:
self._x0 = self.x0
self._x1 = self.x1
mask = property(DataND._get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self, name, x0, x1, y, shape=None, staterror=None,
syserror=None):
self._x0 = x0
self._x1 = x1
BaseData.__init__(self)
def get_indep(self, filter=False):
filter = bool_cast(filter)
if filter:
return (self._x0, self._x1)
return (self.x0, self.x1)
def get_x0(self, filter=False):
return self.get_indep(filter)[0]
def get_x1(self, filter=False):
return self.get_indep(filter)[1]
def get_axes(self):
self._check_shape()
# FIXME: how to filter an axis when self.mask is size of self.y?
return (numpy.arange(self.shape[1]) + 1, numpy.arange(self.shape[0]) + 1)
def get_dims(self, filter=False):
#self._check_shape()
if self.shape is not None:
return self.shape[::-1]
return (len(self.get_x0(filter)), len(self.get_x1(filter)))
def get_filter_expr(self):
return ''
get_filter = get_filter_expr
def _check_shape(self):
if self.shape is None:
raise DataErr('shape', self.name)
def get_max_pos(self, dep=None):
if dep is None:
dep = self.get_dep(True)
x0 = self.get_x0(True)
x1 = self.get_x1(True)
pos = numpy.asarray(numpy.where(dep == dep.max())).squeeze()
if pos.ndim == 0:
pos = int(pos)
return (x0[pos], x1[pos])
return [(x0[index], x1[index]) for index in pos]
def get_img(self, yfunc=None):
self._check_shape()
y_img = self.get_y(False, yfunc)
if yfunc is not None:
y_img = (y_img[0].reshape(*self.shape),
y_img[1].reshape(*self.shape))
else:
y_img = y_img.reshape(*self.shape)
return y_img
def get_imgerr(self):
self._check_shape()
err = self.get_error()
if err is not None:
err = err.reshape(*self.shape)
return err
def notice(self, x0lo=None, x0hi=None, x1lo=None, x1hi=None, ignore=False):
BaseData.notice(self, (x0lo, x1lo), (x0hi, x1hi), self.get_indep(),
ignore)
class Data2DInt(Data2D):
"2-D integrated data set"
def _set_mask(self, val):
DataND._set_mask(self, val)
try:
self._x0lo = self.apply_filter(self.x0lo)
self._x0hi = self.apply_filter(self.x0hi)
self._x1lo = self.apply_filter(self.x1lo)
self._x1hi = self.apply_filter(self.x1hi)
except DataErr:
self._x0lo = self.x0lo
self._x1lo = self.x1lo
self._x0hi = self.x0hi
self._x1hi = self.x1hi
mask = property(DataND._get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self, name, x0lo, x1lo, x0hi, x1hi, y, shape=None,
staterror=None, syserror=None):
self._x0lo = x0lo
self._x1lo = x1lo
self._x0hi = x0hi
self._x1hi = x1hi
BaseData.__init__(self)
def get_indep(self, filter=False):
filter = bool_cast(filter)
if filter:
return (self._x0lo, self._x1lo, self._x0hi, self._x1hi)
return (self.x0lo, self.x1lo, self.x0hi, self.x1hi)
def get_x0(self, filter=False):
indep = self.get_indep(filter)
return (indep[0] + indep[2]) / 2.0
def get_x1(self, filter=False):
indep = self.get_indep(filter)
return (indep[1] + indep[3]) / 2.0
def notice(self, x0lo=None, x0hi=None, x1lo=None, x1hi=None, ignore=False):
BaseData.notice(self, (None, None, x0lo, x1lo),
(x0hi, x1hi, None, None), self.get_indep(), ignore)
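# A minimal usage sketch (illustrative, not part of the original module),
# assuming numpy arrays for the axes:
#   x = numpy.array([1.0, 2.0, 3.0])
#   y = numpy.array([2.0, 4.0, 6.0])
#   d = Data1D('example', x, y)
#   d.notice(1.5, 3.0)        # keep only points with 1.5 <= x <= 3.0
#   d.get_x(filter=True)      # -> array([ 2.,  3.])
#   d.get_dep(filter=True)    # -> array([ 4.,  6.])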
|
What Are Your Wedding Planning Pain Points?
Are you ready to plan an amazing wedding without the stresses that come along with it?
Let us help you in 2018!
Win a pamper kit and personal wedding concierge consultations with Poonam of The Maharani Diaries!
1. Information on how to enter forms part of the terms of entry. Entry into the ‘What Are Your Wedding Planning Pain Points’ Competition (“Competition”) is deemed acceptance of these terms and conditions.
2. Entry is open to residents worldwide who are 18 years of age or older at the time of entry. Entrant must be getting married or their entry may be deemed invalid.
3. The competition commences on February 14, 2018 and closes on April 15, 2018. The winner will be announced at midday (UK/IRE time) on Monday, April 16, 2018.
a. Provide their name, postal code, email address, wedding date and date of birth.
b. Be engaged or in the process of planning their wedding.
c. Answer 10 questions about their wedding planning.
d. By participating in this survey, you will automatically be opted in to receive email updates from The Maharani Diaries. You must remain a current subscriber to the database in order to qualify as a winner.
6. It is free to enter this competition.
8. A limit of one online entry for the life of the competition per person applies.
9. In consideration of the promoter awarding the prize to the winner, the winner hereby permits the winner’s name, entry, image and/or voice or comments, as recorded or documented and/or photographed during the winner’s participation in the prize to appear in connection with The Maharani Diaries or the advertising or marketing thereof, in any media whatsoever throughout the world and the winner will not be entitled to any fee for such use.
10. The prize winner will be notified by email within four (4) days of the contest completion date and the winner’s details may be published on The Maharani Diaries website, Instagram, Twitter or Facebook page.
11. An entry that is made on behalf of an entrant by a third party will be invalid.
12. The winner has 72 hours from the date of notification to accept their prize. After this the prize may be awarded to another entrant. We reserve the right to disqualify or not award a prize to an entrant who in our opinion has not entered in the spirit of these terms and conditions or the intention of the competition.
A pamper kit for the blushing bride-to-be, along with free personal wedding planning consultations with a team member of The Maharani Diaries. Note that this is not a wedding planning service. The bride-to-be will receive advice and ideas on how to plan her wedding, and The Maharani Diaries will personally reach out to vendors on behalf of the bride.
14. The total Prize Pool is valued at up to $150 (including GST). The promoter accepts no responsibility for any change in prize value between now and the ultimate prize redemption date.
15. Any ancillary costs associated with redeeming the major prize are not included. These are the responsibility of the winner.
16. All prizes are non-transferable. Prizes cannot be used in conjunction with any other special offer. Prize values are in Australian dollars. The Promoter accepts no responsibility for any variation in the prize values.
17. The promoter is neither responsible nor liable for any prize that is damaged, delayed or lost in transit during delivery.
18. It is a condition of accepting the prize that the winner must comply with all the conditions of use of the prize and the prize supplier’s requirements.
19. Personal information about all prize winners will be shared with the prize provider, and their agents, to the extent necessary for prizes to be delivered to the prize winners.
20. Prizes will be awarded to the person named in their contestant entry. Should an entrant’s contact details change during the competition period it is the entrant’s responsibility to notify the promoter. A request to access or modify any information provided in an entry should be directed to the promoter.
21. Should the prize winner not meet any of the criteria stated in these terms and conditions to be a valid winner they will forfeit all rights to the prize, and a redraw will take place to reallocate the prize to a valid winner.
22. By accepting the prize, the winner agrees to participate in and co-operate as required with all reasonable media editorial requests relating to the prize, including but not limited to, being interviewed and photographed, filmed and/or chaperoned throughout the duration of the prize.
23. If for any reason this competition is not capable of running as planned, including due to infection by computer virus, bugs, tampering, unauthorised intervention, fraud, technical failures or any causes beyond the control of the promoter, which corrupt or affect the administration, security, fairness or integrity or proper conduct of this promotion, the promoter reserves the right to disqualify an individual who tampers with the entry process, take any action that may be available, and to cancel, terminate, modify or suspend the competition, subject to government legislation.
24. If your entry is selected as a winning entry, validation of your circumstances and of the validity of your entry will be undertaken by the promoter. The method of validation will be determined by the promoter at its complete discretion. If the winning entry is deemed valid, the winner will be notified as per these terms and conditions.
(b) subject to any written directions from a regulatory authority, to modify, suspend, terminate or cancel the promotion, as appropriate, subject to government legislation.
27. All entries become the property of the promoter. The promoter collects personal information about you to enable you to participate in this promotion.
28. The promoter shall not be liable for any loss or damage whatsoever which is suffered (including but not limited to indirect or consequential loss) or for any personal injury suffered or sustained in connection with any prize/s except for any liability which cannot be excluded by law. The promoter is not responsible for any incorrect or inaccurate information, either caused by the entrant or for any of the equipment or programming associated with or utilised in this competition, or for any technical error, or any combination thereof that may occur in the course of the administration of this competition including any omission, interruption, deletion, defect, delay in operation or transmission, communications line or telephone, mobile or satellite network failure, theft or destruction or unauthorised access to or alteration of entries.
conducting and promoting this competition. For purposes of public statements and advertisements the promoter will only publish the winner’s first name. A request to access, update or correct any information should be directed to the promoter. If you are not willing for this to occur you cannot participate in the promotion.
31. You consent to your answers to the 10 wedding-related questions being used for a statistical study for The Maharani Diaries. Please note that at no time will your name or contact information be used in the published results. Only your answers will be used to form aggregate figures and to gain useful insights into brides’ spending and the wedding industry.
|
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GCI Task Model."""
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.modules.gci.models.comment import GCIComment
from soc.modules.gci.models.work_submission import GCIWorkSubmission
import soc.models.profile
import soc.modules.gci.models.organization
import soc.modules.gci.models.program
# state that the task is in when it is not yet available
UNPUBLISHED = 'Unpublished'
# state that the task is in when it is open
OPEN = 'Open'
# state that the task is in when it is claimed
CLAIMED = 'Claimed'
# state that task has been claimed but never finished and has been reopened
REOPENED = 'Reopened'
# state that the task has not been approved by org admin
UNAPPROVED = 'Unapproved'
# state that task has been successfully closed
CLOSED = 'Closed'
# TODO(piyush.devel): Define constants for the rest of the statuses.
# states in which a task does not show up publicly
UNAVAILABLE = [UNPUBLISHED, UNAPPROVED]
# states in which a student can claim a task
CLAIMABLE = [OPEN, REOPENED]
# States in which we consider the task to count towards the task quota of
# the student.
ACTIVE_CLAIMED_TASK = [
'ClaimRequested', CLAIMED, 'ActionNeeded', 'NeedsWork', 'NeedsReview']
# States in which we consider that the student can work on a task as long
# as the deadline has not passed.
TASK_IN_PROGRESS = [CLAIMED, 'ActionNeeded', 'NeedsWork', 'NeedsReview']
# states in which the student is allowed to transition the task to NeedsReview
SEND_FOR_REVIEW_ALLOWED = [CLAIMED, 'ActionNeeded', 'NeedsWork']
class DifficultyLevel(object):
"""Enumerates all difficulty levels for GCI Tasks.
"""
EASY = 'Easy'
MEDIUM = 'Medium'
HARD = 'Hard'
UNKNOWN = 'Unknown'
DIFFICULTIES = [
DifficultyLevel.EASY, DifficultyLevel.MEDIUM, DifficultyLevel.HARD,
DifficultyLevel.UNKNOWN]
POINTS = {
DifficultyLevel.EASY: 1,
DifficultyLevel.MEDIUM: 2,
DifficultyLevel.HARD: 4,
DifficultyLevel.UNKNOWN: 0
}
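# Illustrative lookups (not part of the original module):
#   POINTS[DifficultyLevel.MEDIUM]   # -> 2
#   POINTS[DifficultyLevel.UNKNOWN]  # -> 0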
class GCITask(db.Model):
"""Model for a task used in GCI workflow.
"""
#: Required field indicating the "title" of the task
title = db.StringProperty(required=True,
verbose_name=ugettext('Task Title'))
title.help_text = ugettext('Title of the task')
#: Required field containing the description of the task
description = db.TextProperty(required=True,
verbose_name=ugettext('Description'))
description.help_text = ugettext('Complete description of the task')
#: Field indicating the difficulty level of the Task.
difficulty_level = db.StringProperty(required=False,
verbose_name=ugettext('Difficulty'), choices=DIFFICULTIES)
#: Field indicating the types of the Task
types = db.StringListProperty(verbose_name=ugettext('Type'))
#: Field which contains the arbitrary tags for the task. These tags can
#: be assigned by org admins and mentors.
tags = db.StringListProperty(verbose_name=ugettext('Tags'))
#: A field which contains time allowed for completing the task (in hours)
#: from the moment that this task has been assigned to a Student
time_to_complete = db.IntegerProperty(required=True,
verbose_name=ugettext('Time to Complete'))
time_to_complete.help_text = ugettext(
'Time allowed to complete the task, in hours, once it is claimed')
#: List of Mentors assigned to this task. A Mentor who creates this
#: task is assigned as the Mentor by default. An Org Admin will have
#: to assign a Mentor upon task creation.
mentors = db.ListProperty(item_type=db.Key, default=[])
#: Student profile to whom this task is currently assigned to.
student = db.ReferenceProperty(reference_class=soc.models.profile.Profile,
required=False,
collection_name='assigned_tasks')
#: Program in which this Task has been created
program = db.ReferenceProperty(
reference_class=soc.modules.gci.models.program.GCIProgram,
required=True, collection_name='tasks')
#: Organization in which this Task has been created
org = db.ReferenceProperty(
reference_class=soc.modules.gci.models.organization.GCIOrganization,
required=True, collection_name='org_tasks')
#: Required property which holds the state, the Task is currently in.
#: This is a hidden field not shown on forms. Handled by logic internally.
#: The state can be one of the following:
#: Unapproved: If Task is created by a Mentor, this is the automatically
#: assigned state.
#: Unpublished: This Task is not published yet.
#: OPEN: This Task is open and ready to be claimed.
#: Reopened: This Task has been claimed but never finished and has been
#: reopened.
#: ClaimRequested: A Student has requested to claim this task.
#: CLAIMED: This Task has been claimed and someone is working on it.
#: ActionNeeded: Work on this Task must be submitted for review within
#: 24 hours.
#: Closed: Work on this Task has been completed to the org's satisfaction.
#: (A now-deprecated status existed for a Student who had completed work but
#: still needed to complete Student registration before the task was closed;
#: it is deprecated since we now register before any interaction.)
#: NeedsWork: The work on this Task needs a bit more brushing up. This
#: state is followed by a Mentor review.
#: NeedsReview: Student has submitted work for this task and it should
#: be reviewed by a Mentor.
#: Invalid: The Task is deleted either by an Org Admin/Mentor
status = db.StringProperty(
required=True, verbose_name=ugettext('Status'),
choices=[UNAPPROVED, UNPUBLISHED, OPEN, REOPENED,
'ClaimRequested', CLAIMED, 'ActionNeeded',
CLOSED, 'NeedsWork', 'NeedsReview', 'Invalid'],
default=UNAPPROVED)
#: Indicates when the Task was closed. Its value is None before it is
#: completed.
closed_on = db.DateTimeProperty(required=False,
verbose_name=ugettext('Closed on'))
#: This field is set to the next deadline that will have consequences for
#: this Task. For instance this will store a DateTime property which will
#: tell when this Task should be completed.
deadline = db.DateTimeProperty(required=False,
verbose_name=ugettext('Deadline'))
#: Property holding the list of GCIProfiles who are subscribed to the task.
subscribers = db.ListProperty(item_type=db.Key, default=[])
#: Required field containing the Mentor/Org Admin who created this task.
#: If site developer has created the task, it is empty.
created_by = db.ReferenceProperty(reference_class=soc.models.profile.Profile,
required=False,
collection_name='created_tasks',
verbose_name=ugettext('Created by'))
#: Date when the proposal was created
created_on = db.DateTimeProperty(required=True, auto_now_add=True,
verbose_name=ugettext('Created on'))
#: Required field containing the Mentor/Org Admin who last edited this
#: task. It changes only when Mentor/Org Admin changes title, description,
#: difficulty, task_type, time_to_complete. If site developer has modified
#: the task, it is empty.
modified_by = db.ReferenceProperty(reference_class=soc.models.profile.Profile,
required=False,
collection_name='edited_tasks',
verbose_name=ugettext('Modified by'))
#: Date when the proposal was last modified, should be set manually on edit
modified_on = db.DateTimeProperty(required=True, auto_now_add=True,
verbose_name=ugettext('Modified on'))
#: The task can be marked to be featured on program home page.
is_featured = db.BooleanProperty(default=False, required=True,
verbose_name=ugettext('Featured'))
is_featured.help_text = ugettext(
'Should this task be featured on the program homepage.')
#: Determines whether the student who completed this task should
#: receive points for it
points_invalidated = db.BooleanProperty(default=False,
verbose_name=ugettext('Points invalidated.'))
def taskTimeToComplete(self):
days = self.time_to_complete // 24
hours = self.time_to_complete % 24
result = []
if days == 1:
result.append("1 day")
if days > 1:
result.append("%d days" % days)
if days and hours:
result.append(" and ")
if hours == 1:
result.append("1 hour")
if hours > 1:
result.append("%d hours" % hours)
return "".join(result)
def isAvailable(self):
"""Returns True if the task is published."""
return self.status not in UNAVAILABLE
def workSubmissions(self):
"""Returns the GCIWorksubmissions that have the given task as parent."""
q = GCIWorkSubmission.all()
q.ancestor(self)
return q.fetch(1000)
def comments(self):
"""Returns the GCIComments that have the given task as parent.
The results are sorted by the date on which they have been created.
"""
q = GCIComment.all()
q.ancestor(self)
q.order('created_on')
return q.fetch(1000)
|
Looking for quality and reasonably priced accommodation in Hong Kong? Enjoy the comforts of a home away from home. Conveniently located in the centre of the shopping and commercial area, close to all kinds of transport lines.
|
from django.db import models
from django.db.models import Sum, F
from django.db.models.functions import Coalesce
from django.contrib.auth.models import User
from django.utils.functional import cached_property
from .managers import ProductManager, PurchaseOrderManager, PurchaseItemManager, SalesOrderManager, SalesItemManager, AdjustmentManager
class MyUser(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
date_format = models.CharField(max_length=32, default='%m/%d/%Y')
date_format_ym = models.CharField(max_length=32, default='%m/%Y')
date_format_ym_short = models.CharField(max_length=32, default='%m/%y')
date_format_md = models.CharField(max_length=32, default='%m/%d')
class Currency(models.Model):
name = models.CharField(max_length=128, unique=True)
symbol = models.CharField(max_length=8, unique=True)
class Meta:
verbose_name_plural = 'currencies'
def __str__(self):
return self.name
class Product(models.Model):
sku = models.CharField(max_length=128, unique=True)
name = models.CharField(max_length=255)
price = models.DecimalField(max_digits=10, decimal_places=2)
objects = ProductManager()
@cached_property
def purchased(self):
return PurchaseItem.objects.filter(product=self).aggregate(value=Coalesce(Sum('quantity'), 0))['value']
def get_sold(self, year=None, month=None):
objects = SalesItem.objects.filter(product=self).exclude(sales_order__status='RE')
if year is not None and month is not None:
objects = objects.filter(sales_order__date__year=year, sales_order__date__month=month)
return objects.aggregate(value=Coalesce(Sum('quantity'), 0))['value']
sold = cached_property(get_sold)
@cached_property
def pending(self):
return SalesItem.objects.filter(product=self, sales_order__status='PR').aggregate(value=Coalesce(Sum('quantity'), 0))['value']
@cached_property
def adjustment(self):
return Adjustment.objects.filter(product=self).aggregate(value=Coalesce(Sum('quantity'), 0))['value']
def available(self):
return self.purchased - self.sold + self.adjustment
def available_str(self):
return str(self.available() + self.pending) + ('' if self.pending == 0 else ' (-' + str(self.pending) + ')')
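# Illustrative sketch (not part of the original model): with purchased=10,
# sold=4 (of which pending=2 are still processing) and adjustment=0,
# available() returns 6 and available_str() returns "8 (-2)": the stock on
# hand including pending units, with the pending quantity in parentheses.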
def avg_price(self):
revenue = SalesItem.objects.filter(product=self) \
.aggregate(value=Sum(F('price')*F('quantity')*(100-F('sales_order__discount'))/100, output_field=models.DecimalField()))['value']
if revenue:
return '${0:,.2f}'.format(revenue / self.sold)
else:
return "N/A"
def last_sold(self):
try:
last_so = SalesItem.objects.filter(product=self).exclude(sales_order__status='RE').values('sales_order__date').latest('sales_order__date')
return last_so['sales_order__date']
except SalesItem.DoesNotExist:
return None
def __str__(self):
return self.sku
class PurchaseOrder(models.Model):
date = models.DateField()
order_id = models.CharField(max_length=128, unique=True)
currency = models.ForeignKey(Currency)
objects = PurchaseOrderManager()
def __str__(self):
return str(self.id)
class PurchaseItem(models.Model):
purchase_order = models.ForeignKey(PurchaseOrder)
product = models.ForeignKey(Product)
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.IntegerField()
objects = PurchaseItemManager()
def __str__(self):
return str(self.id)
class Platform(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class SalesOrder(models.Model):
STATUS_CHOICES = (
('PR', 'Processing'),
('CO', 'Complete'),
('RE', 'Returned'),
)
date = models.DateField()
platform = models.ForeignKey(Platform)
order_id = models.CharField(max_length=128)
customer = models.CharField(max_length=255)
currency = models.ForeignKey(Currency)
discount = models.DecimalField(max_digits=10, decimal_places=2)
tax = models.DecimalField(max_digits=10, decimal_places=3)
status = models.CharField(max_length=2, choices=STATUS_CHOICES, default='PR', blank=False)
objects = SalesOrderManager()
def __str__(self):
return str(self.id)
class SalesItem(models.Model):
sales_order = models.ForeignKey(SalesOrder)
product = models.ForeignKey(Product)
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.IntegerField()
objects = SalesItemManager()
def __str__(self):
return str(self.id)
class Adjustment(models.Model):
date = models.DateField()
product = models.ForeignKey(Product)
quantity = models.IntegerField()
comment = models.CharField(max_length=255, blank=True)
objects = AdjustmentManager()
def __str__(self):
return str(self.id)
|
Did you know that choosing a name is among the most vital aspects of starting a company UK? The name of your company reflects pretty much everything about what you do. It creates an impression for you and helps you to stand out from the crowd. Therefore, be sure to pick the best name when starting a company UK.
You must remember that your business’s name is going to be the first point of contact. Your potential buyers may notice your business by its name; you will have it displayed on billboards, websites, and many other promotional materials. Therefore, you should pick a sellable name for the business, one that will help your marketing plans as well. Your business name will be your brand identity too.
Before you actually use the name for your business after starting a company UK, you should read the selected name many times and test whether it has a good appeal. If you are not convinced, you should go for a name that ‘sounds’ great. In short, avoid names that are difficult or weird to pronounce. Next, you should have someone else read the name out loud; if you find it appealing, go for it. Don’t mimic an existing brand. Apart from reading and hearing the name, you should print it on paper (in the form of a logo or artistic text) and see if it has aesthetic value. Remember, this name will be printed on a variety of materials later on.
It is best to avoid a personal touch when you select a name. In fact, a personal name will give a ‘limited’ feeling when it comes to branding. So, be smart and pick something widely appealing and professional when you are starting a company UK.
|
import glob
import zipfile
from collections import defaultdict
from typing import List
from logger import logger
from perfrunner.helpers.misc import pretty_dict
GOLANG_LOG_FILES = ("eventing.log",
"fts.log",
"goxdcr.log",
"indexer.log",
"projector.log",
"query.log")
def check_for_golang_panic(file_name: str) -> List[str]:
zf = zipfile.ZipFile(file_name)
panic_files = []
for name in zf.namelist():
if any(log_file in name for log_file in GOLANG_LOG_FILES):
data = zf.read(name)
if "panic" in str(data):
panic_files.append(name)
return panic_files
def check_for_crash_files(file_name: str) -> List[str]:
zf = zipfile.ZipFile(file_name)
crash_files = []
for name in zf.namelist():
if name.endswith('.dmp'):
crash_files.append(name)
return crash_files
def validate_logs(file_name: str):
panic_files = check_for_golang_panic(file_name)
crash_files = check_for_crash_files(file_name)
return panic_files, crash_files
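# Illustrative return value (hypothetical archive contents): for a zip that
# contains an indexer.log with a panic and a memcached .dmp file, this would
# return something like (['cbcollect/indexer.log'], ['cbcollect/memcached.dmp']).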
def main():
failures = defaultdict(dict)
for file_name in glob.iglob('./*.zip'):
panic_files, crash_files = validate_logs(file_name)
if panic_files:
failures['panics'][file_name] = panic_files
if crash_files:
failures['crashes'][file_name] = crash_files
if failures:
logger.interrupt(
"Following failures found: {}".format(pretty_dict(failures)))
if __name__ == '__main__':
main()
|
March 23-24 play date will be considered for league play weather permitting.
The Iowa Soccer League (ISL) is a critical part of the Iowa Soccer player development pathway and competitions pathway. The ISL was created to serve the developmental needs of the youth soccer players in Iowa and is unique in that it is guided by one overarching focus – player development. The ISL strives to be in full alignment with the U.S. Soccer Player Development Initiatives.
There are navigation links on the left. The first navigation link is the main public schedule. All schedules, scores, announcements, and cancellations will be posted to this schedule.
Next to the game will be a green icon. Click on the icon to Post Score and enter the score.
WEATHER RELATED RESCHEDULES: In the event of weather, field closures, or an "act of god" the league director will work with clubs and verify the dates, locations, and matches that will be cancelled. These matches may be rescheduled at any time during the season using the request form below.
In the event matches are unable to be rescheduled those matches will be recorded as not played and teams will be refunded after the conclusion of the season.
DIRECTOR APPROVED RESCHEDULES: Building the ISL State League schedule can be a daunting task, we understand schedules are never perfect. In the case both directors agree to a schedule change the league will approve of the request.
FORFEITS: Teams not playing matches as scheduled will be subject to a $400 forfeit fine and possible suspension from the league. Failure to adhere to this policy will render the offending team responsible for all referee fees and match expenses for the game.
The entry fee is $195.00 per team. The assignor fee will remain at $5.00 per match. Referee fees can be seen below.
Each team will pay 1/2 the referee fee for each scheduled match. Field fees will be charged to teams who rent fields for club matches. We will bill referee and field fees. Clubs are refunded for unplayed matches at the end of the season. All fees should be paid by the team's club.
Individual player registration fees are not included in the ISL fees and are generally determined by the players club.
Iowa Soccer will bill clubs directly for fees.
Interested in becoming a referee? Please contact the assignor in your area!
Iowa Soccer is working with the Iowa Referee Committee (IRC) to provide the best environment to train, develop, and maintain not only our players but also our referees. If you'd like to provide them with feedback please take a minute after your match to complete the form below. This form can also be used to document missing referee assignments in ISL matches.
|
# -*- coding: utf-8 -*-
"""
Django settings for cercanias project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
try:
from local_settings import *
except ImportError:
import sys
sys.stderr.write("Warning: Can't find the file 'local_settings.py")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e38u=_m)^5e#-zt_n4uiei9%d@5(wz&ab11$q==3)y$)qva^^$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'cercanias_api',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cercanias.urls'
WSGI_APPLICATION = 'cercanias.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
#CORS_ORIGIN_WHITELIST = (
# 'jorgeas80.github.com',
#)
|
Bravo Concealment offers the best gun holster options to fully conceal your Walther PPS M2. Our Kydex Holsters offer short lead times of only 3-5 business days. Maximize your everyday concealment with the best kydex holster backed by an unheard of Unlimited Lifetime Warranty.
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: gen_clustering_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('kmc2_chain_initialization')
def kmc2_chain_initialization(distances, seed, name=None):
r"""Returns the index of a data point that should be added to the seed set.
Entries in distances are assumed to be squared distances of candidate points to
the already sampled centers in the seed set. The op constructs one Markov chain
of the k-MC^2 algorithm and returns the index of one candidate point to be added
as an additional cluster center.
Args:
distances: A `Tensor` of type `float32`.
Vector with squared distances to the closest previously sampled
cluster center for each candidate point.
seed: A `Tensor` of type `int64`.
Scalar. Seed for initializing the random number generator.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`. Scalar with the index of the sampled point.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"KMC2ChainInitialization", distances=distances, seed=seed, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"KMC2ChainInitialization", name, _ctx._post_execution_callbacks,
distances, seed)
return _result
except _core._FallbackException:
return kmc2_chain_initialization_eager_fallback(
distances, seed, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def kmc2_chain_initialization_eager_fallback(distances, seed, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function kmc2_chain_initialization
"""
_ctx = ctx if ctx else _context.context()
distances = _ops.convert_to_tensor(distances, _dtypes.float32)
seed = _ops.convert_to_tensor(seed, _dtypes.int64)
_inputs_flat = [distances, seed]
_attrs = None
_result = _execute.execute(b"KMC2ChainInitialization", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("KMC2ChainInitialization")(None)
@tf_export('kmeans_plus_plus_initialization')
def kmeans_plus_plus_initialization(points, num_to_sample, seed, num_retries_per_sample, name=None):
r"""Selects num_to_sample rows of input using the KMeans++ criterion.
Rows of points are assumed to be input points. One row is selected at random.
Subsequent rows are sampled with probability proportional to the squared L2
distance from the nearest row selected thus far till num_to_sample rows have
been sampled.
Args:
points: A `Tensor` of type `float32`.
Matrix of shape (n, d). Rows are assumed to be input points.
num_to_sample: A `Tensor` of type `int64`.
Scalar. The number of rows to sample. This value must not be
larger than n.
seed: A `Tensor` of type `int64`.
Scalar. Seed for initializing the random number generator.
num_retries_per_sample: A `Tensor` of type `int64`.
Scalar. For each row that is sampled, this parameter
specifies the number of additional points to draw from the current
distribution before selecting the best. If a negative value is specified, a
heuristic is used to sample O(log(num_to_sample)) additional points.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
Matrix of shape (num_to_sample, d). The sampled rows.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"KmeansPlusPlusInitialization", points=points,
num_to_sample=num_to_sample, seed=seed,
num_retries_per_sample=num_retries_per_sample, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"KmeansPlusPlusInitialization", name, _ctx._post_execution_callbacks,
points, num_to_sample, seed, num_retries_per_sample)
return _result
except _core._FallbackException:
return kmeans_plus_plus_initialization_eager_fallback(
points, num_to_sample, seed, num_retries_per_sample, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def kmeans_plus_plus_initialization_eager_fallback(points, num_to_sample, seed, num_retries_per_sample, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function kmeans_plus_plus_initialization
"""
_ctx = ctx if ctx else _context.context()
points = _ops.convert_to_tensor(points, _dtypes.float32)
num_to_sample = _ops.convert_to_tensor(num_to_sample, _dtypes.int64)
seed = _ops.convert_to_tensor(seed, _dtypes.int64)
num_retries_per_sample = _ops.convert_to_tensor(num_retries_per_sample, _dtypes.int64)
_inputs_flat = [points, num_to_sample, seed, num_retries_per_sample]
_attrs = None
_result = _execute.execute(b"KmeansPlusPlusInitialization", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("KmeansPlusPlusInitialization")(None)
_nearest_neighbors_outputs = ["nearest_center_indices",
"nearest_center_distances"]
_NearestNeighborsOutput = _collections.namedtuple(
"NearestNeighbors", _nearest_neighbors_outputs)
@tf_export('nearest_neighbors')
def nearest_neighbors(points, centers, k, name=None):
r"""Selects the k nearest centers for each point.
Rows of points are assumed to be input points. Rows of centers are assumed to be
the list of candidate centers. For each point, the k centers that have least L2
distance to it are computed.
Args:
points: A `Tensor` of type `float32`.
Matrix of shape (n, d). Rows are assumed to be input points.
centers: A `Tensor` of type `float32`.
Matrix of shape (m, d). Rows are assumed to be centers.
k: A `Tensor` of type `int64`.
Scalar. Number of nearest centers to return for each point. If k is larger
than m, then only m centers are returned.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (nearest_center_indices, nearest_center_distances).
nearest_center_indices: A `Tensor` of type `int64`. Matrix of shape (n, min(m, k)). Each row contains the
indices of the centers closest to the corresponding point, ordered by
increasing distance.
nearest_center_distances: A `Tensor` of type `float32`. Matrix of shape (n, min(m, k)). Each row contains the
squared L2 distance to the corresponding center in nearest_center_indices.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"NearestNeighbors", points=points, centers=centers, k=k, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"NearestNeighbors", _inputs_flat, _attrs, _result, name)
_result = _NearestNeighborsOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"NearestNeighbors", name, _ctx._post_execution_callbacks, points,
centers, k)
_result = _NearestNeighborsOutput._make(_result)
return _result
except _core._FallbackException:
return nearest_neighbors_eager_fallback(
points, centers, k, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def nearest_neighbors_eager_fallback(points, centers, k, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function nearest_neighbors
"""
_ctx = ctx if ctx else _context.context()
points = _ops.convert_to_tensor(points, _dtypes.float32)
centers = _ops.convert_to_tensor(centers, _dtypes.float32)
k = _ops.convert_to_tensor(k, _dtypes.int64)
_inputs_flat = [points, centers, k]
_attrs = None
_result = _execute.execute(b"NearestNeighbors", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"NearestNeighbors", _inputs_flat, _attrs, _result, name)
_result = _NearestNeighborsOutput._make(_result)
return _result
_ops.RegisterShape("NearestNeighbors")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "KMC2ChainInitialization"
# input_arg {
# name: "distances"
# type: DT_FLOAT
# }
# input_arg {
# name: "seed"
# type: DT_INT64
# }
# output_arg {
# name: "index"
# type: DT_INT64
# }
# }
# op {
# name: "KmeansPlusPlusInitialization"
# input_arg {
# name: "points"
# type: DT_FLOAT
# }
# input_arg {
# name: "num_to_sample"
# type: DT_INT64
# }
# input_arg {
# name: "seed"
# type: DT_INT64
# }
# input_arg {
# name: "num_retries_per_sample"
# type: DT_INT64
# }
# output_arg {
# name: "samples"
# type: DT_FLOAT
# }
# }
# op {
# name: "NearestNeighbors"
# input_arg {
# name: "points"
# type: DT_FLOAT
# }
# input_arg {
# name: "centers"
# type: DT_FLOAT
# }
# input_arg {
# name: "k"
# type: DT_INT64
# }
# output_arg {
# name: "nearest_center_indices"
# type: DT_INT64
# }
# output_arg {
# name: "nearest_center_distances"
# type: DT_FLOAT
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n=\n\027KMC2ChainInitialization\022\r\n\tdistances\030\001\022\010\n\004seed\030\t\032\t\n\005index\030\t\np\n\034KmeansPlusPlusInitialization\022\n\n\006points\030\001\022\021\n\rnum_to_sample\030\t\022\010\n\004seed\030\t\022\032\n\026num_retries_per_sample\030\t\032\013\n\007samples\030\001\nl\n\020NearestNeighbors\022\n\n\006points\030\001\022\013\n\007centers\030\001\022\005\n\001k\030\t\032\032\n\026nearest_center_indices\030\t\032\034\n\030nearest_center_distances\030\001")
|
This paper considers the growth in large retirement villages which appear to have many of the same facilities as upscale resorts, and asks what attributes and traits the managers of these facilities require in order to be successful in their roles. After a literature review of what is known about the characteristics of hotel managers, the literature available on retirement homes, and the private member clubs sector, the paper discusses the findings of five in-depth interviews with ‘village managers’, the most common title for those in charge of these facilities. Managers were found to be well qualified, often in hospitality management. Most did not have a medical background and all stressed the customer-focused aspects of their role, and in particular the importance of building long-term relationships with well-educated and eloquent residents. Managers felt their hospitality background helped them, but that their village manager roles enabled them to build communities rather than constantly crisis-managing as in hotels. Several emphasised that doing well in the retirement village sector required life experience and that, while they acknowledged the need to develop future village managers, recent hospitality graduates may struggle to gain respect from the residents. Recommendations for further large-scale quantitative studies and comparisons with other regions are made. Studies into the experience of residents are also recommended.
|
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The development of this software was sponsored by NAG Ltd. (http://www.nag.co.uk)
and the EPSRC Centre For Doctoral Training in Industrially Focused Mathematical
Modelling (EP/L015803/1) at the University of Oxford. Please contact NAG for
alternative licensing.
"""
# Ensure compatibility with Python 2
from __future__ import absolute_import, division, print_function, unicode_literals
from math import sqrt, sin
import numpy as np
import unittest
from pybobyqa.model import Model
from pybobyqa.util import sumsq, model_value
def array_compare(x, y, thresh=1e-14):
return np.max(np.abs(x - y)) < thresh
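# Illustrative checks (not part of the original tests):
#   array_compare(np.array([1.0]), np.array([1.0 + 1e-15]))  # -> True
#   array_compare(np.array([1.0]), np.array([1.1]))          # -> False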
def rosenbrock_residuals(x):
return np.array([10.0 * (x[1] - x[0] ** 2), 1.0 - x[0]])
def rosenbrock(x):
return sumsq(rosenbrock_residuals(x))
def objfun(x):
# An arbitrary-dimension objective function
return sin(np.dot(x, np.arange(1,len(x)+1,dtype=float))) # f(x1,...,xn) = sin(x1 + 2*x2 + ... + n*xn)
class TestAddValues(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e20 * np.ones((n,))
xu = 1e20 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
self.assertEqual(model.npt(), npt, 'Wrong npt after initialisation')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x0), 'Wrong xopt after initialisation')
self.assertTrue(array_compare(model.fopt(), rosenbrock(x0)), 'Wrong fopt after initialisation')
# Now add better point
x1 = np.array([1.0, 0.9])
rvec = rosenbrock(x1)
model.change_point(1, x1 - model.xbase, rvec, allow_kopt_update=True)
self.assertEqual(model.npt(), npt, 'Wrong npt after x1')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after x1')
self.assertTrue(array_compare(model.fopt(), rosenbrock(x1)), 'Wrong fopt after x1')
# Now add worse point
x2 = np.array([2.0, 0.9])
rvec = rosenbrock(x2)
model.change_point(2, x2 - model.xbase, rvec, allow_kopt_update=True)
self.assertEqual(model.npt(), npt, 'Wrong npt after x2')
self.assertTrue(array_compare(model.xpt(0, abs_coordinates=True), x0), 'Wrong xpt(0) after x2')
self.assertTrue(array_compare(model.xpt(1, abs_coordinates=True), x1), 'Wrong xpt(1) after x2')
self.assertTrue(array_compare(model.xpt(2, abs_coordinates=True), x2), 'Wrong xpt(2) after x2')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after x2')
self.assertTrue(array_compare(model.fopt(), rosenbrock(x1)), 'Wrong fopt after x2')
# Now add best point (but don't update kopt)
x3 = np.array([1.0, 1.0])
rvec = rosenbrock(x3)
model.change_point(0, x3 - model.xbase, rvec, allow_kopt_update=False) # full: overwrite x0
self.assertEqual(model.npt(), npt, 'Wrong npt after x3')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after x3')
self.assertTrue(array_compare(model.fopt(), rosenbrock(x1)), 'Wrong fopt after x3')
self.assertAlmostEqual(model.fopt(), rosenbrock(x1), msg='Wrong fopt after x3')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), model.as_absolute_coordinates(model.xopt())),
'Comparison wrong after x3')
dirns = model.xpt_directions(include_kopt=True)
self.assertTrue(array_compare(x3 - x1, dirns[0, :]), 'Wrong dirn 0')
self.assertTrue(array_compare(x1 - x1, dirns[1, :]), 'Wrong dirn 1')
self.assertTrue(array_compare(x2 - x1, dirns[2, :]), 'Wrong dirn 2')
dirns = model.xpt_directions(include_kopt=False)
self.assertTrue(array_compare(x3 - x1, dirns[0, :]), 'Wrong dirn 0 (no kopt)')
# self.assertTrue(array_compare(x1 - x1, dirns[1, :]), 'Wrong dirn 1')
self.assertTrue(array_compare(x2 - x1, dirns[1, :]), 'Wrong dirn 1 (no kopt)')
class TestSwap(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e20 * np.ones((n,))
xu = 1e20 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
# Now add better point
x1 = np.array([1.0, 0.9])
f1 = rosenbrock(x1)
model.change_point(1, x1 - model.xbase, f1, allow_kopt_update=True)
# Now add worse point
x2 = np.array([2.0, 0.9])
f2 = rosenbrock(x2)
model.change_point(2, x2 - model.xbase, f2, allow_kopt_update=True)
model.swap_points(0, 2)
self.assertTrue(array_compare(model.xpt(0, abs_coordinates=True), x2), 'Wrong xpt(0) after swap 1')
self.assertTrue(array_compare(model.xpt(1, abs_coordinates=True), x1), 'Wrong xpt(1) after swap 1')
self.assertTrue(array_compare(model.xpt(2, abs_coordinates=True), x0), 'Wrong xpt(2) after swap 1')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after swap 1')
model.swap_points(1, 2)
self.assertTrue(array_compare(model.xpt(0, abs_coordinates=True), x2), 'Wrong xpt(0) after swap 2')
self.assertTrue(array_compare(model.xpt(1, abs_coordinates=True), x0), 'Wrong xpt(1) after swap 2')
self.assertTrue(array_compare(model.xpt(2, abs_coordinates=True), x1), 'Wrong xpt(2) after swap 2')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after swap 2')
class TestBasicManipulation(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
self.assertTrue(array_compare(model.sl, xl - x0), 'Wrong sl after initialisation')
self.assertTrue(array_compare(model.su, xu - x0), 'Wrong su after initialisation')
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
self.assertTrue(array_compare(model.as_absolute_coordinates(x1 - x0), x1), 'Wrong abs coords')
self.assertTrue(array_compare(model.as_absolute_coordinates(np.array([-1e3, 1e3])-x0), np.array([-1e2, 1e2])),
'Bad abs coords with bounds')
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
sqdists = model.distances_to_xopt()
self.assertAlmostEqual(sqdists[0], sumsq(x0 - x1), msg='Wrong distance 0')
self.assertAlmostEqual(sqdists[1], sumsq(x1 - x1), msg='Wrong distance 1')
self.assertAlmostEqual(sqdists[2], sumsq(x2 - x1), msg='Wrong distance 2')
model.add_new_sample(0, rosenbrock(x0))
self.assertEqual(model.nsamples[0], 2, 'Wrong number of samples 0')
self.assertEqual(model.nsamples[1], 1, 'Wrong number of samples 1')
self.assertEqual(model.nsamples[2], 1, 'Wrong number of samples 2')
for i in range(50):
model.add_new_sample(0, 0.0)
self.assertEqual(model.kopt, 0, 'Wrong kopt after bad resampling')
self.assertTrue(array_compare(model.fopt(), 2*rosenbrock(x0)/52), 'Wrong fopt after bad resampling')
d = np.array([10.0, 10.0])
dirns_old = model.xpt_directions(include_kopt=True)
model.shift_base(d)
dirns_new = model.xpt_directions(include_kopt=True)
self.assertTrue(array_compare(model.xbase, x0 + d), 'Wrong new base')
self.assertEqual(model.kopt, 0, 'Wrong kopt after shift base')
for i in range(3):
self.assertTrue(array_compare(dirns_old[i, :], dirns_new[i, :]), 'Wrong dirn %i after shift base' % i)
self.assertTrue(array_compare(model.sl, xl - x0 - d), 'Wrong sl after shift base')
self.assertTrue(array_compare(model.su, xu - x0 - d), 'Wrong su after shift base')
# save_point and get_final_results
model.change_point(0, x0 - model.xbase, rosenbrock(x0)) # revert after resampling
model.change_point(1, x1 - model.xbase, rosenbrock(x1)) # revert after resampling
x, f, gradmin, hessmin, nsamples = model.get_final_results()
self.assertTrue(array_compare(x, x1), 'Wrong final x')
self.assertAlmostEqual(rosenbrock(x1), f, msg='Wrong final f')
self.assertTrue(array_compare(np.zeros((2,)), gradmin), 'Wrong final gradmin')
self.assertTrue(array_compare(np.zeros((2,2)), hessmin), 'Wrong final hessmin')
self.assertEqual(1, nsamples, 'Wrong final nsamples')
self.assertIsNone(model.xsave, 'xsave not none after initialisation')
self.assertIsNone(model.fsave, 'fsave not none after initialisation')
self.assertIsNone(model.nsamples_save, 'nsamples_save not none after initialisation')
model.save_point(x0, rosenbrock(x0), 1, x_in_abs_coords=True)
self.assertTrue(array_compare(model.xsave, x0), 'Wrong xsave after saving')
self.assertAlmostEqual(model.fsave, rosenbrock(x0), msg='Wrong fsave after saving')
self.assertEqual(model.nsamples_save, 1, 'Wrong nsamples_save after saving')
x, f, gradmin, hessmin, nsamples = model.get_final_results()
self.assertTrue(array_compare(x, x1), 'Wrong final x after saving')
self.assertAlmostEqual(rosenbrock(x1), f, msg='Wrong final f after saving')
self.assertEqual(1, nsamples, 'Wrong final nsamples after saving')
model.save_point(x2 - model.xbase, 0.0, 2, x_in_abs_coords=False)
self.assertTrue(array_compare(model.xsave, x2), 'Wrong xsave after saving 2')
self.assertAlmostEqual(model.fsave, 0.0, msg='Wrong fsave after saving 2')
self.assertEqual(model.nsamples_save, 2, 'Wrong nsamples_save after saving 2')
x, f, gradmin, hessmin, nsamples = model.get_final_results()
self.assertTrue(array_compare(x, x2), 'Wrong final x after saving 2')
self.assertAlmostEqual(f, 0.0, msg='Wrong final f after saving 2')
self.assertEqual(2, nsamples, 'Wrong final nsamples after saving 2')
model.save_point(x0, rosenbrock(x0), 3, x_in_abs_coords=True) # try to re-save a worse value
self.assertTrue(array_compare(model.xsave, x2), 'Wrong xsave after saving 3')
self.assertAlmostEqual(model.fsave, 0.0, msg='Wrong fsave after saving 3')
self.assertEqual(model.nsamples_save, 2, 'Wrong nsamples_save after saving 3')
class TestAveraging(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([1.0, 1.0])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
self.assertEqual(model.kopt, 2, 'Wrong kopt before resampling')
# Originally, x2 is the ideal point
# Here, testing that kopt moves back to x1 after adding heaps of bad x2 samples
for i in range(10):
model.add_new_sample(2, 5.0)
self.assertEqual(model.kopt, 1, 'Wrong kopt after resampling')
class TestMinObjValue(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
self.assertAlmostEqual(model.min_objective_value(), -1e20, msg='Wrong min obj value')
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1, abs_tol=1.0)
        self.assertAlmostEqual(model.min_objective_value(), 1.0, msg='Wrong min obj value 2')
class TestInterpMatrixLinear(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1, precondition=False)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
A, left_scaling, right_scaling = model.interpolation_matrix()
A_expect = np.zeros((2, 2))
A_expect[0, :] = x0 - x1 # x1 is xopt in this situation
A_expect[1, :] = x2 - x1
self.assertTrue(array_compare(A, A_expect), 'Interp matrix 1')
# For reference: model based around model.xbase
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model.interpolate_model()
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model.model_const, rosenbrock(model.xbase), msg='Wrong constant term')
self.assertTrue(array_compare(model.model_value(x1 - model.xbase, d_based_at_xopt=False, with_const_term=True),
rosenbrock(x1), thresh=1e-10), 'Wrong x1') # allow some inexactness
self.assertTrue(array_compare(model.model_value(x2 - model.xbase, d_based_at_xopt=False, with_const_term=True),
rosenbrock(x2), thresh=1e-10), 'Wrong x2')
# Test some other parameter settings for model.model_value()
self.assertTrue(array_compare(model.model_value(x2 - x1, d_based_at_xopt=True, with_const_term=True),
rosenbrock(x2), thresh=1e-10), 'Wrong x2 (from xopt)')
self.assertTrue(array_compare(model.model_value(x2 - x1, d_based_at_xopt=True, with_const_term=False),
rosenbrock(x2)-rosenbrock(model.xbase), thresh=1e-10), 'Wrong x2 (no constant)')
self.assertTrue(array_compare(model.model_value(x2 - model.xbase, d_based_at_xopt=False, with_const_term=False),
rosenbrock(x2) - rosenbrock(model.xbase), thresh=1e-10), 'Wrong x2 (no constant v2)')
g, hess = model.build_full_model()
self.assertTrue(np.allclose(g, model.model_grad + model.model_hess.dot(model.xopt(abs_coordinates=False))),
'Bad gradient')
self.assertTrue(np.allclose(hess, model.model_hess), 'Bad Hessian')
class TestInterpMatrixUnderdeterminedQuadratic(unittest.TestCase):
def runTest(self):
n = 2
npt = n+2
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
# x2 is xopt in this situation
self.assertTrue(model.kopt == 2, 'Wrong xopt')
xs = [x0, x1, x3]
xopt = x2
nxs = len(xs)
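        # Hand-build the expected bordered system for minimum Frobenius norm
        # quadratic interpolation: upper-left block 0.5*((xi-xopt).(xj-xopt))^2,
        # bordered by the displacement vectors xi-xopt.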
A = np.zeros((nxs+n,nxs+n))
for i in range(nxs):
for j in range(nxs):
A[i,j] = 0.5 * np.dot(xs[i]-xopt, xs[j]-xopt)**2
A[i,nxs:] = xs[i] - xopt
A[nxs:,i] = xs[i] - xopt
A2, left_scaling, right_scaling = model.interpolation_matrix()
# print("Expect", A)
# print("Got", A2)
self.assertTrue(np.allclose(A, A2), 'Interp matrix 1')
# For reference: model based around model.xbase
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model.interpolate_model(verbose=True)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(norm_chg_grad, np.linalg.norm(model.model_grad))
self.assertAlmostEqual(norm_chg_hess, np.linalg.norm(model.model_hess, ord='fro'))
self.assertAlmostEqual(model.model_const, objfun(model.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3]:
self.assertAlmostEqual(model.model_value(xi - model.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Test some other parameter settings for model.model_value()
# print("Ignore after here")
g, hess = model.build_full_model()
self.assertTrue(np.allclose(g, model.model_grad + model.model_hess.dot(model.xopt(abs_coordinates=False))),
'Bad gradient')
self.assertTrue(np.allclose(hess, model.model_hess), 'Bad Hessian')
# Build a new model
model2 = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
model2.change_point(1, x1 - model.xbase, objfun(x1))
model2.change_point(2, x2 - model.xbase, objfun(x2))
model2.change_point(3, x3 - model.xbase, objfun(x3))
# Force Hessian to be something else
model2.model_hess = np.eye(n)
A2, left_scaling, right_scaling = model2.interpolation_matrix()
self.assertTrue(np.allclose(A, A2), 'Interp matrix 2')
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model2.interpolate_model()
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model2.model_const, objfun(model2.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3]:
self.assertAlmostEqual(model2.model_value(xi - model2.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Compare distance of hessians
h1 = np.zeros((n,n))
h2 = np.eye(n)
self.assertLessEqual(np.linalg.norm(model.model_hess-h1, ord='fro'),
np.linalg.norm(model2.model_hess-h1, ord='fro'), 'Not min frob Hess 1')
self.assertLessEqual(np.linalg.norm(model2.model_hess - h2, ord='fro'),
np.linalg.norm(model.model_hess - h2, ord='fro'), 'Not min frob Hess 2')
# print(model.model_hess)
# print(model2.model_hess)
# Build a new model
model3 = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
model3.change_point(1, x1 - model.xbase, objfun(x1))
model3.change_point(2, x2 - model.xbase, objfun(x2))
model3.change_point(3, x3 - model.xbase, objfun(x3))
# Force Hessian to be something else
model3.model_hess = np.eye(n)
A2, left_scaling, right_scaling = model3.interpolation_matrix()
self.assertTrue(np.allclose(A, A2), 'Interp matrix 3')
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model3.interpolate_model(min_chg_hess=False)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model3.model_const, objfun(model3.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3]:
self.assertAlmostEqual(model3.model_value(xi - model3.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
self.assertTrue(np.allclose(model.model_hess, model3.model_hess),
'min_chg_hess=False not working')
class TestInterpMatrixUnderdeterminedQuadratic2(unittest.TestCase):
def runTest(self):
n = 2
npt = 2*n+1
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = x0 + np.array([-0.1, 2.0])
model.change_point(4, x4 - model.xbase, objfun(x4))
# x2 is xopt in this situation
self.assertTrue(model.kopt == 2, 'Wrong xopt')
xs = [x0, x1, x3, x4]
xopt = x2
nxs = len(xs)
A = np.zeros((nxs+n,nxs+n))
for i in range(nxs):
for j in range(nxs):
A[i,j] = 0.5 * np.dot(xs[i]-xopt, xs[j]-xopt)**2
A[i,nxs:] = xs[i] - xopt
A[nxs:,i] = xs[i] - xopt
A2, left_scaling, right_scaling = model.interpolation_matrix()
# print("Expect", A)
# print("Got", A2)
self.assertTrue(np.allclose(A, A2), 'Interp matrix 1')
# For reference: model based around model.xbase
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model.interpolate_model(verbose=True)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(norm_chg_grad, np.linalg.norm(model.model_grad))
self.assertAlmostEqual(norm_chg_hess, np.linalg.norm(model.model_hess, ord='fro'))
self.assertAlmostEqual(model.model_const, objfun(model.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3, x4]:
self.assertAlmostEqual(model.model_value(xi - model.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Test some other parameter settings for model.model_value()
g, hess = model.build_full_model()
self.assertTrue(np.allclose(g, model.model_grad + model.model_hess.dot(model.xopt(abs_coordinates=False))),
'Bad gradient')
self.assertTrue(np.allclose(hess, model.model_hess), 'Bad Hessian')
# Build a new model
model2 = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
model2.change_point(1, x1 - model.xbase, objfun(x1))
model2.change_point(2, x2 - model.xbase, objfun(x2))
model2.change_point(3, x3 - model.xbase, objfun(x3))
model2.change_point(4, x4 - model.xbase, objfun(x4))
# Force Hessian to be something else
model2.model_hess = np.eye(n)
A2, left_scaling, right_scaling = model2.interpolation_matrix()
self.assertTrue(np.allclose(A, A2), 'Interp matrix 2')
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model2.interpolate_model()
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model2.model_const, objfun(model2.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3, x4]:
self.assertAlmostEqual(model2.model_value(xi - model2.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Compare distance of hessians
h1 = np.zeros((n,n))
h2 = np.eye(n)
self.assertLessEqual(np.linalg.norm(model.model_hess-h1, ord='fro'),
np.linalg.norm(model2.model_hess-h1, ord='fro'), 'Not min frob Hess 1')
self.assertLessEqual(np.linalg.norm(model2.model_hess - h2, ord='fro'),
np.linalg.norm(model.model_hess - h2, ord='fro'), 'Not min frob Hess 2')
# print(model.model_hess)
# print(model2.model_hess)
# Build a new model
model3 = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
model3.change_point(1, x1 - model.xbase, objfun(x1))
model3.change_point(2, x2 - model.xbase, objfun(x2))
model3.change_point(3, x3 - model.xbase, objfun(x3))
model3.change_point(4, x4 - model.xbase, objfun(x4))
# Force Hessian to be something else
model3.model_hess = np.eye(n)
A2, left_scaling, right_scaling = model3.interpolation_matrix()
self.assertTrue(np.allclose(A, A2), 'Interp matrix 3')
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model3.interpolate_model(min_chg_hess=False)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model3.model_const, objfun(model3.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3, x4]:
self.assertAlmostEqual(model3.model_value(xi - model3.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
self.assertTrue(np.allclose(model.model_hess, model3.model_hess),
'min_chg_hess=False not working')
class TestInterpMatrixFullQuadratic(unittest.TestCase):
def runTest(self):
n = 2
npt = (n+1) * (n+2) // 2
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = x0 + np.array([-0.1, 2.0])
model.change_point(4, x4 - model.xbase, objfun(x4))
x5 = x0 + np.array([-1.1, 1.0])
model.change_point(5, x5 - model.xbase, objfun(x5))
# For reference: model based around model.xbase
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model.interpolate_model(verbose=True)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(norm_chg_grad, np.linalg.norm(model.model_grad))
self.assertAlmostEqual(norm_chg_hess, np.linalg.norm(model.model_hess, ord='fro'))
self.assertAlmostEqual(model.model_const, objfun(model.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3, x4, x5]:
self.assertAlmostEqual(model.model_value(xi - model.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Test some other parameter settings for model.model_value()
g, hess = model.build_full_model()
self.assertTrue(np.allclose(g, model.model_grad + model.model_hess.dot(model.xopt(abs_coordinates=False))),
'Bad gradient')
self.assertTrue(np.allclose(hess, model.model_hess), 'Bad Hessian')
class TestLagrangePolyLinear(unittest.TestCase):
def runTest(self):
n = 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
xopt = model.xopt()
for i in range(npt):
c, g, hess = model.lagrange_polynomial(i) # based at xopt
for j in range(npt):
dx = model.xpt(j) - xopt
lag_value = c + model_value(g, hess, dx)
expected_value = 1.0 if i==j else 0.0
self.assertAlmostEqual(lag_value, expected_value, msg="Lagrange for x%g has bad value at x%g" % (i, j))
class TestLagrangePolyUnderdeterminedQuadratic(unittest.TestCase):
def runTest(self):
n = 2
npt = n + 2
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
xopt = model.xopt()
for i in range(npt):
c, g, hess = model.lagrange_polynomial(i) # based at xopt
for j in range(npt):
dx = model.xpt(j) - xopt
lag_value = c + model_value(g, hess, dx)
expected_value = 1.0 if i == j else 0.0
self.assertAlmostEqual(lag_value, expected_value, msg="Lagrange for x%g has bad value at x%g" % (i, j))
class TestLagrangePolyUnderdeterminedQuadratic2(unittest.TestCase):
def runTest(self):
n = 2
npt = 2 * n + 1
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = x0 + np.array([-0.1, 2.0])
model.change_point(4, x4 - model.xbase, objfun(x4))
xopt = model.xopt()
for i in range(npt):
c, g, hess = model.lagrange_polynomial(i) # based at xopt
for j in range(npt):
dx = model.xpt(j) - xopt
lag_value = c + model_value(g, hess, dx)
expected_value = 1.0 if i == j else 0.0
self.assertAlmostEqual(lag_value, expected_value, msg="Lagrange for x%g has bad value at x%g" % (i, j))
class TestLagrangePolyFullQuadratic(unittest.TestCase):
def runTest(self):
n = 2
npt = (n + 1) * (n + 2) // 2
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = x0 + np.array([-0.1, 2.0])
model.change_point(4, x4 - model.xbase, objfun(x4))
x5 = x0 + np.array([-1.1, 1.0])
model.change_point(5, x5 - model.xbase, objfun(x5))
xopt = model.xopt()
for i in range(npt):
c, g, hess = model.lagrange_polynomial(i) # based at xopt
for j in range(npt):
dx = model.xpt(j) - xopt
lag_value = c + model_value(g, hess, dx)
expected_value = 1.0 if i == j else 0.0
self.assertAlmostEqual(lag_value, expected_value, msg="Lagrange for x%g has bad value at x%g" % (i, j))
class TestPoisednessLinear(unittest.TestCase):
def runTest(self):
n = 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
delta = 0.5
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
model.add_new_sample(0, rosenbrock(x0))
x1 = x0 + delta * np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = x0 + delta * np.array([0.0, 1.0])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
model.kopt = 0 # force this
        # Here (in units of delta), the Lagrange polynomials are 1-x-y, x and y
        # Maximum abs value in ball is for (1-x-y) at (x,y)=(-1/sqrt2, -1/sqrt2) --> max value = 1 + sqrt(2)
self.assertAlmostEqual(model.poisedness_constant(delta), 1.0 + sqrt(2.0), places=6, msg="Poisedness wrong")
class TestPoisednessFullQuadratic(unittest.TestCase):
def runTest(self):
# DFO book, Figure 3.1 (note errata) - solution from Mathematica
n = 2
npt = (n + 1) * (n + 2) // 2
x0 = np.array([0.5, 0.5])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = np.array([0.05, 0.1])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = np.array([0.1, 0.05])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = np.array([0.95, 0.9])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = np.array([0.9, 0.95])
model.change_point(4, x4 - model.xbase, objfun(x4))
x5 = np.array([0.85, 0.85])
model.change_point(5, x5 - model.xbase, objfun(x5))
delta = 0.5
model.kopt = 0 # force base point
self.assertLessEqual(model.poisedness_constant(delta), 294.898, msg="Poisedness wrong")
class TestPoisednessUnderdeterminedQuadratic(unittest.TestCase):
def runTest(self):
# Based originally on DFO book, Figure 3.3 - solution from Mathematica
n = 2
npt = 2*n + 1
x0 = np.array([0.5, 0.5])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = np.array([0.524, 0.0006])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = np.array([0.032, 0.323])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = np.array([0.187, 0.89])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = np.array([0.982, 0.368])
model.change_point(4, x4 - model.xbase, objfun(x4))
delta = 0.5
model.kopt = 0 # force base point
self.assertAlmostEqual(model.poisedness_constant(delta), 1.10018, places=3, msg="Poisedness wrong")
class TestAddPoint(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
# Now add a new point
x3 = np.array([1.0, 1.0]) # good point
add_ok = model.add_new_point(x3 - model.xbase, rosenbrock(x3))
self.assertTrue(add_ok, "Adding x3 failed")
self.assertEqual(model.npt(), 4, "Wrong number of points after x3")
self.assertTrue(array_compare(model.xpt(3, abs_coordinates=True), x3), "Wrong new point after x3")
self.assertTrue(array_compare(model.fval(3), rosenbrock(x3)), "Wrong fval after x3")
self.assertEqual(model.kopt, 3, "Wrong kopt after x3")
self.assertEqual(len(model.nsamples), 4, "Wrong nsamples length after x3")
self.assertEqual(model.nsamples[-1], 1, "Wrong nsample value after x3")
x4 = np.array([-1.8, 1.8]) # bad point
add_ok = model.add_new_point(x4 - model.xbase, rosenbrock(x4))
self.assertTrue(add_ok, "Adding x4 failed")
self.assertEqual(model.npt(), 5, "Wrong number of points after x4")
self.assertTrue(array_compare(model.xpt(4, abs_coordinates=True), x4), "Wrong new point after x4")
self.assertTrue(array_compare(model.fval(4), rosenbrock(x4)), "Wrong fval after x4")
self.assertEqual(model.kopt, 3, "Wrong kopt after x4")
x5 = np.array([-1.0, 1.0])
add_ok = model.add_new_point(x5 - model.xbase, rosenbrock(x5))
self.assertTrue(add_ok, "Adding x5 failed")
self.assertEqual(model.npt(), 6, "Wrong number of points after x5")
x6 = np.array([-1.5, 1.5])
add_ok = model.add_new_point(x6 - model.xbase, rosenbrock(x6))
self.assertFalse(add_ok, "Adding x6 should have failed")
self.assertEqual(model.npt(), 6, "Wrong number of points after x6")
self.assertTrue(array_compare(model.xpt(5, abs_coordinates=True), x5), "Wrong new point after x6")
self.assertTrue(array_compare(model.fval(5), rosenbrock(x5)), "Wrong fval after x6")
self.assertEqual(model.kopt, 3, "Wrong kopt after x6")
|
Before you can set about improving your customers’ experience of your company or brand, you need to understand how they interact with you throughout their entire ‘journey’: from before they even recognise a need for what you do, all the way through to when they no longer require your product or service and leave you.
Journey mapping is about experiencing your company through the eyes of the customer. It’s about focusing on each point of contact they have with you from their perspective. These touchpoints could be anything from advertising to a Facebook page, a contact centre IVR, online forms, field staff or cancellation processes. Journey maps are about walking a mile in your customers’ shoes.
Below is a list of eight high-level points to keep in mind when plotting your customer journey maps.
Know what you stand for. What is your brand promise that you are trying to infuse in your customers at every interaction? Ensure that at each and every touchpoint you are reinforcing your brand and delivering on the brand promise.
Have a plan. What is the ideal journey you’d like a customer to take? Work with key internal stakeholders to define the perfect experience for your customers. This will give you a broader perspective and ensures their buy-in to the final plan.
Segment your customer population into personas. Each persona needs to be a realistic representation of a customer segment. Each will have different attitudes, needs, motivations, behaviours and expectations, and it’s quite likely that each persona will take a different journey with your company as well.
Plot the journey. For each persona, plot the journey they take from initial awareness, through information gathering, decision to purchase, fulfilment and post-sales support. For high-involvement purchase decisions, such as computers, bank accounts and cars, there will be many steps in the process.
Understand the deltas. At each touchpoint, what are the customer’s expectations versus what you are delivering? To understand the customer’s expectations you need to understand their emotions, influences and the choices available to them. Customer feedback is critical: collect, collate and analyse it. The deltas between expectation and reality are your opportunities for improvement (see the sketch after this list).
Causal factors. At each touchpoint, determine the factors that are causing the deltas between expectation and experience. Is it the people you have working for you? The processes that support them? The systems you have in place? Or maybe it’s the tools your people are using? On the other side of the coin, maybe your customer’s expectations are just too high. What’s causing this? What do you need to do to manage it?
Moments of Truth. Keep in mind that not every touchpoint is as important as every other. Some touchpoints are more critical than others in terms of their relevancy to your success and these are called Moments of Truth. Prioritise the elimination of the deltas at these points.
Socialise. Communicate your customer journey maps internally. Ensure all key internal stakeholders understand the part they play in the delivery of experiences and the power they have to improve them. Rally them around the plan and continually communicate the steps you’re taking to improve customer experience. People need to know that action is being taken as a result of their buy-in.
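To make the ‘deltas’ and ‘Moments of Truth’ points concrete, here is a minimal illustrative sketch, in Python, of a journey map represented as data. Every name, field and the 1–10 scoring scale is a hypothetical assumption for illustration, not a standard journey-mapping format.
# Minimal illustrative sketch only: all names, fields and the 1-10 scoring
# scale are hypothetical, not a standard journey-mapping format.
from dataclasses import dataclass

@dataclass
class Touchpoint:
    name: str                      # e.g. "contact centre IVR"
    expectation: float             # what the persona expects (1-10)
    experience: float              # what is actually delivered (1-10)
    moment_of_truth: bool = False  # critical touchpoints outrank the rest

    @property
    def delta(self) -> float:
        # The gap between expectation and delivery is the improvement opportunity.
        return self.expectation - self.experience

def prioritise(journey):
    # Moments of Truth first, then largest expectation/experience gap.
    return sorted(journey, key=lambda t: (t.moment_of_truth, t.delta), reverse=True)

journey = [
    Touchpoint("advertising", 7, 7),
    Touchpoint("online forms", 8, 5),
    Touchpoint("contact centre IVR", 8, 4, moment_of_truth=True),
]
for t in prioritise(journey):
    print(t.name, t.delta)
Run over each persona’s journey, this simply surfaces where to act first; the real work remains gathering honest expectation and experience scores from customer feedback.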
|
"""
# TOP2049 Open Source programming suite
#
# Microchip PIC18F2320 DIP28
#
# Copyright (c) 2013 Pavel Stemberk <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from .microchip8_18f1220family import *
class Chip_PIC18F2320dip28(microchip8_18f1220family):
hasEEPROM = True
writeBufferSize = 8
eraseBufferSize = 64
def __init__(self):
microchip8_18f1220family.__init__(self,
chipPackage="DIP28",
chipPinVCC=20,
chipPinsVPP=1,
chipPinGND=19,
signature=b"\x80\x05",
flashPageSize=0x2000,
flashPages=1,
eepromPageSize=0x100,
eepromPages=1,
fuseBytes=14
)
fuseDesc = (
BitDescription(0o00, "NA"),
BitDescription(0o01, "NA"),
BitDescription(0o02, "NA"),
BitDescription(0o03, "NA"),
BitDescription(0o04, "NA"),
BitDescription(0o05, "NA"),
BitDescription(0o06, "NA"),
BitDescription(0o07, "NA"),
BitDescription(0o10, "FOSC[0], 0=LP, 100=INTOSC"),
BitDescription(0o11, "FOSC[1]"),
BitDescription(0o12, "FOSC[2]"),
BitDescription(0o13, "FOSC[3]"),
BitDescription(0o14, "NA"),
BitDescription(0o15, "NA"),
BitDescription(0o16, "FSCM, 0=Fail-Safe Clock Monitor is disabled"),
BitDescription(0o17, "IESO, 0=Internal/External Switchover mode is disabled"),
BitDescription(0o20, "nPWRT"),
BitDescription(0o21, "BOR"),
BitDescription(0o22, "BORV[0]"),
BitDescription(0o23, "BORV[1]"),
BitDescription(0o24, "NA"),
BitDescription(0o25, "NA"),
BitDescription(0o26, "NA"),
BitDescription(0o27, "NA"),
BitDescription(0o30, "WDT, 0=WDT disabled, 1=WDT enabled"),
BitDescription(0o31, "WDTPS[0]"),
BitDescription(0o32, "WDTPS[1]"),
BitDescription(0o33, "WDTPS[2]"),
BitDescription(0o34, "WDTPS[3]"),
BitDescription(0o35, "NA"),
BitDescription(0o36, "NA"),
BitDescription(0o37, "NA"),
BitDescription(0o40, "NA"),
BitDescription(0o41, "NA"),
BitDescription(0o42, "NA"),
BitDescription(0o43, "NA"),
BitDescription(0o44, "NA"),
BitDescription(0o45, "NA"),
BitDescription(0o46, "NA"),
BitDescription(0o47, "NA"),
BitDescription(0o50, "NA"),
BitDescription(0o51, "NA"),
BitDescription(0o52, "NA"),
BitDescription(0o53, "NA"),
BitDescription(0o54, "NA"),
BitDescription(0o55, "NA"),
BitDescription(0o56, "NA"),
BitDescription(0o57, "MCLRE"),
BitDescription(0o60, "STVR"),
BitDescription(0o61, "NA"),
BitDescription(0o62, "LVP"),
BitDescription(0o63, "NA"),
BitDescription(0o64, "NA"),
BitDescription(0o65, "NA"),
BitDescription(0o66, "NA"),
BitDescription(0o67, "nDEBUG"),
BitDescription(0o70, "NA"),
BitDescription(0o71, "NA"),
BitDescription(0o72, "NA"),
BitDescription(0o73, "NA"),
BitDescription(0o74, "NA"),
BitDescription(0o75, "NA"),
BitDescription(0o76, "NA"),
BitDescription(0o77, "NA"),
BitDescription(0o100, "CP[0]"),
BitDescription(0o101, "CP[1]"),
BitDescription(0o102, "CP[2]"),
BitDescription(0o103, "CP[3]"),
BitDescription(0o104, "NA"),
BitDescription(0o105, "NA"),
BitDescription(0o106, "NA"),
BitDescription(0o107, "NA"),
BitDescription(0o110, "NA"),
BitDescription(0o111, "NA"),
BitDescription(0o112, "NA"),
BitDescription(0o113, "NA"),
BitDescription(0o114, "NA"),
BitDescription(0o115, "NA"),
BitDescription(0o116, "CPB"),
BitDescription(0o117, "CPD"),
BitDescription(0o120, "WRT[0]"),
BitDescription(0o121, "WRT[1]"),
BitDescription(0o122, "NA"),
BitDescription(0o123, "NA"),
BitDescription(0o124, "NA"),
BitDescription(0o125, "NA"),
BitDescription(0o126, "NA"),
BitDescription(0o127, "NA"),
BitDescription(0o130, "NA"),
BitDescription(0o131, "NA"),
BitDescription(0o132, "NA"),
BitDescription(0o133, "NA"),
BitDescription(0o134, "NA"),
BitDescription(0o135, "WRTC"),
BitDescription(0o136, "WRTB"),
BitDescription(0o137, "WRTD"),
BitDescription(0o140, "EBTR[0]"),
BitDescription(0o141, "EBTR[1]"),
BitDescription(0o142, "NA"),
BitDescription(0o143, "NA"),
BitDescription(0o144, "NA"),
BitDescription(0o145, "NA"),
BitDescription(0o146, "NA"),
BitDescription(0o147, "NA"),
BitDescription(0o150, "NA"),
BitDescription(0o151, "NA"),
BitDescription(0o152, "NA"),
BitDescription(0o153, "NA"),
BitDescription(0o154, "NA"),
BitDescription(0o155, "NA"),
BitDescription(0o156, "EBTRB"),
BitDescription(0o157, "NA"),
)
ChipDescription(
Chip_PIC18F2320dip28,
bitfile="microchip01dip28",
chipID="PIC18F2320dip28",
runtimeID=(0xDE07, 0x01),
chipVendors="Microchip",
description="PIC18F2320",
	packages=(("DIP28", ""),),
fuseDesc=fuseDesc,
maintainer="Pavel Stemberk <[email protected]>",
)
|
Hotel Olimpo-Le Terrazze is located in Letojanni, a town close to Taormina. Thanks to its perfect location it has some of the best views across Taormina Bay, which can be admired not only from the hotel and its grounds but also on your way down to the town or beach in the glass lifts.
There is an entertainment team that keeps the children and adults entertained throughout their stay. For the children there is a Mini & Junior club offering lots of different activities to keep any child busy. The children also have their own exclusive child-friendly swimming pool. For children of 12 years and older, there are activities designed around their ages, such as football and table tennis tournaments.
During the day, adults can enjoy a daily programme filled with activities such as aqua gym, sports tournaments and dance classes. Each evening there is live music in the Bar Olimpo and Antares, and in the summer months from June there is a cabaret show at the Antares theatre, hosted by the entertainment team.
Standard Terrazze Rooms are approximately 20-22m² and have modern amenities such as tea-making facilities, a safe, air conditioning, a telephone, satellite TV, free WiFi and a hairdryer. All of the rooms have a balcony equipped with a table and two chairs and a view of the inner garden or the surrounding hills. Each room has a private bathroom with shower or bathtub. A selection of Standard Terrazze rooms with a sea view is available at a supplement.
Standard Olimpo Rooms are between 20-26m² and include a fully equipped balcony and the amenities above. Sea view rooms are also available for a supplement. These rooms are located in the Olimpo/Belvedere buildings.
Both room types are also available as doubles for sole occupancy; please call to book these.
Breakfast is served in the La Fontane restaurant offering a selection of Italian and International dishes.
Dinner can be enjoyed in La Bouganville Grill- Pizzeria, which is located by the swimming pool.
There is also an option to include All Inclusive, which you can call to book.
La Fontane Restaurant is open for breakfast, lunch and dinner and serves a variety of Italian and international cuisine buffet style.
La Bouganville is situated by the pool and serves grilled meats and fresh pizza for lunch (open high season only).
Antares American Bar serves cocktails, spirits, soft drinks, teas and coffees, as well as a selection of snacks.
The large outdoor pool at Hotel Olimpo has a varying depth of 1.2 – 2.4 metres, and there are two smaller pools with whirlpools to enjoy, too. There are plenty of sun loungers and umbrellas dotted around, and the placement of the pool means you’ll get to enjoy fantastic sea views while you sunbathe. Families with children are also welcome to use the pool at Hotel Antares, which has a shallow depth that’s perfect for little ones.
Those who prefer the beach have access to a beach club in Letojanni-Mazzeo, where there are sun loungers and umbrellas.
Nettuno Wellness Centre has an indoor pool, sauna, Turkish bath and gym. Beauty treatments and massages are payable locally. Children are not permitted in the wellness centre.
There is a Mini & Junior club for your children to keep them busy during the day and for the adults, a daily programme bursting with different activities.
Hotel Olimpo offers family rooms that are suitable for a family of 4.
Early Booking Offer: Save 10% on the hotel price for holidays booked by 30 Mar 19.
|
#!/usr/bin/python
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# http://blockly.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two files:
# demos/blockly_compressed.js
# demos/blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but may be useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
import httplib, json, urllib, sys
filenames = [
'blockly.js',
'block.js',
'block_svg.js',
'bubble.js',
'comment.js',
'connection.js',
'contextmenu.js',
'field.js',
'field_dropdown.js',
'field_checkbox.js',
'field_colour.js',
'field_image.js',
'field_label.js',
'field_textinput.js',
'field_variable.js',
'flyout.js',
'generator.js',
'inject.js',
'input.js',
'mutator.js',
'names.js',
'procedures.js',
'scrollbar.js',
'toolbox.js',
'tooltip.js',
'trashcan.js',
'utils.js',
'variables.js',
'warning.js',
'workspace.js',
'xml.js']
header = ('// Do not edit this file; automatically generated by build.py.\n'
'"use strict";')
def gen_uncompressed():
target_filename = 'demos/blockly_uncompressed.js'
inc = '''%s
document.write('<script type="text/javascript" src="../../../closure-library-read-only/closure/goog/base.js"></script>');
(function() {
var filenames = %s;
for (var x = 0; x < filenames.length; x++) {
document.write('<script type="text/javascript" src="../../core/' + filenames[x] + '"></script>');
}
})();
''' % (header, filenames)
f = open(target_filename, 'w')
f.write(inc)
f.close()
print 'SUCCESS: ' + target_filename
def gen_compressed():
target_filename = 'demos/blockly_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('use_closure_library', 'true'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
for filename in filenames:
f = open('core/' + filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
# Send the request to Google.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
  conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
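  # The Closure Compiler API labels each uploaded source as 'Input_0',
  # 'Input_1', ... in its diagnostics; map those labels back to filenames.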
def file_lookup(name):
if not name.startswith('Input_'):
return '???'
n = int(name[6:])
return filenames[n]
if json_data.has_key('errors'):
errors = json_data['errors']
for error in errors:
print 'FATAL ERROR'
print error['error']
print '%s at line %d:' % (
file_lookup(error['file']), error['lineno'])
print error['line']
print (' ' * error['charno']) + '^'
else:
if json_data.has_key('warnings'):
warnings = json_data['warnings']
for warning in warnings:
print 'WARNING'
print warning['warning']
print '%s at line %d:' % (
file_lookup(warning['file']), warning['lineno'])
print warning['line']
print (' ' * warning['charno']) + '^'
print
code = header + '\n' + json_data['compiledCode']
stats = json_data['statistics']
original_b = stats['originalSize']
compressed_b = stats['compressedSize']
if original_b > 0 and compressed_b > 0:
f = open(target_filename, 'w')
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print 'SUCCESS: ' + target_filename
print 'Size changed from %d KB to %d KB (%d%%).' % (
original_kb, compressed_kb, ratio)
else:
print 'UNKNOWN ERROR'
if __name__ == '__main__':
gen_uncompressed()
gen_compressed()
|
Why'd Anderson leave the party?
Although he became famous for his condemnation of the War on Terror and the Bush administration, former Salt Lake City Mayor Rocky Anderson recently formally left the Democratic Party. City Weekly asked Anderson about his future plans and what prompted his defection.
You asked for your name to be removed from the Democratic Congressional Campaign Committee list. What was the straw that broke the camel’s back?
Actually, I think the final straw occurred long before I finally sent my letter. I have not been enamored with the Democratic Party for many years. I think it’s a party that, especially when it’s in power, is incredibly timid, unprincipled and gutless. The Democratic Party could have passed a single-payer health-care system at least approaching the civilized medical coverage system in the rest of the industrialized world. It could have ended the practice of torture, it could have insisted upon accountability for war crimes, and it could have stopped the outrageous Bush [tax] cuts for the wealthy, which have led our country into very dangerous economic times and are, I think, a real disaster for young people on whose shoulders the burden of our accumulated debt and interest payments is going to be resting.
You have said the answer to the budget problems in the United States is a new political party that promotes the interests of the public rather than the interests of the wealthy. Would you join this party?
We’ve seen an enormous transformation of the parties. It used to be the Republicans were bought and paid for by the banking and insurance interests and the Democrats were bought and paid for by labor and others on the left. Now, they’re all feeding at basically the same trough and are almost indistinguishable, except on certain wedge issues. In the meantime, the pensions and other savings accounts of the middle class in this country have been decimated. The only way out is another party. I would call it, frankly, a second party that actually represents the interests of the American people. There isn’t a real opposition force in Washington, D.C., anymore, and we the people have the capacity to change that and we must if our republic is going to survive. I consider myself an Independent, but I would be very pleased to work with others to form not just a political party to run another campaign, but to launch a sustained movement for major change in this country.
You’ve said you have no interest in running for office. Is that still true?
I really feel that movement-building and organizing people at the grass-roots level is where the power really lies in this country. It’s obvious now that, in most instances, we’re not going to see action being taken to solve problems facing our nation and our world unless political pressure is brought to bear, so I have a tremendous commitment to High Road for Human Rights. But I also would certainly contemplate running for elected office if I believed it could move things forward in an effective manner.
What would you say to young people who want to be politically active?
Everyone should be an active participant in their communities, in their nation and in their world. That’s not only a huge responsibility, but it’s an amazing opportunity to exercise what it means to be a moral human being. As long as there are people who are suffering in the world and we can help, we have an enormous obligation to do what we can. There are many ways of doing that. It’s not necessarily by running for office. It’s organizing with others. It’s pushing for change. It’s teaching. It’s raising awareness in our communities, starting with our families and friends. I think the question everybody should be asking [themselves] is, when they are 70 or 80 years old and they are looking back over their lives, what do they want to be able to say that they did to make this world a better place and to create a legacy that they can be proud of?
|
from pyrocko import gf
from pyrocko import model, util
from pyrocko import orthodrome as otd
from pyrocko import moment_tensor as mt
from pyrocko import trace
from beat.sources import RectangularSource
from beat import ffi, models
import numpy as num
from beat import inputf, utility, heart, config
import os
km = 1000.
util.setup_logging('test_ffi_stacking', 'info')
# set random seed for reproducible station locations
num.random.seed(10)
nuc_dip = 5.
nuc_strike = 2.
time_shift = -10. # from previous inversion
# general
project_dir = '/home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE_wide_kin3'
store_superdirs = ['/home/vasyurhm/GF/Laquila']
white_noise_perc_max = 0.025 # White noise to disturb the synthetic data, in percent to the maximum amplitude [Hallo et al. 2016 use 0.01]
problem = models.load_model(project_dir, mode='ffi', build=False)
event = problem.config.event
components = ['uparr'] #, 'uperp']
starttime_sampling = 0.5
arrival_taper = heart.ArrivalTaper(
a=-15.,
b=-10.,
c=50.,
d=55.)
sc = problem.composites['seismic']
fault = sc.load_fault_geometry()
# get number of patches in dip and strike direction
npdip, npstrike = fault.ordering.get_subfault_discretization(0)
# do fast sweeping to get rupture onset times for patches with respect to hypocenter
velocities = num.ones((npdip, npstrike)) * 3.5
nuc_dip_idx, nuc_strike_idx = fault.fault_locations2idxs(
0, nuc_dip, nuc_strike, backend='numpy')
starttimes = fault.get_subfault_starttimes(
0, velocities, nuc_dip_idx, nuc_strike_idx).ravel() + time_shift
print(starttimes)
# defining distributed slip values for slip parallel and perpendicular directions
uparr = num.ones((npdip, npstrike)) * 2.
#uparr[1:3, 3:7] = 1.5
uperp = num.zeros((npdip, npstrike))
#uperp[0,0] = 1.
#uperp[3,9] = 1.
uperp[1:3, 3:7] = 1.0
# define rupture durations on each patch
durations = num.ones((npdip, npstrike)) * 0.5
slips = {
components[0]: uparr.ravel(),
# components[1]: uperp.ravel(),
'durations': durations.ravel(),
'velocities': velocities.ravel()
}
print('fault parameters', slips)
# update patches with distributed slip and STF values
for comp in components:
patches = fault.get_subfault_patches(0, datatype='seismic', component=comp)
for patch, starttime, duration, slip in zip(
patches, starttimes, durations.ravel(), slips[comp]):
#stf = gf.HalfSinusoidSTF(anchor=-1., duration=float(duration))
patch.stf.duration = float(duration)
#stime = num.round(starttime / starttime_sampling) * starttime_sampling
patch.update(slip=float(slip), time=event.time + float(starttime))
# print(patch)
# synthetics generation
engine = gf.LocalEngine(store_superdirs=store_superdirs)
patchidx = fault.patchmap(
index=0, dipidx=nuc_dip_idx, strikeidx=nuc_strike_idx)
targets = sc.wavemaps[0].targets
filterer = sc.wavemaps[0].config.filterer
ntargets = len(targets)
gfs = ffi.load_gf_library(
directory=project_dir + '/ffi/linear_gfs/',
filename='seismic_uparr_any_P_0')
ats = gfs.reference_times - arrival_taper.b
traces, tmins = heart.seis_synthetics(
engine, patches, targets, arrival_times=ats,
wavename='any_P', arrival_taper=arrival_taper,
filterer=filterer, outmode='stacked_traces')
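# Default: stack over all targets at once using a full slice. The disabled
# branch below would instead build explicit per-target start times and
# indices (e.g. for station corrections).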
targetidxs = num.lib.index_tricks.s_[:]
if False:
# for station corrections maybe in the future?
station_corrections = num.zeros(len(traces))
starttimes = (num.tile(starttimes, ntargets) + num.repeat(
station_corrections, fault.npatches)).reshape(
ntargets, fault.npatches)
targetidxs = num.atleast_2d(num.arange(ntargets)).T
gfs.set_stack_mode('numpy')
synthetics_nn = gfs.stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=durations.ravel(),
slips=slips[components[0]],
interpolation='nearest_neighbor')
synthetics_ml = gfs.stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=durations.ravel(),
slips=slips[components[0]],
interpolation='multilinear')
gfs.init_optimization()
synthetics_nn_t = gfs.stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=durations.ravel(),
slips=slips[components[0]],
interpolation='nearest_neighbor').eval()
synthetics_ml_t = gfs.stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=durations.ravel(),
slips=slips[components[0]],
interpolation='multilinear').eval()
synth_traces_nn = []
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_nn[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
#print('trace tmin synthst', tr.tmin)
tr.set_codes(*target.codes)
tr.set_location('nn')
synth_traces_nn.append(tr)
synth_traces_ml = []
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_ml[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
#print 'trace tmin synthst', tr.tmin
tr.set_codes(*target.codes)
tr.set_location('ml')
synth_traces_ml.append(tr)
synth_traces_nn_t = []
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_nn_t[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
#print('trace tmin synthst', tr.tmin)
tr.set_codes(*target.codes)
tr.set_location('nn_t')
synth_traces_nn_t.append(tr)
synth_traces_ml_t = []
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_ml_t[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
#print 'trace tmin synthst', tr.tmin
tr.set_codes(*target.codes)
tr.set_location('ml_t')
synth_traces_ml_t.append(tr)
# display to check
trace.snuffle(
traces + synth_traces_nn + synth_traces_ml + synth_traces_nn_t + synth_traces_ml_t,
stations=sc.wavemaps[0].stations, events=[event])
traces1, tmins = heart.seis_synthetics(
engine, [patches[0]], targets, arrival_times=ats,
wavename='any_P', arrival_taper=arrival_taper,
filterer=filterer, outmode='stacked_traces')
gfs.set_stack_mode('numpy')
synth_traces_ml1 = []
for i in range(1):
synthetics_ml1 = gfs.stack_all(
targetidxs=targetidxs,
patchidxs=[i],
starttimes=starttimes[0],
durations=durations.ravel()[0],
slips=num.atleast_1d(slips[components[0]][0]),
interpolation='multilinear')
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_ml1[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
print('trace tmin synthst', tr.tmin)
#print(target.codes)
tr.set_codes(*target.codes)
tr.set_location('ml%i' % i)
synth_traces_ml1.append(tr)
trace.snuffle(
traces1 + synth_traces_ml1,
stations=sc.wavemaps[0].stations, events=[event])
# convert pyrocko traces to beat traces
beat_traces = []
for tr in traces:
#print tr
btrc = heart.SeismicDataset.from_pyrocko_trace(tr)
seis_err_std = num.abs(btrc.ydata).max() * white_noise_perc_max
noise = num.random.normal(0, seis_err_std, btrc.ydata.shape[0])
btrc.ydata += noise
btrc.set_location('0')
beat_traces.append(btrc)
# display to check noisy traces
#trace.snuffle(beat_traces, stations=stations, events=[event])
# save data to project folder
seismic_outpath = os.path.join(project_dir, 'seismic_data.pkl')
#util.ensuredir(project_dir)
#print 'saving synthetic data to: ', seismic_outpath
#utility.dump_objects(seismic_outpath, outlist=[stations, beat_traces])
|
Aimed at dancers in training, The Masks We Wear is a workshop to explore our identity on and off stage.
In this workshop we will talk about makeup, filters and masks, exploring the shadows that we hide from ourselves.
Participants in this workshop are invited to stay afterwards for some snacks, followed by an introduction to the work of Zsuzsa Rózsavölgyi before her performance of 1.7 at 8.00pm.
With support from Northern School of Contemporary Dance – CAT Scheme and Phoenix Youth Academy.
|
from nipype.interfaces.base import (traits, File, Directory, TraitedSpec,
OutputMultiPath)
import os.path as op
import glob
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.utils.filemanip import list_to_filename
from nipype.external import six
import logging
logging.basicConfig()
iflogger = logging.getLogger('interface')
class WatershedBEMInputSpec(FSTraitedSpec):
subject_id = traits.Str(argstr='--subject %s', mandatory=True,
desc='Subject ID (must have a complete Freesurfer directory)')
subjects_dir = Directory(exists=True, mandatory=True, usedefault=True,
desc='Path to Freesurfer subjects directory')
volume = traits.Enum('T1', 'aparc+aseg', 'aseg', 'brain', 'orig', 'brainmask', 'ribbon',
argstr='--volume %s', usedefault=True,
desc='The volume from the "mri" directory to use (defaults to T1)')
overwrite = traits.Bool(True, usedefault=True, argstr='--overwrite',
desc='Overwrites the existing files')
atlas_mode = traits.Bool(argstr='--atlas',
desc='Use atlas mode for registration (default: no rigid alignment)')
class WatershedBEMOutputSpec(TraitedSpec):
mesh_files = OutputMultiPath(File(exists=True),
desc=('Paths to the output meshes (brain, inner '
'skull, outer skull, outer skin)'))
brain_surface = File(exists=True, loc='bem/watershed',
desc='Brain surface (in Freesurfer format)')
inner_skull_surface = File(exists=True, loc='bem/watershed',
desc='Inner skull surface (in Freesurfer format)')
outer_skull_surface = File(exists=True, loc='bem/watershed',
desc='Outer skull surface (in Freesurfer format)')
outer_skin_surface = File(exists=True, loc='bem/watershed',
desc='Outer skin surface (in Freesurfer format)')
fif_file = File(exists=True, loc='bem', altkey='fif',
desc='"fif" format file for EEG processing in MNE')
cor_files = OutputMultiPath(File(exists=True), loc='bem/watershed/ws',
altkey='COR', desc='"COR" format files')
class WatershedBEM(FSCommand):
    """Uses mne_watershed_bem to compute BEM surfaces from a FreeSurfer subject directory
Examples
--------
>>> from nipype.interfaces.mne import WatershedBEM
>>> bem = WatershedBEM()
>>> bem.inputs.subject_id = 'subj1'
>>> bem.inputs.subjects_dir = '.'
>>> bem.cmdline
'mne_watershed_bem --overwrite --subject subj1 --volume T1'
>>> bem.run() # doctest: +SKIP
"""
_cmd = 'mne_watershed_bem'
input_spec = WatershedBEMInputSpec
output_spec = WatershedBEMOutputSpec
_additional_metadata = ['loc', 'altkey']
def _get_files(self, path, key, dirval, altkey=None):
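        # Glob for files matching '*<key>*' (or '*<altkey>*' if given) inside
        # the 'dirval' subdirectory of the subject path.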
globsuffix = '*'
globprefix = '*'
keydir = op.join(path, dirval)
if altkey:
key = altkey
globpattern = op.join(keydir, ''.join((globprefix, key, globsuffix)))
return glob.glob(globpattern)
def _list_outputs(self):
outputs = self.output_spec().get()
subjects_dir = self.inputs.subjects_dir
subject_path = op.join(subjects_dir, self.inputs.subject_id)
output_traits = self._outputs()
mesh_paths = []
for k in outputs.keys():
if k != 'mesh_files':
val = self._get_files(subject_path, k,
output_traits.traits()[k].loc,
output_traits.traits()[k].altkey)
if val:
value_list = list_to_filename(val)
if isinstance(value_list, list):
out_files = []
for value in value_list:
out_files.append(op.abspath(value))
elif isinstance(value_list, six.string_types):
out_files = op.abspath(value_list)
else:
raise TypeError
outputs[k] = out_files
if not k.rfind('surface') == -1:
mesh_paths.append(out_files)
outputs['mesh_files'] = mesh_paths
return outputs
|
We have over 20 years’ experience in designing bespoke conservatories to whatever size, finish and style you desire. We guarantee that one of our finely crafted conservatories will revolutionise the way you use the space in your home.
Traditional in their styling, but perfectly up to date in their security features and thermal efficiency, our Victorian Conservatories are classically elegant with a curved faceted front and add an impressive extra living space to your property.
Ideal for both period and contemporary properties, an Edwardian Conservatory is characterised by strong, clean lines, sophisticated period detailing and are usually square or rectangular in design to maximise floor space.
A popular design, Georgian Conservatories are light and airy with a sloping, vaulted roof and offer maximum use of space and natural light. They can work equally well with modern as well as period properties.
With a Gable Conservatory, the front panel of the roof remains upright and forms a point, so you can take full advantage of the extra height and space in a bright and more open design that will add a "wow" factor to any property.
A Lean-to Conservatory is the perfect space saver and provides a real value for money solution. Simple, clean and uncomplicated, it can be constructed in even the smallest space and comes in a range of designs and finishes.
Using a bespoke combination of features from across our range of conservatory designs, a P-Shape Conservatory adds a large additional room to your home that is warm, light, bright and functional.
A T-Shape Conservatory is the perfect choice for those wishing to create a stunning contemporary addition to their home with maximum space and impact. Call our design team today to start your bespoke design.
|
#! /usr/bin/env python
#--------------------------------------------------
# Copyright 2008 Emma Goldberg
#
# This file is part of PieTree.
#
# PieTree is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PieTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PieTree. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------
######################################################
# Module: PieClasses.py
# Author: Emma Goldberg
# Date: Nov, 2011 (orig Apr 2008)
######################################################
from math import cos, sin, pi
import cairo
import TreeStruct
#--------------------------------------------------
# For drawing a tree of any shape
#--------------------------------------------------
class PieTree():
'''
This class contains:
* tree root
* tree attributes: ntips, nstates
* node/tip drawing functions
further fleshed out in the rectangular and radial subclasses
* cairo surface to be drawn to
* plotting variables
'''
def __init__(self, root, ntips, nstates, surface, plot_values):
self.root = root
self.ntips = ntips
self.nstates = nstates
self.surface = surface
self.plot_vars = plot_values
def MaxTipNameSize(self):
'''Find the longest (widest) tip name in this tree.'''
# todo: extract answer by using return rather than list
def MTNS(node, cr, tipsize):
if node.daughters == None:
thistipsize = cr.text_extents(node.label)[2]
if thistipsize > tipsize[0]:
tipsize[0] = thistipsize
else:
for d in node.daughters:
MTNS(d, cr, tipsize)
tipsize = [-1]
MTNS(self.root, self.surface, tipsize)
return tipsize[0]
def PlotTree(self):
'''Calls the drawing functions for the whole tree.'''
def PT(tree, node):
if node.daughters == None:
tree.DrawTip(node)
else:
tree.DrawFork(node)
if tree.plot_vars.pieradius > 0:
if node.state != None:
tree.DrawPie(node)
else:
print "NOTE: state not specified for %s" \
% (node.label)
if tree.plot_vars.nodenamesize > 0:
tree.DrawNodeLabel(node)
for d in node.daughters:
PT(tree, d)
self.DrawRoot()
PT(self, self.root)
def DrawTipMore(self, node, (x,y), delta):
'''Finish the work of DrawTip.'''
c = self.plot_vars
cr = self.surface
# box border
if c.rimthick > 0 and c.boxsize > 0:
cr.set_line_width(c.rimthick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], \
c.linecolor[2])
cr.stroke_preserve()
# tip color
if node.state in range(self.nstates):
i = node.state
cr.set_source_rgb(c.color[i][0], c.color[i][1], c.color[i][2])
else:
cr.set_source_rgb(0.5, 0.5, 0.5)
print "WARNING: check the state of %s" % node.label
cr.fill()
# tip label
if c.tipnamesize > 0:
if c.tipnamestatecolor != "yes":
cr.set_source_rgb(c.textcolor[0], c.textcolor[1], \
c.textcolor[2])
cr.set_font_size(c.tipnamesize)
textheight = cr.text_extents(node.label)[3]
cr.move_to(x + delta/2. + c.tipspacing/4., y + textheight/3.)
if c.underscorespace == "yes":
cr.show_text((node.label).replace("_", " "))
else:
cr.show_text(node.label)
def DrawPieMore(self, node, (x,y)):
'''Finish the work of DrawPie.'''
c = self.plot_vars
cr = self.surface
R = c.pieradius
# the outer circle of the pie
if c.rimthick > 0:
cr.set_line_width(c.rimthick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], \
c.linecolor[2])
cr.move_to(x, y)
cr.arc(x, y, R, 0, 2*pi)
cr.stroke()
# the pie pieces
angle_start = -pi/2
for i in range(self.nstates):
angle_stop = node.state[i] * 2 * pi + angle_start
cr.set_source_rgb(c.color[i][0], c.color[i][1], c.color[i][2])
cr.move_to(x, y)
cr.arc(x, y, R, angle_start, angle_stop)
cr.fill()
angle_start = angle_stop
def DrawNodeLabelMore(self, node, (x,y)):
'''Finish the work of DrawNodeLabel.'''
c = self.plot_vars
cr = self.surface
cr.set_source_rgb(c.textcolor[0], c.textcolor[1], c.textcolor[2])
cr.set_font_size(c.nodenamesize)
if node.label != None:
textheight = cr.text_extents(node.label)[3]
cr.move_to(x + c.pieradius + c.tipspacing/5., y + textheight/2.)
if c.underscorespace == "yes":
cr.show_text((node.label).replace("_", " "))
else:
cr.show_text(node.label)
def DrawScalebar(self):
'''Display the time scale.'''
c = self.plot_vars
cr = self.surface
cr.set_line_width(c.linethick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], c.linecolor[2])
# size of the label
showme = str(c.scalebar["length"])
tw = (cr.text_extents(showme)[2], cr.text_extents(showme)[3])
# note: "%.*e" % (n-1, x) rounds to n digits
x0 = self.Xform( (self.root.x, 0) )[0]
x1 = self.Xform( (self.root.x + c.scalebar["length"], 0) )[0]
y = c.height - c.ymargin/2
y0 = y - tw[1]
y1 = y + tw[1]
# actual scalebar
cr.move_to(x0, y)
cr.line_to(x1, y)
cr.stroke()
# whiskers
cr.move_to(x0, y0)
cr.line_to(x0, y1)
cr.stroke()
cr.move_to(x1, y0)
cr.line_to(x1, y1)
cr.stroke()
# label
cr.move_to((x0 + x1) / 2. - tw[0], y0)
cr.set_font_size(c.scalebar["textsize"])
cr.show_text(showme)
#--------------------------------------------------
# For drawing a rectangular tree
#--------------------------------------------------
class PieTreeRect(PieTree):
'''For plotting a rectangularly-oriented tree.'''
def CalcXY(self, tipsize):
'''Compute the (x, y) coordinate for each tip and node.
These are stored as .x and .y node attributes.
Also store horizontal scaling info as .xmax and .xscale.'''
# todo: extract answer by using return rather than list
def CXY(node, x, i, xmax):
if node.length != None:
x += node.length
node.x = x
if x > xmax[0]:
xmax[0] = x
if node.daughters != None:
for d in node.daughters:
i = CXY(d, x, i, xmax)
if node.daughters == None:
node.y = i
i += 1
else:
sum_y = 0.0
for d in node.daughters:
sum_y += d.y
node.y = sum_y / len(node.daughters)
return i
c = self.plot_vars
xmax = [-1]
CXY(self.root, 0, 0.5, xmax)
c.xmax = xmax[0]
c.xscale = (c.width - 2*c.xmargin - c.tipspacing - tipsize - \
c.pieradius) / c.xmax
def Xform(self, (x,y)):
'''Transform (x, y) coordinates from tree to canvas.'''
c = self.plot_vars
return(c.xmargin + c.pieradius + c.linethick + x * c.xscale, \
c.ymargin + y * c.tipspacing)
def DrawTip(self, node):
'''Draw the tip box, border, and label.'''
c = self.plot_vars
cr = self.surface
# the tip box
(x, y) = self.Xform( (node.x, node.y) )
delta = c.boxsize
cr.rectangle(x - delta/2., y-delta/2., delta, delta)
# everything else
self.DrawTipMore(node, (x,y), delta)
def DrawPie(self, node):
'''Draw the pie chart at this node.'''
xy = self.Xform( (node.x, node.y) )
self.DrawPieMore(node, xy)
def DrawFork(self, node):
'''Draw the fork to this node's daughters.'''
c = self.plot_vars
cr = self.surface
cr.set_line_width(c.linethick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], c.linecolor[2])
(x0, y0) = self.Xform( (node.x, node.y) )
for d in node.daughters:
(x, y) = self.Xform( (d.x, d.y) )
cr.move_to(x0, y0)
cr.line_to(x0, y)
cr.line_to(x, y)
cr.stroke()
def DrawNodeLabel(self, node):
'''Put the text label by this node.'''
xy = self.Xform( (node.x, node.y) )
self.DrawNodeLabelMore(node, xy)
def DrawRoot(self):
'''Draw the branch leading to the root.'''
c = self.plot_vars
cr = self.surface
(x0, y) = self.Xform( (0, self.root.y) )
(x, y) = self.Xform( (self.root.x, self.root.y) )
cr.set_line_width(c.linethick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], c.linecolor[2])
cr.move_to(x, y)
cr.line_to(x0, y)
cr.stroke()
#--------------------------------------------------
# For drawing a circular tree
#--------------------------------------------------
class PieTreeRadial(PieTree):
'''For plotting a radially-oriented tree.'''
def CalcXY(self, tipsize):
'''Compute the (x, y) and (r, theta) coordinate for each tip
and node. These are stored as node attributes .x .y .r .t.
Also store horizontal scaling info as .xmax and .xscale.'''
def CalcRT(node, r, i, rmax, ntips):
'''Compute the (r, theta) coordinates for each tip and node.
These are stored as .r and .t attributes.'''
if node.length != None:
r += node.length
node.r = r
if r > rmax[0]:
rmax[0] = r
if node.daughters != None:
for d in node.daughters:
i = CalcRT(d, r, i, rmax, ntips)
if node.daughters == None:
node.t = 2 * pi * i / ntips
i += 1
else:
sum_t = 0.0
for d in node.daughters:
sum_t += d.t
node.t = sum_t / len(node.daughters)
return i
def RTtoXY(node):
'''Convert polar to Cartesian coordinates.'''
if node.daughters != None:
for d in node.daughters:
RTtoXY(d)
node.x = node.r * cos(node.t)
node.y = node.r * sin(node.t)
c = self.plot_vars
rmax = [-1]
CalcRT(self.root, 0, 0, rmax, self.ntips)
RTtoXY(self.root)
c.xmax = rmax[0] * 2
c.xscale = (c.width - 2*c.xmargin - 2*c.tipspacing - 2*tipsize - \
2*c.pieradius) / c.xmax
def Xform(self, (x,y)):
'''transform (x, y) coordinates from tree to canvas'''
c = self.plot_vars
return (x * c.xscale + c.width/2., y * c.xscale + c.height/2.)
def DrawTip(self, node):
'''Draw the tip box, border, and label.'''
c = self.plot_vars
cr = self.surface
# the tip box
(x, y) = self.Xform( (node.x, node.y) )
delta = c.boxsize
m = cr.get_matrix() # for rotation, with set_matrix below
cr.translate(x, y)
cr.rotate(node.t)
cr.rectangle(0, -delta/2., delta, delta)
# everything else
self.DrawTipMore(node, (0,0), delta*2)
cr.set_matrix(m)
def DrawPie(self, node):
'''Draw the pie chart at this node.'''
cr = self.surface
(x, y) = self.Xform( (node.x, node.y) )
m = cr.get_matrix() # for rotation, with set_matrix below
cr.translate(x, y)
cr.rotate(node.t)
self.DrawPieMore(node, (0,0))
cr.set_matrix(m)
def DrawFork(self, node):
'''Draw the fork to this node's daughters.'''
c = self.plot_vars
cr = self.surface
cr.set_line_width(c.linethick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], c.linecolor[2])
cr.set_line_cap(cairo.LINE_CAP_ROUND)
(x0, y0) = self.Xform( (node.x, node.y) )
(mint, maxt) = (2*pi, 0)
for d in node.daughters:
if d.t < mint:
mint = d.t
if d.t > maxt:
maxt = d.t
(xd, yd) = self.Xform( (d.x, d.y) )
xa = node.r * cos(d.t)
ya = node.r * sin(d.t)
(xb, yb) = self.Xform( (xa, ya) )
cr.move_to(xd, yd)
cr.line_to(xb, yb)
cr.stroke()
cr.arc(c.width/2., c.height/2., node.r*c.xscale, mint, maxt)
cr.stroke()
def DrawNodeLabel(self, node):
'''Put the text label by this node.'''
cr = self.surface
(x, y) = self.Xform( (node.x, node.y) )
m = cr.get_matrix() # for rotation, with set_matrix below
cr.translate(x, y)
cr.rotate(node.t)
self.DrawNodeLabelMore(node, (0, 0) )
cr.set_matrix(m)
def DrawRoot(self):
'''Draw the branch leading to the root.'''
pass
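#--------------------------------------------------
# Usage sketch (not part of PieTree): how DrawPieMore maps per-state
# frequencies to pie-slice arc angles. Assumes node.state is a sequence of
# fractions summing to 1, as the loop in DrawPieMore expects.
#--------------------------------------------------
def pie_angles(state_freqs):
    '''Return (start, stop) arc angles in radians for each pie slice.'''
    angles = []
    angle_start = -pi/2  # slices begin at 12 o'clock, matching DrawPieMore
    for freq in state_freqs:
        angle_stop = freq * 2 * pi + angle_start
        angles.append((angle_start, angle_stop))
        angle_start = angle_stop
    return angles
# e.g. pie_angles([0.25, 0.75]) -> [(-pi/2, 0.0), (0.0, 3*pi/2)]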
|
I reside in the beautiful city of Nayagarh. To describe my family, we are a joint family with strong traditional values. I am successfully employed as an Executive in a private firm.
|
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from base import handlers
from base.handlers import AdminAjaxHandler
from generic import utils
from generic.utils import valid_slug
from dontbelate import settings
class AdminBaseDetailHandler(handlers.AdminHandler):
"""
Base object detail handler.
Usage:
Add the following class attrs to your handler.
model: model class
id_url_kwarg: url kwarg used in route
template_name: template name
"""
slug_fields = [('slug_en', 'Slug EN'), ('slug_pt', 'Slug PT')]
template_name = None
id_url_kwarg = 'obj_id'
def get_object(self, *args, **kwargs):
object_id = kwargs.get(self.id_url_kwarg)
try:
object_id = int(object_id)
except ValueError:
return self.abort(404)
obj = self.model.get_by_id(object_id)
if not obj:
return self.abort(404)
return obj
def get(self, *args, **kwargs):
self.render(self.template_name,
self.get_context_data(object=self.get_object(*args, **kwargs)))
def get_context_data(self, **kwargs):
return kwargs
def render_with_errors(self, obj, errors):
self.render(self.template_name,
self.get_context_data(object=obj, errors=errors))
def save_slugs(self, obj, errors):
"""
Call this method when saving form data
When calling this, it assumes the properties in self.slug_fields
are available on self.model
"""
for slug_name, label in self.slug_fields:
slug_value = self.request.get(slug_name)
slug_value = slug_value.strip()
setattr(obj, slug_name, slug_value)
if not slug_value:
errors.append('{} is required'.format(label))
return
if len(slug_value) < settings.MIN_SLUG_LENGTH:
errors.append('{} needs to be at least {} characters long.'.format(label, settings.MIN_SLUG_LENGTH))
if not valid_slug(slug_value):
errors.append('Enter a valid {} consisting of letters, numbers, underscores or hyphens.'.format(label))
else:
# check if obj with provided slug already exists
query = self.model.query(getattr(self.model, slug_name) == slug_value)
query = [item for item in query if not item == obj]
if query:
errors.append('{} with {} \'{}\' already exists'.format(self.model.__name__, label, slug_value))
class AdminImageUploadHandler(AdminAjaxHandler):
"""
Handles image upload from Croppic
"""
def post(self):
image_file = self.request.get('img')
thumbnail = utils.handle_image_upload(image_file)
self.render_json({
'status': 'success',
'url': thumbnail.url,
'width': thumbnail.width,
'height': thumbnail.height,
})
def DenyAccess(self):
self.render_json({'status': 'error', 'message': 'No access granted'})
def XsrfFail(self):
self.render_json({'status': 'error', 'message': 'XSRF token error'})
class AdminImageCropHandler(AdminAjaxHandler):
"""
Handles image crop from Croppic
"""
def post(self):
# handle image upload here
image_url = self.request.get('imgUrl')
image_w = int(float(self.request.get('imgW')))
image_h = int(float(self.request.get('imgH')))
image_x1 = float(self.request.get('imgX1'))
image_y1 = float(self.request.get('imgY1'))
crop_width = float(self.request.get('cropW'))
crop_height = float(self.request.get('cropH'))
image_file = urlfetch.fetch(image_url).content
thumbnail = utils.handle_image_crop(image_file, image_w, image_h, image_x1, image_y1, crop_width, crop_height)
self.render_json({
'status': 'success',
'url': thumbnail.url,
})
def DenyAccess(self):
self.render_json({'status': 'error', 'message': 'No access granted'})
def XsrfFail(self):
self.render_json({'status': 'error', 'message': 'XSRF token error'})
class AdminActionMixin(object):
"""
Adds action handling to admin change list handler.
Currently handles delete, publish and unpublish action.
Could hold more in the future.
Usage:
- Add a class attribute `model` to your handler
which should be set to the model class
- If `post` is implemented in the AdminHandler,
call `self.handle_action` in it. See implementation in `post` below.
- Make sure the change list html is wrapped in a <form>
"""
DELETE = 'delete'
PUBLISH = 'publish'
UNPUBLISH = 'unpublish'
actions = [
(DELETE, 'Delete selected items'),
]
def get_actions(self):
return self.render_to_string('admin/includes/actions.tpl', {
'actions': self.actions,
})
def handle_action(self, **kwargs):
action = self.request.get('_action')
if action not in [item[0] for item in self.actions]:
return
ids = self.request.get_all('_selected_action')
if not ids:
self.add_message('No items selected')
return
keys = [ndb.Key(urlsafe=_id) for _id in ids]
# update with extra keys
extra_keys = kwargs.get('extra_keys', [])
keys.extend(extra_keys)
if action in [self.PUBLISH, self.UNPUBLISH]:
objects = ndb.get_multi(keys)
for obj in objects:
obj.published = action == self.PUBLISH
ndb.put_multi(objects)
count = len(objects)
self.add_message('Successfully {}ed {} items'.format(action, count))
# after delete redirect to current path (prevents replaying the post)
return self.redirect(self.request.path)
# we're dealing with delete
# check if user confirmed, otherwise show confirmation page
if self.request.get('_confirm'):
# already confirmed, delete objects
deleted = ndb.delete_multi(keys)
self.add_message('Deleted {} items'.format(len(deleted)))
return self.redirect(self.request.path)
# show confirmation page
context = {
'object_list': ndb.get_multi(keys),
'cancel_url': self.request.path,
}
self.render('admin/confirm_delete.tpl', context)
return True
def post(self, **kwargs):
if not self.handle_action(**kwargs):
self.get(**kwargs)
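# Usage sketch (hypothetical, not part of this module): wiring
# AdminActionMixin into a change-list handler for an assumed ndb model.
# `Article` and the template path are illustrative assumptions.
class Article(ndb.Model):
    title = ndb.StringProperty()
    published = ndb.BooleanProperty(default=False)
class AdminArticleListHandler(AdminActionMixin, handlers.AdminHandler):
    model = Article
    actions = AdminActionMixin.actions + [
        (AdminActionMixin.PUBLISH, 'Publish selected items'),
        (AdminActionMixin.UNPUBLISH, 'Unpublish selected items'),
    ]
    def get(self, **kwargs):
        self.render('admin/article_list.tpl', {
            'object_list': Article.query(),
            'actions': self.get_actions(),
        })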
|
The TM code of ethics comprises guidelines for company employees internally and for employee communications with TM customers, suppliers and other entities. The rules apply to the company’s Board of Directors, its agents, contractors and other partners who work or operate under the name of TM.
The code of ethics is intended to support the employees of TM and parties that work or operate under the name of TM in the event of any ethical questions that need to be answered.
We undertake our work with integrity and comply with the laws, rules and guidelines that apply to the company’s operations.
We respect confidentiality provisions in employment contracts and work according to the rules of TM as regards personal information.
We provide those who seek our assistance with quality consultancy services and services that take into account the information on the circumstances of each individual.
We keep in mind at all times the importance of maintaining the reputation of the company, irrespective of who we are interacting with and whether such interaction takes place during working hours or during free time.
We are guided by the values of TM: honesty, fairness, simplicity and innovation in our work.
We obtain the education and training necessary to ensure that we can properly carry out our work and thus develop and maintain professional knowledge, including as regards the goods and services of the company.
We do not personally negotiate with persons with whom we have family or personal ties but refer their affairs to an immediate superior or other appropriate party within TM. This applies irrespective of whether TM is the seller or buyer of goods or services.
We do not take advantage of our position as an employee of TM for personal gain or on behalf of persons who are connected with us through family or friendship ties.
We do not take advantage of information to which we have access in the execution of work on behalf of TM for personal gain.
We understand that assistance or gifts from customers and service providers can appear questionable and follow, therefore, the rules of the company in this respect.
We treat the property of the company with respect and do not use such property for personal gain except with the knowledge and approval of an immediate superior.
We show respect towards one another and honesty in communications and endeavour to have good and honest communications with all who seek our assistance and from whom we seek assistance in our work for TM.
We treat our competitors with respect and guarantee that all information issued by us relating to them is correct and not misleading.
We point out what can be improved in the operation of TM and submit proposals for improvements.
We point out what is well done in the operation of TM and take the time to celebrate when achieving our targets.
We engage in objective exchanges of opinion on the basis of equality but respect the decisions made by the management of the company and work accordingly, provided they are in accordance with the code of ethics of TM.
Employees are given a presentation of the company’s code of ethics at the beginning of employment and as needed. The code of ethics is always accessible on the internal and external website of the company.
The TM code of ethics is presented to the Board of Directors of TM, its agents, contractors and others who work for or operate under the name of TM, and forms part of the contract between the parties.
TM management is responsible for the adoption and presentation of the company’s code of ethics and for ensuring that the code is followed in the operation of TM.
In the event that we become aware of violations of the TM code of ethics, we notify of such violation and ensure that such issues are addressed within the company by means of established procedures.
Employees who report mistakes or faults in the operation of TM or inform on the alleged violations of others do not suffer retribution for such action. They are informed of the results of the issue and enjoy anonymity if they so request and if possible.
|
from django.db import models
from djnetaxept.managers import NetaxeptPaymentManager, NetaxeptTransactionManager
STATUS_CHOICES = (
('AUTHORIZED', 'AUTHORIZED'),
('SALE', 'SALE'),
('CAPTURE', 'CAPTURE'),
('CREDIT', 'CREDIT'),
('ANNUL', 'ANNUL')
)
class NetaxeptPayment(models.Model):
transaction_id = models.CharField(max_length=32)
amount = models.IntegerField(null=True, blank=True)
currencycode = models.CharField(max_length=3)
description = models.CharField(max_length=255)
ordernumber = models.CharField(max_length=32)
flagged = models.BooleanField(default=False)
responsecode = models.CharField(max_length=3, null=True, blank=True)
responsesource = models.CharField(max_length=20, null=True, blank=True)
responsetext = models.CharField(max_length=255, null=True, blank=True)
objects = NetaxeptPaymentManager()
def auth(self):
return NetaxeptTransaction.objects.auth_payment(self)
def sale(self):
return NetaxeptTransaction.objects.sale_payment(self)
def completed(self):
return not self.flagged
"""
RECURRING_CHOICES = (
('S', 'S'),
('R', 'R')
)
class NetaxeptRecurringPayment(NetaxeptPayment):
recurring_type = models.CharField(max_length=1, choices=RECURRING_CHOICES)
minimum_frequency = models.PositiveIntegerField(null=True, blank=True)
expiry_date = models.DateField(null=True, blank=True)
"""
OPERATION_CHOICES = (
('AUTH', 'AUTH'),
('SALE', 'SALE'),
('CAPTURE', 'CAPTURE'),
('CREDIT', 'CREDIT'),
('ANNUL', 'ANNUL')
)
class NetaxeptTransaction(models.Model):
payment = models.ForeignKey(NetaxeptPayment)
transaction_id = models.CharField(max_length=32)
operation = models.CharField(max_length=7, choices=OPERATION_CHOICES)
amount = models.PositiveIntegerField(null=True, blank=True)
flagged = models.BooleanField(default=False)
responsecode = models.CharField(max_length=3, null=True, blank=True)
responsesource = models.CharField(max_length=20, null=True, blank=True)
responsetext = models.CharField(max_length=255, null=True, blank=True)
objects = NetaxeptTransactionManager()
def capture(self, amount):
return NetaxeptTransaction.objects.capture_payment(self.payment, amount)
def credit(self, amount):
return NetaxeptTransaction.objects.credit_payment(self.payment, amount)
def annul(self):
return NetaxeptTransaction.objects.annul_payment(self.payment)
def completed(self):
return not self.flagged
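def example_auth_capture_flow(ordernumber):
    """Usage sketch (hypothetical, not part of this app): a typical
    auth-then-capture flow with the models above. Assumes a NetaxeptPayment
    row already exists for the order and that NetaxeptPaymentManager behaves
    like a normal Django manager (so .get() works)."""
    payment = NetaxeptPayment.objects.get(ordernumber=ordernumber)
    txn = payment.auth()  # AUTH via NetaxeptTransactionManager
    if txn.completed():   # not flagged, so the authorization succeeded
        return txn.capture(payment.amount)  # CAPTURE the authorized amount
    return txn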
|
When business group members want to travel with ease, they ought to choose the right corporate transportation company. Conducting comprehensive research to find the best transportation company pays off, since you can then rely on that company for future trips. Customer satisfaction is the key element in the success of most corporate car services. The following are essential guidelines for hiring executive corporate car transportation services.
Suppose you intend to travel as a group; you need to ask the ground transportation provider whether they will be able to accommodate the entire group. When looking for a corporate transportation service, ensure that you choose one that has the workforce and experience to take care of the whole group, and one whose vehicles are of high quality and well maintained. You should also find out whether the vehicles of the ground transportation company you intend to hire have been involved in any accidents, and ask how often the vehicles are inspected and maintained. The best ground transportation companies recruit only drivers with vast experience and knowledge of automobile handling, and their vehicles are regularly maintained.
In addition, look for a company that provides a collection of luxury vehicles that you can book twenty-four hours a day, seven days a week.
Another question you need answered from the corporate car service is whether they can offer their services in different places. Choose a ground transportation company that can take your group to whatever place you like.
In addition, when looking for a corporate transportation service, ensure that the company can handle changes to business trips. It is vital to inquire how the company handles last-minute changes and whether it charges extra fees.
Besides, when looking for a car rental company, you need to pay attention to how the company’s staff communicates with clients: note how quickly they answer, how thoroughly they address your questions, and whether you get a general sense that they have your best interests in mind. Since prices vary among corporate car services, it is vital to know the amount you are expected to pay before you book a trip with the corporate ground transportation company, since this will enable you to get the best value for your business.
It is also advisable, when looking for a corporate ground car rental company for your CEO, to consider what the company’s previous clients are saying about the services the company offers; such information can be found in the client review section on the company’s website.
|
# -*- coding: utf-8 -*-
''' These are the things that are used when you `cytoplasm serve`.
'''
import os
import sys
import cytoplasm
# make this work in either Python 2.x or 3.x
if sys.version_info.major >= 3:
from http.server import SimpleHTTPRequestHandler, HTTPServer
else:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
# keep track of when things were last built in this global variable
most_recent_time = 0
# keep track of the site in this global variable
site = None
def serve(port, rebuild, event=None):
"Serve the Cytoplasm site."
# keep track of the most recently modified time in global variable
# most_recent_time
global most_recent_time
global site
# create a site and rebuild it first.
site = cytoplasm.Site(".")
site.build()
# set the most recent time.
most_recent_time = most_recent()
# change to the build directory, where things are to be served from.
os.chdir(site.config.build_dir)
# use either SimpleHTTPRequestHandler or RebuildHandler, depending on
# whether rebuild is True.
if rebuild:
handler = RebuildHandler
else:
handler = SimpleHTTPRequestHandler
# make a server with the handler and the port
httpd = HTTPServer(('', port), handler)
# serve!
httpd.serve_forever()
def most_recent():
"""Determine the most recent modified time in the source directory,
ignoring dotfiles and _build.
"""
directory = site.source
build_dir = site.config.build_dir
# get the candidate files:
files = [f for f in os.listdir(directory) if f != build_dir and not
f.startswith(".")]
# append files in additional watch directories
for dir in site.config.watch_dirs:
files += [os.path.join(dir, f) for f in
os.listdir(os.path.join(directory, dir)) if not f.startswith(".")]
# get each of their times
times = [os.stat(os.path.join(directory, f)).st_mtime for f in files]
# the highest time here is the most recent; return that.
return max(times)
class RebuildHandler(SimpleHTTPRequestHandler):
def handle(self):
"Handle a request and, if anything has changed, rebuild the site."
# overwrite the handle method in SimpleHTTPRequestHandler with this.
# declare most_recent_time global; we'll be changing it later.
global most_recent_time
# figure out the most recent time edited in the source directory
new_recent = most_recent()
# only build the site if the new most recent is more recent than the
# old one, i.e. if one or more of the files has been edited.
if new_recent > most_recent_time:
# update most_recent_time
most_recent_time = new_recent
# Build the site from the source directory
print("Rebuilding your Cytoplasm site...")
site.build()
# Call SimpleHTTPRequestHandler.handle(), so it can do stuff there too.
SimpleHTTPRequestHandler.handle(self)
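if __name__ == "__main__":
    # Usage sketch: from a Cytoplasm site's source directory, serve on port
    # 8000 and rebuild whenever a source file changes, roughly what
    # `cytoplasm serve` does. The port number is an arbitrary example.
    serve(8000, rebuild=True)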
|
The chocolate manufacturer, known for its hand-crafted bonbons and gelato, plans to open in a space across from the outlet mall's food pavilion.
Chocolaterie Stam will open its third metro location in November at Altoona's Outlets of Des Moines.
Chocolaterie Stam's menu includes more than 50 flavors of gelato and sorbetto, its own coffee blend and several flavors and shapes of bonbons, truffles and other chocolate confections.
Chocolaterie Stam has two other metro stores — 2814 Ingersoll Ave. in Des Moines and Valley West Mall in West Des Moines — and several other locations throughout the U.S.
The outlet mall, located at 801 Bass Pro Drive in Altoona, is part of the Shoppes at Prairie Crossing, a retail, commercial and residential development south of Bass Pro Shops.
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Utilities for using modules
"""
import webnotes, os
from webnotes import conf
import webnotes.utils
lower_case_files_for = ['DocType', 'Page', 'Report',
"Workflow", 'Module Def', 'Desktop Item', 'Workflow State', 'Workflow Action']
def scrub(txt):
return txt.replace(' ','_').replace('-', '_').replace('/', '_').lower()
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
ndt, ndn = dt, dn
if dt in lower_case_files_for:
ndt, ndn = scrub(dt), scrub(dn)
return ndt, ndn
def get_module_path(module):
"""Returns path of the given module"""
m = scrub(module)
app_path = webnotes.utils.get_base_path()
if m in ('core', 'website'):
return os.path.join(app_path, 'lib', m)
else:
return os.path.join(app_path, 'app', m)
def get_doc_path(module, doctype, name):
dt, dn = scrub_dt_dn(doctype, name)
return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, plugin=None, force=True):
from webnotes.modules.import_file import import_files
return import_files(module, dt, dn, plugin=plugin, force=force)
def export_doc(doctype, name, module=None, plugin=None):
"""write out a doc"""
from webnotes.modules.export_file import write_document_file
import webnotes.model.doc
if not module: module = webnotes.conn.get_value(doctype, name, 'module')
write_document_file(webnotes.model.doc.get(doctype, name), module, plugin=plugin)
def get_doctype_module(doctype):
return webnotes.conn.get_value('DocType', doctype, 'module')
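def _scrub_examples():
    """Minimal illustration of the scrubbing rules above: for the types in
    lower_case_files_for, names are lower-cased and separators collapse to
    underscores, so the files for 'Sales Invoice' live under .../sales_invoice/."""
    assert scrub("Sales Invoice") == "sales_invoice"
    assert scrub_dt_dn("DocType", "Sales Invoice") == ("doctype", "sales_invoice")
    # types not in lower_case_files_for keep their original names
    assert scrub_dt_dn("Custom Field", "Some Field") == ("Custom Field", "Some Field")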
|
You can follow the discussion on Show all child pages in Confluence without having to leave a comment. Cool, huh? Just enter your email address in the form here below and you’re all set.
|
from spider import Spider
import time
import sqlite3
class Scheduler:
# userList acts as a seed to spider
userList = ()
    # userDict contains all users that
    # have been processed
    userDict = {}
    # tempList holds the most recent names
    # that have been crawled by the spider
    tempList = []
# tempListCount represent the number of users
# that have been crawled
tempListCount = 0
def __init__(self, inputfile):
# open given file and read from it
self.userList = [line.strip() for line in open(inputfile)]
self.preTime = time.time()
self.storeUnit = 10000
# return true if given username has been processed, otherwise
# add it to the userDict and return false
def hasProcessed(self, username):
if username in self.userDict:
return True
self.userDict[username] = '1'
self.tempList.append(username)
self.tempListCount += 1
if self.tempListCount > self.storeUnit:
self.storeData()
return False
def startCrawl(self):
spider = Spider(self.userList)
spider.crawl(self.hasProcessed)
def storeData(self):
        # timeDiff is the time (in minutes) taken to crawl storeUnit usernames
timeDiff = (time.time() - self.preTime) / 60
self.preTime = time.time()
        # filename will be in a format like "Thu,28,Jun,2013-06:50:07=2.56"
        # where 2.56 is the first 4 digits of timeDiff
        filename = time.strftime("%a,%d,%b,%Y-%H:%M:%S", time.gmtime()) + "=" + str(timeDiff)[:4]
# write data into test file, one username per line
        f = open(filename + '.txt', 'w')
f.write('\n'.join(self.tempList))
f.close()
# reset tempList to empty and set count to 0
self.tempList = []
self.tempListCount = 0
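if __name__ == '__main__':
    # Usage sketch: seed the crawl from a file with one username per line
    # ('seeds.txt' is a hypothetical path) and let Spider call back into
    # hasProcessed for every name it encounters.
    scheduler = Scheduler('seeds.txt')
    scheduler.startCrawl()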
|
Explore more readings and media presentations on The John Marshall Foundation website.
CLICK HERE for a printable biography worksheet about John Marshall and his influence on the role of the Supreme Court.
Here’s a great link for further exploration for older students. I’m linking it here as a reminder to give it to my high school student for this topic.
|
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# draw.py -- Draw routines for gPodder-specific graphics
# Thomas Perl <[email protected]>, 2007-11-25
#
import gpodder
import gtk
import pango
import pangocairo
import cairo
import StringIO
import math
class TextExtents(object):
def __init__(self, ctx, text):
tuple = ctx.text_extents(text)
(self.x_bearing, self.y_bearing, self.width, self.height, self.x_advance, self.y_advance) = tuple
RRECT_LEFT_SIDE = 1
RRECT_RIGHT_SIDE = 2
def draw_rounded_rectangle(ctx, x, y, w, h, r=10, left_side_width = None, sides_to_draw=0, close=False):
if left_side_width is None:
        left_side_width = w/2
x = int(x)
offset = 0
if close: offset = 0.5
if sides_to_draw & RRECT_LEFT_SIDE:
ctx.move_to(x+int(left_side_width)-offset, y+h)
ctx.line_to(x+r, y+h)
ctx.curve_to(x, y+h, x, y+h, x, y+h-r)
ctx.line_to(x, y+r)
ctx.curve_to(x, y, x, y, x+r, y)
ctx.line_to(x+int(left_side_width)-offset, y)
if close:
ctx.line_to(x+int(left_side_width)-offset, y+h)
if sides_to_draw & RRECT_RIGHT_SIDE:
ctx.move_to(x+int(left_side_width)+offset, y)
ctx.line_to(x+w-r, y)
ctx.curve_to(x+w, y, x+w, y, x+w, y+r)
ctx.line_to(x+w, y+h-r)
ctx.curve_to(x+w, y+h, x+w, y+h, x+w-r, y+h)
ctx.line_to(x+int(left_side_width)+offset, y+h)
if close:
ctx.line_to(x+int(left_side_width)+offset, y)
def rounded_rectangle(ctx, x, y, width, height, radius=4.):
"""Simple rounded rectangle algorithmn
http://www.cairographics.org/samples/rounded_rectangle/
"""
degrees = math.pi / 180.
ctx.new_sub_path()
if width > radius:
ctx.arc(x + width - radius, y + radius, radius, -90. * degrees, 0)
ctx.arc(x + width - radius, y + height - radius, radius, 0, 90. * degrees)
ctx.arc(x + radius, y + height - radius, radius, 90. * degrees, 180. * degrees)
ctx.arc(x + radius, y + radius, radius, 180. * degrees, 270. * degrees)
ctx.close_path()
def draw_text_box_centered(ctx, widget, w_width, w_height, text, font_desc=None, add_progress=None):
style = widget.rc_get_style()
text_color = style.text[gtk.STATE_PRELIGHT]
red, green, blue = text_color.red, text_color.green, text_color.blue
text_color = [float(x)/65535. for x in (red, green, blue)]
text_color.append(.5)
if font_desc is None:
font_desc = style.font_desc
font_desc.set_size(14*pango.SCALE)
pango_context = widget.create_pango_context()
layout = pango.Layout(pango_context)
layout.set_font_description(font_desc)
layout.set_text(text)
width, height = layout.get_pixel_size()
ctx.move_to(w_width/2-width/2, w_height/2-height/2)
ctx.set_source_rgba(*text_color)
ctx.show_layout(layout)
# Draw an optional progress bar below the text (same width)
if add_progress is not None:
bar_height = 10
ctx.set_source_rgba(*text_color)
ctx.set_line_width(1.)
rounded_rectangle(ctx, w_width/2-width/2-.5, w_height/2+height-.5, width+1, bar_height+1)
ctx.stroke()
rounded_rectangle(ctx, w_width/2-width/2, w_height/2+height, int(width*add_progress)+.5, bar_height)
ctx.fill()
def draw_text_pill(left_text, right_text, x=0, y=0, border=2, radius=14, font_desc=None):
if gpodder.ui.fremantle:
border += 3
# Create temporary context to calculate the text size
ctx = cairo.Context(cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
# Use GTK+ style of a normal Button
widget = gtk.Label()
style = widget.rc_get_style()
x_border = border*2
if font_desc is None:
font_desc = style.font_desc
font_desc.set_weight(pango.WEIGHT_BOLD)
pango_context = widget.create_pango_context()
layout_left = pango.Layout(pango_context)
layout_left.set_font_description(font_desc)
layout_left.set_text(left_text)
layout_right = pango.Layout(pango_context)
layout_right.set_font_description(font_desc)
layout_right.set_text(right_text)
width_left, height_left = layout_left.get_pixel_size()
width_right, height_right = layout_right.get_pixel_size()
text_height = max(height_left, height_right)
image_height = int(y+text_height+border*2)
image_width = int(x+width_left+width_right+x_border*4)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, image_width, image_height)
ctx = pangocairo.CairoContext(cairo.Context(surface))
if left_text == '0':
left_text = None
if right_text == '0':
right_text = None
left_side_width = width_left + x_border*2
right_side_width = width_right + x_border*2
rect_width = left_side_width + right_side_width
rect_height = text_height + border*2
if left_text is not None:
draw_rounded_rectangle(ctx,x,y,rect_width,rect_height,radius, left_side_width, RRECT_LEFT_SIDE, right_text is None)
linear = cairo.LinearGradient(x, y, x+left_side_width/2, y+rect_height/2)
linear.add_color_stop_rgba(0, .8, .8, .8, .5)
linear.add_color_stop_rgba(.4, .8, .8, .8, .7)
linear.add_color_stop_rgba(.6, .8, .8, .8, .6)
linear.add_color_stop_rgba(.9, .8, .8, .8, .8)
linear.add_color_stop_rgba(1, .8, .8, .8, .9)
ctx.set_source(linear)
ctx.fill()
xpos, ypos, width_left, height = x+1, y+1, left_side_width, rect_height-2
if right_text is None:
width_left -= 2
draw_rounded_rectangle(ctx, xpos, ypos, rect_width, height, radius, width_left, RRECT_LEFT_SIDE, right_text is None)
ctx.set_source_rgba(1., 1., 1., .3)
ctx.set_line_width(1)
ctx.stroke()
draw_rounded_rectangle(ctx,x,y,rect_width,rect_height,radius, left_side_width, RRECT_LEFT_SIDE, right_text is None)
ctx.set_source_rgba(.2, .2, .2, .6)
ctx.set_line_width(1)
ctx.stroke()
ctx.move_to(x+x_border, y+1+border)
ctx.set_source_rgba( 0, 0, 0, 1)
ctx.show_layout(layout_left)
ctx.move_to(x-1+x_border, y+border)
ctx.set_source_rgba( 1, 1, 1, 1)
ctx.show_layout(layout_left)
if right_text is not None:
draw_rounded_rectangle(ctx, x, y, rect_width, rect_height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
linear = cairo.LinearGradient(x+left_side_width, y, x+left_side_width+right_side_width/2, y+rect_height)
linear.add_color_stop_rgba(0, .2, .2, .2, .9)
linear.add_color_stop_rgba(.4, .2, .2, .2, .8)
linear.add_color_stop_rgba(.6, .2, .2, .2, .6)
linear.add_color_stop_rgba(.9, .2, .2, .2, .7)
linear.add_color_stop_rgba(1, .2, .2, .2, .5)
ctx.set_source(linear)
ctx.fill()
xpos, ypos, width, height = x, y+1, rect_width-1, rect_height-2
if left_text is None:
xpos, width = x+1, rect_width-2
draw_rounded_rectangle(ctx, xpos, ypos, width, height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
ctx.set_source_rgba(1., 1., 1., .3)
ctx.set_line_width(1)
ctx.stroke()
draw_rounded_rectangle(ctx, x, y, rect_width, rect_height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
ctx.set_source_rgba(.1, .1, .1, .6)
ctx.set_line_width(1)
ctx.stroke()
ctx.move_to(x+left_side_width+x_border, y+1+border)
ctx.set_source_rgba( 0, 0, 0, 1)
ctx.show_layout(layout_right)
ctx.move_to(x-1+left_side_width+x_border, y+border)
ctx.set_source_rgba( 1, 1, 1, 1)
ctx.show_layout(layout_right)
return surface
def draw_pill_pixbuf(left_text, right_text):
return cairo_surface_to_pixbuf(draw_text_pill(left_text, right_text))
def cairo_surface_to_pixbuf(s):
"""
Converts a Cairo surface to a Gtk Pixbuf by
encoding it as PNG and using the PixbufLoader.
"""
sio = StringIO.StringIO()
try:
s.write_to_png(sio)
except:
# Write an empty PNG file to the StringIO, so
# in case of an error we have "something" to
# load. This happens in PyCairo < 1.1.6, see:
# http://webcvs.cairographics.org/pycairo/NEWS?view=markup
# Thanks to Chris Arnold for reporting this bug
sio.write('iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A\n/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB9cMEQkqIyxn3RkAAAAZdEVYdENv\nbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAADUlEQVQI12NgYGBgAAAABQABXvMqOgAAAABJ\nRU5ErkJggg==\n'.decode('base64'))
pbl = gtk.gdk.PixbufLoader()
pbl.write(sio.getvalue())
pbl.close()
pixbuf = pbl.get_pixbuf()
return pixbuf
def progressbar_pixbuf(width, height, percentage):
COLOR_BG = (.4, .4, .4, .4)
COLOR_FG = (.2, .9, .2, 1.)
COLOR_FG_HIGH = (1., 1., 1., .5)
COLOR_BORDER = (0., 0., 0., 1.)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
ctx = cairo.Context(surface)
padding = int(float(width)/8.0)
bar_width = 2*padding
bar_height = height - 2*padding
bar_height_fill = bar_height*percentage
# Background
ctx.rectangle(padding, padding, bar_width, bar_height)
ctx.set_source_rgba(*COLOR_BG)
ctx.fill()
# Foreground
ctx.rectangle(padding, padding+bar_height-bar_height_fill, bar_width, bar_height_fill)
ctx.set_source_rgba(*COLOR_FG)
ctx.fill()
ctx.rectangle(padding+bar_width/3, padding+bar_height-bar_height_fill, bar_width/4, bar_height_fill)
ctx.set_source_rgba(*COLOR_FG_HIGH)
ctx.fill()
# Border
ctx.rectangle(padding-.5, padding-.5, bar_width+1, bar_height+1)
ctx.set_source_rgba(*COLOR_BORDER)
ctx.set_line_width(1.)
ctx.stroke()
return cairo_surface_to_pixbuf(surface)
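def _example_pill_and_progress():
    # Usage sketch: only the two label strings are needed to render a pill
    # (gPodder uses them for episode counts), and progressbar_pixbuf draws a
    # small vertical bar. The concrete numbers here are illustrative.
    pill = draw_pill_pixbuf('3', '7')       # left and right counts
    bar = progressbar_pixbuf(16, 32, 0.4)   # 16x32 px, 40% filled
    return pill, bar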
|
Kessie from the cartoon Winnie the Pooh.
I saw this character once when I watched the episode "Find her, keep her". I think she is the most beautiful bird of all the cartoons I've ever seen.
I remember that episode of Pooh. Even as a child it was an emotional episode. I remember Rabbit being so motherly (when I was a kid I thought Rabbit was a girl anyway, lol), and I remember it being so sad when he had to let her fly off.
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
from math import pi, tau, sin, cos
import bmesh
from mathutils import Matrix
def create_prongs(self):
# Prong
# ---------------------------
prong_rad = self.diameter / 2
taper = self.taper + 1
if self.bump_scale:
curve_resolution = int(self.detalization / 4) + 1
angle = (pi / 2) / (curve_resolution - 1)
v_cos = []
v_co_app = v_cos.append
x = 0.0
for i in range(curve_resolution):
y = sin(i * angle) * prong_rad
z = cos(i * angle) * prong_rad * self.bump_scale + self.z_top
v_co_app((x, y, z))
v_co_app((x, prong_rad * taper, -self.z_btm))
else:
v_cos = (
(0.0, 0.0, self.z_top),
(0.0, prong_rad, self.z_top),
(0.0, prong_rad * taper, -self.z_btm),
)
bm = bmesh.new()
v_profile = [bm.verts.new(v) for v in v_cos]
for i in range(len(v_profile) - 1):
bm.edges.new((v_profile[i], v_profile[i + 1]))
bmesh.ops.spin(bm, geom=bm.edges, angle=tau, steps=self.detalization, axis=(0.0, 0.0, 1.0), cent=(0.0, 0.0, 0.0))
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.00001)
v_boundary = [x for x in bm.verts if x.is_boundary]
bm.faces.new(reversed(v_boundary))
# Transforms
# ---------------------------
pos_offset = (self.gem_l / 2 + prong_rad) - (self.diameter * (self.intersection / 100))
spin_steps = self.number - 1
if self.alignment:
bmesh.ops.rotate(bm, verts=bm.verts, cent=(0.0, 0.0, 0.0), matrix=Matrix.Rotation(-self.alignment, 4, "X"))
bmesh.ops.translate(bm, verts=bm.verts, vec=(0.0, pos_offset, 0.0))
if spin_steps:
spin_angle = tau - tau / self.number
bmesh.ops.spin(bm, geom=bm.faces, angle=spin_angle, steps=spin_steps, axis=(0.0, 0.0, 1.0), cent=(0.0, 0.0, 0.0), use_duplicate=True)
bmesh.ops.rotate(bm, verts=bm.verts, cent=(0.0, 0.0, 0.0), matrix=Matrix.Rotation(-self.position, 4, "Z"))
if self.symmetry:
bmesh.ops.mirror(bm, geom=bm.faces, merge_dist=0, axis="Y")
bmesh.ops.recalc_face_normals(bm, faces=bm.faces)
bmesh.ops.rotate(bm, verts=bm.verts, cent=(0.0, 0.0, 0.0), matrix=Matrix.Rotation(-self.symmetry_pivot, 4, "Z"))
return bm
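def example_prongs_bmesh():
    # Usage sketch (not part of the add-on): create_prongs reads its settings
    # from an operator-like object, so a SimpleNamespace carrying the
    # attributes used above is enough to build the BMesh outside the normal
    # operator flow. All numeric values below are illustrative assumptions.
    from types import SimpleNamespace
    settings = SimpleNamespace(
        diameter=0.8, taper=0.0, bump_scale=0.5, detalization=32,
        z_top=0.3, z_btm=0.5, gem_l=4.0, intersection=30.0,
        number=4, position=0.0, alignment=0.0,
        symmetry=False, symmetry_pivot=0.0,
    )
    return create_prongs(settings)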
|
The issue of inappropriate image manipulation in figures published in peer-reviewed journals has received much attention in the last few years. I often wonder whether this has led to more “ugly” results – which obviously have not been manipulated – passing through the peer review process. It is also increasingly becoming a requirement to quantify data present in western blots and agarose gels (e.g. RT-PCR results), which are, by nature, qualitative assays. Often, it seems that reviewers accept the results of such quantitation, even if the quality of the blots/gels is suspect. I come across examples of these problems regularly – in good and bad journals. The example below comes from a very recent issue of a highly ranked specialty journal.
The authors exposed cells to Treatment 1 or Treatment 2 for different lengths of time and analyzed expression of Protein A by western blot. The Protein A blot is not terrible, even though it is not clean, and you need to take the authors’ word that the protein the arrow is pointing at is indeed Protein A (especially since there are no molecular weight markers indicated). The actin blot, however, is quite awful. Actin is a control protein whose expression should not be modulated by the treatments. The problem is that lanes 3-7 of the actin blot have clearly saturated the western blot film (they are black whereas lanes 1 and 2 are grey) indicating that actin levels are much higher in those samples and that equal amounts of protein were not loaded in all lanes. Also, the samples in lanes 3 and 4 have apparently leaked, giving a single actin band with no clear boundary between the lanes.
Because of these two issues, the authors have no business trying to quantify this result. To quantify bands in a western blot, they cannot be saturated, or you end up underestimating the amount of protein present. They are doubly wrong in trying to quantify neighboring bands whose boundaries can’t be discerned!
Below the western blots, the authors present a graph whose values represent the density of the “Protein A” bands normalized to the density of actin bands. A final issue that should have been recognized by the reviewers of this article is that the statistical analysis of the quantitation is incorrect. The authors repeated this experiment only twice, but the error bars on the graph are standard deviations, which should be based on at least three independent experiments.
This problematic figure makes up only a small part of a large and complex paper. Nonetheless, it consists of several figures that present densitometric quantitations of other gels, making the reader wonder whether they are similarly unreliable!
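To make these two quantitation rules concrete, the sketch below (illustrative only, not from the paper) refuses to normalize against a saturated loading control and refuses to report standard deviations from fewer than three independent experiments. The 8-bit saturation threshold and the array layout are assumptions.
import numpy as np
def normalized_density(protein, actin, saturation_level=250):
    # densitometry is meaningless once the control lanes saturate the film
    protein = np.asarray(protein, dtype=float)
    actin = np.asarray(actin, dtype=float)
    if (actin >= saturation_level).any():
        raise ValueError("actin lanes saturated; quantitation unreliable")
    return protein / actin
def sd_error_bars(replicates):
    # standard deviations are only meaningful with n >= 3 independent runs
    if len(replicates) < 3:
        raise ValueError("need at least 3 independent experiments for SD")
    return np.mean(replicates, axis=0), np.std(replicates, axis=0, ddof=1)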
Are ultra low-temperature freezers and liquid nitrogen tanks really necessary?
A molecular biology lab in my basement?
|
from unittest import TestCase
import os
from opencog.atomspace import AtomSpace, TruthValue, Atom, types
from opencog.bindlink import stub_bindlink, bindlink, single_bindlink,\
first_n_bindlink, af_bindlink, \
satisfaction_link, satisfying_set, \
satisfying_element, first_n_satisfying_set, \
execute_atom, evaluate_atom
from opencog.type_constructors import *
from opencog.utilities import initialize_opencog, finalize_opencog
from test_functions import green_count, red_count
__author__ = 'Curtis Faith'
class BindlinkTest(TestCase):
bindlink_atom = None
getlink_atom = None
atomspace = AtomSpace()
starting_size = 0
def setUp(self):
print ("setUp - atomspace = ", self.atomspace)
# Clear atoms from previous test
self.atomspace.clear()
# Initialize Python
initialize_opencog(self.atomspace)
set_type_ctor_atomspace(self.atomspace)
# Define several animals and something of a different type as well
InheritanceLink( ConceptNode("Frog"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Zebra"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Deer"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Spaceship"), ConceptNode("machine"))
# Define a graph search query
self.bindlink_atom = \
BindLink(
# The variable node to be grounded.
VariableNode("$var"),
# The pattern to be grounded.
InheritanceLink(
VariableNode("$var"),
ConceptNode("animal")
),
# The grounding to be returned.
VariableNode("$var")
# bindlink needs a handle
)
# Define a pattern to be grounded
self.getlink_atom = \
GetLink(
InheritanceLink(
VariableNode("$var"),
ConceptNode("animal")
)
)
# Remember the starting atomspace size.
self.starting_size = self.atomspace.size()
def tearDown(self):
print ("tearDown - atomspace = ", self.atomspace)
# Can't do this; finalize can be called only once, ever, and
# then never again. The second call will never follow through.
# Also, cannot create and delete atomspaces here; this will
# confuse the PythonEval singletonInstance.
# finalize_opencog()
# del self.atomspace
def test_stub_bindlink(self):
# Remember the starting atomspace size. This test should not
# change the atomspace.
starting_size = self.atomspace.size()
# Run bindlink.
atom = stub_bindlink(self.atomspace, self.bindlink_atom)
self.assertTrue(atom is not None)
# Check the ending atomspace size, it should be the same.
ending_size = self.atomspace.size()
self.assertEquals(ending_size, starting_size)
def _check_result_setlink(self, atom, expected_arity):
# Check if the atom is a SetLink
self.assertTrue(atom is not None)
self.assertEquals(atom.type, types.SetLink)
# Check the ending atomspace size, it should have added one SetLink.
ending_size = self.atomspace.size()
self.assertEquals(ending_size, self.starting_size + 1)
# The SetLink should have expected_arity items in it.
self.assertEquals(atom.arity, expected_arity)
def test_bindlink(self):
atom = bindlink(self.atomspace, self.bindlink_atom)
self._check_result_setlink(atom, 3)
def test_single_bindlink(self):
atom = single_bindlink(self.atomspace, self.bindlink_atom)
self._check_result_setlink(atom, 1)
def test_first_n_bindlink(self):
atom = first_n_bindlink(self.atomspace, self.bindlink_atom, 5)
self._check_result_setlink(atom, 3)
def test_af_bindlink(self):
atom = af_bindlink(self.atomspace, self.bindlink_atom)
        # The SetLink is empty. (Should it be?)
self._check_result_setlink(atom, 0)
def test_satisfying_set(self):
atom = satisfying_set(self.atomspace, self.getlink_atom)
self._check_result_setlink(atom, 3)
def test_satisfying_element(self):
atom = satisfying_element(self.atomspace, self.getlink_atom)
self._check_result_setlink(atom, 1)
def test_first_n_satisfying_set(self):
atom = first_n_satisfying_set(self.atomspace, self.getlink_atom, 5)
self._check_result_setlink(atom, 3)
def test_satisfy(self):
satisfaction_atom = SatisfactionLink(
VariableList(), # no variables
SequentialAndLink(
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("green light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("green light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("red light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("traffic ticket")
)
)
)
)
atom = satisfaction_link(self.atomspace, satisfaction_atom)
self.assertTrue(atom is not None and atom.mean <= 0.5)
self.assertEquals(green_count(), 2)
self.assertEquals(red_count(), 1)
def test_execute_atom(self):
result = execute_atom(self.atomspace,
ExecutionOutputLink(
GroundedSchemaNode("py: test_functions.add_link"),
ListLink(
ConceptNode("one"),
ConceptNode("two")
)
)
)
list_link = ListLink(
ConceptNode("one"),
ConceptNode("two")
)
self.assertEquals(result, list_link)
def test_evaluate_atom(self):
result = evaluate_atom(self.atomspace,
EvaluationLink(
GroundedPredicateNode("py: test_functions.bogus_tv"),
ListLink(
ConceptNode("one"),
ConceptNode("two")
)
)
)
self.assertEquals(result, TruthValue(0.6, 0.234))
|
Since 1974, Lee Shuknecht & Sons, Inc. has led the evolution of onion harvesting technology. We have engineered, developed, and manufactured a total mechanical onion harvesting and onion packing house system, along with manufacturing a total equipment product line devoted to the onion industry. Our equipment is available in different models, with many options to fit the needs of each customer.
Lee Shuknecht & Sons, Inc. is located in the heart of agricultural country in Western New York State, and was founded in 1974 by Joan & Lee Shuknecht. Lee grew up on a local family owned dairy farm in Elba, NY. After high school he worked for a local equipment dealer and then an Industrial Equipment manufacturer. While working for a local repair shop farmers expressed the need for better onion harvesting equipment, and Lee decided to start his own fabrication and repair business that would work closely with local onion growers. Lee designed and built a self-propelled onion harvester and since then the company has grown and the equipment has evolved. In the late 80’s the business moved to a new and larger location which includes a farm parts store and full machine shop. Lee and Joan’s two sons, Dale and Marc, and daughter Wendy, currently run the business and are looking to the third generation to continue in Lee’s footsteps.
|
# Description: Shows how to use value transformers
# Category: preprocessing
# Classes: TransformValue, Continuous2Discrete, Discrete2Continuous, MapIntValue
# Uses:
# Referenced:
import orange
print
def printExample(ex):
for val in ex:
print "%16s: %s" % (val.variable.name, val)
data = orange.ExampleTable("bridges")
for attr in data.domain:
if attr.varType == orange.VarTypes.Continuous:
print "%20s: continuous" % attr.name
else:
print "%20s: %s" % (attr.name, attr.values)
print
print "Original 15th example:"
printExample(data[15])
continuizer = orange.DomainContinuizer()
continuizer.multinomialTreatment = continuizer.LowestIsBase
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "Lowest is base"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.FrequentIsBase
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "Frequent is base"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.NValues
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "NValues"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.Ignore
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "Ignore"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.AsOrdinal
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "As ordinal"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.AsNormalizedOrdinal
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "As normalized ordinal"
printExample(data0[15])
|
8 pp. Roman, 283 pp.
This bibliography contains approx. 2,000 titles of linguistic relevance. Apart from linguistic studies in the concrete sense, the volume also includes original texts (fairy tales, proverbs, legends, etc.), didactical texts for alphabetization, ethnological literature containing linguistic material, as well as some texts that are historically relevant for the localization of languages and peoples. The bibliography includes, apart from published material which is generally accessible, also a great amount of manuscripts and unpublished papers (grey literature).
The entries are classified by author or editor and contain keywords referring to the contents as well as the languages and groups discussed in the text. Furthermore, some entries contain bibliographical annotations or cross-references to other book titles. A language and subject index is also included in the bibliography.
The list of the different language terms mentioned in the texts is appended to the bibliography and contains approx. 1,500 entries. For each language term, the list mentions its source, its classification and, if possible, its etymology, and gives a cross-reference to the term officially acknowledged or most commonly used in recent research. Each entry has its own code as well as a classifying number which guarantees immediate identification. The bibliography provides a useful and extensive reference book for anyone interested in Gur languages.
|