"""Classes for representing a Collection+JSON document."""
from __future__ import absolute_import, unicode_literals
import json
__version__ = '0.1.1'
class ArrayProperty(object):
"""A descriptor that converts from any enumerable to a typed Array."""
def __init__(self, cls, name):
"""Constructs typed array property
:param cls type: the type of objects expected in the array
:param name str: name of the property
"""
self.cls = cls
self.name = name
def __get__(self, instance, owner):
target = instance
if target is None:
target = owner
if self.name in target.__dict__:
return target.__dict__[self.name]
raise AttributeError
def __set__(self, instance, value):
if value is None:
value = []
instance.__dict__[self.name] = Array(self.cls, self.name, value)
class DictProperty(object):
"""A descriptor that converts to a dictionary containing Arrays or objects of a given type"""
def __init__(self, cls, name):
"""Constructs the dictionary
:param cls type: the expected type of the objects
"""
self.cls = cls
self.name = name
def __get__(self, instance, owner):
target = instance
if target is None:
target = owner
if self.name in target.__dict__:
return target.__dict__[self.name]
raise AttributeError
def __set__(self, instance, vals):
instance.__dict__[self.name] = {}
if vals is not None:
for name, value in vals.items():
if value is None or isinstance(value, self.cls):
instance.__dict__[self.name][name] = value
elif isinstance(value, dict):
instance.__dict__[self.name][name] = self.cls(**value)
                elif isinstance(value, list):
                    instance.__dict__[self.name][name] = Array(self.cls, None, value)
else:
raise TypeError("Invalid value '%s', "
"expected dict, list or '%s'" % (value,
self.cls.__name__))
class TypedProperty(object):
"""A descriptor for assigning only a specific type of instance.
    Additionally supports assigning a dictionary convertible to the type.
"""
def __init__(self, cls, name):
"""Constructs the typed property
:param cls type: the type of object expected
"""
self.cls = cls
self.name = name
def __get__(self, instance, owner):
target = instance
if target is None:
target = owner
if self.name in target.__dict__:
return target.__dict__[self.name]
raise AttributeError
def __set__(self, instance, value):
if value is None or isinstance(value, self.cls):
instance.__dict__[self.name] = value
elif isinstance(value, dict):
instance.__dict__[self.name] = self.cls(**value)
else:
raise TypeError("Invalid value '%s', "
"expected dict or '%s'" % (value,
self.cls.__name__))
class ComparableObject(object):
"""Abstract base class for objects implementing equality comparison.
This class provides default __eq__ and __ne__ implementations.
"""
def __eq__(self, other):
"""Return True if both instances are equivalent."""
return (type(self) == type(other) and
self.__dict__ == other.__dict__)
def __ne__(self, other):
"""Return True if both instances are not equivalent."""
return (type(self) != type(other) or
self.__dict__ != other.__dict__)
class Data(ComparableObject):
"""Object representing a Collection+JSON data object."""
def __init__(self, name, value=None, prompt=None, array=None, object=None):
self.name = name
self.value = value
self.array = array
self.object = object
self.prompt = prompt
        property_count = 0
        if value is not None: property_count += 1
        if array is not None: property_count += 1
        if object is not None: property_count += 1
        if property_count > 1:
            raise ValueError(
                "Data can have at most one of 'value', 'array' and 'object'.")
def __repr__(self):
data = "name='%s'" % self.name
if self.prompt is not None:
data += " prompt='%s'" % self.prompt
return "<Data: %s>" % data
def to_dict(self):
"""Return a dictionary representing a Data object."""
output = {
'name': self.name
}
if self.value is not None:
output['value'] = self.value
elif self.array is not None:
output['array'] = self.array
elif self.object is not None:
output['object'] = self.object
if self.prompt is not None:
output['prompt'] = self.prompt
return output
class Link(ComparableObject):
"""Object representing a Collection+JSON link object."""
def __init__(self, href, rel, name=None, render=None, prompt=None,
length=None, inline=None):
self.href = href
self.rel = rel
self.name = name
self.render = render
self.prompt = prompt
self.length = length
self.inline = inline
def __repr__(self):
data = "rel='%s'" % self.rel
if self.name:
data += " name='%s'" % self.name
if self.render:
data += " render='%s'" % self.render
if self.prompt:
data += " prompt='%s'" % self.prompt
if self.length:
data += " length='%s'" % self.length
if self.inline:
data += " inline='%s'" % self.inline
return "<Link: %s>" % data
def to_dict(self):
"""Return a dictionary representing a Link object."""
output = {
'href': self.href,
'rel': self.rel,
}
if self.name is not None:
output['name'] = self.name
if self.render is not None:
output['render'] = self.render
if self.prompt is not None:
output['prompt'] = self.prompt
if self.length is not None:
output['length'] = self.length
if self.inline is not None:
output['inline'] = self.inline
return output
class Error(ComparableObject):
"""Object representing a Collection+JSON error object."""
def __init__(self, code=None, message=None, title=None):
self.code = code
self.message = message
self.title = title
def __repr__(self):
data = ''
if self.code is not None:
data += " code='%s'" % self.code
if self.message is not None:
data += " message='%s'" % self.message
if self.title is not None:
data += " title='%s'" % self.title
return "<Error%s>" % data
def to_dict(self):
"""Return a dictionary representing the Error instance."""
output = {}
if self.code:
output['code'] = self.code
if self.message:
output['message'] = self.message
if self.title:
output['title'] = self.title
return output
class Template(ComparableObject):
"""Object representing a Collection+JSON template object."""
data = ArrayProperty(Data, "data")
@staticmethod
def from_json(data):
"""Return a template instance.
Convenience method for parsing 'write' responses,
which should only contain a template object.
This method parses a json string into a Template object.
Raises `ValueError` when no valid document is provided.
"""
try:
data = json.loads(data)
kwargs = data.get('template')
if not kwargs:
raise ValueError
except ValueError:
raise ValueError('Not valid Collection+JSON template data.')
template = Template(**kwargs)
return template
def __init__(self, data=None):
self.data = data
def __repr__(self):
data = [str(item.name) for item in self.data]
return "<Template: data=%s>" % data
def __getattr__(self, name):
return getattr(self.data, name)
@property
def properties(self):
"""Return a list of names that can be looked up on the template."""
return [item.name for item in self.data]
def to_dict(self):
"""Return a dictionary representing a Template object."""
return {
'template': self.data.to_dict()
}
class Array(ComparableObject, list):
"""Object representing a Collection+JSON array."""
def __init__(self, item_class, collection_name, items):
self.item_class = item_class
self.collection_name = collection_name
super(Array, self).__init__(self._build_items(items))
def _build_items(self, items):
result = []
for item in items:
if isinstance(item, self.item_class):
result.append(item)
elif isinstance(item, dict):
result.append(self.item_class(**item))
else:
raise ValueError("Invalid value for %s: %r" % (
self.item_class.__name__, item))
return result
def __eq__(self, other):
"""Return True if both instances are equivalent."""
return (super(Array, self).__eq__(other) and
list.__eq__(self, other))
def __ne__(self, other):
"""Return True if both instances are not equivalent."""
return (super(Array, self).__ne__(other) or
list.__ne__(self, other))
def __getattr__(self, name):
results = self.find(name=name)
if not results:
raise AttributeError
elif len(results) == 1:
results = results[0]
return results
def _matches(self, name=None, rel=None):
for item in self:
item_name = getattr(item, 'name', None)
item_rel = getattr(item, 'rel', None)
if name is not None and item_name == name and rel is None:
# only searching by name
yield item
elif rel is not None and item_rel == rel and name is None:
# only searching by rel
yield item
elif item_name == name and item_rel == rel:
# searching by name and rel
yield item
def find(self, name=None, rel=None):
"""Return a list of items in the array matching name and/or rel.
If both name and rel parameters are provided, returned items must match
both properties.
"""
return list(self._matches(name=name, rel=rel))
def get(self, name=None, rel=None):
"""Return the first item in the array matching name and/or rel.
If both name and rel parameters are provided, the returned item must
match both properties.
If no item is found, raises ValueError.
"""
try:
return next(self._matches(name=name, rel=rel))
except StopIteration:
raise ValueError('No matching item found.')
def to_dict(self):
"""Return a dictionary representing an Array object."""
if self.item_class is Collection:
data = {
item.href: item.to_dict() for item in self
}
else:
data = [
item.to_dict() for item in self
]
if self.collection_name is not None:
return {
self.collection_name: data
}
return data
class Item(ComparableObject):
"""Object representing a Collection+JSON item object."""
data = ArrayProperty(Data, "data")
links = ArrayProperty(Link, "links")
def __init__(self, href=None, data=None, links=None):
self.href = href
self.data = data
self.links = links
def __repr__(self):
return "<Item: href='%s'>" % self.href
def __getattr__(self, name):
return getattr(self.data, name)
@property
def properties(self):
"""Return a list of names that can be looked up on the item."""
return [item.name for item in self.data]
def to_dict(self):
"""Return a dictionary representing an Item object."""
output = {}
if self.href:
output['href'] = self.href
if self.data:
output.update(self.data.to_dict())
if self.links:
output.update(self.links.to_dict())
return output
class Query(ComparableObject):
"""Object representing a Collection+JSON query object."""
data = ArrayProperty(Data, "data")
def __init__(self, href, rel, name=None, prompt=None, data=None):
self.href = href
self.rel = rel
self.name = name
self.prompt = prompt
self.data = data
def __repr__(self):
data = "rel='%s'" % self.rel
if self.name:
data += " name='%s'" % self.name
if self.prompt:
data += " prompt='%s'" % self.prompt
return "<Query: %s>" % data
def to_dict(self):
"""Return a dictionary representing a Query object."""
output = {
'href': self.href,
'rel': self.rel,
}
if self.name is not None:
output['name'] = self.name
if self.prompt is not None:
output['prompt'] = self.prompt
if len(self.data):
output.update(self.data.to_dict())
return output
class Collection(ComparableObject):
"""Object representing a Collection+JSON document."""
@staticmethod
def from_json(data):
"""Return a Collection instance.
This method parses a json string into a Collection object.
Raises `ValueError` when no valid document is provided.
"""
try:
data = json.loads(data)
kwargs = data.get('collection')
if not kwargs:
raise ValueError
if 'inline' in kwargs and kwargs['inline']:
kwargs['inline'] = [Collection(**data.get('collection'))
for data in kwargs['inline'].values()]
except ValueError:
raise ValueError('Not a valid Collection+JSON document.')
collection = Collection(**kwargs)
return collection
def __new__(cls, *args, **kwargs):
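        # The descriptors are attached at first instantiation rather than in
        # the class body because the `inline` property needs a reference to
        # Collection itself, which does not exist until the class statement
        # has finished executing.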
cls.error = TypedProperty(Error, 'error')
cls.errors = DictProperty(Error, 'errors')
cls.template = TypedProperty(Template, 'template')
cls.items = ArrayProperty(Item, 'items')
cls.links = ArrayProperty(Link, 'links')
cls.inline = ArrayProperty(Collection, 'inline')
cls.queries = ArrayProperty(Query, 'queries')
return super(Collection, cls).__new__(cls)
def __init__(self, href, links=None, items=None, inline=None, queries=None,
template=None, error=None, errors=None, version='1.0'):
self.version = version
self.href = href
self.error = error
self.errors = errors
self.template = template
self.items = items
self.links = links
self.inline = inline
self.queries = queries
def __repr__(self):
return "<Collection: version='%s' href='%s'>" % (
self.version, self.href)
def __str__(self):
return json.dumps(self.to_dict())
def to_dict(self):
"""Return a dictionary representing a Collection object."""
output = {
'collection': {
'version': self.version,
'href': self.href,
}
}
if self.links:
output['collection'].update(self.links.to_dict())
if self.items:
output['collection'].update(self.items.to_dict())
if self.inline:
output['collection'].update(self.inline.to_dict())
if self.queries:
output['collection'].update(self.queries.to_dict())
if self.template:
output['collection'].update(self.template.to_dict())
if self.error:
output['collection'].update(self.error.to_dict())
if self.errors:
output['collection']['errors'] = {name : value.to_dict() for name, value in self.errors.items()}
return output
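# A quick usage sketch (names and URLs are illustrative): build a small
# document, serialize it to JSON, then parse it back and look values up
# by data name.
if __name__ == '__main__':
    collection = Collection(
        href='http://example.org/friends/',
        items=[{'href': 'http://example.org/friends/jdoe',
                'data': [{'name': 'full-name', 'value': 'J. Doe'}]}],
        queries=[{'href': 'http://example.org/friends/search',
                  'rel': 'search', 'data': [{'name': 'search'}]}],
    )
    document = str(collection)  # JSON text via Collection.__str__

    parsed = Collection.from_json(document)
    print(parsed.items[0].properties)                        # ['full-name']
    print(parsed.items[0].data.get(name='full-name').value)  # J. Doe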
import datetime
import django_rq
from django.apps import AppConfig
class TheFederationConfig(AppConfig):
name = "thefederation"
verbose_name = "The Federation"
def ready(self):
from thefederation.social import make_daily_post
from thefederation.tasks import aggregate_daily_stats
from thefederation.tasks import poll_nodes
scheduler = django_rq.get_scheduler('high')
# Delete any existing jobs in the scheduler when the app starts up
for job in scheduler.get_jobs():
job.delete()
scheduler.schedule(
scheduled_time=datetime.datetime.utcnow(),
func=aggregate_daily_stats,
interval=5500,
)
scheduler.cron(
'0 10 * * *',
func=make_daily_post,
queue_name='high',
)
scheduler = django_rq.get_scheduler('medium')
scheduler.schedule(
scheduled_time=datetime.datetime.utcnow(),
func=poll_nodes,
interval=10800,
)
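# A small debugging helper (a sketch, not part of the app config): list the
# jobs currently registered on a scheduler, together with their next run
# times, e.g. from `./manage.py shell`. `get_jobs(with_times=True)` is
# rq-scheduler's documented way to get (job, datetime) pairs; verify it
# against the installed version.
def print_scheduled_jobs(queue_name='high'):
    scheduler = django_rq.get_scheduler(queue_name)
    for job, next_run in scheduler.get_jobs(with_times=True):
        print(job.func_name, 'next run at', next_run)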
TOKYO — Japanese semiconductor company Renesas Electronics plans to eliminate roughly 1,000 jobs, equivalent to about 5% of its workforce. The cuts are targeted at employees working in Japan.
Renesas will soon begin soliciting applications for voluntary early retirement.
Renesas, which will announce its fourth-quarter financial results on Friday, told EE Times that the move reflects the company's desire to devote more resources to its business opportunities overseas.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
from BasePanel import BasePanel
from DimensionNameDialog import DimensionNameDialog
from DialogClassParameters import DialogClassParameters
from DocumentReferenceDialog import DocumentReferenceDialog
from ConceptReferenceDialog import ConceptReferenceDialog
from Borg import Borg
class PersonaCharacteristicPanel(BasePanel):
def __init__(self,parent):
BasePanel.__init__(self,parent,armid.PERSONACHARACTERISTIC_ID)
self.theId = None
b = Borg()
self.dbProxy = b.dbProxy
def buildControls(self,isCreate,inPersona):
mainSizer = wx.BoxSizer(wx.VERTICAL)
if (inPersona == False):
personas = self.dbProxy.getDimensionNames('persona')
mainSizer.Add(self.buildComboSizerList('Persona',(87,30),armid.PERSONACHARACTERISTIC_COMBOPERSONA_ID,personas),0,wx.EXPAND)
mainSizer.Add(self.buildRadioButtonSizer('Type',(87,30),[(armid.PERSONACHARACTERISTIC_RADIOREFERENCE_ID,'Reference'),(armid.PERSONACHARACTERISTIC_RADIOCONCEPT_ID,'Concept')]))
refs = ['[New reference]']
refs += self.dbProxy.getDimensionNames('document_reference')
mainSizer.Add(self.buildComboSizerList('Reference',(87,30),armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID,refs),0,wx.EXPAND)
if (inPersona == False):
bVars = self.dbProxy.getDimensionNames('behavioural_variable')
mainSizer.Add(self.buildComboSizerList('Behavioural Variable',(87,30),armid.PERSONACHARACTERISTIC_COMBOVARIABLE_ID,bVars),0,wx.EXPAND)
mainSizer.Add(self.buildMLTextSizer('Characteristic',(87,30),armid.PERSONACHARACTERISTIC_TEXTCHARACTERISTIC_ID),1,wx.EXPAND)
mainSizer.Add(self.buildCommitButtonSizer(armid.PERSONACHARACTERISTIC_BUTTONCOMMIT_ID,isCreate),0,wx.CENTER)
wx.EVT_COMBOBOX(self,armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID,self.onReferenceChange)
wx.EVT_RADIOBUTTON(self,armid.PERSONACHARACTERISTIC_RADIOREFERENCE_ID,self.onReferenceSelected)
wx.EVT_RADIOBUTTON(self,armid.PERSONACHARACTERISTIC_RADIOCONCEPT_ID,self.onConceptSelected)
self.SetSizer(mainSizer)
def loadControls(self,objt,inPersona):
self.theId = objt.id()
refCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID)
charCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_TEXTCHARACTERISTIC_ID)
refCtrl.SetValue(objt.reference())
charCtrl.SetValue(objt.characteristic())
if (inPersona == False):
pCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOPERSONA_ID)
varCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOVARIABLE_ID)
pCtrl.SetValue(objt.persona())
varCtrl.SetValue(objt.behaviouralVariable())
def onReferenceChange(self,evt):
refValue = evt.GetString()
if (refValue == '[New reference]' or refValue == '[New concept]'):
if (refValue == '[New reference]'):
addParameters = DialogClassParameters(armid.DOCUMENTREFERENCE_ID,'Add Document Reference',DocumentReferenceDialog,armid.DOCUMENTREFERENCE_BUTTONCOMMIT_ID,self.dbProxy.addDocumentReference,True)
else:
addParameters = DialogClassParameters(armid.CONCEPTREFERENCE_ID,'Add Concept Reference',ConceptReferenceDialog,armid.CONCEPTREFERENCE_BUTTONCOMMIT_ID,self.dbProxy.addConceptReference,True)
dialogClass = addParameters.dclass()
addDialog = dialogClass(self,addParameters)
if (addDialog.ShowModal() == addParameters.createButtonId()):
dialogOutParameters = addDialog.parameters()
addFn = addParameters.setter()
objtId = addFn(dialogOutParameters)
dimName = dialogOutParameters.name()
refCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID)
refCtrl.Append(dimName)
refCtrl.SetValue(dimName)
addDialog.Destroy()
def onReferenceSelected(self,evt):
refCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID)
refs = ['[New reference]']
refs += self.dbProxy.getDimensionNames('document_reference')
refCtrl.SetItems(refs)
refCtrl.SetValue('')
def onConceptSelected(self,evt):
refCtrl = self.FindWindowById(armid.PERSONACHARACTERISTIC_COMBOREFERENCE_ID)
refs = ['[New concept]']
refs += self.dbProxy.getDimensionNames('concept_reference')
refCtrl.SetItems(refs)
refCtrl.SetValue('')
Wool is the hair of sheep, goats, camels and their relatives, such as the alpaca. The gold standard of high-end carpeting, wool has a lustrous feel, resists static and stains, and is naturally flame retardant. The downside of wool is that it is expensive, so it is often blended with other fibers such as nylon and polyester.
Nylon is a synthetic fiber. Indeed, it is the strongest and the most popular of the synthetic fibers and resists mildew and stains.
Polyester looks like wool but is much less expensive. It can be used anywhere in the home, including high traffic areas and is famous for its bright colors. However, it is subject to matting.
Acrylic is also very wool-like and is mildew, insect and abrasion resistant. It also comes in a wide range of bright colors, but has a tendency to pill with time.
This synthetic fiber can be used indoors or outdoors. This is because it’s very tough and nonabsorbent. It’s the most stain-resistant fiber of those mentioned.
Many different species of oak are used for flooring, including red and white. Quartered oak is a type of oak where the planks have been sawed through the center of the tree, which makes the grain more decorative.
Maple is a strong, hard, light-colored wood.
Birch is similar to maple in that it’s light and strong, and has a fine-grain. It can be stained to mimic more expensive woods such as mahogany or walnut.
Beech can come from America or Europe and it resembles both birch and maple.
Walnut is a light brown wood that is grown on every continent except Antarctica. A homeowner has a choice of English walnut, black walnut, or white walnut, also known as butternut. Walnut was once such a popular wood for furnishings that there is a period named after it, dating from around 1660 to 1720.
This much-sought-after tropical hardwood has a gorgeous reddish hue and an equally beautiful grain.
import numpy as np
import scipy.signal
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def cnn_convolve(patch_dim, num_features, images, W, b, zca_white, patch_mean):
"""
Returns the convolution of the features given by W and b with
the given images
:param patch_dim: patch (feature) dimension
:param num_features: number of features
:param images: large images to convolve with, matrix in the form
images(r, c, channel, image number)
:param W: weights of the sparse autoencoder
:param b: bias of the sparse autoencoder
:param zca_white: zca whitening
:param patch_mean: mean of the images
:return:
"""
num_images = images.shape[3]
image_dim = images.shape[0]
image_channels = images.shape[2]
# Instructions:
# Convolve every feature with every large image here to produce the
# numFeatures x numImages x (imageDim - patchDim + 1) x (imageDim - patchDim + 1)
# matrix convolvedFeatures, such that
# convolvedFeatures(featureNum, imageNum, imageRow, imageCol) is the
# value of the convolved featureNum feature for the imageNum image over
# the region (imageRow, imageCol) to (imageRow + patchDim - 1, imageCol + patchDim - 1)
#
# Expected running times:
# Convolving with 100 images should take less than 3 minutes
# Convolving with 5000 images should take around an hour
    # (To save time when testing, convolve with fewer images.)
convolved_features = np.zeros(shape=(num_features, num_images, image_dim - patch_dim + 1,
image_dim - patch_dim + 1),
dtype=np.float64)
WT = W.dot(zca_white)
bT = b - WT.dot(patch_mean)
for i in range(num_images):
for j in range(num_features):
# convolution of image with feature matrix for each channel
convolved_image = np.zeros(shape=(image_dim - patch_dim + 1, image_dim - patch_dim + 1),
dtype=np.float64)
for channel in range(image_channels):
# Obtain the feature (patchDim x patchDim) needed during the convolution
patch_size = patch_dim * patch_dim
feature = WT[j, patch_size * channel:patch_size * (channel + 1)].reshape(patch_dim, patch_dim)
# Flip the feature matrix because of the definition of convolution, as explained later
feature = np.flipud(np.fliplr(feature))
# Obtain the image
im = images[:, :, channel, i]
# Convolve "feature" with "im", adding the result to convolvedImage
# be sure to do a 'valid' convolution
convolved_image += scipy.signal.convolve2d(im, feature, mode='valid')
# Subtract the bias unit (correcting for the mean subtraction as well)
# Then, apply the sigmoid function to get the hidden activation
convolved_image = sigmoid(convolved_image + bT[j])
# The convolved feature is the sum of the convolved values for all channels
convolved_features[j, i, :, :] = convolved_image
return convolved_features
def cnn_pool(pool_dim, convolved_features):
"""
Pools the given convolved features
:param pool_dim: dimension of the pooling region
:param convolved_features: convolved features to pool (as given by cnn_convolve)
convolved_features(feature_num, image_num, image_row, image_col)
:return: pooled_features: matrix of pooled features in the form
pooledFeatures(featureNum, imageNum, poolRow, poolCol)
"""
num_images = convolved_features.shape[1]
num_features = convolved_features.shape[0]
convolved_dim = convolved_features.shape[2]
    assert convolved_dim % pool_dim == 0, \
        "Convolved dimension is not an exact multiple of the pooling dimension"
    pool_size = convolved_dim // pool_dim  # integer division so it can be used as a shape
pooled_features = np.zeros(shape=(num_features, num_images, pool_size, pool_size),
dtype=np.float64)
for i in range(pool_size):
for j in range(pool_size):
pool = convolved_features[:, :, i * pool_dim:(i + 1) * pool_dim, j * pool_dim:(j + 1) * pool_dim]
pooled_features[:, :, i, j] = np.mean(np.mean(pool, 2), 2)
    return pooled_features
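# A minimal smoke test for the two functions above, using random data in
# place of a trained sparse autoencoder. The shapes are illustrative; the
# identity ZCA matrix and zero patch mean simply disable whitening.
if __name__ == '__main__':
    patch_dim, num_features, image_dim, channels, num_images = 8, 4, 64, 3, 2
    patch_size = patch_dim * patch_dim * channels

    images = np.random.rand(image_dim, image_dim, channels, num_images)
    W = np.random.randn(num_features, patch_size) * 0.01
    b = np.zeros(num_features)
    zca_white = np.eye(patch_size)   # identity: no whitening
    patch_mean = np.zeros(patch_size)

    conv = cnn_convolve(patch_dim, num_features, images, W, b,
                        zca_white, patch_mean)
    pooled = cnn_pool(19, conv)      # 64 - 8 + 1 = 57 = 3 * 19
    print(conv.shape, pooled.shape)  # (4, 2, 57, 57) (4, 2, 3, 3)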
Information and Decision Sciences Associate Professor Brad Greenwood received the Past Division Chairs’ Emerging Scholar Award at the recent Academy of Management (AOM) annual conference in Chicago.
The award, presented by the Technology and Innovation Management (TIM) Division of the AOM, is given annually to an up-and-coming scholar who has achieved a notable publication record early in his or her academic career and whose scholarly contributions show exceptional quality and great promise of becoming influential in the area of technology and innovation management. The recipient is selected by a committee of past TIM division chairs.
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock a ping response.
CNCore.ping() → null
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNCore.ping
MNRead.ping() → null
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/MN_APIs.html#MNCore.ping
A DataONEException can be triggered by adding a custom header. See
d1_exception.py
"""
import logging
import re
import responses
import d1_common.const
import d1_common.url
import d1_test.mock_api.d1_exception
PING_ENDPOINT_RX = r"v([123])/monitor/ping"
def add_callback(base_url):
responses.add_callback(
responses.GET,
re.compile(r"^" + d1_common.url.joinPathElements(base_url, PING_ENDPOINT_RX)),
callback=_request_callback,
content_type="",
)
def _request_callback(request):
logging.debug('Received callback. url="{}"'.format(request.url))
# Return DataONEException if triggered
exc_response_tup = d1_test.mock_api.d1_exception.trigger_by_header(request)
if exc_response_tup:
return exc_response_tup
# Return regular response
body_str = "OK"
header_dict = {"Content-Type": d1_common.const.CONTENT_TYPE_OCTET_STREAM}
return 200, header_dict, body_str
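# A self-contained sketch of exercising this mock (the base URL is
# illustrative): register the callback, then issue a real `requests` call,
# which `responses.activate` intercepts.
if __name__ == '__main__':
    import requests

    @responses.activate
    def _demo():
        base_url = 'http://mock-node.example.org/mn'
        add_callback(base_url)
        response = requests.get(base_url + '/v2/monitor/ping')
        assert response.status_code == 200
        assert response.text == 'OK'
        print('ping mock OK')

    _demo()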
"So You Want to Learn More? Find Your Voice"
Join Lacie as she teaches you how to discover your voice onstage. Learn to access dozens of characters, learn the difference between projecting and volume, discover how your body can inspire new voices, and so much more.
import pygame as pg
from pygame.locals import *
from constants import *
import sys
AROUND = [[0,0],
[1,0],
[1,1],
[0,1],
[-1,1],
[-1,0],
[-1,-1],
[0,-1],
[1,-1]]
def MOVE(cell,vec):
return [cell[0]+vec[0],cell[1]+vec[1]]
KEY_ON = {
"UP":False,
"DOWN":False,
"LEFT":False,
"RIGHT":False,
"SPACE":False,
"LCLICK":False,
"RCLICK":False}
def onScreen(pos):
ret = True
if pos[0] < 0:
ret = False
elif pos[0] >= W:
ret = False
if pos[1] < 0:
ret = False
elif pos[1] >= H:
ret = False
return ret
def verifyColor(color):
r = color[0]
g = color[1]
b = color[2]
if r < 0:
r = 0
elif r > 255:
r = 255
    if g < 0:
        g = 0
    elif g > 255:
        g = 255
if b < 0:
b = 0
elif b > 255:
b = 255
return (r,g,b)
def handleEvent(WORLD,event,M_MASK):
mb = pg.mouse.get_pressed()
N_MASK = 0
if mb[0]:
N_MASK += M_L
if mb[1]:
N_MASK += M_M
if mb[2]:
N_MASK += M_R
D_MASK = -(N_MASK - M_MASK)
M_MASK = N_MASK
if event.type == QUIT:
pg.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_F4:
pg.quit()
sys.exit()
if event.key == K_DOWN or event.key == K_s:
KEY_ON["DOWN"] = True
if event.key == K_UP or event.key == K_w:
KEY_ON["UP"] = True
if event.key == K_LEFT or event.key == K_a:
KEY_ON["LEFT"] = True
if event.key == K_RIGHT or event.key == K_d:
KEY_ON["RIGHT"] = True
if event.key == K_SPACE:
KEY_ON["SPACE"] = True
if event.type == MOUSEBUTTONDOWN:
if pg.mouse.get_pressed()[0]:
KEY_ON["LCLICK"] = True
WORLD.signal('LCLICK')
if pg.mouse.get_pressed()[2]:
KEY_ON["RCLICK"] = True
WORLD.signal('RCLICK')
else:
if D_MASK & M_R:
KEY_ON["RCLICK"] = False
if D_MASK & M_L:
KEY_ON["LCLICK"] = False
if event.type == KEYUP:
if event.key == K_DOWN or event.key == K_s:
KEY_ON["DOWN"] = False
if event.key == K_UP or event.key == K_w:
KEY_ON["UP"] = False
if event.key == K_LEFT or event.key == K_a:
KEY_ON["LEFT"] = False
if event.key == K_RIGHT or event.key == K_d:
KEY_ON["RIGHT"] = False
if event.key == K_SPACE:
KEY_ON["SPACE"] = False
return M_MASK
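# A minimal event loop around handleEvent (a sketch). WORLD is assumed to be
# the game object providing signal(), and W and H from constants are assumed
# to be the screen dimensions in pixels.
def run(WORLD, fps=60):
    pg.init()
    pg.display.set_mode((W, H))
    clock = pg.time.Clock()
    # Track the mouse-button bitmask across frames so handleEvent can detect
    # button releases through D_MASK.
    m_mask = 0
    while True:
        for event in pg.event.get():
            m_mask = handleEvent(WORLD, event, m_mask)
        if KEY_ON["UP"]:
            pass  # e.g. move the player or scroll the view
        pg.display.flip()
        clock.tick(fps)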
Currently, single email consultations are free for all newsletter subscribers, but if your situation needs immediate attention, or if you’d like a more thorough evaluation, you can book a Skype consultation for $53.
Many times, we don’t see things clearly from our point of view and it takes someone from the outside looking in to point out what we’re missing.
With all of the demands life brings to the table, it’s easy to feel lost at times. Someone with the right resources and information can help provide you with a sense of direction.
We’re often not the best judges of our own progress, decisions, or situations. Because of this, working with someone who can give you a healthy reality check is one of the most effective ways to ensure you’re staying on track.
Will our session remain private?
Absolutely. Only I will attend on my end, and nothing will be recorded. Your privacy is a high priority of mine.
If something requires further advice after the initial problem is resolved, can I check back in with you?
Yes, but this will need to be done through email. And because I don't record anything, after time passes and I work with other clients, you may need to remind me of your particular situation.
If for some reason I’m not able to make the appointment and need to reschedule, will I be able to do so?
Yes. I book my appointments through Calendly, which will allow you to reschedule.
import os
import random
import numpy as np
import importlib
def get_listfile(image_dir, extension=".jpg"):
if not image_dir.endswith("/"):
image_dir = image_dir + "/"
image_list = os.listdir(image_dir)
image_list = [image_dir + image for image in image_list if image.endswith(extension)]
return image_list
def get_dir_list(frame_dir):
if not frame_dir.endswith("/"):
frame_dir = frame_dir + "/"
dir_list = os.listdir(frame_dir)
dir_list = [frame_dir +
image_dir for image_dir in dir_list if os.path.isdir(frame_dir + image_dir)]
return dir_list
def delete_last_empty_line(s):
end_index = len(s) - 1
while(end_index >= 0 and (s[end_index] == "\n" or s[end_index] == "\r")):
end_index -= 1
s = s[:end_index + 1]
return s
def read_file(file_name):
with open(file_name, "r") as f:
s = f.read()
s = delete_last_empty_line(s)
s_l = s.split("\n")
for i, l in enumerate(s_l):
if l.endswith("\r"):
s_l[i] = s_l[i][:-1]
return s_l
def save_file(string_list, file_name, shuffle_data=False):
if (shuffle_data):
random.shuffle(string_list)
with open(file_name, "w") as f:
if not len(string_list):
f.write("")
else:
file_string = '\n'.join(string_list)
if (file_string[-1] != "\n"):
file_string += "\n"
f.write(file_string)
def get_file_length(file_name):
with open(file_name, 'r') as f:
s = f.read()
s_l = s.split("\n")
total_len = len(s_l)
return total_len
def save_numpy_array(numpy_array, file_name):
numpy_array.tofile(file_name)
def remove_extension(file_name):
index = file_name.rfind(".")
if (index == -1):
return file_name
else:
return file_name[0:index]
def import_module_class(module_name, class_name=None):
module = importlib.import_module(module_name)
    if class_name is None:
return module
else:
return getattr(module, class_name)
def check_exist(file_name):
"""
Args:
file_name: file name of the file list
i.e.: train_list.txt
"""
file_list = read_file(file_name)
for i, f in enumerate(file_list):
f_l = f.split(" ")
for ff in f_l:
is_exist = os.path.exists(ff)
if not is_exist:
raise OSError("In %s, row: %d, "
"%s does not exist" % (file_name, i, ff))
def save_string(input_string, file_name):
if os.path.exists(file_name):
mode = "a"
else:
mode = "w"
if not input_string.endswith("\n"):
input_string += "\n"
with open(file_name, mode) as f:
f.write(input_string)
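# Example usage (file names are illustrative): write a list of lines,
# read it back, and report the length. Note that get_file_length counts
# the trailing newline written by save_file as an extra element.
if __name__ == "__main__":
    lines = ["a.jpg 0", "b.jpg 1", "c.jpg 0"]
    save_file(lines, "train_list.txt", shuffle_data=True)
    print(read_file("train_list.txt"))       # the same lines, shuffled
    print(get_file_length("train_list.txt"))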
It is vital to keep a stock of standard medical treatments, such as pain-killers, cold remedies, plasters and antiseptic. You never know when you're going to come down with something, and the last thing you need is a trip to the pharmacy with a migraine.
We can help you to stock your first aid kit with all the necessities (where would we all be without a box of Lemsip or Ibuprofen in the kitchen cupboard?), as well as a few extras that you may need from time to time.
If you are having trouble sleeping, then our selection of herbal remedies (such as Nytol or Kalms) may be just the ticket. Or if you suffer from hayfever, then check out our choice of Benadryl and Clarityn - you'll be hayfever-free in no time!
There are a huge number of minor ailments that, while not life-threatening, can cause a great deal of discomfort. We can provide you with everything you need to soothe these discomforts, so you can get on with your day-to-day life.
# -*- coding: utf-8 -*-
"""
longboxed.manage.users
~~~~~~~~~~~~~~~~~~~~~
user management commands
"""
from flask import current_app
from flask.ext.script import Command, Option, prompt, prompt_pass
from flask.ext.security.forms import RegisterForm
from flask.ext.security.registerable import register_user
from werkzeug.datastructures import MultiDict
from werkzeug.local import LocalProxy
from ..core import db
from ..models import User, Role, Publisher
class RemovePublisherTitleFromPullLists(Command):
"""
Removes all instances of titles by a certain publisher from all users pull
lists
"""
def get_options(self):
return [
Option('-p', '--publisher', dest='publisher', required=True),
]
def run(self, publisher=None):
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '!! Starting: Removing all \'%s\' titles from users pull lists' % publisher
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
if Publisher.query.filter_by(name=publisher).first():
pagination = User.query.paginate(1, per_page=20, error_out=False)
has_next = True
while has_next:
for user in pagination.items:
save_user = False
for title in user.pull_list:
if title.publisher.name == publisher:
print 'Removing %s from %s\'s pull list...' % (title.name, user.email)
save_user = True
user.pull_list.remove(title)
if save_user:
user.save()
if pagination.page:
percent_complete = (pagination.page/float(pagination.pages)) * 100.0
print '%.2f%% complete...' % percent_complete
if pagination.has_next:
pagination = pagination.next(error_out=False)
else:
has_next = False
else:
print 'Publisher \'%s\' not found' % publisher
class CreateNewRoleCommand(Command):
"""Creates a role"""
def run(self):
name = prompt('Role Name')
description = prompt('Role Description')
_security_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
_security_datastore.create_role(name=name, description=description)
db.session.commit()
return
class CreateDefaultRolesCommand(Command):
"""Creates inital roles (user, admin, super)"""
def run(self):
default_roles = [('user', 'No Permissions'), ('admin', 'Comic specific permissions'), ('super', 'All permissions')]
_security_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
for role in default_roles:
_security_datastore.find_or_create_role(name=role[0], description=role[1])
db.session.commit()
        print 'Successfully added roles'
class CreateUserCommand(Command):
"""Create a user"""
def run(self):
email = prompt('Email')
password = prompt_pass('Password')
password_confirm = prompt_pass('Confirm Password')
data = MultiDict(dict(email=email, password=password, password_confirm=password_confirm))
form = RegisterForm(data, csrf_enabled=False)
if form.validate():
user = register_user(email=email, password=password)
print '\nUser created successfully'
            print 'User(id=%s email=%s)' % (user.id, user.email)
return
print '\nError creating user:'
for errors in form.errors.values():
print '\n'.join(errors)
class AddSuperUserRoleCommand(Command):
"""Gives the given user SuperUser role"""
def run(self):
email = prompt('Email')
# user = users.first(email=email)
user = User.query.filter_by(email=email).first()
if user:
_security_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
admin_role = _security_datastore.find_role('admin')
super_role = _security_datastore.find_role('super')
_security_datastore.add_role_to_user(user, super_role)
_security_datastore.add_role_to_user(user, admin_role)
db.session.commit()
            print '\nUser given super role successfully'
return
print '\nNo user found'
class AddAdminUserRoleCommand(Command):
"""Gives the given user admin role"""
def run(self):
email = prompt('Email')
# user = users.first(email=email)
user = User.query.filter_by(email=email).first()
if user:
_security_datastore = LocalProxy(lambda: current_app.extensions['security'].datastore)
admin_role = _security_datastore.find_role('admin')
_security_datastore.add_role_to_user(user, admin_role)
db.session.commit()
            print '\nUser given admin role successfully'
return
print '\nNo user found'
class ListRolesCommand(Command):
"""List all roles"""
def run(self):
for r in Role.query.all():
print 'Role(name=%s description=%s)' % (r.name, r.description)
# for r in roles.all():
# print 'Role(name=%s description=%s)' % (r.name, r.description)
class ListUsersCommand(Command):
"""List all users"""
def run(self):
for u in User.query.all():
print 'User(id=%s email=%s)' % (u.id, u.email)
# for u in users.all():
# print 'User(id=%s email=%s)' % (u.id, u.email)
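# A minimal wiring sketch showing how these commands are registered on a
# Flask-Script Manager. The `longboxed.factory.create_app` import is an
# assumption about the application factory; adjust it to the project layout.
if __name__ == '__main__':
    from flask.ext.script import Manager
    from longboxed.factory import create_app  # assumed application factory

    manager = Manager(create_app)
    manager.add_command('create_user', CreateUserCommand())
    manager.add_command('create_role', CreateNewRoleCommand())
    manager.add_command('list_users', ListUsersCommand())
    manager.run()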
Skeletons and exoskeletons / by Julie K. Ludgren.
Text and color photographs describe internal and external skeleton structures.
Series: Life science (Vero Beach, Fla.).
#!/usr/bin/env python
#
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import plaidbench.cli
SUPPORTED_NETWORKS = {
'keras': [
'densenet121',
'densenet169',
'densenet201',
'imdb_lstm',
'inception_resnet_v2',
'inception_v3',
'mobilenet',
'mobilenet_v2',
'nasnet_large',
'nasnet_mobile',
'resnet50',
'resnet50_v2',
'resnext50',
'vgg16',
'vgg19',
'xception',
],
'onnx': [
'bvlc_alexnet',
'densenet121',
'inception_v1',
'inception_v2',
'resnet50',
'shufflenet',
'squeezenet', # TODO: Fix inputs/outputs (only available as *.pb)
'vgg16',
'vgg19',
],
}
def make_parser():
# Create the parser outside of main() so the doc system can call this function
# and thereby generate a web page describing these options. See docs/index.rst.
parser = argparse.ArgumentParser()
plaidargs = parser.add_mutually_exclusive_group()
plaidargs.add_argument('--plaid', action='store_true', help="Use PlaidML as the backend.")
plaidargs.add_argument('--plaid-edsl',
action='store_true',
help="EXPERIMENTAL: Use PlaidML2 (EDSL) as the backend")
plaidargs.add_argument('--caffe2', action='store_true', help="Use Caffe2 as the backend.")
plaidargs.add_argument('--tf', action='store_true', help="Use TensorFlow as the backend.")
plaidargs.add_argument(
'--no-plaid',
action='store_true',
help="Use the non-PlaidML backend most appropriate to the chosen frontend")
frontendargs = parser.add_mutually_exclusive_group()
frontendargs.add_argument('--keras', action='store_true', help='Use Keras as the frontend')
frontendargs.add_argument('--onnx', action='store_true', help='Use ONNX as the frontend')
parser.add_argument('--fp16',
action='store_true',
help="Use half-precision floats, setting floatx='float16'.")
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help="Logging verbosity level (0..4).")
parser.add_argument('--results',
default='/tmp/plaidbench_results',
help="Destination directory for results output.")
parser.add_argument('--callgrind',
action='store_true',
help="Invoke callgrind during timing runs.")
parser.add_argument('--no-warmup', action='store_true', help="Skip the warmup runs.")
    parser.add_argument('--no-kernel-timing', action='store_true', help="Skip kernel timing.")
parser.add_argument('-n',
'--examples',
type=int,
default=None,
help="Number of examples to use.")
parser.add_argument('--epochs', type=int, default=1, help="Number of epochs per test.")
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--train',
action='store_true',
help="Measure training performance instead of inference.")
parser.add_argument('--blanket-run',
action='store_true',
help="Run all networks at a range of batch sizes, ignoring the "
"--batch-size and --examples options and the choice of network.")
parser.add_argument('--print-stacktraces',
action='store_true',
help="Print a stack trace if an exception occurs.")
parser.add_argument('--onnx-cpu',
action='store_true',
help='Use CPU instead of GPU (only used by ONNX)')
parser.add_argument('--refresh-onnx-data',
action='store_true',
help='Download ONNX data even if cached')
parser.add_argument('--tile', default=None, help='Export to this *.tile file')
parser.add_argument(
'--fix-learn-phase',
action='store_true',
help='Set the Keras learning_phase to an integer (rather than an input tensor)')
all_supported_networks = set()
for _, networks in SUPPORTED_NETWORKS.items():
all_supported_networks = all_supported_networks.union(networks)
parser.add_argument('module', choices=all_supported_networks, metavar='network')
return parser
def main():
exit_status = 0
parser = make_parser()
args = parser.parse_args()
argv = []
# plaidbench arguments
if args.verbose:
argv.append('-{}'.format('v' * args.verbose))
if args.results:
argv.append('--results={}'.format(args.results))
if args.callgrind:
argv.append('--callgrind')
if args.examples:
argv.append('--examples={}'.format(args.examples))
if args.epochs:
argv.append('--epochs={}'.format(args.epochs))
if args.batch_size:
argv.append('--batch-size={}'.format(args.batch_size))
if args.blanket_run:
argv.append('--blanket-run')
if args.no_warmup:
argv.append('--no-warmup')
if args.no_kernel_timing:
argv.append('--no-kernel-timing')
if args.print_stacktraces:
argv.append('--print-stacktraces')
if args.onnx:
# onnx arguments
argv.append('onnx')
if args.fp16:
raise NotImplementedError(
'With ONNX, --fp16 is defined by the model, not by the caller')
if args.train:
raise NotImplementedError('With ONNX, training vs. inference is model-specific')
if args.tile:
raise NotImplementedError(
'Can\'t currently save Tile code with PlaidBench ONNX backend.')
if args.onnx_cpu:
argv.append('--cpu')
if args.refresh_onnx_data:
argv.append('--no-use-cached-data')
if args.plaid_edsl:
argv.append('--plaid-edsl')
elif args.plaid or (not args.no_plaid and not args.caffe2 and not args.tf):
argv.append('--plaid')
elif args.caffe2:
argv.append('--caffe2')
else:
argv.append('--tensorflow')
else:
# keras arguments
argv.append('keras')
if args.tile:
argv.append('--tile={}'.format(args.tile))
if args.fp16:
argv.append('--fp16')
if args.train:
argv.append('--train')
if args.onnx_cpu:
raise NotImplementedError('--onnx_cpu is only meaningful with --onnx')
        if args.refresh_onnx_data:
            raise NotImplementedError('--refresh-onnx-data is only meaningful with --onnx')
if args.fix_learn_phase:
argv.append('--fix-learn-phase')
if args.plaid_edsl:
argv.append('--plaid-edsl')
os.environ["KERAS_BACKEND"] = "plaidml2.bridge.keras.__init__"
elif args.plaid or (not args.no_plaid and not args.caffe2 and not args.tf):
argv.append('--plaid')
elif args.caffe2:
raise ValueError('There is no Caffe2 backend for Keras')
else:
argv.append('--tensorflow')
if args.tile:
raise NotImplementedError('Can\'t save Tile code except in PlaidML')
# Networks
if args.module:
argv.append(args.module)
# Invoke plaidbench to do the actual benchmarking.
plaidbench.cli.plaidbench(args=argv)
if __name__ == '__main__':
main()
People are taught from early childhood how important good posture is. Good posture not only makes individuals look better and more self-confident, it is also healthy for the body. People with good posture are less prone to a bulging stomach and a hunched back, and they look more appealing. Posture develops out of daily habits, since any repetitive action can become a habit, and many people have bad posture owing to their occupation: office workers, truck drivers, car drivers, and anyone who has to sit for long stretches of the day. These individuals are prone not only to bad posture but also to some severe related disorders, such as back pain, neck pain and scoliosis. This is where posture braces for women, men and children come into the picture.
Poor posture is a fairly new but seriously damaging problem in the developed world, one that most people do not even realize they have. Bad posture causes more mental and physical health complications than most people imagine. The human body is designed for movement, not for sitting for hours on end without enough motion. With time, constant sitting becomes a bad habit, and this habit breeds fatigue, pain, headaches and depression.
Slouching and poor posture tend to cause a bad mood. A slouching posture prevents a person from working optimally, and the inability to produce optimal output in turn worsens the mood.
Sitting for an extended period or slouching slows down the body's internal processes and lowers energy levels, leaving individuals irritable, aggravated or tired.
Poor posture causes the body to compress and constrict, making the lungs and heart work harder to circulate oxygen and pump blood. This puts a great deal of stress on the internal organs and muscles.
Sitting or slouching compresses and constricts not only the heart and lungs but also the intestines and stomach, causing digestive problems such as acid reflux or even hernias. Bad posture thus weakens the digestive system and also encourages that belly pouch to develop.
Also known widely as “clavicle braces”, posture braces are products designed to relieve back pain, correct poor posture and prevent shoulder slump by supporting the upper body. These products come in various sizes, colours and designs. Posture braces can be gender-specific or unisex; there are braces made specifically for women, for men and for children. They are excellent tools for helping individuals get their posture back in order.
Poor posture has many more negative effects, including impaired breathing; back, shoulder and neck pain; and tension headaches. Posture braces are a great way to correct posture and avoid numerous bodily problems.
from epics import caget, caput, camonitor
from time import sleep
# Settings for the performance test
# open: OPEN THRESHOLD - 75%
# close: CLOSE THRESHOLD - 25%
open_pos = caget('CTRL-SUP-BOY:VC101-COH.VAL') + 15
close_pos = caget('CTRL-SUP-BOY:VC101-COL.VAL') - 15
start = caget('CTRL-SUP-BOY:PERF-SRT.VAL')
init = 40
firstValve = 101
lastValve = 350
tempo = 5
# scan: scan rate for the valve position
scan = caget('CTRL-SUP-BOY:PERF-SCAN.VAL')
maxLoop = int(caget('CTRL-SUP-BOY:PERF-LOOP.VAL'))
# define a callback function on 'pvname' and 'value'
def onChanges(pvname=None, value=None, **kw):
    global scan  # update the module-level scan rate when the PV changes
    scan = caget('CTRL-SUP-BOY:PERF-SCAN.VAL')
    print(pvname, str(value), repr(kw))
camonitor('CTRL-SUP-BOY:PERF-SCAN.VAL', callback=onChanges)
# Valve status initialisation
for iValve in range(firstValve, lastValve+1):
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-FB', init)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-CO', init)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-TRIP', 0)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-INTLK', 0)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-FOMD', 0)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-LOMD', 0)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-MAMD', 0)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-AUMD', 0)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-IOERR', 0)
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-IOSIM', 0)
# wait for START
for iLoop in range(0, maxLoop):
sleep(tempo)
for iValve in range(firstValve, lastValve+1):
        caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-CO', open_pos)
    for x in range(int(init), int(open_pos)+1):
for iValve in range(firstValve, lastValve+1):
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-FB', x)
extra = [ 'CTRL-SUP-BOY:VC'+ str(iValve) + '-FOMD'
, 'CTRL-SUP-BOY:VC'+ str(iValve) + '-LOMD'
, 'CTRL-SUP-BOY:VC'+ str(iValve) + '-MAMD'
, 'CTRL-SUP-BOY:VC'+ str(iValve) + '-AUMD']
output = (0 if caget(extra[x % len(extra)]) else 1)
caput(extra[x % len(extra)], output)
sleep(scan)
sleep(tempo)
for iValve in range(firstValve, lastValve+1):
        caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-CO', close_pos)
    for x in range(int(open_pos), int(close_pos)-1, -1):
for iValve in range(firstValve, lastValve+1):
caput('CTRL-SUP-BOY:VC'+ str(iValve) + '-FB', x)
extra = [ 'CTRL-SUP-BOY:VC'+ str(iValve) + '-INTLK'
, 'CTRL-SUP-BOY:VC'+ str(iValve) + '-TRIP'
, 'CTRL-SUP-BOY:VC'+ str(iValve) + '-IOSIM'
, 'CTRL-SUP-BOY:VC'+ str(iValve) + '-IOERR']
output = (0 if caget(extra[x % len(extra)]) else 1)
caput(extra[x % len(extra)], output)
sleep(scan)
Daily departures 7 days a week, 365 days of the year.
This tour is perfect for smart travelers who wish to take advantage of a few hours' stop at Medellin's international airport, Jose Maria Cordova, and explore the beautiful countryside. It does not matter if you have just a quick stop: our personalized tours will fit your needs, allowing you to make the best use of your limited time in Medellin. The countryside near the international airport, 45 minutes from Medellin, is a perfect excuse to get out and discover the beauty of the area. The tour starts upon your arrival at Jose Maria Cordova International Airport (JMC), where your private guide will hold a sign with your name outside of baggage claim. Just make sure to provide your full name and exact flight info.
Once your bilingual private guide has met you, the tour embarks on a personalized journey by air-conditioned car around the coziest villages located near the airport. These include the picturesque village of San Antonio de Pereira and the village of El Retiro, with a final stop at a well-recognized local restaurant for a tasty traditional snack as a treat. The tour is estimated to last 4 hours, and stops can be made at your convenience.
As you walk out of Medellin's international airport, get ready to explore the soothing scenery on the way to the first stop at San Antonio de Pereira, a warm little town that welcomes visitors with delicious regional dishes. Sample a traditional dessert (included in the price) as you stroll around and take the best postcard photos. There are bathrooms and cafeterias, and we recommend bringing Colombian pesos, since not every store takes cards.
Moving right along, the tour drives you to the Tequendamita natural retreat, passing through lush landscapes and magnificent mansions. Take a few minutes to explore this relaxing spot, where ice-cold water runs down from the top of the mountains, an amazing moment with Mother Nature, before heading to the next stop at the town of El Retiro, where a massive release of slaves took place in 1757.
Finally, at El Retiro's plaza you will learn more about the first village where slavery came to a historic end in the late 1800s. That is why the town celebrates a yearly tribute to its black and white communities by painting faces in those colors. Discover the temple that was built by slaves' hands before their release, then walk around the square to learn about our nation's heroes, absorb the local atmosphere, and pick up some souvenirs.
Finally, the tour takes you to one of the most popular local venues for a courtesy snack before heading back to the airport. Discover traditional tasty meals that will surely inspire you to come back to Medellin and spend more time here in the future. After approximately 4 hours, your guide will drive you back to the local or international airport in time to catch your next flight.
El Retiro village; stop at a local restaurant for a courtesy snack.
365 days of the year, daily departures 7 days a week. *Flexible start time, please inquire.
from lxml import etree
import hashlib
import re
from consts import *
def parseXml(filename):
parser = etree.XMLParser(strip_cdata=False)
try:
tree = etree.parse(filename, parser)
except etree.XMLSyntaxError as e:
        print(e)
        exit(1)
tree = tree.getroot()
return tree
def hasAttrib(e, a):
try:
return a in e.attrib
except:
return False
def deleteAttribFromTree(t, attrib):
    if t is None:
        return
    if hasattr(t, 'attrib') and attrib in t.attrib:
        del t.attrib[attrib]
    for e in t:
        deleteAttribFromTree(e, attrib)
def getAttribVal(node, attribName):
if hasattr(node, 'attrib') and attribName in node.attrib:
return node.attrib[attribName]
return None
def appendToAttrib(node, attribName, attribVal):
'''
Appends the string `attribVal` to the attribute `attribName` in `node`.
`node` is an lxml Element.
'''
oldAttribVal = getAttribVal(node, attribName) or ''
if not attribVal:
return
if attribVal in oldAttribVal.split():
return
if oldAttribVal: newAttribVal = oldAttribVal + SEP_FLAGS + attribVal
else: newAttribVal = attribVal
node.attrib[attribName] = newAttribVal
def setSourceline(t, sourceline):
    if t is None:
        return
    t.sourceline = sourceline
    for e in t:
        setSourceline(e, sourceline)
def getAll(node, keep=None, descendantOrSelf=True):
    '''
    Returns the list of nodes which are the descendants of `node`. Optionally,
    `node` can be included in the list.
    '''
    keepIsNone = keep is None
    keepIsFunction = hasattr(keep, '__call__')
    assert keepIsNone or keepIsFunction
    # Get all nodes; descendant-or-self includes `node` itself
    if descendantOrSelf: all = node.xpath('descendant-or-self::*')
    else: all = node.xpath('.//*')
    # Keep nodes in `all` for which keep(node) evaluates to true
    if keep:
        all = list(filter(keep, all))
    return all
def appendNotNone(src, dst):
    if src is None:
        return
    dst.append(src)
def extendFlatly(node, children):
'''
    Acts like `lxml.etree._Element.extend`, except it flattens the list of
    `children`. For example, calling `extendFlatly(node, [a, [b, c]])` is
    equivalent to `node.extend([a, b, c])` if `node`, `a`, `b` and `c` are
    instances of `lxml.etree._Element`.
'''
listTypes = [list, tuple]
okTypes = listTypes + [etree._Element]
assert type(children) in okTypes
if node is None:
return
if type(children) == etree._Element:
children = [children]
for child in children:
if type(child) == etree._Element: node.append(child)
if type(child) in listTypes: node.extend(child)
def flagAll(nodes, attrib, value):
for n in nodes:
n.attrib[attrib] = value
def getIndex(node):
    parent = node.getparent()
    if parent is None: return 0
    else: return parent.index(node)
def getPath(node):
'''
Returns a list of strings representing the ancestors of `node`, plus `node`
itself. The strings are the ancestors' tag names. For example, if `node` is
the lxml element `<My_ID/>` from a module.xml file which contains the
following:
<My_Tab_Group>
<My_Tab>
<My_ID/>
</My_Tab>
</My_Tab_Group>
    then the returned list is ['My_Tab_Group', 'My_Tab', 'My_ID'].
    '''
    if node is None:
        return []
if node is node.getroottree().getroot():
return []
return getPath(node.getparent()) + [node.tag]
def getPathString(node, sep='/'):
return sep.join(getPath(node))
def getPathIndex(node):
nodes = getPath(node)
return [str(getIndex(n)) for n in nodes]
def getPathIndexString(node, sep='/'):
return sep.join(getPathIndex(node))
def nodeHash(node, hashLen=10):
    path = getPathString(node)
    # hashlib needs bytes, so encode the path before hashing
    hash = hashlib.sha256(path.encode('utf-8'))
    hash = hash.hexdigest()
    hash = hash[:hashLen]
    return hash
def treeHash(node):
s = etree.tostring(
node,
pretty_print=True,
xml_declaration=True,
encoding='utf-8'
)
hash = hashlib.sha256(s)
hash = hash.hexdigest()
return hash
def replaceElement(element, replacements):
if replacements is None:
return
# Canonicalise input
if type(replacements) in (list, tuple):
container = etree.Element('container')
for r in replacements:
container.append(r)
replacements = container
# Insert each element in `replacements` at the location of `element`. The
# phrasing is a bit opaque here because lxml *moves* nodes from
# `replacements` instead of copying them, when `insert(index, r)` is called.
returnVal = []
index = element.getparent().index(element) # Index of `element`
while len(replacements):
r = replacements[-1]
element.getparent().insert(index, r)
returnVal.append(r)
element.getparent().remove(element)
return returnVal
def insertAfter(node, nodeToInsert):
'''
Inserts `nodeToInsert` immediately after `node`.
'''
index = node.getparent().index(node) # Index of `node`
node.getparent().insert(index+1, nodeToInsert)
def insertBefore(node, nodeToInsert):
index = node.getparent().index(node) # Index of `node`
node.getparent().insert(index, nodeToInsert)
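# ---------------------------------------------------------------------------
# A small usage sketch (an illustration, not part of the original module),
# showing the element order produced by insertAfter and replaceElement:
if __name__ == '__main__':
    root = etree.fromstring('<root><a/><b/></root>')
    insertAfter(root[0], etree.Element('c'))    # <root><a/><c/><b/></root>
    replaceElement(root.find('b'), [etree.Element('x'), etree.Element('y')])
    print(etree.tostring(root))                 # b'<root><a/><c/><x/><y/></root>'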
|
One of the more obscure monuments in Tovste commemorates the death of a Turkish Pasha, who is thought to have died in battle around 1673.
The small stone monument is said to refer to a chieftain killed during an uprising that started near Jagielnica (about 10 km away) and moved to Tluste. It is thought that he died and was buried near a narrow road that once passed through the area, presently occupied by houses.
By way of historical background, the Turks and their allies, the Cossacks and Tartars, had invaded the Polish territory of Podolia in 1671 and achieved widespread military success. Poland was compelled to sign the so-called Treaty of Buczacz in October 1672, under the terms of which Podolia and the southern part of Ukraine became part of the Turkish Empire. Over the next eleven years of Turkish control, new administrative divisions were introduced, and Tluste fell under the supervision of Chortkiv.
The monument to the Pasha is located next to a shed belonging to a private dwelling on Shkilna Str. Partially buried and hidden under a woodpile, it is only about 20-25 cm wide and about 50-70 cm high. In the 1970s or 1980s, an archaeologist from Ternopil confirmed that the inscription – which, today, one can barely discern – was consistent with ‘Arabic’ (sic) script. No one is sure whether or not the grave may have been looted over the centuries.
Interestingly, the townspeople of Tovste have always known about the monument, considering it to be a ‘magic stone’ that will bring harm if it is touched. Care was taken to avoid it when ploughing the fields. The current occupants of the house were the last to move onto Shkilna Str. in the early 1960s, taking possession of the one remaining vacant lot on the street. While their house and shed were being constructed, the builder wanted to move the stone, but the owner objected and it was left in place. A proposal to relocate the stone to the museum around the 1980s was similarly rejected by the neighbours, who said that moving it would bring bad consequences.
Dmytrenko, H. pers. comm., 2005.
Kowalski, S. “Jazlowiec: The Town Lost in History”. Chapter VI. http://www.aerobiologicalengineering.com/wxk116/sjk/jazlow.html.
Pawlyk, J. History of Tovste. Chortkiv, 2000. p. 33, and pers. comm. |
# from django.contrib.contenttypes.models import ContentType
# from django.contrib.auth.models import User
# from django.db import models
# # am I importing these session libraries correctly?
# from django.contrib.sessions.models import Session
# from django.contrib.sessions.backends.cached_db import SessionStore
# from django.contrib.sessions.backends.db import SessionStore
# from django.conf import settings
# import random, string
# from importlib import import_module
# def get_path(instance, filename):
# ctype = ContentType.objects.get_for_model(instance)
# model = ctype.model
# app = ctype.app_label
# extension = filename.split('.')[-1]
# ############################################################################
# # string for the session id
# # s = request.session._session_key
# # request.user.username
# # user_print_test = models.IntegerField(User)
# # user_print_test = models.ForeignKey(User, unique=True)
# # print "in files.py this is the user_print_test value: %s" % user_print_test
# # using session outside of the views
# # SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
# # this randomly creates a sessionid everytime
# # s = SessionStore()
# # s.save()
# # s.session_key
# # print "in files.py this is the s.session_key value: %s" % s.session_key
# # get_session_key = s.session_key
# # session_var = Session.objects.get(pk=s.session_key).get_decoded()
# # print "in files.py this is the s.session_var value: %s" % session_var
# # this does not work
# # user_get_id = User.objects.get(id = session_var['_auth_user_id'])
# # print "this is the session_key value: %s" % user_get_id
# ############################################################################
# # modified code
# # dir = get_session_key
# # original code
# dir = "site"
# # if model == "job":
# # dir += "/pdf/job_attachment"
# # else:
# # dir += "/img/%s" % app
# # if model == "image_type_1":
# # dir += "/type1/%s" % instance.category
# # elif model == "image_type_2":
# # dir += "/type2"
# # elif model == "restaurant":
# # dir += "/logo"
# # else:
# # dir += "/%s" % model
# chars = string.letters + string.digits
# name = string.join(random.sample(chars, 8), '')
# # original code (note: four "%s" placeholders but only three values)
# # return "%s/%s/%s.%s" % (dir, name, extension)
# return "%s/%s.%s" % (dir, filename, extension) |
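For reference, a minimal runnable Python 3 sketch of what the commented-out upload-path helper above appears to be aiming for (the per-model directory logic is elided; the helper name is illustrative, and the random-name branch uses `string.ascii_letters` in place of the removed Python 2 `string.letters`):
import random
import string

def get_path_sketch(instance, filename):
    # Stand-in for the ContentType lookup in the original commented code
    extension = filename.rsplit('.', 1)[-1]
    directory = "site"
    # Random 8-character name, replacing string.join(random.sample(...), '')
    chars = string.ascii_letters + string.digits
    name = ''.join(random.sample(chars, 8))
    return "%s/%s.%s" % (directory, name, extension)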
Melissa populates a space with plinths. Feels compelled to blog about how clever this is.
This is an image taken today of the Showcase Gallery at the CIT, Northbridge. (WA, y’all) Next door is an even larger gallery being used for the massive Graduate Metal XII show, which was also in the process of being mounted today. When I say also: getting the plinths finished and the wall boards prepped is as far as I got today in our space. This is the calm before the storm. Tomorrow, it pours jewels!
2 teaspoons. two teaspoon plans. what will they become?
Two bowl plans. I’ve borrowed from the pattern on the handle to make the shapes in the two patterns above. Thanks to Camberwell Markets I bought two spoons exactly the same, so I didn’t have to choose which one I would prefer to see finished. |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
"""
# NOTE:
THE self.lang[operator] PATTERN IS CASTING NEW OPERATORS TO OWN LANGUAGE;
KEEPING Python AS# Python, ES FILTERS AS ES FILTERS, AND Painless AS
Painless. WE COULD COPY partial_eval(), AND OTHERS, TO THIER RESPECTIVE
LANGUAGE, BUT WE KEEP CODE HERE SO THERE IS LESS OF IT
"""
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions._utils import simplified
from jx_base.expressions.expression import Expression
from jx_base.language import is_op
from mo_json import BOOLEAN
class EsNestedOp(Expression):
data_type = BOOLEAN
has_simple_form = False
def __init__(self, terms):
Expression.__init__(self, terms)
self.path, self.query = terms
    @simplified
    def partial_eval(self):
        if self.path.var == ".":
            return self.query.partial_eval()
        # __init__ accepts a single `terms` pair, so pass just the pair
        return self.lang[
            EsNestedOp([self.path, self.query.partial_eval()])
        ]
def __data__(self):
return {"es.nested": {self.path.var: self.query.__data__()}}
def __eq__(self, other):
if is_op(other, EsNestedOp):
return self.path.var == other.path.var and self.query == other.query
return False
|
Borja Iglesias celebrates a goal.
Having spent last season on a successful loan at Real Zaragoza, Borja Iglesias has fond memories of the region of Aragon, and now he has another one, having scored the brace that moved Espanyol into second place.
The striker scored both goals in the 2-0 win as his current team visited Aragonese side Huesca in LaLiga Santander.
Just five minutes in he had a great chance, but he was flagged for offside.
Yet Iglesias did finally score just before the half-time break, converting a Didac Vila cross in the 41st minute, though this goal too had to be checked by VAR for offside before being given.
Victor Sanchez then had a goal ruled out for offside five minutes into the second half, before Iglesias did make it 2-0 in the 64th minute, finishing well when through one on one against Aleksandar Jovanovic.
The remainder of the game was a fiery affair, with fouls, bookings, VAR consultations and nine minutes of stoppage time.
Yet Espanyol weathered the storm and now sit second, just one point behind city rivals Barcelona. |
# # # #
# Examples of how to run downscaling with this new package.
# this is hardwired junk unless you are on the SNAP servers at UAF.
# # # #
# AR5
if __name__ == '__main__':
# import modules
import downscale
# minimum required arguments
ar5_modeled = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/IPSL-CM5A-LR/clt/clt_Amon_IPSL-CM5A-LR_rcp26_r1i1p1_200601_210012.nc'
ar5_historical = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/IPSL-CM5A-LR/clt/clt_Amon_IPSL-CM5A-LR_historical_r1i1p1_185001_200512.nc'
clim_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
base_path = '/atlas_scratch/malindgren/CMIP5/TEST_AR5'
# run
# down = DownscaleAR5.DownscaleAR5( ar5_modeled, ar5_historical, base_path, clim_path, template_raster_fn=template_raster_fn, ncores=32 ) #, climatology_begin, climatology_end, plev, absolute, metric, ncores )
# output = down.downscale_ar5_ts()
down = downscale.Dataset( ar5_modeled, ar5_historical, base_path, clim_path, template_raster_fn=template_raster_fn, ncores=32 ) #, climatology_begin, climatology_end, plev, absolute, metric, ncores )
output = down.downscale_ar5_ts()
# CRU
if __name__ == '__main__':
# import modules
from downscale import DownscaleCRU
# example of post_downscale_function - pass in at DownscaleCRU()
def clamp_vals( x ):
''' clamp the values following the relative humidity downscaling '''
x[ (x > 100) & (x < 500) ] = 95
return x
# minimum required arguments
cru_ts = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS323/cru_ts3.23.1901.2014.cld.dat.nc'
clim_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
base_path = '/atlas_scratch/malindgren/CMIP5/CRU2'
# run
down = DownscaleCRU.DownscaleCRU( cru_ts, clim_path, template_raster_fn, base_path, absolute=False, ncores=32 )
output = down.downscale_cru_ts()
|
Comprised of 63% Chardonnay and 37% Pinot Noir, this wine has fine bubbles, with fresh aromas of citrus fruit, green apple and baked bread. Midweight and creamy on the palate, well balanced, with a long clean finish.
Selected in the vineyard for making sparkling wines, the two plots of Pinot Noir and Chardonnay were harvested early to ensure the final wine had moderate alcohol and sufficient acidity. Picked by hand and gently pressed in fractions, the clear juice was fermented at cold temperatures in stainless steel. The “cuvée”, based on the 2014 vintage, was blended with wines dating back to 2009. The blend was then cold stabilized and sterile filtered. Secondary fermentation took place in the bottle, and the wine was aged on its lees for 30 months before riddling and disgorging.
# Copyright (c) 2007-2010 The PyAMF Project.
# See LICENSE for details.
"""
Elixir adapter module. Elixir adds a number of properties to the mapped instances.
@see: U{Elixir homepage (external)<http://elixir.ematia.de>}
@since: 0.6
"""
import elixir.entity
import pyamf
from pyamf import adapters
adapter = adapters.get_adapter('sqlalchemy.orm')
adapter.class_checkers.append(elixir.entity.is_entity)
class ElixirAdapter(adapter.SaMappedClassAlias):
EXCLUDED_ATTRS = adapter.SaMappedClassAlias.EXCLUDED_ATTRS + [
'_global_session']
def getCustomProperties(self):
adapter.SaMappedClassAlias.getCustomProperties(self)
self.descriptor = self.klass._descriptor
self.parent_descriptor = None
if self.descriptor.parent:
self.parent_descriptor = self.descriptor.parent._descriptor
if self.descriptor.polymorphic:
self.exclude_attrs.update([self.descriptor.polymorphic])
def _compile_base_class(self, klass):
if klass is elixir.EntityBase or klass is elixir.Entity:
return
pyamf.ClassAlias._compile_base_class(self, klass)
pyamf.register_alias_type(ElixirAdapter, elixir.entity.is_entity) |
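# Once registered above, PyAMF serialises Elixir entities through
# ElixirAdapter automatically, since is_entity identifies them.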
The 20th Annual Pooch Parade was a huge success! A huge thank you to all who participated in our event. A big thank you to all the volunteers and donors who help make this a very successful event year after year!
We at Tri Valley Guide Dogs would like to express our heartfelt appreciation to all of the generous supporters of the 2017 Pooch Parade. Whether through a gift of time or money, (or both), each donation makes a huge impact in the lives of our dogs, our program, and ultimately the lives of those that these dedicated and loving dogs will serve.
NOTE: Due to the elimination of the Wednesday Street Parties, we will not be doing our annual Pooch Parade in 2018. We have other opportunities to donate and support our club, contact us to see how you can help.
See 2016 pooch parade photos here.
See 2017 pooch parade photos here.
See more photos from Shutterfly here. |
import sublime
import sublime_plugin
import subprocess
import time
import webbrowser
from threading import Thread
aml_manual_path = ""
aml_start_path = ""
aml_batch_file = ""
repl_process = None
output_lines = []
output_view = None
class AmlReplCommand(sublime_plugin.TextCommand):
def PrintStdout(self, edit, process):
global output_lines
while process.poll() is None:
output = process.stdout.readline()
output_lines.append(output)
def run(self, edit):
self.view.set_name("AML REPL")
self.view.set_syntax_file("Packages/AML/Aml.tmLanguage")
global aml_manual_path, aml_start_path, aml_batch_file
settings = sublime.load_settings("AMLRepl.sublime-settings")
aml_manual_path = settings.get("aml_manual_path")
aml_start_path = settings.get("aml_start_path")
aml_batch_file = settings.get("aml_batch_file")
global repl_process
        repl_process = subprocess.Popen(
            aml_batch_file, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            universal_newlines=True,  # text-mode pipes: the commands below write/read str, not bytes
            cwd=aml_start_path)
global output_view
output_view = self.view
stdout_thread = Thread(
target=self.PrintStdout, args=[edit, repl_process])
        stdout_thread.daemon = True
stdout_thread.start()
counter = 0
        while 'nil\r\n' not in output_lines and counter < 100:
time.sleep(0.1)
counter += 1
self.view.run_command('output_lines')
class WindowEventCommand(sublime_plugin.EventListener):
def on_close(self, view):
global repl_process
if repl_process:
repl_process.stdin.write("(quit)\n")
repl_process.terminate()
repl_process = None
class ReplQuitCommand(sublime_plugin.TextCommand):
def run(self, edit):
global repl_process
if repl_process:
output_view.run_command('output_lines')
repl_process.stdin.write("(quit)\n")
repl_process.terminate()
repl_process = None
output_view.insert(
edit, output_view.size(), "AML process terminated. Bye :-)")
class ReplEvalCommand(sublime_plugin.TextCommand):
    def last_sexp(self, string):
        '''
        Extracts the last complete S-expression in `string` (plus a leading
        quote character, if present) by scanning backwards and counting
        brackets.
        '''
        sexp, bracket_count, bracket_match, done = "", 0, 0, 0
        for c in reversed(string):
            if c == ')':
                bracket_match += 1
                bracket_count += 1
            elif c == '(':
                bracket_match -= 1
                bracket_count += 1
            if done == 0 and bracket_count > 0:
                sexp = c + sexp
            elif done == 1 and c == '\'':
                sexp = c + sexp
            elif done > 1:
                break
            if bracket_count > 1 and bracket_match == 0:
                done += 1
        return sexp
def run(self, edit):
global repl_process
if repl_process:
input_substr = None
position = self.view.sel()[0]
if position.begin() == position.end():
input_substr = self.last_sexp(
self.view.substr(sublime.Region(0, self.view.size())))
else:
input_substr = self.view.substr(
sublime.Region(position.begin(), position.end()))
output_view.insert(edit, self.view.size(), "\n")
repl_process.stdin.write("%s\n" % input_substr)
output_view.run_command('output_lines')
else:
output_view.insert(
edit, output_view.size(), "No AML process initialized. Please restart AMLRepl.\n")
class FileEvalCommand(sublime_plugin.TextCommand):
def run(self, edit):
global repl_process
global output_view
if repl_process:
input_substr = self.view.substr(
sublime.Region(0, self.view.size()))
repl_process.stdin.write("%s\n" % input_substr)
output_view.run_command('output_lines')
else:
output_view.insert(
edit, self.view.size(), "No AML process initialized. Please restart AMLRepl.\n")
class OutputLinesCommand(sublime_plugin.TextCommand):
def run(self, edit):
global output_lines
counter = 0
while output_lines == [] and counter < 10:
time.sleep(0.1)
counter += 1
for line in output_lines:
self.view.insert(edit, self.view.size(), line)
self.view.run_command("goto_line", {"line": self.view.size()})
output_lines = []
class AmlReferenceManualCommand(sublime_plugin.TextCommand):
def run(self, edit):
global aml_manual_path
url = "file:///" + aml_manual_path + "index.html"
webbrowser.open_new(url)
class AmlGuiCommand(sublime_plugin.TextCommand):
def run(self, edit):
if repl_process:
output_view.insert(edit, self.view.size(), "\n")
repl_process.stdin.write("%s\n" % "(aml)")
output_view.run_command('output_lines')
else:
output_view.insert(
edit, output_view.size(), "No AML process initialized. Please restart AMLRepl.\n")
class AunitGuiCommand(sublime_plugin.TextCommand):
def run(self, edit):
if repl_process:
output_view.insert(edit, self.view.size(), "\n")
repl_process.stdin.write(
"%s\n" % "(compile-system :aunit-core-system)")
output_view.run_command('output_lines')
repl_process.stdin.write(
"%s\n" % "(compile-system :aunit-print-system)")
output_view.run_command('output_lines')
repl_process.stdin.write(
"%s\n" % "(compile-system :aunit-gui-system)")
output_view.run_command('output_lines')
repl_process.stdin.write(
"%s\n" % "(compile-system :aunit-main-system)")
output_view.run_command('output_lines')
repl_process.stdin.write("%s\n" % "(aunit)")
output_view.run_command('output_lines')
else:
output_view.insert(
edit, output_view.size(), "No AML process initialized. Please restart AMLRepl.\n")
|
The lunch special is excellent value for money - $4.99 / $5.99 for a starter plus main. The pad prik was our favorite!
Terrible service and the food is ordinary at best. Don't go. Ever.
The curries are super delicious and are worth the wait!
Can't say enough how wonderful and fresh and tasty the Pad Khee Mao dish was. Spicy and yummy!
Love the crazy inexpensive chicken noodle soup!
The lunch special is not to be missed. $5 for entree and appetizer!
Best Thai Food in Chicago, and so well priced!!!
Check out the women's restroom! Love this place. Super friendly, affordable and yummy!
the pad khee mao is the best I've ever had.
A very affordable/quick/quality lunch while taking daytime courses at iO.
Monday through Friday from 11am to 3pm they have some great deals, including free mini appetizers. Also check out their text messaging coupons by texting 686868 with the word Cozy.
Great Thai food, cozy atmosphere, and wonderful customer service. If you become a regular they make an effort to go the extra mile. By far my favorite place to eat in Wrigleyville.
The panang curry here is my favorite in the city!! They include huge bell peppers!!
If you're in the Wrigleyville/Lakeview neighborhood Cozy Noodles & Rice is the place for Thai. Try the Pad See Ewe w/chicken. They also offer the best coupon deals for delivery.
A bubble tea and crab rangoons are a must! |
from setuptools import setup, find_packages
from setuptools.command.install import install
def _pre_install():
print("Verifying that the library is accessible.")
import sys
import os.path
dev_path = os.path.dirname(__file__)
sys.path.insert(0, dev_path)
try:
import pyzap.library
except OSError as e:
print("Library can not be loaded: %s" % (str(e)))
raise
class _custom_install(install):
def run(self):
_pre_install()
install.run(self)
description = "Python wrapper library for ZapLib digital television (DVB) " \
"tuning library."
long_description = """\
This library allows a Python script to tune channels with nothing more than the
ZapLib library, and the correct tuning values for the type of DVB that you're
trying to decode. No channels.conf file is required.
"""
setup(name='pyzap',
version='0.3.1',
description=description,
long_description=long_description,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Topic :: Multimedia :: Video :: Capture',
],
keywords='dvb dvb-a dvb-c dvb-s dvb-t dvb-apps television cable',
author='Dustin Oprea',
author_email='[email protected]',
url='https://github.com/dsoprea/PyZap',
license='GPL 2',
packages=find_packages(exclude=[]),
include_package_data=True,
zip_safe=True,
cmdclass={ 'install': _custom_install },
)
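# With the cmdclass hook above, `python setup.py install` runs _pre_install()
# before installation proceeds, so a missing or unloadable zaplib shared
# library fails the install early instead of at first import.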
|
This article first appeared in The Baptist Preacher’s Journal 13 (Spring 2003): 44-47.
One of my responsibilities as Library Director at Baptist Bible College is overseeing the development of our library’s collection. Over the last seven years we’ve spent more than $350,000 on books, periodicals, on-line databases, and other media. Needless to say, I’ve learned a lot about selecting library resources during this time.
Of course, building one’s private library is different from developing a collection to support the research activities of hundreds of students and faculty. Nevertheless, I’ve found that many of the strategies I employ in building the Vick Library’s collection have served me well as I’ve expanded my personal library. Below I will share seven principles of library development that should enable readers to build effective collections while wasting minimal amounts of time, money, and shelf space.
Several months ago, I asked my pastor, Bill Kolb, what advice he would offer preachers concerning the development of their libraries. He emphatically responded, “Plan to spend money.” While this counsel may seem trite to the preacher who can’t let a week pass without buying a few books, it is true that some will not accumulate resources simply because they don’t allocate money to do so. My pastor explained that during the first half of his ministry he didn’t place a priority on building his library. Over time he has come to view his books as the tools of a man who takes his work seriously.
Planning to spend money is also necessary because it is tempting to fill one’s library with inexpensive books. I have personally bought many books simply because they were cheap, regardless of their quality or usefulness. However, I have come to realize that a book’s price is often a reflection of its value. This isn’t to say that there are no true bargains on the market, but we shouldn’t be as concerned with cost as with quality. I’ll say more about making wise selections later in this article.
A second principle modifies our understanding of the first: We must resolve that bigger is not necessarily better. As a matter of principle, it is better to buy one costly, high-quality book than five inexpensive ones that will prove to be of little substance. A classmate of mine accumulated a library at a rapid pace as he went through Bible college. By the time we graduated, his library consisted of some 3,000 volumes—many, I am afraid, chosen quite uncritically. I surmise that in recent years he has come to question some of his selections, not least because of the student loans he assumed in order to support his book-buying habit.
The bottom line is that we should exercise discipline in acquiring material for our libraries. If we find little use for a particular volume, we should sell it or give it to someone who can use it. For some of us, weeding our collections may prove quite painful. Several months ago I read of a church music library that housed scores for more than 2,600 anthems. If the church’s choir were to sing a different anthem from its collection each week, it wouldn’t repeat a number for more than 50 years! Quite obviously, some weeding could have saved storage space, left behind a higher-quality collection, and benefited a less fortunate church.[2] The same may be true of our personal libraries.
It is no secret that we live in an information-rich society. Information is available to us in a mind-boggling array of media. An increasing amount of information of interest to pastors and missionaries is available in digital form. In this context, you must choose the media and formats that will make up your library. I use the word library loosely here. Etymologically, the word signifies a collection of books. Of course, we have come to understand that libraries house much more than just books: periodicals, sound and video recordings, software, and more. However, in recent years, libraries of all types have faced the dilemma of relinquishing ownership of collections in favor of access to on-line information.
As a result, today’s preacher may choose Bible study resources in three basic forms: in print (books, periodicals, etc.), on digital media (diskettes, CD-ROMs, etc.), or via the World Wide Web. Each medium has its strengths and weaknesses. Paper-based media are relatively expensive and difficult to transport; in addition, they lack sophisticated search tools. However, the information they contain will remain usable for decades. Digital media are often more affordable (in part due to lower production costs) and take up very little space, but may be incompatible with computer operating systems and/or hardware in the future.
Web-based resources often provide powerful search options, but may become unavailable suddenly. Free sites usually provide little up-to-date, copyrighted material; they may also contain annoying advertisements. While fee-based sites feature access to current literature with little or no advertising, information is available only as long as one maintains a subscription. In addition, both digital and Web-based media are difficult to use in some places and situations, and may prevent the user from performing seemingly legitimate operations (cutting and pasting, printing, etc.). As a consequence, no medium should be touted as best for all preachers. One’s choice of media should take account of factors such as affordability, quality, ease of use, reliability, and portability.
An additional dimension of the media/format equation is the fact that research resources—both print and electronic—appear in the form of reference materials, monographs, and periodical literature. It is easy to conceive of a library as a collection of books on specific topics (i.e., monographs). However, there is much to be said for building a personal library in which reference and periodical literature occupy a prominent place. Both consist of fairly brief articles that can be digested more easily than a typical book.
Periodical literature often allows a researcher to locate information that is more current than that contained in reference works and monographs. In addition, it addresses specialized topics that are not covered in monographs for one reason or another.[3] Reference works organize information in ways that make it easy to locate and use. Certain kinds of reference works, including dictionaries, encyclopedias, and handbooks, actually systematize knowledge.
By definition, reference works are sources designed to be consulted in part rather than read as a whole. (When was the last time you curled up in an easy chair to devour a volume of Encyclopaedia Britannica?) Reference resources relevant to Christian ministry include dictionaries, encyclopedias, atlases, surveys, commentaries, lexicons, and concordances. Whether in print or electronic form, they are absolutely essential to effective sermon and lesson preparation. This leads us to a fourth principle: Build your reference collection first.
In helping hundreds of library users over the years, I have found that a high percentage of questions—especially questions of fact—can be answered simply by consulting appropriate reference sources. In addition, reference articles are a logical place to begin one’s research: They define terms, provide overviews of topics, and list resources for further reading. Therefore, it makes little sense for a preacher to focus on building another segment of his library before acquiring a critical mass of reference works.
What sorts of reference resources should a pastor or missionary own? Most importantly, he should collect in the area of biblical studies: Bible dictionaries and encyclopedias; concordances and indexes of Bible words, phrases, and topics; Bible commentaries of various lengths and types; introductions to the Old and New Testaments; lexical resources to support study in the biblical languages; and bibliographies that will guide him in further reading on biblical topics. But the preacher’s reference collection should extend far beyond the Bible, to systematic theology, various areas of practical theology (preaching, missions, counseling, leadership, worship, education, ethics, etc.), and church history.
Accumulating a collection such as I have just described is no small task. In fact, nearly eight years out of Bible college, I still find myself acquiring more reference resources than monographs or periodicals. While my personal collection is admittedly lacking in works on a variety of topics, my library supports robust study for lessons and sermons. My strategy for acquiring topical works—and for commentaries on specific books of the Bible as well—is simple: I purchase materials as my ministry requires it.
All of this leads to a very important question: How is one to choose commentaries (and other sources) wisely? Students often ask me if I can recommend a commentary on a particular Bible book, or a monograph on a particular topic. My answer is a qualified “yes.” I do not necessarily know what the best books or commentaries are, but I know how to find out.[4] I understand that it is impossible for me to master a wide range of disciplines, and am willing to consider others’ advice.
Some months ago I was beginning to prepare a Sunday School lesson series on the book of Romans. Quite obviously, I would need to consult some commentaries as I worked on my lessons. I was inundated with available sources. (The Vick Library’s catalog lists 96 commentaries on Romans, not including some that are in sets on the New Testament or the entire Bible.) Faced with this dilemma, I chose to look at bibliographies and book reviews in order to identify the source(s) that would suit me best. Through the testimony of a number of experts, I found a high-quality, up-to-date commentary that was readable, yet did not ignore technical issues.
When I need to identify a well-respected work in an unfamiliar area, I typically appeal to bibliographies. Sometimes I am content to refer to the works cited at the end of an encyclopedia article on my topic. For example, The International Standard Bible Encyclopedia, 4th edition, lists eight titles at the end of its entry on 2 Corinthians. (This is one reason why it’s important to have an adequate supply of reference works on hand.) On other occasions I consult specialized bibliographies such as those by Cyril Barber, Robert Krauss, Jr., D. A. Carson, Tremper Longman, III, and Charles Spurgeon.[5] I also make use of book reviews, which appear in a wide variety of periodicals. (Having access to book reviews is one argument for including key periodicals in your library.) If I find that several authorities agree on the quality of a work, I feel greater confidence in acquiring it.
I propose two other ways of identifying the “best” books on a subject: First, solicit the opinion of peers and mentors. Your friends, associates, former professors, and ministry mentors may be able to supply valuable advice. Second, if at all possible, examine materials in a bookstore or on the Web before purchasing them. Doing so may save you the frustration of buying an item that will prove to be of little value to you.
A sixth principle of library development is this: Take advantage of local library resources. If you are fortunate enough to live near a Christian library,[6] you can use its resources for your own study and experiment with materials firsthand as you decide what to buy for yourself. Even public libraries have much to offer to ministers. Nearly forty years ago, Marie Loizeaux observed that public libraries might inform ministers concerning statistics, social trends, current affairs, modern culture, and more.[7] Many public libraries now offer their patrons access to massive databases via their Web sites, making it possible to do some research without ever visiting the library. Becoming familiar with local library resources allows you to focus your collection efforts in the areas that are most needful.
[1] Robert G. Delnay, “Resources for Preaching,” Faith Pulpit, July 1999, 1.
[2] Bob Burroughs, “This Idea Will Work,” Church Musician Today, April 2001, 41.
[3] Responses to a survey administered by The Baptist Preacher in 1996 suggested that pastors associated with the Baptist Bible Fellowship are not taking advantage of challenging periodical literature. Of particular concern was the finding that only 16% of respondents regularly read theological journals. Readership of specific periodicals was reported as follows: Christianity Today (11%); Leadership (19%); Sword of the Lord (49%); Pulpit Helps (55%); and other publications (62%). This data, never reported in print, was obtained from Keith Bassham, Executive Editor of the Baptist Bible Tribune and The Baptist Preacher, 11 Sept. 2002.
[4] British author Samuel Johnson stated, “Knowledge is of two kinds. We know a subject ourselves, or we know where we can find information upon it.” As a librarian, I aim for the latter.
[5] Cyril J. Barber, The Minister’s Library, 2 vols. (Neptune, N.J.: Loizeaux, 1974-85); Cyril J. Barber and Robert M. Krauss, Jr., An Introduction to Theological Research: A Guide for College and Seminary Students (Lanham, Md.: University Press of America, 2000); D. A. Carson, New Testament Commentary Survey, 5th ed. (Grand Rapids: Baker; Leicester, U.K.: Inter-Varsity, 2001); Tremper Longman, III, Old Testament Commentary Survey, 2d ed. (Grand Rapids: Baker, 1995); and Charles H. Spurgeon, Lectures to My Students (Pasadena, Tex.: Pilgrim Publications, 1990) [includes reprint of Commenting and Commentaries].
[6] Christian libraries include those maintained by Christian schools, Bible colleges, Christian liberal arts colleges, theological seminaries, denominational headquarters, Christian publishers, and religious study centers. In addition, local church libraries are prevalent in many areas of the United States, especially among Southern Baptists.
[7] Marie D. Loizeaux, “The Minister and the Public Library,” Christianity Today, 5 June 1964, 10-11. Loizeaux also called on ministers to assist public libraries in building better religious collections.
[8] Thomas M. Tanner, What Ministers Know: A Qualitative Study of Pastors as Information Professionals ([Evanston, Ill.]: American Theological Library Association; Metuchen, N.J.: Scarecrow Press, 1994).
[9] Leith Anderson et al, “Managing the Information Overload,” Leadership 16 (spring 1995): 121-22.
[10] James Montgomery Boice, “The Preacher and Scholarship,” in The Preacher and Preaching: Reviving the Art in the Twentieth Century, ed. Samuel T. Logan, Jr. (Phillipsburg, N.J.: Presbyterian & Reformed, 1986), 95-97.
[11] Anderson et al, 125-26.
[12] Anderson et al, 126-27.
[13] Iain H. Murray, “Reading Church History,” The Banner of Truth, no. 390 (1996): 10-20.
[14] Richard Orchard, “The Pastor and His Preaching-Teaching,” in And He Gave Pastors: Pastoral Theology in Action, ed. Thomas F. Zimmerman (Springfield, Mo.: Gospel Publishing House, 1979), 172-78.
[15] William Shishko, “The Minister’s Treasure,” The Banner of Truth, no. 415 (1998): 16-21.
[16] Iain H. Murray, “The Preacher and Books,” The Banner of Truth, no. 389 (1996): 7.
[17] John Arnold, “Pastor, Reclaim Your Calling,” The Baptist Preacher, Sept. 2002, 23+.
[18] James Montgomery Boice, “The Preacher and Scholarship,” 95. |
import operator
from enum import Enum, unique
import maya
# pylint: disable=invalid-name
@unique
class WeekDay(Enum):
Monday = 0
Tuesday = 1
Wednesday = 2
Thursday = 3
Friday = 4
Saturday = 5
Sunday = 6
_DAY_NAMES = {day.name.lower(): day for day in WeekDay}
_DAY_NAMES.update((day.name.lower()[:3], day) for day in WeekDay)
_DAY_NAMES.update((day, day) for day in WeekDay)
def find_weekday(string_or_day):
if isinstance(string_or_day, WeekDay):
return string_or_day
if isinstance(string_or_day, str):
return _DAY_NAMES[string_or_day.lower()]
raise ValueError('Not a week day: %r' % string_or_day)
class WeeklyInterval:
def __init__(self, from_weekday, from_time, to_weekday, to_time):
from_weekday = find_weekday(from_weekday)
to_weekday = find_weekday(to_weekday)
# the class invariant is that from_weekday <= to_weekday; so when this
# is not the case (e.g. a Fri-Mon interval), we store the complement interval
# (in the example, Mon-Fri), and invert the criterion
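        # e.g. WeeklyInterval('fri', time(18), 'mon', time(6)) is stored as the
        # Mon 06:00 - Fri 18:00 interval, with covers() negated accordingly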
self._is_complement_interval = from_weekday.value > to_weekday.value
if self._is_complement_interval:
self._from_weekday = to_weekday
self._from_time = to_time
self._to_weekday = from_weekday
self._to_time = from_time
else:
self._from_weekday = from_weekday
self._from_time = from_time
self._to_weekday = to_weekday
self._to_time = to_time
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
pat = '{class_name}({from_weekday}, {from_time}, {to_weekday}, {to_time})'
if self._is_complement_interval:
return pat.format(
class_name=self.__class__.__name__,
from_weekday=self._to_weekday,
from_time=self._to_time,
to_weekday=self._from_weekday,
to_time=self._from_time,
)
return pat.format(
class_name=self.__class__.__name__,
from_weekday=self._from_weekday,
from_time=self._from_time,
to_weekday=self._to_weekday,
to_time=self._to_time,
)
@classmethod
def from_human(cls, string):
from_, to_ = string.split('-')
def parse_part(part):
part = part.replace('@', ' ')
parts = part.split()
weekday = parts[0]
time = parts[1]
timezone = parts[2] if len(parts) > 2 else 'UTC'
weekday = find_weekday(weekday)
time = maya.parse(time, timezone=timezone).datetime().time()
return weekday, time
from_weekday, from_time = parse_part(from_)
to_weekday, to_time = parse_part(to_)
return cls(from_weekday, from_time, to_weekday, to_time)
def covers(self, date):
return self._interval_covers(date) != self._is_complement_interval
def _interval_covers(self, date):
weekday = date.date().weekday()
time = date.time()
before = operator.le if self._is_complement_interval else operator.lt
if not self._from_weekday.value <= weekday <= self._to_weekday.value:
return False
if self._from_weekday.value == weekday and before(time, self._from_time):
return False
if self._to_weekday.value == weekday and before(self._to_time, time):
return False
return True
class IntervalUnion:
def __init__(self, iterable):
self._intervals = list(iterable)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return '{o.__class__.__name__}({o._intervals})'.format(o=self)
@classmethod
def empty(cls):
return cls(())
@classmethod
def from_human(cls, string):
strings = string.split(',')
return cls(WeeklyInterval.from_human(s) for s in strings)
def covers(self, date):
return any(interval.covers(date) for interval in self._intervals)
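# A short usage sketch (assumed, not part of the original module; `maya`
# parses the times, defaulting to UTC):
if __name__ == '__main__':
    hours = IntervalUnion.from_human('mon @ 09:00 - fri @ 17:00')
    now = maya.now().datetime()
    print(hours.covers(now))  # True between Monday 09:00 and Friday 17:00 UTC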
|
The kitchen is the most emotionally bonding room in the home in Poinsett Park, California. It establishes a bond at the first viewing of the home by prospective buyers. After the purchase, each visitor is ushered into the kitchen, ideally by the home's owner, who does so with pride, to initiate and solidify yet another bond with another person. If your kitchen fails to meet certain standards, it's almost impossible to form these essential bonds successfully. The state of your kitchen may be telling you that you desperately need Kitchen Remodeling in Poinsett Park, California. aristaskitchen.com is a great place to get the inspiration and guidance needed to create the ideal functioning kitchen for your family and home.
The designer in Poinsett Park, California begins working on conceptualizing a new look for your kitchen. When you agree on the new layout, a work contract will be presented for you to sign. The contract will include an estimate of the costs and will probably include floor plans as well as list the parties involved including the contractor, designer, planner and project manager. The contract may need to be revised and your signature may be required more than once for each change or addition.
Design ideas include the working triangle's configuration, the appliances selected, the look of your countertops and the look of your cabinets in CA. In addition, there are wall colors to choose and lighting elements along with your flooring choice. If you are a dedicated chef, you may require upscale appliances or a gas range versus an electric range. If you prefer automation, you may need a built-in microwave, toaster oven and or a dishwasher. Effective countertops can be granite, Carrara marble, quartz, Corian, Formica, butcher block or concrete. Cabinet choices are plentiful also. Owners can choose cabinets made from solid wood, composite, laminate or Thermofoil. The owner will pay $80-150 per linear foot for solid wood cabinets and $50-$75 per linear foot for laminate or Thermofoil cabinets. This particular kitchen element will probably be the most costly element of the kitchen remodel in Poinsett Park, California.
Working along with the choices for appliances, kitchen elements such as cabinets, countertops and lighting are flooring and wall treatments. The designer and the homeowner will select the flooring material that serves their needs and taste best. Wall treatments are the most arbitrary of the design options, as wall colors rarely affect the performance of the kitchen in Poinsett Park, California. Arista Kitchen will guide all your selections so that they combine to formulate the most successful final product, your remodeled kitchen.
AristaKitchen is the number one source for all your Kitchen Remodeling needs in CA. All our designers and tradesmen are professional and licensed. We finish every remodeling job on time and on budget with the finesse desired by our patrons. Our showroom has access to all the latest elements of decor and the most technologically advanced appliances and our workman use the most modern techniques. We proudly stand by our work and are happy to share a viewing of our portfolio, which documents our successful kitchens in Poinsett Park, California. It is always a pleasure to serve the homeowner who is ready to remodel the most lived-in room in the house. |
import struct
from .exceptions import AMQPError
from datetime import datetime, timezone
def rethrow_as(expected_cls, to_throw):
def decorator(f):
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except expected_cls as e:
raise to_throw from e
return wrapper
return decorator
###########################################################
# Deserialisation
###########################################################
@rethrow_as(struct.error, AMQPError('failed to read an octet'))
def read_octet(stream):
return _read_octet(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a short'))
def read_short(stream):
return _read_short(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read an unsigned short'))
def read_unsigned_short(stream):
return _read_unsigned_short(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a long'))
def read_long(stream):
return _read_long(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read an unsigned long'))
def read_unsigned_long(stream):
return _read_unsigned_long(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a long long'))
def read_long_long(stream):
return _read_long_long(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read an unsigned long long'))
def read_unsigned_long_long(stream):
return _read_unsigned_long_long(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a short string'))
def read_short_string(stream):
return _read_short_string(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a long string'))
def read_long_string(stream):
return _read_long_string(stream)[0]
@rethrow_as(KeyError, AMQPError('failed to read a table'))
@rethrow_as(struct.error, AMQPError('failed to read a table'))
def read_table(stream):
return _read_table(stream)[0]
@rethrow_as(KeyError, AMQPError('failed to read an array'))
@rethrow_as(struct.error, AMQPError('failed to read an array'))
def read_array(stream):
return _read_array(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_bool(stream):
return _read_bool(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_bools(byte, number_of_bools):
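    # e.g. read_bools(0b101, 3) yields (True, False, True): bit 0 comes first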
bits = "{0:b}".format(byte)
bits = "0" * (number_of_bools - len(bits)) + bits
return (b == "1" for b in reversed(bits))
@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_timestamp(stream):
return _read_timestamp(stream)[0]
def qpid_rabbit_mq_table():
# TODO: fix amqp 0.9.1 compatibility
# TODO: Add missing types
TABLE_VALUE_PARSERS = {
b't': _read_bool,
b'b': _read_signed_byte,
b's': _read_short,
b'I': _read_long,
b'l': _read_long_long,
b'f': _read_float,
b'S': _read_long_string,
b'A': _read_array,
b'V': _read_void,
b'x': _read_byte_array,
b'F': _read_table,
b'T': _read_timestamp
}
return TABLE_VALUE_PARSERS
def _read_table(stream):
TABLE_VALUE_PARSERS = qpid_rabbit_mq_table()
table = {}
table_length, initial_long_size = _read_unsigned_long(stream)
consumed = initial_long_size
while consumed < table_length + initial_long_size:
key, x = _read_short_string(stream)
consumed += x
value_type_code = stream.read(1)
consumed += 1
value, x = TABLE_VALUE_PARSERS[value_type_code](stream)
consumed += x
table[key] = value
return table, consumed
def _read_short_string(stream):
str_length, x = _read_octet(stream)
string = stream.read(str_length).decode('utf-8')
return string, x + str_length
def _read_long_string(stream):
str_length, x = _read_unsigned_long(stream)
buffer = stream.read(str_length)
if len(buffer) != str_length:
raise AMQPError("Long string had incorrect length")
return buffer.decode('utf-8'), x + str_length
def _read_octet(stream):
x, = struct.unpack('!B', stream.read(1))
return x, 1
def _read_signed_byte(stream):
x, = struct.unpack_from('!b', stream.read(1))
return x, 1
def _read_bool(stream):
x, = struct.unpack('!?', stream.read(1))
return x, 1
def _read_short(stream):
x, = struct.unpack('!h', stream.read(2))
return x, 2
def _read_unsigned_short(stream):
x, = struct.unpack('!H', stream.read(2))
return x, 2
def _read_long(stream):
x, = struct.unpack('!l', stream.read(4))
return x, 4
def _read_unsigned_long(stream):
x, = struct.unpack('!L', stream.read(4))
return x, 4
def _read_long_long(stream):
x, = struct.unpack('!q', stream.read(8))
return x, 8
def _read_unsigned_long_long(stream):
x, = struct.unpack('!Q', stream.read(8))
return x, 8
def _read_float(stream):
x, = struct.unpack('!f', stream.read(4))
return x, 4
def _read_timestamp(stream):
    x, = struct.unpack('!Q', stream.read(8))
    # The wire value here is in milliseconds (matching pack_timestamp below);
    # attach UTC explicitly, since a naive fromtimestamp would use local time.
    return datetime.fromtimestamp(x * 1e-3, timezone.utc), 8
def _read_array(stream):
TABLE_VALUE_PARSERS = qpid_rabbit_mq_table()
field_array = []
# The standard says only long, but unsigned long seems sensible
array_length, initial_long_size = _read_unsigned_long(stream)
consumed = initial_long_size
while consumed < array_length + initial_long_size:
value_type_code = stream.read(1)
consumed += 1
value, x = TABLE_VALUE_PARSERS[value_type_code](stream)
consumed += x
field_array.append(value)
return field_array, consumed
def _read_void(stream):
return None, 0
def _read_byte_array(stream):
byte_array_length, x = _read_unsigned_long(stream)
return stream.read(byte_array_length), byte_array_length + x
###########################################################
# Serialisation
###########################################################
def pack_short_string(string):
buffer = string.encode('utf-8')
return pack_octet(len(buffer)) + buffer
def pack_long_string(string):
buffer = string.encode('utf-8')
return pack_unsigned_long(len(buffer)) + buffer
def pack_field_value(value):
if value is None:
return b'V'
if isinstance(value, bool):
return b't' + pack_bool(value)
if isinstance(value, dict):
return b'F' + pack_table(value)
if isinstance(value, list):
return b'A' + pack_array(value)
if isinstance(value, bytes):
return b'x' + pack_byte_array(value)
if isinstance(value, str):
return b'S' + pack_long_string(value)
if isinstance(value, datetime):
return b'T' + pack_timestamp(value)
    if isinstance(value, int):
        if value.bit_length() < 8:
            return b'b' + pack_signed_byte(value)
        if value.bit_length() < 32:
            return b'I' + pack_long(value)
        # Larger ints would otherwise fall through to the float check and
        # raise NotImplementedError; pack them as 64-bit long longs instead.
        return b'l' + pack_long_long(value)
if isinstance(value, float):
return b'f' + pack_float(value)
raise NotImplementedError()
def pack_table(d):
buffer = b''
for key, value in d.items():
buffer += pack_short_string(key)
# todo: more values
buffer += pack_field_value(value)
return pack_unsigned_long(len(buffer)) + buffer
def pack_octet(number):
return struct.pack('!B', number)
def pack_signed_byte(number):
return struct.pack('!b', number)
def pack_unsigned_byte(number):
return struct.pack('!B', number)
def pack_short(number):
return struct.pack('!h', number)
def pack_unsigned_short(number):
return struct.pack('!H', number)
def pack_long(number):
return struct.pack('!l', number)
def pack_unsigned_long(number):
return struct.pack('!L', number)
def pack_long_long(number):
return struct.pack('!q', number)
def pack_unsigned_long_long(number):
return struct.pack('!Q', number)
def pack_float(number):
return struct.pack('!f', number)
def pack_bool(b):
return struct.pack('!?', b)
def pack_timestamp(timeval):
number = int(timeval.timestamp() * 1e3)
return struct.pack('!Q', number)
def pack_byte_array(value):
buffer = pack_unsigned_long(len(value))
buffer += value
return buffer
def pack_array(items):
buffer = b''
for value in items:
buffer += pack_field_value(value)
return pack_unsigned_long(len(buffer)) + buffer
def pack_bools(*bs):
tot = 0
for n, b in enumerate(bs):
x = 1 if b else 0
tot += (x << n)
return pack_octet(tot)
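# A minimal round-trip sketch (illustrative, not part of the original module):
# pack_table serialises a dict, and read_table recovers it from a byte stream.
if __name__ == '__main__':
    import io
    payload = {'queue': 'tasks', 'durable': True, 'retries': 3}
    stream = io.BytesIO(pack_table(payload))
    assert read_table(stream) == payload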
|
We made a delivery of a Zelkova table. The zelkova came from Shiga-ken. It was the base part of the tree, so the outline of the roots by which the weight of the whole tree was supported is clearly visible in the grain.
It was originally a 2 meter slab but the customer wanted two separate tables, and combine them together when they have guests. For normal days, they will use the large table as a dining table and the small table as a personal desk. |
# Copyright (C) 2015 Patrick Happel <[email protected]>
#
# This file is part of pySICM.
#
# pySICM is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) any later
# version.
#
# pySICM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pySICM. If not, see <http://www.gnu.org/licenses/>.
import pySICM.sicm
from twisted.internet import defer, reactor
import struct
import pycomedi.device as Device
import pycomedi.subdevice as Subdevice
import pycomedi.channel as Channel
import pycomedi.constant as CONSTANTS
import pycomedi.utility as Util
import numpy as np
import time
class MyCallbackReader(Util.Reader):
def __init__(self, callback=None, count=None, **kwargs):
self.callback = callback
self.count = count
super(MyCallbackReader, self).__init__(**kwargs)
def start(self):
super(MyCallbackReader,self).start()
if self.callback:
self.callback(self.buffer)
def run(self):
count = self.count
block_while_running = self.block_while_running
while count is None or count > 0:
if count is not None:
count -= 1
try:
self.block_while_running = False
super(MyCallbackReader, self).run()
finally:
self.block_while_running = block_while_running
if self.block_while_running:
self.block()
    def isAlive(self):
        return super(MyCallbackReader, self).isAlive()
class ReadVIn (pySICM.sicm._SICMMeasurement):
def __init__(self):
super(ReadVIn, self).__init__()
self._setRequired('InputSignal', 1, pySICM.sicm.InputSignal)
self._setRequiredOptions('ReadVIn.InputSignal', 1, int, 'Id of the InputSignal to use. (int)')
self._setRequired('Samples', 1, int)
self._setRequiredOptions('ReadVIn.Samples', 1, int, 'Number of samples to be read in one run. (int)')
self._setRequired('Duration', 1, int)
self._setRequiredOptions('ReadVIn.Duration', 1, int, 'Duration of one run in milliseconds. (int)')
self._setRequiredOptions('ReadVIn.Loop', 1, bool, 'Loop infinitely? (bool)')
self._setRequired('Loop', 1, bool)
self.stop = False
self.device = None
self.ai = None
self.channel = None
        self.runs = 0
        self.reader = None
def checkAndSetConfig(self, settings):
if 'mode' not in settings:
return False
if settings['mode'] != 'readVIn':
return False
if 'readVIn' not in settings:
return False
rsettings = settings['readVIn']
if 'Duration' not in rsettings:
return False
self.setConfig('Duration', int(rsettings['Duration']))
if 'Samples' not in rsettings:
return False
self.setConfig('Samples', int(rsettings['Samples']))
if 'Loop' not in rsettings:
return False
self.setConfig('Loop', bool(int(rsettings['Loop'])))
return True
def fake(self, settings, writeResponse):
self.writeResponse = writeResponse
self.stop = False
        self.runs = 0
if self.checkAndSetConfig(settings):
self.nextFakeDataPoint('')
else:
self.writeResponse("NACK\r\n")
def generateFakeDeferred(self):
self.d = defer.Deferred()
self.d.addCallback(self.writeResponse)
self.d.addCallback(self.nextFakeDataPoint)
def generateDeferred(self):
self.d = defer.Deferred()
self.d.addCallback(self.writeResponse)
self.d.addCallback(self.nextDataPoint)
def checkData(self):
pass
def nextFakeDataPoint(self, args):
self.runs=self.runs+1
self.data = []
global reactor
self.d = None
self.generateFakeDeferred()
if (self.getConfig('Loop') and self.stop is False) or self.runs == 1:
self.call = reactor.callLater(0,self._fake)
else:
self.destroy()
def _fake(self):
time.sleep(float(self.getConfig('Duration'))*1e-3)
y = []
noise = np.random.normal(0,0.05,1024)
for i in xrange(self.getConfig('Samples')):
y.append(np.sin(np.pi*2*i/(self.getConfig('Samples')/4.0))+noise[i%1024])
y = y - np.min(y)
y = y / np.max(y)
y = y * np.iinfo(np.uint16).max
data = ""
for i in y:
data = data + self.mkByte(int(round(i)))
self.d.callback(data)
#return d
def mkByte(self, number):
# little endian
a = int(number / 256)
b = int(number % 256)
return struct.pack('B',b)+struct.pack('B',a)
def setStop(self):
self.stop = True
def scan(self, settings, writeResponse):
self.writeResponse = writeResponse
self.device = Device.Device('/dev/comedi0')
self.device.open()
self.ai = self.device.find_subdevice_by_type(
CONSTANTS.SUBDEVICE_TYPE.ai,
factory = Subdevice.StreamingSubdevice)
channel = self.ai.channel(
3,
factory = Channel.AnalogChannel,
aref = CONSTANTS.AREF.ground)
best_range = channel.find_range(
unit=CONSTANTS.UNIT.volt,
min = -5,
max = 5)
self.channel = self.ai.channel(
3,
factory = Channel.AnalogChannel,
aref = CONSTANTS.AREF.ground,
range = best_range)
if self.checkAndSetConfig(settings):
self.frequency = 1e3*(
float(self.getConfig('Samples'))/float(self.getConfig('Duration')))
command = self.ai.get_cmd_generic_timed(1, scan_period_ns=1e9/self.frequency)
command.chanlist = [self.channel]
command.stop_src = CONSTANTS.TRIG_SRC.count
command.stop_arg = self.getConfig('Samples')
self.command=command
buf = np.zeros(self.getConfig('Samples'), np.uint16)
            self.nextDataPoint('')
def nextDataPoint(self, args):
self.runs=self.runs+1
self.ai.cmd = self.command
while self.ai.get_flags().busy and self.ai.get_flags().running:
time.sleep(.0001)
print "Sleeping..."
self.ai.cancel()
self.ai.command()
self.data = []
global reactor
self.d = None
self.generateDeferred()
if (self.getConfig('Loop') and self.stop is False) or self.runs == 1:
self.call = reactor.callLater(0,self._scan)
else:
self.destroy()
    def dataMeasured(self, data):
        # print "Runs is: %i" % self.runs
        # print "Data Received: %i" % time.time()
        print data  # self.writeResponse(data)
        # if self.reader.isAlive():
        #     self.reader.join()
        # self.nextDataPoint()
def _scan(self):
        buf = np.zeros(self.getConfig('Samples'), np.uint16)
        reader = Util.Reader(self.ai, buf)
reader.start()
reader.join()
print "Length after reader joined: %i" % len(reader.buffer)
s = ''
for i in reader.buffer:
s = s + self.mkByte(i)
self.d.callback(s)
def destroy(self):
self.runs=0
super(ReadVIn, self).destroy()
self.device.close()
|
import sys
class Machine():
def __init__(self, a, b, c, d, pc):
self.a = a
self.b = b
self.c = c
self.d = d
self.pc = pc
def __hash__(self):
return hash((self.a, self.b, self.c, self.d, self.pc))
def __eq__(self, other):
return (self.a, self.b, self.c, self.d, self.pc) == (other.a, other.b, other.c, other.d, other.pc)
def copy(self):
return Machine(self.a, self.b, self.c, self.d, self.pc)
def solve(inp):
i = 0
while True:
copied_list = inp[:]
m = Machine(i, 0, 0, 0, 0)
if execute(m, copied_list):
return i
i += 1
def execute(m, instructions):
visited = set()
signal = []
while m.pc < len(instructions):
xs = instructions[m.pc].strip().split()
if xs[0] == 'cpy':
cpy(m, xs[1], xs[2])
elif xs[0] == 'inc':
inc(m, xs[1])
elif xs[0] == 'dec':
dec(m, xs[1])
elif xs[0] == 'jnz':
jnz(m, xs[1], xs[2])
elif xs[0] == 'tgl':
tgl(m, xs[1], instructions)
elif xs[0] == 'out':
out(m, signal, xs[1])
if not valid_signal(signal):
return False
m_copy = m.copy()
if (m_copy, tuple(instructions)) in visited:
return True
visited |= {(m_copy, tuple(instructions))}
else:
            raise ValueError('Unexpected instruction [%s]' % xs[0])
def valid_signal(signal):
if not signal:
return True
a = signal[0]
if a != 0 and a != 1:
return False
for b in signal[1:]:
if b != (a + 1) % 2:
return False
a = b
return True
def out(m, signal, unknown):
v = register_or_constant(m, unknown)
signal.append(v)
m.pc += 1
def register_or_constant(machine, unknown):
if is_constant(unknown):
return int(unknown)
else:
return get_register(machine, unknown)
def is_constant(unknown):
return unknown not in ('a', 'b', 'c', 'd')
def cpy(m, value, register):
v = register_or_constant(m, value)
if not is_constant(register):
set_register(m, register, v)
m.pc += 1
def inc(m, register):
if not is_constant(register):
v = get_register(m, register)
set_register(m, register, v + 1)
m.pc += 1
def dec(m, register):
if not is_constant(register):
v = get_register(m, register)
set_register(m, register, v - 1)
m.pc += 1
def jnz(m, value, delta):
v = register_or_constant(m, value)
d = register_or_constant(m, delta)
if v == 0:
m.pc += 1
else:
m.pc += int(d)
def tgl(m, unknown, instructions):
v = register_or_constant(m, unknown)
if m.pc + v >= 0 and m.pc + v < len(instructions):
instruction = instructions[m.pc + v].strip().split()
if len(instruction) == 2:
if instruction[0] == 'inc':
instructions[m.pc + v] = 'dec %s' % (instruction[1])
else:
instructions[m.pc + v] = 'inc %s' % (instruction[1])
else:
assert(len(instruction) == 3)
if instruction[0] == 'jnz':
instructions[m.pc + v] = 'cpy %s %s' % (instruction[1], instruction[2])
else:
instructions[m.pc + v] = 'jnz %s %s' % (instruction[1], instruction[2])
m.pc += 1
def get_register(machine, register):
if register == 'a':
return machine.a
if register == 'b':
return machine.b
if register == 'c':
return machine.c
if register == 'd':
return machine.d
raise ValueError('Unexpected register %s' % register)
def set_register(machine, register, value):
if register == 'a':
machine.a = value
elif register == 'b':
machine.b = value
elif register == 'c':
machine.c = value
elif register == 'd':
machine.d = value
else:
raise ValueError('Unexpected register %s' % register)
inp = sys.stdin.readlines()
print(solve(inp))
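# Hedged usage note (illustration only): the script reads an assembunny program
# from stdin, e.g.  python3 day25.py < input.txt  (the file name is hypothetical).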
|
At Royal Dumpster, we strive to bring you the best service at the best price available. We have a wide selection of dumpster sizes in stock in Trenary, MI, with flexible and timely pickup and delivery. Give us a call today for all your dumpster rental and roll-off needs.
Dumpster services play an important role in keeping your neighborhood and roads clean and letting you breathe fresh air whenever you walk around your home. Put simply, they help maintain a clean and healthy living environment.
Household, workplace, or party trash: you can now have it all hauled away in an environmentally responsible way. Just settle on a budget and you are ready for these convenient services.
The good news is that there are dumpster professionals out there who will take care of both the collection and the disposal of the garbage you're clearing out. If you are looking for comprehensive cleanout help, ask around at different dumpster companies to find out which ones offer that type of service. Knowing who offers what ahead of time will help you make an informed decision when hiring a dumpster service.
Look for a 20-yard dumpster rental firm that is nearby and operates in your specific part of Trenary, MI. The yellow pages and the internet are good places to start. Using a local service means the company is more likely to deliver and pick up on time, since it is close by; a nationwide chain may have unexpected delays that leave unwanted, unsightly trash in your yard longer than you'd like. A local company can reach you faster and has the opportunity to provide better customer service.
Was there ever really any doubt that you would end up with a ton of waste from a demolition or remodeling project? Since the two are rarely mutually exclusive, you are likely to find yourself up to your ears in construction waste before long. The remedy is simple: rent a dumpster. Once you've decided that, the only thing left to figure out is how much you can fit in your rental dumpster. One 20-yard dumpster rental company offering related event services is S & B Porta-Bowl Restrooms in Denver, Colorado. With locations in Aurora, Colorado Springs, and Evans, Colorado, S & B Porta-Bowl Restrooms provides trusted dumpster service along with portable hand sinks, portable toilets, and even portable showers for rent, and can pair these offerings to make your next big event a success. Bundling services increases a company's value because it lets them address more of their clients' needs.
A company's experience in this business should also be factored in. You might think that hauling waste doesn't require much experience, but in any industry or line of work there are challenges, and people who have worked in it for a while will have developed ways of overcoming those challenges and perfected their work. With an experienced company you are likely to get first-class service compared to one that is just getting started. Weigh this, along with factors such as proximity, when searching for dumpster rental services.
The company should answer the phone when you call. Your time is worth something, and you want to avoid the phone-tag game. When you call a provider, make sure you get a salesperson's name, and if you are put on hold, listen to how professional the on-hold message is. The more professional these touches are, the more likely you are dealing with a reputable organization.
Once the job gets started, all of the cabinets will be ripped out and will need somewhere to be thrown away. You don't want old pieces of wood and cabinet hardware lying around, and the construction crew wants to keep the work area as neat as possible, too.
When you are planning a home improvement project, it can be easy to overlook the cleanup side of things. Understandably, your focus is usually on the what, the when, and the how. This can come back to haunt you, though, when you reach the end and are confronted with a mountain of extra cleanup; it can feel like a whole second project. That is where a roll-off dumpster can step in as a lifesaver. Here is how renting a dumpster can save you big on your next home improvement project.
For all-out clearing projects, home renovations, or appliance or furniture disposal, having a dumpster on hand is a great idea because there is one centralized place to put everything you need to get rid of. You don't have to worry about debris piling up, or about how you will get it all off your property; your dumpster company takes care of that.
Be sure to ask your local company whether they have the means and capacity to handle your special needs for clearing out the various kinds of building debris.
40-yard roll-off dumpsters tend to be used only on full-scale construction sites. They can hold an incredible amount of junk, but because they take up a considerable amount of room it usually isn't feasible to use them on most commercial or residential sites.
Getting help from a professional rental provider to select the right-sized dumpster will save you money, since you won't end up paying for empty space. Letting the professional rental company meet your waste management needs will also put you at ease about trash disposal and save you a great deal of time.
Containers of different types come at varying prices depending on size and rental duration, and customers should learn more about these options. Having a general idea of what a dumpster costs will help them set a budget and find a company that offers affordable prices without compromising on quality of service. While researching prices, they should also pick up the terminology used in this market so they can communicate clearly with the company's staff.
These roll-off dumpsters are among the most popular sizes used by homeowners. They are great for getting rid of extra trash after general maintenance on a house, especially when renovating or clearing out room in the attic.
Don't trust a bid from a junk hauler that does not offer free on-site quotes. A junk removal service cannot give you an accurate price without seeing exactly what junk or waste needs to be hauled away. An on-site quote is also a way of gauging a company's customer service without spending money, and it is your chance to look over the truck; size does matter. Again, there are dumpster pros who will take care of both the collection and the disposal of the rubbish you're eliminating; if you want comprehensive cleanout help, ask different companies which of them offers that type of support so you can make an informed hiring decision.
Do you want your state to stand among the top regions of the world? If so, then do your part to promote recycling waste in an eco-friendly way, reducing the impact of pollution on the environment. Few people who generate waste understand the recycling process, which leaves customers in a dilemma. Professional 20-yard dumpster rental providers in Trenary, MI offer services that make the task simple at very reasonable cost. As experts in the field, these providers take due care to ensure that disposed waste is recycled in an environmentally friendly way, supplying dumpsters on a rental basis to contain the waste you generate and handling the recycling afterward.
Now let's move on to the charges for these services, which are really not expensive. Every firm offers a different fee structure: some charge a flat daily fee for the service, others rent containers by the week or less, and some offer cheaper rates outright. Just beware of hidden costs that may be lurking beneath those claims; to avoid unwanted surprises, read the terms and conditions of the company providing the service.
Got trash? Dumpster rental or full-service trash haulers can provide the customized junk removal service you need. When you choose a 20-yard dumpster rental firm in Trenary, MI, service options vary, so sift through them and work out the best fit for you.
Finally, consider the reason behind the horrifying environmental issues affecting Alaska: it is none other than our own careless deeds. We contaminate our surroundings by disposing of solid waste the wrong way. How does improper waste disposal pollute the state? Solid waste generated by household and commercial projects, when piled up at a job site for a long period, discharges toxic gases that contaminate the air, and accumulating debris exposes workers to infectious diseases. Some residents choose to channel their trash into the water, and dumping solid waste there makes survival miserable for the creatures living in it; drinking that contaminated water affects residents' health as well. |
import discord
from discord.ext import commands
from sys import argv
class Memes:
"""
Meme commands
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
async def _meme(self, ctx, msg):
author = ctx.message.author
if ctx.message.channel.name[0:5] == "help-" or "assistance" in ctx.message.channel.name or (self.bot.nomemes_role in author.roles):
await self.bot.delete_message(ctx.message)
try:
                await self.bot.send_message(author, "Meme commands are disabled in this channel, or your privileges have been revoked.")
            except discord.errors.Forbidden:
                await self.bot.say(author.mention + " Meme commands are disabled in this channel, or your privileges have been revoked.")
else:
await self.bot.say(self.bot.escape_name(ctx.message.author.display_name) + ": " + msg)
# list memes
@commands.command(name="listmemes", pass_context=True)
async def _listmemes(self, ctx):
"""List meme commands."""
# this feels wrong...
funcs = dir(self)
msg = "```\n"
msg += ", ".join(func for func in funcs if func != "bot" and func[0] != "_")
msg += "```"
await self._meme(ctx, msg)
# 3dshacks memes
@commands.command(pass_context=True, hidden=True)
async def s_99(self, ctx):
"""Memes."""
await self._meme(ctx, "**ALL HAIL BRITANNIA!**")
@commands.command(pass_context=True, hidden=True)
async def adrian1(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/j0Dkv2Z.png")
@commands.command(pass_context=True, hidden=True)
async def adrian2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/2SBC1Qo.jpg")
@commands.command(pass_context=True, hidden=True)
async def adrian3(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/EsDWK9U.png")
@commands.command(pass_context=True, hidden=True)
async def dubyadud(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/xesCnmM.jpg")
@commands.command(pass_context=True, hidden=True)
async def megumi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/GMRp1dj.jpg")
@commands.command(pass_context=True, hidden=True)
async def inori(self, ctx):
"""Memes."""
await self._meme(ctx, "https://i.imgur.com/WLncIsi.gif")
@commands.command(pass_context=True, hidden=True)
async def inori2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/V0uu99A.jpg")
@commands.command(pass_context=True, hidden=True)
async def inori3(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/so8thgu.gifv")
@commands.command(pass_context=True, hidden=True)
async def inori4(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/267IXh1.gif")
@commands.command(pass_context=True, hidden=True)
async def inori5(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/lKcsiBP.png")
@commands.command(pass_context=True, hidden=True)
async def inori6(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/SIJzpau.gifv")
@commands.command(pass_context=True, hidden=True)
async def kina(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/8Mm5ZvB.jpg")
@commands.command(pass_context=True, hidden=True)
async def shotsfired(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/zf2XrNk.gifv")
@commands.command(pass_context=True, hidden=True)
async def rusure(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/dqh3fNi.png")
@commands.command(pass_context=True, hidden=True)
async def r34(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/sjQZKBF.gif")
@commands.command(pass_context=True, hidden=True)
async def lenny(self, ctx):
"""Memes."""
await self._meme(ctx, "( ͡° ͜ʖ ͡°)")
@commands.command(pass_context=True, hidden=True)
async def rip(self, ctx):
"""Memes."""
await self._meme(ctx, "Press F to pay respects.")
@commands.command(pass_context=True, hidden=True)
async def permabrocked(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/ARsOh3p.jpg")
@commands.command(pass_context=True, hidden=True)
async def knp(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/EsJ191C.png")
@commands.command(pass_context=True, hidden=True)
async def lucina(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/tnWSXf7.png")
@commands.command(pass_context=True, hidden=True)
async def lucina2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/ZPMveve.jpg")
@commands.command(pass_context=True, hidden=True)
async def xarec(self, ctx):
"""Memes."""
await self._meme(ctx, "https://i.imgur.com/wRVuidH.gif")
@commands.command(pass_context=True, hidden=True)
async def xarec2(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/A59RbRT.png")
@commands.command(pass_context=True, hidden=True)
async def clap(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/UYbIZYs.gifv")
@commands.command(pass_context=True, hidden=True)
async def ayyy(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/bgvuHAd.png")
@commands.command(pass_context=True, hidden=True)
async def hazel(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/vpu8bX3.png")
@commands.command(pass_context=True, hidden=True)
async def thumbsup(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/hki1IIs.gifv")
@commands.command(pass_context=True, hidden=True)
async def bigsmoke(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/vo5l6Fo.jpg\nALL YOU HAD TO DO WAS FOLLOW THE DAMN GUIDE CJ!")
@commands.command(pass_context=True, hidden=True)
async def bigorder(self, ctx):
"""Memes."""
await self._meme(ctx, "I’ll have two number 9s, a number 9 large, a number 6 with extra dip, a number 7, two number 45s, one with cheese, and a large soda.")
# Cute commands :3
@commands.command(pass_context=True, hidden=True)
async def headpat(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/7V6gIIW.jpg")
@commands.command(pass_context=True, hidden=True)
async def headpat2(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/djhHX0n.gifv")
@commands.command(pass_context=True, hidden=True)
async def sudoku(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/VHlIZRC.png")
@commands.command(pass_context=True, hidden=True)
async def rawr(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/Bqw4OwQ.png")
@commands.command(pass_context=True, hidden=True)
async def baka(self, ctx):
"""Cute"""
await self._meme(ctx, "http://i.imgur.com/OyjCHNe.png")
@commands.command(pass_context=True, hidden=True)
async def led(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/FYsxaUZ.jpg")
@commands.command(pass_context=True, hidden=True)
async def snickers(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/Ek0uDUn.jpg")
@commands.command(pass_context=True, hidden=True)
async def mugi(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/lw80tT0.gif")
@commands.command(pass_context=True, hidden=True)
async def rollsafe(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/n0xi1gZ.png")
@commands.command(pass_context=True, hidden=True)
async def lisp(self, ctx):
"""Memes."""
await self._meme(ctx, "http://i.imgur.com/RQeZErU.png")
@commands.command(pass_context=True, hidden=True)
async def dev(self, ctx):
"""Reminds user where they are."""
await self.bot.say("You seem to be in <#196635781798952960>.")
# Load the extension
def setup(bot):
bot.add_cog(Memes(bot))
|
On such a buzzy day in the nonprofit world, your #GivingTuesday campaign will be the key to standing out. As Executive Director of The David Sheldrick Wildlife Trust, Melissa Sciacca leverages over a decade of experience in wildlife conservation to support, promote, and expand her organization. Here are Melissa’s top tips to help you create a campaign that resonates on the nonprofit community’s biggest day of the year.
#1. Create original blog and video content that compels people to give.
#2. Make your Giving Tuesday ask goal-oriented.
“Be specific about what you’re asking them for so they can see a direct impact,” she advises. “Nonprofits fall into a trap of generating a general ask on Giving Tuesday when they should be promoting a specific campaign.” Think compelling, creative, fun. A memorable campaign is a successful one.
#3. Offer an added benefit to participating on Giving Tuesday.
#4. Set yourself apart from the rest of the participating nonprofits.
#5. Learn from your Giving Tuesday experience.
The day after Giving Tuesday is a day to celebrate your successes, pinpoint opportunities to grow, and set goals for the following year. Melissa pulls Giving Tuesday reports in DonorPerfect to set a benchmark for what her team can accomplish the next year. She also analyzes their Giving Tuesday campaigns in Constant Contact and on social media to understand what people respond to so they can apply those learnings in the future.
Many people celebrate Giving Tuesday, so how will your nonprofit stand out? The key to successfully drawing donors is to create the perfect blend of excitement with easy ways to give.
From rallying your supporters through a buzzy crowdfunding campaign to keeping in touch with your #GivingTuesday goal progress on social media, discover many ways to shine online in this brand new free guide, #GivingTuesday Ideas for 2018. Read it now!
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
"""
Generate Entries by Statement from Invoices
"""
_inherit = "account.statement.from.invoice.lines"
def populate_statement(self, cr, uid, ids, context=None):
context = dict(context or {})
statement_id = context.get('statement_id', False)
if not statement_id:
return {'type': 'ir.actions.act_window_close'}
data = self.read(cr, uid, ids, context=context)[0]
line_ids = data['line_ids']
if not line_ids:
return {'type': 'ir.actions.act_window_close'}
line_obj = self.pool.get('account.move.line')
statement_obj = self.pool.get('account.bank.statement')
statement_line_obj = self.pool.get('account.bank.statement.line')
currency_obj = self.pool.get('res.currency')
line_date = time.strftime('%Y-%m-%d')
statement = statement_obj.browse(
cr, uid, statement_id, context=context)
# for each selected move lines
for line in line_obj.browse(cr, uid, line_ids, context=context):
ctx = context.copy()
# take the date for computation of currency => use payment date
ctx['date'] = line_date
amount = 0.0
if line.debit > 0:
amount = line.amount_residual
elif line.credit > 0:
amount = -line.amount_residual
if line.amount_currency:
amount = currency_obj.compute(cr, uid, line.currency_id.id,
statement.currency.id, -line.amount_residual_currency, context=ctx)
context.update({'move_line_ids': [line.id],
'invoice_id': line.invoice.id})
statement_line_obj.create(cr, uid, {
'name': line.name or '?',
'amount': amount,
'partner_id': line.partner_id.id,
'statement_id': statement_id,
'ref': line.ref,
'date': statement.date,
}, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
We find related hooks using word stems. load_textdomain_mofile has 3 significant word stems: load, textdomain, mofile. Note that some of the hooks below may not appear in all versions of WP. A minimal sketch of this stem matching follows.
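A hedged sketch, assuming hook names split into stems on underscores and that two hooks are "related" when they share at least one stem; the hook list is illustrative, not WordPress's actual registry:

def significant_stems(hook_name):
    # "load_textdomain_mofile" -> {"load", "textdomain", "mofile"}
    return set(hook_name.split('_'))

def related_hooks(hook_name, all_hooks):
    # related = shares at least one word stem with the query hook
    stems = significant_stems(hook_name)
    return [h for h in all_hooks if h != hook_name and stems & significant_stems(h)]

hooks = ['load_textdomain', 'unload_textdomain', 'override_load_textdomain', 'pre_load_textdomain']
print(related_hooks('load_textdomain_mofile', hooks))
|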
#!/usr/bin/env python3
import re
import sys
import json
import requests
from lxml import etree
from pymods import OAIReader
from bs4 import BeautifulSoup
sys.path.append('../assets')
import assets
tn = {'name': 'sobek', 'prefix': 'http://dpanther.fiu.edu/sobek/content'}
nameSpace_default = { None: '{http://www.loc.gov/mods/v3}',
'oai_dc': '{http://www.openarchives.org/OAI/2.0/oai_dc/}',
'dc': '{http://purl.org/dc/elements/1.1/}',
'mods': '{http://www.loc.gov/mods/v3}',
'dcterms': '{http://purl.org/dc/terms/}',
'xlink': '{http://www.w3.org/1999/xlink}',
'repox': '{http://repox.ist.utl.pt}',
'oai_qdc': '{http://worldcat.org/xmlschemas/qdc-1.0/}'}
PROVIDER = 'FSU'
dprovide = 'FSU'
dc = nameSpace_default['dc']
VERBOSE = True
def write_json_ld(docs):
with open('testData/fiu-repoxfull.json', 'w') as jsonOutput:
json.dump(docs, jsonOutput, indent=2)
with open('test_data/fiu_bzs-1.xml', encoding='utf-8') as data_in:
records = OAIReader(data_in)
docs = []
for record in records:
if 'deleted' in record.attrib.keys():
if record.attrib['deleted'] == 'true':
pass
else:
oai_id = record.oai_urn
if VERBOSE:
print(oai_id)
# logging.debug(oai_id)
sourceResource = {}
# sourceResource.alternative
# sourceResource.collection
# sourceResource.contributor
if record.metadata.get_element('.//{0}contributor'.format(dc)):
sourceResource['contributor'] = [{"name": name}
for name in
record.metadata.get_element(
'.//{0}contributor'.format(dc),
delimiter=';')]
# sourceResource.creator
if record.metadata.get_element('.//{0}creator'.format(dc)):
sourceResource['creator'] = []
for name in record.metadata.get_element('.//{0}creator'.format(dc),
delimiter=';'):
# need to test for ( Contributor ) and ( contributor )
if (len(name) > 0) and ("ontributor )" not in name):
sourceResource['creator'].append({"name": name.strip(" ")})
elif "ontributor )" in name:
if 'contributor' not in sourceResource.keys():
sourceResource['contributor'] = []
sourceResource['contributor'].append({"name": name.strip(
" ").rstrip("( Contributor )").rstrip(
"( contributor )")})
else:
sourceResource['contributor'].append(
{"name": name.strip(" ").rstrip(
"( Contributor )").rstrip("( contributor )")})
# sourceResource.date
date = record.metadata.get_element('.//{0}date'.format(dc))
if date:
sourceResource['date'] = {"begin": date[0], "end": date[0]}
# sourceResource.description
if record.metadata.get_element('.//{0}description'.format(dc)):
sourceResource['description'] = record.metadata.get_element(
'.//{0}description'.format(dc), delimiter=';')
# sourceResource.extent
# sourceResource.format
if record.metadata.get_element('.//{0}format'.format(dc)):
sourceResource['format'] = record.metadata.get_element(
'.//{0}format'.format(dc))
# sourceResource.genre
# sourceResource.identifier
dPantherPURL = re.compile('dpService/dpPurlService/purl')
identifier = record.metadata.get_element('.//{0}identifier'.format(dc))
try:
for ID in identifier:
PURL = dPantherPURL.search(ID)
                try:
                    PURL_match = PURL.string
                except AttributeError as err:
                    # logging.warning(
                    #     'sourceResource.identifier: {0} - {1}'.format(err,
                    #                                                   oai_id))
                    print(err, oai_id)
                    continue  # not a dPanther PURL; skip this identifier
                sourceResource['identifier'] = PURL_match
except TypeError as err:
# logging.warning(
# 'sourceResource.identifier: {0} - {1}'.format(err,
# oai_id))
print(err, oai_id)
pass
# if identifier is not None and len(identifier) > 1:
# sourceResource['identifier'] = []
# for ID in identifier:
# try:
# PURL = dPantherPURL.search(ID)
# if PURL:
# PURL_match = PURL.string
# else:
# sourceResource['identifier'].append(ID)
# except TypeError as err:
# # logging.warning(
# # 'sourceResource.identifier: {0} - {1}'.format(err,
# # oai_id))
# print(err, oai_id)
# pass
# else:
# sourceResource['identifier'] = identifier
# sourceResource.language
if record.metadata.get_element('.//{0}language'.format(dc)):
sourceResource['language'] = []
for element in record.metadata.get_element(
'.//{0}language'.format(dc), delimiter=';'):
if len(element) > 3:
sourceResource['language'].append({"name": element})
else:
sourceResource['language'].append({"iso_639_3": element})
# sourceResource.place : sourceResource['spatial']
if record.metadata.get_element('.//{0}coverage'.format(dc)):
sourceResource['spatial'] = [{'name': place}
for place in
record.metadata.get_element(
'.//{0}coverage'.format(dc))]
# sourceResource.publisher
if record.metadata.get_element('.//{0}publisher'.format(dc)):
sourceResource['publisher'] = record.metadata.get_element(
'.//{0}publisher'.format(dc))
# sourceResource.relation
# sourceResource.isReplacedBy
# sourceResource.replaces
# sourceResource.rights
rights = record.metadata.get_element('.//{0}rights'.format(dc))
if rights:
sourceResource['rights'] = [{'text': rights[0]}]
else:
# logging.warning('No sourceResource.rights - {0}'.format(oai_id))
continue
# sourceResource.subject
if record.metadata.get_element('.//{0}subject'.format(dc)):
sourceResource['subject'] = []
for term in record.metadata.get_element('.//{0}subject'.format(dc),
delimiter=';'):
                term = re.sub(r"\( lcsh \)$", '', term)
if len(term) > 0:
sourceResource['subject'].append({"name": term.strip(" ")})
# sourceResource.title
title = record.metadata.get_element('.//{0}title'.format(dc))
if title:
sourceResource['title'] = title
else:
# logging.warning('No sourceResource.rights - {0}'.format(oai_id))
print('Rights', oai_id)
continue
# sourceResource.type
if record.metadata.get_element('.//{0}type'.format(dc)):
sourceResource['type'] = record.metadata.get_element(
'.//{0}type'.format(dc), delimiter=';')
# webResource.fileFormat
# aggregation.dataProvider
data_provider = dprovide
# aggregation.intermediateProvider
# aggregation.isShownAt
# aggregation.preview
try:
preview = assets.thumbnail_service(PURL_match, tn)
except UnboundLocalError as err:
# logging.warning('aggregation.preview: {0} - {1}'.format(err, oai_id))
print(err, oai_id)
continue
# aggregation.provider
try:
docs.append({"@context": "http://api.dp.la/items/context",
"sourceResource": sourceResource,
"aggregatedCHO": "#sourceResource",
"dataProvider": data_provider,
"isShownAt": PURL_match,
"preview": preview,
"provider": PROVIDER})
except NameError as err:
# logging.warning('aggregation.preview: {0} - {1}'.format(err, oai_id))
print(err, oai_id)
pass
#write_json_ld(docs) # write test
print(json.dumps(docs, indent=2)) # dump test
|
Gone ‘Til November | A Consequence of Hypoglycemia.
Thankful to have insurance and the ability to book an appointment with a new endocrinologist in San Francisco. Bummed that it’s in November.
The good news is I’ve scheduled an appointment with a local endocrinologist. The not so good news is that it’s in November – that’s the soonest I could get in as a new patient. The somewhat comforting news is that my existing prescriptions, and refills will last me until then. But the bummer is that I’m officially locked into my current diabetes treatment until I meet my new doctor.
What bums me out the most is that I have to wait until November to find out if this endocrinologist will be a good fit for me. Do they take the time to listen instead of waiting for their turn to talk? What are their thoughts on the value of social media and peer-to-peer support structures? Do I mention my blog as soon as possible? Should I go in with a list of questions? (Yes) What should be on that list?
Thankfully, I’ll have plenty of time to get back in shape and offer up a legitimate A1c by the time my appointment comes up. First impressions are everything, right? But as I continue to mull over switching to an insulin pump I’m not sure if this wait will become the tipping point that favors the switch, or delays it for another indeterminate amount of time.
Regardless of how I eventually decide to treat my diabetes, I need to rebuild my exercise habits.
I just need to remember: baby steps to a better version of me. |
import plotly.offline as py
py.init_notebook_mode()
from temp_pre_process import temp_pre_process
def temp_map(temp, year):
# Pre-processes the temperature data so that it can be plotted by plotly.
df2 = temp_pre_process(temp, year)
#scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\
#[0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']]
data = [ dict(
type='choropleth',
#colorscale = scl,
autocolorscale = True,
locations = df2.index,
z = df2['AverageTemperature'].astype(float),
locationmode = 'USA-states',
text = df2['text'],
marker = dict(
line = dict (
color = 'rgb(255,255,255)',
width = 2
) ),
colorbar = dict(
title = '°C')
) ]
layout = dict(
title = year+' US Average Temperature by State<br>(Hover for details)',
geo = dict(
scope='usa',
projection=dict( type='albers usa' ),
showlakes = True,
lakecolor = 'rgb(255, 255, 255)'),
)
fig = dict( data=data, layout=layout )
py.iplot( fig, filename='us-temperature-map' )
plotSuccessful = "Temperature map plotted."
return fig, plotSuccessful
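# Hedged usage sketch (illustration only): `temp` is assumed to be the
# state-level average-temperature DataFrame expected by temp_pre_process,
# and the year is passed as a string so it can be spliced into the title.
# fig, status = temp_map(temp, '2000')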
|
Until now, the Apple Xserve was the Metadata Controller of choice for Xsan installations, providing enterprise form factor and capabilities.
Xsan users in high-pressure Media and Entertainment markets require datacenter-level quality products to manage their SAN volumes, but also demand the ease of use and setup of Xsan.
Previously, that meant one thing: Xsan on an Xserve. But all that changes—with ActiveSAN.
Designed to be the best Metadata Controller for Xsan and StorNext® installations, ActiveSAN is much more than a generic server with StorNext® software loaded on it. ActiveSAN is a fully integrated Metadata Controller solution consisting of a highly innovative Intel Xeon server-based appliance that features a straightforward design for easy installation and serviceability. It combines stunning Active Storage product design and the rock-solid performance and reliability of the Intel Nehalem platform in a 1U rack form factor. ActiveSAN utilizes an enterprise hardened Linux operating system and the Quantum StorNext® SAN file system. |
#!/bin/python3
import pika
import json
import logging
class FaceRecognizerClient(object):
def __init__(self,host,queue_send,queue_receive):
self.queue_send = queue_send
self.queue_receive = queue_receive
self.host = host
self.__connect()
def __connect(self):
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host))
self.channel = self.connection.channel()
self.channel.queue_declare( queue=self.queue_receive)
self.channel.basic_consume(self.on_response,
no_ack=True,
queue=self.queue_receive)
def on_response(self, ch, method, props, body):
self.response = body
def publish(self, message):
self.response = None
try:
self.__publish(message)
except pika.exceptions.ConnectionClosed:
            logging.warning('Connection lost, reconnecting')
self.__connect()
self.__publish(message)
        logging.debug('Message sent to the face recognizer; waiting for its response')
        while self.response is None:
            self.connection.process_data_events()
        logging.debug('The face recognizer responded %s', self.response)
return self.response
def __publish(self,message):
self.channel.basic_publish(exchange='',
routing_key=self.queue_send,
properties=pika.BasicProperties(
reply_to = self.queue_receive
),
body=message)
def update(self,images):
message = {
'type': 'update',
'images': images
}
response = json.loads(self.publish(json.dumps(message)).decode('utf-8'))
return response['id']
def predict(self,faces):
message = {
'type': 'predict',
'faces': faces
}
response = json.loads(self.publish(json.dumps(message)).decode('utf-8'))
return response['ids']
def close(self):
try:
self.connection.close()
except pika.exceptions.ConnectionClosed:
            logging.warning('The connection was already closed')
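# --- Hedged usage sketch (illustration only; the host, queue names, and
# message payloads below are assumptions, not part of the original file) ---
# client = FaceRecognizerClient('localhost', 'face_requests', 'face_responses')
# person_id = client.update([{'label': 'alice', 'image': '<base64 jpeg>'}])
# ids = client.predict(['<base64 face crop>'])
# client.close()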
|
Main floor ranch-style condominium with 2 bedrooms and 2 full baths! Updated washer, dryer, and fridge arriving April 14, 2019. Cozy 3rd-bedroom option near the living room. No stairs! First level in the back. A one-car garage is right across the street. Conveniently close to downtown Ann Arbor, Briarwood Mall, I-94, and Costco/Meijer! Great views out the rear; $250 non-refundable pet fee. |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import posixpath
from future import Future
from path_util import SplitParent
from special_paths import SITE_VERIFICATION_FILE
def _SimplifyFileName(file_name):
return (posixpath.splitext(file_name)[0]
.lower()
.replace('.', '')
.replace('-', '')
.replace('_', ''))
class PathCanonicalizer(object):
'''Transforms paths into their canonical forms. Since the docserver has had
many incarnations - e.g. there didn't use to be apps/ - there may be old
paths lying around the webs. We try to redirect those to where they are now.
'''
def __init__(self,
file_system,
object_store_creator,
strip_extensions):
# |strip_extensions| is a list of file extensions (e.g. .html) that should
# be stripped for a path's canonical form.
self._cache = object_store_creator.Create(
PathCanonicalizer, category=file_system.GetIdentity())
self._file_system = file_system
self._strip_extensions = strip_extensions
def _LoadCache(self):
cached_future = self._cache.GetMulti(('canonical_paths',
'simplified_paths_map'))
def resolve():
# |canonical_paths| is the pre-calculated set of canonical paths.
# |simplified_paths_map| is a lazily populated mapping of simplified file
# names to a list of full paths that contain them. For example,
# - browseraction: [extensions/browserAction.html]
# - storage: [apps/storage.html, extensions/storage.html]
cached = cached_future.Get()
canonical_paths, simplified_paths_map = (
cached.get('canonical_paths'), cached.get('simplified_paths_map'))
if canonical_paths is None:
assert simplified_paths_map is None
canonical_paths = set()
simplified_paths_map = defaultdict(list)
for base, dirs, files in self._file_system.Walk(''):
for path in dirs + files:
path_without_ext, ext = posixpath.splitext(path)
canonical_path = posixpath.join(base, path_without_ext)
if (ext not in self._strip_extensions or
path == SITE_VERIFICATION_FILE):
canonical_path += ext
canonical_paths.add(canonical_path)
simplified_paths_map[_SimplifyFileName(path)].append(canonical_path)
# Store |simplified_paths_map| sorted. Ties in length are broken by
# taking the shortest, lexicographically smallest path.
for path_list in simplified_paths_map.itervalues():
path_list.sort(key=lambda p: (len(p), p))
self._cache.SetMulti({
'canonical_paths': canonical_paths,
'simplified_paths_map': simplified_paths_map,
})
else:
assert simplified_paths_map is not None
return canonical_paths, simplified_paths_map
return Future(callback=resolve)
def Canonicalize(self, path):
'''Returns the canonical path for |path|.
'''
canonical_paths, simplified_paths_map = self._LoadCache().Get()
# Path may already be the canonical path.
if path in canonical_paths:
return path
# Path not found. Our single heuristic: find |base| in the directory
# structure with the longest common prefix of |path|.
_, base = SplitParent(path)
potential_paths = simplified_paths_map.get(_SimplifyFileName(base))
if not potential_paths:
# There is no file with anything close to that name.
return path
# The most likely canonical file is the one with the longest common prefix
# with |path|. This is slightly weaker than it could be; |path| is
# compared, not the simplified form of |path|, which may matter.
max_prefix = potential_paths[0]
max_prefix_length = len(posixpath.commonprefix((max_prefix, path)))
for path_for_file in potential_paths[1:]:
prefix_length = len(posixpath.commonprefix((path_for_file, path)))
if prefix_length > max_prefix_length:
max_prefix, max_prefix_length = path_for_file, prefix_length
return max_prefix
def Cron(self):
return self._LoadCache()
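# --- Hedged usage sketch (illustration only; the arguments and paths are
# hypothetical, not actual Chromium docserver objects) ---
# canonicalizer = PathCanonicalizer(file_system, object_store_creator, ['.html'])
# canonicalizer.Canonicalize('browserAction.html')
# # -> e.g. 'extensions/browserAction', the longest-common-prefix match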
|
Fact: No other type of footwear is as versatile as the white sneaker. Clean, minimal, and endlessly versatile, white sneakers look just as great with a pair of sweatpants as they do with a dressy suit. And now that the weather's finally clearing up, you have an excuse to bust out some lighter footwear. So put a spring in your step with these seven stylish pairs. |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import numpy as np
from psi4 import core
from .exceptions import *
### Matrix and Vector properties
# The next three functions make me angry
def translate_interface(interface):
"""
This is extra stupid with unicode
"""
if sys.version_info[0] > 2:
return interface
nouni_interface = {}
for k, v in interface.items():
if k == 'typestr':
nouni_interface[k.encode('ascii', 'ignore')] = v.encode('ascii', 'ignore')
else:
nouni_interface[k.encode('ascii', 'ignore')] = v
return nouni_interface
class numpy_holder(object):
"""
Blank object, stupid. Apparently you cannot create a view directly from a dictionary
"""
def __init__(self, interface):
self.__array_interface__ = translate_interface(interface)
def _get_raw_views(self, copy=False):
"""
Gets simple raw view of the passed in object.
"""
ret = []
for data in self.array_interface():
# Yet another hack
if isinstance(data["shape"], list):
data["shape"] = tuple(data["shape"])
if 0 in data["shape"]:
ret.append(np.empty(shape=data["shape"]))
else:
ret.append(np.array(numpy_holder(data), copy=copy))
return ret
def _find_dim(arr, ndim):
"""
Helper function to help deal with zero or sized arrays
"""
# Zero arrays
if (arr is None) or (arr is False):
return [0] * ndim
# Make sure this is a numpy array like thing
    try:
        arr.shape
    except AttributeError:
        raise ValidationError("Expected numpy array, found object of type '%s'" % type(arr))
if len(arr.shape) == ndim:
return [arr.shape[x] for x in range(ndim)]
else:
raise ValidationError("Input array does not have a valid shape.")
def array_to_matrix(self, arr, name="New Matrix", dim1=None, dim2=None):
"""
Converts a numpy array or list of numpy arrays into a Psi4 Matrix (irreped if list).
Parameters
----------
arr : array or list of arrays
Numpy array or list of arrays to use as the data for a new core.Matrix
name : str
Name to give the new core.Matrix
dim1 : list, tuple, or core.Dimension (optional)
If a single dense numpy array is given, a dimension can be supplied to
apply irreps to this array. Note that this discards all extra information
given in the matrix besides the diagonal blocks determined by the passed
dimension.
dim2 :
Same as dim1 only if using a Psi4.Dimension object.
Returns
-------
matrix : :py:class:`~psi4.core.Matrix` or :py:class:`~psi4.core.Vector`
Returns the given Psi4 object
Notes
-----
This is a generalized function to convert a NumPy array to a Psi4 object
Examples
--------
>>> data = np.random.rand(20)
>>> vector = array_to_matrix(data)
>>> irrep_data = [np.random.rand(2, 2), np.empty(shape=(0,3)), np.random.rand(4, 4)]
>>> matrix = array_to_matrix(irrep_data)
>>> print matrix.rowspi().to_tuple()
(2, 0, 4)
"""
# What type is it? MRO can help.
arr_type = self.__mro__[0]
# Irreped case
if isinstance(arr, (list, tuple)):
if (dim1 is not None) or (dim2 is not None):
raise ValidationError("Array_to_Matrix: If passed input is list of arrays dimension cannot be specified.")
irreps = len(arr)
if arr_type == core.Matrix:
sdim1 = core.Dimension(irreps)
sdim2 = core.Dimension(irreps)
for i in range(irreps):
d1, d2 = _find_dim(arr[i], 2)
sdim1[i] = d1
sdim2[i] = d2
ret = self(name, sdim1, sdim2)
elif arr_type == core.Vector:
sdim1 = core.Dimension(irreps)
for i in range(irreps):
d1 = _find_dim(arr[i], 1)
sdim1[i] = d1[0]
ret = self(name, sdim1)
else:
raise ValidationError("Array_to_Matrix: type '%s' is not recognized." % str(arr_type))
for view, vals in zip(ret.nph, arr):
if 0 in view.shape: continue
view[:] = vals
return ret
# No irreps implied by list
else:
if arr_type == core.Matrix:
# Build an irreped array back out
if dim1 is not None:
if dim2 is None:
                    raise ValidationError("Array_to_Matrix: If dim1 is supplied, dim2 must be supplied as well.")
dim1 = core.Dimension.from_list(dim1)
dim2 = core.Dimension.from_list(dim2)
if dim1.n() != dim2.n():
raise ValidationError("Array_to_Matrix: Length of passed dim1 must equal length of dim2.")
ret = self(name, dim1, dim2)
start1 = 0
start2 = 0
for num, interface in enumerate(ret.nph):
d1 = dim1[num]
d2 = dim2[num]
if (d1 == 0) or (d2 == 0):
continue
view = np.asarray(interface)
view[:] = arr[start1:start1 + d1, start2:start2 + d2]
start1 += d1
start2 += d2
return ret
# Simple case without irreps
else:
ret = self(name, arr.shape[0], arr.shape[1])
view = _get_raw_views(ret)[0]
view[:] = arr
return ret
elif arr_type == core.Vector:
# Build an irreped array back out
if dim1 is not None:
if dim2 is not None:
                raise ValidationError("Array_to_Matrix: dim2 should not be supplied for 1D vectors.")
dim1 = core.Dimension.from_list(dim1)
ret = self(name, dim1)
start1 = 0
for num, interface in enumerate(ret.nph):
d1 = dim1[num]
if (d1 == 0):
continue
view = np.asarray(interface)
view[:] = arr[start1:start1 + d1]
start1 += d1
return ret
# Simple case without irreps
else:
ret = self(name, arr.shape[0])
ret.np[:] = arr
return ret
else:
raise ValidationError("Array_to_Matrix: type '%s' is not recognized." % str(arr_type))
def _to_array(matrix, copy=True, dense=False):
"""
    Converts a Psi4 Matrix or Vector to a numpy array. Either copies the data or simply
    constructs a view.
Parameters
----------
matrix : :py:class:`~psi4.core.Matrix` or :py:class:`~psi4.core.Vector`
Pointers to which Psi4 core class should be used in the construction.
copy : bool
Copy the data if True, return a view otherwise
dense : bool
Converts irreped Psi4 objects to diagonally blocked dense arrays. Returns a list of arrays otherwise.
Returns
-------
array : np.array or list of of np.array
Returns either a list of np.array's or the base array depending on options.
Notes
-----
This is a generalized function to convert a Psi4 object to a NumPy array
Examples
--------
>>> data = psi4.Matrix(3, 3)
>>> data.to_array()
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
"""
if matrix.nirrep() > 1:
# We will copy when we make a large matrix
if dense:
copy = False
ret = _get_raw_views(matrix, copy=copy)
# Return the list of arrays
if dense is False:
return ret
# Build the dense matrix
if isinstance(matrix, core.Vector):
ret_type = '1D'
elif isinstance(matrix, core.Matrix):
ret_type = '2D'
else:
raise ValidationError("Array_to_Matrix: type '%s' is not recognized." % type(matrix))
dim1 = []
dim2 = []
for h in ret:
# Ignore zero dim irreps
if 0 in h.shape:
dim1.append(0)
dim2.append(0)
else:
dim1.append(h.shape[0])
if ret_type == '2D':
dim2.append(h.shape[1])
ndim1 = np.sum(dim1)
ndim2 = np.sum(dim2)
if ret_type == '1D':
dense_ret = np.zeros(shape=(ndim1))
start = 0
for d1, arr in zip(dim1, ret):
if d1 == 0: continue
dense_ret[start: start + d1] = arr
start += d1
else:
dense_ret = np.zeros(shape=(ndim1, ndim2))
start1 = 0
start2 = 0
for d1, d2, arr in zip(dim1, dim2, ret):
if d1 == 0: continue
dense_ret[start1: start1 + d1, start2: start2 + d2] = arr
start1 += d1
start2 += d2
return dense_ret
else:
return _get_raw_views(matrix, copy=copy)[0]
def _build_view(matrix):
"""
Builds a view of the vector or matrix
"""
views = _to_array(matrix, copy=False, dense=False)
if matrix.nirrep() > 1:
return tuple(views)
else:
return views
def get_view(self):
if hasattr(self, '_np_view_data'):
return self._np_view_data
else:
self._np_view_data = _build_view(self)
return self._np_view_data
@property
def _np_shape(self):
"""
Shape of the Psi4 data object
"""
view_data = get_view(self)
if self.nirrep() > 1:
return tuple(view_data[x].shape for x in range(self.nirrep()))
else:
return view_data.shape
@property
def _np_view(self):
"""
    View for Psi4 data objects with only one irrep.
"""
if self.nirrep() > 1:
raise ValidationError("Attempted to call .np on a Psi4 data object with multiple irreps. Please use .nph for objects with irreps.")
return get_view(self)
@property
def _nph_view(self):
"""
View with irreps.
"""
if self.nirrep() > 1:
return get_view(self)
else:
return get_view(self),
@property
def _array_conversion(self):
if self.nirrep() > 1:
raise ValidationError("__array__interface__ can only be called on Psi4 data object with only one irrep!")
else:
return self.np.__array_interface__
def _np_write(self, filename=None, prefix=""):
ret = {}
ret[prefix + "Irreps"] = self.nirrep()
ret[prefix + "Name"] = self.name
for h, v in enumerate(self.nph):
ret[prefix + "IrrepData" + str(h)] = v
if isinstance(self, core.Matrix):
ret[prefix + "Dim1"] = self.rowdim().to_tuple()
ret[prefix + "Dim2"] = self.coldim().to_tuple()
if isinstance(self, core.Vector):
ret[prefix + "Dim"] = [self.dim(x) for x in range(self.nirrep())]
if filename is None:
return ret
np.savez(filename, **ret)
def _np_read(self, filename, prefix=""):
if isinstance(filename, np.lib.npyio.NpzFile):
data = filename
elif (sys.version_info[0] == 2) and isinstance(filename, (str, unicode)):
if not filename.endswith('.npz'):
filename = filename + '.npz'
data = np.load(filename)
elif (sys.version_info[0] > 2) and isinstance(filename, str):
if not filename.endswith('.npz'):
filename = filename + '.npz'
data = np.load(filename)
else:
raise Exception("Filename not understood: %s" % filename)
ret_data = []
if ((prefix + "Irreps") not in data.keys()) or ((prefix + "Name") not in data.keys()):
raise ValidationError("File %s does not appear to be a numpyz save" % filename)
for h in range(data[prefix + "Irreps"]):
ret_data.append(data[prefix + "IrrepData" + str(h)])
arr_type = self.__mro__[0]
if arr_type == core.Matrix:
dim1 = core.Dimension.from_list(data[prefix + "Dim1"])
dim2 = core.Dimension.from_list(data[prefix + "Dim2"])
ret = self(str(data[prefix + "Name"]), dim1, dim2)
elif arr_type == core.Vector:
dim1 = core.Dimension.from_list(data[prefix + "Dim"])
ret = self(str(data[prefix + "Name"]), dim1)
for h in range(data[prefix + "Irreps"]):
ret.nph[h][:] = ret_data[h]
return ret
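# Example (hedged sketch): round-tripping through NumPy's .npz format with the
# np_write/np_read pair attached below; the filename is illustrative.
#
# >>> mat.np_write("checkpoint")                  # writes checkpoint.npz
# >>> same = core.Matrix.np_read("checkpoint")    # rebuilds name, dims and data
# >>> as_dict = mat.np_write(prefix="MO_")        # no filename: returns a dict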
def _to_serial(data):
"""
Converts an object with a .nph accessor to a serialized dictionary
"""
json_data = {}
json_data["shape"] = []
json_data["data"] = []
for view in data.nph:
json_data["shape"].append(view.shape)
json_data["data"].append(view.tostring())
if len(json_data["shape"][0]) == 1:
json_data["type"] = "vector"
elif len(json_data["shape"][0]) == 2:
json_data["type"] = "matrix"
else:
raise ValidationError("_to_json is only used for vector and matrix objects.")
return json_data
def _from_serial(self, json_data):
"""
Converts serialized data to the correct Psi4 data type
"""
if json_data["type"] == "vector":
dim1 = core.Dimension.from_list([x[0] for x in json_data["shape"]])
ret = self("Vector from JSON", dim1)
elif json_data["type"] == "matrix":
dim1 = core.Dimension.from_list([x[0] for x in json_data["shape"]])
dim2 = core.Dimension.from_list([x[1] for x in json_data["shape"]])
ret = self("Matrix from JSON", dim1, dim2)
else:
raise ValidationError("_from_json did not recognize type option of %s." % str(json_data["type"]))
for n in range(len(ret.nph)):
ret.nph[n].flat[:] = np.fromstring(json_data["data"][n], dtype=np.double)
return ret
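# Example (hedged sketch): JSON-friendly serialization with the to_serial /
# from_serial pair attached below; the payload stores raw bytes per irrep.
#
# >>> blob = mat.to_serial()
# >>> same = core.Matrix.from_serial(blob)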
# Matrix attributes
def _chain_dot(*args, **kwargs):
"""
Chains dot products together from a series of Psi4 Matrix classes.
    By default there are no transposes; an optional list of booleans can be passed in.
"""
trans = kwargs.pop("trans", None)
if trans is None:
trans = [False for x in range(len(args))]
else:
if len(trans) != len(args):
raise ValidationError("Chain dot: The length of the transpose arguements is not equal to the length of args.")
# Setup chain
ret = args[0]
if trans[0]:
ret = ret.transpose()
# Run through
for n, mat in enumerate(args[1:]):
ret = core.Matrix.doublet(ret, mat, False, trans[n + 1])
return ret
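# Example (hedged sketch): chaining matrix products; trans[i] requests a
# transpose of the i-th argument.
#
# >>> C = core.Matrix.chain_dot(A, B)                       # A @ B
# >>> D = core.Matrix.chain_dot(A, B, trans=[True, False])  # A.T @ B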
# Matrix attributes
core.Matrix.from_array = classmethod(array_to_matrix)
core.Matrix.to_array = _to_array
core.Matrix.shape = _np_shape
core.Matrix.np = _np_view
core.Matrix.nph = _nph_view
core.Matrix.__array_interface__ = _array_conversion
core.Matrix.np_write = _np_write
core.Matrix.np_read = classmethod(_np_read)
core.Matrix.to_serial = _to_serial
core.Matrix.from_serial = classmethod(_from_serial)
core.Matrix.chain_dot = _chain_dot
# Vector attributes
core.Vector.from_array = classmethod(array_to_matrix)
core.Vector.to_array = _to_array
core.Vector.shape = _np_shape
core.Vector.np = _np_view
core.Vector.nph = _nph_view
core.Vector.__array_interface__ = _array_conversion
core.Vector.np_write = _np_write
core.Vector.np_read = classmethod(_np_read)
core.Vector.to_serial = _to_serial
core.Vector.from_serial = classmethod(_from_serial)
### CIVector properties
@property
def _civec_view(self):
"Returns a view of the CIVector's buffer"
return np.asarray(self)
core.CIVector.np = _civec_view
### Dimension properties
@classmethod
def _dimension_from_list(self, dims, name="New Dimension"):
"""
Builds a core.Dimension object from a python list or tuple. If a dimension
object is passed a copy will be returned.
"""
if isinstance(dims, (tuple, list, np.ndarray)):
irreps = len(dims)
elif isinstance(dims, core.Dimension):
irreps = dims.n()
else:
raise ValidationError("Dimension from list: Type '%s' not understood" % type(dims))
ret = core.Dimension(irreps, name)
for i in range(irreps):
ret[i] = dims[i]
return ret
def _dimension_to_tuple(dim):
"""
Converts a core.Dimension object to a tuple.
"""
if isinstance(dim, (tuple, list)):
return tuple(dim)
irreps = dim.n()
ret = []
for i in range(irreps):
ret.append(dim[i])
return tuple(ret)
def _dimension_iter(dim):
"""
Provides an iterator class for the Dimension object.
Allows:
dim = psi4.core.Dimension(...)
list(dim)
"""
for i in range(dim.n()):
yield dim[i]
# Dimension attributes
core.Dimension.from_list = _dimension_from_list
core.Dimension.to_tuple = _dimension_to_tuple
core.Dimension.__iter__ = _dimension_iter
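# Example (hedged sketch): Dimension helpers attached above.
#
# >>> dim = core.Dimension.from_list([3, 0, 1, 2], name="occ")
# >>> dim.to_tuple()
# (3, 0, 1, 2)
# >>> list(dim)    # __iter__ makes Dimension iterable
# [3, 0, 1, 2]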
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 10:09:44 2018
@author: Dell
"""
import uuid
from sqlalchemy.sql import and_
from .orm import Config,Point,Frame,FrameSection,FrameLoadDistributed,FrameLoadConcentrated,FrameLoadTemperature,FrameLoadStrain
import logger
def add_frame(self,pt0_coor,pt1_coor,section,name=None):
"""
    Add a frame object to the model. If the name already exists, an exception will be raised.
    param:
        pt0_coor: tuple, coordinate of the end point 0 in current unit.
        pt1_coor: tuple, coordinate of the end point 1 in current unit.
        section: str, name of the frame section.
        [name]: str, name, optional.
return:
str, the new frame's name.
"""
assert(len(pt0_coor)==3 and len(pt1_coor)==3)
if name and self.session.query(Frame).filter_by(name=name).first()!=None:
        raise Exception('Name already exists!')
if self.session.query(FrameSection).filter_by(name=section).first() is None:
raise Exception("Frame section doesn't exits!")
frm=Frame()
scale=self.scale()
tol=self.session.query(Config).first().tolerance
pt0=self.session.query(Point).filter(and_(
(Point.x-pt0_coor[0]*scale['L'])<tol,(pt0_coor[0]*scale['L']-Point.x)<tol,
(Point.y-pt0_coor[1]*scale['L'])<tol,(pt0_coor[1]*scale['L']-Point.y)<tol,
(Point.z-pt0_coor[2]*scale['L'])<tol,(pt0_coor[2]*scale['L']-Point.z)<tol)).first()
if pt0==None:
pt0_name=self.add_point(pt0_coor[0]*scale['L'],pt0_coor[1]*scale['L'],pt0_coor[2]*scale['L'])
else:
pt0_name=pt0.name
    # Apply the same length scaling to end point 1 as to end point 0
    pt1=self.session.query(Point).filter(and_(
        (Point.x-pt1_coor[0]*scale['L'])<tol,(pt1_coor[0]*scale['L']-Point.x)<tol,
        (Point.y-pt1_coor[1]*scale['L'])<tol,(pt1_coor[1]*scale['L']-Point.y)<tol,
        (Point.z-pt1_coor[2]*scale['L'])<tol,(pt1_coor[2]*scale['L']-Point.z)<tol)).first()
    if pt1==None:
        pt1_name=self.add_point(pt1_coor[0]*scale['L'],pt1_coor[1]*scale['L'],pt1_coor[2]*scale['L'])
else:
pt1_name=pt1.name
if pt0_name<pt1_name:
order='01'
frm.pt0_name=pt0_name
frm.pt1_name=pt1_name
frm.order=order
elif pt0_name>pt1_name:
order='10'
frm.pt0_name=pt1_name
frm.pt1_name=pt0_name
frm.order=order
else:
raise Exception('Two points should not be the same!')
frm.section_name=section
frm.uuid=str(uuid.uuid1())
if name:
frm.name=name
else:
frm.name=frm.uuid
self.session.add(frm)
return frm.name
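# Example (hedged sketch): adding a single frame on a hypothetical `model`
# instance that owns these methods; the section name is illustrative and must
# already exist in the model.
#
# >>> name = model.add_frame((0, 0, 0), (6, 0, 0), "W18x35")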
def add_frame_batch(self,pt_coors,section):
"""
    Add a batch of frame objects to the model.
param:
pt_coors: list of float tuples as ((pt0.x,pt0.y,pt0.z),(pt1.x,pt1.y,pt1.z))
return:
status of success, and list of str, the new frame's names if successful.
"""
try:
assert(len(pt_coors[0][0])==len(pt_coors[0][1]))
if self.session.query(FrameSection).filter_by(name=section).first() is None:
raise Exception("Frame section doesn't exits!")
names=[]
frm_ends=[]
scale=self.scale()
for pt0,pt1 in pt_coors:
pt0_name=self.add_point(pt0[0]*scale['L'],pt0[1]*scale['L'],pt0[2]*scale['L'])
pt1_name=self.add_point(pt1[0]*scale['L'],pt1[1]*scale['L'],pt1[2]*scale['L'])
frm_ends.append((pt0_name,pt1_name))
tol=self.session.query(Config).first().tolerance
pts=self.session.query(Point).order_by(Point.x,Point.y,Point.z).all()
pt_map=dict([(pt.name,pt.name) for pt in pts])
pts_to_rmv=[]
for pti,ptj in zip(pts[:-1],pts[1:]):
if (ptj.x-pti.x)**2+(ptj.y-pti.y)**2+(ptj.z-pti.z)**2<tol**2:
pt_map[ptj.name]=pt_map[pti.name]
pts_to_rmv.append(ptj)
for (pt0_name,pt1_name) in frm_ends:
frm=Frame()
if pt_map[pt0_name]<pt_map[pt1_name]:
frm.pt0_name=pt_map[pt0_name]
frm.pt1_name=pt_map[pt1_name]
frm.order='01'
elif pt_map[pt0_name]>pt_map[pt1_name]:
frm.pt0_name=pt_map[pt1_name]
frm.pt1_name=pt_map[pt0_name]
frm.order='10'
else:
continue
frm.section_name=section
frm.uuid=str(uuid.uuid1())
frm.name=frm.uuid
names.append(frm.name)
self.session.add(frm)
for pt in pts_to_rmv:
self.session.delete(pt)
self.session.commit()
return True,names
except Exception as e:
logger.info(str(e))
self.session.rollback()
        return False,None
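# Example (hedged sketch): batch creation merges coincident end points within
# the configured tolerance and returns a (status, names) pair; `model` is a
# hypothetical instance that owns these methods.
#
# >>> ok, names = model.add_frame_batch(
# ...     [((0, 0, 0), (6, 0, 0)), ((6, 0, 0), (12, 0, 0))], "W18x35")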
def set_frame_section(self,frame,section):
"""
Assign a frame section to a frame.
params:
frame: str, name of frame.
section: str, name of section.
"""
try:
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
raise Exception("Frame doesn't exists.")
frm.section_name=section
self.session.add(frm)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_frame_mesh(self,frame):
pass
def set_frame_load_distributed(self,frame,loadcase,load):
"""
params:
        frame: str, name of frame.
        loadcase: str, name of loadcase.
        load: list of 12 floats, distributed load components at both ends
              (p01,p02,p03,m01,m02,m03,p11,p12,p13,m11,m12,m13).
return:
status of success.
"""
try:
assert len(load)==12
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
raise Exception("Frame doesn't exists.")
ld=self.session.query(FrameLoadDistributed).filter_by(frame_name=frame,loadcase_name=loadcase).first()
if ld is None:
ld=FrameLoadDistributed()
scale=self.scale()
ld.frame_name=frame
ld.loadcase_name=loadcase
ld.p01=load[0]*scale['F']
ld.p02=load[1]*scale['F']
ld.p03=load[2]*scale['F']
ld.m01=load[3]*scale['F']*scale['L']
ld.m02=load[4]*scale['F']*scale['L']
ld.m03=load[5]*scale['F']*scale['L']
ld.p11=load[6]*scale['F']
ld.p12=load[7]*scale['F']
ld.p13=load[8]*scale['F']
ld.m11=load[9]*scale['F']*scale['L']
ld.m12=load[10]*scale['F']*scale['L']
ld.m13=load[11]*scale['F']*scale['L']
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
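# Example (hedged sketch): the 12-float distributed load lists forces and
# moments at end 0 (first six values) and end 1 (last six); the frame and
# loadcase names are illustrative.
#
# >>> model.set_frame_load_distributed("F1", "DL",
# ...     [0, 0, -10, 0, 0, 0, 0, 0, -10, 0, 0, 0])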
def set_frame_load_concentrated(self,frame,loadcase,load,loc):
"""
params:
        frame: str, name of frame.
        loadcase: str, name of loadcase.
        load: list of 6 floats, concentrated forces and moments (p1,p2,p3,m1,m2,m3).
        loc: float in [0,1], relative location of the load along the frame.
return:
status of success.
"""
try:
assert (len(load)==6 and (loc<=1 and loc>=0))
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
raise Exception("Frame doesn't exists.")
ld=self.session.query(FrameLoadConcentrated).filter_by(frame_name=frame,loadcase_name=loadcase).first()
if ld is None:
ld=FrameLoadConcentrated()
scale=self.scale()
ld.frame_name=frame
ld.loadcase_name=loadcase
ld.p1=load[0]*scale['F']
ld.p2=load[1]*scale['F']
ld.p3=load[2]*scale['F']
ld.m1=load[3]*scale['F']*scale['L']
ld.m2=load[4]*scale['F']*scale['L']
ld.m3=load[5]*scale['F']*scale['L']
ld.loc=loc
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_frame_load_strain(self,frame,loadcase,strain):
"""
params:
        frame: str, name of frame.
loadcase: str, name of loadcase.
strain: float, strain in 1-1 axis.
return:
status of success.
"""
try:
assert (strain<=1 and strain>=0)
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
raise Exception("Frame doesn't exists.")
ld=self.session.query(FrameLoadStrain).filter_by(frame_name=frame,loadcase_name=loadcase).first()
if ld is None:
ld=FrameLoadStrain()
ld.frame_name=frame
ld.loadcase_name=loadcase
ld.strain=strain
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def set_frame_load_temperature(self,frame,loadcase,temperature):
"""
params:
frame: str, name of frame.
loadcase: str, name of loadcase.
temperature: float, temperature in 1-1 axis.
return:
status of success.
"""
try:
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
raise Exception("Frame doesn't exists.")
ld=self.session.query(FrameLoadTemperature).filter_by(frame_name=frame,loadcase_name=loadcase).first()
if ld is None:
ld=FrameLoadTemperature()
ld.frame_name=frame
ld.loadcase_name=loadcase
ld.T=temperature
self.session.add(ld)
return True
except Exception as e:
logger.info(str(e))
self.session.rollback()
return False
def get_frame_names_by_points(self,pt1,pt2):
"""
params:
        pt1, pt2: str, names of the two end points.
    returns:
        list of names of frames connecting the given points.
"""
pass
def get_frame_names(self):
"""
    Get the names of all frames in the database.
returns:
frame name list if successful or None if failed.
"""
try:
frms=self.session.query(Frame).all()
return [frm.name for frm in frms]
except Exception as e:
logger.info(str(e))
self.session.rollback()
return None
def get_frame_end_names(self,frame):
"""
params:
frame: str, name of frame.
return:
        two point names as the frame's start and end if successful, or None if failed.
"""
try:
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
raise Exception("Frame doesn't exists.")
return frm.pt0.name,frm.pt1.name
except Exception as e:
logger.info(str(e))
self.session.rollback()
return None
def get_frame_end_coors(self,frame):
"""
params:
frame: str, name of frame.
return:
6-list of floats end_coors in current unit if successful or None if failed
"""
try:
scale=self.scale()
frm=self.session.query(Frame).filter_by(name=frame).first()
if frm is None:
raise Exception("Frame doesn't exists.")
pt0=frm.pt0
pt1=frm.pt1
return [pt0.x/scale['L'],pt0.y/scale['L'],pt0.z/scale['L'],pt1.x/scale['L'],pt1.y/scale['L'],pt1.z/scale['L']]
except Exception as e:
logger.info(str(e))
self.session.rollback()
return None
def get_frame_section_attribute(self,name):
"""
params:
name: str
returns:
frame section object if exist
"""
pass
def delete_frame(self,name):
    try:
        frm=self.session.query(Frame).filter_by(name=name).first()
        if frm is None:
            raise Exception("Frame doesn't exist!")
        self.session.delete(frm)
        return True
    except Exception as e:
        logger.info(str(e))
        self.session.rollback()
        return False
|
Epitacio Huerta is a municipality of 16,218 inhabitants in the State of Michoacán de Ocampo, with a fertility rate of 3.11 children per woman. 26.91% of the population migrated from outside the State of Michoacán de Ocampo. 0.56% of the population is indigenous, 0.29% of the inhabitants speak an indigenous language, and 0.01% speak an indigenous language but do not speak Spanish.
98.37% of the inhabitants of Epitacio Huerta are Catholic and 45.80% are economically active; within this active population, 94.67% are employed. Additionally, 88.08% of the dwellings have piped water and 1.03% have Internet access.
|
"""Viessmann ViCare sensor device."""
import logging
import requests
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ICON,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
ENERGY_WATT_HOUR,
PERCENTAGE,
TEMP_CELSIUS,
TIME_HOURS,
)
from . import (
DOMAIN as VICARE_DOMAIN,
PYVICARE_ERROR,
VICARE_API,
VICARE_HEATING_TYPE,
VICARE_NAME,
HeatingType,
)
_LOGGER = logging.getLogger(__name__)
CONF_GETTER = "getter"
SENSOR_TYPE_TEMPERATURE = "temperature"
SENSOR_OUTSIDE_TEMPERATURE = "outside_temperature"
SENSOR_SUPPLY_TEMPERATURE = "supply_temperature"
SENSOR_RETURN_TEMPERATURE = "return_temperature"
# gas sensors
SENSOR_BOILER_TEMPERATURE = "boiler_temperature"
SENSOR_BURNER_MODULATION = "burner_modulation"
SENSOR_BURNER_STARTS = "burner_starts"
SENSOR_BURNER_HOURS = "burner_hours"
SENSOR_BURNER_POWER = "burner_power"
SENSOR_DHW_GAS_CONSUMPTION_TODAY = "hotwater_gas_consumption_today"
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK = "hotwater_gas_consumption_heating_this_week"
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH = "hotwater_gas_consumption_heating_this_month"
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR = "hotwater_gas_consumption_heating_this_year"
SENSOR_GAS_CONSUMPTION_TODAY = "gas_consumption_heating_today"
SENSOR_GAS_CONSUMPTION_THIS_WEEK = "gas_consumption_heating_this_week"
SENSOR_GAS_CONSUMPTION_THIS_MONTH = "gas_consumption_heating_this_month"
SENSOR_GAS_CONSUMPTION_THIS_YEAR = "gas_consumption_heating_this_year"
# heatpump sensors
SENSOR_COMPRESSOR_STARTS = "compressor_starts"
SENSOR_COMPRESSOR_HOURS = "compressor_hours"
SENSOR_COMPRESSOR_HOURS_LOADCLASS1 = "compressor_hours_loadclass1"
SENSOR_COMPRESSOR_HOURS_LOADCLASS2 = "compressor_hours_loadclass2"
SENSOR_COMPRESSOR_HOURS_LOADCLASS3 = "compressor_hours_loadclass3"
SENSOR_COMPRESSOR_HOURS_LOADCLASS4 = "compressor_hours_loadclass4"
SENSOR_COMPRESSOR_HOURS_LOADCLASS5 = "compressor_hours_loadclass5"
# fuelcell sensors
SENSOR_POWER_PRODUCTION_CURRENT = "power_production_current"
SENSOR_POWER_PRODUCTION_TODAY = "power_production_today"
SENSOR_POWER_PRODUCTION_THIS_WEEK = "power_production_this_week"
SENSOR_POWER_PRODUCTION_THIS_MONTH = "power_production_this_month"
SENSOR_POWER_PRODUCTION_THIS_YEAR = "power_production_this_year"
SENSOR_TYPES = {
SENSOR_OUTSIDE_TEMPERATURE: {
CONF_NAME: "Outside Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getOutsideTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
SENSOR_SUPPLY_TEMPERATURE: {
CONF_NAME: "Supply Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getSupplyTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
# gas sensors
SENSOR_BOILER_TEMPERATURE: {
CONF_NAME: "Boiler Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getBoilerTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
SENSOR_BURNER_MODULATION: {
CONF_NAME: "Burner modulation",
CONF_ICON: "mdi:percent",
CONF_UNIT_OF_MEASUREMENT: PERCENTAGE,
CONF_GETTER: lambda api: api.getBurnerModulation(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_TODAY: {
CONF_NAME: "Hot water gas consumption today",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterToday(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK: {
CONF_NAME: "Hot water gas consumption this week",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisWeek(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH: {
CONF_NAME: "Hot water gas consumption this month",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisMonth(),
CONF_DEVICE_CLASS: None,
},
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR: {
CONF_NAME: "Hot water gas consumption this year",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionDomesticHotWaterThisYear(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_TODAY: {
CONF_NAME: "Heating gas consumption today",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingToday(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_WEEK: {
CONF_NAME: "Heating gas consumption this week",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisWeek(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_MONTH: {
CONF_NAME: "Heating gas consumption this month",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisMonth(),
CONF_DEVICE_CLASS: None,
},
SENSOR_GAS_CONSUMPTION_THIS_YEAR: {
CONF_NAME: "Heating gas consumption this year",
CONF_ICON: "mdi:power",
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getGasConsumptionHeatingThisYear(),
CONF_DEVICE_CLASS: None,
},
SENSOR_BURNER_STARTS: {
CONF_NAME: "Burner Starts",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: None,
CONF_GETTER: lambda api: api.getBurnerStarts(),
CONF_DEVICE_CLASS: None,
},
SENSOR_BURNER_HOURS: {
CONF_NAME: "Burner Hours",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getBurnerHours(),
CONF_DEVICE_CLASS: None,
},
# heatpump sensors
SENSOR_COMPRESSOR_STARTS: {
CONF_NAME: "Compressor Starts",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: None,
CONF_GETTER: lambda api: api.getCompressorStarts(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS: {
CONF_NAME: "Compressor Hours",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHours(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS1: {
CONF_NAME: "Compressor Hours Load Class 1",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass1(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS2: {
CONF_NAME: "Compressor Hours Load Class 2",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass2(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS3: {
CONF_NAME: "Compressor Hours Load Class 3",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass3(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS4: {
CONF_NAME: "Compressor Hours Load Class 4",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass4(),
CONF_DEVICE_CLASS: None,
},
SENSOR_COMPRESSOR_HOURS_LOADCLASS5: {
CONF_NAME: "Compressor Hours Load Class 5",
CONF_ICON: "mdi:counter",
CONF_UNIT_OF_MEASUREMENT: TIME_HOURS,
CONF_GETTER: lambda api: api.getCompressorHoursLoadClass5(),
CONF_DEVICE_CLASS: None,
},
SENSOR_RETURN_TEMPERATURE: {
CONF_NAME: "Return Temperature",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
CONF_GETTER: lambda api: api.getReturnTemperature(),
CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
},
# fuelcell sensors
SENSOR_POWER_PRODUCTION_CURRENT: {
CONF_NAME: "Power production current",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionCurrent(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
SENSOR_POWER_PRODUCTION_TODAY: {
CONF_NAME: "Power production today",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionToday(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
SENSOR_POWER_PRODUCTION_THIS_WEEK: {
CONF_NAME: "Power production this week",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionThisWeek(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
SENSOR_POWER_PRODUCTION_THIS_MONTH: {
CONF_NAME: "Power production this month",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionThisMonth(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
SENSOR_POWER_PRODUCTION_THIS_YEAR: {
CONF_NAME: "Power production this year",
CONF_ICON: None,
CONF_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
CONF_GETTER: lambda api: api.getPowerProductionThisYear(),
CONF_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
},
}
SENSORS_GENERIC = [SENSOR_OUTSIDE_TEMPERATURE, SENSOR_SUPPLY_TEMPERATURE]
SENSORS_BY_HEATINGTYPE = {
HeatingType.gas: [
SENSOR_BOILER_TEMPERATURE,
SENSOR_BURNER_HOURS,
SENSOR_BURNER_MODULATION,
SENSOR_BURNER_STARTS,
SENSOR_DHW_GAS_CONSUMPTION_TODAY,
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR,
SENSOR_GAS_CONSUMPTION_TODAY,
SENSOR_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_GAS_CONSUMPTION_THIS_YEAR,
],
HeatingType.heatpump: [
SENSOR_COMPRESSOR_STARTS,
SENSOR_COMPRESSOR_HOURS,
SENSOR_COMPRESSOR_HOURS_LOADCLASS1,
SENSOR_COMPRESSOR_HOURS_LOADCLASS2,
SENSOR_COMPRESSOR_HOURS_LOADCLASS3,
SENSOR_COMPRESSOR_HOURS_LOADCLASS4,
SENSOR_COMPRESSOR_HOURS_LOADCLASS5,
SENSOR_RETURN_TEMPERATURE,
],
HeatingType.fuelcell: [
# gas
SENSOR_BOILER_TEMPERATURE,
SENSOR_BURNER_HOURS,
SENSOR_BURNER_MODULATION,
SENSOR_BURNER_STARTS,
SENSOR_DHW_GAS_CONSUMPTION_TODAY,
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR,
SENSOR_GAS_CONSUMPTION_TODAY,
SENSOR_GAS_CONSUMPTION_THIS_WEEK,
SENSOR_GAS_CONSUMPTION_THIS_MONTH,
SENSOR_GAS_CONSUMPTION_THIS_YEAR,
# fuel cell
SENSOR_POWER_PRODUCTION_CURRENT,
SENSOR_POWER_PRODUCTION_TODAY,
SENSOR_POWER_PRODUCTION_THIS_WEEK,
SENSOR_POWER_PRODUCTION_THIS_MONTH,
SENSOR_POWER_PRODUCTION_THIS_YEAR,
],
}
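# Example (hedged sketch): an additional sensor type would follow the same
# pattern as the entries above; the getter name below is hypothetical, not a
# confirmed PyViCare API.
#
# SENSOR_TYPES["buffer_temperature"] = {
#     CONF_NAME: "Buffer Temperature",
#     CONF_ICON: None,
#     CONF_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
#     CONF_GETTER: lambda api: api.getBufferTemperature(),  # hypothetical getter
#     CONF_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
# }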
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the ViCare sensor devices."""
if discovery_info is None:
return
vicare_api = hass.data[VICARE_DOMAIN][VICARE_API]
heating_type = hass.data[VICARE_DOMAIN][VICARE_HEATING_TYPE]
sensors = SENSORS_GENERIC.copy()
if heating_type != HeatingType.generic:
sensors.extend(SENSORS_BY_HEATINGTYPE[heating_type])
add_entities(
[
ViCareSensor(hass.data[VICARE_DOMAIN][VICARE_NAME], vicare_api, sensor)
for sensor in sensors
]
)
class ViCareSensor(SensorEntity):
"""Representation of a ViCare sensor."""
def __init__(self, name, api, sensor_type):
"""Initialize the sensor."""
self._sensor = SENSOR_TYPES[sensor_type]
self._name = f"{name} {self._sensor[CONF_NAME]}"
self._api = api
self._sensor_type = sensor_type
self._state = None
@property
def available(self):
"""Return True if entity is available."""
return self._state is not None and self._state != PYVICARE_ERROR
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.service.id}-{self._sensor_type}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._sensor[CONF_ICON]
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._sensor[CONF_UNIT_OF_MEASUREMENT]
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._sensor[CONF_DEVICE_CLASS]
def update(self):
"""Update state of sensor."""
try:
self._state = self._sensor[CONF_GETTER](self._api)
except requests.exceptions.ConnectionError:
_LOGGER.error("Unable to retrieve data from ViCare server")
except ValueError:
_LOGGER.error("Unable to decode data from ViCare server")
|
Anatomy and radiological anatomy (X-ray) apply the knowledge gained from dissection of dead bodies to living individuals, which has become important for clinical studies.
Embryology deals with the study of the intrauterine development of human embryos and fetuses. It attempts to explore the factors responsible for the growth and differentiation of tissues and organs, and the causes of occasional malformations.
Histology deals with the study of the microscopic structures of different organs.
Clinical anatomy emphasizes the application of anatomical knowledge in clinical practice, helping clinicians reach a correct diagnosis and apply remedial measures. |
from datetime import datetime, timedelta
import threading
import time
from heapq import heappush,heappop
##
# The different states that the task can be in. States should
# not be set manually. They should only be set by the scheduler.
class TaskState:
##
# Either unscheduled or scheduled and waiting to be run.
PENDING = 0
##
# When the task is running.
RUNNING = 1
##
# The task has finished executing.
FINISHED = 2
##
# A task is a unit of work that is to be performed on behalf
# of another part of the server or for a plugin. A task is
# scheduled to run at some point in the future and can be
# set to be a recurring event that happens at an interval.
#
# There are two methods to create a task. The first is to
# implement the ITask interface. The benefit to doing that is
# that the task can have more specialized functionality and
# can be given more data to use for processing.
#
# The second method is to instantiate the interface and just pass
# some settings as well as a function to call when the task
# is run.
class ITask(object):
##
# Creates a task with the given time before its called
# and performs the requested action when called. The task
# can also be set to repeat itself at the same delay
# interval.
#
# @param task the task to perform when called
# @param minutes the number of minutes to wait (default 1)
# @param hours the number of hours to wait (default 0)
# @param days the number of days to wait (default 0)
# @param recurring if the task is to repeat itself
#
# @return the task object that was created
@staticmethod
def createTask(task, minutes = 1, hours = 0, days = 0, recurring = False):
        if task is None or not callable(task):
raise TypeError('A function must be given to create a task.')
if not issubclass(minutes.__class__, int) or not issubclass(hours.__class__, int) or not issubclass(days.__class__, int):
raise TypeError('The time given must be in an integer form.')
ret = ITask(minutes, hours, days, recurring)
ret.run = task
return ret
##
# Creates a task with the given time before its called.
# The task can also be set to repeat itself at the same
# delay interval.
#
# @param minutes the number of minutes to wait (default 1)
# @param hours the number of hours to wait (default 0)
# @param days the number of days to wait (default 0)
# @param recurring if the task is to repeat itself
def __init__(self, minutes = 1, hours = 0, days = 0, recurring = False):
self.minutes = minutes
self.hours = hours
self.days = days
self.recurring = recurring
self.state = TaskState.PENDING
self.timestamp = self.calculateTimestamp()
##
# Called when the task is to run. In case the task cares
# about when it is actually being called it is provided
# the time that it was executed at. This is given as a
# datetime object.
#
# @param time the time the task was actually selected
# to run at
def run(self, time):
raise NotImplementedError('Task method was not implemented.')
##
# Calculates the timestamp of when the task should next run.
#
# @return a datetime object for the next run time
def calculateTimestamp(self):
return datetime.now() + timedelta(minutes = self.minutes,
hours = self.hours,
days = self.days)
##
# Less than comparison between tasks. Based on the timestamp to next run.
def __lt__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp < other.timestamp
##
# Less than or equal to comparison between tasks. Based on the timestamp to next run.
def __le__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp <= other.timestamp
##
# Equal to comparison between tasks. Based on the timestamp to next run.
def __eq__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp == other.timestamp
##
# Not equal to comparison between tasks. Based on the timestamp to next run.
def __ne__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp != other.timestamp
##
# Greater than or equal to comparison between tasks. Based on the timestamp to next run.
def __ge__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp >= other.timestamp
##
# Greater than comparison between tasks. Based on the timestamp to next run.
def __gt__(self, other):
if not isinstance(other, ITask):
raise TypeError('Can only compare ITask objects with other ITask objects')
return self.timestamp > other.timestamp
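##
# Example (hedged sketch): creating and scheduling a recurring task with the
# static factory above; `work` is an illustrative callback.
#
# def work(time):
#     print('ran at %s' % time)
#
# scheduler = TaskScheduler()
# scheduler.startScheduler()
# scheduler.addTask(ITask.createTask(work, minutes=5, recurring=True))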
##
# Maximum number of threads to run tasks on
maxThreads = -1
##
# This class provides an method to run task objects given to it at a specific time.
class TaskScheduler():
##
# The main thread handle
mainThread = None
##
# Status bool
running = True
##
# Main init
# @param maxNumThreads Maximum number of threads
def __init__(self, maxNumThreads = 10):
global maxThreads
maxThreads = maxNumThreads
self.taskList = []
self.running = False
##
# Starts the scheduler's main thread.
# No tasks can be run before this method is called
def startScheduler(self):
if self.mainThread is not None:
if self.mainThread.isAlive():
raise RuntimeError("Tried to start an already started Scheduler")
self.mainThread = None
self.mainThread = MainSchedulerThread(self)
self.mainThread.start()
self.running = True
##
# Stops the scheduler's main thread
# No tasks can be run after this method is called
def stopScheduler(self):
if self.mainThread is None:
raise RuntimeError("Trying to stop a None Thread")
if not self.mainThread.isAlive():
raise RuntimeError("Trying to stop a Thread that wasn't started")
self.mainThread.stopThread()
self.running = False
##
# Adds a task to be executed
def addTask(self, task):
if not self.running:
raise RuntimeError("Tried to add a task to a stopped scheduler")
if datetime.now() > task.timestamp:
raise RuntimeError("Tried to schedule a task that should have already been run")
heappush(self.taskList, task)
##
# Returns if the scheduler is still running
def isAlive(self):
return self.running
##
# This is the main thread of the TaskScheduler
class MainSchedulerThread(threading.Thread):
##
# Main init
# @param A TaskScheduler to pull Tasks from
def __init__(self, taskScheduler):
threading.Thread.__init__(self)
self.tasks = taskScheduler.taskList
self.stop = False
self.daemon = True
global maxThreads
self.pool = []
for a in range(maxThreads):
t = TaskThread()
t.start()
self.pool.append(t)
##
# Main method, starts checking for new tasks to run
def run(self):
while not self.stop:
while True:
                if len(self.tasks) == 0:
                    break
                if datetime.now() >= self.tasks[0].timestamp:
                    # The task at the head of the heap is due:
                    # run it after popping
                    task = heappop(self.tasks)
                    global maxThreads
                    assigned = False
                    for a in range(maxThreads):
                        if self.pool[a].runTask(task):
                            assigned = True # Task was added
                            break
                        # Otherwise that thread already had a task; check the next
                    if not assigned:
                        # All workers are busy; requeue and retry on the next pass
                        heappush(self.tasks, task)
                        break
                    # Check if it needs to recur
                    if task.recurring:
                        task.timestamp = task.calculateTimestamp()
                        heappush(self.tasks, task)
                else:
                    break
            # After breaking, all tasks that should be run are now running or queued, sleep for 1 min
            time.sleep(60)
#When we are stopping, join worker threads, they are already marked as stopped
for a in range(maxThreads):
self.pool[a].join()
##
# Stops the execution of the scheduler after the next task check, this call will not block
# Call isAlive() to check for when it is stopped
def stopThread(self):
for a in range(maxThreads):
self.pool[a].stopThread()
self.stop = True
##
# This a task thread
class TaskThread(threading.Thread):
##
# Main init
def __init__(self):
threading.Thread.__init__(self)
self.busy = False
self.task = None
self.stop = False
self.daemon = True
##
# Runs the thread in a loop running any given task
def run(self):
while not self.stop:
if self.busy: # Has task
if self.task is None:
self.busy = False
else:
self.task.state = TaskState.RUNNING
self.task.run(datetime.now())
self.task.state = TaskState.FINISHED
self.task = None
else:
time.sleep(1)
##
# Runs the given task, returns False if we already have a task
# @param task The task to run
def runTask(self,task):
if self.busy:
if self.task is not None:
return False
self.task = task
self.busy = True
return True
##
# Stops the TaskThread, returns a task object if one exists
def stopThread(self):
self.stop = True
return self.task
|
Are aches and pains preventing you from experiencing optimal health?
Do you suffer from headaches, back pain, joint pain or arthritis? Perhaps you wake up with a stiff back or complain of numbing in the fingers and toes, tingling sensations or debilitating migraines?
The effects of stored body stress may have diverse effects on the body, ranging from muscle tension and discomfort to systemic imbalances such as infertility, heartburn, hyperactivity in children and a myriad of other complaints.
The underlying premise of Body Stress Release is to release stored tension to optimise the body’s communication system, thereby stimulating the body to heal itself and maintain optimal health.
This non-therapeutic, complementary health technique is recommended by doctors and specialists and has assisted hundreds of people in their path to restored vitality and optimal health.
Body Stress Release was founded in South Africa in 1981 (The Origins of Body Stress Release). The technique is now practiced in over 20 countries worldwide, including the UK, Canada, Japan, the US, Australia and Europe.
Read More "What is Body Stress Release?"
Read More "Types of Stress"
Read More "Origins of BSR"
Read More "Brain Damage and Body Stress"
Read More "Prolapsed Spinal disc and recovery with BSR"
Read More "Swelling Hands and Carpal Tunnel Syndrome" |
from gamtools import segregation
import io
import pytest
fixture_two_samples = io.StringIO(
u"""chrom start stop Sample_A Sample_B
chr1 0 50000 0 0
chr1 50000 100000 0 0
chr1 100000 150000 0 0
chr1 150000 200000 0 0
chr1 200000 250000 0 0
chr1 250000 300000 0 0
chr1 300000 350000 0 0
chr1 350000 400000 0 0
chr1 400000 450000 0 0
""")
data_two_samples = segregation.open_segregation(fixture_two_samples)
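# Note (assumption): open_segregation appears to return a pandas DataFrame
# indexed by (chrom, start, stop) windows with one column per sample, based on
# how the index is unpacked in the tests below.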
#########################################
#
# segregation.index_from_interval tests
#
#########################################
def test_interval_within_bin():
interval = 'chr1', 50100, 50300
start_index, stop_index = segregation.index_from_interval(data_two_samples,
interval)
found_windows = data_two_samples.index[start_index:stop_index]
assert len(found_windows) == 1
print(found_windows)
found_chrom, found_start, found_stop = found_windows[0]
assert found_chrom == 'chr1'
assert found_start == 50000
assert found_stop == 100000
def test_interval_is_bin():
interval = 'chr1', 50000, 100000
start_index, stop_index = segregation.index_from_interval(data_two_samples,
interval)
found_windows = data_two_samples.index[start_index:stop_index]
assert len(found_windows) == 1
print(found_windows)
found_chrom, found_start, found_stop = found_windows[0]
assert found_chrom == 'chr1'
assert found_start == 50000
assert found_stop == 100000
def test_interval_overlaps_two_bins():
interval = 'chr1', 50500, 100500
start_index, stop_index = segregation.index_from_interval(data_two_samples,
interval)
found_windows = data_two_samples.index[start_index:stop_index]
print(found_windows)
assert len(found_windows) == 2
assert found_windows[0] == ('chr1', 50000, 100000)
assert found_windows[-1] == ('chr1', 100000, 150000)
def test_interval_overlaps_many_bins():
interval = 'chr1', 50500, 300500
start_index, stop_index = segregation.index_from_interval(data_two_samples,
interval)
found_windows = data_two_samples.index[start_index:stop_index]
print(found_windows)
assert len(found_windows) == 6
assert found_windows[0] == ('chr1', 50000, 100000)
assert found_windows[-1] == ('chr1', 300000, 350000)
def test_interval_end_before_start():
interval = 'chr1', 300500, 50500
with pytest.raises(ValueError):
segregation.index_from_interval(data_two_samples, interval)
def test_invalid_chromosome():
interval = 'chr3', 50000, 100000
with pytest.raises(segregation.InvalidChromError):
segregation.index_from_interval(data_two_samples, interval)
|
Bill is the founder and principal of Wiersma and Associates. He is known as a gifted workshop leader, talented author, and trusted business consultant. Bill has business experience spanning over 25 years. Prior to founding Wiersma and Associates, he was an Operations Director at a Fortune 100 company and was later appointed CEO of an investor-owned technology company. His consulting work, which spans 10 years, includes extensive corporate experience at Pacific Gas and Electric, where he was a senior organizational consultant to the Directors. Bill and his wife Holly reside in Pleasant Hill, California, and are the parents of four children. |
from t_core.composer import Composer
from tc_python.arule import ARule
from tc_python.srule import SRule
from tc_python.frule import FRule
from HGiveRuleLHS import HGiveRuleLHS
from HGiveRuleRHS import HGiveRuleRHS
from HMountRuleLHS import HMountRuleLHS
from HMountRuleRHS import HMountRuleRHS
from HNewRuleLHS import HNewRuleLHS
from HNewRuleRHS import HNewRuleRHS
from HReleaseRuleLHS import HReleaseRuleLHS
from HReleaseRuleRHS import HReleaseRuleRHS
from HRequestRuleLHS import HRequestRuleLHS
from HRequestRuleRHS import HRequestRuleRHS
from HTakeRuleLHS import HTakeRuleLHS
from HTakeRuleRHS import HTakeRuleRHS
from HGiveRulePivotLHS import HGiveRulePivotLHS
from HGiveRulePivotRHS import HGiveRulePivotRHS
from HReleaseRulePivotLHS import HReleaseRulePivotLHS
from HReleaseRulePivotRHS import HReleaseRulePivotRHS
from HTakeRulePivotLHS import HTakeRulePivotLHS
from HTakeRulePivotRHS import HTakeRulePivotRHS
class ShortTransformationSequence(Composer):
def __init__(self, N, debug_folder=''):
super(ShortTransformationSequence, self).__init__()
self.length = 0
self.debug_suffix = 'sts'
self.debug_folder = debug_folder
self.N = N
self.NewRule = SRule(HNewRuleLHS(), HNewRuleRHS(), max_iterations=N - 2, ignore_resolver=True)
self.MountRule = ARule(HMountRuleLHS(), HMountRuleRHS(), ignore_resolver=True)
self.RequestRule = FRule(HRequestRuleLHS(), HRequestRuleRHS(), max_iterations=N, ignore_resolver=True)
self.TakeRule = ARule(HTakeRuleLHS(), HTakeRuleRHS(), ignore_resolver=True)
self.ReleaseRule = ARule(HReleaseRuleLHS(), HReleaseRuleRHS(), ignore_resolver=True)
self.GiveRule = ARule(HGiveRuleLHS(), HGiveRuleRHS(), ignore_resolver=True)
def packet_in(self, packet):
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# New Processes
packet = self.NewRule.packet_in(packet)
packet.clean()
if not self.NewRule.is_success:
if self.NewRule.exception is not None:
self.exception = self.NewRule.exception
return packet
self.length += self.NewRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Mount
packet = self.MountRule.packet_in(packet)
packet.clean()
if not self.MountRule.is_success:
if self.MountRule.exception is not None:
self.exception = self.MountRule.exception
return packet
self.length += self.MountRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Request
packet = self.RequestRule.packet_in(packet)
packet.clean()
if not self.RequestRule.is_success:
if self.RequestRule.exception is not None:
self.exception = self.RequestRule.exception
return packet
self.length += self.RequestRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Pass it around
for _ in range(self.N):
# Take
packet = self.TakeRule.packet_in(packet)
packet.clean()
if not self.TakeRule.is_success:
if self.TakeRule.exception is not None:
self.exception = self.TakeRule.exception
return packet
self.length += self.TakeRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Release
packet = self.ReleaseRule.packet_in(packet)
packet.clean()
if not self.ReleaseRule.is_success:
if self.ReleaseRule.exception is not None:
self.exception = self.ReleaseRule.exception
return packet
self.length += self.ReleaseRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
# Give
packet = self.GiveRule.packet_in(packet)
packet.clean()
if not self.GiveRule.is_success:
if self.GiveRule.exception is not None:
self.exception = self.GiveRule.exception
return packet
self.length += self.GiveRule.I.iterations
if self.debug_folder: packet.graph.draw(label='name', debug=True).save(self.debug_folder + '/%s%d.png' % (self.debug_suffix, self.length))
self.is_success = True
return packet
class ShortTransformationSequencePivot(ShortTransformationSequence):
def __init__(self, N, debug_folder=''):
super(ShortTransformationSequencePivot, self).__init__(N, debug_folder)
self.debug_suffix = 'sts_pivot'
self.TakeRule = ARule(HTakeRulePivotLHS(), HTakeRulePivotRHS(), ignore_resolver=True)
self.ReleaseRule = ARule(HReleaseRulePivotLHS(), HReleaseRulePivotRHS(), ignore_resolver=True)
self.GiveRule = ARule(HGiveRulePivotLHS(), HGiveRulePivotRHS(), ignore_resolver=True)
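# Example (hedged sketch): driving the sequence; constructing the input
# `packet` depends on the t_core host graph and is only shown abstractly.
#
# transform = ShortTransformationSequence(N=3)
# packet = transform.packet_in(packet)
# if not transform.is_success:
#     print(transform.exception)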
|
We only collect data that is important for the functioning or improvement of our products.
Whenever possible we refrain from collecting individual-related data.
We strive to protect all information we have about our users.
We do not sell individual-related data.
Please note: Before furnishing any information, we may insist on being sent some form of documentation to prove the enquirer’s identity and confirm its authenticity.
Thoughtfish collects information as described below. Our primary goals in collecting and using information are to create your account, provide Services to you, improve our Service, contact you, conduct research and create reports for internal use. We store information on servers located in Germany via Amazon Web Services and may store information on servers and equipment in other countries.
These data cannot be related to a specific person. We do not merge these data with other sources. After a statistical analysis, these data are deleted regularly.
If you register on our website, participate in competitions or subscribe to our newsletter, it is necessary that you provide us with personal data. This data may include, for example, your name, address, email address or other personal data that will be requested by Thoughtfish in individual cases.
Our games, services and websites may also contain other plugins. Each plugin is the responsibility of its provider (e.g. Twitter, Google). This means that we have no control over the amount of information that the respective provider collects with the help of its own plugin, or how it then uses that information. If the user is not yet a customer of the provider or has not (yet) consented there to having his/her data collected and used, we recommend not clicking on these plugins. If, on the other hand, the user is already a customer of the plugin provider, he/she should refer directly to the relevant provider for information on the kind and amount of personal data it collects and uses, and for what purpose.
When you use any of the Thoughtfish games or applications on a mobile platform, we may collect and record certain information such as your unique device ID (persistent / non-persistent), hardware type, media access control (“MAC”) address, international mobile equipment identity (“IMEI”), the version of your operating system (“OS”), your device name, your email address (if you have connected to Facebook or Google+), and your location (based on your Internet Protocol (“IP”) address). This information is useful to us for troubleshooting and helps us understand usage trends.
We may collect your location-based information for the purpose of making the game work properly on your device (Fightlings is a location-aware game, altering your gaming experience according to time, weather and location) and providing you with the correct version of the application. If you no longer wish to allow us to track or use this information, you may turn it off at the device level. Please note: the application may not work properly if we are unable to tell where you are or where you are coming from (country-specific).
In addition, we create a unique user ID to track your use of our Service. This unique user ID is stored in connection with your profile information to track the Thoughtfish games you are playing.
Thoughtfish uses plugins from the social networking site facebook.com to enable inviting friends via Facebook. These plugins are operated by Facebook Inc., 1601 S. California Ave, Palo Alto, CA 94304, USA (“Facebook”). The plugins can either be identified by one of the Facebook logos (a white “f” on a blue tile or a “thumbs up” sign), or by being specially marked “Facebook Social Plugin”.
If a user pushes the button “Invite via Facebook”, his or her internet browser connects directly with the Facebook servers. Facebook sends the content of the plugins directly to the user’s browser and back. Since we have absolutely no control over the amount of data that Facebook collects with the help of these plugins, we would like to share our latest knowledge with you: If the user is logged in on Facebook at the same time, Facebook can allocate the visit to the user’s personal Facebook account. If the user interacts with the plugins, that information is immediately transmitted by the browser directly to Facebook, where it is saved. If the user is not using Facebook, the service can still collect and store the IP address. More information on how much data is collected and why, and how it is then processed and used by Facebook, as well as details about the users’ rights and the settings available for protecting users’ privacy, can be obtained from Facebook’s data protection policy at www.facebook.com/policy.php. If a user has signed up to Facebook but does not want Facebook to collect data and link it up with the data stored on Facebook, he/she must log off Facebook. If you have no Facebook account and/or have not (yet) consented to have your data collected and used by Facebook, we recommend you not use or activate Facebook plugins.
We may collect your email address when you contact our support crew and we may use that email address to contact you about your gaming experience with Thoughtfish games and notify you about company news and promotions. If you no longer wish to receive certain email notifications you may opt-out at any time by following the unsubscribe link located at the bottom of each communication.
We may occasionally send push notifications through our mobile applications to send you game updates and other service-related notifications that may be of importance to you. You may opt-out of receiving these types of communications at any time by turning them off at the device level through your settings.
Whenever you play our games, we collect data about all of your interactions with the game and with the other players inside the game via server log files. This information may be associated with your player ID, IP address, or device ID for the purpose of providing you our Services and improving them. This data is stored within Amazon Web Services.
We use this information to notify winners and award prizes, to monitor traffic, or to personalize the Service. We may use a third-party service provider to conduct these sweepstakes or contests; that company is prohibited from using our users’ personal information for any other purpose.
We use mobile analytics software to allow us to better understand the functionality of our Mobile Software on your device. This software may record information such as how often you use the application, the events that occur within the application, aggregated usage, performance data, and from where the application was downloaded. We do not link the information we store within the analytics software to any personal information you submit within the mobile application.
Thoughtfish may engage other companies and individuals to perform services on our behalf. An example of these services includes analyzing data and providing customer support. These agents and service providers may have access to your personal information in connection with the performance of services for Thoughtfish.
Thoughtfish may share your information in connection with any merger, sale of our assets, or a financing or acquisition of all or a portion of our business to another company. You will be notified via email and/or notice on our site of any change in ownership or users of your personal information.
Thoughtfish takes reasonable measures to protect your information from unauthorized access or against loss, misuse or alteration by third parties.
Although we make good-faith efforts to store the information collected on the Service in a secure operating environment that is not available to the public, we cannot guarantee the absolute security of that information during its transmission or its storage on our systems. Further, while we attempt to ensure the integrity and security of our network and systems, we cannot guarantee that our security measures will prevent third-party “hackers” from illegally obtaining access to this information. We do not warrant or represent that your information will be protected against loss, misuse, or alteration by third parties. No method of transmission over the internet, or method of electronic storage, is 100% secure. Therefore, we cannot guarantee its absolute security.
We will retain your information for as long as your account is active or as needed to provide you services. If you wish to cancel your account or request that we no longer use your information to provide you services, contact us at [email protected]. We will retain and use your information as necessary to comply with our legal obligations, resolve disputes, and enforce our agreements.
We do not knowingly collect or solicit personal information from anyone under the age of 13 or knowingly allow such persons to use our Service. If you are under 13, please do not send any information about yourself to us, including your name, address, telephone number, or email address. No one under the age of 13 may provide any personal information. In the event that we learn that we have collected personal information from a child under age 13, we will delete that information as soon as possible. If you believe that we might have any information from or about a child under the age of 13, please contact us at [email protected].
We may transfer information that we collect about you to affiliated entities, or to other third parties across borders and from your country or jurisdiction to other countries or jurisdictions around the world. Please note that these countries and jurisdictions may not have the same data protection laws as your own jurisdiction, and you consent to the transfer of information to Germany or other countries and the use and disclosure of information about you, including personal information, as described in this Policy. |
# coding=ISO-8859-1
'''
Created on 10/06/2011
@author: dolphinziyo
http://twitter.com/dolphinziyo
Web: http://tecnogame.org
'''
# This program searches for a given pattern in plain-text files.
# Usage is simple: it takes the pattern to search for, the directory
# in which to search, and the extension of the files to search in.
# The directory and the extension are optional parameters; if omitted,
# the search runs in the current directory and across files of any
# extension, respectively.
#
# encontrarPatron pattern [directory] [extension]
#
# Requires Python 2.7
# Modules
import os, sys
# Constants
# Classes
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------
def encontrar(patron, carpeta, extension): # Search for the pattern passed in from main
    cmd = 'find ' + carpeta + ' -name "*' + extension + '" -print 2> /dev/null' # Command to run
    for file in os.popen(cmd).readlines(): # For each file found by the command, load its lines
        num = 1
        name = file[:-1] # Store the file name (strip the trailing newline)
        if not os.path.isdir(name): # Make sure what we are examining is not a directory
            try: # Exception handling
                for line in open(name).readlines(): # Compare line by line
                    pos = line.find(patron) # Position of the pattern within the line
                    if pos >= 0: # If the pattern was found
                        print "File: ", name, "Line: ", num, "Position: ", pos # Print the file name, the line where the pattern was found and the exact position
                        print '....', line[:-1] # Print the matching line
                        print '....', ' '*pos + '*', '\n' # Mark the exact position with a *
                        print '----------------------------------------------------' # A line of dashes to delimit the output
                    num += 1 # Advance the line number
            except (IOError, OSError):
                print "The file " + name + " cannot be read" # Error message for files that cannot be opened
    return True
def options():
    print "Usage:\t encontrarPatron pattern [directory] [extension]" # Message explaining how to use the program
    return
def keyInterrupt():
    print "\nProgram execution was interrupted" # Message shown when the user interrupts execution from the keyboard
    return
# ---------------------------------------------------------------------
def main():
    if len(sys.argv) < 2 or len(sys.argv) > 4: # Missing pattern, or too many arguments
        options() # Show the usage message
        return
    patron = sys.argv[1] # The pattern to search for
    carpeta = sys.argv[2] if len(sys.argv) >= 3 else "." # Directory to search (default: current directory)
    ext = "." + sys.argv[3] if len(sys.argv) == 4 else "" # Extension, with the dot prepended for the user (empty: search all files)
    try:
        encontrar(patron, carpeta, ext) # Run the search with the pattern, directory and extension
    except KeyboardInterrupt: # Handle keyboard interruption
        keyInterrupt() # Show an interruption message
if __name__ == "__main__":
main()
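# Usage sketch (hypothetical file layout): search for "TODO" in all .py
# files under src/. Each match prints the file, line number and column,
# plus the matching line with a '*' marking the exact position:
#   python encontrarPatron.py TODO src py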
|
Posted by Joan Brasher on Thursday, January 17, 2019 in Peabody.
A Vanderbilt special education research team served as guest editors of the January issue of Exceptional Children, the distinguished research journal of the Council for Exceptional Children.
Douglas Fuchs, Nicholas Hobbs Professor of Special Education; and Lynn S. Fuchs, the Dunn Family Professor of Psychoeducational Assessment, co-edited the issue, choosing the theme of moderator analysis.
The Fuchses asked five accomplished intervention researchers to join them in writing for the issue, and seven research papers are featured. Each team selected a previously published intervention study on which to conduct moderator analysis, examining whether at-risk students’ pretreatment performance in reading or mathematics interacts with the effects of intervention.
An eighth and final article is authored by two respected quantitative psychologists at Vanderbilt, Kristopher Preacher and Sonya Sterba. Together they reflect on study findings and comment on methodological implications for guiding future work.
Read the January issue of Exceptional Children. |
#!/usr/bin/env python
# Read whitespace-separated POS tags from stdin and bracket simple noun
# phrases: chunks starting with DT/CD followed by adjectives (JJ) and
# nouns (NN/NNS), and runs of proper nouns (NNP). Semicolons act as separators.
import sys
for line in sys.stdin:
line = line.split()
i = 0
n = len(line)
print '(',
while i < n:
if line[i] == ';':
i += 1
elif line[i] in ('DT','CD'):
start = i
i += 1
while i < n and line[i] == 'JJ':
i += 1
while i < n and line[i] in ('NN','NNS'):
i += 1
if i - start > 1:
                print '(',
for j in range(start,i):
print line[j],
print ')',
# elif line[i] in ('JJ', 'NN','NNS'):
# start = i
# i += 1
#
# if line[i-1] == 'JJ':
# while i < n and line[i] == 'JJ':
# i += 1
#
# while i < n and line[i] in ('NN','NNS'):
# i += 1
#
# if i - start > 1:
# j = start
# print '(',
# for j in range(start,i):
# print line[j],
# print ')',
elif line[i] == 'NNP':
start = i
i += 1
while i < n and line[i] == 'NNP':
i += 1
if i - start > 1:
print '(',
for j in range(start,i):
print line[j],
print ')',
elif i < n:
print line[i],
i += 1
print ')'
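# Usage sketch (script name assumed): POS tags in, bracketed chunks out.
#   $ echo "DT JJ NN ; NNP NNP" | python chunker.py
#   ( ( DT JJ NN ) ( NNP NNP ) )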
|
1. Log into the router firmware and click Tools.
2. Click System Commands in the Tools section. |
"""Clean the Brazil data from BuzzFeed
https://github.com/BuzzFeedNews/zika-data
"""
import os.path
from glob import glob
import re
import pandas as pd
import unicodedata
import numpy as np
def strip_accents(s):
return(''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'))
def load_data(filepath):
df = pd.read_csv(filepath)
return(df)
def get_report_date(filepath,
dir_delim='/',
file_name_position=-1,
date_pattern=r'\d{4}-\d{2}-\d{2}',
date_result_position=0):
file_name = filepath.split(dir_delim)[file_name_position]
return(re.findall(date_pattern, file_name)[date_result_position])
def get_cdc_places_match(df, cdc_places_df, df_col_name, cdc_places_col_name):
match = cdc_places_df[cdc_places_df[
cdc_places_col_name].isin(df[df_col_name])]
return(match)
def get_location(df, cdc_location_df):
location = get_cdc_places_match(df, cdc_location_df,
'state_no_accents', 'state_province')
return(location['location'].reset_index(drop=True))
def get_location_type(df, cdc_location_df):
location_type = get_cdc_places_match(df, cdc_location_df,
'state_no_accents', 'state_province')
return(location_type['location_type'].reset_index(drop=True))
def clean_data(df):
df['state_no_accents'] = df['state'].apply(strip_accents)
df = df.replace(r'\s*[Ss]em registros\s*', np.nan, regex=True)
return df
def get_cdc_data_field_code(cdc_data_guide_df, cdc_str):
return(cdc_data_guide_df[cdc_data_guide_df['data_field'] ==
cdc_str]['data_field_code'].values[0])
def main():
here = os.path.abspath(os.path.dirname(__file__))
recodes = pd.read_csv(os.path.join(
here, '../../data/buzzfeed_recodes.csv'))
cdc_brazil_places_df = pd.read_csv(os.path.join(
here, '../../../zika/Brazil/BR_Places.csv'))
cdc_brazil_data_guide_df = pd.read_csv(os.path.join(
here, '../../../zika/Brazil/BR_Data_Guide.csv'))
cdc_brazil_data_guide_right = cdc_brazil_data_guide_df[
['data_field_code', 'data_field', 'unit', 'time_period_type']]
buzzfeed_brazil_datasets = glob(
'../../../zika-data/data/parsed/brazil/*.csv')
num_data_sets = len(buzzfeed_brazil_datasets)
for i, brazil_dataset in enumerate(buzzfeed_brazil_datasets):
print("Cleaning dataset {} of {}".format(i + 1, num_data_sets))
df = load_data(brazil_dataset)
df = clean_data(df)
report_date = get_report_date(brazil_dataset)
location = get_location(df, cdc_brazil_places_df)
location_type = get_location_type(df, cdc_brazil_places_df)
df['report_date'] = report_date
df['location'] = location
df['location_type'] = location_type
df['time_period'] = 'NA'
melt_value_vars = [c for c in df.columns if re.search(
'^cases|^microcephaly', c)]
df = pd.melt(df, id_vars=[ # 'no', 'state',
'report_date', 'location', 'location_type',
'time_period'],
value_vars=melt_value_vars,
var_name='data_field_original',
value_name='value')
df = pd.merge(df, recodes,
left_on='data_field_original', right_on='buzzfeed')
df = pd.merge(df, cdc_brazil_data_guide_right,
left_on='cdc', right_on='data_field')
# `cases_reported_total` is not a field in the CDC guidelines
# this value was a row sum of the other counts and could always
# be recalculated
df = df.loc[df['data_field'] != 'cases_reported_total']
# clean up before export
df = df.drop(['buzzfeed', 'cdc'], axis=1)
df = df.fillna('NA')
df_file_path = os.path.join(
here, '..', '..', 'output', brazil_dataset.split('/')[-1])
df.to_csv(df_file_path, index=False)
if __name__ == "__main__":
main()
|
The VIA VX900H media system processor provides DirectX 9.0 integrated graphics, crystal clear HD audio and Gigabit networking.
Rear panel I/O includes a GigaLAN port, an HDMI port, a VGA port, four USB 2.0 ports, a serial port connector, a DC-in jack (manufacturing option), and both line-out and mic-in jacks. Onboard connectors provide a wealth of options, including a PCIe Gen2 x1 slot, a Mini-PCIe socket, a PCI slot, two SATA connectors, two voltage-selectable RS-232 pin headers (5V/12V), two USB pin headers for three additional ports, and three additional COM ports.
# from:
# http://piratefache.ch/twitter-streaming-api-with-tweepy/
# This is just a placeholder; all references to this code in other scripts
# will be changed to twit_auths, but I have kept this in the repo for explanation
class authentication1:
def __init__(self):
self.consumer_key = "######################################"
self.consumer_secret = "#######################################"
# Go to http://apps.twitter.com and create an app.
# The consumer key and secret will be generated for you after
# After the step above, you will be redirected to your app's page.
# Create an access token under the the "Your access token" section
self.access_token = "###########################################"
self.access_token_secret = "#######################################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
class authentication2:
def __init__(self):
self.consumer_key = "#############################"
self.consumer_secret = "####################################"
self.access_token = "##########################################"
self.access_token_secret = "################################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
class authentication3:
def __init__(self):
self.consumer_key = "###################################"
self.consumer_secret = "##########################################"
self.access_token = "######################################"
self.access_token_secret = "###############################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
class authentication4:
def __init__(self):
self.consumer_key = "######################"
self.consumer_secret = "##################################"
self.access_token = "####################################"
self.access_token_secret = "#######################################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
class authentication5:
def __init__(self):
self.consumer_key = "########################################"
self.consumer_secret = "#########################################"
self.access_token = "################################################"
self.access_token_secret = "#############################################"
def getconsumer_key(self):
return self.consumer_key
def getconsumer_secret(self):
return self.consumer_secret
def getaccess_token(self):
return self.access_token
def getaccess_token_secret(self):
return self.access_token_secret
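# Usage sketch (assumes the tweepy library, which this file does not import):
# each authentication class bundles one set of credentials, so several API
# connections can be created and rotated.
#
#   import tweepy
#   creds = authentication1()
#   auth = tweepy.OAuthHandler(creds.getconsumer_key(), creds.getconsumer_secret())
#   auth.set_access_token(creds.getaccess_token(), creds.getaccess_token_secret())
#   api = tweepy.API(auth)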
|
Croatia is best known as a dream getaway for a perfect holiday. It has everything to satisfy the fantasy of any vacationer: famous historical places, untouched nature, hidden beaches where the water is so clear that the boats appear to float in the air, not to mention the gastronomy - and this is just the beginning. As experienced guides, we would love to invite you to join us on our Blue Cave and six island tour. The Blue Cave is a natural phenomenon on Biševo island, situated 1 hour and 30 minutes from Split's main harbor.
The next destination is a surprise - but it wouldn't be a surprise if we revealed it to you now. The Monk Seal Cave consists of a spacious outer part and an inner, narrow and darker part with an underwater opening to a partially shaded spot that once housed the seals known as the Mediterranean sea bear. The local people called it the sea bear or sea cow, while its Latin name is Monachus monachus - in translation, a monk. In this 70-meter-long and 4-meter-wide cave they lived, were born, and perhaps died; they were also nicknamed the sirens of Biševo island. The last ones were seen sometime in the 1960s. The cave is mysterious and attractive, and all the more so if you are lucky enough to spot one of the sea bears.
Stiniva is one of the most famous coves on Vis island, and certainly has the most unusual form. It is located on the south side of the island, bordered by high cliffs that leave an entrance from the sea only four meters wide. After passing through the "door", a beautiful rounded bay with a pebble beach opens up. Stiniva was declared the most beautiful beach in Europe in 2015. Our next destination is a small paradise on earth: the Blue Lagoon on Budikovac island, set between three islets. The Blue Lagoon is one of the prettiest locations in the middle part of the Adriatic for snorkeling and swimming. Here you can let yourself be lazy and enjoy the tranquility of one of the most beautiful seas in the world.
The next location that will leave you breathless is the Pakleni islands or, translated to English, the Devil's islands. Don't worry, there are no burning flames; they are the most beautiful island complex in the Adriatic sea. With over 20 hidden beaches and bays, this place will wake the explorer in you and make you want to stay. From the Pakleni islands we head to the most famous island in Croatia, Hvar island (Hvar town has probably one of the most beautiful harbors in the world). In Hvar you will have some free time to discover the Old Town, climb the Fortress, or just get lost in the small narrow streets like the locals do - we let you get acquainted with the town by yourselves. This is certainly one of the most popular boat tours, one of those tours that make you ask for more, and a perfect way to discover the undiscovered.
#!/usr/bin/env python
# This implements the FastMap algorithm
# for mapping points where only the distance between them is known
# to N-dimension coordinates.
# The FastMap algorithm was published in:
#
# FastMap: a fast algorithm for indexing, data-mining and
# visualization of traditional and multimedia datasets
# by Christos Faloutsos and King-Ip Lin
# http://portal.acm.org/citation.cfm?id=223812
# This code made available under the BSD license,
# details at the bottom of the file
# Copyright (c) 2009, Gunnar Aastrand Grimnes
import math
import random
# need scipy as usual
import scipy
# we will repeat the pick-pivot points heuristic this many times
# a higher value means "better" results, but 1 also works well
DISTANCE_ITERATIONS=1
class FastMap:
def __init__(self, dist,verbose=False):
if dist.max()>1:
dist/=dist.max()
self.dist=dist
self.verbose=verbose
def _furthest(self, o):
mx=-1000000
idx=-1
for i in range(len(self.dist)):
d=self._dist(i,o, self.col)
if d>mx:
mx=d
idx=i
return idx
def _pickPivot(self):
"""Find the two most distant points"""
o1=random.randint(0, len(self.dist)-1)
o2=-1
i=DISTANCE_ITERATIONS
while i>0:
o=self._furthest(o1)
if o==o2: break
o2=o
o=self._furthest(o2)
if o==o1: break
o1=o
i-=1
self.pivots[self.col]=(o1,o2)
return (o1,o2)
def _map(self, K):
if K==0: return
px,py=self._pickPivot()
if self.verbose: print "Picked %d,%d at K=%d"%(px,py,K)
if self._dist(px,py,self.col)==0:
return
for i in range(len(self.dist)):
self.res[i][self.col]=self._x(i, px,py)
self.col+=1
self._map(K-1)
    def _x(self,i,x,y):
        """Project the i'th point onto the line defined by x and y"""
        dix=self._dist(i,x,self.col)
        diy=self._dist(i,y,self.col)
        dxy=self._dist(x,y,self.col)
        # _dist returns squared distances, so the FastMap projection is
        # (d_ix^2 + d_xy^2 - d_iy^2) / (2 * d_xy)
        return (dix + dxy - diy) / (2 * math.sqrt(dxy))
def _dist(self, x,y,k):
"""Recursively compute the distance based on previous projections"""
if k==0: return self.dist[x,y]**2
rec=self._dist(x,y, k-1)
resd=(self.res[x][k] - self.res[y][k])**2
return rec-resd
def map(self, K):
self.col=0
self.res=scipy.zeros((len(self.dist),K))
self.pivots=scipy.zeros((K,2),"i")
self._map(K)
return self.res
def fastmap(dist, K):
"""dist is a NxN distance matrix
returns coordinates for each N in K dimensions
"""
return FastMap(dist,True).map(K)
# Below here are methods for testing
def vlen(x,y):
return math.sqrt(sum((x-y)**2))
def distmatrix(p, c=vlen):
dist=scipy.zeros((len(p),len(p)))
for x in range(len(p)):
for y in range(x,len(p)):
if x==y: continue
dist[x,y]=c(p[x], p[y])
dist[y,x]=dist[x,y]
return dist
def distortion(d1,d2):
return scipy.sum(((d1/d1.max())-(d2/d2.max()))**2)/d1.size
def distortiontest():
points=[]
n=10
mean=10
dim=5
print "Generating %d %d-D points randomly distributed between [0-%d]"%(n,dim,mean)
for i in range(n):
points.append(scipy.array([random.uniform(0,mean) for x in range(dim)]))
print "Computing distance matrix"
dist=distmatrix(points)
print "Mapping"
p1=fastmap(dist,1)
print "K=1"
print "Distortion: ", distortion(distmatrix(p1),dist)
p2=fastmap(dist,2)
print "K=2"
print "Distortion: ", distortion(distmatrix(p1),dist)
p3=fastmap(dist,3)
print "K=3"
print "Distortion: ", distortion(distmatrix(p3),dist)
import pylab
pylab.scatter([x[0]/mean for x in points], [x[1]/mean for x in points],s=50)
pylab.scatter([x[0] for x in p2], [x[1] for x in p2], c="r")
pylab.show()
def stringtest():
import Levenshtein
strings=[ "acting", "activist", "compute", "coward", "forward", "interaction", "activity", "odor", "order", "international" ]
dist=distmatrix(strings, c=lambda x,y: 1-Levenshtein.ratio(x,y))
p=fastmap(dist,2)
import pylab
pylab.scatter([x[0] for x in p], [x[1] for x in p], c="r")
for i,s in enumerate(strings):
pylab.annotate(s,p[i])
pylab.title("Levenshtein distance mapped to 2D coordinates")
pylab.show()
if __name__=='__main__':
stringtest()
#distortiontest()
# Copyright (c) 2009, Gunnar Aastrand Grimnes
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
Chanel Paris-Dallas 2014 Shoes - I'm handbagholic!
import requests
import json
# The data entry form
##Geoserver_URL=string http://host[:port]/geoserver
# test -> ##Geoserver_URL=string http://192.168.50.171:8080/geoserver
##Username=string your_username
##Password=string your_password
##Image_mosaic_Store_Name=string a datastore name
# ##Image_mosaic_Store_Name=string countryMosaic
##Mosaic_Granule_to_add=vector
usr=Username
pswd=Password
geoserver_url=Geoserver_URL
store_name=Image_mosaic_Store_Name
granule_abs_path=Mosaic_Granule_to_add
base_rest_path="/rest/imports"
print "STEP 1 - Creating a new importer for the datastore: '" + store_name + "'..."
headers = {'content-type': 'application/json'}
jsonRunImporter = '{"import": {"targetWorkspace": {"workspace": {"name": "geonode"}},"targetStore": {"dataStore": {"name": "' + str(store_name) + '"}}}}'
print(jsonRunImporter)
url = geoserver_url + base_rest_path
r = requests.post(url, jsonRunImporter, auth=(usr, pswd), headers=headers)
print(r.text)
data = json.loads(r.text)
importerId = data["import"]["id"]
print "...importer successfuly created! importerId:'" + str(importerId) + "'"
print ""
print "STEP 2 - Going to load from filesystem the geotif to upload..."
upload = {'files': ('country.tiff', open(granule_abs_path, "rb"), 'application/octet-stream')}
print "...geotif successfuly loaded! ready to create a run a task for the importer " + str(importerId) + "..."
url += "/" + str(importerId) + "/tasks"
r = requests.post(url, files=upload, auth=(usr, pswd))
data = json.loads(r.text)
taskId = data["task"]["id"]
print "...task created! taskId: '" + str(taskId) + "'"
print ""
print "STEP 3 - Importer: '" + str(importerId) + "' run taskId: '" + str(taskId) + "'"
url = geoserver_url + base_rest_path + "/" + str(importerId)
print str(url)
r = requests.post(url, auth=(usr, pswd))
print "task started!"
|
Watson's Clothing Drive | Watson Inc.
This December we are holding our annual clothing drive. We are asking our employees to donate new hats, gloves, underwear, and socks. For every 25 items donated, the company will donate $100. This money will be pooled together, and we will challenge our employees to find the best deals on coats and other winter clothing. We will also be packaging small single-use toiletry items into bags. All items will be donated to the Columbus House in New Haven!
import math
import pygame
import forestofsquirrels.trees
pygame.init()
class SpeechBubble(pygame.sprite.Sprite):
font = pygame.font.Font("freesansbold.ttf", 20)
topleft = pygame.image.load("forestofsquirrels/graphics/corner.png")
topright = pygame.transform.flip(topleft, True, False)
bottomleft = pygame.transform.flip(topleft, False, True)
bottomright = pygame.transform.flip(topleft, True, True)
    def __init__(self, forest, squirrel, message):
        pygame.sprite.Sprite.__init__(self, forest)
        self.message = message
        self.squirrel = squirrel
        self._render()
        self.updated = 0
        self.y = self.squirrel.y
    def _render(self):
        # Render the bubble text with rounded corners and anchor it to the
        # squirrel, on the side the squirrel is facing.
        image = self.font.render(self.message, False, (0, 0, 0), (255, 255, 255))
        self.rect = image.get_rect().inflate(4, 4)
        self.image = pygame.Surface((self.rect.width, self.rect.height))
        self.image.blit(image, (2, 2))
        self.image.blit(self.topleft, (0, 0))
        self.image.blit(self.topright, (self.rect.width - 4, 0))
        if self.squirrel.image == self.squirrel.leftimg:
            self.rect.bottomright = self.squirrel.rect.topleft
            self.image.blit(self.bottomleft, (0, self.rect.height - 4))
        else:
            self.rect.bottomleft = self.squirrel.rect.topright
            self.image.blit(self.bottomright, (self.rect.width - 4, self.rect.height - 4))
    def update(self):
        self.y = self.squirrel.y
        self._render()
        self.updated += 1
        if self.updated > 60:
            self.kill()
class Squirrel(pygame.sprite.Sprite):
""" Base class for squirrels.
"""
def __init__(self, forest, x, y):
self.x = x
self.y = y
self.z = 0
self.xoffset = 0
self.yoffset = 0
self.climbing = None
self.hoppingLeft = False
self.hoppingRight = False
self.hoppingUp = False
self.hoppingDown = False
self.goingLeft = False
self.goingRight = False
self.goingUp = False
self.goingDown = False
self.hopstep = -1
self.leftimg = pygame.image.load("forestofsquirrels/graphics/squirrel.png").convert_alpha()
self.rightimg = pygame.transform.flip(self.leftimg, True, False)
self.leftrunimg = pygame.image.load("forestofsquirrels/graphics/runningsquirrel.png").convert_alpha()
self.rightrunimg = pygame.transform.flip(self.leftrunimg, True, False)
self.image = self.leftimg
self.rect = self.image.get_rect()
self.colliderect = self.rect
self.level = 0
pygame.sprite.Sprite.__init__(self, forest)
self.forest = forest
self.can_climb = None
self.acorn = False
self.health = 8
def startright(self):
self.goingRight = True
self.goingLeft = False
self.image = self.rightrunimg
self.hopstep = max(self.hopstep, 0)
def stopright(self):
self.goingRight = False
def startleft(self):
self.goingLeft = True
self.goingRight = False
self.image = self.leftrunimg
self.hopstep = max(self.hopstep, 0)
def stopleft(self):
self.goingLeft = False
def startup(self):
self.goingUp = True
self.goingDown = False
self.hopstep = max(self.hopstep, 0)
def stopup(self):
self.goingUp = False
def startdown(self):
self.goingDown = True
self.goingUp = False
self.hopstep = max(self.hopstep, 0)
def stopdown(self):
self.goingDown = False
def start_climbing(self):
self.climbing = self.can_climb
def stop_climbing(self):
if self.z < 10:
self.climbing = None
def say(self, message):
SpeechBubble(self.forest, self, message)
def on_space(self, window, clock):
if self.climbing:
for hole in self.climbing[0].holes:
if hole[0] == self.climbing[1] and hole[1] < self.z < hole[2]:
area = __import__("forestofsquirrels.world.rooms." + hole[3], fromlist=["main"])
area.main(self, window, clock)
return True
if self.z == self.climbing[0].maxheight:
self.acorn = True
return False
def update(self):
if not self.climbing:
self.yoffset = 0
if self.hopstep >= 0:
if self.hopstep == 0:
self.hoppingLeft = self.goingLeft
self.hoppingRight = self.goingRight
self.hoppingDown = self.goingDown
self.hoppingUp = self.goingUp
self.hopstep += 1
self.z = math.sin(self.hopstep * math.pi / 10) * 10
if self.hopstep == 10:
if self.goingRight or self.goingLeft or self.goingUp or self.goingDown:
self.hopstep = 0
else:
if self.hoppingLeft:
self.image = self.leftimg
elif self.hoppingRight:
self.image = self.rightimg
self.hopstep = -1
self.hoppingLeft = False
self.hoppingRight = False
self.hoppingUp = False
self.hoppingDown = False
if self.hoppingRight:
self.x += 3
elif self.hoppingLeft:
self.x -= 3
if self.hoppingUp:
self.y -= 2
elif self.hoppingDown:
self.y += 2
self.colliderect = pygame.Rect(self.x, self.y, 18, 18)
self.can_climb = None
for tree in filter(lambda s: isinstance(s, forestofsquirrels.trees.Tree), self.forest.sprites()):
if tree.colliderect.colliderect(self.colliderect):
overlap = self.colliderect.union(tree.colliderect)
xoffset, yoffset = overlap.width, overlap.height
if self.hoppingDown and self.colliderect.bottom < tree.colliderect.bottom and (
xoffset > yoffset or not (self.hoppingLeft or self.hoppingRight)):
self.colliderect.bottom = tree.colliderect.top
elif self.hoppingUp and self.colliderect.top > tree.colliderect.top and (
xoffset > yoffset or not (self.hoppingLeft or self.hoppingRight)):
self.colliderect.top = tree.colliderect.bottom
elif self.hoppingLeft and (xoffset < yoffset or not (self.hoppingUp or self.hoppingDown)):
self.colliderect.left = tree.colliderect.right
self.climbing = [tree, "right"]
elif self.hoppingRight and (xoffset < yoffset or not (self.hoppingUp or self.hoppingDown)):
self.colliderect.right = tree.colliderect.left
self.climbing = [tree, "left"]
self.x, self.y = self.colliderect.topleft
else:
if self.goingRight:
if self.climbing[1] == "right":
self.z -= 2
self.image = pygame.transform.rotate(self.rightrunimg, -90)
if self.z <= 18:
self.climbing = None
self.image = self.rightrunimg
else:
if self.z < self.climbing[0].maxheight:
self.z += 2
else:
self.z = self.climbing[0].maxheight
self.image = pygame.transform.rotate(self.rightrunimg, 90)
elif self.goingLeft:
if self.climbing[1] == "right":
if self.z < self.climbing[0].maxheight:
self.z += 2
else:
self.z = self.climbing[0].maxheight
self.image = pygame.transform.rotate(self.leftrunimg, -90)
else:
self.z -= 2
self.image = pygame.transform.rotate(self.leftrunimg, 90)
if self.z <= 18:
self.climbing = None
self.image = self.leftrunimg
self.yoffset = -self.z
self.rect = pygame.Rect(self.x + self.xoffset, self.y + self.yoffset, 18, 18)
|
Note: This post was originally posted on growthmarketingconference.com.
The most common questions we're asked at Scripted.com are, "What's the ROI on a blog post?" or "How do I know if I'm doing this content marketing thing right?"
Until recently, we didn't have insight into these answers because we only delivered the writing.
But, we recently launched our Scripted Analytics product and now have the ability to dive into the most difficult questions that content marketers face.
We're excited to share them with you now!
First, let's define some important key concepts.
Evergreen content. We define an evergreen post as one that continues to drive readers to your blog one year after it was published. Evergreen posts have remained popular and accessible to readers through good writing, interesting and lasting topics, or good distribution. A post about the history of a product category would be more likely to have lasting engagement, and therefore remain evergreen, than a post about a specific product release. Evergreen posts are the gift that keeps on giving, driving readers to your blog long after publishing.
Viral content. When one of your recent posts clearly dominates your recent traffic, we define that as viral. Not everything you push out will get on the front page of Reddit and Buzzfeed, but if it performs far better than your other recent posts, we view that as viral relative to your baseline. Viral content can signal hot topics for your readers while also providing clues as to which distribution channels work best for your content.
Users, Visits, and Time on Page. Each of these statistics is trackable and each surfaces a different facet of your customer engagement. Users shows how many unique individuals came to read a post on your blog. Visits tells you how many times they returned. Time on Page informs you of their overall interest. The longer you keep your customers engaged, the more likely they are to convert.
The dark blue section represents the volume of traffic coming in from evergreen content (published more than one year ago), the lighter blue section is transitioning content (published between 3 and 12 months ago), and the faint blue on top is recent content published in the past 3 months.
These time frames are all relative to the week you select, by mousing over and clicking. The sum of these distinct cohort streams, as shown by their stacking up, represents your total blog traffic.
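To make the cohort idea concrete, here is a minimal sketch of how weekly traffic might be bucketed into the three streams. The function names and data layout are our assumptions for illustration, not the actual Scripted Analytics implementation.

from datetime import date

def cohort(publish_date, week):
    # Classify a post for a given reporting week by its age in days.
    age = (week - publish_date).days
    if age > 365:
        return "evergreen"      # published more than one year ago
    if age > 90:
        return "transitioning"  # published 3-12 months ago
    return "recent"             # published in the past 3 months

def weekly_streams(posts, week):
    # Sum pageviews per cohort; posts is a list of (publish_date, pageviews).
    totals = {"evergreen": 0, "transitioning": 0, "recent": 0}
    for publish_date, pageviews in posts:
        totals[cohort(publish_date, week)] += pageviews
    return totals

posts = [(date(2015, 6, 1), 120), (date(2016, 2, 1), 300), (date(2016, 7, 20), 950)]
print(weekly_streams(posts, date(2016, 8, 1)))
# {'evergreen': 120, 'transitioning': 300, 'recent': 950}

Stacking these weekly totals over time yields exactly the streamgraph described above.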
A successful evergreen donut has most of its mass in the "All Other Posts" category.
This means that there is no one single post responsible for evergreen traffic, which makes your readership less vulnerable to shifts in distribution channels (a linking partner changing a post, a social media drive ending, a front page mention timing out) or topic fatigue.
A diverse post portfolio in your evergreen chart ensures a lasting and stable traffic base for your blog.
The counterpart to the evergreen donut, the recent donut, seeks to inform you about which of your recent posts are performing best.
Posts are like diverse experiments in reader interest, and this chart quickly highlights which experiments are going well. The ideal recent donut has most of its traffic driven by a few superstars, which, with the right focus on distribution, will ideally trickle down the streamgraph over time to become part of your evergreen bedrock.
If the evergreen and recent donuts inform you on the 'what' of your successful content, the sources donut clues you into the 'how'.
Marketing campaigns via social or email marketing, paid ads, or good ol' organic search can quickly be surfaced here.
Armed with the knowledge of which posts are most engaging to users from the evergreen and recent donuts, the sources donut can direct efforts to reach customers in the ways that have been most successful to you in the past - and track the progress of new ventures.
Let's look at three examples of real company blog analytics and discuss what we can learn from them.
Evergreen Decay is a blog that initially looks spectacular, with high weekly traffic flow and an abundance of evergreen content. However, on close examination of the cohort streams, we can identify a stale content strategy as the primary cause of an overall slow decay.
Even the most engaging content will get less readership over time due to topic saturation and changing reader interests.
In this case, too much of this blog's traffic is coming from evergreen content. Some weeks, evergreen content accounts for 90% of traffic.
While that speaks to the quality of the writing and distribution of old posts, without new content to interest readers, the overall engagement of the blog is steadily dropping. Fresh content is almost non-existent in some weeks, meaning the company is not continuing to publish new content or distribute its new content successfully.
This slowing performance is partially masked by the strength of their evergreen cohort, but without replenishment, reader interest is slowly grinding down.
This company needs to post new content regularly again.
They should look at both recent and evergreen posts in their donut charts that have been successful and expand upon those topics, while also promoting these posts via the distribution channels from the sources chart that have traditionally worked well.
In contrast to the Evergreen Decay blog, the Healthy Baby blog shows a lot of promise.
This blog has the breakdown of recent to evergreen traffic that we've seen as indicative of the best-performing blogs: about 2/3 evergreen and 1/3 new posts.
This way, the new posts will convert to transitioning, and eventually to evergreen, diversifying this blog's content age and engagement. We found that despite two hiccups in their traffic - which were unfortunate instances of blog downtime in March and April - their readership steadily increased.
These guys simply need to maintain! Their topics and choices of distribution channels are building up readership quickly and keeping it engaged with a diverse content offering. They should strive for better uptime so readers don't shy away from their blog. Other than that, it appears to keep moving up and to the right.
In our final example, we see the Growing Pains blog, a new blog that is having trouble getting off the ground and really engaging with readers.
At a glance, overall traffic trends positively, but a closer look reveals some trouble areas.
An overwhelming part of the traffic is driven by fresh content, but despite high levels of conversion to the transition stream, very little of that early, successful content is converting into evergreen. This means that a large portion of traffic is dependent on recent blog posts as opposed to a more diverse offering, making day to day traffic shaky and unpredictable.
Compounding this problem, topics selected for these blog posts aren't carrying weight with readers or being distributed in successful channels, resulting in a fast decay with no evergreen conversion.
This team needs to identify topics with evergreen potential by analyzing the few posts that have withstood the test of time.
They might also consider using popular trending topics to make their posts more widely accessible. We also recommend that they diversify their distribution channels and track which ones really work over the coming weeks, then double down on those channels.
The key here is to make sure that the posts they publish recently are getting traffic long after publication.
About two-thirds of your blog's traffic should come from evergreen posts.
Your evergreen traffic should come from a variety of posts, distributed evenly, rather than just a few high performers.
About one-third of your blog's traffic should come from recent posts.
Your recent post traffic should be spiky, with a few posts going (relatively) viral among your readers and their followers.
If you can maintain this traffic profile while publishing consistently, you will see your overall blog traffic growth snowball upward as recent posts transition into evergreen posts.
It matters because the more minutes visitors spend on your site, the more likely they are to convert. This conversion could be as simple as signing up for a newsletter or demo, or going all the way through an e-commerce shopping cart purchase. This is a numbers game: more activity at the top of the funnel drives more conversions through the bottom of the funnel.
Whether you use our Scripted Analytics tool or any number of other blogging analytics products, the important takeaway is that it's critical to discover trends in your content's performance.
If you see that recent content is not performing well, you have to address it quickly or risk compromising future growth from evergreen content. There's a discipline involved here in maintaining both a macro and a micro view of your blog, but the benefits will pay off. I promise!
Published by Ryan Buckley on Thursday, August 4, 2016 in Content Marketing, Analytics, Blog Measuring, Roi. |
import microbit
def rescale(src_scale, dest_scale, x):
"""Map one number scale to another
For example, to convert a score of 4 stars out of 5 into a percentage:
>>> rescale((0, 5), (0, 100), 4)
80.0
Great for mapping different input values into LED pixel brightnesses!
"""
src_start, src_end = src_scale
# what proportion along src_scale x is:
proportion = 1.0 * (x - src_start) / (src_end - src_start)
dest_start, dest_end = dest_scale
# apply our proportion to the dest_scale
return proportion * (dest_end - dest_start) + dest_start
UNKNOWN = type('_', (), {'__str__': lambda _: 'UNKNOWN'})
UNKNOWN_ANGLE = UNKNOWN()
DUTY_0PC = 0
DUTY_100PC = 1023
class Servo:
"""
Futaba S3003 - Servo
Control System: +Pulse Width Control 1520usec Neutral
Required Pulse: 3-5 Volt Peak to Peak Square Wave
Operating Voltage: 4.8-6.0 Volts
Operating Temperature Range: -20 to +60 Degree C
Operating Speed (4.8V): 0.23sec/60 degrees at no load
Operating Speed (6.0V): 0.19sec/60 degrees at no load
Stall Torque (4.8V): 44 oz/in. (3.2kg.cm)
Stall Torque (6.0V): 56.8 oz/in. (4.1kg.cm)
Operating Angle: 45 Deg. one side pulse traveling 400usec
Continuous Rotation Modifiable: Yes
Direction: Counter Clockwise/Pulse Traveling 1520-1900usec
"""
PERIOD = 20000 # microseconds
pin = microbit.pin0
min_pulse_width = 500 # microseconds
mid_pulse_width = 1520
max_pulse_width = 3000
min_deg = 0 # degrees
mid_deg = 90
max_deg = 180
max_on_time = 1200 # milliseconds
clockwise_speed_factor = 0.85
def __init__(self):
print('Initialise PWM to {} μs {:.0f} ms {:.0f} Hz'.format(
self.PERIOD, self.PERIOD/1000, 1000000./self.PERIOD))
self.pin.set_analog_period_microseconds(self.PERIOD)
self.angle = UNKNOWN_ANGLE
self.point(self.mid_deg)
def deg_to_pulse_width(self, deg):
return rescale(
(self.min_deg, self.max_deg),
(self.max_pulse_width, self.min_pulse_width),
deg
)
def pulse_width_to_duty_cycle_value(self, pulse_width):
return rescale(
(0, self.PERIOD),
(DUTY_0PC, DUTY_100PC),
pulse_width
)
def deg_to_duty_cycle_value(self, deg):
pulse_width = self.deg_to_pulse_width(deg)
assert self.min_pulse_width <= pulse_width <= self.max_pulse_width
print('\tpulse width {:.0f} μs'.format(pulse_width))
duty_cycle_value = self.pulse_width_to_duty_cycle_value(pulse_width)
percent = rescale((0, DUTY_100PC), (0, 100), duty_cycle_value)
print('\tduty cycle {:.0f}/{} ({:.1f}%)'.format(
duty_cycle_value, DUTY_100PC, percent))
return duty_cycle_value
def calc_on_time(self, deg):
"""
Operating Speed (4.8V): 0.23sec/60 degrees at no load
ms_per_deg = 230 / 60.
"""
# from observations:
ms_per_deg = 600 / 90.
if self.angle is UNKNOWN_ANGLE:
return self.max_on_time / 2.
is_clockwise = self.angle < deg
travel = abs(deg - self.angle)
on_time = travel * ms_per_deg
if is_clockwise:
on_time *= self.clockwise_speed_factor
assert on_time <= self.max_on_time
return on_time
def wait_and_display_pwm(self, duty_cycle_value, on_time):
start = microbit.running_time()
width = round(on_time / 15)
hits = range(0, width, round(DUTY_100PC / duty_cycle_value))
points = [('#' if i in hits else '.') for i in range(width)]
while True:
microbit.sleep(1)
duration = microbit.running_time() - start
progress_left = 1 - (duration / on_time)
points_left = int((width * progress_left)) + 1
while points and len(points) > points_left:
point = points.pop(0)
print(point, end='', flush=True)
if duration >= on_time:
break
print()
def pulse_burst(self, duty_cycle_value, on_time):
try:
microbit.pin0.write_analog(duty_cycle_value)
self.wait_and_display_pwm(duty_cycle_value, on_time)
finally:
# ensure we don't leave the pwm on
microbit.pin0.write_analog(0)
def point(self, deg):
print('point {}° to {}°'.format(self.angle, deg))
duty_cycle_value = self.deg_to_duty_cycle_value(deg)
on_time = self.calc_on_time(deg)
print('\ton for {:.0f} ms'.format(on_time))
self.angle = deg
self.pulse_burst(duty_cycle_value, on_time)
pause_time = 5
def demo():
servo = Servo()
for deg in 0, 180, 90, 180, 0:
servo.point(deg)
microbit.sleep(pause_time)
if __name__ == '__main__':
demo()
|
I decided to make Tankard for a personal project and then figured why not put it on GitHub as I am working on it. Tankard is very immature at the moment. I plan on adding support for all of the GET routes of the BreweryDB API. I've also been pretty interested in JRuby as of late and wanted to make Tankard thread safe.
If you visit rubygems you can find the documentation for a specific gem release.
Issues can be reported right here on the git repo. Everyone is encouraged to write an issue if they encounter a bug or have a feature request.
I will be accepting pull requests once I get most of the endpoints implemented. Check back soon for more details.
from babelfy import babelfy
from ukb import wsd
from candc import postag
from spotlight import spotlight
import ConfigParser
import logging as log
from mappings import bn2dbpedia, offset2bn, bn2offset
from os.path import join, dirname
# read configuration
config = ConfigParser.ConfigParser()
config.read(join(dirname(__file__),'../config/disambiguation.conf'))
config_mapping = ConfigParser.ConfigParser()
config_mapping.read(join(dirname(__file__),'../config/mapping.conf'))
def disambiguation(tokenized, drs):
# Word Sense Disambiguation
entities = []
if config.get('wsd', 'module') == 'babelfy':
log.info("Calling Babelfy")
disambiguated = babelfy(tokenized)
synsets = disambiguated['synsets']
if config_mapping.get('net', 'module') == 'wordnet':
synsets = babelfy_to_wordnet(synsets)
if config.get('el', 'module') == 'babelfy':
log.info("Using Babelfy also for entities")
if(disambiguated != None):
entities = disambiguated['entities']
elif config.get('wsd', 'module') == 'ukb':
log.info("Calling POS-tagger")
postags = postag(tokenized)
log.info("Calling UKB")
disambiguated = wsd(postags)
synsets = disambiguated['synsets']
if config_mapping.get('net', 'module') == 'babelnet':
            synsets = ukb_to_babelnet(synsets)
# Entity Linking
if config.get('el', 'module') == 'babelfy' and config.get('wsd', 'module') != 'babelfy':
log.info("Calling Babelfy")
disambiguated = babelfy(tokenized)
if(disambiguated != None):
entities = disambiguated['entities']
elif config.get('el', 'module') == 'spotlight':
log.info("Calling Spotlight")
disambiguated = spotlight(tokenized)
if not disambiguated:
return None, None
if(disambiguated != None):
entities = disambiguated['entities']
elif config.get('el', 'module') == 'none':
log.info("No module selected for entity linking")
entities = []
# enriching the entity list with WordNet mapping
'''
for synset in synsets:
offset = synset['synset'].split('/')[-1]
if offset in offset2bn:
bn = offset2bn[offset]
if bn in bn2dbpedia:
entity = bn2dbpedia[bn]
if entity != '-NA-':
uri = u'http://dbpedia.org/resource/{0}'.format(entity)
if not uri in [e['entity'] for e in entities]:
entities.append({'token_start': synset['token_start'],
'token_end': synset['token_end'],
'entity': uri})
'''
return synsets, entities
def babelfy_to_wordnet(synsets):
try:
for synset in synsets:
bn_id = synset['synset'].split('/')[-1]
if bn_id in bn2offset:
synset['synset'] = 'http://wordnet-rdf.princeton.edu/wn31/{0}'.format(bn2offset[bn_id])
else:
synset['synset'] = ''
except:
log.error("babelfy(): error linking to WordNet output")
return None
return synsets
def ukb_to_babelnet(synsets):
try:
for synset in synsets:
wn_id = synset['synset'].split('/')[-1]
if wn_id in offset2bn:
synset['synset'] = 'http://babelnet.org/rdf/{0}'.format(offset2bn[wn_id])
else:
synset['synset'] = ''
except:
log.error("UBK(): error linking to BabelNet output")
return None
return synsets
|
Do you have any questions or concerns? We’d love to hear from you! At Davis Homes, prompt customer service is not just part of our business, it’s something we live by.
If you want to connect with us and stay up to date with Davis Homes, follow us with the social links below! |
import random
import sys
from growing_tree import GrowingTreeMaze
class BraidMaze(object):
"""Convert a perfect maze into a braided maze"""
def __init__(self, x_max, y_max):
self._x_max = x_max
self._y_max = y_max
gt = GrowingTreeMaze(x_max, y_max)
gt.generate()
self._matrix = gt.matrix
def generate(self):
"""Remove dead ends from the maze
"""
print self._matrix
for point, cell in self._matrix:
walls = zip(('U', 'L', 'D', 'R'), cell)
blocked = [x for x in walls if not x[1]]
if len(blocked) < 3:
# we have more than one exit, this isn't a dead end and we
# don't need to do anything
continue
print "***"
print "%s: %s" % (blocked, len(blocked))
random.shuffle(blocked)
            while blocked:
try:
self._matrix.carve(point, blocked.pop()[0])
except IndexError:
continue
break
def __str__(self):
return str(self._matrix)
def main():
"""Driver function
"""
if len(sys.argv) == 2:
seed = int(sys.argv[1])
else:
seed = random.randint(0, sys.maxint)
print "Seeding with %s" % seed
random.seed(seed)
maze = BraidMaze(50, 50)
maze.generate()
print maze
if __name__ == '__main__':
main()
|
Asian Market Orem offers a Chinese and Asian foods cooking class this Friday, May 4th, and Saturday, May 5th. Friday's cooking class (Chinese foods) menu: won ton soup and lo mein. It costs $15.99 per person, including appetizers.
The Asian Market SNAP retailer, located in UT, accepts EBT cards. This sales outlet is authorized to accept Utah food stamps by the United States Department of Agriculture. This EBT location is required to offer certain kinds of foods to be eligible as a SNAP retailer.
#!/usr/bin/env python3
import os, sys
sys.path.append( os.path.dirname(__file__ ) )
from data.vot2016 import VOT2016_Data_Provider
from config import cfg
from model import Model
import sklearn
import tensorflow as tf
import numpy as np
from PIL import Image
from meval import calc_auc, print_step_auc
import text_histogram
def ave_weight(gtdata):
gtdata_flat = gtdata.reshape((-1))
weight = np.zeros(shape=gtdata_flat.shape)
ones = np.sum(gtdata)
length = len(gtdata_flat)
zeros = length-ones
zeros_weight = ones/zeros
for i in range(len(gtdata_flat)):
if gtdata_flat[i] > 0.5:
weight[i] = 1.
else:
weight[i] = zeros_weight
weight = weight.reshape(gtdata.shape)
return weight
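# A vectorized sketch equivalent to ave_weight above (an assumption about
# intent, not part of the original training code): background pixels are
# down-weighted by the foreground/background ratio without a Python loop.
def ave_weight_vectorized(gtdata):
    ones = np.sum(gtdata)
    zeros = gtdata.size - ones
    return np.where(gtdata > 0.5, 1.0, ones / zeros)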
def train(model_path = None,
save_path = '/home/cjl/tf_runet/models/20180612',
pro_path = '/home/cjl/tf_runet',
max_size = None,
total_step = 0,
display = False,
displaystep = 30,
save = True,
dataidx = 10):
print('begin_train')
data_path = pro_path + '/data/vot2016'
data_provider = VOT2016_Data_Provider(data_path, cfg)
data_provider.random_batch_init()
data_provider.dataidx = dataidx
model = Model()
train_writer = tf.summary.FileWriter(save_path, model.sess.graph)
if model_path is None:
model.init_vars_random()
else:
model.restore(model_path)
    training = True
    while training:
total_step += 1
print('--------------------------------------')
print('total_step:', total_step)
iptdata, gtdata = data_provider.get_a_random_batch(jump=2)
weight = ave_weight(gtdata)
summary, cost, otherlabels, predict = model.train(iptdata, gtdata, weight)
#text_histogram.histogram(list(iptdata.astype(float).reshape((-1))))
#text_histogram.histogram(list(gtdata.astype(float).reshape((-1))))
auc = calc_auc(predict, otherlabels)
print("cost:", cost, " auc:" , auc)
print_step_auc(predict, otherlabels)
text_histogram.histogram(list(predict.astype(float).reshape((-1))))
train_writer.add_summary(summary, total_step)
if (save and total_step % 20 == 0):
filename = save_path + '/train' + str(total_step)
model.save(filename)
print('========================================')
if __name__ == '__main__':
#train()
#train('/home/cjl/models/20171127/train200')
#newclass()
#predict('/home/cjl/models/20171201/train150')
scripts_path = os.path.split( os.path.realpath( sys.argv[0] ) )[0]
train(
pro_path = scripts_path,
model_path = None,
save_path = scripts_path + '/models/0402_all',
max_size = (300,300),
dataidx = 10)
|
This handsome little guy is Coco. He is a happy guy that enjoys playing with his toys and giving kisses to his foster family.
Coco has been with his foster family for a week now and he is doing excellent on a leash with a harness. He enjoys the walks throughout the neighbourhood; however, he can startle easily with too much action around him. He has learned to stay by his human’s side when you approach other people or someone on a bike, and this helps him peacefully walk along beside you. He is crate trained during the day when his foster family is out and is as quiet as a mouse. It seems he is house trained, as he has not had any accidents in the house since his arrival.
He is timid with humans (more so men) at first but warms up nicely, and is super sweet and playful once he feels comfortable around you. The movements of children make Coco nervous. He will bark, act defensively, and try to run away. He would do best in a home with no small children.
Coco is always polite when there is food around. He will sit nicely for treats and while his foster mom gets his meals ready. During family dinners, he never begs or jumps up towards the table for food. He has not shown any interest in chewing on anything in his foster home except his toys and a large dog bone (which he carries around the house with him). He is even willing to trade his bone or any toy for some snuggles.
He is young and still has some puppy characteristics to work on, such as nipping in play when excited. Obedience classes are a must for Coco. He has a good appetite, which will help with obedience training, and understands the command ‘sit’ to receive a treat. He is currently on a grain-free diet and loves cut-up vegetables as a treat. He also enjoys car rides.
Coco sleeps through the night in the master bedroom on his foster parent’s bed. He loves his sleep and is never ready to get up when the alarm goes off. Coco does not need a backyard as he is happy to do his business outside on a walk. He has had exposure to another small female dog (his age and size) and loves to play and run with her; however, he does not need another dog in his forever home as he enjoys all of the attention himself. |
from gi.repository import Gtk
from gaphas.view import GtkView
class ZoomData:
x0: int
y0: int
sx: float
sy: float
def zoom_tool(view: GtkView) -> Gtk.GestureZoom:
"""Create a zoom tool as a Gtk.Gesture.
Note: we need to keep a reference to this gesture, or else it will be destroyed.
"""
zoom_data = ZoomData()
gesture = (
Gtk.GestureZoom.new(view)
if Gtk.get_major_version() == 3
else Gtk.GestureZoom.new()
)
gesture.set_propagation_phase(Gtk.PropagationPhase.CAPTURE)
gesture.connect("begin", on_begin, zoom_data)
gesture.connect("scale-changed", on_scale_changed, zoom_data)
return gesture
def on_begin(
gesture: Gtk.GestureZoom,
sequence: None,
zoom_data: ZoomData,
) -> None:
_, zoom_data.x0, zoom_data.y0 = gesture.get_point(sequence)
view = gesture.get_widget()
zoom_data.sx = view.matrix[0]
zoom_data.sy = view.matrix[3]
def on_scale_changed(
gesture: Gtk.GestureZoom, scale: float, zoom_data: ZoomData
) -> None:
if zoom_data.sx * scale < 0.2:
scale = 0.2 / zoom_data.sx
elif zoom_data.sx * scale > 20.0:
scale = 20.0 / zoom_data.sx
view = gesture.get_widget()
m = view.matrix
sx = m[0]
sy = m[3]
ox = (m[4] - zoom_data.x0) / sx
oy = (m[5] - zoom_data.y0) / sy
dsx = zoom_data.sx * scale / sx
dsy = zoom_data.sy * scale / sy
m.translate(-ox, -oy)
m.scale(dsx, dsy)
m.translate(+ox, +oy)
view.request_update((), view.model.get_all_items())
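# Usage sketch (an assumption for illustration: a gaphas Canvas as the
# view's model). Hold on to the returned gesture so it is not
# garbage-collected, as noted in the zoom_tool docstring:
#
#   from gaphas import Canvas
#   view = GtkView(model=Canvas())
#   zoom_gesture = zoom_tool(view)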
|
Below are a few of my favorite places from when I lived in Ho Chi Minh City.
Au Parc - the cutest restaurant off the park. My home away from home. The table mustard is to die for.
Annam Gourmet Market - on the second level they make great sandwiches and cheese plates.
Black Cat - if you want a taste of Western food, they have huge hamburgers.
Le Pub - Vietnamese and Western food. Located on a little side street in Pham Ngu Lao.
They also have trivia once a week. |
#!/usr/bin/env python
import re
import bson
import logging
from functools import wraps
from happymongo import HapPyMongo
from flask.ext.github import GitHub
from flask import (Flask, session, g, request, url_for, redirect, flash,
render_template, abort)
try:
from cPickle import dumps as pickle_dumps
from cPickle import loads as pickle_loads
except ImportError:
from pickle import dumps as pickle_dumps
from pickle import loads as pickle_loads
app = Flask('turquoise')
app.config.from_envvar('TURQUOISE_CONFIG')
github = GitHub(app)
mongo, db = HapPyMongo(app)
@app.template_filter()
def re_pattern(value):
    # Stored regexes are pickled compiled patterns; fall back to the raw value.
    try:
        return pickle_loads(value.encode('utf-8')).pattern
    except Exception:
        return value
@github.access_token_getter
def token_getter():
user = g.user
if user is not None:
return user['github_access_token']
def login_required(f):
@wraps(f)
def wrapped(*args, **kwargs):
user_id = session.get('user_id')
if not user_id:
return redirect(url_for('login'))
return f(*args, **kwargs)
return wrapped
@app.before_first_request
def logger():
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
@app.errorhandler(500)
def internal_server_error(e):
    # Log the exception; calling abort(500) from a 500 handler would recurse,
    # so return a plain 500 response instead.
    app.logger.exception(e)
    return 'Internal Server Error', 500
@app.route('/')
def index():
return render_template('index.html', repos=app.config['GITHUB_REPOS'])
@app.route('/login')
def login():
return github.authorize(scope='user:email')
@app.route('/login/authorized')
@github.authorized_handler
def authorized(oauth_token):
    if oauth_token is None:
        flash('Authorization failed.', 'danger')
        return redirect(url_for('index'))
g.user = db.users.find_one({'github_access_token': oauth_token})
if not g.user:
g.user = {
'github_access_token': oauth_token,
'regex': '',
'files': [],
'notified': {},
'extra_contact': '',
'self_notify': False,
}
details = github.get('user')
existing = db.users.find_one({'login': details['login']})
if not existing:
g.user.update(details)
g.user['_id'] = db.users.insert(g.user, manipulate=True)
else:
existing['github_access_token'] = oauth_token
existing.update(details)
db.users.update({'_id': existing['_id']},
{'$set': existing})
g.user = existing
else:
details = github.get('user')
g.user.update(details)
db.users.update({'_id': bson.ObjectId(g.user['_id'])},
{'$set': details})
session['user_id'] = str(g.user['_id'])
return redirect(url_for('profile'))
@app.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = db.users.find_one({'_id': bson.ObjectId(session['user_id'])})
@app.route('/profile')
@login_required
def profile():
return render_template('profile.html', repos=app.config['GITHUB_REPOS'])
@app.route('/profile/contact', methods=['POST'])
@login_required
def contact():
partial = {
'extra_contact': request.form.get('contact'),
'self_notify': bool(request.form.get('self_notify'))
}
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$set': partial})
return redirect(url_for('profile'))
@app.route('/profile/file/add/<path:filename>')
@login_required
def file_add(filename):
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$push': {'files': filename}})
return redirect(url_for('profile'))
@app.route('/profile/file/delete/<path:filename>')
@login_required
def file_delete(filename):
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$pull': {'files': filename}})
return redirect(url_for('profile'))
@app.route('/profile/regex', methods=['POST'])
@login_required
def regex():
try:
compiled = re.compile(request.form.get('regex'))
except re.error as e:
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$set': {'regex': request.form.get('regex')}})
flash('Invalid regular expression: %s' % e, 'danger')
else:
pickled = pickle_dumps(compiled)
db.users.update({'_id': bson.ObjectId(session['user_id'])},
{'$set': {'regex': pickled}})
return redirect(url_for('profile'))
if __name__ == '__main__':
app.run('0.0.0.0', 5000, debug=True)
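The app reads its settings from the file named by the TURQUOISE_CONFIG environment variable. A minimal sketch of such a config file with placeholder values: GITHUB_CLIENT_ID and GITHUB_CLIENT_SECRET are the keys GitHub-Flask expects, SECRET_KEY is required for sessions, and GITHUB_REPOS is the value the views above pass to the templates (its exact shape is an assumption here):

# turquoise.cfg -- hypothetical example; export TURQUOISE_CONFIG=/path/to/turquoise.cfg
SECRET_KEY = 'change-me'
GITHUB_CLIENT_ID = 'your-github-oauth-client-id'
GITHUB_CLIENT_SECRET = 'your-github-oauth-client-secret'
GITHUB_REPOS = ['owner/repo']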
|
import boto3
import re
from flask import render_template
from flask_mail import Message
from boto3.dynamodb.conditions import Key, Attr
from utils.ResponseCreation import ControllerResponse
from utils import db_utils as dbUtils
import utils.MentiiLogging as MentiiLogging
import uuid
import hashlib
import class_ctrl
from flask import g
def sendForgotPasswordEmail(httpOrigin, jsonData, mailer, dbInstance):
email = jsonData.get('email', None)
resetPasswordId = str(uuid.uuid4())
success = addResetPasswordIdToUser(email, resetPasswordId, dbInstance)
  if success:
host = getProperEnvironment(httpOrigin)
url = host + '/reset-password/{0}'.format(resetPasswordId)
message = render_template('forgotPasswordEmail.html', url=url)
#Build Message
msg = Message('Mentii: Reset Password', recipients=[email], extra_headers={'Content-Transfer-Encoding': 'quoted-printable'}, html=message)
#Send Email
mailer.send(msg)
def addResetPasswordIdToUser(email, resetPasswordId, dbInstance):
  success = False
table = dbUtils.getTable('users', dbInstance)
if table is not None:
user = getUserByEmail(email,dbInstance)
if user is not None:
jsonData = {
'Key': {'email': email},
'UpdateExpression': 'SET resetPasswordId = :a',
'ExpressionAttributeValues': { ':a': resetPasswordId },
'ReturnValues' : 'UPDATED_NEW'
}
dbUtils.updateItem(jsonData, table)
success = True
return success
def resetUserPassword(jsonData, dbInstance):
response = ControllerResponse()
email = jsonData.get('email', None)
password = jsonData.get('password', None)
resetPasswordId = jsonData.get('id', None)
if email is not None and password is not None and resetPasswordId is not None:
res = updatePasswordForEmailAndResetId(email, password, resetPasswordId, dbInstance)
if res is not None:
response.addToPayload('status', 'Success')
else:
response.addError('Failed to Reset Password', 'We were unable to update the password for this account.')
else:
response.addError('Failed to Reset Password', 'We were unable to update the password for this account.')
return response
def updatePasswordForEmailAndResetId(email, password, resetPasswordId, dbInstance):
res = None
user = getUserByEmail(email, dbInstance)
if user is not None:
storedResetPasswordId = user.get('resetPasswordId', None)
if storedResetPasswordId == resetPasswordId:
table = dbUtils.getTable('users', dbInstance)
if table is not None:
hashedPassword = hashPassword(password)
jsonData = {
'Key': {'email': email},
'UpdateExpression': 'SET password = :a REMOVE resetPasswordId',
'ExpressionAttributeValues': { ':a': hashedPassword },
'ReturnValues' : 'UPDATED_NEW'
}
res = dbUtils.updateItem(jsonData, table)
return res
def getProperEnvironment(httpOrigin):
host = ''
if httpOrigin.find('stapp') != -1:
host = 'http://stapp.mentii.me'
elif httpOrigin.find('app') != -1:
host = 'http://app.mentii.me'
else:
host = 'http://localhost:3000'
return host
def register(httpOrigin, jsonData, mailer, dbInstance):
response = ControllerResponse()
if not validateRegistrationJSON(jsonData):
response.addError('Register Validation Error', 'The json data did not have an email or did not have a password')
else:
email = parseEmail(jsonData)
password = parsePassword(jsonData)
if not isEmailValid(email):
response.addError('Email invalid', 'The email is invalid')
if not isPasswordValid(password):
response.addError('Password Invalid', 'The password is invalid')
if isEmailInSystem(email, dbInstance) and isUserActive(getUserByEmail(email, dbInstance)):
response.addError('Registration Failed', 'We were unable to register this user')
if not response.hasErrors():
hashedPassword = hashPassword(parsePassword(jsonData))
activationId = addUserAndSendEmail(httpOrigin, email, hashedPassword, mailer, dbInstance)
if activationId is None:
response.addError('Activation Id is None', 'Could not create an activation Id')
return response
def hashPassword(password):
  # NOTE: unsalted MD5 is not suitable for password storage in production;
  # a salted KDF such as bcrypt or PBKDF2 would be a safer choice.
  return hashlib.md5(password).hexdigest()
def validateRegistrationJSON(jsonData):
  '''
  Validate that the JSON object contains
  email and password attributes
  '''
  if jsonData is not None:
    return 'password' in jsonData and 'email' in jsonData
  return False
def parseEmail(jsonData):
try:
email = jsonData['email']
return email
except Exception as e:
MentiiLogging.getLogger().exception(e)
return None
def parsePassword(jsonData):
try:
password = jsonData['password']
return password
except Exception as e:
MentiiLogging.getLogger().exception(e)
return None
def isEmailValid(email):
  '''
  Validate that the email matches the
  required format.
  '''
  emailRegex = re.compile(r"[^@]+@[^@]+\.[^@]+")
  return emailRegex.match(email) is not None
def isPasswordValid(password):
return len(password) >= 8
def addUserAndSendEmail(httpOrigin, email, password, mailer, dbInstance):
  activationId = str(uuid.uuid4())
  table = dbUtils.getTable('users', dbInstance)
  if table is None:
    MentiiLogging.getLogger().error('Unable to get table users in addUserAndSendEmail')
    return None
  jsonData = {
    'email': email,
    'password': password,
    'activationId': activationId,
    'active': 'F',
    'userRole' : "student"
  }
  #This will change an existing user with the same email.
  response = dbUtils.putItem(jsonData, table)
  if response is None:
    MentiiLogging.getLogger().error('Unable to add user to table users in addUserAndSendEmail')
    return None
  try:
    sendEmail(httpOrigin, email, activationId, mailer)
  except Exception as e:
    MentiiLogging.getLogger().exception(e)
  return activationId
def deleteUser(email, dbInstance):
table = dbUtils.getTable('users', dbInstance)
key = {'email': email}
response = dbUtils.deleteItem(key, table)
return response
def sendEmail(httpOrigin, email, activationId, mailer):
'''
Create a message and send it from our email to
the passed in email. The message should contain
a link built with the activationId
'''
if activationId is None:
return
#Change the URL to the appropriate environment
host = getProperEnvironment(httpOrigin)
url = host + '/activation/{0}'.format(activationId)
message = render_template('registrationEmail.html', url=url)
#Build Message
msg = Message('Mentii: Thank You for Creating an Account!', recipients=[email],
extra_headers={'Content-Transfer-Encoding': 'quoted-printable'}, html=message)
#Send Email
mailer.send(msg)
def isEmailInSystem(email, dbInstance):
user = getUserByEmail(email, dbInstance)
  return user is not None and 'email' in user
def activate(activationId, dbInstance):
response = ControllerResponse()
table = dbUtils.getTable('users', dbInstance)
items = []
if table is None:
MentiiLogging.getLogger().error('Unable to get table users in activate')
response.addError('Could not access table. Error', 'The DB did not give us the table')
return response
#Scan for the email associated with this activationId
scanResponse = dbUtils.scanFilter('activationId', activationId, table)
if scanResponse is not None:
#scanResponse is a dictionary that has a list of 'Items'
items = scanResponse['Items']
if not items or 'email' not in items[0].keys():
response.addError('No user with activationid', 'The DB did not return a user with the passed in activationId')
else:
email = items[0]['email']
jsonData = {
'Key': {'email': email},
'UpdateExpression': 'SET active = :a',
'ExpressionAttributeValues': { ':a': 'T' },
'ReturnValues' : 'UPDATED_NEW'
}
#Update using the email we have
res = dbUtils.updateItem(jsonData, table)
response.addToPayload('status', 'Success')
return response
def isUserActive(user):
  return user is not None and user.get('active') == 'T'
def getUserByEmail(email, dbInstance):
user = None
table = dbUtils.getTable('users', dbInstance)
if table is None:
MentiiLogging.getLogger().error('Unable to get table users in getUserByEmail')
else:
key = {'Key' : {'email': email}}
result = dbUtils.getItem(key, table)
if result is None:
MentiiLogging.getLogger().error('Unable to get the user with email: ' + email + ' in getUserByEmail ')
elif 'Item' in result.keys():
user = result['Item']
return user
def changeUserRole(jsonData, dbInstance, adminRole=None):
response = ControllerResponse()
  #g will not be available during testing
  #and adminRole will need to be passed to the function
if g: # pragma: no cover
adminRole = g.authenticatedUser['userRole']
  #adminRole is confirmed here in case changeUserRole is called from somewhere
#other than app.py changeUserRole()
if adminRole != 'admin':
response.addError('Role Error', 'Only admins can change user roles')
elif 'email' not in jsonData.keys() or 'userRole' not in jsonData.keys():
response.addError('Key Missing Error', 'Email or role missing from json data')
else:
email = jsonData['email']
userRole = jsonData['userRole']
userTable = dbUtils.getTable('users', dbInstance)
if userTable is None:
MentiiLogging.getLogger().error('Unable to get table "users" in changeUserRole')
response.addError('No Access to Data', 'Unable to get data from database')
else:
if userRole != 'student' and userRole != 'teacher' and userRole != 'admin':
MentiiLogging.getLogger().error('Invalid role: ' + userRole + ' specified. Unable to change user role')
        response.addError('Invalid Role Type', 'Invalid role specified')
else:
data = {
'Key': {'email': email},
'UpdateExpression': 'SET userRole = :ur',
'ExpressionAttributeValues': { ':ur': userRole },
'ReturnValues' : 'UPDATED_NEW'
}
result = dbUtils.updateItem(data, userTable)
if result is None:
MentiiLogging.getLogger().error('Unable to update the user with email: ' + email + ' in changeUserRole')
response.addError('Result Update Error', 'Could not update the user role in database')
else:
response.addToPayload('Result:', result)
response.addToPayload('success', 'true')
return response
def getRole(userEmail, dynamoDBInstance):
  '''
  Returns the role of the user whose email is passed. If we are unable to get
  this information from the DB the role None is returned. Calling code must
  grant only student permissions in this case.
  '''
userRole = None
table = dbUtils.getTable('users', dynamoDBInstance)
if table is None:
MentiiLogging.getLogger().error('Could not get user table in getUserRole')
else:
request = {"Key" : {"email": userEmail}, "ProjectionExpression": "userRole"}
res = dbUtils.getItem(request, table)
if res is None or 'Item' not in res:
MentiiLogging.getLogger().error('Could not get role for user ' + userEmail)
else:
userRole = res['Item']['userRole']
return userRole
def joinClass(jsonData, dynamoDBInstance, email=None, userRole=None):
response = ControllerResponse()
  #g will not be available during testing
  #and email will need to be passed to the function
if g: # pragma: no cover
email = g.authenticatedUser['email']
userRole = g.authenticatedUser['userRole']
if 'code' not in jsonData.keys() or not jsonData['code']:
response.addError('Key Missing Error', 'class code missing from data')
elif userRole == 'teacher' or userRole == 'admin':
if class_ctrl.isCodeInTaughtList(jsonData, dynamoDBInstance, email):
response.addError('Role Error', 'Teachers cannot join their taught class as a student')
else:
classCode = jsonData['code']
addDataToClassAndUser(classCode, email, response, dynamoDBInstance)
else:
classCode = jsonData['code']
addDataToClassAndUser(classCode, email, response, dynamoDBInstance)
return response
def addDataToClassAndUser(classCode, email, response, dynamoDBInstance):
updatedClassCodes = addClassCodeToStudent(email, classCode, dynamoDBInstance)
if not updatedClassCodes:
response.addError('joinClass call Failed', 'Unable to update user data')
else:
updatedClass = addStudentToClass(classCode, email, dynamoDBInstance)
if not updatedClass:
response.addError('joinClass call Failed', 'Unable to update class data')
else:
response.addToPayload('title', updatedClass['title'])
response.addToPayload('code', updatedClass['code'])
def leaveClass(jsonData, dynamoDBInstance, email=None):
response = ControllerResponse()
data = None
if g: # pragma: no cover
email = g.authenticatedUser['email']
if 'code' not in jsonData.keys() or not jsonData['code']:
response.addError('Key Missing Error', 'class code missing from data')
else:
classCode = jsonData['code']
data = {
'email': email,
'classCode': classCode
}
return class_ctrl.removeStudent(dynamoDBInstance, data, response=response, userRole=None)
def addClassCodeToStudent(email, classCode, dynamoDBInstance):
userTable = dbUtils.getTable('users', dynamoDBInstance)
if userTable:
codeSet = set([classCode])
addClassToUser = {
'Key': {'email': email},
'UpdateExpression': 'ADD classCodes :i',
'ExpressionAttributeValues': { ':i': codeSet },
'ReturnValues' : 'UPDATED_NEW'
}
res = dbUtils.updateItem(addClassToUser, userTable)
if ( res and
'Attributes' in res and
'classCodes' in res['Attributes'] and
classCode in res['Attributes']['classCodes']
):
return res['Attributes']['classCodes']
return None
def addStudentToClass(classCode, email, dynamoDBInstance):
classTable = dbUtils.getTable('classes', dynamoDBInstance)
if classTable:
emailSet = set([email])
addUserToClass = {
'Key': {'code': classCode},
'UpdateExpression': 'ADD students :i',
'ExpressionAttributeValues': { ':i': emailSet },
'ReturnValues' : 'ALL_NEW'
}
res = dbUtils.updateItem(addUserToClass, classTable)
if ( res and
'Attributes' in res and
'students' in res['Attributes'] and
email in res['Attributes']['students'] and
'title' in res['Attributes']
):
return res['Attributes']
return None
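A hedged sketch of how the registration entry point above might be driven; the Flask request handling, mailer, and DynamoDB wiring live elsewhere in the project, so every name besides register and ControllerResponse's methods is an assumption:

# Hypothetical caller for register().
jsonData = {'email': 'student@example.com', 'password': 'atleast8chars'}
response = register(httpOrigin, jsonData, mailer, dbInstance)
if response.hasErrors():
  pass  # surface response errors to the client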
|
Kanths IE Consultants Pvt. Ltd. is a result-oriented company with a distinct approach: tailoring education and career paths to suit each individual's profile within whatever parameters apply in a given case, be they educational, financial or personal.
Kanths IE Consultants was formed in 2002 by a group of young and energetic entrepreneurs and quickly became a reputed organization with dedicated, result-oriented staff who place great focus on students' education needs.
Kanths IE Consultants is a one-stop solution for all your international education needs. The foundation of our activities lies in assisting students to make the right choice when pursuing further education overseas.
Kanths IE Consultants provides information on higher education in countries such as Canada, the USA, Australia, France, the UK, Ireland, New Zealand and Switzerland, and has a strong command of education systems worldwide. The company strives to make overseas education accessible to every student by keeping it affordable and free of cumbersome procedures.
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from .package import EggPackage, SourcePackage, WheelPackage
class Sorter(object):
DEFAULT_PACKAGE_PRECEDENCE = (
WheelPackage,
EggPackage,
SourcePackage,
)
@classmethod
def package_type_precedence(cls, package, precedence=DEFAULT_PACKAGE_PRECEDENCE):
for rank, package_type in enumerate(reversed(precedence)):
if isinstance(package, package_type):
return rank
# If we do not recognize the package, it gets lowest precedence
return -1
@classmethod
def package_precedence(cls, package, precedence=DEFAULT_PACKAGE_PRECEDENCE):
return (
package.version, # highest version
cls.package_type_precedence(package, precedence=precedence), # type preference
package.local) # prefer not fetching over the wire
def __init__(self, precedence=None):
self._precedence = precedence or self.DEFAULT_PACKAGE_PRECEDENCE
  def sort(self, packages, filter=True):
    """Return a sorted list of (possibly filtered) packages, best match first."""
    key = lambda package: self.package_precedence(package, self._precedence)
    return [
      package for package in sorted(packages, key=key, reverse=True)
      if not filter or any(isinstance(package, package_cls) for package_cls in self._precedence)]
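A short usage sketch; it assumes candidates is a list of the package objects this module sorts (each exposing the version and local attributes that package_precedence reads):

# Wheels are preferred over eggs, eggs over source; within a type, higher
# versions and locally available packages win.
sorter = Sorter()
best_first = sorter.sort(candidates)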
|
The Friends of Torkington Park are holding a Christmas Carol Concert outside the Bowling Hut on Torkington Park on Saturday 7th December from 3.30 until 5pm. Everyone is welcome, so come on down.
Torkington Park Duck Race! Spread the word!
Torkington Park is the main park in the Hazel Grove area and the home of the Secret Garden. The formal flower garden is a great place to walk round or just sit down and enjoy the wildlife. |
from __future__ import absolute_import
from django.db import transaction
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.bases.organization import (
OrganizationEndpoint, OrganizationPermission
)
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import (
AuditLogEntryEvent, AuthIdentity, AuthProvider, OrganizationMember,
OrganizationMemberType
)
ERR_NO_AUTH = 'You cannot remove this member with an unauthenticated API request.'
ERR_INSUFFICIENT_ROLE = 'You cannot remove a member who has more access than you.'
ERR_INSUFFICIENT_SCOPE = 'You are missing the member:delete scope.'
ERR_ONLY_OWNER = 'You cannot remove the only remaining owner of the organization.'
ERR_UNINVITABLE = 'You cannot send an invitation to a user who is already a full member.'
class OrganizationMemberSerializer(serializers.Serializer):
reinvite = serializers.BooleanField()
class RelaxedOrganizationPermission(OrganizationPermission):
scope_map = {
'GET': ['member:read', 'member:write', 'member:delete'],
'POST': ['member:write', 'member:delete'],
'PUT': ['member:write', 'member:delete'],
# DELETE checks for role comparison as you can either remove a member
# with a lower access role, or yourself, without having the req. scope
'DELETE': ['member:read', 'member:write', 'member:delete'],
}
class OrganizationMemberDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedOrganizationPermission]
def _get_member(self, request, organization, member_id):
if member_id == 'me':
queryset = OrganizationMember.objects.filter(
organization=organization,
user__id=request.user.id,
)
else:
queryset = OrganizationMember.objects.filter(
organization=organization,
id=member_id,
)
return queryset.select_related('user').get()
    def _is_only_owner(self, member):
        if member.type != OrganizationMemberType.OWNER:
            return False
        # Any other owner with global access means this member is not the only one.
        queryset = OrganizationMember.objects.filter(
            organization=member.organization_id,
            type=OrganizationMemberType.OWNER,
            has_global_access=True,
            user__isnull=False,
        ).exclude(id=member.id)
        return not queryset.exists()
def put(self, request, organization, member_id):
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
serializer = OrganizationMemberSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(status=400)
has_sso = AuthProvider.objects.filter(
organization=organization,
).exists()
result = serializer.object
# XXX(dcramer): if/when this expands beyond reinvite we need to check
# access level
if result.get('reinvite'):
if om.is_pending:
om.send_invite_email()
elif has_sso and not getattr(om.flags, 'sso:linked'):
om.send_sso_link_email()
else:
# TODO(dcramer): proper error message
return Response({'detail': ERR_UNINVITABLE}, status=400)
return Response(status=204)
def delete(self, request, organization, member_id):
if request.user.is_superuser:
authorizing_access = OrganizationMemberType.OWNER
elif request.user.is_authenticated():
try:
authorizing_access = OrganizationMember.objects.get(
organization=organization,
user=request.user,
has_global_access=True,
).type
except OrganizationMember.DoesNotExist:
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
elif request.access.has_scope('member:delete'):
authorizing_access = OrganizationMemberType.OWNER
else:
return Response({'detail': ERR_INSUFFICIENT_SCOPE}, status=400)
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if om.type < authorizing_access:
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
if self._is_only_owner(om):
return Response({'detail': ERR_ONLY_OWNER}, status=403)
audit_data = om.get_audit_log_data()
with transaction.atomic():
AuthIdentity.objects.filter(
user=om.user,
auth_provider__organization=organization,
).delete()
om.delete()
self.create_audit_entry(
request=request,
organization=organization,
target_object=om.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_REMOVE,
data=audit_data,
)
return Response(status=204)
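For context, a hedged sketch of how the reinvite branch of put() is exercised; the concrete URL pattern is defined in Sentry's routing elsewhere, so the path shown here is an assumption:

# PUT <organization-member-details-url> with body {"reinvite": true}
# re-sends the invite email for a pending member, or the SSO link email
# when SSO is enabled and the member is not yet linked.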
|
Comments: Backup Camera, Bluetooth, 3rd Row Seating, Heated Front Seats, Satellite Radio, Blind Spot Assist, Lane Assist, Remote Engine Start, HD Radio, Hard Drive Media Storage, Keyless Start, Multi-Zone Air Conditioning, Rear Air Conditioning, Automatic Headlights, Keyless Entry, Rear Spoiler, Tire Pressure Monitors, Driver Airbag, Knee Airbag, and Passenger Airbag. This White Diamond Pearl 2019 Honda Odyssey EX is priced to sell fast! David McDavid Honda of Irving prides itself on value pricing its vehicles and exceeding all customer expectations! The next step? Give us a call to confirm availability and schedule a hassle-free test drive! We are located at: 3700 W Airport Fwy, Irving, TX 75062. Contact our Internet Department at our Toll Free Number for your No Hassle Price. David McDavid of Irving has been serving the Dallas Fort Worth Metroplex for over 70 years at one location. * All New Honda website pricing excludes TT&L and Dealer installed equipment.
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Android environment implementation."""
from typing import Any, Dict
from absl import logging
from android_env.components import coordinator as coordinator_lib
import dm_env
import numpy as np
class AndroidEnv(dm_env.Environment):
"""An RL environment that interacts with Android apps."""
def __init__(self, coordinator: coordinator_lib.Coordinator):
"""Initializes the state of this AndroidEnv object."""
self._coordinator = coordinator
self._latest_action = {}
self._latest_observation = {}
self._latest_extras = {}
self._reset_next_step = True
logging.info('Action spec: %s', self.action_spec())
logging.info('Observation spec: %s', self.observation_spec())
logging.info('Task extras spec: %s', self.task_extras_spec())
def action_spec(self) -> Dict[str, dm_env.specs.Array]:
return self._coordinator.action_spec()
def observation_spec(self) -> Dict[str, dm_env.specs.Array]:
return self._coordinator.observation_spec()
def task_extras_spec(self) -> Dict[str, dm_env.specs.Array]:
return self._coordinator.task_extras_spec()
@property
def raw_action(self):
return self._latest_action
@property
def raw_observation(self):
return self._latest_observation
def android_logs(self) -> Dict[str, Any]:
return self._coordinator.get_logs()
def reset(self) -> dm_env.TimeStep:
"""Resets the environment for a new RL episode."""
logging.info('Resetting AndroidEnv...')
# Reset state of the environment.
self._coordinator.reset_environment_state()
# Execute selected action (None when resetting).
obs, _, extras, _ = self._coordinator.execute_action(action=None)
# Process relevant information.
if obs is not None:
self._latest_observation = obs.copy()
self._latest_extras = extras.copy()
self._latest_action = {}
self._reset_next_step = False
logging.info('Done resetting AndroidEnv.')
logging.info('************* NEW EPISODE *************')
return dm_env.TimeStep(
step_type=dm_env.StepType.FIRST,
observation=self._latest_observation,
reward=0.0,
discount=0.0)
def step(self, action: Dict[str, np.ndarray]) -> dm_env.TimeStep:
"""Takes a step in the environment."""
# Check if it's time to reset the episode.
if self._reset_next_step:
return self.reset()
# Execute selected action.
obs, reward, extras, episode_end = self._coordinator.execute_action(action)
# Process relevant information.
if obs is not None:
self._latest_observation = obs.copy()
self._latest_extras = extras.copy()
self._latest_action = action.copy()
self._reset_next_step = episode_end
# Return timestep with reward and observation just computed.
if episode_end:
return dm_env.termination(
observation=self._latest_observation, reward=reward)
else:
return dm_env.transition(
observation=self._latest_observation, reward=reward, discount=0.0)
def task_extras(self, latest_only: bool = True) -> Dict[str, np.ndarray]:
"""Returns latest task extras."""
task_extras = {}
for key, spec in self.task_extras_spec().items():
if key in self._latest_extras:
extra_values = self._latest_extras[key].astype(spec.dtype)
for extra in extra_values:
spec.validate(extra)
task_extras[key] = extra_values[-1] if latest_only else extra_values
return task_extras
def close(self) -> None:
"""Cleans up running processes, threads and local files."""
logging.info('Cleaning up AndroidEnv...')
if hasattr(self, '_coordinator'):
self._coordinator.close()
logging.info('Done cleaning up AndroidEnv.')
def __del__(self) -> None:
self.close()
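A minimal interaction-loop sketch; constructing the Coordinator (emulator and task configuration) is out of scope here, so assume coordinator is an already-configured coordinator_lib.Coordinator:

# Drive one episode with a placeholder policy.
env = AndroidEnv(coordinator)
timestep = env.reset()
while not timestep.last():
    action = ...  # build a dict of np.ndarrays matching env.action_spec()
    timestep = env.step(action)
env.close()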
|
Enjoy this new Artbooking video from Close To My Heart.
Follow Up Friday: Ariana Gets a Boost!
Throw Back Thursday: Swapping Pins! |