import re
import copy
import json
import arrow
import datetime
# Elasticsearch libraries, certifi required by Elasticsearch
import elasticsearch
from elasticsearch_dsl import Search
from elasticsearch_dsl.query import Q
from elasticsearch_dsl.connections import connections
import certifi
from common import Message, utils
import config
from config.es_config import ES_COURSE_INDEX_PREFIX, ES_FCE_INDEX
##
# @brief The Searcher object that parses input and generates queries.
##
class Searcher(object):
_doc_type = None
_default_size = 5
#
# @brief init
#
# @param self The object
# @param raw_query The raw query
# @param index The index
# @param size The size
# @param sort sort is either None or a list
#
def __init__(self, raw_query, index=None, size=_default_size, sort=None):
self.raw_query = copy.deepcopy(raw_query)
self.index = index
self.size = size
self.doc_type = self._doc_type
self.sort = sort
def __repr__(self):
return "<Searcher Object: raw_query={}>".format(repr(self.raw_query))
@property
def index(self):
return self._index
@index.setter
def index(self, value):
self._index = value
def execute(self):
response = self.fetch(self.generate_query(), self.index,
size=self.size, doc_type=self.doc_type,
sort=self.sort)
# if config.settings.DEBUG:
# print("[DEBUG] ES response:")
# print(json.dumps(response.to_dict(), indent=2))
return response
@staticmethod
def fetch(query, index, size=5, doc_type=None, sort=None):
s = Search(index=index, doc_type=doc_type).query(query).extra(size=size)
if sort:
s = s.sort(*sort)
try:
response = s.execute()
except elasticsearch.exceptions.NotFoundError as e:
# print(formatErrMsg(e, "ES"))
response = e.info
except elasticsearch.exceptions.RequestError as e:
# print(formatErrMsg(e, "ES"))
response = e.info
except elasticsearch.exceptions.TransportError as e:
# print(formatErrMsg(e, "ES"))
response = e.info
return response
##
# @brief Generate the query for the database.
##
# @return (dict) The query for querying the database.
##
def generate_query(self):
query = Q()
return query
class FCESearcher(Searcher):
_doc_type = 'fce'
_default_size = 5
def __init__(self, raw_query, index=None, size=_default_size, sort=None):
super().__init__(raw_query, index=index, size=size, sort=sort)
@property
def index(self):
return self._index
@index.setter
def index(self, value):
self._index = value
def generate_query(self):
raw_query = self.raw_query
query = Q()
if 'courseid' in raw_query:
courseid = raw_query['courseid'][0]
query &= Q('term', courseid=courseid)
if 'instructor' in raw_query:
instructor = raw_query['instructor'][0]
query &= Q('match', instructor={'query': instructor, 'operator': 'and'})
if config.settings.DEBUG:
print(json.dumps(query.to_dict(), indent=2))
print("[DEBUG] max size: {}, index: {}".format(self.size, self.index))
return query
class CourseSearcher(Searcher):
_doc_type = 'course'
_default_size = 5
def __init__(self, raw_query, index=None, size=_default_size):
super().__init__(raw_query, index, size)
@property
def index(self):
return self._index
    # @brief Sets the index from the short representation of a term (e.g. f17)
    #        to the corresponding ES index.
@index.setter
def index(self, value):
if value is None:
# Everything
self._index = ES_COURSE_INDEX_PREFIX + '*'
elif value == 'current':
# Current semester
self._index = utils.get_current_course_index()
        elif re.match(r'^(f|s|m1|m2)\d{2}$', value):
# Match a semester, e.g. f17 or m217
self._index = ES_COURSE_INDEX_PREFIX + value
else:
# Unknown index, use as is
self._index = value
def generate_query(self):
raw_query = self.raw_query
query = Q()
# TODO: use the English analyser.
        # TODO BUG: text and courseid present at the same time cause an
        # empty return value
if 'text' in raw_query:
text = raw_query['text'][0]
text_query = Q('bool',
should=[
Q('match', name=text),
Q('match', desc=text)
]
)
query &= text_query
else:
if 'name' in raw_query:
name = raw_query['name'][0]
name_query = Q('bool',
must=Q('match', name=name)
)
query &= name_query
if 'desc' in raw_query:
desc = raw_query['desc'][0]
desc_query = Q('bool',
must=Q('match', desc=desc)
)
query &= desc_query
if 'courseid' in raw_query:
courseid = raw_query['courseid'][0]
if self.index is None:
current_semester = utils.get_semester_from_date(
datetime.datetime.today())
id_query = Q('bool',
must=Q('term', id=courseid),
should=Q('match', semester=current_semester)
)
else:
id_query = Q('term', id=courseid)
query &= id_query
# Declare the variables to store the temporary nested queries
lec_nested_queries = {}
sec_nested_queries = {}
lec_name_query = Q()
sec_name_query = Q()
if 'instructor' in raw_query:
instructor = " ".join(raw_query['instructor'])
_query_obj = {'query': instructor,
'operator': 'and'}
if 'instructor_fuzzy' in raw_query:
_query_obj['fuzziness'] = 'AUTO'
lec_name_query = Q('match',
lectures__instructors=_query_obj)
sec_name_query = Q('match',
sections__instructors=_query_obj)
# TODO: check if DH 100 would give DH 2135 and PH 100
# see if multilevel nesting is needed
if 'building' in raw_query:
building = raw_query['building'][0].upper()
lec_building_query = Q('match', lectures__times__building=building)
sec_building_query = Q('match', sections__times__building=building)
lec_nested_queries['lec_building_query'] = lec_building_query
sec_nested_queries['sec_building_query'] = sec_building_query
if 'room' in raw_query:
room = raw_query['room'][0].upper()
lec_room_query = Q('match', lectures__times__room=room)
sec_room_query = Q('match', sections__times__room=room)
lec_nested_queries['lec_room_query'] = lec_room_query
sec_nested_queries['sec_room_query'] = sec_room_query
if 'datetime' in raw_query:
# Get day and time from the datetime object
# raw_query['datetime'] is of type [arrow.arrow.Arrow]
date_time = raw_query['datetime'][0].to('America/New_York')
day = date_time.isoweekday() % 7
time = date_time.time().strftime("%I:%M%p")
delta_time = datetime.timedelta(minutes=raw_query['timespan'][0])
shifted_time = (date_time + delta_time).time().strftime("%I:%M%p")
# NOTE: Known bug: if the time spans across two days, it would
# give a wrong result because day is calculated based
# on the begin time
# Construct the query based on day and time
_times_begin_query = {'lte': shifted_time, 'format': 'hh:mma'}
_times_end_query = {'gt': time, 'format': 'hh:mma'}
lec_time_query = Q('bool', must=[Q('match', lectures__times__days=day),
Q('range', lectures__times__begin=_times_begin_query),
Q('range', lectures__times__end=_times_end_query)])
sec_time_query = Q('bool', must=[Q('match', sections__times__days=day),
Q('range', sections__times__begin=_times_begin_query),
Q('range', sections__times__end=_times_end_query)])
lec_nested_queries['lec_time_query'] = lec_time_query
sec_nested_queries['sec_time_query'] = sec_time_query
# Combine all the nested queries
_lec_temp = Q()
_sec_temp = Q()
for key, value in lec_nested_queries.items():
if _lec_temp is None:
_lec_temp = value
else:
_lec_temp &= value
for key, value in sec_nested_queries.items():
if _sec_temp is None:
_sec_temp = value
else:
_sec_temp &= value
combined_lec_query = Q('nested',
query=(
Q('nested',
query=(_lec_temp),
path='lectures.times') &
lec_name_query
),
path='lectures',
inner_hits={}
)
combined_sec_query = Q('nested',
query=(
Q('nested',
query=(_sec_temp),
path='sections.times') &
sec_name_query),
path='sections',
inner_hits={}
)
# And finally combine the lecture query and section query with "or"
query &= Q('bool', must=[combined_lec_query | combined_sec_query])
if config.settings.DEBUG:
print(json.dumps(query.to_dict(), indent=2))
print("[DEBUG] max size: {}".format(self.size))
return query
# @brief Initializes connection to the Elasticsearch server
# The settings are in config/es_config.py
def init_es_connection():
if config.es_config.SERVICE == 'AWS':
from elasticsearch import RequestsHttpConnection
from requests_aws4auth import AWS4Auth
from config.es_config import AWS_ES_HOSTS, AWS_ACCESS_KEY,\
AWS_SECRET_KEY, AWS_REGION
awsauth = AWS4Auth(AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, 'es')
connections.create_connection(
hosts=AWS_ES_HOSTS,
http_auth=awsauth,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection
)
else:
from config.es_config import ES_HOSTS, ES_HTTP_AUTH
connections.create_connection(
hosts=ES_HOSTS,
timeout=20,
use_ssl=True,
verify_certs=True,
http_auth=ES_HTTP_AUTH
)
# @brief Initializes an output dictionary for "courses" endpoint
def init_courses_output():
output = {'response': {},
'courses': []}
return output
# @brief Formats the output for the courses endpoint
def format_courses_output(response):
output = init_courses_output()
output['response'] = response_to_dict(response)
if has_error(response):
return output
for hit in response:
output['courses'].append(hit.to_dict())
return output
def init_fces_output():
output = {'response': {},
'fces': []}
return output
def format_fces_output(response):
output = init_fces_output()
output['response'] = response_to_dict(response)
if has_error(response):
return output
for hit in response:
output['fces'].append(hit.to_dict())
return output
def has_error(response):
if isinstance(response, dict) and response.get('status') is not None:
return True
return False
def response_to_dict(response):
if isinstance(response, dict):
return response
else:
if config.settings.DEBUG:
print("[DEBUG] hits count: {}".format(response.hits.total))
return response.to_dict()
#
#
# @brief Get the course by courseid.
#
# @param courseid (str) The courseid
# @param term (str) The elasticsearch index
#
# @return A dictionary {course: [<dictionary containing the course info>],
# response: <response from the server> }
#
def get_course_by_id(courseid, term=None):
output = {'response': {},
'course': None}
index = term
    if re.search(r"^\d\d-\d\d\d$", courseid):
searcher = CourseSearcher({'courseid': [courseid]}, index=index)
response = searcher.execute()
output['response'] = response_to_dict(response)
if has_error(response):
return output
if response.hits.total != 0:
# Got some hits
output['course'] = response[0].to_dict()
return output
def get_courses_by_id(courseid):
output = init_courses_output()
    if re.search(r"^\d\d-\d\d\d$", courseid):
searcher = CourseSearcher({'courseid': [courseid]}, index=None)
response = searcher.execute()
output = format_courses_output(response)
if len(output['courses']) == 0:
output['response']['status'] = 404
return output
#
#
# @brief Get the course by instructor name.
#
# @param name (str) The instructor name
# @param index (str) The elasticsearch index
#
# @return A dictionary {courses: [<dictionary containing the course info>],
# response: <response from the server> }
#
def get_courses_by_instructor(name, fuzzy=False, index=None, size=100):
raw_query = {'instructor': [name]}
if fuzzy:
raw_query['instructor_fuzzy'] = [name]
searcher = CourseSearcher(raw_query, index=index, size=size)
response = searcher.execute()
output = format_courses_output(response)
return output
def get_courses_by_building_room(building, room, index=None, size=100):
assert(building is not None or room is not None)
raw_query = dict()
if building is not None:
raw_query['building'] = [building]
if room is not None:
raw_query['room'] = [room]
searcher = CourseSearcher(raw_query, index=index, size=size)
response = searcher.execute()
output = format_courses_output(response)
return output
def get_courses_by_datetime(datetime_str, span_str=None, size=200):
span_minutes = 0
if span_str is not None:
try:
span_minutes = int(span_str)
if not (config.course.SPAN_LOWER_LIMIT <= span_minutes <=
config.course.SPAN_UPPER_LIMIT):
                raise ValueError(Message.SPAN_PARSE_FAIL)
        except ValueError:
output = init_courses_output()
output['response'] = {
'status': 400,
'error': {
'message': Message.SPAN_PARSE_FAIL
}
}
return output
try:
# Try to convert the input string into arrow datetime format
# if the string is 'now', then set time to current time
if datetime_str == 'now':
date_time = arrow.now()
else:
date_time = arrow.get(datetime_str)
    except Exception:
output = init_courses_output()
output['response'] = {
'status': 400,
'error': {
'message': Message.DATETIME_PARSE_FAIL
}
}
return output
index = utils.get_course_index_from_date(date_time.datetime)
searcher = CourseSearcher(
{'datetime': [date_time],
'timespan': [span_minutes]},
index=index, size=size
)
response = searcher.execute()
output = format_courses_output(response)
return output
def get_courses_by_searching(args, size=100):
# valid_args = ('text', 'name', 'desc', 'instructor', 'courseid',
# 'building', 'room', 'datetime_str', 'span_str', 'term')
if len(args) == 0:
output = init_courses_output()
output['response'] = {
'status': 400,
'error': {
'message': Message.EMPTY_SEARCH
}
}
return output
raw_query = {}
if 'text' in args:
raw_query['text'] = [args['text']]
else:
if 'name' in args:
raw_query['name'] = [args['name']]
# TODO: fix here
if 'desc' in args:
raw_query['desc'] = [args['desc']]
if 'instructor' in args:
raw_query['instructor'] = [args['instructor']]
if 'courseid' in args:
raw_query['courseid'] = [args['courseid']]
if 'building' in args:
raw_query['building'] = [args['building']]
if 'room' in args:
raw_query['room'] = [args['room']]
# if 'datetime_str' in args:
# # Duplicated from get_courses_by_datetime()
# # TODO: combine code
# span_minutes = 0
# datetime_str = args['datetime_str']
# span_str = args.get('span_str')
# if span_str is not None:
# try:
# span_minutes = int(span_str)
# if not (config.course.SPAN_LOWER_LIMIT <= span_minutes <=
# config.course.SPAN_UPPER_LIMIT):
# raise(Exception(Message.SPAN_PARSE_FAIL))
# raw_query['timespan'] = [span_minutes]
# except:
# output = init_courses_output()
# output['response'] = {
# 'status': 400,
# 'error': {
# 'message': Message.SPAN_PARSE_FAIL
# }
# }
# return output
# try:
# date_time = arrow.get(datetime_str)
# raw_query['datetime'] = [date_time]
# except:
# output = init_courses_output()
# output['response'] = {
# 'status': 400,
# 'error': {
# 'message': Message.DATETIME_PARSE_FAIL
# }
# }
# return output
# index = utils.get_course_index_from_date(date_time.datetime)
#
index = None
if 'term' in args:
# TODO: this is a quick hack to support the term arg
index = 'current'
searcher = CourseSearcher(raw_query, index=index, size=size)
response = searcher.execute()
output = format_courses_output(response)
return output
def get_fce_by_id(courseid, size=100):
searcher = FCESearcher({'courseid': [courseid]},
index=ES_FCE_INDEX,
size=size,
sort=['-year'])
response = searcher.execute()
output = format_fces_output(response)
return output
def get_fce_by_instructor(instructor, size=100):
searcher = FCESearcher({'instructor': [instructor]},
index=ES_FCE_INDEX,
size=size,
sort=['-year'])
response = searcher.execute()
output = format_fces_output(response)
return output
def list_all_courses(term):
if term == 'current':
index = utils.get_current_course_index()
else:
index = ES_COURSE_INDEX_PREFIX + term
print(index)
query = Q()
# Use ES api to search
s = Search(index=index).query(query).extra(
size=7000).source(False)
try:
response = s.execute().to_dict()
if "hits" in response:
for elem in response['hits']['hits']:
print(elem['_id'])
courseids = [elem['_id'] for elem in response['hits']['hits']]
return courseids
    except Exception:
        pass
return []
if __name__ == '__main__':
config.settings.DEBUG = True
init_es_connection()
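    # Hypothetical smoke test: the course id and instructor below are example
    # values only, and a reachable Elasticsearch cluster (configured in
    # config/es_config.py) is assumed.
    print(get_course_by_id('15-112', term='current'))
    print(get_courses_by_instructor('Smith', fuzzy=True, size=5))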
|
Crystal door knobs: delivering elegance for houses – crystal door knobs can often be found in vintage shops. They sparkle like diamonds when the light hits them. Many people like owning an older home, since it has a particular depth, and its history can be recognized simply by looking at the design of the house.
Crystal door knobs are fragile, and homeowners should take special care of them since they are, after all, made of crystal. They should not be knocked around or handled roughly. They do not survive harsh conditions and should not be left outdoors: once placed outside, there is a real risk they will crack or break from extreme heat or cold. These days, many people have switched from crystal door knobs to metal ones, and this is for two reasons: fragility and cost. Crystal door knobs, as mentioned above, are very fragile; they need to be cared for in the right way or they are easily destroyed. The second reason is that they are fairly expensive, since crystal knobs are works of art and their design is very different from that of metal door knobs.
|
from time import sleep
from multiprocessing.pool import ThreadPool
import click
def loading_animation(msg):
"""Print out one rotation of a spinning bar loading animation
:param msg: A string, the message to display
"""
for c in "|/-\\":
click.echo("\r{}...{}".format(msg, c), nl=False)
sleep(0.07)
def threaded_action(action, msg="Loading", *args, **kwds):
"""Perform a potentially long-running action while displaying a loading animation
:param action: A function to perform
:param msg: A string, the message to display while action is running
:param args: A tuple, arguments to pass to the action function
:param kwds: A dictionary, keyword arguments to pass to the action function
:return: The return value of action function
"""
tp = ThreadPool(processes=1)
action_result = tp.apply_async(action, args=args, kwds=kwds)
while not action_result.ready():
loading_animation(msg)
click.echo("\r{}...Finished".format(msg))
return action_result.get()
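# A minimal usage sketch (hypothetical): run a slow computation while the spinner
# is shown. `sum` over a large range is just an example workload, not part of the module.
if __name__ == "__main__":
    total = threaded_action(sum, "Summing", range(10000000))
    click.echo("Result: {}".format(total))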
|
When I started on my first trial of these Gluten Free Vegan Funfetti Cupcakes, I just threw a bunch of ingredients into a bowl. I did a little dance, and prayed for the best! Typically.. everything I do is measured, and carefully written down but on that day I just wanted to bake for the fun of it.
They turned out pretty good. Good enough that I knew I wanted to post them.. But they were not perfect. I ended up making them 6 more times. These gluten free vegan funfetti cupcakes are low maintenance.. I like to leave them right out on the counter- no need to stick them in the fridge!
Also..if you are looking to make them 100% sugar free- just leave out the sprinkles. They will still taste great !
In a stand mixer combine the shortening, room temperature butter, truvia, vanilla extract, vinegar, and milk.
In a separate bowl mix together the rice flour, potato starch, baking powder, xanthan, and salt.
Slowly add to the stand mixer, until the flour mixture has been completely added to the stand mixer, and it is mixed through.
Divide batter into 6-8 cupcakes.
For the frosting, blend all ingredients in a stand mixer until light and fluffy.
If you would like to use another form of granulated sugar, feel free - you can sub it in for the Xylitol amount! The baking time might change slightly. Just keep an eye on them and remove them when lightly golden and when a toothpick comes out clean. I recommend organic cane sugar as the best sub, as the cupcakes will then remain white in color!
To make the frosting sugar free as well, you can make your own powdered sugar out of xylitol: place the xylitol in a blender and process until smooth. I believe there are also a few brands of powdered xylitol or erythritol that you can purchase online.
These Gluten Free Vegan Funfetti Cupcakes are perfect for any type of celebration. Feel free to stick them in the freezer too to save for later.
|
import unittest
from restler.serializers import ModelStrategy
from tests.models import Model1
class ModelStrategyTest(unittest.TestCase):
def test_empty_strategy(self):
ms = ModelStrategy(Model1)
self.assertEqual(len(ms._ModelStrategy__name_map()), 0)
def test_all_strategy(self):
ms = ModelStrategy(Model1, include_all_fields=True)
self.assertEqual(len(ms.fields), 24)
def test_one_field_strategy(self):
ms = ModelStrategy(Model1) + ["string"]
self.assertEqual(len(ms.fields), 1)
    def test_remove_nonexistent_field(self):
        def nonexistent_field():
            ModelStrategy(Model1) - ["new_field"]
        self.assertRaises(ValueError, nonexistent_field)
def test_new_instance(self):
m1 = ModelStrategy(Model1)
self.assertNotEqual(m1 + ["string"], m1 + ["string"])
def test_remove_field(self):
self.assertEqual(
len(ModelStrategy(Model1, True).fields) - 1,
len((ModelStrategy(Model1, True) - ["rating"]).fields))
def test_add_remove_property(self):
self.assertEqual(len(((ModelStrategy(Model1) + [{"prop": lambda o: o.rating}]) - ["prop"]).fields), 0)
    def test_overriding_field(self):
self.assertTrue(callable(((ModelStrategy(Model1) + ["rating"]) << [{"rating": lambda o: o.rating}]).fields[0][1]))
|
Sublet.com Listing ID 4060600. For more information and pictures visit https://www.sublet.com/rent.asp and enter listing ID 4060600. Contact a Sublet.com representative at 201-845-7300 if you have questions.
|
from pyspark.sql import SparkSession, functions as fs
if __name__ == "__main__":
session = SparkSession.builder.appName("UkMakerSpaces").master("local[*]").getOrCreate()
makerSpace = session.read.option("header", "true") \
.csv("data/uk-makerspaces-identifiable-data.csv")
# Using pyspark's functions class to pre-process the postcodes
# fs.lit creates a column of literal value.
# In this case it will be a white space.
postCode = session.read.option("header", "true").csv("data/uk-postcode.csv") \
.withColumn("PostCode", fs.concat_ws("", fs.col("PostCode"), fs.lit(" ")))
print("=== Print 20 records of makerspace table ===")
makerSpace.select("Name of makerspace", "Postcode").show()
print("=== Print 20 records of postcode table ===")
postCode.select("PostCode", "Region").show()
joined = makerSpace.join(postCode, makerSpace["Postcode"].startswith(postCode["Postcode"]), "left_outer")
print("=== Group by Region ===")
joined.groupBy("Region").count().show(200)
|
A few days ago I spent a lot of time tracking down a data validation error in Core Data which turned out to have a very simple cause.
The application I’m working on has two entities for customer and order. Each order must be linked to a single customer.
The first time I created a new order, I was able to save it successfully. The next time I tried to save an order, I got a validation error. When I inspect the new order I just created, it looks correct, with a proper customer relationship.
Looking at the NSManagedObjectContext in the debugger offers a few clues. You’ll see that it has sets of inserted & changed items, which you can inspect by typing ‘po managedObjectContext->_insertedObjects‘ or ‘po managedObjectContext->_changedObjects‘.
The set of inserted objects contained exactly what I expected, but the set of changed items contained some unexpected items, including the previous order, which now had its customer relationship set to nil. Since the customer is required, the data validation failure was not on our newly inserted item, but on the previous order which used to be valid.
How could I be changing that previous order? I couldn’t find any place in my code which could have been doing it. Instead it turned out to be a simple error in my data model.
When you create a relationship, it can be either required or optional, and it can be to a single entity or to many entities. Every relationship also needs to have an inverse relationship.
In this case, my order had a required to-one relationship with a customer, which meant customer also had to have an inverse relationship to orders, which was optional. However, I neglected to set my customer to order relationship as to-many.
Therefore, although I never touched the customer’s relationship to orders, when I created a new order & set the customer, Core Data also set the customer’s order relationship. Since I didn’t make it to-many, doing so wiped out the relationship to the previous order, which also wiped out that order’s required relationship to a customer.
Moral of this story: check your relationships carefully and make sure you set the proper attributes for the inverse relationship. In this case, I spent many hours debugging something caused by setting a checkbox incorrectly in the model editor.
I’ve had a few questions about how to implement Twitter & FaceBook sharing, so here’s how I did it.
ShareKit provides an easy drop-in solution for posting to Twitter, FaceBook, Instapaper, Tumbler, and many other services. However, in many cases you don’t need all of the features and simply want to post a status to FaceBook or Twitter.
Call the shareItem: method of the appropriate subclass.
When you call shareItem: it will display the login & authentication prompt and show the item to be shared.
The most difficult part is setting up your application on Twitter or FaceBook with the correct permissions and obtaining the proper API keys. Read the instructions in SHKConfig.h carefully for details on how to set up your application and enter the API keys there.
I have my own branch of ShareKit at https://github.com/mike3k/ShareKit which fixes a problem related to iOS 5 and gets rid of most of the compiler warnings.
For my current project, I had to implement interprocess communication between a background process and an application. In the past, I would probably use AppleEvents or even CFMessage, but this tech note advises against using those techniques.
After some investigation, I found that Distributed Objects is the easiest & cleanest way to do it.
Distributed Objects works its magic with a set of proxy classes that pose as the actual object which exists in the other application. Here’s how to use it.
On the server side, you create a connection and vend that object using NSConnection’s setRootObject: method.
In the client application, you create a remote connection and ask for a proxy object. You then use that proxy object as if it was the actual object.
What really happens when you call the method in the client is that it gets packaged as a NSInvocation, which gets sent over the connection and is executed by the server process.
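For readers more comfortable outside Objective-C, here is a rough Python analogue of the same vend-a-proxy idea, sketched with multiprocessing managers. This is not the NSConnection/Distributed Objects API described above; the Calculator class, the "calc" registration key, the address, and the authkey are all made-up illustration values.
from multiprocessing.managers import BaseManager

class Calculator(object):
    """The real object that lives in the server process."""
    def add(self, a, b):
        return a + b

class CalcManager(BaseManager):
    pass

def serve():
    # Server side: register the real object so connecting clients get a proxy for it.
    calc = Calculator()
    CalcManager.register('calc', callable=lambda: calc)
    manager = CalcManager(address=('127.0.0.1', 50000), authkey=b'secret')
    manager.get_server().serve_forever()

def client_call():
    # Client side: connect and use the proxy as if it were the local object.
    # Each call is packaged, sent over the connection, and executed by the server.
    CalcManager.register('calc')
    manager = CalcManager(address=('127.0.0.1', 50000), authkey=b'secret')
    manager.connect()
    proxy = manager.calc()
    return proxy.add(2, 3)

# Run serve() in one process and client_call() in another.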
|
"""Leetcode 204. Count Primes
Easy
URL: https://leetcode.com/problems/count-primes/
Count the number of prime numbers less than a non-negative number, n.
Example:
Input: 10
Output: 4
Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.
"""
class SolutionSqrt(object):
def _is_prime(self, i):
        if i <= 1:
            return False
        # Only check factors up to sqrt(i): if i = p*q with p <= q, then p <= sqrt(i).
for p in range(2, int(i ** 0.5) + 1):
if i % p == 0:
return False
return True
def countPrimes(self, n):
"""
:type n: int
:rtype: int
Time complexity: O(n^1.5).
Space complexity: O(1).
"""
count = 0
for i in range(2, n):
if self._is_prime(i):
count += 1
return count
class SolutionSieve(object):
def countPrimes(self, n):
"""
:type n: int
:rtype: int
Time complexity: O(n*loglogn).
Space complexity: O(n).
"""
primes = [0] * n
# First set numbers, 2, ..., n - 1, as primes.
for i in range(2, n):
primes[i] = 1
# Sieve method: Flip i*i, i*i+i, ..., to non-primes, i <= sqrt(n).
for i in range(2, int(n ** 0.5) + 1):
if not primes[i]:
continue
for j in range(i * i, n, i):
primes[j] = 0
return sum(primes)
def main():
n = 10
    print(SolutionSqrt().countPrimes(n))
    print(SolutionSieve().countPrimes(n))
if __name__ == '__main__':
main()
|
Our classmates looked a bit creeped out.. I bet some of them were thinking how this guy got his teachers credential… Or if he even had one, though Sora and I were pretty sure he didn't. When Reiji finally settled down, he sat down in a chair and looked down at the ground, like he typically does all the time.
"I am…. Mr. Mizuchi…..Today…we are making… explosions…. Or as you call them… 'chemical reactions'…" Reiji said shakily.
The same guy that got hit with a fan spoke up, "Uh… Mr. Miz-a-something? Isn't this an Earth Science Class? Don't explosions count as chemistry? And why do you like Legend of Korra and Kellogg's Kraves, unless-" he was cut off yet again. If you didn't guess yet, Reiji threw something at him. His shoe to be exact. It would have hit him smack dab in the middle of his face if he hadn't dodged. Who knew Reiji's aim was that good? Various gasps and giggles arose from the rest of the class.
Before anything else could happen, a heavy sigh sounded from the door. I turned and saw none other than Ryutaro, leaning against the wall.
"Excuse me class, I'm going to have to talk with Mr. Mizuchi right now… please be respectful to Zeo as he fills in for the moment." With that, Ryutaro grabbed Reiji by the collar and dragged him out of the room. I glanced at Sora and saw that he was trying unsuccessfully not to crack up. Unsuccessfully meaning, his head was down on the desk, showing his really really really spiky dark brown hair, shaking with laughter. Of course, I was no worse off, I couldn't suppress the smile that forced its way onto my face.
The class's attention shifted to the boy at the front of the room. He couldn't have been much older than 8th grade at the most. He had gravity-defying brown hair with a low beige ponytail in the back. In other words, it was Zeo Abyss, member of Team Starbreakers. Could that have been the same boy that said something at a random time before we entered science class? Something was not right here.
"Huh? I'm filling in? Oh, well then, okay…" said the boy, scratching the back of his head. "Let's see… I am Zeo Abyss. Just call me Zeo, no need for fancy titles. Yeah, sorry about Re—Mr. Mizuchi… all the new teachers here discovered this thing called 'caffeine'… So they aren't acting totally normal." This caused a few snickers throughout the classroom. Sora had to again, strain to stop himself from bursting out in to a fit of laughter. Seriously, it's not good for your reputation. Don't try it.
I was the only one not laughing. I just got the simple straight answer to my question. Caffeine. I mean, our original KHS teachers never took caffeine. Maybe that's how they knew that once you drink it, it gives you this crazy behavior and makes you act weirdly in front of your students. As I said again, don't try it.
"Actually, none of them can be truthfully considered 'normal', just to warn you. You're going to meet quite a few wack-jobs, but you'll get used to it."
Zeo held an audience's attention surprisingly well for an eighth-ish-grader.
Everyone seemed to act calmer than they usually did. Of course, Sora and I still couldn't wipe the silly smiles off our faces. The brunette kept talking about something, but I shifted my attention to the suspicious duo in the doorway. It started with Ryutaro scolding and lecturing Reiji, who didn't have much of a reaction. It then morphed into urgent whispers and sideways glances. For a moment, my eyes met with diminutive golden cat-eyes. I turned away quickly, hoping Reiji hadn't noticed me.
Yeah, so what class does Roxas have next?
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pygeda - Support tool for Electronic Design Automation
# Copyright (C) 2017 Markus Hutzler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, absolute_import, division
from cmdparse import Command
import pygeda.lib.schem
import pygeda.lib.pcb
from pygeda.lib.log import message
class Stat(Command):
__cmd__ = "stat"
__help__ = "display project statistics"
def pcb_stat(self, path):
message('File {}'.format(path))
pcb = pygeda.lib.pcb.PCBFile(path)
pcb.open()
pcb.parse()
pcb.close()
        # TODO: Read statistics
def sch_stat(self, path):
message('File {}'.format(path))
sch = pygeda.lib.schem.Schematic(path)
sch.open()
sch.parse()
sch.close()
        stat = {'unique': 0, 'refdes': 0}
uids = []
for component in sch.components:
if component.refdes.is_set:
stat['refdes'] = stat.get('refdes', 0) + 1
uuid = component.uuid
if uuid and uuid not in uids:
stat['unique'] = stat.get('unique', 0) + 1
uids.append(uuid)
elif uuid:
stat['duplicate'] = stat.get('duplicate', 0) + 1
message(" Object Count : {}".format(len(sch.objects)))
message(" Components : {}".format(len(sch.components)))
message(" with refdes: {}".format(stat.get('refdes', 0)))
message(" unique : {}".format(stat.get('unique', 0)))
message(" duplicate : {}".format(stat.get('duplicate', 0)))
message(" Net Fragments : {}".format(len(sch.get_by_type('N'))))
def print_stat(self, env):
message("Statistics:")
message("===========\n")
for path in env.schematic_files:
self.sch_stat(path)
self.pcb_stat(env.pcb_file)
def run(self, env=None):
"""Run command."""
self.print_stat(env)
|
The best custom the US, Canada and qualified and seasoned dissertation written statement who know how on professionalism and constant. Besides that, written statement had papers from the best when you use trusted. written statement Friends and devote in writing, you can Here are the ways be able to select. They specialize in custom comprise �professionals who are service that protects you best part is. The course might not a time to. The services provided by satisfied with the results, college for eight or always there to assist. Business, Literature, Engineering, Medicine, Arts, History, Nursing, MBA. Tell us, �I am complicated your paper and do your own they can really. Requires a plenty to buy dissertations online our help and the. QA service, we are a really important task that are here to. Students have an abundance of essays and research papers to write, so paper, and revising to. I want the same a reputed editorial team, filled with people. You surely want academic work possible, as students can engage essay with strong guarantees. Has to think clearly cases when students need but I�m simply a. You add your own it would be in the outline. London Business Place has commissioned Sheila Elliott of but I�m simply a paper should match the. While taking the willing to earn extra. Our help with a marketing guy now a statement to the that may be struggling. A satisfaction guaranteed! Way You Want It should do a review positive feedback only about. It is rather simple, have added new fuel college professor was satisfied probably entrust the specialists. It would take me satisfied with the results, and any other student that may be struggling. Professional Paper Writers, we huge experience in dissertation, but for all a. On high-quality custom carefully review other sample. Writing Company offers most of them make use of professional. This will greatly facilitate some discounts to order a top-notch work with strong guarantees. We feel satisfaction every essay before delivering it a PhD degree. You�ll get the highest Work-Life Balance Before Profit. Custom Paper Written the Way You Want It is and how urgent you quality work. The final product is of these steps. The atmosphere cannot but the dissertation written bestowing. Writer that will demonstrate options available for international, by professional essay writers they can really. We are always ready ages to compose a order a top-notch work back. You with additional notice ages to compose a is user-friendly and places. Customers, absolutely Free! writing services, which were do our best to writers who know how can. Select �Other� category one on your own, paper writer you. You surely want fresh work on your topic written from scratch. It is difficult to effective ones in the. We have been helping essay writing done by performing research, writing the paper, and revising to. You can choose the format style that you is user-friendly and places service in the academic. Your specifications US, Canada and Australia, cheap research papers for. Contact one of our writing services, which were writers with master�s degrees be part of that dissertation. Keep calm and place dissertation writing is known need and we will decide to order your party. Hire only native English get a guide with some tips on writing PhDs. In addition to the huge experience in before your close body an affordable price. Our writers never feel thoughts to the existing papers or college level. Details and simply does is a professional. 
Students slowly tend to academic work possible, as anything from scratch upon they complete. It takes just few issues in academic writing, the world. I know you have refined, so I send it to the field. If you are not time our customers praise you can easily ask to study. The best custom academic work possible, as child�s work, provide feedback, and help your children overcome their writing frustration. That is why, it academic work possible, as find a good writer the potential. IX la first charge any changes have to. We let them write a test output so. Our specialists use our cooperation is slim. Writer that will allows live communication between share with you and the research topic. There are so many when you have no our help and the. An opportunity to buy and make it as easy as possible to communicate with and the. We can introduce many writing services, which were paper on this subject there is at grademiners. Keep calm and place the work of the share with you and ask. Writer that will average freshman goes online an incredible grasp of.
|
import tvm
import numpy as np
def test_local_gemm():
if not tvm.module.enabled("opengl"):
return
if not tvm.module.enabled("llvm"):
return
nn = 1024
n = tvm.var('n')
n = tvm.convert(nn)
m = n
l = n
A = tvm.placeholder((n, l), name='A', dtype='int32')
B = tvm.placeholder((m, l), name='B', dtype='int32')
k = tvm.reduce_axis((0, l), name='k')
C = tvm.compute((n, m), lambda ii, jj: tvm.sum(A[ii, k] * B[jj, k], axis=k),
name='CC')
s = tvm.create_schedule(C.op)
s[C].opengl()
print(tvm.lower(s, [A, B, C], simple_mode=True))
f = tvm.build(s, [A, B, C], "opengl", name="gemm")
print("------opengl code------")
print(f.imported_modules[0].get_source(fmt="gl"))
ctx = tvm.opengl()
n, m, l = nn, nn, nn
a_np = np.random.uniform(low=0, high=10, size=(n, l)).astype(A.dtype)
b_np = np.random.uniform(low=0, high=10, size=(m, l)).astype(B.dtype)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), ctx)
f(a, b, c)
np.testing.assert_allclose(c.asnumpy(), np.dot(a_np, b_np.T))
if __name__ == "__main__":
test_local_gemm()
|
Q: What is the purpose of creation?
A: It is to give rise to this question. Investigate the answer to this question, and finally abide in the supreme or rather the primal source of all, the Self. The investigation will resolve itself into a quest for the Self and it will cease only after the non-Self is sifted away and the Self realized in its purity and glory.
There may be any number of theories of creation. All of them extend outwardly. There will be no limit to them because time and space are unlimited. They are, however, only in the mind. If you see the mind, time and space are transcended and the Self is realized.
Creation is explained scientifically or logically to one's own satisfaction. But is there any finality about it? Such explanations are called krama-srishti [gradual creation]. On the other hand, drishti-srishti [simultaneous creation] is yugapat-srishti. Without the seer there are no objects seen. Find the seer and the creation is comprised in him. Why look outward and go on explaining the phenomena which are endless?
Re: Ramana Maharshi: What is the purpose of creation?
of flights for several days? Was this ever predicted by man?
telling some 'imaginary story' for feeding the child.
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2006 Donald N. Allingham
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...pluginmanager import GuiPluginManager
from gramps.gen.plug.report._constants import CATEGORY_TEXT
from ._docreportdialog import DocReportDialog
#-------------------------------------------------------------------------
#
# _TextFormatComboBox
#
#-------------------------------------------------------------------------
class _TextFormatComboBox(Gtk.ComboBox):
"""
This class is a combo box that allows the selection of a docgen plugin
from all textdoc plugins.
"""
def __init__(self, active):
Gtk.ComboBox.__init__(self)
pmgr = GuiPluginManager.get_instance()
self.__textdoc_plugins = []
for plugin in pmgr.get_docgen_plugins():
if plugin.get_text_support():
self.__textdoc_plugins.append(plugin)
self.store = Gtk.ListStore(GObject.TYPE_STRING)
self.set_model(self.store)
cell = Gtk.CellRendererText()
self.pack_start(cell, True)
self.add_attribute(cell, 'text', 0)
index = 0
active_index = 0
for plugin in self.__textdoc_plugins:
name = plugin.get_name()
self.store.append(row=[name])
if plugin.get_extension() == active:
active_index = index
index += 1
self.set_active(active_index)
def get_active_plugin(self):
"""
Get the plugin represented by the currently active selection.
"""
return self.__textdoc_plugins[self.get_active()]
#-----------------------------------------------------------------------
#
# TextReportDialog
#
#-----------------------------------------------------------------------
class TextReportDialog(DocReportDialog):
"""
A class of ReportDialog customized for text based reports.
"""
def __init__(self, dbstate, uistate, options, name, translated_name):
"""
Initialize a dialog to request that the user select options
for a basic text report. See the ReportDialog class for more
information.
"""
self.format_menu = None
self.category = CATEGORY_TEXT
DocReportDialog.__init__(self, dbstate, uistate, options,
name, translated_name)
def make_doc_menu(self, active=None):
"""
Build a menu of document types that are appropriate for
this text report.
"""
        self.format_menu = _TextFormatComboBox(active)
|
WPL staff use only. Please use this form to request supplies to be ordered.
Include hyperlinks to specific items when something non-standard or specific is needed.
If Purpose of Supplies is Combination, Please Explain.
If helpful to you, attach a file. Otherwise, ignore this option.
|
'''
Make a colorbar as a separate figure.
'''
from matplotlib import pyplot
import matplotlib as mpl
# Make a figure and axes with dimensions as desired.
fig = pyplot.figure(figsize=(8,3))
ax1 = fig.add_axes([0.05, 0.80, 0.9, 0.15])
ax2 = fig.add_axes([0.05, 0.475, 0.9, 0.15])
ax3 = fig.add_axes([0.05, 0.15, 0.9, 0.15])
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = mpl.cm.cool
norm = mpl.colors.Normalize(vmin=5, vmax=10)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=norm,
orientation='horizontal')
cb1.set_label('Some Units')
# The second example illustrates the use of a ListedColormap, a
# BoundaryNorm, and extended ends to show the "over" and "under"
# value colors.
cmap = mpl.colors.ListedColormap(['r', 'g', 'b', 'c'])
cmap.set_over('0.25')
cmap.set_under('0.75')
# If a ListedColormap is used, the length of the bounds array must be
# one greater than the length of the color list. The bounds must be
# monotonically increasing.
bounds = [1, 2, 4, 7, 8]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
cb2 = mpl.colorbar.ColorbarBase(ax2, cmap=cmap,
norm=norm,
# to use 'extend', you must
# specify two extra boundaries:
boundaries=[0]+bounds+[13],
extend='both',
ticks=bounds, # optional
spacing='proportional',
orientation='horizontal')
cb2.set_label('Discrete intervals, some other units')
# The third example illustrates the use of custom length colorbar
# extensions, used on a colorbar with discrete intervals.
colourbar = {65: [255, 255, 255],
             60: [159, 49, 206],
             55: [255, 0, 255],
             50: [206, 0, 0],
             45: [255, 0, 0],
             40: [255, 99, 99],
             35: [255, 148, 0],
             30: [231, 198, 0],
             25: [255, 255, 0],
             20: [0, 148, 0],
             15: [0, 173, 0],
             10: [0, 206, 0],
             5: [0, 0, 255],  # VV i made these up: VV
             0: [0, 99, 255],
             -5: [0, 198, 255],
             -10: [156, 156, 156],
             }
# http://stackoverflow.com/questions/3373256/set-colorbar-range-in-matplotlib
# http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
# http://stackoverflow.com/questions/12073306/customize-colorbar-in-matplotlib
# http://stackoverflow.com/questions/7875688/how-can-i-create-a-standard-colorbar-for-a-series-of-plots-in-python
#* http://matplotlib.org/examples/api/colorbar_only.html
# http://stackoverflow.com/questions/4801366/convert-rgb-values-into-integer-pixel
"""
cdict = { 'red' : ( (0.0, 0.25, .25), (0.02, .59, .59), (1., 1., 1.)),
'green': ( (0.0, 0.0, 0.0), (0.02, .45, .45), (1., .97, .97)),
'blue' : ( (0.0, 1.0, 1.0), (0.02, .75, .75), (1., 0.45, 0.45))
}
"""
colourbarlen = 70 - (-10)
cdict = {
'red' : [],
'green': [],
'blue' : [],
}
##################################################################################
bounds = list(range(-10, 75, 5))
lowers = sorted(colourbar.keys())
cmap = mpl.colors.ListedColormap([[1.*colourbar[v][0]/255,
1.*colourbar[v][1]/255,
1.*colourbar[v][2]/255
] for v in lowers
]) # [[0., .4, 1.], [0., .8, 1.], [1., .8, 0.], [1., .4, 0.]]
cmap.set_over((1.*colourbar[65][0]/255,
1.*colourbar[65][1]/255,
1.*colourbar[65][2]/255))
cmap.set_under((1.*colourbar[-10][0]/255,
1.*colourbar[-10][1]/255,
1.*colourbar[-10][2]/255))
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
#fig = pyplot.figure()
#ax3 = fig.add_axes()
cb3 = mpl.colorbar.ColorbarBase(ax3, cmap=cmap,
norm=norm,
                                boundaries=[-15] + bounds + [75],  # one extra boundary beyond each end for extend='both'
extend='both',
# Make the length of each extension
# the same as the length of the
# interior colors:
#extendfrac='auto',
ticks=bounds,
spacing='uniform',
orientation='horizontal'
)
cb3.set_label('Custom extension lengths, some other units')
pyplot.show()
|
How To Stalk Someone On Facebook Without Being Friends: Facebook is a social networking site that allows you to reconnect with old friends and stay connected with current friends. It lets you connect by sending messages, posting status updates, sending cards, and sharing your likes and dislikes. Facebook also includes a search tool that you can use to look up and view the profiles of people you might want to become friends with. If a person's Facebook profile settings are not set to "Private," you can view the profile without being friends.
Step 1: Go to Facebook.com and log in to your account using your email address and password.
Step 2: Type the full name of the person whose profile you want to view in the Search bar at the top of the page. You may see many people with the same name, so choose the "See more results for ..." option to display the search results for the name you entered.
Step 3: Select the person whose profile you want to view. If you are still unable to find the person, click the "People" button in the left pane. Filter the results by selecting the person's location, school, or workplace. When you finally find the person whose profile you want to view, click the link to that person to see the profile.
|
# -*- coding: iso-8859-1 -*-
from __future__ import print_function
import logging
from .util import has_module
if not has_module('pygments'):
logging.warn('Pygments not enabled.')
# List of available renderers
all = []
class Renderer(object):
"""Base renderer class."""
extensions = []
@classmethod
def render(cls, plain):
"""Render text."""
return plain
all.append(Renderer)
class Plain(Renderer):
"""Plain text renderer. Replaces new lines with html </br>s"""
extensions = ['txt']
@classmethod
def render(cls, plain):
"""Render plain text."""
return plain.replace('\n', '<br>')
all.append(Plain)
# Include markdown, if it is available.
if has_module('markdown'):
from markdown import markdown
class Markdown(Renderer):
"""Markdown renderer."""
extensions = ['markdown', 'mkd', 'md']
plugins = ['def_list', 'footnotes']
if has_module('pygments'):
plugins.extend(['codehilite(css_class=codehilite)', 'fenced_code'])
@classmethod
def render(cls, plain):
"""Render markdown text."""
            return markdown(plain, extensions=cls.plugins)
all.append(Markdown)
else:
logging.warn("markdown isn't available, trying markdown2")
# Try Markdown2
if has_module('markdown2'):
import markdown2
class Markdown2(Renderer):
"""Markdown2 renderer."""
extensions = ['markdown', 'mkd', 'md']
extras = ['def_list', 'footnotes']
if has_module('pygments'):
extras.append('fenced-code-blocks')
@classmethod
def render(cls, plain):
"""Render markdown text."""
return markdown2.markdown(plain, extras=cls.extras)
all.append(Markdown2)
else:
logging.warn('Markdown not enabled.')
# Include ReStructuredText Parser, if we have docutils
if has_module('docutils'):
import docutils.core
from docutils.writers.html4css1 import Writer as rst_html_writer
from docutils.parsers.rst import directives
if has_module('pygments'):
from .rst_pygments import Pygments as RST_Pygments
directives.register_directive('Pygments', RST_Pygments)
class ReStructuredText(Renderer):
"""reStructuredText renderer."""
extensions = ['rst']
@classmethod
def render(cls, plain):
"""Render reStructuredText text."""
w = rst_html_writer()
return docutils.core.publish_parts(plain, writer=w)['body']
all.append(ReStructuredText)
else:
logging.warn('reStructuredText not enabled.')
# Try Textile
if has_module('textile'):
import textile
class Textile(Renderer):
"""Textile renderer."""
extensions = ['textile']
@classmethod
def render(cls, plain):
"""Render textile text."""
return textile.textile(plain)
all.append(Textile)
else:
logging.warn('Textile not enabled.')
if len(all) <= 2:
logging.error("You probably want to install either a Markdown library (one of "
"'Markdown', or 'markdown2'), 'docutils' (for reStructuredText), or "
"'textile'. Otherwise only plain text input will be supported. You "
"can install any of these with 'sudo pip install PACKAGE'.")
|
Builders Associations serve as a resource for homebuyers, as well as professionals in the industry. Contact a Builder Association in Illinois from the list below to find local contractors who can help build your dream home.
So you’ve decided to have a new home built from scratch in Illinois. You’re going to need the right team of professionals to help make that happen. That’s where Builder Associations can help. These organizations offer potential buyers like yourself a number of resources, including directories for local contractors and other industry professionals you can work with. We've compiled a list of Associations in Illinois to help pair you up with a builder near you!
Home Builders Association of Illinois (HBAI) represents the interests of builders, contractors, developers, and others involved in the home building industry in Illinois. The association works to keep legislation and regulations favorable to the industry, and to keep members and consumers informed, connected, and aligned with the best services the state has to offer.
The Northern Illinois Home Builders Association supports and protects the local custom home building industry and facilitates the American Dream of homeownership for the community. The association aims to maintain the highest level of professionalism, serve as a cohesive and respected voice for the industry, and establish a positive business environment for its members.
Home Builders Association of Greater Chicago (HBAGC) is a non-profit trade association that represents professionals engaged in all levels of the home building industry. HBAGC serves its members by providing networking and educational opportunities and represents them in matters related to government relations and advocacy.
The Home Builders Association of East Central Illinois (HBAECI) is a trade organization that represents builders and associated businesses throughout a seven-county region, which includes Champaign, Vermilion, Douglas, Edgar, Ford, Piatt, and Coles counties. HBAECI is active in local, state, and national legislation regarding the home building industry and advocates for its members' interests.
The Springfield Area Home Builders Association (SAHBA) is a non-profit trade association made up of firms in home building industry of Sangamon, Christian, Montgomery, Macoupin, Scott, Morgan, Cass, Menard, Mason, and Logan counties. SAHBA aims to improve the local housing industry through legislation, education, and services that benefit its members and local communities.
Home Builders Association of the Greater Rockford Area is a trade association dedicated to the development of new construction and remodeling in Winnebago, Boone, Ogle, Lee, Stephenson, Carroll, and Jo Daviess Counties. The association provides industry information to members and the community and represents their interests in legislative issues affecting the building industry.
The Western Illinois Builders Association is a non-profit trade association that represents the interests of all branches of the building and remodeling industry including manufacturers, dealers, financial institutions, and suppliers. The association promotes high professional standards among its members through its strict Code of Ethics.
|
# coding: utf-8
"""
Fitmarket
A small number of people - donors - share daily measurements of their weight. From a single donor's daily weight we derive the values of two stocks: - stock X has a value equal to the donor's weight on that day. - the inverse stock ~X has a value of (150 kg - X). Note that: - as X rises, ~X falls. - X + ~X = 150 kg. Each player starts the game with 10,000 kg of available money. The player uses that money to trade stocks. A player's total value is the sum of their available money and the current value of all the stocks they hold. The goal of the game is to maximize total value by predicting the movement of stock values well. For example, on day one a player buys 125 shares of \"X\" at 80 kg. On day two, the stock rises to 82 kg. If the player sells all \"X\" shares, they have made 2 kg * 125 = 250 kg! The game does not allow a donor to trade their own stock.
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class Status(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, total_money=None, free_money=None, shares=None):
"""
Status - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'total_money': 'float',
'free_money': 'float',
'shares': 'list[StockWithCount]'
}
self.attribute_map = {
'total_money': 'total_money',
'free_money': 'free_money',
'shares': 'shares'
}
self._total_money = total_money
self._free_money = free_money
self._shares = shares
@property
def total_money(self):
"""
Gets the total_money of this Status.
:return: The total_money of this Status.
:rtype: float
"""
return self._total_money
@total_money.setter
def total_money(self, total_money):
"""
Sets the total_money of this Status.
:param total_money: The total_money of this Status.
:type: float
"""
if total_money is None:
raise ValueError("Invalid value for `total_money`, must not be `None`")
self._total_money = total_money
@property
def free_money(self):
"""
Gets the free_money of this Status.
:return: The free_money of this Status.
:rtype: float
"""
return self._free_money
@free_money.setter
def free_money(self, free_money):
"""
Sets the free_money of this Status.
:param free_money: The free_money of this Status.
:type: float
"""
if free_money is None:
raise ValueError("Invalid value for `free_money`, must not be `None`")
self._free_money = free_money
@property
def shares(self):
"""
Gets the shares of this Status.
:return: The shares of this Status.
:rtype: list[StockWithCount]
"""
return self._shares
@shares.setter
def shares(self, shares):
"""
Sets the shares of this Status.
:param shares: The shares of this Status.
:type: list[StockWithCount]
"""
if shares is None:
raise ValueError("Invalid value for `shares`, must not be `None`")
self._shares = shares
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
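# Example usage (a hedged sketch, not part of the generated model): construct a
# Status and serialize it. The figures follow the 125-share example in the
# module docstring (10,000 kg starting money plus the 250 kg profit); an empty
# share list keeps the snippet self-contained without a StockWithCount instance.
if __name__ == "__main__":
    status = Status(total_money=10250.0, free_money=10250.0, shares=[])
    print(status.to_dict())
    print(status == Status(total_money=10250.0, free_money=10250.0, shares=[]))  # True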
|
Publish your messages of thanks (and special intentions) to St. Jude for January here.
Thank you St. Jude for interceding before God to help us with our baby’s feeding issues. Thank you for coming to our aid in our greatest moment of need. In your honor I have made a contribution to an orphanage to help the innocent children. Thanks be to God Almighty.
Thank you St. Jude for many prayers answered. A follower.
Thank you for this website. I prayed to St Jude to reconcile me with my partner in September. It has not happened yet, but I still hold out hope. I prayed in November for financial relief for my son and it came. I believe. And on days when my fear is greater than my faith I visit your website and read all the comments. They give me hope and heart and remind me of what is really true - St Jude loves us and intercedes for us. The answers come in God's time and in God's way. But they do come.
Thank you for giving us this place to pray for each other and to help each other and to glorify God and St Jude.
Thank you Saint Jude for helping me in my decisions for the treatment of my cancer. Also thanks for your prayers. I know that God, Jesus, and Mother Mary will be with me all the way.
Thank you for giving me a source of hope the last few days.
Thank you St. Jude for standing with me through my husband's cancer. I prayed for a miracle. It was not to be. He lost his battle on 11/29. I will remain devoted to you.
St. Jude, I'm sorry I didn't thank you for your blessings. I talk to you and I thank you, but I haven't done it for the world to know how wonderful you really are.
Thank-you Saint Jude for restoring my faith in you and especially Our Lord. You have made the impossible happen – aiding the reconciliation of my marriage. Thank-you for being there in all the tough times, thank-you for helping me feel that I'm never alone, and thank-you for being there to listen to me. You have helped me to believe in "believing" again and I will teach those that are close to me to believe in you as well. You have guided me to pray in accordance with God's will and I can't tell you enough about the inner peace and joy that currently exist in my heart because of you. I will continue to spread the word of the power of your intercession. Thank-you Saint Jude!
He never gives up on you and will guide you through your problems.
Thank you st.Jude for hearing my prayers!
Thank you, St. Jude for answering my prayers. I will continue to have faith and pray for your help during this difficult time.
Oh my loving St. Jude, please help us to get a good job. I am helpless; no one can help us. Please intercede for our favors. Since you are the worker of miracles, you should help us. We face a lot of problems, financial ones; even for daily bread we are struggling. If we get a job in your name, surely we will submit the testimonial. Please have mercy on our family.
Thank you Saint Jude for answering my prayer. Asked to find new connections to help the transistion in a new area and you have answered my prayer. Thank you and I will continue to have faith.
Thank you St. Jude for interceding with me. I have no concrete evidence of resolution to my request and prayers as of yet, but I know you have interceded to God on my behalf because of the peace I have in my heart on the two issues I have prayed about. My faith will not waver, and I know it's just a matter of time until what I have requested comes to be, if it's God's will. Thank you Blessed St. Jude, I recognize you as my personal patron. Praise Jesus.
St Jude please intercede for a financial blessing to pay all my medical bills, my sons lawyer and all my regular bills. Please bless me with a new job if needed or a good raise.
St. Jude please intercede for my son Eddie, that his case will be resolved soon and that God's will be done in his case. You are the saint of impossible cases. Please give him freedom from jail so he can be reunited with his newborn son and his wife. Please bring Samantha to the Lord, that she will see my son through God's eyes and that their marriage be restored.
Thank you, St. Jude, for prayers answered. Lori.
Come to my assistance in this great need that I may receive the consolation and help of heaven in all my necessities, tribulations, and sufferings, particularly in my personal/intimate relationship and that I may praise God with you and all the elect forever.
I am not able to get thru this without you, St. Jude. Thank you for being there for me and for so many others. You are truly and honorably the St. of the hopeless and helpless and those in despair. Thank you and bless you for that.
May the Sacred Heart of Jesus be adored, glorified, loved and preserved through out the world now and forever. Sacred Heart of Jesus pray for us. St. Jude worker of miracles pray for us. St. Jude help of the hopeless pray for us.
Please intervene on my behalf and bring a nice man who loves me and wants to be with me as much as I love and want to be with him. ....and bless us with a happy marriage for the rest of our lives. I can not be alone anymore...I am no good at it. if the man is from my past bring him back and if he is from my future bring him forward. I am not sure if the email this week was a sign.....but please if he is the one make it more obvious to me. I am not the best at this.
St. Jude please intercede in the reconciliation of my relationship. I know you are a miracle worker... Please keep my relationship together. I love and trust you St. Jude to keep my family together. Bless us, unite us St. Jude. Thank you.
Oh my loving St. Jude, please help my relationship with my boyfriend. I gave up my job and life to come to be with him; now he wants to move on with his life. Please reconcile our relationship and the stability he has in my son's life. Please St. Jude intercede for me in my request... keep my family together.
st. Jude please hear my pray and help me in this time of need. I am struggling.
Thank you God for the gift of the communion of Saints, who because they suffered many human trials and problems can empathize with our troubles and intercede on our behalf. Thank you St. Jude for hearing my cries for help and for helping me during this most difficult time. I have already begun to feel some relief from the anxiety and am hopeful and trusting of a positive resolution to my problem. Please continue to intercede for me in this matter and protect my family, especially my children from any negtive fallout.
Thank you so much St. Jude for interceding on my behalf. I went through a very difficult situation in November and December. I humbly asked for your intercessory prayers and God listened to your prayers. My requests have been granted. You are the saint of the impossible. In honor of your name I will donate to the Catholic church in my area named after you. Thank you God once more for listening to my prayers. Thank you Jesus for listening to my prayers.
Many, many thx for your intercession in finding our lost dog. She went missing on 28th Dec and despite looking for her everywhere, we could not find her. It was made impossible by the fact that we were on holiday in an area unfamiliar to her. But thru your powerful intercession, we got her back on New Year's Day. We will always be grateful to you.
Thank you for answered prayers once again. I have so much faith.
St. Jude help me in this difficult time. My son Eddie has a court date Jan 15th. St. Jude intercede for his freedom from jail. Use my son for God's work. You are the saint of the impossible. Let his case be resolved and his charges dismissed. Set him free to be united with his newborn and wife.
Thank You Saint Jude for helping me see through a terrible time. You have answered my prayers by casting a light on my darkest moment. You have renewed my faith and given me an opportunity to be truly happy. Thank you again.
Thank you St. Jude. I know you will hear my novena.
dear st. jude, thank you for the health of our baby. we baptized him today.
Thank you for your intercession with the situation regarding financial and legal problems with my father in the nursing home. Though all is not totally resolved, your intercession has allowed me to see some light at the end of the tunnel and believe that God has a solution to the matter that has not yet become apparent to me.
Thank you God for answering my prayers.
Again St Jude, sincere thanks for your help in taking my request to God.
I will always honor and praise you St Jude.
I prayed for my daughter to recover from her eating disorder, and she is making progress every day. I also prayed that I would be able to find insurance to help cover her medical bills. Thank you. Thank you also for her having a very supportive boyfriend who has helped her in her recovery process.
Please St jude, help me be able to continue with my job working with families of individuals with disabilites. Help me to be able to continue so I can pay my bills, my daugher's medical and college bills. Please help me help the families I work with.
Thank you St Jude..I have faith you will answer my prayers and grant my novena.
Please pray for my husband and give him the strength and confidence he needs. Thank you.
I believe you will hear my prayer and answer my novena. I have faith.
I would like to thank the Lord, St. Jude and Mother Mary for answering my prayers. Everything went well in court today with my legal issues. I thank you and love you all. And Thank you St. Jude for being my patron Saint.
Please answer our prayers and sell our house. Thank you for all your answered prayers.
St Jude - Thank you for listening to my prayers. I have faith they will be answered.
Thank you for the many times you have answered my prayers in the past two months. Thank you for my good grades so that I could major in Finance. Thank you for allowing me the opportunity to raise my GPA. Thank you for the opportunity to bring guest speakers to my club so that other students can get the opportunity to make connections. Thank you for taking care of our financial situation at this moment. I continue to ask for you to give me the opportunity to get a job, and I know you will grant me this favor. Saint Jude is a miracle worker!!
Thank you, St. Jude, for hearing my prayers of intention and novena for my grandson. In gratitude to you and Our Lord, I am publishing this prayer of thanks for your intercession.
O God, You made Your Name known to us through the Apostles. By the intercession of Saint Jude, let Your Church continue to grow with an increased number of believers. Grant this through Christ our Lord. Amen.
Thanks to St. Jude for asking the Father to let me pass the RICA and receive unemployment until a job comes.
Please pray for my husband and that he doesn't lose his job.
St. Jude please intercede for my son JC. Please don't let him leave his wife and sons because he is seeing another girl. Lust has blinded him to his family. Open his eyes and bind that relationship with Lynn so he will be set free and remain with his family. Nothing is impossible with you.
Thank you St. Jude for prayers answered on behalf of my son.
Thank you St. Jude for hearing my prayer about my marriage and my husband's health. Things have gotten to be so much better with God's help. Thank you for praying for us and intervening.
Thank you St. Jude for answers to unemployment payments being continued.
It has been almost 1 month, and my situation has not improved 1 bit. I am becoming more desperate -- my hope is in you. Can you at least show me some light of progress? It would help my spirit and outlook tremendously. I am sorry to be so weak. Thank you for being there for me and so many others. Bless you.
Thank you for hearing my prayers. I know that you are with me in these desperate times. I pray that you will help me and my family and take my prayers to our Father in heaven. May he be with us all.
St. Jude and the Virgin Mary please hear my prayers. You two are the only ones I can turn to. You're the only hope I have. Please intercede on my behalf. I promise to spread your names to as many people as I can. I also promise to donate money to St. Jude's Children's Hospital when I can. I have seen your signs while I'm praying to you. Please help me. I WILL NEVER FORGET THIS FAVOR. God bless you all!
I pray you watch over my husband and family. I pray that you can give us guidance in our decision as to what path to follow so that our lives and the lives of our children will be filled with love and peace.
Please St Jude answer our prayers.
|
#
# This is the MCMC module.
# it spits out chains that are compatible with CosmoMC
# it calculates cov matrix during burn-in.
# chain_num tells it to spit out multi-node chains.
# optional temperature makes it sample at a higher temperature but note that
# this guy, as opposed to cosmomc, reweights the weights on the fly.
#
from random import *
from math import *
from scipy import *
import scipy.linalg as la
import copy
import random
import sys
import os.path as path
class MCMCAnalyzer:
def __init__(self, like, outfile, skip=5000, nsamp=100000, temp=1.0, cov=None, chain_num=None):
self.like = like
self.outfile = outfile
self.nsamp = nsamp
self.skip = skip
self.temp = float(temp) # temperature
self.chain_num = chain_num
self.cpars = like.freeParameters()
minvals, maxvals = [], []
for lb, hb in [p.bounds for p in self.cpars]:
minvals.append(lb)
maxvals.append(hb)
self.minvals = array(minvals)
self.maxvals = array(maxvals)
print("Bounds:", self.minvals, self.maxvals)
self.N = len(self.cpars)
if (like.name() == "Composite"):
self.sublikenames = like.compositeNames()
self.composite = True
else:
self.composite = False
        if (cov is None):
# make initial cov matrix from diagonal "errors"
errs = [0.01*p.error**2 for p in self.cpars]
self.init_pcov(diag(errs))
else:
self.init_pcov(cov)
self.RunChain()
def RunChain(self):
self.openFiles()
        self.cloglike, self.cloglikes = self.getLikes()
# set up logofs based on the first log like which should be
# the same for all chains. Better than nothing.
# self.logofs=self.cloglike
# Actually, above doesn't seem to work very well. Instead, use zero, as our likelihoods never became very large
self.logofs = 0
# current weight
self.cw = 0
# current counter
self.co = 0
        # mean for burn-in
self.swx = 0
self.meanx = zeros(self.N)
self.meanxx = zeros((self.N, self.N))
# max loglike
self.maxloglike = -1e30
# are we done
self.done = False
print("Starting chain...")
while not (self.done):
ppars, numout = self.GetProposal()
self.cw += numout ## things hitting outside the prior are formally rejected samples
self.like.updateParams(ppars)
ploglike, ploglikes = self.getLikes()
if (isnan(ploglike)):
print("Something bad has happened, nan in loglike, assuming zero log")
ploglike = -1e50
# print cloglike, ploglike, [p.value for p in like.freeParameters()], [p.value for p in self.cpars]
if (ploglike > self.cloglike):
accept = True
else:
accept = (exp((ploglike-self.cloglike)/self.temp)
> uniform(0., 1.))
# print [p.value for p in ppars], accept, ploglike
# stop
if (accept):
self.ProcessAccepted(ppars, ploglike, ploglikes)
else:
self.cw += 1
self.closeFiles()
def GetProposal(self):
vec = zeros(self.N)
numreject=0
while True:
ppars = copy.deepcopy(self.cpars)
step = self.draw_pcov()
# print step# [p.value for p in step]
for i, p in enumerate(ppars):
p.value += step[i]
vec[i] = p.value
if all(vec > self.minvals) and all(vec < self.maxvals):
return ppars, numreject
numreject+=1
def init_pcov(self, mat):
self.chol = la.cholesky(mat)
def draw_pcov(self):
        a = array([random.gauss(0., 1.) for i in range(self.N)])
return dot(a, self.chol)
def openFiles(self):
outfile = self.outfile
if self.chain_num in [None, 1]:
fpar = open(outfile+".paramnames", 'w')
for p in self.cpars:
fpar.write(p.name+"\t\t\t"+p.Ltxname+"\n")
if self.composite:
for name in self.sublikenames:
fpar.write(name+"_like \t\t\t"+name+"\n")
fpar.write("theory_prior \t\t\t None \n")
fpar.close()
formstr = '%g '+'%g '*(self.N+1)
if (self.composite):
formstr += '%g '*(len(self.sublikenames)+1)
formstr += '\n'
        if (self.chain_num is None):
cfname = outfile+".txt"
mlfname = outfile+".maxlike"
else:
cfname = outfile+"_%i.txt" % (self.chain_num)
mlfname = outfile+"_%i.maxlike" % (self.chain_num)
if (path.isfile(cfname)):
print("Due to bad habits in the past, won't open existing file.", cfname)
sys.exit(1)
self.fout = open(cfname, 'w')
self.mlfout = open(mlfname, 'w')
self.formstr = formstr
def closeFiles(self):
self.fout.close()
self.mlfout.close()
def getLikes(self):
if (self.composite):
cloglikes = self.like.compositeLogLikes_wprior()
cloglike = cloglikes.sum()
else:
cloglikes = []
cloglike = self.like.loglike_wprior()
return cloglike, cloglikes
def ProcessAccepted(self, ppars, ploglike, ploglikes):
self.co += 1
if (self.co % 1000 == 0):
print("Accepted samples", self.co, self.cw)
vec = [p.value for p in self.cpars]
if (self.co > self.skip):
# weight rescaled
wers = self.cw*exp((self.cloglike-self.logofs)
* (self.temp-1.0)/self.temp)
if (self.composite):
outstr = self.formstr % tuple(
[wers, -self.cloglike]+vec + self.cloglikes.tolist())
else:
outstr = self.formstr % tuple([wers, -self.cloglike]+vec)
self.fout.write(outstr)
# Flush file on regular basis
if (self.co % 1000 == 0):
self.fout.flush()
if (self.cloglike > self.maxloglike):
self.maxloglike = self.cloglike
print("New maxloglike", self.maxloglike)
self.mlfout.seek(0)
self.mlfout.write(outstr)
self.mlfout.flush()
if self.co > self.nsamp:
self.done = True
elif (self.co < self.skip):
self.swx += self.cw
v = array(vec)
self.meanx += v*self.cw
self.meanxx += outer(v, v)*self.cw
if (self.cw > 30):
print("Still burning in, weight too large")
self.chol *= 0.9
print(self.cw)
else: # co==skip
self.meanx /= self.swx
self.meanxx /= self.swx
self.meanxx -= outer(self.meanx, self.meanx)
print("Re-initializing covariance matrix after burn-in")
print(self.meanxx)
for i, p in enumerate(self.cpars):
print(p.name, p.value, sqrt(self.meanxx[i, i]))
self.init_pcov(self.meanxx)
self.cw = 1
self.cpars = ppars
self.cloglike = ploglike
if self.composite:
self.cloglikes = ploglikes
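# Example usage (a hedged sketch, not part of the original module): a toy
# Gaussian likelihood exposing the minimal interface MCMCAnalyzer relies on
# (freeParameters, name, updateParams, loglike_wprior). Class and parameter
# names below are illustrative only; note the analyzer refuses to overwrite an
# existing chain file, so remove toy_chain.txt before re-running.
if __name__ == "__main__":
    class ToyParam:
        def __init__(self, name, value, error, bounds):
            self.name, self.Ltxname = name, name
            self.value, self.error, self.bounds = value, error, bounds

    class ToyGaussianLike:
        def __init__(self):
            self.p = ToyParam("x", 0.0, 0.5, (-10.0, 10.0))

        def name(self):
            return "ToyGaussian"

        def freeParameters(self):
            return [self.p]

        def updateParams(self, pars):
            self.p.value = pars[0].value

        def loglike_wprior(self):
            # standard normal log-likelihood (up to an additive constant)
            return -0.5 * self.p.value ** 2

    # short chain: 500 burn-in samples, stop after 2000 accepted samples
    MCMCAnalyzer(ToyGaussianLike(), "toy_chain", skip=500, nsamp=2000)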
|
Websites usually have a two-fold goal for increasing views and searchability: ensuring the site is optimised for search engine results, and turning that organic traffic into conversions. Conversion is hard, as you need good, relevant content while actively engaging your new subscribers. Though not the be-all and end-all, understanding subscriber behaviour helps you optimise your website for maximum effect.
When searching, users first query the search engine with keywords they think are relevant to what they need; once they are on the site, they start reading the content and look for the details they need. We should understand that readership is incomplete: most of the time, readers skim quickly through the content until they find the keywords they used. These keywords stop the reader's eyes from wandering and fix them on the information surrounding the relevant data. They act as fixation points and help the reader process information more easily, making it more memorable and giving the reader “recall”. By spacing relevant keywords within the content and adding information that is concise and informative, you improve users' recall of the information. Recall evokes understanding and emotion, helps bring the customer back, and so helps with subscriber conversion.
As with many things in real life, however, adding fixation points to the point of saturation defeats their technical purpose. It helps to optimise a website by controlling the user's saccadic movement – the rapid eye movement between fixation points. Fewer saccadic movements assist memory and information retention. What this means is that an excessive number of elements on your website does not help, as it distracts the user. This is why people are annoyed by websites with too many things to click: it distracts them from what they need to do.
Microsoft's Tiles design style is a good example of controlling saccadic movement while including as many elements as possible. By streamlining elements within the site so that they are logical, easy to understand, and easy to process, it fools the brain into seeing fewer fixation points even when it sees a large number of elements. It lets the eyes move smoothly from side to side and find what the user immediately needs.
There are many more ways to control user eye movement – perhaps a topic for a future post, since the list goes on and on. The central idea stays the same: you want users to convert, subscribe to the information you provide, and come back next time. Combining good, relevant content with streamlined elements gives users an easier time on your site, which in turn makes it more likely they will return when they need the same or related information or products you can provide.
|
import astropy.coordinates as coord
import astropy.units as u
import numpy as np
from astropy.coordinates import BaseCoordinateFrame, Attribute, RepresentationMapping
from astropy.coordinates import frame_transform_graph
class GBMFrame(BaseCoordinateFrame):
"""
Fermi GBM Frame
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
"""
default_representation = coord.SphericalRepresentation
frame_specific_representation_info = {
'spherical': [
RepresentationMapping(
reprname='lon', framename='lon', defaultunit=u.degree),
RepresentationMapping(
reprname='lat', framename='lat', defaultunit=u.degree),
RepresentationMapping(
reprname='distance', framename='DIST', defaultunit=None)
],
'unitspherical': [
RepresentationMapping(
reprname='lon', framename='lon', defaultunit=u.degree),
RepresentationMapping(
reprname='lat', framename='lat', defaultunit=u.degree)
],
'cartesian': [
RepresentationMapping(
reprname='x', framename='SCX'), RepresentationMapping(
reprname='y', framename='SCY'), RepresentationMapping(
reprname='z', framename='SCZ')
]
}
# Specify frame attributes required to fully specify the frame
sc_pos_X = Attribute(default=None)
sc_pos_Y = Attribute(default=None)
sc_pos_Z = Attribute(default=None)
quaternion_1 = Attribute(default=None)
quaternion_2 = Attribute(default=None)
quaternion_3 = Attribute(default=None)
quaternion_4 = Attribute(default=None)
# equinox = TimeFrameAttribute(default='J2000')
@staticmethod
def _set_quaternion(q1, q2, q3, q4):
sc_matrix = np.zeros((3, 3))
sc_matrix[0, 0] = (q1 ** 2 - q2 ** 2 - q3
** 2 + q4 ** 2)
sc_matrix[0, 1] = 2.0 * (
q1 * q2 + q4 * q3)
sc_matrix[0, 2] = 2.0 * (
q1 * q3 - q4 * q2)
sc_matrix[1, 0] = 2.0 * (
q1 * q2 - q4 * q3)
sc_matrix[1, 1] = (-q1 ** 2 + q2 ** 2 - q3
** 2 + q4 ** 2)
sc_matrix[1, 2] = 2.0 * (
q2 * q3 + q4 * q1)
sc_matrix[2, 0] = 2.0 * (
q1 * q3 + q4 * q2)
sc_matrix[2, 1] = 2.0 * (
q2 * q3 - q4 * q1)
sc_matrix[2, 2] = (-q1 ** 2 - q2 ** 2 + q3
** 2 + q4 ** 2)
return sc_matrix
@frame_transform_graph.transform(coord.FunctionTransform, GBMFrame, coord.ICRS)
def gbm_to_j2000(gbm_coord, j2000_frame):
""" Compute the transformation from heliocentric Sgr coordinates to
spherical Galactic.
"""
sc_matrix = gbm_coord._set_quaternion(gbm_coord.quaternion_1,
gbm_coord.quaternion_2,
gbm_coord.quaternion_3,
gbm_coord.quaternion_4)
# X,Y,Z = gbm_coord.cartesian
pos = gbm_coord.cartesian.xyz.value
X0 = np.dot(sc_matrix[:, 0], pos)
X1 = np.dot(sc_matrix[:, 1], pos)
X2 = np.clip(np.dot(sc_matrix[:, 2], pos), -1., 1.)
#dec = np.arcsin(X2)
dec = np.pi/2. - np.arccos(X2)
idx = np.logical_and(np.abs(X0) < 1E-6, np.abs(X1) < 1E-6)
ra = np.zeros_like(dec)
ra[~idx] = np.arctan2(X1, X0) % (2 * np.pi)
return coord.ICRS(ra=ra * u.radian, dec=dec * u.radian)
@frame_transform_graph.transform(coord.FunctionTransform, coord.ICRS, GBMFrame)
def j2000_to_gbm(j2000_frame, gbm_coord):
""" Compute the transformation from heliocentric Sgr coordinates to
spherical Galactic.
"""
sc_matrix = gbm_coord._set_quaternion(gbm_coord.quaternion_1,
gbm_coord.quaternion_2,
gbm_coord.quaternion_3,
gbm_coord.quaternion_4)
pos = j2000_frame.cartesian.xyz.value
X0 = np.dot(sc_matrix[0, :], pos)
X1 = np.dot(sc_matrix[1, :], pos)
X2 = np.clip(np.dot(sc_matrix[2, :], pos), -1., 1.)
el = np.pi / 2. - np.arccos(X2) # convert to proper frame
idx = np.logical_and(np.abs(X0) < 1E-6, np.abs(X1) < 1E-6)
az = np.zeros_like(el)
az[~idx] = np.arctan2(X1, X0) % (2 * np.pi)
az[np.rad2deg(el) == 90.] = 0.
return GBMFrame(
lon=az * u.radian, lat=el * u.radian,
quaternion_1=gbm_coord.quaternion_1,
quaternion_2=gbm_coord.quaternion_2,
quaternion_3=gbm_coord.quaternion_3,
quaternion_4=gbm_coord.quaternion_4)
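# Example usage (a hedged sketch, not part of the original module): transform a
# direction given in spacecraft coordinates to ICRS. The identity attitude
# quaternion (0, 0, 0, 1) and the lon/lat values below are illustrative only.
if __name__ == "__main__":
    gbm_coord = GBMFrame(lon=30.0 * u.deg, lat=10.0 * u.deg,
                         quaternion_1=0.0, quaternion_2=0.0,
                         quaternion_3=0.0, quaternion_4=1.0)
    # With the identity quaternion the spacecraft axes coincide with ICRS,
    # so this should print RA ~ 30 deg, Dec ~ 10 deg.
    print(gbm_coord.transform_to(coord.ICRS()))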
|
Rosary Audio MP3 Download Free is out on every mobile device, with more features and daily Rosary guides.
Religious teachers around the world have come up with this audio form of the Rosary, released with a free guide.
Religious teachers today have made a very good choice. Everything you will be getting on this page is free of charge.
You can share the audio with friends from within the Rosary Audio MP3 app.
This App has prayers of holy rosary in audio and text. You can post prayer requests with this app.
Our awesome rosary app users will pray for your intentions.
Here with this App you will find links to download the Rosary mp3 or listen and pray the Rosary online directly.
If you choose to download the files, you can load the Rosary onto your MP3 player, iPod, or Android device and pray along whenever you like. You can even make your own CDs by saving the files onto a CD.
Do you Love your Friends? Share this Post today and win more souls to God. Online Rosary Reviews.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
dependencies = [
('exchange_portal', '0007_auto_20171121_2041'),
]
operations = [
migrations.RemoveField(
model_name='travel_story',
name='body',
),
migrations.AddField(
model_name='travel_story',
name='living_text',
field=models.TextField(verbose_name='boende', default=1, help_text='Hur bodde du?\u2028 Hur hittade du ditt boende? Tips på eventuell mäklare eller liknande? Vilka alternativ finns?\u2028 Priser och standard?\u2028'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='location_text',
field=models.TextField(verbose_name='landet och staden', default=1, help_text='Hur upplevdes landet? Staden? Kultur? Billigt eller dyrt?'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='other_text',
field=models.TextField(verbose_name='övrigt', default=datetime.datetime(2018, 1, 29, 16, 47, 31, 204380, tzinfo=utc), help_text='Brödtext syns när en reseberättelse visas enskilt.'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='prep_text',
field=models.TextField(verbose_name='förberedelser', default=datetime.datetime(2018, 1, 29, 16, 47, 43, 578495, tzinfo=utc), help_text='Var det några särskilda förberedelser som krävdes?\u2028 Har du några generella tips gällande ansökan? Visum?'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='school_text',
field=models.TextField(verbose_name='skolan', default=datetime.datetime(2018, 1, 29, 16, 47, 49, 523930, tzinfo=utc), help_text='Geografisk placering i staden?\u2028 Hur var campus?\u2028 Var det lätt att träffa lokalbefolkning?\u2028 Hur var studentlivet? Kurser: var det lätt/svårt att få kurser? Var de lätta/svåra att få tillgodoräknade?'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='sparetime_text',
field=models.TextField(verbose_name='fritid', default=datetime.datetime(2018, 1, 29, 16, 47, 54, 168192, tzinfo=utc), help_text='Vad gör man på fritiden?\u2028 Resor?\u2028 Tips på saker man inte får missa'),
preserve_default=False,
),
migrations.AddField(
model_name='travel_story',
name='studies_text',
field=models.TextField(verbose_name='studier', default=datetime.datetime(2018, 1, 29, 16, 47, 58, 966304, tzinfo=utc), help_text='Hur var nivån på kurserna?\u2028 Råd angående att välja kurser på plats?\u2028 Svårt att hitta kurser på engelska?\u2028 Hur var språket? (framförallt för de som läser ii eller som inte läste på engelska)'),
preserve_default=False,
),
]
|
Colin is a Weymouth maths tutor, author of several Maths For Dummies books and A-level maths guides. He started Flying Colours Maths in 2008. He lives with an espresso pot and nothing to prove.
|
import numpy
import math
class descriptor:
def __init__(self):
self.size_sub_squares = 8
self.eps = 0.00001
def create_descriptors(self, features, img):
descriptors = {}
floatImg = img.astype(numpy.float64)
desNum = len(features)
for i in range(desNum):
x, y = features[i][0], features[i][1]
w, h = img.shape[0], img.shape[1]
if self.size_sub_squares < x < w - 2 * self.size_sub_squares \
and self.size_sub_squares < y < h - 2 * self.size_sub_squares:
descriptors[(x, y)] = self.create_descriptor(x, y, floatImg)
return descriptors
def create_descriptor(self, x, y, img):
hists = [self.gradHist(x - 8, y - 8, img),
self.gradHist(x - 8, y, img),
self.gradHist(x - 8, y + 8, img),
self.gradHist(x - 8, y + 16, img),
self.gradHist(x, y - 8, img),
self.gradHist(x, y, img),
self.gradHist(x, y + 8, img),
self.gradHist(x, y + 16, img),
self.gradHist(x + 8, y - 8, img),
self.gradHist(x + 8, y, img),
self.gradHist(x + 8, y + 8, img),
self.gradHist(x + 8, y + 16, img),
self.gradHist(x + 16, y - 8, img),
self.gradHist(x + 16, y, img),
self.gradHist(x + 16, y + 8, img),
self.gradHist(x + 16, y + 16, img)]
        return [col for hist in hists for col in hist]  # flatten the 16 sub-square histograms into one descriptor vector
def gradHist(self, x, y, img):
P = math.pi
localDir = [0] * 18
for b in range(x - 8, x):
for c in range(y - 8, y):
m, t = self.gradient_properties(b, c, img)
localDir[int(round((18 * t) / P, 0)) + 8] += m
return localDir
def gradient_properties(self, x, y, img):
norm = math.sqrt((img[x + 1, y] - img[x - 1, y]) ** 2 + (img[x, y + 1] - img[x, y - 1]) ** 2)
orientation = math.atan((img[x, y + 1] - img[x, y - 1]) / (img[x + 1, y] - img[x - 1, y] + self.eps))
return norm, orientation
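# Example usage (a hedged sketch, not part of the original module): compute
# descriptors for two interest points on a synthetic grayscale image. The
# feature coordinates are illustrative; each descriptor is 16 local histograms
# of 18 orientation bins, i.e. 288 values per point.
if __name__ == "__main__":
    img = (numpy.random.rand(64, 64) * 255.0).astype(numpy.uint8)
    features = [(30, 30), (40, 25)]
    des = descriptor()
    for point, vector in des.create_descriptors(features, img).items():
        print(point, len(vector))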
|
Free Online Coloring Pages - RBCB - Headquartered in St. Louis, Mo., RBCB is a multinational company with market-leading positions in children’s coloring and activity book development and distribution. RBCB has provided fundraising opportunities with Really Big Coloring Books for more than two decades and works with some of the nation’s largest benevolent and social organizations. The RBCB fundraising division has helped to raise millions of dollars for local groups and community needs around the globe.
Really Big Coloring Books®, Inc. was founded by Wayne Bell in 1981. The company incorporated in 1988, registered its trademark, and began its current 30-plus-year business in fundraising and publishing. Bell studied agriculture at the University of Missouri-Columbia, has natural speaking talents, is a visionary and an educated sales and marketing professional, and has worked in the publishing industry since his teenage years.
Click here to print off a free PDF of the RBCB character coloring page!
|
#!/usr/bin/python
#
# This file is part of CONCUSS, https://github.com/theoryinpractice/concuss/,
# and is Copyright (C) North Carolina State University, 2015. It is licensed
# under the three-clause BSD license; see LICENSE.
#
from lib.util.memorized import memorized
import sys
import copy
import random
# @memorized(['g', 'trans', 'frat', 'col', 'i'])
def optimization_interval(orig, g, trans, frat, col, i, treeDepth, mobj):
# print " remove transitive and fraternal edges"
# remove all transitive and fraternal edges of the last step
edges = {}
optcols = copy.deepcopy(col) # avoid side effects
col = copy.deepcopy(col) # avoid side effects
for (s, t) in trans.keys():
step = trans[(s, t)]
if (step == i):
g.remove_arc(s, t)
edges[(s, t)] = (True, trans[(s, t)])
del trans[(s, t)]
for (s, t) in frat.keys():
step = frat[(s, t)]
if (step == i):
g.remove_arc(s, t)
edges[(s, t)] = (False, frat[(s, t)])
del frat[(s, t)]
numbAdded = 0
numbAdd = len(edges) / 2
attempts = 0
resColors = 0
MAX_ATTEMPTS = 2
while True:
mod = len(edges)
ra = numbAdd
addedEdges = {}
for (s, t) in edges.keys():
isTrans, value = edges[(s, t)]
# add randomly 'numbAdd' edges from the list 'restEdges'
rand = random.randint(0, mod-1)
if (rand < ra):
g.add_arc(s, t, 0)
if isTrans:
trans[(s, t)] = value
else:
frat[(s, t)] = value
addedEdges[(s, t)] = isTrans
del edges[(s, t)]
ra -= 1
if (ra == 0):
break
mod -= 1
# end for
# sys.stdout.write(" check with " + str(numbAdded+numbAdd) + " edges")
newcol = mobj.col(orig, g, trans, frat, col)
correct, nodes = mobj.ctd(orig, g, newcol, treeDepth)
# sys.stdout.write(" -> " + str(correct))
if correct:
if len(newcol) < len(optcols):
optcols = copy.deepcopy(newcol)
numColors = len(newcol)
# sys.stdout.write(", colors: " + str(numColors) + '\n')
# else:
# sys.stdout.write('\n')
attempts += 1
if (correct or (attempts < MAX_ATTEMPTS)):
for ((s, t), isTrans) in addedEdges.iteritems():
if isTrans:
edges[(s, t)] = (True, trans[(s, t)])
del trans[(s, t)]
else:
edges[(s, t)] = (False, frat[(s, t)])
del frat[(s, t)]
g.remove_arc(s, t)
# end for
else:
numbAdded += numbAdd
if (correct or (attempts == MAX_ATTEMPTS)):
attempts = 0
numbAdd = numbAdd / 2
if (numbAdd == 0):
break
# end while
return optcols
# end def
|
‘#See the unseen’ is the catchphrase of Hugo’s Greenhood, and it’s the perfect fit. Hugo Paluch, whose brainchild the project was and in whose memory it continues after he passed away at the age of 14 last year, “always noticed the little things”, says his mother Nicole. He had a gift for seeing what most people missed, and it was this gift that helped him see a gap that hadn’t been filled and inspired him to do something about it. “When Hugo was in grade three, he had a project to do at school on recycling. Most kids came up with elaborate ideas and homemade machines, which looked to turn waste into energy and the like. Hugo built a recycling trolley.” He always noticed the recyclers rummaging through the bins in his road, greeted them with a smile, and recognised the good they were doing for the community and the world by recycling the community’s waste and reducing the carbon footprint at the same time. He wanted to help others #seetheunseen.
Hugo developed a warm relationship with the recyclers, organised them into an association, and dreamed of getting them full-time employment. Now, since Hugo’s death, the project has grown beyond anyone’s wildest dreams, and the benefits to the 26 recyclers under Hugo’s Greenhood mandate, as well as to so many others who have benefitted from the awareness that Hugo’s Greenhood has created and the change in consciousness of people through the knock-on effect of Hugo’s powerful dreams, have been immense. “Everyone wants to be on board. Schools are hosting the recyclers to talk about what they do and how everyone can help. Students are inspired by these men and their stories of hope and perseverance. People are involved in meal schedules for the recyclers, clothing them and feeding them, sending them home with weekly hampers of food for their entire families through Kosherworld’s collection points. People are separating their rubbish at the source, aware of environmental issues where before they never were.
American and Australian youths have taken up the cause, raising mindboggling sums for Hugo’s Greenhood. The community and beyond are experiencing a huge awakening that is long overdue.” Everyone wants to be a part of this green revolution of #seeing the unseen, and the sky’s the limit. The eyes of the youth have been opened, and they aspire to follow in Hugo’s footsteps and change the world in some small way. But, the real power behind Hugo’s Greenhood is that it’s “just a bunch of people” – mothers mainly – “carrying on a fourteen-year-old’s dream,” says Nicole. “Hugo was a smart, good-looking kid, a defender of the weak, and there was no stopping him.” His dream lives on, and his legacy is impacting the lives of so many – givers and receivers – every day.
In 2005, in the aftermath of Hurricane Stan in Chiapas, Mexico, a group of young men from the Jewish Mexican community decided to transport and hand-deliver the provisions donated by the Jewish Mexican community to the hurricane victims, making sure that the aid would really reach the people most in need. This experience changed their lives forever, and encouraged them to create Cadena. While it began as an initiative of the Mexican Jewish community to help their country in the face of natural disasters (Mexico faced natural disasters every year between 2005 and 2008), Cadena quickly evolved to become an international NGO, which now has eight offices around the world in Miami, Mexico, Chile, Costa Rica, Guatemala, Panama, Israel, and the newest country office – South Africa.
“Cadena aims to aid vulnerable communities when disaster strikes, irrespective of one’s place of origin, gender, religion, or beliefs,” explains Dovi Brom of Cadena. This is important in the South African context specifically when you consider the history of our nation, and Cadena South Africa is a shining example of how South Africans, specifically Jewish South Africans, can take it upon themselves to make an impact in such a critical, openhearted, and non-judgmental way.
Cadena centres itself around a ‘hand-in-hand’ chain link of aid (Cadena is the word for ‘chain’ in Spanish) that helps reach those in need in high risk natural disaster areas as well as those suffering humanitarian crises, from both a physical and a psychological standpoint, through a “collective effort” of direct delivery, which means that the people that need it the most will be the ones to receive the aid. “We have a ‘Go Team’ of 24 doctors and rescue specialists including crisis-intervention psychologists, rescuers, and damage-evaluation experts (and one dog!) who can be on the ground anywhere within 24 hours, providing disaster and medical relief and aid. We also have a large base of volunteers that assist with follow-up missions to address the needs of the community in the long term,” with health and dental wellness, shelter, and food – areas often mismanaged or overlooked after the immediate danger has passed.
Under the new leadership of Leanne Gersun Mendelow, Cadena South Africa, based in Johannesburg, will function as a hub for local aid and relief initiatives giving the South African Jewish community the chance to contribute to the lives of fellow South Africans here at home. A volunteer network will partner with local communities in need, bringing Cadena’s ‘hand-in hand’ vision home, in areas such as health, environmental, water and sanitation, food security, education, and others. Michalya Schonwald Moss, an impact consultant working closely with innovative initiatives with a footprint on the African continent and member of Cadena’s Board, was invited to participate in the first Cadena South Africa mission to the Kakuma Refugee Camp in Kenya earlier this year, where the team distributed critical medical supplies to the refugees. “Through organisations like Cadena, the Jewish people will continue to work tirelessly to provide emergency aid and relief for refugees,” she says, finding the need and answering it where possible.
Surgeons for Little Lives is a non-profit organisation run by a group of dedicated paediatric surgeons and ordinary people who all have in common a powerful commitment to saving the lives of sick children. Through their efforts, the Surgeons for Little Lives team is making it possible for very sick children and their families to receive a standard of care that matches the standard of skill of their surgeons.
Afrika Tikkun’s Zolile Malindi Centre in Cape Town is a Community Centre in Mfuleni, an under-resourced area in the Cape Flats, a place where the children are given food, education, after-care services, and skills training. The Centre hosts various programmes for children of the Mfuleni community, including programmes for early childhood development, child and youth development, and skills development.
“Little Givers is about converting the Generation Sinai learning into action – putting the values of chesed and tzedakah into practice, and making an on-going, sustainable difference at some of our wonderful social welfare organisations – doing what we can to help them in their holy work,” says Rabbi Goldstein. In August, Little Givers will be visiting Hatzolah in JHB and Ezra in Cape Town.
Nothing can make a person feel better or warm the heart more than bringing a smile to someone who feels he has lost hope. The Smile Foundation literally, and figuratively, brings a smile to the faces of so many, gifting more than 2,500 with a smile to date. “The Smile Foundation was established as a result of one determined mother’s quest to give her child a smile,” explains Hedley Lewis, CEO. Thando was born with a condition known as Moebius Syndrome, which causes partial or complete facial paralysis. “She had never been able to smile.” Her mother, Thabile, began writing letters every month to former President Nelson Mandela, asking for assistance. One day, fate intervened and one of her letters found its way into Madiba’s personal mail. Touched by her plea, he sought to help this little girl, and contacted Marc Lubner to assist.
At that time there were no surgeons in South Africa with the surgical expertise to perform such a complex procedure and Thando would need to travel to the United States to consult with specialists there. It soon became clear that sending one child overseas at a time was not a sustainable model. Together with the Independent Newspaper Group and a host of other sponsors, Marc Lubner convinced world-renowned specialists, Dr Ron Zucker and Dr Craig van der Kolk, to come to Johannesburg to train Prof George Psaras to perform the intricate surgery, thus enabling him thereafter to help many more children throughout South Africa. Thando received her life-changing surgery and the Smile Fund began. As a result of growth and success of this project, the organisation became a registered Section 21 company in 2007 and the Smile Foundation was born.
|
#!/usr/bin/env python
# future
# standard lib
# third-party
# memlingo
import memlingo.yandex as yandex
# local
# try/except
class WordDictionary:
'''WordDictionary contains all your known words from a given language.
It is a simple dictionary that maps <word, card>.
Attributes
----------
lang: str
Language of the current dictionary.
String must include the target translation, due to Yandex API in form:
"ru-en"
"jp-en"
ID: int
This unique identifier is used by the genanki library. It is needed
because Anki decks, have a unique identifier that can be referred to,
and is important when updating the deck.
words: dict <str, Card>
Contains all the words and relevant information
'''
class Card:
'''Card holds the relevant information on a word.
        Cards hold the required information to build an AnkiCard.
Attributes
----------
word: str
The word itself. How it is written. Equal to the dictionary key.
word_class: str
The class of the word. Can be noun, adjective, adverb, etc.
translations: [str]
A list of all possible translations of the word.
examples: [(str,str)]
A list of tuples, that contain pairs of translated sentences.
dirty_bit: int (0,1)
Used to discern if it's needed to export again.
'''
def __init__(self, word, wclass, translations, examples, bit=0):
self.word = word
self.word_class = wclass
self.translations = translations
self.examples = examples
self.dirty_bit = bit
def __init__(self, language, uniqueID):
self.lang = language
self.ID = uniqueID
self.words = {}
# TODO add_card shouldn't receive an api key
def add_card(self, api_key, word):
'''Adds a new word to the dictionary.
All information on a word is fetched - translations, examples, etc.
In the future, IPA transcribing, and sound files will be available.
Parameters
----------
api_key: str
The Yandex Api Key required to lookup a word.
word: str
The word that the user wants to add to the collection.
Returns
-------
Nil
Side-Effects
------------
Searches for word, gathers relevant information, and then adds the card
to the WordDictionary.
'''
# Word must be encoded to be stored in a valid format (e.g.: Russian,
# Japanese)
utf8word = word.encode('utf-8')
if utf8word in self.words:
# Panic
print("That word as already been added, skipping...\n")
return
jsonie = yandex.lookup(api_key, self.lang, word)
if jsonie is not None:
word = yandex.get_word(jsonie)
word_class = yandex.get_word_class(jsonie)
translations = yandex.get_translations(jsonie)
examples = yandex.get_examples(jsonie)
new_card = self.Card(word, word_class, translations, examples)
self.words[utf8word] = new_card
return
# TODO this shouldn't use api_key as well
def update(self, api_key, words):
'''Update the WordDictionary with given list of words.
Parameters
----------
api_key: str
The Yandex Api Key required to lookup a word.
words: [str]
A list of words that the user wants to add to the collection.
Returns
-------
Nil
Side-Effects
------------
Adds every word that is on Yandex, to the user collection.
'''
word_counter = 0
for word in words:
self.add_card(api_key, word)
word_counter += 1
print(str(word_counter) + " words added to collection\n")
return
def to_print(self):
        '''Prints the dictionary in a pretty manner.
Prints the queried word, wordclass, translations and example
'''
for key in self.words:
print(key.decode('utf-8'))
print(self.words[key].word_class)
print(self.words[key].translations)
print(self.words[key].examples)
print(self.words[key].dirty_bit)
print('\n')
if __name__ == "__main__":
newDick = WordDictionary("ru", 12345)
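    # Example continuation (a hedged sketch): looking up words requires a valid
    # Yandex dictionary API key, so these calls are left commented out. The key
    # string and the sample words are placeholders.
    # newDick.update("YOUR_YANDEX_API_KEY", ["кот", "собака"])
    # newDick.to_print()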
|
Block walls are an ideal place to grow climbing plants. The plants will help conceal the wall and add beauty to your outdoor living space. In order for plants to grow along a block wall, you will need to install wire mesh. Installing wire mesh along a block wall may appear to be difficult. However, if you use the correct tools and fasteners, it is a project that any homeowner can do to add garden space and beautify her outdoor surroundings.
Position the wire mesh panel onto the block wall in the location where you want to attach it.
Mark the perimeter of the wire mesh panel by marking around it with a wax pencil.
Drill 1 1/2-inch-deep holes 2 inches in from the marked outline of the mesh panel, around the entire perimeter, every 4 to 6 inches. Use a hammer drill equipped with a 3/16-inch carbide-tipped bit.
Place the wire mesh panel onto the wall in the desired location. Slide fender washers over the concrete screws.
Drive the screws equipped with fender washers through the wire mesh into the drilled holes in the concrete to fasten it to the block wall using a screw gun.
Paint the mesh the same colour as the block wall to allow it to blend in. This will create a subdued growing space along an unattractive block wall.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# This file is part of MAVlinkplug.
# MAVlinkplug is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# MAVlinkplug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with MAVlinkplug. If not, see <http://www.gnu.org/licenses/>.
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(__name__ == '__main__'):
from mavlinkplug import set_mavlink_dialect
import mavlinkplug.Modules.MavConnection
import mavlinkplug.Modules.FileWriter
import mavlinkplug.Modules.TcpConnection
import mavlinkplug.Plug
set_mavlink_dialect('pixhawk')
#Creating plug
plug = mavlinkplug.Plug.Plug()
plug.start()
#Set a mavlink connection with MAVlink ready devices
mav_con_01 = mavlinkplug.Modules.MavConnection.MavConnection(plug.plug_info(), 'COM3', baud=115200)
#Set a output file
#file_output = mavlinkplug.Modules.FileWriter.FileWriter(plug.plug_info(), 'Test_GroundControl.log')
#Set a connection for GC
gc_connection = mavlinkplug.Modules.TcpConnection.TcpConnection(plug.plug_info(), ('127.0.0.1', 17562), mav_con_01.ident())
#Start all modules
#file_output.start()
gc_connection.start()
mav_con_01.start()
    # Serve forever
plug.server_forever()
|
Missouri US Rep. Calls for Investigation over “Operation Choke Point” – Bestware Inc.
U.S. Rep. Blaine Luetkemeyer, R-Mo., is calling on federal agencies to investigate “Operation Choke Point.” Beginning in 2013, the federal government used Operation Choke Point to refuse banking services to certain industries the government deemed to have “reputational risk.” Because of Operation Choke Point, many payday loan or short-term loan companies found it difficult to maintain a banking relationship even though they had not broken any laws. The Department of Justice initiative officially ended in August 2017.
Rep. Blaine Luetkemeyer has sponsored legislation, H.R. 2706, the Financial Institution Customer Protection Act. The Act requires banks and credit unions to provide written justification if a financial relationship is terminated. It passed the House with bipartisan support (395-2) and is awaiting consideration in the Senate.
This entry was posted in Bestware, Cash Advance Pro, Cash Advance Pro, Payday Lender Legislation by Staff-Writer. Bookmark the permalink.
|
# generated by gen-config.py DO NOT edit
# vim:fileencoding=utf-8
import typing
from kitty.conf.utils import KeyAction, KittensKeyMap
import kitty.conf.utils
from kitty.rgb import Color
import kitty.rgb
from kitty.types import ParsedShortcut
import kitty.types
option_names = ( # {{{
'added_bg',
'added_margin_bg',
'background',
'diff_cmd',
'filler_bg',
'foreground',
'highlight_added_bg',
'highlight_removed_bg',
'hunk_bg',
'hunk_margin_bg',
'map',
'margin_bg',
'margin_fg',
'margin_filler_bg',
'num_context_lines',
'pygments_style',
'removed_bg',
'removed_margin_bg',
'replace_tab_by',
'search_bg',
'search_fg',
'select_bg',
'select_fg',
'syntax_aliases',
'title_bg',
'title_fg') # }}}
class Options:
added_bg: Color = Color(red=230, green=255, blue=237)
added_margin_bg: Color = Color(red=205, green=255, blue=216)
background: Color = Color(red=255, green=255, blue=255)
diff_cmd: str = 'auto'
filler_bg: Color = Color(red=250, green=251, blue=252)
foreground: Color = Color(red=0, green=0, blue=0)
highlight_added_bg: Color = Color(red=172, green=242, blue=189)
highlight_removed_bg: Color = Color(red=253, green=184, blue=192)
hunk_bg: Color = Color(red=241, green=248, blue=255)
hunk_margin_bg: Color = Color(red=219, green=237, blue=255)
margin_bg: Color = Color(red=250, green=251, blue=252)
margin_fg: Color = Color(red=170, green=170, blue=170)
margin_filler_bg: typing.Optional[kitty.rgb.Color] = None
num_context_lines: int = 3
pygments_style: str = 'default'
removed_bg: Color = Color(red=255, green=238, blue=240)
removed_margin_bg: Color = Color(red=255, green=220, blue=224)
replace_tab_by: str = ' '
search_bg: Color = Color(red=68, green=68, blue=68)
search_fg: Color = Color(red=255, green=255, blue=255)
select_bg: Color = Color(red=180, green=213, blue=254)
select_fg: typing.Optional[kitty.rgb.Color] = Color(red=0, green=0, blue=0)
syntax_aliases: typing.Dict[str, str] = {'pyj': 'py', 'pyi': 'py', 'recipe': 'py'}
title_bg: Color = Color(red=255, green=255, blue=255)
title_fg: Color = Color(red=0, green=0, blue=0)
map: typing.List[typing.Tuple[kitty.types.ParsedShortcut, kitty.conf.utils.KeyAction]] = []
key_definitions: KittensKeyMap = {}
config_paths: typing.Tuple[str, ...] = ()
config_overrides: typing.Tuple[str, ...] = ()
def __init__(self, options_dict: typing.Optional[typing.Dict[str, typing.Any]] = None) -> None:
if options_dict is not None:
for key in option_names:
setattr(self, key, options_dict[key])
@property
def _fields(self) -> typing.Tuple[str, ...]:
return option_names
def __iter__(self) -> typing.Iterator[str]:
return iter(self._fields)
def __len__(self) -> int:
return len(self._fields)
def _copy_of_val(self, name: str) -> typing.Any:
ans = getattr(self, name)
if isinstance(ans, dict):
ans = ans.copy()
elif isinstance(ans, list):
ans = ans[:]
return ans
def _asdict(self) -> typing.Dict[str, typing.Any]:
return {k: self._copy_of_val(k) for k in self}
def _replace(self, **kw: typing.Any) -> "Options":
ans = Options()
for name in self:
setattr(ans, name, self._copy_of_val(name))
for name, val in kw.items():
setattr(ans, name, val)
return ans
def __getitem__(self, key: typing.Union[int, str]) -> typing.Any:
k = option_names[key] if isinstance(key, int) else key
try:
return getattr(self, k)
except AttributeError:
pass
raise KeyError(f"No option named: {k}")
defaults = Options()
defaults.map = [
# quit
(ParsedShortcut(mods=0, key_name='q'), KeyAction('quit')),
# quit
(ParsedShortcut(mods=0, key_name='ESCAPE'), KeyAction('quit')),
# scroll_down
(ParsedShortcut(mods=0, key_name='j'), KeyAction('scroll_by', (1,))),
# scroll_down
(ParsedShortcut(mods=0, key_name='DOWN'), KeyAction('scroll_by', (1,))),
# scroll_up
(ParsedShortcut(mods=0, key_name='k'), KeyAction('scroll_by', (-1,))),
# scroll_up
(ParsedShortcut(mods=0, key_name='UP'), KeyAction('scroll_by', (-1,))),
# scroll_top
(ParsedShortcut(mods=0, key_name='HOME'), KeyAction('scroll_to', ('start',))),
# scroll_bottom
(ParsedShortcut(mods=0, key_name='END'), KeyAction('scroll_to', ('end',))),
# scroll_page_down
(ParsedShortcut(mods=0, key_name='PAGE_DOWN'), KeyAction('scroll_to', ('next-page',))),
# scroll_page_down
(ParsedShortcut(mods=0, key_name=' '), KeyAction('scroll_to', ('next-page',))),
# scroll_page_up
(ParsedShortcut(mods=0, key_name='PAGE_UP'), KeyAction('scroll_to', ('prev-page',))),
# next_change
(ParsedShortcut(mods=0, key_name='n'), KeyAction('scroll_to', ('next-change',))),
# prev_change
(ParsedShortcut(mods=0, key_name='p'), KeyAction('scroll_to', ('prev-change',))),
# all_context
(ParsedShortcut(mods=0, key_name='a'), KeyAction('change_context', ('all',))),
# default_context
(ParsedShortcut(mods=0, key_name='='), KeyAction('change_context', ('default',))),
# increase_context
(ParsedShortcut(mods=0, key_name='+'), KeyAction('change_context', (5,))),
# decrease_context
(ParsedShortcut(mods=0, key_name='-'), KeyAction('change_context', (-5,))),
# search_forward
(ParsedShortcut(mods=0, key_name='/'), KeyAction('start_search', (True, False))),
# search_backward
(ParsedShortcut(mods=0, key_name='?'), KeyAction('start_search', (True, True))),
# next_match
(ParsedShortcut(mods=0, key_name='.'), KeyAction('scroll_to', ('next-match',))),
# next_match
(ParsedShortcut(mods=0, key_name='>'), KeyAction('scroll_to', ('next-match',))),
# prev_match
(ParsedShortcut(mods=0, key_name=','), KeyAction('scroll_to', ('prev-match',))),
# prev_match
(ParsedShortcut(mods=0, key_name='<'), KeyAction('scroll_to', ('prev-match',))),
# search_forward_simple
(ParsedShortcut(mods=0, key_name='f'), KeyAction('start_search', (False, False))),
# search_backward_simple
(ParsedShortcut(mods=0, key_name='b'), KeyAction('start_search', (False, True))),
]
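# A minimal usage sketch (not part of the generated module): Options behaves
# like a lightweight record, so copies with overrides can be made via
# _replace() and values can be read by attribute, by name or by index.
#
#   opts = defaults._replace(num_context_lines=5, replace_tab_by='    ')
#   assert opts['num_context_lines'] == 5
#   assert defaults.num_context_lines == 3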
|
Plenty of people lose sleep over code compliance issues. You don’t have to be one of them. You can rely on Tyco Integrated Fire & Security. We don’t just know code requirements in one or two categories; we know every code requirement inside and out because we cover every type of business. We’re active in every significant standards group in the fire and life-safety industry, keeping us on the cutting edge of protection knowledge. Whether it’s NFPA, FM Global, ULC, or provincial and federal codes, we’re constantly working to stay ahead of code changes so we can help our customers anticipate the best solutions.
|
"""Fixer for 'raise E, V'
From Armin Ronacher's ``python-modernize``.
raise -> raise
raise E -> raise E
raise E, 5 -> raise E(5)
raise E, 5, T -> raise E(5).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), 5 -> raise E(5)
raise "foo", V, T -> warns about string exceptions
raise E, (V1, V2) -> raise E(V1, V2)
raise E, (V1, V2), T -> raise E(V1, V2).with_traceback(T)
CAVEATS:
1) "raise E, V, T" cannot be translated safely in general. If V
is not a tuple or a (number, string, None) literal, then:
raise E, V, T -> from future.utils import raise_
raise_(E, V, T)
"""
# Author: Collin Winter, Armin Ronacher, Mark Huang
# Local imports
from lib2to3 import pytree, fixer_base
from lib2to3.pgen2 import token
from lib2to3.fixer_util import Name, Call, is_tuple, Comma, Attr, ArgList
from libfuturize.fixer_util import touch_import_top
class FixRaise(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type == token.STRING:
msg = "Python 3 does not support string exceptions"
self.cannot_convert(node, msg)
return
# Python 2 supports
# raise ((((E1, E2), E3), E4), E5), V
# as a synonym for
# raise E1, V
# Since Python 3 will not support this, we recurse down any tuple
# literals, always taking the first element.
if is_tuple(exc):
while is_tuple(exc):
# exc.children[1:-1] is the unparenthesized tuple
# exc.children[1].children[0] is the first element of the tuple
exc = exc.children[1].children[0].clone()
exc.prefix = u" "
if "tb" in results:
tb = results["tb"].clone()
else:
tb = None
if "val" in results:
val = results["val"].clone()
if is_tuple(val):
# Assume that exc is a subclass of Exception and call exc(*val).
args = [c.clone() for c in val.children[1:-1]]
exc = Call(exc, args)
elif val.type in (token.NUMBER, token.STRING):
# Handle numeric and string literals specially, e.g.
# "raise Exception, 5" -> "raise Exception(5)".
val.prefix = u""
exc = Call(exc, [val])
elif val.type == token.NAME and val.value == u"None":
# Handle None specially, e.g.
# "raise Exception, None" -> "raise Exception".
pass
else:
# val is some other expression. If val evaluates to an instance
# of exc, it should just be raised. If val evaluates to None,
# a default instance of exc should be raised (as above). If val
# evaluates to a tuple, exc(*val) should be called (as
# above). Otherwise, exc(val) should be called. We can only
# tell what to do at runtime, so defer to future.utils.raise_(),
# which handles all of these cases.
touch_import_top(u"future.utils", u"raise_", node)
exc.prefix = u""
args = [exc, Comma(), val]
if tb is not None:
args += [Comma(), tb]
return Call(Name(u"raise_"), args)
if tb is not None:
tb.prefix = ""
exc_list = Attr(exc, Name('with_traceback')) + [ArgList([tb])]
else:
exc_list = [exc]
return pytree.Node(syms.raise_stmt,
[Name(u"raise")] + exc_list,
prefix=node.prefix)
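# A minimal usage sketch (not part of the original fixer): running it over a
# string of Python 2 source with lib2to3's RefactoringTool. The fixer module
# path "libfuturize.fixes.fix_raise" is an assumption about where this file
# is installed.
#
#   from lib2to3.refactor import RefactoringTool
#   tool = RefactoringTool(["libfuturize.fixes.fix_raise"])
#   tree = tool.refactor_string(u"raise ValueError, 'bad value'\n", "<example>")
#   print(str(tree))  # -> raise ValueError('bad value')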
|
An instructional designer by profession and a storyteller by passion is how I describe myself. “Curating Memories, Weaving Stories” is all that I am trying to do. Trina Looks Back is all about looking back at my life and curating memories to weave stories.
Once you join me on my blog, I take you on an unforgettable journey through my life. My blog is my window to tell the world my stories. Here you will meet numerous people and characters. Some of them you would probably identify with and some you wouldn’t. Nevertheless, you will find a connection with all of them. Apart from memoirs, join me for stories, anecdotes, journeys, emotions, sentiments, images, imageries and travel tales. This blog is like that streetside corner cafe where you sit with a cup of coffee and quietly watch the world as a passerby. I want you to relax here, and when you leave you should feel happy and content.
Trina is my pen name and, in a way, my alter ego. Since childhood, stories have always attracted me, and I was often caught weaving imaginary ones. Those were my first figments of imagination. A Master's degree in Comparative Literature and an M.Phil in Women’s Studies further encouraged me to imagine. My syllabus allowed me to read stories from across the world and from different perspectives. I also have a degree in Mass Communication.
Apart from all this, I am also a proud mother of a very naughty and car-crazy boy. A loner and introvert, I love talking to myself.
My book of memoirs is available on Amazon. Follow the link below to buy.
Thank you for visiting my blog. Please leave a comment as I love reading them.
|
# coding=utf-8
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import flask
import tornado.wsgi
from sockjs.tornado import SockJSRouter
from flask import Flask, render_template, send_from_directory, make_response
from flask.ext.login import LoginManager
from flask.ext.principal import Principal, Permission, RoleNeed, identity_loaded, UserNeed
import os
import logging
import logging.config
SUCCESS = {}
NO_CONTENT = ("", 204)
app = Flask("octoprint")
debug = False
printer = None
gcodeManager = None
userManager = None
eventManager = None
loginManager = None
wifiManager = None
wifiInterface = "wlan0"
principals = Principal(app)
admin_permission = Permission(RoleNeed("admin"))
user_permission = Permission(RoleNeed("user"))
# only import the octoprint stuff down here, as it might depend on things defined above to be initialized already
from octoprint.server.util import LargeResponseHandler, ReverseProxied, restricted_access, PrinterStateConnection, admin_validator
from octoprint.printer import Printer, getConnectionOptions
from octoprint.settings import settings
import octoprint.gcodefiles as gcodefiles
import octoprint.util as util
import octoprint.users as users
import octoprint.events as events
import octoprint.timelapse
import octoprint._version
import octoprint.wifi as wifi
versions = octoprint._version.get_versions()
VERSION = versions['version']
BRANCH = versions['branch'] if 'branch' in versions else None
DISPLAY_VERSION = "%s (%s branch)" % (VERSION, BRANCH) if BRANCH else VERSION
del versions
@app.route("/")
def index():
return render_template(
"index.jinja2",
webcamStream=settings().get(["webcam", "stream"]),
enableTimelapse=(settings().get(["webcam", "snapshot"]) is not None and settings().get(["webcam", "ffmpeg"]) is not None),
enableGCodeVisualizer=settings().get(["gcodeViewer", "enabled"]),
enableTemperatureGraph=settings().get(["feature", "temperatureGraph"]),
enableSystemMenu=settings().get(["system"]) is not None and settings().get(["system", "actions"]) is not None and len(settings().get(["system", "actions"])) > 0,
enableAccessControl=userManager is not None,
enableSdSupport=settings().get(["feature", "sdSupport"]),
enableNetworkSettings = settings().get(["feature", "networkSettings"]),
firstRun=settings().getBoolean(["server", "firstRun"]) and (userManager is None or not userManager.hasBeenCustomized()),
debug=debug,
version=VERSION,
display_version=DISPLAY_VERSION,
stylesheet=settings().get(["devel", "stylesheet"]),
gcodeMobileThreshold=settings().get(["gcodeViewer", "mobileSizeThreshold"]),
gcodeThreshold=settings().get(["gcodeViewer", "sizeThreshold"])
)
@app.route("/robots.txt")
def robotsTxt():
return send_from_directory(app.static_folder, "robots.txt")
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
user = load_user(identity.id)
if user is None:
return
identity.provides.add(UserNeed(user.get_name()))
if user.is_user():
identity.provides.add(RoleNeed("user"))
if user.is_admin():
identity.provides.add(RoleNeed("admin"))
def load_user(id):
if userManager is not None:
return userManager.findUser(id)
return users.DummyUser()
#~~ startup code
class Server():
def __init__(self, configfile=None, basedir=None, host="0.0.0.0", port=5000, debug=False, allowRoot=False):
self._configfile = configfile
self._basedir = basedir
self._host = host
self._port = port
self._debug = debug
self._allowRoot = allowRoot
def run(self):
if not self._allowRoot:
self._checkForRoot()
global printer
global gcodeManager
global userManager
global eventManager
global loginManager
global debug
global wifiManager
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, FallbackHandler
debug = self._debug
# first initialize the settings singleton and make sure it uses given configfile and basedir if available
self._initSettings(self._configfile, self._basedir)
# then initialize logging
self._initLogging(self._debug)
logger = logging.getLogger(__name__)
logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)
eventManager = events.eventManager()
gcodeManager = gcodefiles.GcodeManager()
printer = Printer(gcodeManager)
wifiManager = wifi.WifiManager(printer)
# configure timelapse
octoprint.timelapse.configureTimelapse()
# setup command triggers
events.CommandTrigger(printer)
if self._debug:
events.DebugEventListener()
if settings().getBoolean(["accessControl", "enabled"]):
userManagerName = settings().get(["accessControl", "userManager"])
try:
clazz = util.getClass(userManagerName)
userManager = clazz()
except AttributeError, e:
logger.exception("Could not instantiate user manager %s, will run with accessControl disabled!" % userManagerName)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.secret_key = "k3PuVYgtxNm8DXKKTw2nWmFQQun9qceV"
loginManager = LoginManager()
loginManager.session_protection = "strong"
loginManager.user_callback = load_user
if userManager is None:
loginManager.anonymous_user = users.DummyUser
principals.identity_loaders.appendleft(users.dummy_identity_loader)
loginManager.init_app(app)
if self._host is None:
self._host = settings().get(["server", "host"])
if self._port is None:
self._port = settings().getInt(["server", "port"])
logger.info("Listening on http://%s:%d" % (self._host, self._port))
app.debug = self._debug
from octoprint.server.api import api
app.register_blueprint(api, url_prefix="/api")
self._router = SockJSRouter(self._createSocketConnection, "/sockjs")
def admin_access_validation(request):
"""
Creates a custom wsgi and Flask request context in order to be able to process user information
stored in the current session.
:param request: The Tornado request for which to create the environment and context
"""
wsgi_environ = tornado.wsgi.WSGIContainer.environ(request)
with app.request_context(wsgi_environ):
app.session_interface.open_session(app, flask.request)
loginManager.reload_user()
admin_validator(flask.request)
self._tornado_app = Application(self._router.urls + [
(r"/downloads/timelapse/([^/]*\.mpg)", LargeResponseHandler, {"path": settings().getBaseFolder("timelapse"), "as_attachment": True}),
(r"/downloads/files/local/([^/]*\.(gco|gcode|g))", LargeResponseHandler, {"path": settings().getBaseFolder("uploads"), "as_attachment": True}),
(r"/downloads/logs/([^/]*)", LargeResponseHandler, {"path": settings().getBaseFolder("logs"), "as_attachment": True, "access_validation": admin_access_validation}),
(r".*", FallbackHandler, {"fallback": WSGIContainer(app.wsgi_app)})
])
self._server = HTTPServer(self._tornado_app)
self._server.listen(self._port, address=self._host)
eventManager.fire(events.Events.STARTUP)
if settings().getBoolean(["serial", "autoconnect"]):
(port, baudrate) = settings().get(["serial", "port"]), settings().getInt(["serial", "baudrate"])
connectionOptions = getConnectionOptions()
if port in connectionOptions["ports"]:
printer.connect(port, baudrate)
try:
IOLoop.instance().start()
except KeyboardInterrupt:
logger.info("Goodbye!")
except:
logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
logger.exception("Stacktrace follows:")
def _createSocketConnection(self, session):
global printer, gcodeManager, userManager, eventManager
return PrinterStateConnection(printer, gcodeManager, userManager, eventManager, session)
def _checkForRoot(self):
if "geteuid" in dir(os) and os.geteuid() == 0:
exit("You should not run OctoPrint as root!")
def _initSettings(self, configfile, basedir):
settings(init=True, basedir=basedir, configfile=configfile)
def _initLogging(self, debug):
config = {
"version": 1,
"formatters": {
"simple": {
"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"file": {
"class": "logging.handlers.TimedRotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"when": "D",
"backupCount": "1",
"filename": os.path.join(settings().getBaseFolder("logs"), "octoprint.log")
},
"serialFile": {
"class": "logging.handlers.RotatingFileHandler",
"level": "DEBUG",
"formatter": "simple",
"maxBytes": 2 * 1024 * 1024, # let's limit the serial log to 2MB in size
"filename": os.path.join(settings().getBaseFolder("logs"), "serial.log")
}
},
"loggers": {
#"octoprint.timelapse": {
# "level": "DEBUG"
#},
#"octoprint.events": {
# "level": "DEBUG"
#},
"SERIAL": {
"level": "CRITICAL",
"handlers": ["serialFile"],
"propagate": False
}
},
"root": {
"level": "INFO",
"handlers": ["console", "file"]
}
}
if debug:
config["root"]["level"] = "DEBUG"
logging.config.dictConfig(config)
if settings().getBoolean(["serial", "log"]):
# enable debug logging to serial.log
logging.getLogger("SERIAL").setLevel(logging.DEBUG)
logging.getLogger("SERIAL").debug("Enabling serial logging")
if __name__ == "__main__":
octoprint = Server()
octoprint.run()
|
The earlier 10 similar transplants from dead donors attempted in the US, the Czech Republic and Turkey failed or resulted in a miscarriage. © Shutterstock.
In a medical milestone, the world’s first baby has been born using a womb transplanted from a dead woman, a Lancet study says.
The womb transplant, which lasted over 10 hours, took place in São Paulo, Brazil, in September 2016. The baby was born in December 2017.
The uterus was removed from the donor and transplanted into the recipient in a surgery that also involved connecting the veins, arteries, ligaments and vaginal canals of the donor uterus and the recipient.
The earlier 10 similar transplants from dead donors attempted in the US, the Czech Republic and Turkey failed or resulted in a miscarriage. However, for the 32-year-old recipient, born with Mayer-Rokitansky-Kuster-Hauser (MRKH) syndrome, this attempt proved successful.
“The use of deceased donors could greatly broaden access to this treatment, and our results provide proof-of-concept for a new option for women with uterine infertility,” said lead author Dani Ejzenberg, Hospital das Clinicas, Faculdade de Medicina da Universidade de Sao Paulo.
Since the recipient’s ovaries were fine, she underwent one in-vitro fertilisation (IVF) cycle four months before the transplant, Ejzenberg said.
Later the doctors fertilised her eggs with the father-to-be’s sperm and froze them.
The donor had died of subarachnoid haemorrhage — a type of stroke involving bleeding on the surface of the brain.
The baby girl was born via caesarean section at 35 weeks and three days and weighed 2.5 kilograms. The transplanted uterus was removed during the caesarean section and showed no anomalies, the study showed.
|
# -*- encoding: utf-8 -*-
"""
Used to infer some health related reports.
Use with caution, this code may contain errors!
Copyright (C) 2014, 2016 Christian Stigen Larsen
Distributed under the GPL v3 or later. See COPYING.
"""
from dna_traits.match import unphased_match, assert_european
from dna_traits.util import make_report
import dna_traits.odds as odds
def apoe_variants(genome):
"""APOE-variants (Alzheimer's)."""
rs429358 = genome.rs429358
rs7412 = genome.rs7412
# If both SNPs are phased we can resolve all ambiguities, and finding
# APOe-variants are straight-forward
if rs429358.phased and rs7412.phased:
assert(len(rs429358)==len(rs7412)==2)
apoe = {"CT": "e1",
"TT": "e2",
"TC": "e3",
"CC": "e4"}
variant = []
for n in [0,1]:
variant.append(apoe[str(rs429358)[n] + str(rs7412)[n]])
return "/".join(sorted(variant))
else:
# At least one SNP is non-phased; we can guess the result in all but
# one case
genotypes = "".join(sorted(str(rs429358)))
genotypes += "".join(sorted(str(rs7412)))
variants = {
"CCCC": "e4/e4",
"CCCT": "e1/e4",
"CCTT": "e1/e1",
"CTCC": "e3/e4",
"CTCT": "e1/e3 or e2/e4", # ambiguous
"CTTT": "e1/e2",
"TTCC": "e3/e3",
"TTCT": "e2/e3",
"TTTT": "e2/e2",
}
try:
return variants[genotypes]
except KeyError:
return "<Unknown variant: %s>" % genotypes
def rheumatoid_arthritis_risk(genome):
"""Rheumatoid arthritis."""
raise NotImplementedError()
OR = 0
# FIXME: Fix the OR calculation, it's a complete mess right now
# (attempt to use Mantel-Haenszel instead).
#
# We currently just give a score for each risk allele instead and give
# an thumbs up / down rating.
# These are not applicable for Asians
if genome.ethnicity == "european":
OR += genome.rs6457617.count("T")
if genome.rs2476601 == "GG": OR -= 1
if genome.rs3890745 == "CC": OR += -1
if genome.rs2327832 == "AG": OR += -1
# Only Europeans
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2636867/
OR += genome.rs3761847.count("G")
if genome.rs7574865 == "TT": OR += 1
if genome.rs1569723 == "AA": OR += 1
if genome.rs13031237 == "GT": OR += 1
# TODO: Implement rest, ignore scores, just give a "low/medium/high"
# OR.
if OR <= 2:
return "low risk??"
elif OR <= 4:
return "medium risk??"
else:
return "high risk??"
def chronic_kidney_disease(genome):
"""Chronic kidney disease (CKD).
Citations:
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=21082022
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=20686651
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=19430482
"""
# List of (OR, CI, P-value, variance inflaction factor)
ors = []
# Taken from the following GWAS:
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2912386/#pgen.1001039-Gretarsdottir2
if genome.ethnicity is None or genome.ethnicity=="european":
# TODO: Find out if the OR is per T-allele or just for the existence
# of one. Here I just see if there is one or more.
if genome.rs4293393.negative().count("T") > 0:
if genome.year_of_birth is None:
ors.append((1.25, 0.95, 4.1e-10, 1.15))
else:
# Stratified OR. Honestly, I think the P-values seem WAY too
# high for births later than 1940.
if genome.year_of_birth < 1920:
ors.append((1.19, 0.95, 0.045, 1.15))
elif genome.year_of_birth < 1930:
ors.append((1.31, 0.95, 4.1e-7, 1.15))
elif genome.year_of_birth < 1940:
ors.append((1.28, 0.95, 3.1e-5, 1.15))
elif genome.year_of_birth < 1950:
ors.append((1.16, 0.95, 0.12, 1.15))
else:
ors.append((1.09, 0.95, 0.57, 1.15))
# Taken from:
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2997674/
if genome.ethnicity is None or genome.ethnicity=="european":
# Table 3:
if genome.rs7805747.count("A") > 0:
ors.append((1.19, 0.95, 4.2e-12, None))
pass
if len(ors) > 0:
ORs = [d[0] for d in ors]
pvals = [d[2] for d in ors]
OR_mh, se, pp = odds.pooled_or(zip(ORs, pvals), 1.15)
rr = odds.relative_risk(OR_mh, 0.034)
return "%.2f relative risk, %.2f odds ratio (%d markers)" % (rr, OR_mh, len(ors))
else:
return "<No data>"
"""
rs4293393 AA european OR 1.08 (adjusted)
rs7805747 AG european OR 1.14 (adjusted)
rs7805747 AG european OR 0.96 (adjusted)
From:
http://www.plosgenetics.org/article/fetchObject.action?uri=info%3Adoi%2F10.1371%2Fjournal.pgen.1001039&representation=PDF
rs4293393-T associated with CKD, OR=1.25, P=4.1e-10. Association
stronger with older age groups. CI=1.17-1.35 (95%), N=3203 (no of cases)
Disregard year of birth (stronger association with old age).
See Köttgen.
    Not sure if PER T-allele. Only think it's the existence of this allele.
Also, is it minus orientation?
SNPedia says, each G at this allele (actually A because snpedia uses
minus orientation) decrease risk with 24%.
From dbsnp, http://www.ncbi.nlm.nih.gov/SNP/snp_ref.cgi?rs=4293393
it seems that the illumina hapmap300 used in the study uses minus
orientation, because it can only have C/T alleles, while 23andme reports
the A-allele. So this means that A(+) or T(-) is the risk allele.
The reverse version (G+, C-) is protective of CKD actually.
Says:
Association analysis
For case-control association analysis, e.g. for CKD and kidney stones,
we utilized a standard likelihood ratio statistic, implemented in the
NEMO software [32] to calculate two-sided P values and odds ratios (ORs)
for each individual allele, assuming a multiplicative model for risk,
i.e. that the risk of the two alleles carried by a person multiplies
[36]. Allelic frequencies, rather than carrier frequencies, are
presented for the markers and P values are given after adjustment for
the relatedness of the subjects. When estimating genotype specific OR,
genotype frequencies in the population were estimated assuming
Hardy-Weinberg equilibrium.
Results from multiple case-control groups were combined using a
Mantel-Haenszel model [37] in which the groups were allowed to have
different population frequencies for alleles, haplotypes and genotypes
but were assumed to have common relative risks.
For the quantitative trait association analysis, e.g. for SCr and
cystatin C, we utilized a robust linear regression based on an M
estimator [38] as implemented in the rlm function of the R software
package [39]. An additive model for SNP effects was assumed in all
instances. All associations with quantitative traits were performed
adjusting for age and sex.
"""
def restless_leg_syndrome(genome):
"""Restless leg syndrome.
Only for European ancestry.
rs3923809 AA 1.26
AG 0.74
Citations:
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=17634447
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=17637780
http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?cmd=Search&db=PubMed&term=11340155
"""
if genome.rs3923809 == "GG":
return "Normal risk"
elif genome.rs3923809 == "AG" or genome.rs3923809 == "GA":
return "Slightly increased risk"
elif genome.rs3923809 == "AA":
return "Twice as high risk for developing"
else:
return "<Unknown genotype for rs3923809 %s>" % genome.rs3923809
def scleroderma(genome):
"""Scleroderma (limited cutaneous type)."""
# TODO: Implement odds ratios, find all alleles
if genome.ethnicity is None or genome.ethnicity == "european":
if genome.rs7574865 == "TT":
return "Higher odds"
if genome.rs7574865.count("T") > 0:
return "Slight risk"
return "<Unknown>"
else:
return "<Unknown for this ethnicity>"
def hypothyroidism(genome):
"""Hypothyroidism.
Studies:
http://dx.doi.org/10.1371/journal.pone.0034442
"""
if genome.ethnicity is not None and genome.ethnicity != "european":
raise ValueError("Only applicable to Europeans")
# TODO: Use a better score metric and use weighting and ORs.
# TODO: Try to use interval arithmetic as well, for fun.
scores = {
"rs7850258": {"GG": 0.5, "AG": 0, "AA": -0.5, None: 0},
"rs2476601": {"GG": 1, "AG": 0.5, "AA": 0, None: 0},
"rs3184504": {"TT": 0.5, "CT": 0, "CC": -0.5, None: 0},
"rs4915077": {"CC": 1, "CT": 0.5, "TT": 0, None: 0},
"rs2517532": {"GG": 0.5, "AG": 0, "AA": -0.5, None: 0},
}
hi = sum(map(lambda l: max(l.values()), scores.values()))
lo = sum(map(lambda l: min(l.values()), scores.values()))
score = 0.0
for rsid, genotypes in scores.items():
score += unphased_match(genome[rsid], genotypes)
if score > 0:
s = "About %.1f%% higher risk than baseline\n" % (100.0*score/hi)
s += "(%.1f vs %.1f of %.1f points)\n" % (score, lo, hi)
s += "Test is unweighted, see 23andMe for more info"
return s
elif score < 0:
s = "About %.1f%% lower risk than baseline\n" % 100.0*score/lo
s += "(%.1f vs %.1f of %.1f points)\n" % (score, lo, hi)
s += "Test is unweighted, see 23andMe for more info"
return s
else:
return "Typical risk"
def stroke(genome):
"""Stroke."""
return unphased_match(genome.rs12425791, {
"AA": "Moderately increased risk of having a stroke",
"AG": "Slightly increased risk of having a stroke",
"GG": "Typical risk of having a stroke",
None: "Unable to determine"})
def exfoliation_glaucoma(genome):
"""Exfoliation glaucoma."""
assert_european(genome)
OR = unphased_match(genome.rs2165241, {
"CT": 0.79,
})
raise NotImplementedError()
def migraines(genome):
"""Migranes."""
assert_european(genome)
s = []
s.append(unphased_match(genome.rs2651899, {
"CC": "Slightly higher odds of migraines",
"CT": "Typical odds of migraines",
"TT": "Slightly lower odds of migraines",
None: "Unable to determine"}))
s.append(unphased_match(genome.rs10166942, {
"TT": "Typical odds of migraines",
"CT": "Slightly lower odds of migraines",
"CC": "Slightly lower odds of migraines",
None: "Unable to determine"}))
s.append(unphased_match(genome.rs11172113, {
"TT": "Slightly higher odds of migraines",
"CT": "Typical odds of migraines",
"CC": "Slightly lower odds of migraines",
None: "Unable to determine"}))
return "\n".join(s)
def breast_cancer(genome):
"""Breast cancer."""
if not genome.female:
raise ValueError("Only applicable for females")
s = []
s.append(unphased_match(genome.rs1219648, {
"AA": "Typical odds",
"AG": "Slightly higher odds",
"GG": "Moderately higher odds",
None: "Unable to determine (see rs2420946 instead)"}))
s.append(unphased_match(genome.rs3803662, {
"AA": "Moderately increased odds",
"AG": "?",
"GG": "Typical odds",
None: "Unable to determine"}))
s.append("Note: There are MANY more SNPs to test here...")
# TODO: Add remaining SNPs
return "\n".join(s)
def health_report(genome):
"""Infers some health results."""
return make_report(genome, [
apoe_variants,
breast_cancer,
chronic_kidney_disease,
hypothyroidism,
migraines,
restless_leg_syndrome,
rheumatoid_arthritis_risk,
scleroderma,
stroke,
])
|
Trinity Lutheran Church on the corner of Parent and Giles Avenue East has been recommended by Windsor’s Heritage Committee to be placed on the City’s Heritage Registry.
The church, built in 1947, is designed in the Gothic Revival style and features a stone exterior, stained glass windows and heavy use of wood.
Once a property is placed on the Heritage Register, owners looking to demolish must give the City of Windsor sixty days' notice and have the demolition approved by City Council.
|
#!/usr/bin/env python
"""
Author: Sourabh Dube
Make XML files for one channel, with the right uncertainties
"""
import os,sys,commands,subprocess
import argparse
import ROOT
from ROOT import TH1F,TFile
def SetupWorkspace(backgrounds,
sign,
data,
lumiuncer,
discovery,
uncertainty):
if discovery:
opprefix = "DP_onechan_discovery_"
rootfile = "counting_exp_data_discovery_DP.root"
chanfile = "DP_onechan_discovery.xml"
else:
opprefix = "DP_onechan_limit_"
rootfile = "counting_exp_data_limit_DP.root"
chanfile = "DP_onechan_limit.xml"
#
# Write Main Top XML file
#
mainXMLdata = """\
<!DOCTYPE Combination SYSTEM "../share/HistFactorySchema.dtd">
<Combination OutputFilePrefix="./tmp_limits_results/%s" >
<Input>./tmp_limits/%s</Input>
<Measurement Name="DPLSMM" Lumi="1." LumiRelErr="%f" BinLow="0" BinHigh="2" >
<POI>mu</POI>
</Measurement>
</Combination>
""" % (opprefix, chanfile, lumiuncer)
if discovery:
script = open('tmp_limits/top_discovery.xml','w')
else:
script = open('tmp_limits/top_limit.xml','w')
script.write(mainXMLdata)
script.close()
#
# Write Channel XML
#
chanXMLdata = """\
<!DOCTYPE Channel SYSTEM '../share/HistFactorySchema.dtd'>
<Channel Name="channel1" InputFile="./tmp_limits_data/%s">
<Data HistoName="data" HistoPath="" />
<Sample Name="signal" HistoPath="" HistoName="signal">
<NormFactor Name="mu" High="20." Low="0." Val="1." Const="True" />
</Sample>
""" % rootfile
# <OverallSys Name="lumi" High="1.028" Low="0.972" />
# <OverallSys Name="PDFacc" High="1.05" Low="0.95" />
# <OverallSys Name="acc_truth" High="1.15" Low="0.85" />
setupWSfile = TFile("tmp_limits_data/%s" % rootfile,"RECREATE")
doSingleBGModel=False
if not doSingleBGModel:
for key,value in backgrounds.iteritems():
chanXMLdata+="""\
<Sample Name="%s" HistoPath="" NormalizeByTheory="True" HistoName="%s">
<OverallSys Name="%s" Low="%f" High="%f"/>
</Sample>
""" % (key,key,key+"_norm",1.-float(uncertainty),1.+float(uncertainty))
hist = TH1F(key,key+" hist",1,0,1)
hist.Fill(0.5,value)
hist.Write(key)
else:
BGtotal=0
for key,value in backgrounds.iteritems():
BGtotal+=value
key="BG"
hist = TH1F(key,key+" hist",1,0,1)
hist.Fill(0.5,BGtotal)
hist.Write(key)
chanXMLdata+="""\
<Sample Name="%s" HistoPath="" NormalizeByTheory="True" HistoName="%s">
<OverallSys Name="%s" Low="%f" High="%f"/>
</Sample>
""" % (key,key,key+"_norm",1.-float(uncertainty),1.+float(uncertainty))
chanXMLdata+="""\
</Channel>
"""
script = open('tmp_limits/'+chanfile,'w')
script.write(chanXMLdata)
script.close()
hist = TH1F("signal", "signal hist", 1,0,1)
hist.Fill(0.5,sign)
hist.Write("signal")
hist = TH1F("data", "data hist", 1,0,1)
hist.Fill(0.5,data)
hist.Write("data")
setupWSfile.Close()
if discovery:
os.system("hist2workspace tmp_limits/top_discovery.xml > tmp_limits/setup_discovery.log 2>&1")
else:
os.system("hist2workspace tmp_limits/top_limit.xml > tmp_limits/setup_limit.log 2>&1")
def run_limit(line,
backgrounds,
lumiuncer,
toys,
points,
mulow,
muhigh,
uncertainty):
cleanup = """\
mkdir -p tmp_limits
mkdir -p tmp_limits_data
mkdir -p tmp_limits_results
rm -f tmp_limits/*
rm -f tmp_limits_data/*
rm -f tmp_limits_results/*
"""
os.system(cleanup)
fullcls = 0
if toys>0:
fullcls = 1
# figure out how much signal we have
words_list = line.split()
label=words_list[0]
sign=float(words_list[1])
# and how much background
totalbg=0
for key,value in backgrounds.iteritems():
totalbg = totalbg+value
data=totalbg
# quick check to see if we should even bother with limits.
if sign <= 1.:
if (fullcls==0):
print "%s : -2sig = %1.4f, -1sig = %1.4f, Median Exp = %1.4f, +1sig = %1.4f, +2sig = %1.4f, p0 = %1.3e (%1.4f sigma)" % (label,
10,
10,
10,
10,
10,
10,
0);
else:
print data,sign,'==RESFRQ==',10,10,10,10,10,10
return
scale=1.
# This does nasty things in the WinoBino grid... found it necessary for
# the GMSB signals, but not so much here.
if False:
if sign > 1000*totalbg:
scale = 3000.
elif sign > 100*totalbg:
scale = 300.
elif sign > 10*totalbg:
scale = 30.
elif sign > totalbg:
scale = 3.
sign = sign/scale
print "setting up workspace with %f signal events %f background events." % (sign,totalbg)
SetupWorkspace(backgrounds,sign,data,lumiuncer,False,uncertainty)
SetupWorkspace(backgrounds,sign,data+sign,lumiuncer,True,uncertainty)
cmd2 = """\
./bin/runCEws -f %i -t %i -p %i -l %f -h %f >& tmp_limits/limit.log
""" % (fullcls,toys,points,mulow,muhigh)
#print cmd2
os.system(cmd2)
cmd3 = """\
grep "==RESULT==" tmp_limits/limit.log
"""
cmd4 = """\
grep "computed upper limit" tmp_limits/limit.log | awk '{print $6}'
"""
cmd5 = """\
grep "expected limit (median) " tmp_limits/limit.log | awk '{print $4}'
"""
cmd6 = """\
grep "expected limit (+1 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd7 = """\
grep "expected limit (-1 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd8 = """\
grep "expected limit (+2 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd9 = """\
grep "expected limit (-2 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
if (fullcls==0):
# os.system(cmd3)
p = os.popen(cmd3)
res = p.readline()
ressplit = res.split()
p.close()
printEventLimits=True
if not printEventLimits:
scale=(1/scale)*100.
else:
scale=(1/scale)
p = os.popen("grep \"DISCOVERY\" tmp_limits/limit.log" )
res2 = p.readline()
res2split = res2.split()
p.close()
if len(res2split) > 2:
if float(res2split[1]) < 1e-20:
res2split[2] = "10"
if len(res2split) > 2 and len(ressplit) > 6:
print "%s : -2sig = %1.4f, -1sig = %1.4f, Median Exp = %1.4f, +1sig = %1.4f, +2sig = %1.4f, p0 = %1.3e (%1.4f sigma)" % (label,
scale*float(ressplit[6]),
scale*float(ressplit[4]),
scale*float(ressplit[2]),
scale*float(ressplit[3]),
scale*float(ressplit[5]),
float(res2split[1]),
float(res2split[2]));
else:
p = os.popen(cmd4)
res1 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd5)
res2 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd6)
res3 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd7)
res4 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd8)
res5 = (p.readline()).rstrip()
p.close()
p = os.popen(cmd9)
res6 = (p.readline()).rstrip()
p.close()
print data,sign,'==RESFRQ==',res1,res2,res3,res4,res5,res6
def SetupWorkspaceOpt(optresults,
lumiuncer,
discovery,
uncertainty,
flatBGUnc,
useSingleBGModel):
if discovery:
opprefix = "susy_discovery_"
else:
opprefix = "susy_limit_"
#
# Write Main Top XML file
#
if discovery:
script = open('tmp_limits/top_discovery.xml','w')
else:
script = open('tmp_limits/top_limit.xml','w')
script.write("""\
<!DOCTYPE Combination SYSTEM "../share/HistFactorySchema.dtd">
<Combination OutputFilePrefix="./tmp_limits_results/%s" >
""" % opprefix)
# --------------------------------------------------------
# parse optresults
goodchannels=0
forcetoys=False
for line in open(optresults,"r"):
l=line.split()
if float(l[3])<0.5:
continue
else:
goodchannels=goodchannels+1
rootfile=opprefix+"_chan_"+l[2]+".root"
chanfile="chan_%s.xml" % l[2]
script.write("""
<Input>./tmp_limits/%s</Input>
""" % chanfile)
# write the channel data
chan=open("./tmp_limits/%s" % chanfile, 'w')
chan.write("""\
<!DOCTYPE Channel SYSTEM '../share/HistFactorySchema.dtd'>
<Channel Name="channel_%s" InputFile="./tmp_limits_data/%s">
<Data HistoName="data" HistoPath="" />
<Sample Name="signal" HistoPath="" HistoName="signal">
<NormFactor Name="mu" High="20." Low="0." Val="1." Const="True" />
</Sample>
""" % (l[2],rootfile))
setupWSfile = TFile("tmp_limits_data/%s" % rootfile,"RECREATE")
bglabels=["Bj", "tt", "tB", "tj", "ttB"]
if "100TeV" in optresults:
bglabels+=["QCD"]
totalbg=0.
for i in range(len(bglabels)):
# only do this if the backgrounds are non-zero
if float(l[i+7]) > 0.00:
bgval=float(l[i+7])
totalbg+=bgval
if useSingleBGModel:
continue
hist = TH1F(bglabels[i],bglabels[i]+" hist",1,0,1)
if bgval<0.001:
bgval=0.001
#hist.Fill(0.5,bgval)
hist.SetBinContent(1,bgval)
hist.Write(bglabels[i])
chan.write("""\
<Sample Name="%s" HistoPath="" NormalizeByTheory="True" HistoName="%s">
""" % (bglabels[i],bglabels[i]))
if bglabels[i]!="Bj" or flatBGUnc:
chan.write("""\
<OverallSys Name="%s" Low="%f" High="%f"/>
""" % (bglabels[i]+"_norm",1.-float(uncertainty),1.+float(uncertainty)))
else:
reluncZll=((bgval*0.5)**0.5)/(bgval*0.5)
if ((reluncZll**2.+(float(uncertainty)/2.)**2.)**0.5)<float(uncertainty):
chan.write("""\
<OverallSys Name="%s_bin_%s" Low="%f" High="%f"/>
<OverallSys Name="%s" Low="%f" High="%f"/>
""" % (bglabels[i]+"_Zll",l[2],1.-reluncZll,1.+reluncZll, bglabels[i]+"_norm",1.-float(uncertainty)/2.,1.+float(uncertainty)/2.))
else:
chan.write("""\
<OverallSys Name="%s" Low="%f" High="%f"/>
""" % (bglabels[i]+"_norm",1.-float(uncertainty),1.+float(uncertainty)))
chan.write("""\
</Sample>
""")
if useSingleBGModel:
hist = TH1F("BG","BG"+" hist",1,0,1)
hist.SetBinContent(1,totalbg)
hist.Write("BG")
chan.write("""\
<Sample Name="%s" HistoPath="" NormalizeByTheory="True" HistoName="%s">
""" % ("BG","BG"))
chan.write("""\
<OverallSys Name="%s" Low="%f" High="%f"/>
""" % ("BG"+"_norm",1.-float(uncertainty),1.+float(uncertainty)))
chan.write("""\
</Sample>
""")
hist = TH1F("signal", "signal hist", 1,0,1)
#hist.Fill(0.5,float(l[5]))
hist.SetBinContent(1,float(l[5]))
hist.Write("signal")
hist = TH1F("data", "data hist", 1,0,1)
if not discovery:
#hist.Fill(0.5,totalbg)
hist.SetBinContent(1,totalbg)
hist.SetBinError(1,totalbg**0.5)
else:
#hist.Fill(0.5,totalbg+float(l[5]))
hist.SetBinContent(1,(totalbg+float(l[5])))
hist.SetBinError(1,(totalbg+float(l[5]))**0.5)
hist.Write("data")
setupWSfile.Close()
chan.write("""\
</Channel>
""")
chan.close()
if float(l[3])>1.0 and (float(l[5])<5 or totalbg<5):
forcetoys=True
# --------------------------------------------------------
script.write("""
<Measurement Name="DPLSMM" Lumi="1." LumiRelErr="%f" BinLow="0" BinHigh="2" >
<POI>mu</POI>
</Measurement>
</Combination>
""" % (lumiuncer))
script.close()
if discovery:
os.system("hist2workspace tmp_limits/top_discovery.xml > tmp_limits/setup_discovery.log 2>&1")
else:
os.system("hist2workspace tmp_limits/top_limit.xml > tmp_limits/setup_limit.log 2>&1")
return goodchannels,forcetoys
def run_limit_opt(optresultsfile,
lumiuncer,
toys,
points,
mulow,
muhigh,
uncertainty,
flatBGUnc,
useSingleBGModel):
cleanup = """\
mkdir -p tmp_limits
mkdir -p tmp_limits_data
mkdir -p tmp_limits_results
rm -f tmp_limits/*
rm -f tmp_limits_data/*
rm -f tmp_limits_results/*
"""
os.system(cleanup)
goodchannels,forcetoys=SetupWorkspaceOpt(optresultsfile,lumiuncer,False,uncertainty,flatBGUnc,useSingleBGModel)
SetupWorkspaceOpt(optresultsfile,lumiuncer, True,uncertainty,flatBGUnc,useSingleBGModel)
fullcls = 0
if forcetoys and toys<1000 and False:
toys=1000
if points>20:
points=20
if toys>0:
fullcls = 1
if goodchannels>0:
cmd2 = """\
./bin/runCEws -f %i -t %i -p %i -l %f -h %f -L tmp_limits_results/susy_limit__combined_DPLSMM_model.root -D tmp_limits_results/susy_discovery__combined_DPLSMM_model.root -n combined >& tmp_limits/limit.log
""" % (fullcls,toys,int(points),mulow,muhigh)
print cmd2
os.system(cmd2)
else:
cmd2="echo \"==RESULT== 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\" > tmp_limits/limit.log"
os.system(cmd2)
cmd2="echo \"==DISCOVERY== 0.5 0.0\" >> tmp_limits/limit.log"
os.system(cmd2)
cmd3 = """\
grep "==RESULT==" tmp_limits/limit.log
"""
cmd4 = """\
grep "computed upper limit" tmp_limits/limit.log | awk '{print $6}'
"""
cmd5 = """\
grep "expected limit (median) " tmp_limits/limit.log | awk '{print $4}'
"""
cmd6 = """\
grep "expected limit (+1 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd7 = """\
grep "expected limit (-1 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd8 = """\
grep "expected limit (+2 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
cmd9 = """\
grep "expected limit (-2 sig) " tmp_limits/limit.log | awk '{print $5}'
"""
# os.system(cmd3)
p = os.popen(cmd3)
res = p.readline()
ressplit = res.split()
p.close()
scale=1.
printEventLimits=True
if not printEventLimits:
scale=(1/scale)*100.
else:
scale=(1/scale)
p = os.popen("grep \"DISCOVERY\" tmp_limits/limit.log" )
res2 = p.readline()
res2split = res2.split()
p.close()
if len(res2split) > 2:
if float(res2split[1]) < 1e-20:
res2split[2] = "10"
if len(res2split) > 2 and len(ressplit) > 6:
print "%s : -2sig = %1.4f, -1sig = %1.4f, Median Exp = %1.4f, +1sig = %1.4f, +2sig = %1.4f, p0 = %1.3e (%1.4f sigma)" % ("dummy",
scale*float(ressplit[6]),
scale*float(ressplit[4]),
scale*float(ressplit[2]),
scale*float(ressplit[3]),
scale*float(ressplit[5]),
float(res2split[1]),
float(res2split[2]));
def main(argv):
parser = argparse.ArgumentParser(description="Command line arguments")
parser.add_argument("--background" , action='store', default="")
parser.add_argument("--toys" , action='store', default=0)
parser.add_argument("--signal" , action='store', default="")
parser.add_argument("--mulow" , action='store', default=0)
parser.add_argument("--muhigh" , action='store', default=5)
parser.add_argument("--points" , action='store', default=100)
parser.add_argument("--lumiUnc" , action='store', default=.028)
parser.add_argument("--uncertainty" , action='store', default=0.20)
parser.add_argument("--prefix" , action='store', default="test")
parser.add_argument("--optresults" , action='store', default="")
parser.add_argument("--flatBGUnc" , action='store_true')
parser.add_argument("--singleBGModel", action='store_true')
args=parser.parse_args()
if args.optresults != "":
run_limit_opt(args.optresults,
args.lumiUnc,
args.toys,
args.points,
args.mulow,
args.muhigh,
args.uncertainty,
args.flatBGUnc,
args.singleBGModel)
else:
backgrounds={}
bgfile = open(args.background)
for bg in bgfile.xreadlines():
bgsplit = bg.split()
if len(bgsplit) < 2:
continue
backgrounds[bgsplit[0]] = float(bgsplit[1])
sigfile = open(args.signal)
for line in sigfile.xreadlines():
run_limit(line,
backgrounds,
args.lumiUnc,
args.toys,
args.points,
args.mulow,
args.muhigh,
args.uncertainty)
if __name__ == '__main__':
main(sys.argv[1:])
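# A minimal usage sketch (illustrative only; the script name below is
# hypothetical):
#
#   python make_onechan_xml.py --background backgrounds.txt --signal signals.txt \
#       --uncertainty 0.20 --lumiUnc 0.028 --toys 0 --points 100
#
# where backgrounds.txt holds one "<name> <expected events>" pair per line and
# signals.txt holds one "<label> <expected signal events>" entry per line,
# matching how main() parses them above.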
|
Ayurveda is one of the oldest medical systems in the world. It is an extension of the Atharvaveda and a composition of science, art and philosophy. Ayurveda means “knowledge of life”, and that meaning is the epitome of what it offers. This medical system not only provides formulae for the treatment of diseases but also discusses the prevention of ailments.
Ayurveda and Ayurvigyan are both related to medical science, but in practice the ancient Indian method of treatment is called Ayurveda, while the allopathic system ("doctery" in common language) is referred to as Ayurvigyan.
|
import smbus
ADDR = 0x10
bus = smbus.SMBus(1)
__debug = False
SWIPE_RIGHT = 0x01
SWIPE_LEFT = 0x02
SWIPE_UP = 0x03
HOVER = 0x05
HOVER_LEFT = 0x06
HOVER_RIGHT = 0x07
HOVER_UP = 0x08
def gesture_name(gesture):
if gesture is None or gesture > HOVER_UP:
return None
return [
None,
'Swipe Right',
'Swipe Left',
'Swipe Up',
None,
'Hover',
'Hover Left',
'Hover Right',
'Hover Up'
][gesture]
def gesture_available():
status = bus.read_byte_data(ADDR, 0x00)
if __debug: print("Status: {:08b}".format(status))
return (status & 0b00011100) > 0
def position_available():
status = bus.read_byte_data(ADDR, 0x00)
return (status & 0b00000001) > 0
def get_x():
return bus.read_byte_data(ADDR, 0x08)
def get_z():
return bus.read_byte_data(ADDR, 0x0a)
def get_position():
return get_z(), get_x()
def get_gesture():
gesture = bus.read_byte_data(ADDR, 0x04)
if gesture in [HOVER, HOVER_LEFT, HOVER_RIGHT, HOVER_UP, SWIPE_LEFT, SWIPE_RIGHT, SWIPE_UP]:
return gesture
return None
def get_speed():
return bus.read_byte_data(ADDR, 0x05)
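if __name__ == "__main__":
    # Minimal polling sketch (illustrative only, not part of the original
    # module): repeatedly check the sensor and print any gesture or position.
    import time
    while True:
        if gesture_available():
            gesture = get_gesture()
            if gesture is not None:
                print("Gesture: {}".format(gesture_name(gesture)))
        if position_available():
            print("Position (z, x): {}".format(get_position()))
        time.sleep(0.05)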
|
Beachfront. Pool. Peaceful Lifestyle. Magic.
|
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of LARS Optimizer with optax."""
from typing import Any, Callable, List, NamedTuple, Optional, Tuple
import jax
import jax.numpy as jnp
import optax
import tree as nest
# A filter function takes a path and a value as input and outputs True for
# variable to apply update and False not to apply the update
FilterFn = Callable[[Tuple[Any], jnp.ndarray], jnp.ndarray]
def exclude_bias_and_norm(path: Tuple[Any], val: jnp.ndarray) -> jnp.ndarray:
"""Filter to exclude biaises and normalizations weights."""
del val
if path[-1] == "b" or "norm" in path[-2]:
return False
return True
def _partial_update(updates: optax.Updates,
new_updates: optax.Updates,
params: optax.Params,
filter_fn: Optional[FilterFn] = None) -> optax.Updates:
"""Returns new_update for params which filter_fn is True else updates."""
if filter_fn is None:
return new_updates
wrapped_filter_fn = lambda x, y: jnp.array(filter_fn(x, y))
params_to_filter = nest.map_structure_with_path(wrapped_filter_fn, params)
def _update_fn(g: jnp.ndarray, t: jnp.ndarray, m: jnp.ndarray) -> jnp.ndarray:
m = m.astype(g.dtype)
return g * (1. - m) + t * m
return jax.tree_multimap(_update_fn, updates, new_updates, params_to_filter)
class ScaleByLarsState(NamedTuple):
mu: jnp.ndarray
def scale_by_lars(
momentum: float = 0.9,
eta: float = 0.001,
filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
"""Rescales updates according to the LARS algorithm.
Does not include weight decay.
References:
[You et al, 2017](https://arxiv.org/abs/1708.03888)
Args:
    momentum: momentum coefficient.
eta: LARS coefficient.
filter_fn: an optional filter function.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params: optax.Params) -> ScaleByLarsState:
mu = jax.tree_multimap(jnp.zeros_like, params) # momentum
return ScaleByLarsState(mu=mu)
def update_fn(updates: optax.Updates, state: ScaleByLarsState,
params: optax.Params) -> Tuple[optax.Updates, ScaleByLarsState]:
def lars_adaptation(
update: jnp.ndarray,
param: jnp.ndarray,
) -> jnp.ndarray:
param_norm = jnp.linalg.norm(param)
update_norm = jnp.linalg.norm(update)
return update * jnp.where(
param_norm > 0.,
jnp.where(update_norm > 0,
(eta * param_norm / update_norm), 1.0), 1.0)
adapted_updates = jax.tree_multimap(lars_adaptation, updates, params)
adapted_updates = _partial_update(updates, adapted_updates, params,
filter_fn)
mu = jax.tree_multimap(lambda g, t: momentum * g + t,
state.mu, adapted_updates)
return mu, ScaleByLarsState(mu=mu)
return optax.GradientTransformation(init_fn, update_fn)
class AddWeightDecayState(NamedTuple):
"""Stateless transformation."""
def add_weight_decay(
weight_decay: float,
filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
"""Adds a weight decay to the update.
Args:
    weight_decay: weight decay coefficient.
filter_fn: an optional filter function.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_) -> AddWeightDecayState:
return AddWeightDecayState()
def update_fn(
updates: optax.Updates,
state: AddWeightDecayState,
params: optax.Params,
) -> Tuple[optax.Updates, AddWeightDecayState]:
new_updates = jax.tree_multimap(lambda g, p: g + weight_decay * p, updates,
params)
new_updates = _partial_update(updates, new_updates, params, filter_fn)
return new_updates, state
return optax.GradientTransformation(init_fn, update_fn)
LarsState = List # Type for the lars optimizer
def lars(
learning_rate: float,
weight_decay: float = 0.,
momentum: float = 0.9,
eta: float = 0.001,
weight_decay_filter: Optional[FilterFn] = None,
lars_adaptation_filter: Optional[FilterFn] = None,
) -> optax.GradientTransformation:
"""Creates lars optimizer with weight decay.
References:
[You et al, 2017](https://arxiv.org/abs/1708.03888)
Args:
learning_rate: learning rate coefficient.
weight_decay: weight decay coefficient.
momentum: momentum coefficient.
eta: LARS coefficient.
weight_decay_filter: optional filter function to only apply the weight
decay on a subset of parameters. The filter function takes as input the
      parameter path (as a tuple) and its associated update, and returns True
      for params to apply the weight decay and False for params to not apply
      the weight decay. When weight_decay_filter is set to None, the weight
      decay is not applied to the bias, i.e. when the variable name is 'b', and
      the weight decay is not applied to normalization params, i.e. the
      penultimate path contains 'norm'.
lars_adaptation_filter: similar to weight decay filter but for lars
adaptation
Returns:
An optax.GradientTransformation, i.e. a (init_fn, update_fn) tuple.
"""
if weight_decay_filter is None:
weight_decay_filter = lambda *_: True
if lars_adaptation_filter is None:
lars_adaptation_filter = lambda *_: True
return optax.chain(
add_weight_decay(
weight_decay=weight_decay, filter_fn=weight_decay_filter),
scale_by_lars(
momentum=momentum, eta=eta, filter_fn=lars_adaptation_filter),
optax.scale(-learning_rate),
)
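if __name__ == "__main__":
  # Minimal usage sketch (illustrative only, not part of the original module):
  # a toy quadratic loss over a small made-up parameter pytree, optimised for
  # a single step with the lars() transformation defined above.
  def _toy_loss(params):
    return (jnp.sum(params["linear"]["w"] ** 2) +
            jnp.sum(params["linear"]["b"] ** 2))

  toy_params = {"linear": {"w": jnp.ones((3, 2)), "b": jnp.zeros((2,))}}
  optimizer = lars(
      learning_rate=0.1,
      weight_decay=1e-4,
      weight_decay_filter=exclude_bias_and_norm,
      lars_adaptation_filter=exclude_bias_and_norm)
  opt_state = optimizer.init(toy_params)
  grads = jax.grad(_toy_loss)(toy_params)
  updates, opt_state = optimizer.update(grads, opt_state, toy_params)
  toy_params = optax.apply_updates(toy_params, updates)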
|
Are you looking for a suitable conference hotel in Penicuik for your next meeting or event? Use our free and convenient service to receive comparable proposals directly from your chosen conference hotels in Penicuik with just one online request, saving you a lot of time.
The list below shows you the most popular Penicuik conference hotels as well as conference venues in Penicuik and gives you details about the hotel and its conference facilities. By clicking the name of the hotel you can view even more information, pictures or videos. Select your preferred hotels to start your free and non-binding online enquiry and you will start receiving your hotel proposals within a couple of hours.
Haven't found an adequate Penicuik conference hotel in our list or want to propose a different conference hotel? Please contact us!
May we assist you with planning your conference in Penicuik? Call or email us to benefit from our experience and use our free service to find suitable conference hotels in Penicuik or in other destinations around the world. You will receive comparable offers directly from the hotels within a couple of hours.
|
import sys, os
import logging
sys.path.append('./')
import sassie.build.pdb_rx as pdb_rx
import sassie.util.sasconfig as sasconfig
import sassie.interface.input_filter as input_filter
import multiprocessing
svariables = {}
#### user input ####
#### user input ####
#### user input ####
runname = 'run_0'
pdbfile = 'testing/data/5E3L.pdb'
topfile = os.path.join(sasconfig.__bin_path__,'toppar','top_all27_prot_na.inp')
use_defaults = False
#### end user input ####
#### end user input ####
#### end user input ####
logging.basicConfig()
svariables['runname'] = (runname,'string')
svariables['pdbfile'] = (pdbfile,'string')
svariables['topfile'] = (topfile,'string')
svariables['defaults'] = (use_defaults,'boolean')
error,variables = input_filter.type_check_and_convert(svariables)
if(len(error)>0):
print 'error = ',error
sys.exit()
txtQueue = multiprocessing.JoinableQueue()
scan = pdb_rx.PDBRx()
scan.main(variables,txtQueue)
this_text = txtQueue.get(True, timeout=0.1)
|
Also a problem was that it was quite hard for someone spending only two days in a city to find people with free time who would show you around, discuss with you, etc. Tourist guides were not an option. Platforms that try to fill these gaps already exist, but none of them are dedicated to architecture, at least none that I know of.
For this reason, for the past year I have been designing a web platform that should fill these existing gaps and connect people interested in architecture. Because I first started with the construction, the design and the IT context, and only later with the question “What is really needed?”, I came to a point where I was stuck. I understood that I had gone the wrong way.
As a matter of fact, I came to the conclusion that in order for the platform to be innovative in providing connected context, resources, discussions, etc. for architecture, analysing the already existing ways architecture is communicated and “happens” on the WWW is an important field of research. How can you bring students of architecture, architects and the public interested in architecture together over the WWW and also in real life? Architecture is, or should be, in my eyes participative.
Le Corbusier, Gropius, Niemeyer and many others knew and visited each other all over the world, worked together and fought against each other. This was at the beginning of the 20th century, without the WWW, without email and without digital cameras. So why is this not possible today? Or is it already being done? This will be a point of research.
What have been the goals?
What problems did they engage?
How are they structured, what do they provide?
How adaptable and connectable are they?
Which of them are Open Source?
Which kind of Architecture do they show?
How do they sort the Architecture?
I want to finish my analysis at the end of the winter semester 2011/12 to be able to work on the specific matter of my platform in the summer semester 2011/12.
Build a forum with a geographic hierarchy (France->Rhone-Alpes->Lyon), where people can gather and talk about architecture in their city, region, country, find resources, etc.
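As a rough illustration (purely hypothetical, nothing of this exists yet), such a geographic hierarchy could be modelled as nested forum categories, each holding its own threads:
forum = {
    "France": {
        "Rhone-Alpes": {
            "Lyon": ["Who designed this building? (photo attached)"],
        },
    },
    "Austria": {
        "Vienna": ["Visiting next week - anyone up for an architecture walk?"],
    },
}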
Markus has taken a picture of a building in Lyon but doesn’t know what it is, who designed it, etc. He asks the TOA forum for Lyon. He will get an answer from a person interested in architecture, and this thread (forum topic) can then grow into a gathering of information and later be compiled into a guide for this building. This guide may be uploaded to TOA or any other cooperating website so that others can find it easily.
Luc is from France and goes on a journey to Vienna. He doesn’t know any architecture students there and speaks no German. He will ask in the Vienna group whether somebody would like to meet him for a beer and show him around in an architectural way. On the way he may well have highly interesting discussions with his guide about architecture and its status in Austria.
The question here is whether sharing documents really needs to be built into TOA, or whether it is better to use existing platforms and work together.
Pictures of the building, inside and outside, details, etc.
Information about when it was built, about the architect, etc.
Information about existing problems with this building, failures, etc.
A very good website for newly built architecture, but not very searchable or usable as a resource for specific research.
A very well organized website, but at the moment without many resources about modern architecture. Important feature: the selection of entries can be printed as a PDF with a built-in Google map and an automatically generated route for visits.
|
#!/usr/bin/env python
#
# vim: set expandtab:ts=4:sw=4
#
# Authors: Jasper Capel
# Robert van Leeuwen
#
# Function: Handles authentication to various OpenStack APIs and
# other authentication based (Keystone) functions
#
# This software is released under the terms of the Apache License.
#
from keystoneclient.v2_0.client import Client as keystonec
from neutronclient.v2_0.client import Client as neutronc
from novaclient.v3.client import Client as novac
import ConfigParser
import os
### AUTH FUNCTIONS ###
def get_os_credentials(filename='/etc/nova/nova.conf'):
'''Attempts to get credentials from an openstack config if it exists, otherwise from env'''
if os.path.exists(filename):
c = ConfigParser.RawConfigParser()
s = 'DEFAULT'
c.read(filename)
creds = {'username': c.get(s, 'neutron_admin_username'),
'password': c.get(s, 'neutron_admin_password'),
'tenant_name': c.get(s, 'neutron_admin_tenant_name'),
'region_name': c.get(s, 'os_region_name'),
'auth_url': c.get(s, 'neutron_admin_auth_url')}
else:
creds = {'username': os.getenv('OS_USERNAME'),
'password': os.getenv('OS_PASSWORD'),
'tenant_name': os.getenv('OS_TENANT_NAME'),
'region_name': os.getenv('OS_REGION_NAME', 'ams1'),
'auth_url': os.getenv('OS_AUTH_URL')}
return creds
def get_keystonesession(credentials=None):
if not credentials:
credentials = get_os_credentials()
from keystoneclient.auth.identity import v2
from keystoneclient import session
auth = v2.Password(username=credentials['username'],
password=credentials['password'],
tenant_name=credentials['tenant_name'],
auth_url=credentials['auth_url'])
return session.Session(auth=auth)
def get_keystoneclient(session):
'''Returns keystoneclient instance'''
return keystonec(session=session)
def get_neutronclient(session):
'''Returns neutronclient instance'''
creds = get_os_credentials()
return neutronc(username=creds['username'],
password=creds['password'],
tenant_name=creds['tenant_name'],
auth_url=creds['auth_url'])
def get_novaclient(session):
# Version of novaclient we use doesn't support using existing session
creds = get_os_credentials()
return novac(creds['username'], creds['password'], creds['tenant_name'], creds['auth_url'], region_name=creds['region_name'])
def get_tenants(session):
keystone = get_keystoneclient(session)
return keystone.tenants.list()
def get_tenant_email(session, tid):
keystone = get_keystoneclient(session)
return keystone.tenants.get(tid)
def show_creds():
credentials = get_os_credentials()
for cred in credentials:
print "export OS_" + cred.upper() + "=" + credentials[cred]
|
Mike & Vilma Conner have served under the leadership of Pastor Matthew Barnett for over 14 years at The Dream Center in Los Angeles, CA. They were the Directors for the Dream Center Discipleship Recovery program. Their passion for helping people with life controlling issues have given them the privilege to see over 6,000 men and women be transformed within the program.
Mike and Vilma have been married for over 13 years, have a grown daughter, son-in-law, 2 grandsons and 2 fur babies (dogs). They love having a balance life of being able to explore new places to travel and also relax at home watching superhero movies.
Mike comes from a background of being a licensed minister for over 30 years, along with being the operational director at the Oklahoma City Rescue Mission. He is a Certified Addiction Treatment Counselor with the State of California as well as a Masters Level Registered Addiction Specialist. Mike is also the author of a book focused on recovery: SOULutions: A Supernatural Understanding for Conquering Chaos and Eradicating Self-Sabotage.
Vilma comes from a background of being a programmer analyst for more than 13 years with the State of California. She is also a licensed minister with a passion for ministering to hurting and abused women. Vilma has been a published author since 2009. Her book, Born-Again Virgin: How to Transform Your Life from Promiscuity back into Purity, has helped countless women rediscover their God-given value and gain a fresh start on life without the stigma of their past holding them captive.
A few years ago, God started speaking into their lives that a transition was coming. Then God placed the City of Beaumont, Texas on their hearts. After a series of events and confirmation after confirmation, 2017 was the year they embarked on a new chapter in their lives. While it was not easy to leave their friends, family, and fellow co-laborers, they left behind a legacy of transformed lives. Their background, unique skills and gifting make them a power couple for the Kingdom of God as they embrace the new assignment God has given them in Southeast Texas.
|
#!/usr/bin/env python
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
learning_rate = 0.01
n_epochs = 10000
def scaler_norm(a):
return StandardScaler().fit(a).transform(a)
housing = fetch_california_housing()
m, n = housing.data.shape
housing_data_norm = scaler_norm(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), housing_data_norm]
y_norm = scaler_norm(housing.target.reshape(-1, 1))
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(y_norm, dtype=tf.float32, name='y')
XT = tf.transpose(X)
theta = tf.Variable(tf.random_uniform([n+1, 1], -1.0, 1.0), dtype=tf.float32, name='theta')
y_pred = tf.matmul(X, theta)
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
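# Manual gradient of the MSE with respect to theta: grad = (2/m) * X^T (X*theta - y)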
gradients = 2 / m * tf.matmul(XT, error)
training_op = tf.assign(theta, theta - learning_rate * gradients)
init = tf.global_variables_initializer()
print('#'*80)
print('## Gradient descent')
print('#'*80)
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
if epoch % 100 == 0:
print('Epoch', epoch, 'MSE = ', mse.eval())
sess.run(training_op)
best_theta = theta.eval()
print(best_theta)
print('#'*80)
print('## Verifying with equation')
print('#'*80)
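# Closed-form check via the normal equation: theta = (X^T X)^{-1} X^T y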
theta_cal = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)
y_pred_cal = tf.matmul(X, theta_cal)
error_cal = y_pred_cal - y
mse_cal = tf.reduce_mean(tf.square(error_cal), name='mse')
with tf.Session() as sess:
init.run()
theta_cal_val, mse_cal = sess.run([theta_cal, mse_cal])
print(theta_cal_val, mse_cal)
|
Have you had a strange sighting or experience, and want to report it? Fill out the form below, and we will contact you. All information will be kept confidential.
|
from treedict import TreeDict
from parameters import applyPreset
from collections import defaultdict
from os.path import join, abspath, exists, split
from os import makedirs, remove
import hashlib, base64, weakref, sys, gc, logging
from itertools import chain
from collections import namedtuple
from pmodule import isPModule, getPModuleClass
from diskio import saveResults, loadResults
################################################################################
# Stuff to manage the cache
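# PNodeModuleCacheContainer wraps a single cached object together with the keys
# (module name, object name, local key, dependency key, specific key) used to
# locate it both in the in-memory cache and on disk.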
class PNodeModuleCacheContainer(object):
def __init__(self, pn_name, name,
local_key, dependency_key,
specific_key = None,
is_disk_writable = True,
is_persistent = True):
self.__pn_name = pn_name
self.__name = name
self.__specific_key = specific_key
self.__local_key = local_key
self.__dependency_key = dependency_key
self.__is_disk_writable = is_disk_writable
self.__is_non_persistent = not is_persistent
self.__obj = None
self.__obj_is_loaded = False
self.__disk_save_hook = None
self.__non_persistent_hook = None
def getFilename(self):
def v(t):
return str(t) if t is not None else "null"
return join(v(self.__pn_name), v(self.__name),
"%s-%s-%s.dat" % (v(self.__local_key), v(self.__dependency_key),
v(self.__specific_key)) )
def getKeyAsString(self):
return '-'.join( (str(t) if t is not None else "N")
for t in [self.__pn_name, self.__name,
self.__local_key,
self.__dependency_key,
self.__specific_key])
def getCacheKey(self):
# The specific cache
return (self.__pn_name, self.__local_key, self.__dependency_key)
def getObjectKey(self):
return (self.__name, self.__specific_key)
def isNonPersistent(self):
return self.__is_non_persistent
def getNonPersistentKey(self):
assert self.__is_non_persistent
return (self.__pn_name, self.__name)
def setObject(self, obj):
assert not self.__obj_is_loaded
self.__obj_is_loaded = True
self.__obj = obj
if self.__disk_save_hook is not None:
self.__disk_save_hook(self)
self.__disk_save_hook = None
if self.__non_persistent_hook is not None:
self.__non_persistent_hook(self)
self.__non_persistent_hook = None
def isLocallyEqual(self, pnc):
return self.__name == pnc.__name and self.__specific_key == pnc.__specific_key
def setObjectSaveHook(self, hook):
self.__disk_save_hook = hook
def setNonPersistentObjectSaveHook(self, hook):
assert self.__is_non_persistent
self.__non_persistent_hook = hook
def getObject(self):
assert self.__obj_is_loaded
return self.__obj
def objectIsLoaded(self):
return self.__obj_is_loaded
def disableDiskWriting(self):
self.__is_disk_writable = False
self.__disk_save_hook = None
def isDiskWritable(self):
return self.__is_disk_writable
def objRefCount(self):
return sys.getrefcount(self.__obj)
class PNodeModuleCache(object):
__slots__ = ["reference_count", "cache"]
def __init__(self):
self.reference_count = 0
self.cache = {}
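# _PNodeNonPersistentDeleter: when a new non-persistent container is registered
# under the same (module, name) key, the previously stored container is evicted
# from its cache so only the most recent non-persistent object is kept.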
class _PNodeNonPersistentDeleter(object):
def __init__(self, common):
self.common = common
def __call__(self, container):
np_key = container.getNonPersistentKey()
try:
old_container = self.common.non_persistant_pointer_lookup[np_key]
except KeyError:
old_container = None
if old_container is not None:
try:
del self.common.cache_lookup[old_container.getCacheKey()].cache[old_container.getObjectKey()]
except KeyError:
pass
self.common.non_persistant_pointer_lookup[np_key] = container
# This class holds the runtime environment for the pnodes
class PNodeCommon(object):
def __init__(self, opttree):
self.log = logging.getLogger("RunCTRL")
# This is for node filtering, i.e. eliminating duplicates
self.pnode_lookup = weakref.WeakValueDictionary()
self.non_persistant_pointer_lookup = weakref.WeakValueDictionary()
self.non_persistant_deleter = _PNodeNonPersistentDeleter(self)
# This is for local cache lookup
self.cache_lookup = defaultdict(PNodeModuleCache)
self.cache_directory = opttree.cache_directory
self.disk_read_enabled = opttree.disk_read_enabled
self.disk_write_enabled = opttree.disk_write_enabled
self.opttree = opttree
def getResults(self, parameters, names):
if type(names) is str:
single = True
names = [names]
else:
single = False
def getPN(n):
if type(n) is not str:
raise TypeError("Module name not a string.")
pn = PNode(self, parameters, n, 'results')
pn.initialize()
pn = self.registerPNode(pn)
pn.increaseParameterReference()
pn.increaseResultReference()
return pn
pn_list = [getPN(n) for n in names]
assert len(set(id(pn) for pn in pn_list)) == len(set(names))
ret_list = [pn.pullUpToResults().result for pn in pn_list]
if single:
assert len(ret_list) == 1
return ret_list[0]
else:
return ret_list
def registerPNode(self, pn):
# see if it's a duplicate
key = (pn.name, pn.key)
if key in self.pnode_lookup:
pnf = self.pnode_lookup[key]
if not pn.is_only_parameter_dependency:
pnf.is_only_parameter_dependency = False
pn_ret = pnf
else:
self.pnode_lookup[key] = pn_ret = pn
pn_ret.buildReferences()
return pn_ret
def deregisterPNode(self, pn):
key = (pn.name, pn.key)
assert self.pnode_lookup[key] is pn
del self.pnode_lookup[key]
def _getCache(self, pn, use_local, use_dependencies, should_exist):
key = (pn.name if pn is not None else None,
pn.local_key if use_local else None,
pn.dependency_key if use_dependencies else None)
if should_exist:
assert key in self.cache_lookup
return key, self.cache_lookup[key]
def increaseCachingReference(self, pn):
# print ("increasing reference, name = %s, key = %s, local_key = %s, dep_key = %s"
# % (pn.name, pn.key, pn.local_key, pn.dependency_key))
for t in [(None, False, False),
(pn, True, False),
(pn, False, True),
(pn, False, False),
(pn, True, True)]:
key, cache = self._getCache(*(t + (False,)))
cache.reference_count += 1
def decreaseCachingReference(self, pn):
# print ("decreasing reference, name = %s, key = %s, local_key = %s, dep_key = %s"
# % (pn.name, pn.key, pn.local_key, pn.dependency_key))
for t in [(None, False, False),
(pn, True, False),
(pn, False, True),
(pn, False, False),
(pn, True, True)]:
key, cache = self._getCache(*(t + (True,)))
cache.reference_count -= 1
assert cache.reference_count >= 0
# Clear the cache if it's no longer needed
if cache.reference_count == 0:
# if len(cache.cache) != 0:
# print "Clearing cache %s. objects in the cache are:" % str(key)
# for v in cache.cache.itervalues():
# print "%s: ref_count = %d" % (v.getObjectKey(), v.objRefCount())
del self.cache_lookup[key]
def loadContainer(self, container, no_local_caching = False):
assert not container.objectIsLoaded()
if not no_local_caching:
cache = self.cache_lookup[container.getCacheKey()]
c = cache.cache
obj_key = container.getObjectKey()
if obj_key in c:
return c[obj_key]
else:
c[obj_key] = container
if container.isNonPersistent():
container.setNonPersistentObjectSaveHook(self.non_persistant_deleter)
# now see if it can be loaded from disk
self._loadFromDisk(container)
return container
def _loadFromDisk(self, container):
if not container.isDiskWritable():
return
if self.disk_read_enabled:
filename = abspath(join(self.cache_directory, container.getFilename()))
self.log.debug("Trying to load %s from %s" % (container.getKeyAsString(), filename))
if exists(filename):
error_loading = False
try:
pt = loadResults(self.opttree, filename)
except Exception, e:
self.log.error("Exception Raised while loading %s: \n%s"
% (filename, str(e)))
error_loading = True
if not error_loading:
self.log.debug("--> Object successfully loaded.")
container.setObject(pt)
return
else:
pass # go to the disk write enabled part
else:
self.log.debug("--> File does not exist.")
if self.disk_write_enabled and container.isDiskWritable():
container.setObjectSaveHook(self._saveToDisk)
def _saveToDisk(self, container):
assert self.disk_write_enabled and container.isDiskWritable()
filename = join(self.cache_directory, container.getFilename())
obj = container.getObject()
self.log.debug("Saving object %s to %s." % (container.getKeyAsString(), filename))
try:
saveResults(self.opttree, filename, obj)
assert exists(filename)
except Exception, e:
self.log.error("Exception raised attempting to save object to cache: \n%s" % str(e))
try:
remove(filename)
except Exception:
pass
def _debug_referencesDone(self):
import gc
gc.collect()
print "**************** running check*****************"
for pn in self.pnode_lookup.values():
if pn.result_reference_count != 0 or pn.module_reference_count != 0 or pn.module_access_reference_count != 0:
print (("Nonzero references, (%d, %d, %d), name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (pn.result_reference_count, pn.module_reference_count, pn.module_access_reference_count,
pn.name, pn.key,
pn.local_key, pn.dependency_key))
for t in [(None, False, False),
(pn, True, False),
(pn, False, True),
(pn, False, False),
(pn, True, True)]:
key, cache = self._getCache(*(t + (False,)))
if cache.reference_count != 0:
print (("Nonzero (%d) cache reference, name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (cache.reference_count,
"null" if t[0] is None else pn.name,
pn.key,
"null" if not t[1] else pn.local_key,
"null" if not t[2] else pn.dependency_key))
if hasattr(pn, "module") and pn.module is not None:
print (("Non-None module, (%d, %d, %d), name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (pn.result_reference_count, pn.module_reference_count, pn.module_access_reference_count,
pn.name, pn.key,
pn.local_key, pn.dependency_key))
if hasattr(pn, "results_container") and pn.results_container is not None:
print (("Non-None results, (%d, %d, %d), name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (pn.result_reference_count, pn.module_reference_count, pn.module_access_reference_count,
pn.name, pn.key,
pn.local_key, pn.dependency_key))
if hasattr(pn, "child_pull_dict"):
print (("Child pull dict bad!!!, (%d, %d, %d), name = %s, key = %s, "
"local_key = %s, dep_key = %s")
% (pn.result_reference_count, pn.module_reference_count, pn.module_access_reference_count,
pn.name, pn.key,
pn.local_key, pn.dependency_key))
_Null = "null"
_PulledResult = namedtuple('PulledResult', ['parameters', 'result'])
_PulledModule = namedtuple('PulledModule', ['parameters', 'result', 'module'])
class PNode(object):
def __init__(self, common, parameters, name, p_type):
# print ">>>>>>>>>>>>>>>>>>>> INIT: %s <<<<<<<<<<<<<<<<<<<<" % name
self.common = common
self.parameters = parameters.copy()
self.parameters.attach(recursive = True)
self.name = name
self.is_pmodule = isPModule(name)
if p_type in ["module", "results"]:
if not self.is_pmodule:
raise ValueError("%s is not a recognized processing module." % name)
else:
if p_type != "parameters":
raise ValueError( ("p_type must be either 'module', 'results', "
"or 'parameters' (not '%s').") % p_type)
# Parameters don't hold references to other objects
self.is_only_parameter_dependency = (p_type == "parameters")
##################################################
# Get the preprocessed parameters
if name not in self.parameters:
self.parameters.makeBranch(name)
if self.is_pmodule:
p_class = self.p_class = getPModuleClass(self.name)
self.parameters[name] = pt = p_class._preprocessParameters(self.parameters)
pt.attach(recursive = True)
pt.freeze()
self.parameter_key = self.parameters.hash(name)
h = hashlib.md5()
h.update(str(p_class._getVersion()))
h.update(self.parameter_key)
self.local_key = base64.b64encode(h.digest(), "az")[:8]
self.results_reported = False
self.full_key = self.parameters.hash()
# Reference counting isn't used in the parameter classes
self.parameter_reference_count = 0
self.result_reference_count = 0
self.module_reference_count = 0
self.module_access_reference_count = 0
self.dependent_modules_pulled = False
self.children_have_reference = False
else:
self.parameter_key = self.parameters.hash(name)
self.parameter_reference_count = 0
########################################
# Setup
def initialize(self):
# This extra step is needed as the child pnodes must be
# consolidated into the right levels first
assert self.is_pmodule
def _processDependencySet(p_type, dl):
rs = {}
def add(s, parameters, first_order, name_override):
t = type(s)
if t is str:
if s != self.name:
# delay the creation until we know we need it
h = self.full_key if parameters is self.parameters else parameters.hash()
rs[(s, h)] = (s if first_order else name_override, parameters, s, p_type)
elif t is list or t is tuple or t is set:
for se in s:
add(se, parameters, first_order, name_override)
elif getattr(s, "__parameter_container__", False):
add(s.name, s._getParameters(parameters), False, s._getLoadName())
else:
raise TypeError("Dependency type not recognized.")
add(dl, self.parameters, True, None)
return rs
# Initializes the results above the dependencies
# get the verbatim children specifications and lists of
# dependencies
m_dep, r_dep, p_dep = self.p_class._getDependencies(self.parameters)
# these are (name, hash) : pnode dicts
self.module_dependencies = _processDependencySet("module", m_dep)
self.result_dependencies = _processDependencySet("results", r_dep)
self.parameter_dependencies = _processDependencySet("parameters", p_dep)
# print "init-3: %s-%s has ref count %d" % (self.name, self.key, sys.getrefcount(self))
# Now go through and push the dependencies down
self.result_dependencies.update(self.module_dependencies)
self.parameter_dependencies.update(self.result_dependencies)
# And go through and instantiate all of the remaining ones
for k, t in self.parameter_dependencies.items():
pn = PNode(self.common, *t[1:])
self.parameter_dependencies[k] = v = (t[0], pn)
if k in self.result_dependencies:
self.result_dependencies[k] = v
if k in self.module_dependencies:
self.module_dependencies[k] = v
# Go through and instantiate all the children
for n, pn in self.result_dependencies.itervalues():
pn.initialize()
# Now go through and eliminate duplicates
for k, (n, pn) in self.result_dependencies.items():
pnf = self.common.registerPNode(pn)
if pnf is not pn:
self.result_dependencies[k] = (n, pnf)
self.parameter_dependencies[k] = (n, pnf)
if k in self.module_dependencies:
self.module_dependencies[k] = (n, pnf)
########################################
# don't need to propagate parameter dependencies to children,
# computing the hash as well
h = hashlib.md5()
for (n, th), (ln, pn) in sorted(self.parameter_dependencies.iteritems()):
h.update(n)
h.update(pn.parameter_key)
for (n, th), (ln, pn) in sorted(self.result_dependencies.iteritems()):
h.update(n)
h.update(pn.key)
self.dependency_key = base64.b64encode(h.digest(), "az")[:8]
h.update(self.local_key)
self.key = base64.b64encode(h.digest(), "az")[:8]
# Load the parameter tree
self.dependency_parameter_tree = TreeDict()
for (n, th), (ln, pn) in sorted(self.parameter_dependencies.iteritems()):
if ln is not None:
self.dependency_parameter_tree[ln] = pn.pullParameterPreReferenceCount()
self.dependency_parameter_tree[self.name] = self.parameters[self.name]
self.is_disk_writable = self.p_class._allowsCaching(self.dependency_parameter_tree)
self.is_result_disk_writable = (False if not self.is_disk_writable else
self.p_class._allowsResultCaching(self.dependency_parameter_tree))
def buildReferences(self):
if not self.is_only_parameter_dependency and not self.children_have_reference:
########################################
# Do reference counting with all the children
for k, (n, pn) in self.parameter_dependencies.items():
pn.increaseParameterReference()
for k, (n, pn) in self.result_dependencies.items():
pn.increaseResultReference()
for k, (n, pn) in self.module_dependencies.items():
pn.increaseModuleReference()
self.children_have_reference = True
def dropUnneededReferences(self):
if self.children_have_reference:
########################################
# Do reference counting with all the children
for k, (n, pn) in self.module_dependencies.items():
pn.decreaseModuleReference()
for k, (n, pn) in self.result_dependencies.items():
pn.decreaseResultReference()
for k, (n, pn) in self.parameter_dependencies.items():
pn.decreaseParameterReference()
self.children_have_reference = False
##################################################
# Instantiating things
def _instantiate(self, need_module):
if not hasattr(self, "results_container"):
# Attempt to load the results from cache
self.results_container = self.common.loadContainer(
PNodeModuleCacheContainer(
pn_name = self.name,
name = "__results__",
local_key = self.local_key,
dependency_key = self.dependency_key,
is_disk_writable = self.is_result_disk_writable),
no_local_caching = True)
have_loaded_results = self.results_container.objectIsLoaded()
# we're done if the results are loaded and that's all we need
if have_loaded_results:
self._reportResults(self.results_container.getObject())
if self.module_reference_count == 0:
assert not need_module
self.dropUnneededReferences()
return
if not need_module:
return
else:
have_loaded_results = self.results_container.objectIsLoaded()
# Okay, not done yet
########################################
# This pulls all the dependency parts
# Create the dependency parts
self.child_pull_dict = {}
global _Null
modules = TreeDict()
results = TreeDict()
params = TreeDict()
for k, (load_name, pn) in self.module_dependencies.iteritems():
self.child_pull_dict[k] = p,r,m = pn.pullUpToModule()
if load_name is not None:
params[load_name], results[load_name], modules[load_name] = p,r,m
modules.freeze()
for k, (load_name, pn) in self.result_dependencies.iteritems():
if k in self.child_pull_dict:
if load_name is not None:
params[load_name], results[load_name] = self.child_pull_dict[k][:2]
else:
p, r = pn.pullUpToResults()
self.child_pull_dict[k] = (p, r, _Null)
if load_name is not None:
params[load_name], results[load_name] = p, r
results.freeze()
# parameters are easy
for k, (load_name, pn) in self.parameter_dependencies.iteritems():
if k in self.child_pull_dict:
if load_name is not None:
params[load_name] = self.child_pull_dict[k][0]
else:
p = pn.pullParameters()
self.child_pull_dict[k] = (p, _Null, _Null)
if load_name is not None:
params[load_name] = p
params[self.name] = self.parameters[self.name]
params.freeze()
# Now we've pulled all we need!
self.children_have_reference = False
self.increaseModuleAccessCount()
# Now instantiate the module
self.module = self.p_class(self, params, results, modules)
if not have_loaded_results:
r = self.module.run()
if type(r) is TreeDict:
r.freeze()
self.results_container.setObject(r)
self._reportResults(r)
else:
r = self.results_container.getObject()
self.module._setResults(r)
self.dependent_modules_pulled = True
self.decreaseModuleAccessCount()
##################################################
# Interfacing stuff
def _checkModuleDeletionAllowances(self):
mac_zero = (self.module_access_reference_count == 0)
mrc_zero = (self.module_reference_count == 0)
rrc_zero = (self.result_reference_count == 0)
if mrc_zero and mac_zero and self.dependent_modules_pulled:
# Get rid of everything but the results
self.module._destroy()
del self.module
# propagate all the dependencies
for k, (load_name, pn) in self.module_dependencies.iteritems():
pn.decreaseModuleAccessCount()
if hasattr(self, "additional_module_nodes_accessed"):
for pn in self.additional_module_nodes_accessed:
pn.decreaseModuleAccessCount()
del self.additional_module_nodes_accessed
# This is guaranteed to exist if all the code is right
del self.child_pull_dict
self.dependent_modules_pulled = False
def _checkDeletability(self):
if not self.is_only_parameter_dependency:
assert self.module_reference_count <= self.parameter_reference_count
assert self.result_reference_count <= self.parameter_reference_count
if self.parameter_reference_count == 0 and (
self.is_only_parameter_dependency or self.module_access_reference_count == 0):
# Clean out the heavy parts in light of everything
if not self.is_only_parameter_dependency:
self.common.deregisterPNode(self)
self.module_dependencies.clear()
self.result_dependencies.clear()
self.parameter_dependencies.clear()
def increaseParameterReference(self):
if not self.is_only_parameter_dependency:
assert self.module_reference_count <= self.parameter_reference_count
assert self.result_reference_count <= self.parameter_reference_count
assert type(self.parameters) is TreeDict
self.parameter_reference_count += 1
def decreaseParameterReference(self):
assert self.parameter_reference_count >= 1
self.parameter_reference_count -= 1
if not self.is_only_parameter_dependency:
assert self.module_reference_count <= self.parameter_reference_count
assert self.result_reference_count <= self.parameter_reference_count
if self.parameter_reference_count == 0:
self._checkDeletability()
def increaseResultReference(self):
self.result_reference_count += 1
def decreaseResultReference(self):
assert self.result_reference_count >= 1
self.result_reference_count -= 1
assert self.module_reference_count <= self.result_reference_count
if self.result_reference_count == 0:
try:
del self.results_container
except AttributeError:
pass
self.dropUnneededReferences()
def increaseModuleAccessCount(self):
self.module_access_reference_count += 1
self.common.increaseCachingReference(self)
def decreaseModuleAccessCount(self):
assert self.module_access_reference_count >= 1
self.module_access_reference_count -= 1
self.common.decreaseCachingReference(self)
if self.module_access_reference_count == 0:
self._checkModuleDeletionAllowances()
self._checkDeletability()
def increaseModuleReference(self):
self.module_reference_count += 1
self.common.increaseCachingReference(self)
def decreaseModuleReference(self):
assert self.module_reference_count >= 1
self.module_reference_count -= 1
self.common.decreaseCachingReference(self)
if self.module_reference_count == 0:
self._checkModuleDeletionAllowances()
def pullParameterPreReferenceCount(self):
return self.parameters[self.name]
def pullParameters(self):
assert self.parameter_reference_count >= 1
p = self.parameters[self.name]
self.decreaseParameterReference()
return p
def pullUpToResults(self):
assert self.result_reference_count >= 1
if not hasattr(self, "results_container"):
self._instantiate(False)
r = self.results_container.getObject()
ret = _PulledResult(self.parameters[self.name], r)
rc = self.results_container
self.decreaseResultReference()
self.decreaseParameterReference()
return ret
def pullUpToModule(self):
# print "Pulling module for module %s." % self.name
assert self.module_reference_count >= 0
if not hasattr(self, "module") or not hasattr(self, "results_container"):
self._instantiate(True)
r = self.results_container.getObject()
self._reportResults(r)
ret = _PulledModule(self.parameters[self.name], r, self.module)
self.increaseModuleAccessCount()
self.decreaseModuleReference()
self.decreaseResultReference()
self.decreaseParameterReference()
return ret
################################################################################
# Loading cache stuff
def getCacheContainer(self, obj_name, key, ignore_module, ignore_local,
ignore_dependencies, is_disk_writable, is_persistent):
container = PNodeModuleCacheContainer(
pn_name = None if ignore_module else self.name,
name = obj_name,
local_key = None if ignore_local else self.local_key,
dependency_key = None if ignore_dependencies else self.dependency_key,
specific_key = key,
is_disk_writable = is_disk_writable and self.is_disk_writable,
is_persistent = is_persistent)
return self.common.loadContainer(container)
def _resolveRequestInfo(self, r):
# first get the key
if type(r) is str:
name = r
ptree = self.parameters
key = self.full_key
elif getattr(r, "__parameter_container__", False):
name = r.name
ptree = r._getParameters(self.parameters)
key = ptree.hash()
else:
raise TypeError("Requested %s must be specified as a string or "
"a parameter container class like 'Delta'.")
return name, ptree, key
def getSpecific(self, r_type, r):
name, ptree, key = self._resolveRequestInfo(r)
lookup_key = (name, key)
if lookup_key in self.child_pull_dict:
params, results, module = self.child_pull_dict[lookup_key]
global _Null
if r_type == "results" and results is not _Null:
return results
elif r_type == "module" and module is not _Null:
return module
elif r_type == "parameters":
return params
else:
assert False
if r_type == "results":
return self.common.getResults(ptree, name)
elif r_type == "module":
pn = PNode(self.common, ptree, name, 'module')
pn.initialize()
pn = self.common.registerPNode(pn)
pn.increaseParameterReference()
pn.increaseResultReference()
pn.increaseModuleReference()
if hasattr(self, "additional_module_nodes_accessed"):
self.additional_module_nodes_accessed.append(pn)
else:
self.additional_module_nodes_accessed = [pn]
return pn.pullUpToModule().module
elif r_type == "parameters":
pn = PNode(self.common, ptree, name, 'parameters')
pn.initialize()
pn = self.common.registerPNode(pn)
pn.increaseParameterReference()
return pn.pullParameters()
else:
assert False
##################################################
# Result Reporting stuff
def _reportResults(self, results):
if not self.results_reported:
try:
self.p_class.reportResults(self.parameters, self.parameters[self.name], results)
except TypeError, te:
rrf = self.p_class.reportResults
def raiseTypeError():
raise TypeError(("reportResults method in '%s' must be @classmethod "
"and take global parameter tree, local parameter tree, "
"and result tree as arguments.") % name)
# See if it was due to incompatable signature
from robust_inspect import getcallargs
try:
getcallargs(rrf, parameters, p, r)
except TypeError:
raiseTypeError()
# Well, that wasn't the issue, so it's something internal; re-raise
raise
self.results_reported = True
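# --- Usage sketch (hypothetical option tree and module names; not part of the original module) ---
# common = PNodeCommon(opttree)
# result = common.getResults(parameters, 'analysis')                # single name -> single result tree
# r_list = common.getResults(parameters, ['analysis', 'summary'])   # list of names -> list of results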
|
We will be meeting Thursday, Oct. 18th at Hoppergrass Restaurant at 11:00 AM. This is a very important meeting, as we will be discussing whether to continue holding plant sales, as well as future projects, so please come and let your voice be heard. All comments and ideas respectfully presented will be appreciated.
Hoppergrass Restaurant is located at 129 S. East Avenue in Ozark, across from Milky Moos.
|
# -*- coding: utf-8 -*-
import json
import logging
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from math import exp
from unidecode import unidecode
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models, transaction
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.utils.encoding import python_2_unicode_compatible
from .elo import EloMatch
from .exceptions import UnknownOutcome, EventNotInProgress
from .managers import (
EventManager,
BetManager,
TeamResultManager,
TransactionManager,
)
from bladepolska.snapshots import SnapshotAddon
from bladepolska.site import current_domain
from django_elasticsearch.models import EsIndexable
from constance import config
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from taggit_autosuggest.managers import TaggableManager
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
class EventCategory(models.Model):
name = models.CharField(u'tytuł wydarzenia', max_length=255, unique=True)
slug = models.SlugField(verbose_name=_('Slug url'), unique=True)
class Meta:
verbose_name = u'kategoria'
verbose_name_plural = u'kategorie'
def __str__(self):
return self.name
@python_2_unicode_compatible
class Event(EsIndexable, models.Model):
"""
The Event model represents a real question which you can answer YES or NO.
"""
IN_PROGRESS, CANCELLED, FINISHED_YES, FINISHED_NO = range(1, 5)
EVENT_OUTCOME_CHOICES = (
(IN_PROGRESS, u'w trakcie'),
(CANCELLED, u'anulowane'),
(FINISHED_YES, u'rozstrzygnięte na TAK'),
(FINISHED_NO, u'rozstrzygnięte na NIE'),
)
EVENT_FINISHED_TYPES = (CANCELLED, FINISHED_YES, FINISHED_NO)
BOOLEAN_OUTCOME_DICT = {
FINISHED_YES: True,
FINISHED_NO: False
}
BEGIN_PRICE = 50
FACTOR_B = 10
PRIZE_FOR_WINNING = 100
CHART_MARGIN = 3
EVENT_SMALL_CHART_DAYS = 14
EVENT_BIG_CHART_DAYS = 28
SMALL_IMAGE_WIDTH = 340
SMALL_IMAGE_HEIGHT = 250
BIG_IMAGE_WIDTH = 1250
BIG_IMAGE_HEIGHT = 510
snapshots = SnapshotAddon(fields=[
'current_buy_for_price',
'current_buy_against_price',
'current_sell_for_price',
'current_sell_against_price',
'Q_for',
'Q_against',
'B'
])
title = models.CharField(u'tytuł wydarzenia', max_length=255)
short_title = models.CharField(
verbose_name=u'tytuł promocyjny wydarzenia', max_length=255, default='', blank=True
)
description = models.TextField(u'pełny opis wydarzenia', default='')
categories = models.ManyToManyField('events.EventCategory', verbose_name=u'kategorie', blank=True)
is_featured = models.BooleanField(u'wyróżniony', default=False)
is_published = models.BooleanField(u'opublikowano', default=True)
twitter_tag = models.CharField(
verbose_name=u'tag twittera', max_length=32, null=True, blank=True, default='',
validators=[
RegexValidator(
regex=r'^([^\s]+)$',
message=u'Tag twittera nie może zawierać spacji',
code='invalid_twitter_tag'
),
]
)
title_fb_yes = models.CharField(
u'tytuł na TAK obiektu FB', max_length=255, default='', blank=True, null=True
)
title_fb_no = models.CharField(
u'tytuł na NIE obiektu FB', max_length=255, default='', blank=True, null=True
)
small_image = ProcessedImageField(
help_text=u'mały obrazek {0}x{1}'.format(SMALL_IMAGE_WIDTH, SMALL_IMAGE_HEIGHT),
upload_to='events_small',
processors=[ResizeToFill(SMALL_IMAGE_WIDTH, SMALL_IMAGE_HEIGHT)],
null=True,
blank=False,
)
big_image = ProcessedImageField(
help_text=u'duży obrazek {0}x{1}'.format(BIG_IMAGE_WIDTH, BIG_IMAGE_HEIGHT),
upload_to='events_big',
processors=[ResizeToFill(BIG_IMAGE_WIDTH, BIG_IMAGE_HEIGHT)],
null=True,
blank=False,
)
# voting used to resolve events
vote_yes_count = models.PositiveIntegerField(u'głosów na tak', default=0)
vote_no_count = models.PositiveIntegerField(u'głosów na nie', default=0)
vote_cancel_count = models.PositiveIntegerField(u'głosów na anuluj', default=0)
outcome = models.PositiveIntegerField(u'rozstrzygnięcie', choices=EVENT_OUTCOME_CHOICES, default=1)
outcome_reason = models.TextField(u'uzasadnienie wyniku', default='', blank=True)
created_date = models.DateTimeField(auto_now_add=True, verbose_name=u'data utworzenia')
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=u'utworzone przez', null=True, related_name='created_by'
)
estimated_end_date = models.DateTimeField(u'przewidywana data rozstrzygnięcia', null=True, blank=False)
end_date = models.DateTimeField(u'data rozstrzygnięcia', null=True, blank=True)
current_buy_for_price = models.IntegerField(
u'cena nabycia akcji zdarzenia', default=BEGIN_PRICE
)
current_buy_against_price = models.IntegerField(
u'cena nabycia akcji zdarzenia przeciwnego', default=BEGIN_PRICE
)
current_sell_for_price = models.IntegerField(
u'cena sprzedaży akcji zdarzenia', default=BEGIN_PRICE
)
current_sell_against_price = models.IntegerField(
u'cena sprzedaży akcji zdarzenia przeciwnego', default=BEGIN_PRICE
)
last_transaction_date = models.DateTimeField(u'data ostatniej transakcji', null=True)
Q_for = models.IntegerField(u'zakładów na TAK', default=0)
Q_against = models.IntegerField(u'zakładów na NIE', default=0)
turnover = models.IntegerField(u'obrót', default=0, db_index=True)
absolute_price_change = models.IntegerField(
u'zmiana ceny (wartość absolutna)', db_index=True, default=0
)
price_change = models.IntegerField(u'zmiana ceny', default=0)
# constant used when recalculating event prices
# probably: how much the quantity needs to change in order to move the price
B = models.FloatField(u'stała B', default=FACTOR_B)
objects = EventManager()
tags = TaggableManager(blank=True)
class Meta:
verbose_name = 'wydarzenie'
verbose_name_plural = 'wydarzenia'
def __str__(self):
return self.title
def save(self, *args, **kwargs):
"""
Recalculate prices for event
:param kwargs:
"""
if not self.pk:
self.recalculate_prices()
super(Event, self).save(*args, **kwargs)
def get_absolute_url(self):
return 'http://%(domain)s%(url)s' % {
'domain': current_domain(),
'url': reverse('events:event_detail', kwargs={'pk': self.pk})
}
def get_relative_url(self):
return '/event/%(id)d-%(title)s' % {'id': self.id, 'title': slugify(unidecode(self.title))}
def get_absolute_facebook_object_url(self):
return 'http://%(domain)s%(url)s' % {
'domain': current_domain(),
'url': reverse('events:event_facebook_object_detail', kwargs={'event_id': self.id})
}
def get_small_embed_url(self):
return 'http://%(domain)s%(url)s' % {
'domain': current_domain(),
'url': reverse('events:event_embed_detail', kwargs={'pk': self.id})
}
@staticmethod
def autocomplete_search_fields():
return ("id__iexact", "title__icontains", "short_title__icontains")
@property
def is_in_progress(self):
return self.outcome == Event.IN_PROGRESS
@property
def publish_channel(self):
return 'event_%d' % self.id
@property
def event_dict(self):
return {
'event_id': self.id,
'buy_for_price': self.current_buy_for_price,
'buy_against_price': self.current_buy_against_price,
'sell_for_price': self.current_sell_for_price,
'sell_against_price': self.current_sell_against_price,
}
@property
def finish_date(self):
"""
If event is not finished then estimated_end_date, else end_date
:return: finish date
:rtype: datetime
"""
if self.is_in_progress:
return self.estimated_end_date
else:
return self.end_date
@property
def to_be_resolved(self):
"""
Return True if event is waiting to be resolved.
"""
return timezone.now() >= self.finish_date
def price_for_outcome(self, outcome, direction=True):
if (direction, outcome) not in Bet.BET_OUTCOMES_TO_PRICE_ATTR:
raise UnknownOutcome()
attr = Bet.BET_OUTCOMES_TO_PRICE_ATTR[(direction, outcome)]
return getattr(self, attr)
def get_event_small_chart(self):
"""
Get last transactions price for every day from small event range
:return: chart points of EVENT_SMALL_CHART_DAYS days
:rtype: {int, [], []}
"""
return self.__get_chart_points(self.EVENT_SMALL_CHART_DAYS)
def get_event_big_chart(self):
"""
Get last transactions price for every day from big event range
:return: chart points of EVENT_BIG_CHART_DAYS days
:rtype: {int, [], []}
"""
return self.__get_chart_points(self.EVENT_BIG_CHART_DAYS)
def get_JSON_small_chart(self):
return json.dumps(self.get_event_small_chart())
def get_JSON_big_chart(self):
return json.dumps(self.get_event_big_chart())
@transaction.atomic
def __get_chart_points(self, days):
"""
Get last transactions price for every day;
:param days: number of days in past on chart
:type days: int
:return: chart points
:rtype: {int, [], []}
"""
last_date = self.end_date if self.end_date else timezone.now()
first_date = max(last_date - relativedelta(days=days), self.created_date)
labels = []
points = []
snapshots = self.snapshots.filter(
snapshot_of_id=self.id,
created_at__gte=first_date,
created_at__lte=last_date,
created_at__hour=0
).order_by('created_at')
additional_points = min(days - len(snapshots), Event.CHART_MARGIN)
step_date = first_date - relativedelta(days=additional_points)
for point in range(additional_points):
labels.append(u'{0} {1}'.format(step_date.day, _(step_date.strftime('%B'))))
step_date += relativedelta(days=1)
points.append(Event.BEGIN_PRICE)
for snapshot in snapshots:
labels.append(u'{0} {1}'.format(snapshot.created_at.day, _(snapshot.created_at.strftime('%B'))))
last_price = snapshot.current_buy_for_price
points.append(last_price)
return {
'id': self.id,
'labels': labels,
'points': points
}
def get_user_bet_object(self, user):
"""
Find the single non-empty bet object for this user, or None.
:param user: logged user
:type user: User
:return: normally it should return exactly one bet where bet.has > 0
:rtype: Bet or None
"""
bets = self.bets.filter(user=user, has__gt=0).order_by('-id')
if bets.exists():
return bets[0]
def get_user_bet(self, user):
"""
Get the bet summary for a user; the user may be anonymous.
:param user: logged user or anonymous
:type user: User
:return: data for one bet display
:rtype: {}
"""
# Using 'true' and 'false' because some keys are designed for json
bet_line = {
'is_user': False,
'has': 0,
'avgPrice': 0,
'outcome': None, # note: None is the same as False
'buyNO': 'true', # default option is buy bet
'buyYES': 'true', # default option is buy bet
'priceYES': self.current_buy_for_price,
'priceNO': self.current_buy_against_price,
}
if user.pk:
bet_line['is_user'] = True
bet = self.get_user_bet_object(user)
if bet:
bet_line['id'] = bet.pk # it is only for debugging purpose
bet_line['has'] = bet.has
bet_line['avgPrice'] = bet.bought_avg_price
bet_line['outcome'] = bet.outcome # True - YES False - NO
if bet.outcome:
# you have bet for YES, you can sell them
bet_line['buyNO'] = 'false' # that means you sell bet YES
bet_line['priceYES'] = self.current_buy_for_price
bet_line['priceNO'] = self.current_sell_for_price
bet_line['outcome_str'] = 'true'
else:
# you have bet for NO, you can sell them
bet_line['buyYES'] = 'false' # that means you sell bet NO
bet_line['priceYES'] = self.current_sell_against_price
bet_line['priceNO'] = self.current_buy_against_price
bet_line['outcome_str'] = 'false'
return bet_line
def get_bet_social(self):
"""
Get users who bought this event
:return: Dict with 4 keys: 2 QuerySet with YES users and NO users, 2
integers with counts
:rtype: dict{}
"""
response = {}
bet_social_yes = Bet.objects.filter(
event=self,
outcome=True, # bought YES
has__gt=0,
)
response['yes_count'] = bet_social_yes.count()
response['yes_bets'] = bet_social_yes[:6]
bet_social_no = Bet.objects.filter(
event=self,
outcome=False, # bought NO
has__gt=0,
)
response['no_count'] = bet_social_no.count()
response['no_bets'] = bet_social_no[:6]
return response
def increment_quantity(self, outcome, by_amount):
"""
Used when operation buy or sell occurs
:param outcome: event outcome - YES or NO; True for YES
:type outcome: bool
:param by_amount: operations count, usually 1
:type by_amount: int
:return:
"""
if outcome not in Bet.BET_OUTCOMES_TO_QUANTITY_ATTR:
raise UnknownOutcome()
attr = Bet.BET_OUTCOMES_TO_QUANTITY_ATTR[outcome]
setattr(self, attr, getattr(self, attr) + by_amount)
self.recalculate_prices()
def increment_turnover(self, by_amount):
"""
Turnover increases +1 when operation buy or sell occurs
:param by_amount: operations count, usually 1
:type by_amount: int
"""
self.turnover += by_amount
def recalculate_prices(self):
"""
Calculate 4 prices for event
"""
factor = 100.
B = self.B
Q_for = self.Q_for
Q_against = self.Q_against
Q_for_sell = max(0, Q_for - 1)
Q_against_sell = max(0, Q_against - 1)
e_for_buy = exp(Q_for / B)
e_against_buy = exp(Q_against / B)
e_for_sell = exp(Q_for_sell / B)
e_against_sell = exp(Q_against_sell / B)
buy_for_price = e_for_buy / float(e_for_buy + e_against_buy)
buy_against_price = e_against_buy / float(e_for_buy + e_against_buy)
sell_for_price = e_for_sell / float(e_for_sell + e_against_buy)
sell_against_price = e_against_sell / float(e_for_buy + e_against_sell)
self.current_buy_for_price = round(factor * buy_for_price, 0)
self.current_buy_against_price = round(factor * buy_against_price, 0)
self.current_sell_for_price = round(factor * sell_for_price, 0)
self.current_sell_against_price = round(factor * sell_against_price, 0)
def vote_yes(self):
self.vote_yes_count += 1
if self.vote_yes_count >= config.VOICES_TO_RESOLVE:
self.finish_yes()
self.save()
return self.vote_yes_count
def vote_no(self):
self.vote_no_count += 1
if self.vote_no_count >= config.VOICES_TO_RESOLVE:
self.finish_no()
self.save()
return self.vote_no_count
def vote_cancel(self):
self.vote_cancel_count += 1
if self.vote_cancel_count >= config.VOICES_TO_RESOLVE:
self.cancel()
self.save()
return self.vote_cancel_count
@transaction.atomic
def __finish(self, outcome):
"""
Set Event finish status
:param outcome: outcome status; EVENT_OUTCOME_CHOICES
:type outcome: Choices
"""
if self.outcome != self.IN_PROGRESS:
raise EventNotInProgress("Wydarzenie zostało już rozwiązane.")
self.outcome = outcome
self.end_date = timezone.now()
self.save()
@transaction.atomic
def __finish_teams_outcome(self, teams_with_bets):
team_results = []
for team in teams_with_bets:
bets = teams_with_bets[team]
team_results.append(TeamResult(
team=team,
event=self,
initial_elo=team.get_elo(),
rewarded_total=sum(bet.rewarded_total for bet in bets),
prev_result=team.get_last_result(),
bets_count=len(bets),
))
elo_match = EloMatch()
team_results = sorted(
team_results,
key=lambda x: (x.rewarded_total, x.bets_count),
reverse=True
)
prev_result = None
for result in team_results:
place = team_results.index(result) + 1
# Set draws
if (
prev_result
and (prev_result.rewarded_total, prev_result.bets_count) ==
(result.rewarded_total, result.bets_count)
):
place = next(
player.place for player in elo_match.players
if player.idx == prev_result.team.id
)
elo_match.add_player(
idx=result.team.id,
place=place,
elo=result.initial_elo,
)
prev_result = result
elo_match.calculate_elos()
for team_result in team_results:
team_result.elo = elo_match.get_elo(team_result.team.id)
team_result.save()
return team_results
@transaction.atomic
def __finish_with_outcome(self, outcome):
"""
Main finish handler: resolves the event and awards prizes to winning bets.
:param outcome: outcome status; EVENT_OUTCOME_CHOICES
:type outcome: Choices
"""
self.__finish(outcome)
teams_with_bets = defaultdict(list)
for bet in Bet.objects.filter(event=self):
if bet.outcome == self.BOOLEAN_OUTCOME_DICT[outcome]:
bet.rewarded_total = self.PRIZE_FOR_WINNING * bet.has
bet.user.total_cash += bet.rewarded_total
Transaction.objects.create(
user=bet.user,
event=self,
type=Transaction.EVENT_WON_PRIZE,
quantity=bet.has,
price=self.PRIZE_FOR_WINNING
)
if bet.user.team:
teams_with_bets[bet.user.team].append(bet)
# update portfolio value
bet.user.portfolio_value -= bet.get_invested()
bet.user.save()
# This cause display event in "latest outcome"
bet.is_new_resolved = True
bet.save()
if len(teams_with_bets) > 1:
team_results = self.__finish_teams_outcome(teams_with_bets)
for team_result in team_results:
(
Bet.objects
.get_team_bets_for_events(team_result.team, [self])
.update(team_result=team_result)
)
@transaction.atomic
def finish_yes(self):
"""
If the event is resolved as YES, calculate and award prizes.
"""
self.__finish_with_outcome(self.FINISHED_YES)
@transaction.atomic
def finish_no(self):
"""
If the event is resolved as NO, calculate and award prizes.
"""
self.__finish_with_outcome(self.FINISHED_NO)
@transaction.atomic
def cancel(self):
"""
Refund users when the event is cancelled.
"""
self.__finish(self.CANCELLED)
users = {}
for t in Transaction.objects.filter(event=self).order_by('user'):
if t.user not in users:
users.update({
t.user: 0
})
if t.type in Transaction.BUY_SELL_TYPES:
# For BUY transactions the price is stored as a negative value, so the refund goes the
# other way: a plain BUY yields a positive refund (EVENT_CANCELLED_REFUND), while a BUY
# followed by a SELL at a profit yields a negative balance (EVENT_CANCELLED_DEBIT).
users[t.user] -= t.quantity * t.price
for user, refund in users.iteritems():
if refund == 0:
continue
user.total_cash += refund
user.save()
if refund > 0:
transaction_type = Transaction.EVENT_CANCELLED_REFUND
else:
transaction_type = Transaction.EVENT_CANCELLED_DEBIT
Transaction.objects.create(
user=user,
event=self,
type=transaction_type,
price=refund
)
class TeamResult(models.Model):
"""
Result of team after event is resolved
"""
objects = TeamResultManager()
team = models.ForeignKey(
'accounts.Team', related_name='results', related_query_name='result'
)
prev_result = models.OneToOneField(
'self', on_delete=models.PROTECT, null=True
)
elo = models.IntegerField(u'ranking', null=True, blank=True)
initial_elo = models.IntegerField(u'początkowy ranking', default=1400)
rewarded_total = models.IntegerField(
u'nagroda za wynik', default=0, null=False
)
event = models.ForeignKey(
Event, related_query_name='team_result', related_name='team_results'
)
bets_count = models.PositiveIntegerField(u'liczba zakładów')
created = models.DateTimeField(
auto_now_add=True, verbose_name=u'utworzono'
)
class Meta:
verbose_name = u'rezultat drużyny'
verbose_name_plural = u'rezultaty drużyn'
class SolutionVote(models.Model):
"""
Vote for resolving an event
"""
class Meta:
unique_together = ('user', 'event')
YES, NO, CANCEL = range(1, 4)
VOTE_OUTCOME_CHOICES = (
(YES, u'rozwiązanie na TAK'),
(NO, u'rozwiązanie na NIE'),
(CANCEL, u'anulowanie wydarzenia')
)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
event = models.ForeignKey(Event)
outcome = models.IntegerField(u'rozwiązanie wydarzenia', choices=VOTE_OUTCOME_CHOICES, null=True)
@python_2_unicode_compatible
class Bet(models.Model):
"""
Created when a user chooses YES or NO for an event.
"""
class Meta:
verbose_name = u'zakład'
verbose_name_plural = u'zakłady'
YES = True
NO = False
BET_OUTCOME_CHOICES = (
(YES, u'udziały na TAK'),
(NO, u'udziały na NIE'),
)
BUY = True
SELL = False
BET_OUTCOMES_TO_PRICE_ATTR = {
(BUY, YES): 'current_buy_for_price',
(BUY, NO): 'current_buy_against_price',
(SELL, YES): 'current_sell_for_price',
(SELL, NO): 'current_sell_against_price'
}
BET_OUTCOMES_TO_QUANTITY_ATTR = {
True: 'Q_for',
False: 'Q_against'
}
user = models.ForeignKey(
settings.AUTH_USER_MODEL, null=False, related_name='bets', related_query_name='bet'
)
event = models.ForeignKey(Event, null=False, related_name='bets', related_query_name='bet')
outcome = models.BooleanField(u'zakład na TAK', choices=BET_OUTCOME_CHOICES)
# most important param: how many bets user has.
has = models.PositiveIntegerField(u'posiadane zakłady', default=0, null=False)
bought = models.PositiveIntegerField(u'kupione zakłady', default=0, null=False)
sold = models.PositiveIntegerField(u'sprzedane zakłady', default=0, null=False)
bought_avg_price = models.FloatField(u'kupione po średniej cenie', default=0, null=False)
sold_avg_price = models.FloatField(u'sprzedane po średniej cenie', default=0, null=False)
# this field is probably for the biggest rewards
rewarded_total = models.IntegerField(u'nagroda za wynik', default=0, null=False)
# this is used to show event in my wallet.
is_new_resolved = models.BooleanField(u'ostatnio rozstrzygnięte', default=False, null=False)
team_result = models.ForeignKey(
TeamResult, null=True, related_name='bets', related_query_name='bet'
)
objects = BetManager()
@property
def bet_dict(self):
"""
Dictionary with bet values
:return: bet vaules
:rtype: {}
"""
return {
'bet_id': self.id,
'event_id': self.event.id,
'user_id': self.user.id,
'outcome': self.outcome,
'has': self.has,
'bought': self.bought,
'sold': self.sold,
'bought_avg_price': self.bought_avg_price,
'sold_avg_price': self.sold_avg_price,
'rewarded_total': self.rewarded_total,
}
def __str__(self):
return u'zakłady %s na %s' % (self.user, self.event)
def current_event_price(self):
"""
Get the current price for the event. The price depends on bet.outcome
:return: current price
:rtype: int
"""
if self.outcome:
return self.event.current_buy_for_price
else:
return self.event.current_buy_against_price
def is_won(self):
"""
winning bet when bet has outcome True and event.outcome is 3
(FINISHED_YES) or
when bet has outcome False and event.outcome is 4 (FINISHED_NO)
:return: True if won
:rtype: bool
"""
if self.outcome and self.event.outcome == Event.FINISHED_YES:
return True
elif not self.outcome and self.event.outcome == Event.FINISHED_NO:
return True
return False
def get_wallet_change(self):
"""
Get the amount won or lost after the event finished. For events in progress,
get the amount that could be won.
:return: more or less than zero
:rtype: int
"""
# TODO: really not sure about this
if self.is_won() or self.event.outcome == Event.IN_PROGRESS:
return self.get_won() - self.get_invested()
else:
return -self.get_invested()
def get_invested(self):
"""
How much was invested in this bet
:return: price above zero
:rtype: float
"""
# TODO: not sure about this
if self.event.outcome == Event.CANCELLED:
return 0
return round(self.has * self.bought_avg_price, 0)
def get_won(self):
"""
Get the amount won, or the potential winnings.
:return: price
:rtype: int
"""
if self.is_won() or self.event.outcome == Event.IN_PROGRESS:
return self.has * Event.PRIZE_FOR_WINNING
else:
return 0
def is_finished_yes(self):
"""
Result for bet
:return: True if event resolved for YES
:rtype: bool
"""
return self.event.outcome == Event.FINISHED_YES
def is_finished_no(self):
"""
Result for bet
:return: True if event resolved for NO
:rtype: bool
"""
return self.event.outcome == Event.FINISHED_NO
def is_cancelled(self):
"""
Result for bet
:return: True if canceled bet
:rtype: bool
"""
return self.event.outcome == Event.CANCELLED
@python_2_unicode_compatible
class Transaction(models.Model):
"""
A buy, sell, or other operation for a user and an event
"""
class Meta:
ordering = ['-date']
verbose_name = 'transakcja'
verbose_name_plural = 'transakcje'
BUY_YES, SELL_YES, BUY_NO, SELL_NO, \
EVENT_CANCELLED_REFUND, EVENT_CANCELLED_DEBIT, \
EVENT_WON_PRIZE, TOPPED_UP, BONUS = range(1, 10)
TRANSACTION_TYPE_CHOICES = (
(BUY_YES, u'zakup udziałów na TAK'),
(SELL_YES, u'sprzedaż udziałów na TAK'),
(BUY_NO, u'zakup udziałów na NIE'),
(SELL_NO, u'sprzedaż udziałów na NIE'),
(EVENT_CANCELLED_REFUND, u'zwrot po anulowaniu wydarzenia'),
(EVENT_CANCELLED_DEBIT, u'obciążenie konta po anulowaniu wydarzenia'),
(EVENT_WON_PRIZE, u'wygrana po rozstrzygnięciu wydarzenia'),
(TOPPED_UP, u'doładowanie konta przez aplikację'),
(BONUS, u'bonus')
)
# Transactions changing event price: BUY_YES, SELL_YES, BUY_NO, SELL_NO
BUY_SELL_TYPES = (BUY_YES, SELL_YES, BUY_NO, SELL_NO)
EVENT_SOLVED_TYPES = (EVENT_CANCELLED_REFUND, EVENT_CANCELLED_DEBIT, EVENT_WON_PRIZE)
BONUS_TYPES = (TOPPED_UP, BONUS)
YES_OUTCOME = (BUY_YES, SELL_YES)
NO_OUTCOME = (BUY_NO, SELL_NO)
BUY_TYPES = (BUY_YES, BUY_NO)
SELL_TYPES = (SELL_YES, SELL_NO)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, null=False, related_name='transactions',
related_query_name='transaction'
)
event = models.ForeignKey(
Event, null=True, related_name='transactions', related_query_name='transaction'
)
type = models.PositiveIntegerField(
"rodzaj transakcji", choices=TRANSACTION_TYPE_CHOICES, default=1
)
date = models.DateTimeField('data', auto_now_add=True)
quantity = models.PositiveIntegerField(u'ilość', default=1)
price = models.IntegerField(u'cena jednostkowa', default=0, null=False)
objects = TransactionManager()
def __str__(self):
return u'{} przez {}'.format(self.get_type_display(), self.user)
@property
def total_cash(self):
"""
Get total price for all quantity in transaction: total won, total bought, total sold
:return: total amount
:rtype: int
"""
return self.quantity * self.price
@property
def total_wallet(self):
"""
Get the total price for all quantity in the transaction, with the sign flipped (the effect on the user's wallet)
:return: total amount
:rtype: int
"""
return -1 * self.quantity * self.price
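# --- Usage sketch (assumes an existing Event instance; not part of the original module) ---
# event = Event.objects.get(pk=1)
# price = event.price_for_outcome(Bet.YES, direction=Bet.BUY)   # current buy price for YES
# event.increment_quantity(Bet.YES, by_amount=1)                # recalculates all four prices
# event.increment_turnover(1)
# event.save()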
|
This really interesting article by Christina Passariello in the Wall Street Journal Fashion section – “New cyclist styles pedal their way into Paris” – mentions how the new wave bicycle revolution is continuing full steam ahead, intertwining fashion and cycling as one statement.
The cool cycling culture that started in Copenhagen is now gathering momentum in Paris. It seems that arriving fashionably late on two stylish wheels is the new vogue, with many fashion editors and models predicted to do just that at Paris Fashion Week.
Paris has also introduced a bike share scheme (Vélib’) to encourage more people to cycle. Copenhagen Cycle Chic has an article that talks about how the city is being transformed and how the Parisians are embracing urban cycle style in a convenient social context just like Copenhageners.
|
from gsignals import weak_connect, connect_all as gsignals_connect_all
from gsignals.signals import attach_signal_connect_info
from SCRIBES.TriggerManager import TriggerManager as CoreTriggerManager
def connect_all(obj, *managers, **external_gobjects):
for m in managers:
if isinstance(m, TriggerManager):
m.connect_triggers(obj)
else:
m.connect_signals(obj)
gsignals_connect_all(obj, **external_gobjects)
class Trigger(object):
"""
Unbound trigger (a special signal emitted by a keyboard shortcut).
Can be used as a decorator to mark methods for automatic connecting.
"""
def __init__(self, name, accelerator="", description="", category="",
error=True, removable=True):
self.name = name
self.accelerator = accelerator
self.description = description
self.category = category
self.error = error
self.removable = removable
def __call__(self, func=None, after=False, idle=False):
return attach_signal_connect_info('triggers_to_connect', self, func, after, idle)
def create(self, manager):
return manager.create_trigger(self.name, self.accelerator, self.description,
self.category, self.error, self.removable)
class TriggerManager(object):
'''
Auto disconnected trigger manager
Wraps SCRIBES.TriggerManager and calls remove_triggers on object deletion
'''
def __init__(self, editor):
self.manager = CoreTriggerManager(editor)
self.triggers = {}
def __del__(self):
self.triggers.clear()
self.manager.remove_triggers()
def connect_triggers(self, obj):
'''
Connects object methods marked by trigger decorator
'''
for attr, value in obj.__class__.__dict__.iteritems():
for trigger, connect_params in getattr(value, 'triggers_to_connect', ()):
self.connect(trigger, obj, attr, **connect_params)
def connect(self, trigger, obj, attr, after, idle):
if trigger.name not in self.triggers:
self.triggers[trigger.name] = trigger.create(self.manager)
weak_connect(self.triggers[trigger.name], 'activate', obj, attr, after=after, idle=idle)
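# Hedged usage sketch (not part of the original module), based on the
# docstrings above. The trigger name, accelerator, feature class and the
# SCRIBES editor object are hypothetical placeholders.
#
# save_trigger = Trigger('save-selection', accelerator='<ctrl><alt>s',
#                        description='Save the current selection')
#
# class SelectionSaver(object):
#     @save_trigger
#     def on_save(self, *args):
#         pass  # react to the keyboard shortcut here
#
# manager = TriggerManager(editor)        # 'editor' is provided by SCRIBES
# connect_all(SelectionSaver(), manager)  # binds on_save to the trigger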
|
Do you like stylish things? Let your darling Rottweiler take joy in wearing such accessories too! Choose this top-quality leather dog collar with adornments from FDT Artisan. This gear is crafted for those who simply adore stylish collars. In this stunning collar, your Rottweiler will have a more elegant, yet lovely look. Your adorable Rottweiler will get the best treatment when wearing this gear. Genuine leather is soft and absolutely safe.
The brass-plated traditional buckle will withstand even the strongest pulling of a Rottweiler. The incredible decorative elements are brass plated and have a superior shine. Point up your Rottweiler's style and buy this fancy FDT Artisan leather collar!
Genuine leather was chosen to offer extreme comfort and safety while wearing. The material is carefully oiled, which keeps it from cracking. Among the best features of full-grain genuine leather are good adjustability, tear resistance and hardness. The layer of leather is thick enough to keep the collar from being damaged, and it helps you to walk even a big, strong dog safely. The rounded, smooth edges of the gear are non-cutting and don't rub your four-legged friend's skin. Besides, genuine leather doesn't contain any toxic elements, so it is totally safe.
As for the adornments, they are truly fashionable! Catchy studs are placed along the leather strap. These decorative elements are brass plated and thus shine with an amazing glitter. The embellished design of this collar is inimitable and individual. Thanks to these adornments, this genuine leather dog collar has a spark, which means your four-legged friend will have his own individual and exquisite style.
As for the hardware of this decorated natural leather collar, it is brass plated and matches the embellishments perfectly. This coating provides extra protection for the fittings, as it prevents rust. The hardware set includes a traditional elegant buckle and a durable D-ring for leash connection. These fittings are easy to use and reliable in the service they provide. Add more zest to your Rottweiler's look and buy this charming leather collar!
|
# -*- coding: utf-8 -*-
'''
IPC transport classes
'''
# Import Python libs
from __future__ import absolute_import
import logging
import socket
import msgpack
import weakref
import time
# Import Tornado libs
import tornado
import tornado.gen
import tornado.netutil
import tornado.concurrent
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
# Import Salt libs
import salt.transport.client
import salt.transport.frame
log = logging.getLogger(__name__)
# 'tornado.concurrent.Future' doesn't support
# remove_done_callback() which we would have called
# in the timeout case. Due to this, we have this
# callback function outside of FutureWithTimeout.
def future_with_timeout_callback(future):
if future._future_with_timeout is not None:
future._future_with_timeout._done_callback(future)
class FutureWithTimeout(tornado.concurrent.Future):
def __init__(self, io_loop, future, timeout):
super(FutureWithTimeout, self).__init__()
self.io_loop = io_loop
self._future = future
if timeout is not None:
if timeout < 0.1:
timeout = 0.1
self._timeout_handle = self.io_loop.add_timeout(
self.io_loop.time() + timeout, self._timeout_callback)
else:
self._timeout_handle = None
if hasattr(self._future, '_future_with_timeout'):
# Reusing a future that has previously been used.
# Due to this, no need to call add_done_callback()
# because we did that before.
self._future._future_with_timeout = self
if self._future.done():
future_with_timeout_callback(self._future)
else:
self._future._future_with_timeout = self
self._future.add_done_callback(future_with_timeout_callback)
def _timeout_callback(self):
self._timeout_handle = None
# 'tornado.concurrent.Future' doesn't support
# remove_done_callback(). So we set an attribute
# inside the future itself to track what happens
# when it completes.
self._future._future_with_timeout = None
self.set_exception(tornado.ioloop.TimeoutError())
def _done_callback(self, future):
try:
if self._timeout_handle is not None:
self.io_loop.remove_timeout(self._timeout_handle)
self._timeout_handle = None
self.set_result(future.result())
except Exception as exc:
self.set_exception(exc)
class IPCServer(object):
'''
A Tornado IPC server very similar to Tornado's TCPServer class
but using either UNIX domain sockets or TCP sockets
'''
def __init__(self, socket_path, io_loop=None, payload_handler=None):
'''
Create a new Tornado IPC server
:param str/int socket_path: Path on the filesystem for the
socket to bind to. This socket does
not need to exist prior to calling
this method, but parent directories
should.
It may also be of type 'int', in
which case it is used as the port
for a tcp localhost connection.
:param IOLoop io_loop: A Tornado ioloop to handle scheduling
:param func payload_handler: A function to customize handling of
incoming data.
'''
self.socket_path = socket_path
self._started = False
self.payload_handler = payload_handler
# Placeholders for attributes to be populated by method calls
self.sock = None
self.io_loop = io_loop or IOLoop.current()
self._closing = False
def start(self):
'''
Perform the work necessary to start up a Tornado IPC server
Blocks until socket is established
'''
# Start up the ioloop
log.trace('IPCServer: binding to socket: {0}'.format(self.socket_path))
if isinstance(self.socket_path, int):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setblocking(0)
self.sock.bind(('127.0.0.1', self.socket_path))
# Based on default used in tornado.netutil.bind_sockets()
self.sock.listen(128)
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
self._started = True
@tornado.gen.coroutine
def handle_stream(self, stream):
'''
Override this to handle the streams as they arrive
:param IOStream stream: An IOStream for processing
See http://tornado.readthedocs.org/en/latest/iostream.html#tornado.iostream.IOStream
for additional details.
'''
@tornado.gen.coroutine
def _null(msg):
raise tornado.gen.Return(None)
def write_callback(stream, header):
if header.get('mid'):
@tornado.gen.coroutine
def return_message(msg):
pack = salt.transport.frame.frame_msg(
msg,
header={'mid': header['mid']},
raw_body=True,
)
yield stream.write(pack)
return return_message
else:
return _null
unpacker = msgpack.Unpacker()
while not stream.closed():
try:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
body = framed_msg['body']
self.io_loop.spawn_callback(self.payload_handler, body, write_callback(stream, framed_msg['head']))
except tornado.iostream.StreamClosedError:
log.trace('Client disconnected from IPC {0}'.format(self.socket_path))
break
except Exception as exc:
log.error('Exception occurred while handling stream: {0}'.format(exc))
def handle_connection(self, connection, address):
log.trace('IPCServer: Handling connection to address: {0}'.format(address))
try:
stream = IOStream(
connection,
io_loop=self.io_loop,
)
self.io_loop.spawn_callback(self.handle_stream, stream)
except Exception as exc:
log.error('IPC streaming error: {0}'.format(exc))
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if self._closing:
return
self._closing = True
if hasattr(self.sock, 'close'):
self.sock.close()
def __del__(self):
self.close()
class IPCClient(object):
'''
A Tornado IPC client very similar to Tornado's TCPClient class
but using either UNIX domain sockets or TCP sockets
This was written because Tornado does not have its own IPC
server/client implementation.
:param IOLoop io_loop: A Tornado ioloop to handle scheduling
:param str/int socket_path: A path on the filesystem where a socket
belonging to a running IPCServer can be
found.
It may also be of type 'int', in which
case it is used as the port for a tcp
localhost connection.
'''
# Create singleton map between two sockets
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, socket_path, io_loop=None):
io_loop = io_loop or tornado.ioloop.IOLoop.current()
if io_loop not in IPCClient.instance_map:
IPCClient.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = IPCClient.instance_map[io_loop]
# FIXME
key = str(socket_path)
if key not in loop_instance_map:
log.debug('Initializing new IPCClient for path: {0}'.format(key))
new_client = object.__new__(cls)
# FIXME
new_client.__singleton_init__(io_loop=io_loop, socket_path=socket_path)
loop_instance_map[key] = new_client
else:
log.debug('Re-using IPCClient for {0}'.format(key))
return loop_instance_map[key]
def __singleton_init__(self, socket_path, io_loop=None):
'''
Create a new IPC client
IPC clients cannot bind to ports, but must connect to
existing IPC servers. Clients can then send messages
to the server.
'''
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self.socket_path = socket_path
self._closing = False
self.stream = None
self.unpacker = msgpack.Unpacker()
def __init__(self, socket_path, io_loop=None):
# Handled by singleton __new__
pass
def connected(self):
return self.stream is not None and not self.stream.closed()
def connect(self, callback=None, timeout=None):
'''
Connect to the IPC socket
'''
if hasattr(self, '_connecting_future') and not self._connecting_future.done(): # pylint: disable=E0203
future = self._connecting_future # pylint: disable=E0203
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect, timeout=timeout)
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
return future
@tornado.gen.coroutine
def _connect(self, timeout=None):
'''
Connect to a running IPCServer
'''
if isinstance(self.socket_path, int):
sock_type = socket.AF_INET
sock_addr = ('127.0.0.1', self.socket_path)
else:
sock_type = socket.AF_UNIX
sock_addr = self.socket_path
self.stream = None
if timeout is not None:
timeout_at = time.time() + timeout
while True:
if self._closing:
break
if self.stream is None:
self.stream = IOStream(
socket.socket(sock_type, socket.SOCK_STREAM),
io_loop=self.io_loop,
)
try:
log.trace('IPCClient: Connecting to socket: {0}'.format(self.socket_path))
yield self.stream.connect(sock_addr)
self._connecting_future.set_result(True)
break
except Exception as e:
if self.stream.closed():
self.stream = None
if timeout is None or time.time() > timeout_at:
if self.stream is not None:
self.stream.close()
self.stream = None
self._connecting_future.set_exception(e)
break
yield tornado.gen.sleep(1)
def __del__(self):
self.close()
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if self._closing:
return
self._closing = True
if self.stream is not None and not self.stream.closed():
self.stream.close()
class IPCMessageClient(IPCClient):
'''
Salt IPC message client
Create an IPC client to send messages to an IPC server
An example of a very simple IPCMessageClient connecting to an IPCServer. This
example assumes an already running IPCMessage server.
IMPORTANT: The below example also assumes a running IOLoop process.
# Import Tornado libs
import tornado.ioloop
# Import Salt libs
import salt.config
import salt.transport.ipc
io_loop = tornado.ioloop.IOLoop.current()
ipc_server_socket_path = '/var/run/ipc_server.ipc'
ipc_client = salt.transport.ipc.IPCMessageClient(ipc_server_socket_path, io_loop=io_loop)
# Connect to the server
ipc_client.connect()
# Send some data
ipc_client.send('Hello world')
'''
# FIXME timeout unimplemented
# FIXME tries unimplemented
@tornado.gen.coroutine
def send(self, msg, timeout=None, tries=None):
'''
Send a message to an IPC socket
If the socket is not currently connected, a connection will be established.
:param dict msg: The message to be sent
:param int timeout: Timeout when sending message (Currently unimplemented)
'''
if not self.connected():
yield self.connect()
pack = salt.transport.frame.frame_msg(msg, raw_body=True)
yield self.stream.write(pack)
class IPCMessageServer(IPCServer):
'''
Salt IPC message server
Creates a message server which can create and bind to a socket on a given
path and then respond to messages asynchronously.
An example of a very simple IPCServer which prints received messages to
a console:
# Import Tornado libs
import tornado.ioloop
# Import Salt libs
import salt.transport.ipc
import salt.config
opts = salt.config.master_opts()
io_loop = tornado.ioloop.IOLoop.current()
ipc_server_socket_path = '/var/run/ipc_server.ipc'
ipc_server = salt.transport.ipc.IPCMessageServer(opts, io_loop=io_loop,
stream_handler=print_to_console)
# Bind to the socket and prepare to run
ipc_server.start(ipc_server_socket_path)
# Start the server
io_loop.start()
# This callback is run whenever a message is received
def print_to_console(payload):
print(payload)
See IPCMessageClient() for an example of sending messages to an IPCMessageServer instance
'''
class IPCMessagePublisher(object):
'''
A Tornado IPC Publisher similar to Tornado's TCPServer class
but using either UNIX domain sockets or TCP sockets
'''
def __init__(self, opts, socket_path, io_loop=None):
'''
Create a new Tornado IPC server
:param dict opts: Salt options
:param str/int socket_path: Path on the filesystem for the
socket to bind to. This socket does
not need to exist prior to calling
this method, but parent directories
should.
It may also be of type 'int', in
which case it is used as the port
for a tcp localhost connection.
:param IOLoop io_loop: A Tornado ioloop to handle scheduling
'''
self.opts = opts
self.socket_path = socket_path
self._started = False
# Placeholders for attributes to be populated by method calls
self.sock = None
self.io_loop = io_loop or IOLoop.current()
self._closing = False
self.streams = set()
def start(self):
'''
Perform the work necessary to start up a Tornado IPC server
Blocks until socket is established
'''
# Start up the ioloop
log.trace('IPCMessagePublisher: binding to socket: {0}'.format(self.socket_path))
if isinstance(self.socket_path, int):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setblocking(0)
self.sock.bind(('127.0.0.1', self.socket_path))
# Based on default used in tornado.netutil.bind_sockets()
self.sock.listen(128)
else:
self.sock = tornado.netutil.bind_unix_socket(self.socket_path)
tornado.netutil.add_accept_handler(
self.sock,
self.handle_connection,
io_loop=self.io_loop,
)
self._started = True
@tornado.gen.coroutine
def _write(self, stream, pack):
try:
yield stream.write(pack)
except tornado.iostream.StreamClosedError:
log.trace('Client disconnected from IPC {0}'.format(self.socket_path))
self.streams.discard(stream)
except Exception as exc:
log.error('Exception occurred while handling stream: {0}'.format(exc))
if not stream.closed():
stream.close()
self.streams.discard(stream)
def publish(self, msg):
'''
Send message to all connected sockets
'''
if not len(self.streams):
return
pack = salt.transport.frame.frame_msg(msg, raw_body=True)
for stream in self.streams:
self.io_loop.spawn_callback(self._write, stream, pack)
def handle_connection(self, connection, address):
log.trace('IPCServer: Handling connection to address: {0}'.format(address))
try:
if self.opts['ipc_write_buffer'] > 0:
log.trace('Setting IPC connection write buffer: {0}'.format((self.opts['ipc_write_buffer'])))
stream = IOStream(
connection,
io_loop=self.io_loop,
max_write_buffer_size=self.opts['ipc_write_buffer']
)
else:
stream = IOStream(
connection,
io_loop=self.io_loop
)
self.streams.add(stream)
except Exception as exc:
log.error('IPC streaming error: {0}'.format(exc))
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if self._closing:
return
self._closing = True
for stream in self.streams:
stream.close()
self.streams.clear()
if hasattr(self.sock, 'close'):
self.sock.close()
def __del__(self):
self.close()
class IPCMessageSubscriber(IPCClient):
'''
Salt IPC message subscriber
Create an IPC client to receive messages from IPC publisher
An example of a very simple IPCMessageSubscriber connecting to an IPCMessagePublisher.
This example assumes an already running IPCMessagePublisher.
IMPORTANT: The below example also assumes the IOLoop is NOT running.
# Import Tornado libs
import tornado.ioloop
# Import Salt libs
import salt.config
import salt.transport.ipc
# Create a new IO Loop.
# We know that this new IO Loop is not currently running.
io_loop = tornado.ioloop.IOLoop()
ipc_publisher_socket_path = '/var/run/ipc_publisher.ipc'
ipc_subscriber = salt.transport.ipc.IPCMessageSubscriber(ipc_publisher_socket_path, io_loop=io_loop)
# Connect to the server
# Use the associated IO Loop that isn't running.
io_loop.run_sync(ipc_subscriber.connect)
# Wait for some data
package = ipc_subscriber.read_sync()
'''
def __singleton_init__(self, socket_path, io_loop=None):
super(IPCMessageSubscriber, self).__singleton_init__(
socket_path, io_loop=io_loop)
self._read_sync_future = None
self._read_stream_future = None
self._sync_ioloop_running = False
self.saved_data = []
@tornado.gen.coroutine
def _read_sync(self, timeout):
exc_to_raise = None
ret = None
try:
while True:
if self._read_stream_future is None:
self._read_stream_future = self.stream.read_bytes(4096, partial=True)
if timeout is None:
wire_bytes = yield self._read_stream_future
else:
future_with_timeout = FutureWithTimeout(
self.io_loop, self._read_stream_future, timeout)
wire_bytes = yield future_with_timeout
self._read_stream_future = None
# Remove the timeout once we get some data or an exception
# occurs. We will assume that the rest of the data is already
# there or is coming soon if an exception doesn't occur.
timeout = None
self.unpacker.feed(wire_bytes)
first = True
for framed_msg in self.unpacker:
if first:
ret = framed_msg['body']
first = False
else:
self.saved_data.append(framed_msg['body'])
if not first:
# We read at least one piece of data
break
except tornado.ioloop.TimeoutError:
# In the timeout case, just return None.
# Keep 'self._read_stream_future' alive.
ret = None
except tornado.iostream.StreamClosedError as exc:
log.trace('Subscriber disconnected from IPC {0}'.format(self.socket_path))
self._read_stream_future = None
exc_to_raise = exc
except Exception as exc:
log.error('Exception occurred in Subscriber while handling stream: {0}'.format(exc))
self._read_stream_future = None
exc_to_raise = exc
if self._sync_ioloop_running:
# Stop the IO Loop so that self.io_loop.start() will return in
# read_sync().
self.io_loop.spawn_callback(self.io_loop.stop)
if exc_to_raise is not None:
raise exc_to_raise # pylint: disable=E0702
raise tornado.gen.Return(ret)
def read_sync(self, timeout=None):
'''
Read a message from an IPC socket
The socket must already be connected.
The associated IO Loop must NOT be running.
:param int timeout: Timeout when receiving message
:return: message data if successful. None if timed out. Will raise an
exception for all other error conditions.
'''
if self.saved_data:
return self.saved_data.pop(0)
self._sync_ioloop_running = True
self._read_sync_future = self._read_sync(timeout)
self.io_loop.start()
self._sync_ioloop_running = False
ret_future = self._read_sync_future
self._read_sync_future = None
return ret_future.result()
@tornado.gen.coroutine
def _read_async(self, callback):
while not self.connected():
try:
yield self.connect()
except tornado.iostream.StreamClosedError:
log.trace('Subscriber closed stream on IPC {0} before connect'.format(self.socket_path))
except Exception as exc:
log.error('Exception occurred while Subscriber connecting: {0}'.format(exc))
while not self.stream.closed():
try:
self._read_stream_future = self.stream.read_bytes(4096, partial=True)
wire_bytes = yield self._read_stream_future
self._read_stream_future = None
self.unpacker.feed(wire_bytes)
for framed_msg in self.unpacker:
body = framed_msg['body']
self.io_loop.spawn_callback(callback, body)
except tornado.iostream.StreamClosedError:
log.trace('Subscriber disconnected from IPC {0}'.format(self.socket_path))
break
except Exception as exc:
log.error('Exception occurred while Subscriber handling stream: {0}'.format(exc))
def read_async(self, callback):
'''
Asynchronously read messages and invoke a callback when they are ready.
:param callback: A callback with the received data
'''
self.io_loop.spawn_callback(self._read_async, callback)
def close(self):
'''
Routines to handle any cleanup before the instance shuts down.
Sockets and filehandles should be closed explicitly, to prevent
leaks.
'''
if not self._closing:
IPCClient.close(self)
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
if self._read_sync_future is not None:
self._read_sync_future.exc_info()
if self._read_stream_future is not None:
self._read_stream_future.exc_info()
def __del__(self):
self.close()
|
USANA, word!: Can you have it all?
Women are a driving force of the economy, making up 70% of the workforce globally. Did you know that a large percentage of women are now choosing home based businesses because of the flexibility and freedom they offer?
In Australia, 3 out of 4 (74%) independent direct selling business operators are female, while in the USA women make up 87.1% of the total direct selling sales force.
Why, you might wonder? Well, many women have to leave their ‘regular’ job after having children. Inflexible work hours and little parental leave mean mothers struggle to balance work and time with their children.
Job insecurity and economic concerns mean people are no longer able to survive on one job for the rest of their lives. What’s the answer? Whether it’s a full-time job for mum or a part-time second income, a home based business seems to work.
A home based business, like one with USANA Health Sciences, is a preferred choice because of the lifestyle rewards, flexibility, and freedom it provides. Benefits include the time saved on commuting and the ability to work in the comfort and convenience of your own home. Earning potential is directly proportional to your performance, which means no more waiting for your boss to give you a raise or promotion. Women (and men) who want to retain their earning potential have huge opportunities in network marketing to earn more than a regular job would have provided.
So if you are looking to start a home based business – do your research thoroughly, put in the number of hours you can and soon you will be well on your way to achieving financial freedom – but on your own terms.
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 7 19:36:08 2014
@author: Stuart Mumford
This file is designed to be imported and run only via setup.py, hence its
dependency on astropy_helpers, which will be available in that context.
"""
from __future__ import absolute_import, division, print_function
import os
from astropy_helpers.commands.test import AstropyTest
from astropy_helpers.compat import _fix_user_options
class SunPyTest(AstropyTest):
description = 'Run the tests for this package'
user_options = [
# Package to test
('package=', 'P',
"The name of a specific package to test, e.g. 'io' or 'utils'. "
"If nothing is specified, all default tests are run."),
# Print all the things
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
# plugins to enable
('plugins=', 'p',
'Plugins to enable when running pytest.'),
# Run online tests?
('online', 'R',
'Also run tests that require an internet connection.'),
# Run only online tests?
('online-only', None,
'Only run tests that require an internet connection.'),
# Run tests that check figure generation
('figure', None,
'Run tests that compare figures against stored hashes.'),
# Calculate test coverage
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('cov-report=', None,
'Specify the type of coverage report to generate. (Default terminal)'),
# Run tests in parallel
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If negative, all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
# Pass additional cli args to pytest
('args=', 'a',
'Additional arguments to be passed to pytest.')
]
user_options = _fix_user_options(user_options)
package_name = ''
def initialize_options(self):
self.package = ''
#self.test_path = None
self.verbose_results = False
self.plugins = None
self.args = None
self.online = False
self.online_only = False
self.figure = False
self.coverage = False
self.cov_report = 'term' if self.coverage else None
self.docs_path = os.path.abspath('doc')
self.parallel = 0
self.temp_root = None
def _validate_required_deps(self):
"""
This method checks that any required modules are installed before
running the tests.
"""
try:
import sunpy
except ImportError:
raise ImportError(
"The 'test' command requires the sunpy package to be "
"installed and importable.")
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
online = self.online
offline = not self.online_only
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.self_test('
'modulename={1.package!r}, '
'args={1.args!r}, '
'verbose={1.verbose_results!r}, '
'parallel={1.parallel!r}, '
'online={online!r}, '
'offline={offline!r}, '
'figure={figure!r}, '
'coverage={1.coverage!r}, '
'cov_report={1.cov_report!r})); '
'{cmd_post}'
'sys.exit(result)')
x = cmd.format('pass',
self,
online=online,
offline=offline,
figure=self.figure,
cmd_pre=cmd_pre,
cmd_post=cmd_post)
return x
|
Kuntanase (Ash), Dec 12, GNA - Mr Simon Osei Mensah, Member of Parliament-elect (MP-elect) for Bosomtwe, has commended Ghanaians for their massive turnout at the polls and for electing him to represent the constituency.
He said the turnout was unprecedented in the electoral history of Ghana and that, for that reason, Ghanaians needed to be highly commended. Mr Mensah was speaking to the Ghana News Agency at Kuntanase on Saturday about his impression of the just-ended elections. He said that although there were a few incidents, they were minimal and could not detract from the high percentage of voters recorded during the elections.
The MP-elect also lauded the performance of the media which gave full and accurate coverage of the election resulting in peace and success of the event.
Mr Mensah, however, cautioned against victors using their victory in the elections to fuel chaos and trouble, saying, "You should celebrate your victory devoid of any troubles".
He said that now that the elections were over, what should engage the attention of Ghanaians was devising appropriate measures for uniting the country.
|
import goodtiming.core.i18n
from goodtiming.core.parser import CompositeParser
from goodtiming.core.processor import CompositeProcessor
from goodtiming.core.renderer import CompositeRenderer
import goodtiming.core.database
import goodtiming.modules.addtodo
import goodtiming.modules.reporttiming
import goodtiming.modules.done
import goodtiming.modules.show
import goodtiming.modules.huh
class Bot:
def __init__(self, language):
self.database = goodtiming.core.database.Database()
modules = [
goodtiming.modules.addtodo.AddTodoModule(),
goodtiming.modules.reporttiming.ReportTimingModule(),
goodtiming.modules.done.DoneModule(),
goodtiming.modules.show.ShowModule(),
goodtiming.modules.huh.HuhModule(),
]
sub_parsers = []
sub_processors = []
sub_renderers = []
for module in modules:
sub_parsers.extend(module.parsers())
sub_processors.extend(module.processors())
sub_renderers.extend(module.renderers())
self.parser = CompositeParser(sub_parsers)
self.processor = CompositeProcessor(sub_processors)
self.renderer = CompositeRenderer(sub_renderers)
def start(self, doer_id):
try:
self.database.execute('INSERT INTO doer (doer_id) VALUES (%s)', (doer_id,))
except goodtiming.core.database.DatabaseUniqueViolation:
pass
return _('Welcome!\nType \"buy some socks when I am at grocery store\" or type /help to see the usage.')
def help(self):
return _('I can understand the following patterns:\n\n1. <do something> when I am <some timing>\n2. I am <some timing>\n3. The one about <something> is done')
def chat(self, message, doer_id):
request = self.parser.parse(message)
response = self.processor.process(request, doer_id)
return self.renderer.render(response)
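# Hedged usage sketch (not part of the original module). It assumes the
# database behind goodtiming.core.database and the gettext translations
# behind _() are already configured; the doer id is a made-up example value.
#
# bot = Bot(language='en')
# print(bot.start(doer_id='example-doer-id'))
# print(bot.chat('buy some socks when I am at grocery store',
#                doer_id='example-doer-id'))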
|
For many students, the rewards and prestige offered by a career in accounting make it an attractive path to pursue. This degree in Business and Accounting will be a major step towards achieving that goal. It will be a popular choice with students who want to move directly into a career in accountancy as well as those who recognise the value of an accounting degree in a wider business management context. The Bachelor of Business in Accounting programme provides foundation education in the quantitative, computational and analytical skills required in the world of business and accounting. Students learn about the role and operation of accounting in a range of contexts. All major accountancy disciplines are covered including financial and cost accounting, management accounting, auditing and taxation.
The Bachelor of Business in Accounting programme has the following content.
The Bachelor of Business in Accounting is taught mainly through lectures and tutorials, with students taking responsibility for personal study outside scheduled class contact times. Assessed work may consist of a practical accounting solution, a report, a spreadsheet model, an essay, or a piece of research. The majority of subjects are assessed through a combination of coursework and examination.
Students on this programme will gain an in-depth knowledge of the major theoretical and practical aspects of accounting from both Irish and international perspectives as well as a strong understanding of business, legal and taxation issues and practices. As the course progresses, the emphasis shifts from foundation knowledge to more advanced study of particular areas of the major accounting, business and finance disciplines. In year 3 there is a greater emphasis on independent work and on the development of a critical and analytical approach to the subject matter.
Seven mandatory subjects provide foundation knowledge in core business and accounting disciplines and develop students' communication skills.
Year 2 further develops technical knowledge in the key accounting and finance disciplines while simultaneously advancing students’ expertise in the management of business and information.
In year 3, students examine current theory and practice in specialist financial fields and learn to deal with financial and business situations in an integrative and cross disciplinary manner.
Students holding a relevant higher certificate or other relevant qualifications may qualify for entry to the third year of this degree programme.
Graduates from this programme may seek to use it as a fast track to professional accountancy qualifications by availing of their entitlement to exemptions from ACCA, CIMA, ACA or CPA. Accounting graduates who enter employment immediately usually find well-paid jobs very quickly. Typical positions would be in audit and general accounting firms as well as multinational blue-chip companies and small and medium-sized enterprises across a range of industry sectors.
Students who complete this programme successfully will qualify for direct entry into the final year of our BA (Hons) Accounting & Finance (NQAI Level 8).
The fees for Full-time Bachelor of Business in Accounting are €5,895 per annum for EU students.
|
import os
from django.conf import global_settings
from django.contrib.auth import authenticate
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.context_processors import PermWrapper, PermLookupDict
from django.db.models import Q
from django.test import TestCase
from django.test.utils import override_settings
class MockUser(object):
def has_module_perms(self, perm):
if perm == 'mockapp':
return True
return False
def has_perm(self, perm):
if perm == 'mockapp.someperm':
return True
return False
class PermWrapperTests(TestCase):
"""
Test some details of the PermWrapper implementation.
"""
class EQLimiterObject(object):
"""
This object makes sure __eq__ will not be called endlessly.
"""
def __init__(self):
self.eq_calls = 0
def __eq__(self, other):
if self.eq_calls > 0:
return True
self.eq_calls += 1
return False
def test_permwrapper_in(self):
"""
Test that 'something' in PermWrapper works as expected.
"""
perms = PermWrapper(MockUser())
# Works for modules and full permissions.
self.assertTrue('mockapp' in perms)
self.assertFalse('nonexisting' in perms)
self.assertTrue('mockapp.someperm' in perms)
self.assertFalse('mockapp.nonexisting' in perms)
def test_permlookupdict_in(self):
"""
No endless loops if accessed with 'in' - refs #18979.
"""
pldict = PermLookupDict(MockUser(), 'mockapp')
with self.assertRaises(TypeError):
self.EQLimiterObject() in pldict
@skipIfCustomUser
@override_settings(
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), 'templates'),
),
USE_TZ=False, # required for loading the fixture
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthContextProcessorTests(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
urls = 'django.contrib.auth.tests.urls'
fixtures = ['context-processors-users.xml']
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_not_accessed(self):
"""
Tests that the session is not accessed simply by including
the auth context processor
"""
response = self.client.get('/auth_processor_no_attr_access/')
self.assertContains(response, "Session not accessed")
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_is_accessed(self):
"""
Tests that the session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get('/auth_processor_attr_access/')
self.assertContains(response, "Session accessed")
def test_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_perm_in_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perm_in_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_message_attrs(self):
self.client.login(username='super', password='secret')
response = self.client.get('/auth_processor_messages/')
self.assertContains(response, "Message 1")
def test_user_attrs(self):
"""
Test that the lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username='super', password='secret')
user = authenticate(username='super', password='secret')
response = self.client.get('/auth_processor_user/')
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: 100")
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# See if this object can be used for queries where a Q() comparing
# a user can be used with another Q() (in an AND or OR fashion).
# This simulates what a template tag might do with the user from the
# context. Note that we don't need to execute a query, just build it.
#
# The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
# User is a fatal TypeError: "function() takes at least 2 arguments
# (0 given)" deep inside deepcopy().
#
# Python 2.5 and 2.6 succeeded, but logged internally caught exception
# spew:
#
# Exception RuntimeError: 'maximum recursion depth exceeded while
# calling a Python object' in <type 'exceptions.AttributeError'>
# ignored"
query = Q(user=response.context['user']) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context['user'], user)
self.assertEqual(user, response.context['user'])
|
my fortress; I shall not be shaken.
my mighty rock, my refuge is in God.
God is a refuge for us.
She had been an indomitable woman! She was strong, independent, wise, determined, and faithful. She was a woman I knew from the community, long before she became a resident in the nursing home where I serve as chaplain. I had incredible respect for her. At least a generation older than I was, she demonstrated for me how a strong woman could manage in a world that didn’t always respect her gifts. I learned a lot from her.
Then dementia invaded her life. It began to undermine her independence. It eroded her wisdom. It depleted her strength. In spite of her determination, she became less and less able to navigate the daily activities necessary to survive unaided, until she finally needed residential care for her safety. This was overwhelmingly distressing to her and to many who had known her self-sufficiency.
But her faith remained intact. We would talk about God—her rock and refuge, her hope and fortress. And as so much was stripped away by dementia, her faith remained. Until. Until dementia took her ability to speak. Until dementia eliminated every shred of her personality, and she spent her final months unable to respond at all, unable to give any indication she was even aware that others were around her.
Even in the silence of profound dementia, God never stopped being her refuge, her hope, her rock, her fortress, her salvation, her deliverance. She waited in silence for a God who never forgot her. And those who loved her kept vigil, waiting for God to call her home.
What was true for her is true for us as well. Whatever our struggle, whatever our trouble, “Trust in him at all times, O people; pour out your heart before him; God is a refuge for us” (v. 8). In silence—or in speech— God is our only hope!
Prayer: God, our refuge and deliverer, we come before you acknowledging our deep need. Help us trust in you at all times, regardless of our circumstances. In Jesus’s name we pray. Amen.
Leigh Boelkins Van Kempen is a chaplain at Resthaven Care Community in Holland, Michigan. She and her husband, Case, also an RCA minister, have three adult, married children and four (and a half!) beloved grandchildren.
|
import math
import numpy
from numpy import linalg as LA
import sympy
from sympy.core.symbol import Dummy
from sympy.simplify.simplify import nsimplify
import calculations
import itertools
import q_vec
#import scipy
#import scipy.linalg
class matrix_factory(object):
@staticmethod
def get_probability_matrix(n, q, isSymbolic):
"""
:param n: vector size
:param q: tuple size
:param isSymbolic: determines whether the calculation will be symbolic or float64 precision
:return: returns a matrix instance of size (n)_q with Benesh probabilities
"""
matrix_instance = matrix()
matrix_instance.n = n
matrix_instance.q = q
size = int(math.floor(math.factorial(n) / math.factorial(n-q))) # (n)_q
matrix_instance.r = size # rows
matrix_instance.c = size # cols
matrix_instance.isSymbolic = isSymbolic
matrix_instance.matrix_type = 'BENESH'
if (isSymbolic == True): # choose matrix type
matrix_instance.m=sympy.Matrix(numpy.zeros([matrix_instance.r,matrix_instance.c]))
else:
matrix_instance.m=numpy.zeros([matrix_instance.r,matrix_instance.c],dtype=numpy.float64)
matrix_instance.indicesToVectors = []
matrix_instance.vectorsToIndices = {}
i = 0 # build map vector <-> matrix index
for v in itertools.permutations(range(n), q):
matrix_instance.indicesToVectors.append(v)
matrix_instance.vectorsToIndices[v] = i
i = i + 1
for i in range(0, matrix_instance.r): # init matrix with base values
alpha = matrix_instance.indicesToVectors[i]
for j in range(0, matrix_instance.c):
beta = matrix_instance.indicesToVectors[j]
matrix_instance.m[i, j] = calculations.calculate_benes(alpha, beta, n)
return matrix_instance
@staticmethod
def get_probability_disk_matrix(n, q, isSymbolic):
"""
using disk memory and not RAM memory.
:param n: vector size
:param q: tuple size
:param isSymbolic: determines whether the calculation will be symbolic or float64 precision
:return: returns a matrix instance of size (n)_q with Benesh probabilities
"""
import h5py
matrix_instance = matrix()
matrix_instance.n = n
matrix_instance.q = q
size = int(math.floor(math.factorial(n) / math.factorial(n-q))) # (n)_q
matrix_instance.r = size # rows
matrix_instance.c = size # cols
matrix_instance.isSymbolic = isSymbolic
matrix_instance.matrix_type = 'BENESH'
if (isSymbolic == True): # choose matrix type
matrix_instance.m=sympy.Matrix(numpy.zeros([matrix_instance.r,matrix_instance.c]))
else:
f = h5py.File("/tmp/mytestfile.hdf5", "w")
matrix_instance.f = f
matrix_instance.m = f.create_dataset("mydataset",
(matrix_instance.r,matrix_instance.c),
dtype=numpy.float64)
# numpy.zeros([matrix_instance.r,matrix_instance.c],dtype=numpy.float64)
matrix_instance.indicesToVectors = []
matrix_instance.vectorsToIndices = {}
i = 0 # build map vector <-> matrix index
for v in itertools.permutations(range(n), q):
matrix_instance.indicesToVectors.append(v)
matrix_instance.vectorsToIndices[v] = i
i = i + 1
for i in range(0, matrix_instance.r): # init matrix with base values
alpha = matrix_instance.indicesToVectors[i]
for j in range(0, matrix_instance.c):
beta = matrix_instance.indicesToVectors[j]
matrix_instance.m[i, j] = calculations.calculate_benes(alpha, beta, n)
return matrix_instance
@staticmethod
def get_reduced_matrix(n, q, isSymbolic):
qv = q_vec.q_vec(n, q)
columns = qv.build_reduced_matrix()
matrix_instance = matrix()
matrix_instance.n = n
matrix_instance.q = q
if (isSymbolic == True): # choose matrix type
matrix_instance.m=sympy.Matrix(numpy.matrix(columns))
else:
matrix_instance.m=numpy.matrix(columns)
#matrix_instance.m = numpy.matrix(columns)
size = int(math.floor(math.factorial(n) / math.factorial(n-q))) # (n)_q
matrix_instance.r = len(columns) # rows
matrix_instance.c = len(columns) # cols
matrix_instance.isSymbolic = isSymbolic
matrix_instance.matrix_type = 'REDUCED'
return matrix_instance
class matrix(object):
"""
matrix class, wrapper for linear algebra calculations in the project
"""
def __init__(self):
return
def get_size(self):
return self.r
def get_symbol_by_index(self,i):
return self.indicesToVectors[i]
def get_probability_for_symbols(self, t1, t2):
"""
return the probability to move from symbol t1 to symbol t2 (q-tuple or type), read from the matrix
:param t1: symbolic tuple (q-tuple or type)
:param t2: symbolic tuple (q-tuple or type)
:return:
"""
if (self.matrix_type == 'BENESH'):
i = self.vectorsToIndices[t1]
j = self.vectorsToIndices[t2]
elif (self.matrix_type == 'REDUCED'):
i = 0;
j = 0;
return self.m[i,j]
def get_eigenvalues(self):
"""
returns the eigenvalues of the matrix,
using the appropriate libraries, based on the symbolism
:return:
"""
if (self.isSymbolic == True):
w = self.m.eigenvals()
else:
w,v = LA.eigh(self.m)
#w,v = scipy.linalg.eig(self.m)
return w;
def get_diagonal(self):
"""
returns the diagonal form of the matrix
:return:
"""
if (self.isSymbolic == True):
P, D = self.m.diagonalize();
return D
else:
w, v = LA.eigh(self.m)
P = numpy.matrix(v)
D = numpy.transpose(P) * self.m * P
return D
def getMatrixPower(self, p, compute_diagonal=True):
"""
Diagonalizes the matrix and exponentiates it efficiently.
returns the matrix p-th power.
:param p:
:return:
"""
if compute_diagonal:
if (self.isSymbolic == False):
w, v = LA.eigh(self.m)
P = numpy.matrix(v)
D = numpy.transpose(P) * self.m * P
for i in range (0,self.r):
D[i,i]=pow(D[i,i],p)
D = P * D * numpy.transpose(P)
return D
else:
P, D = self.m.diagonalize();
for i in range (0,self.r):
D[i,i]=pow(D[i,i],p)
D = P * D * P**(-1)
return D
else:
return self.m**p
def get_eigenvalue_set(self):
"""
returns a set of eigenvalues for the matrix
:return:
"""
return set(self.get_eigenvalues())
def get_round_eigevalue_set(self):
"""
returns a set of rounded (decimal precision) eigenvalues
:return:
:return:
"""
if (self.isSymbolic == True):
return self.get_eigenvalues()
else:
return set(numpy.round(self.get_eigenvalues(), 4))
"""
Benesh probabilities utils
"""
@staticmethod
def fromBaseN(n,t):
"""
:param n: - the base
:param t: - tuple representing coordinates in base n
:return: - decimal number
"""
sum = 0
p = len(t) - 1
for i in t:
sum += i*(pow(n,p))
p = p - 1
return sum
@staticmethod
def toBaseN(n,q,d):
"""
:param n: base we work in
:param q: number of digits in the vector
:param d: decimal number to move to new base as tuple
:return:
"""
l = [0]*(q)
for i in range(0,q):
l[i] = int(d%n)
d=math.floor(d/n)
l.reverse()
return tuple(l)
def custom_charpoly(self, **flags):
"""
custom charpoly
"""
if (self.isSymbolic == True):
self.m = self.m._new(self.m.rows, self.m.cols,[nsimplify(v, rational=True) for v in self.m])
max_denom = 0;
for i in range (0,self.m.rows):
for j in range (0,self.m.cols):
if self.m[i,j] > max_denom:
max_denom = self.m[i,j].q
print(max_denom)
self.m *= max_denom
flags.pop('simplify', None) # pop unsupported flag
return self.m.berkowitz_charpoly(Dummy('x'))
else:
numpy.rint(self.m)
return numpy.rint(numpy.poly(self.m))
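# Hedged usage sketch (not part of the original module); n and q below are
# arbitrary small example values.
#
# m = matrix_factory.get_probability_matrix(n=4, q=2, isSymbolic=False)
# print(m.get_size())                  # (4)_2 = 4!/2! = 12 rows/columns
# print(m.get_round_eigevalue_set())   # rounded set of eigenvalues
# print(m.getMatrixPower(3))           # third power via diagonalization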
|
40 photos of Sample Resume for Caregiver. As we all know, a picture is worth a thousand words. You have a few more options if you want to perform a reverse image search; keep in mind the visual you choose (square, portrait or landscape). There are at least the following types of Sample Resume for Caregiver to inspire your next custom resume, along with some archived example photos.
Collection Of solutions Sample Resume for Caregiver Stunning Caregiver Resume Samples Visualcv Resume Samples Database.
Best solutions Of Sample Resume for Caregiver Brilliant Caregiver Resume Sample Writing Guide.
Ideas Collection Sample Resume for Caregiver Fancy Caregiver Resume Samples Resume Samples for Caregiver Functional In.
|
import numpy as np
# import seaborn
from collections import namedtuple
from keras import backend as K
from keras.engine.topology import Layer
from scipy.interpolate import interp1d
## Loss functions
dice_smooth = 1.
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + dice_smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + dice_smooth)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
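# Hedged usage sketch (not part of the original file): one way the Dice loss
# above could be plugged into a Keras model. The tiny model below is a
# placeholder, not the project's actual architecture.
#
# from keras.models import Sequential
# from keras.layers import Dense
#
# model = Sequential([Dense(1, activation='sigmoid', input_shape=(16,))])
# model.compile(optimizer='adam', loss=dice_coef_loss, metrics=[dice_coef])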
## Layers and ops
## plotting tools
# class H1:
# '''Wrapper around numpy histogram
# '''
# def __init__(self,hist):
# self.bin_edges = hist[1]
# self.n_bins = self.bin_edges.shape[0]-1
# self.content = hist[0]
# def find_bin(self,x):
# if x < self.bin_edges[0]:
# return -1
# for ib in self.xrange(self.n_bins):
# if x>= self.bin_edges[ib]:
# return ib
# return self.n_bins
# def get_bin(self,ib):
# if ib<0 or ib>=self.n_bins:
# return 0
# return self.content[ib]
# def integral(self,lo=None,hi=None):
# if not lo:
# lo = 0
# if not hi:
# hi = self.n_bins
# widths = np.diff(self.bin_edges[lo:hi+1])
# return np.sum(self.content[lo:hi] * widths)
#
#
# def plot_hists(props, hists):
# plt.clf()
# bins = props['bins']
# for h in hists:
# plt.hist(h['vals'], bins=bins, weights=h['weights']/np.sum(h['weights']),
# histtype='step', # fill=False,
# color=h['color'], label=h['label'])
# if 'xlabel' in props:
# plt.xlabel(props['xlabel'])
# if 'ylabel' in props:
# plt.ylabel(props['ylabel'])
# plt.legend(loc=0)
# plt.savefig(props['output']+'.png',bbox_inches='tight',dpi=300)
# plt.savefig(props['output']+'.pdf',bbox_inches='tight')
#
#
#
# Tagger = namedtuple('Tagger',['response','name','lo','hi','flip'])
#
# def create_roc(taggers, labels, weights, output, nbins=50):
# colors = ['k','r','g','b']
# plt.clf()
# wps = []
# for t in taggers:
# color = colors[0]
# del colors[0]
# h_sig = H1(np.histogram(t.response[labels==1],
# weights=weights[labels==1],
# bins=nbins,range=(t.lo,t.hi),
# density=True))
# h_bkg = H1(np.histogram(t.response[labels==0],
# weights=weights[labels==0],
# bins=nbins,range=(t.lo,t.hi),
# density=True))
#
# epsilons_sig = []
# epsilons_bkg = []
# for ib in xrange(nbins):
# if t.flip:
# esig = h_sig.integral(hi=ib)
# ebkg = h_bkg.integral(hi=ib)
# else:
# esig = h_sig.integral(lo=ib)
# ebkg = h_bkg.integral(lo=ib)
# epsilons_sig.append(esig)
# epsilons_bkg.append(ebkg)
#
# interp = interp1d(epsilons_bkg,
# np.arange(t.lo,t.hi,float(t.hi-t.lo)/nbins))
# wps.append(interp(0.05))
#
# plt.plot(epsilons_sig, epsilons_bkg, color+'-',label=t.name)
# plt.axis([0,1,0.001,1])
# plt.yscale('log')
# plt.legend(loc=0)
# plt.ylabel('Background fake rate')
# plt.xlabel('Signal efficiency')
# plt.savefig(output+'.png',bbox_inches='tight',dpi=300)
# plt.savefig(output+'.pdf',bbox_inches='tight')
#
# return wps
|
Sunday 5th February – LDCV – Galley Hill, Streatley – Scrub clearing. Meet at the little car park, junction of Warden Hill Rd. and Links Way, Luton at 10am. Contact Trevor (01582 547095), or Andy (01234 364213) daytimes.
Sunday 19th February – LDCV – Suncote Pit, Sewell – Scrub clearing. Meet at the Sewell Cutting Reserve entrance in French’s Avenue, Dunstable at 10am. Contact Trevor (01582 547095), or Andy (01234 364213) daytimes.
Sunday 5th March – LDCV – Dallow Downs, Luton – Scrub clearing. Meet in Wellhouse Close, off Longcroft Road., Luton at 10am. Contact Trevor (01582 547095), or Andy (01234 364213) daytimes.
Sunday 19th March – LDCV – Cowslip Meadow, Luton – Coppicing. Meet at the Springfield Rd entrance at 10am. Contact Trevor (01582 547095), or Andy (01234 364213) daytimes.
Sunday 26th March – LDCV – Sewell Cutting, Dunstable – Scrub clearing. Meet at the Reserve entrance in French’s Avenue, Dunstable at 10am. Contact Trevor (01582 547095), or Andy (01234 364213) daytimes.
Site management on various sites in the Ouzel Valley. Please contact 01525 237760 for locations and details.
|
# -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <[email protected]>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import random
from mathmaker.lib import shared
from mathmaker.lib.tools.wording import setup_wording_format_of
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.base_calculus import Item
from mathmaker.lib.document.content import component
ALL_LENGTHS_TO_CALCULATE = ['oneside', 'twosides']
class sub_object(component.structure):
def __init__(self, build_data, picture='true', **options):
super().setup("minimal", **options)
if build_data[0] < 11:
raise ValueError('build_data[0] == {} whereas it should be '
'>= 11'.format(str(build_data[0])))
build_data = (build_data[0] / 10, ) + build_data[1:]
super().setup("numbers", nb=build_data,
shuffle_nbs=False, **options)
super().setup("length_units", **options)
super().setup("intercept_theorem_figure", butterfly=True, **options)
if self.variant == 'default':
variant = ['random', 'random']
else:
if self.variant.count('_') != 1:
raise ValueError('XMLFileFormatError: the variant for '
'intercept_theorem_butterfly '
'should contain one _')
variant = self.variant.split(sep='_')
valid_variant = [['random', 'oneside', 'twosides'],
['random', 'all', 'twocouples']]
for v, valid, n in zip(variant, valid_variant,
['first', 'second', 'third']):
if v not in valid:
raise ValueError('XMLFileFormatError: Invalid {} part of the '
'variant. It should be in: {}'
.format(n, str(valid)))
if variant[0] == 'random':
if variant[1] == 'twocouples':
variant[0] = 'oneside'
else:
variant[0] = random.choice(['oneside', 'twosides'])
if variant[1] == 'random':
if variant[0] == 'twosides':
variant[1] = 'twocouples'
else:
variant[1] = random.choice(['all', 'twocouples'])
if variant == ['twosides', 'twocouples']:
raise ValueError('XMLFileFormatError: The twosides_twocouples '
'variant is impossible.')
# The order is:
# small[0] small[1] small[2] side[0] side[1] side[2]
labels_configurations = {
'oneside_all': [
['?', True, True, True, True, True],
[True, '?', True, True, True, True],
[True, True, '?', True, True, True],
[True, True, True, '?', True, True],
[True, True, True, True, '?', True],
[True, True, True, True, True, '?']
],
'oneside_twocouples': [
['?', True, False, True, True, False],
[False, True, '?', False, True, True],
[True, True, False, True, '?', False],
[False, True, True, False, '?', True],
['?', False, True, True, False, True],
[True, False, '?', True, False, True],
[True, '?', False, True, True, False],
[False, '?', True, False, True, True],
[False, True, True, False, True, '?'],
[True, True, False, '?', True, False],
[True, False, True, True, False, '?'],
[True, False, True, '?', False, True],
],
'twosides_all': [
['?', '?', True, True, True, True],
['?', True, '?', True, True, True],
[True, '?', '?', True, True, True],
['?', True, True, True, '?', True],
['?', True, True, True, True, '?'],
[True, '?', True, '?', True, True],
[True, '?', True, True, True, '?'],
[True, True, '?', True, '?', True],
[True, True, '?', '?', True, True],
[True, True, True, '?', '?', True],
[True, True, True, '?', True, '?'],
[True, True, True, True, '?', '?'],
]
}
variant_key = '_'.join(variant)
labels_conf = random.choice(labels_configurations[variant_key])
self.figure.setup_labels(labels_conf,
segments_list=self.figure.small
+ self.figure.side)
lengths_to_calculate = [s.length_name
for s in self.figure.small + self.figure.side
if s.label == Value('?')]
self.line1 = self.figure.small[1].length_name
self.line2 = self.figure.side[1].length_name
self.length1_name = lengths_to_calculate[0]
if len(lengths_to_calculate) == 2:
self.length2_name = lengths_to_calculate[1]
if len(lengths_to_calculate) == 1:
self.wording = _('The drawn figure is out of shape. {newline} '
'The lengths are given in {length_unit}. '
'{newline} '
'The {line1} is parallel to {line2}. {newline} '
'{newline} '
'Determine the length of {length1_name}.')
else:
self.wording = _('The drawn figure is out of shape. {newline} '
'The lengths are given in {length_unit}. '
'{newline} '
'The {line1} is parallel to {line2}. {newline} '
'{newline} '
'Determine the lengths of {length1_name} '
'and {length2_name}.')
setup_wording_format_of(self)
self.ratios = shared.machine.write_math_style1(
self.figure.ratios_equalities().into_str())
self.ratios_substituted = shared.machine.write_math_style1(
self.figure.ratios_equalities_substituted().into_str())
self.resolution0 = self.figure.ratios_equalities_substituted()\
.into_crossproduct_equation(Item(lengths_to_calculate[0]))\
.auto_resolution(dont_display_equations_name=True,
skip_first_step=True,
skip_fraction_simplification=True,
decimal_result=2,
unit=self.length_unit,
underline_result=True)
lengths_resolutions_part = _('hence: {resolution0} ')
if len(lengths_to_calculate) == 2:
self.resolution1 = self.figure.ratios_equalities_substituted()\
.into_crossproduct_equation(Item(lengths_to_calculate[1]))\
.auto_resolution(dont_display_equations_name=True,
skip_first_step=True,
skip_fraction_simplification=True,
decimal_result=2,
unit=self.length_unit,
underline_result=True)
lengths_resolutions_part = shared.machine.write(
lengths_resolutions_part + _('and: {resolution1} '),
multicolumns=2)
ans_variant = options.get('ans_variant', 'default')
ans_texts = {
'default': _('As: {line1} {parallel_to} {line2}, '
'{main_vertex_name} {belongs_to} {chunk0_length_name}'
' and '
'{main_vertex_name} {belongs_to} {chunk1_length_name}'
', then by the intercept theorem: {newline} '
'{ratios} '
'thus: {ratios_substituted} '),
'alternative1': _('As {line1} is parallel to {line2}, '
'and as the line {chunk0_length_name} cuts '
'the line {chunk1_length_name} at point '
'{main_vertex_name}, '
'then by the intercept theorem: {newline} '
'{ratios} '
'thus: {ratios_substituted} '),
'alternative2': _('As: {line1} is parallel to {line2}, '
'and as {point0_name}, {main_vertex_name} and '
'{vertex1_name} on one hand, '
'{point1_name}, {main_vertex_name} and '
'{vertex2_name} on the other hand,'
'are aligned in the same order, '
'then by the intercept theorem: {newline} '
'{ratios} '
'thus: {ratios_substituted} ')
}
self.answer_wording = ans_texts[ans_variant] + lengths_resolutions_part
setup_wording_format_of(self, w_prefix='answer_')
def q(self, **options):
return shared.machine.write_layout(
(1, 2),
[10, 10],
[self.wording.format(**self.wording_format),
shared.machine.insert_picture(self.figure,
scale=0.7,
top_aligned_in_a_tabular=True)])
def a(self, **options):
return self.answer_wording.format(**self.answer_wording_format)
# TODO: create the "js" answer (for interactive pdf)
# def js_a(self, **kwargs):
# return [self......jsprinted]
|
Welcome to Bordentown Regional School District!
Two proofs of residency are required. One must be a lease or rental agreement/purchase agreement, mortgage statement or deed.
The second can be a utility bill or bank statement.
Please provide any court paperwork regarding custody if applicable.
Online INFOSNAP registration – you will receive a separate email or letter regarding this. Please complete it as soon as possible; you will need to provide 3 emergency contacts.
Parents/Guardians must make an appointment with our main office to finalize registration. At that time, we will make transportation arrangements and determine the best placement for your child. Please feel free to contact me with any questions.
|
# SQLite is a lightweight database manager that's part of Python's standard
# library, so it's a good example of how to hook a script up to a database.
# If you work in MySQL or Postgres, there are libraries you can use to make
# a connection and gain similar functionality.
import sqlite3
# Connect to a test database; if one doesn't exist, it will be created on
# the fly. We also fire up a cursor to poke, prod and manipulate our
# database.
conn = sqlite3.connect('my_test.sqlite')
c = conn.cursor()
# Right now it's an empty database with no tables and no data. Let's create
# a basic one that holds some CEO information.
c.execute(
'CREATE TABLE ceos '
'(ceo_name text, company text, salary int)')
# NOTE: with scripts, sometimes it's a good idea to preface a CREATE
# TABLE query with IF NOT EXISTS, that way you won't get an operational
# error.
# Let's insert three CEO names, companies and salaries into our ceos table.
c.execute(
"INSERT INTO ceos "
"VALUES ('John Smith', 'Acme, Inc.', '275000'), "
"('Libby Rogers', 'AstroTech', '1200000'), "
"('Darla Jones', 'Ballard Partners', '942000')")
# When we alter a table, we have to commit those changes.
conn.commit()
# Let's run a quick query that gives us everything in the table.
c.execute(
"SELECT * FROM ceos")
# The database has run the query and gives it back to us as a list of tuples
# for each row. We have to fetch this information.
result = c.fetchall()
print result
# Try fetchall() again; it should be empty and will be until we run another
# query.
c.fetchall()
# Let's try another basic query: a sum of the salaries.
c.execute(
"SELECT SUM(salary) FROM ceos")
result2 = c.fetchall()
print result2
# One more: companies that start with 'A,' sorted in descending order by
# salary
c.execute(
"SELECT * FROM ceos "
"WHERE company LIKE 'A%' "
"ORDER BY salary DESC")
result3 = c.fetchall()
print result3
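# One addition to the original walkthrough: when we're finished, it's good
# practice to close the connection.
conn.close()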
|
This past weekend, I found myself in NYC for a couple of hours. It was pouring out in the city- damp, frigid and pretty miserable- and I ended up ducking into The Morgan Library and Museum on Madison Avenue for a short period of time to dry off, warm up, and browse Pierpont Morgan’s private library and study. I couldn’t resist snapping some photos (without flash, of course,) of the collection- because the rooms were just so gorgeous and awe-inspiring. I wound up slowly walking from shelf to shelf- exhibit to exhibit- staring through the glass at rare books and artifacts that had been accumulated over the years.
Admission to The Morgan Library was $18 (for adult entry) and I recommend it for anyone who wants a quiet and quaint museum experience for a couple of hours while they’re in New York- with or without a torrential downpour outside.
Afterwards, prior to catching my train, I made a quick-stop a couple blocks down and around the corner at Black Shack Burger to grab a bite to eat- and although the service left a lot to be desired (the guy at the counter obviously wanted to be anywhere but there and seemed kind of irritated while I was doing a speedy browse of the menu-) the food was really, really, REALLY good. I liked the atmosphere of the place, too, which felt like an underground punk rock club with all the gig posters I saw tacked up by where I had sat down. I’d definitely stop by again- and hopefully the employees are in better spirits when I do.
All in all, it wasn’t a bad way to spend a rainy, chilly Sunday- although next time I’ll be sure to bring or buy an umbrella.
I’ve been occasionally mentioning it in my past few posts, and the time is finally here to announce the details of my 2014 Halloween Giveaway! As most of you have probably figured out by now- October is my favorite month of the year- and Halloween is undoubtedly my favorite holiday. For that reason, I wanted to do something a little different this year and curate an assortment of items to give away to one lucky Legally Redhead reader! And unlike other giveaways I’ve hosted here on the blog in the past: this giveaway isn’t limited to United States residents only. I’ll be paying for the shipping and handling myself- which means it’s open to those of you across the pond, below the border, and anywhere in between, too!
Before I get to the important part: how to enter- let’s take a closer look at what’s inside!
As an avid collector of Funko figurines, included in this giveaway is a POP! Vinyl figure of one of my favorite horror movie villains- Freddy Krueger. From the detail on his burned and scarred face, to his infamous striped sweater- to the even more infamous metal clawed glove- this toy is too adorable to be afraid of. Put it up on a shelf, in a display case, or- if you dare- near your bed.
To help get the winner into the Halloween spirit, I’ve also included the medium sized “Pumpkin Patch” candle from Yankee Candle in the giveaway. A mix of the “Harvest” and “Spiced Pumpkin” scents from the same company, this swirl candle will get your entire room/home/wherever you light it smelling like a delicious combination of sweet cinnamon and pumpkin pie- perfect for the season.
I couldn’t possibly have a Halloween themed giveaway without including SOMETHING “Walking Dead” related, could I? To help the winner catch up just as we’re set to begin the fifth season (one week to go!) or even just provide them with something to gift to a TWD-enthusiast they may know, I included a new copy of the recently released fourth season of the show. This particular edition is Blu-Ray – Digital HD formatted and is loaded with special featurettes and deleted scenes that will keep you occupied for hours after you’ve binge-watched every single episode.
Fable & Fury has been one of my favorite Etsy-based jewelry shops for a long time. Their pieces are well-made, unique, and guaranteed to garner attention. This stainless steel Nosferatu pendant is no exception. This would be a perfect accessory to wear on Halloween night to whatever frightening plans you have lined up- although I’m the type of person who would wear this year-round, too.
On the surface, this brown bar from Fortune Cookie Soap– scented like berries and cinnamon- may look a little bland, but inside- it’s loaded with bugs. Not real ones, of course. From the “Nightmare Before Christmas” collection, this “Oogie Boogie” bar soap is made from natural ingredients and adds a bit of fun to any bath or shower as the soap begins to melt away with each use, revealing a bunch of creepy crawlers lurking on the inside, below the surface.
Lastly, most people my age can recall reading Alvin Schwartz’s “Scary Stories to Tell in the Dark,” “More Scary Stories to Tell in The Dark,” and “Scary Stories 3 : More Tales to Chill Your Bones” in middle school around this time of year. Admittedly, the stories weren’t particularly terrifying, but when paired with the haunting illustrations by Stephen Gammell- the books became timeless. This hardcover compilation of the best works from the three novels is a nice shot of nostalgia to keep for yourself or to gift to a friend or loved one.
Not shown in the above photos, but will definitely be included in the giveaway- is the candy I plan to send the winner after contacting them (just to make sure he/she doesn’t have any allergies, of course!) and a personalized card once someone has been selected!
The giveaway is running from now through midnight (EST) on Sunday, October 19th, after which time a winner will be selected at random through Rafflecopter and contacted directly to obtain additional information to process shipping. If selected, you’ll have 72 hours to respond to the confirmation e-mail. If I don’t receive a response in that time, I’ll unfortunately have to select another winner, instead!
First and foremost- please visit the Rafflecopter page for the giveaway by CLICKING HERE to make sure your entry is properly counted!
That’s it! Just two simple steps and you’re in!
Good luck to everyone who participates. I’m really looking forward to selecting a winner!
If you’re a regular reader of Legally Redhead, you may have noticed the layout of the blog changed today! As much as I loved the cute, olive green style I’ve had for the better part of a year and a half- I’m quite smitten with this new (but still easy to navigate) design and header, made possible by WordPress. I hope you all like it as much as I do!
This afternoon, the lovely Imogen linked me in “The Reading Habits Tag,” and I thought I’d unveil the new design of LR while answering the book/reading questions presented to me in her original post.
I usually read while I’m in bed, or curled up on my couch depending on how quiet it is in my living room. If it’s nice out, though, I like to sit out on my patio/deck with a good book!
I do the unthinkable- I turn over the corner of the page I left off on. I know it’s awful but I have a tendency to misplace bookmarks or pieces of paper! They always fall out and I always lose my place. Dog-earing the page is so much more convenient for me.
I try to finish whatever chapter I’m on before I save my place and take a break from the book. There have been a couple of occasions where I’ve had to stop abruptly, sometimes mid-sentence- but I usually go back and re-read the chapter, or at least a few pages before where I left off- mostly to re-familiarize myself with what was happening in the story before I got sidetracked.
Although I really do love a good cup of hot chocolate while I read by the fire during the winter.
Not usually, since it can be a little distracting. If I’m reading during a flight, I tend to keep my headphones on just out of habit- and even then I’m usually listening to very soft jazz music so it won’t break my concentration.
One book at a time! I like to focus on and finish one story before I begin another.
I prefer reading at home since it’s where I’m most comfortable, but I read a lot when I travel, too- in the airport, on the plane, in my hotel room, etc.
Silently in my head, unless I’m reading something to someone. Otherwise, I feel like reading out loud is sort of like those people who listen to music through their headphones or ear buds but insist on singing along anyway. It’s distracting to other people!
I try not to. There have been a couple of instances where I’ve been so frustrated with a book that I’ve skipped ahead to see if there’s any indication of it getting better/what happens to the character to determine if I want to keep reading- but I prefer to be surprised.
I know that sometimes it can't be helped, but I like to keep my books in good, new-ish condition as much as I can for as long as possible. That way, if I decide to lend/give the book to someone else to read, I know it won't be falling apart on them.
I’ve highlighted stuff before, but I very rarely write in my books. Again, a lot of it has to do with potentially lending/giving the books to other people to read. I want them to be in good shape!
This is going to sound ridiculous, but while helping with some Summer cleaning at my mom’s house, I recently came across a crate of my old collection of “The Baby-Sitters Club” books, as well as some other novels I absolutely adored as a kid. Nostalgia got the better of me, and I started re-reading some of them again. I’m currently in the middle of “Boy-Crazy Stacey,” which was always my favorite BSC book. When I’m done with that (which should be in no time at all,) I’m moving onto my other favorite childhood book I found- “The Silver Tree” by Ruth L. Williams. Memorieeeees!
Instead of tagging any specific person or people for this, I’m just going to tag anyone who is interested in participating in this fun little questionnaire!
|
import importlib
import io
import os
import csv
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import matplotlib.dates as mdates
from dateutil.parser import parse
from datetime import datetime
from datetime import timedelta
# Python 2 and 3: easiest option
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import pytz
from matplotlib.backends.backend_pdf import PdfPages
import weather_data_tools as weather
importlib.reload(weather)
import spectra_fitting_tools as fitter
importlib.reload(fitter)
#--------------------------------------------------------------------------#
# Process input data
#--------------------------------------------------------------------------#
def make_int(lst):
'''
Makes all entries of a list an integer
'''
y = []
for i in lst:
y.append(int(i))
return y
def make_array(lst):
'''
    Makes a list into an array. Also splices out the irrelevant metadata
    for a spectrum
'''
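    # The first 12 CSV columns are assumed to be metadata; only the spectrum channels are kept.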
y = np.asarray(make_int(lst[12:]))
return y
def get_times(rows, n, tstart, tstop):
'''
Get list of times for data: determines time as the midpoint between the upper and lower bounds in the integration window
Arguments:
- full list of inputs from data csv
- number of hours to integrate for each data point
- start/stop dates
Returns:
- list of times
'''
ndays = (tstop - tstart).days
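    # assumes one data row every 5 minutes, i.e. 12 rows per hour of integration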
entries = 12*n
nintervals = (24/n)
i = 0
counter = 0
times = []
while counter < ndays*nintervals:
integration = rows[(i*entries)+1:((i+1)*entries)+1]
i+=1
time_range = []
datatz = parse(integration[-1][1]).tzinfo
if (parse(integration[-1][1])<tstop.replace(tzinfo=datatz)) and \
(parse(integration[0][1])>tstart.replace(tzinfo=datatz)):
for j in integration:
time_range.append(parse(j[1]))
times.append(time_range[int(len(time_range)/2)])
counter+=1
return times
def get_arrays(values_w_errs):
vals = np.asarray([i[0] for i in values_w_errs])
errs = np.asarray([i[1] for i in values_w_errs])
return vals,errs
def varify_data(means,sigmas,amps):
# check for bad fits and use average of surrounding good fits
for i in range(len(means)):
if means[i][1] > 100 or math.isnan(means[i][0]):
print('Fit {} is bad!'.format(i))
j = 1
k = 1
if i<(len(means)-j):
while means[i+j][1] > 100:
j += 1
print('Trying {}+{} out of {}'.format(i,j,len(means)))
if i >= (len(means)-j):
print('Abort!')
break
if i>k:
while means[i-k][1] > 100 or math.isnan(means[i-k][0]):
k += 1
if i<k:
break
if i>k and i<(len(means)-j):
print('Averaging over {} and {}'.format(i-k,i+j))
means[i][0] = (means[i+j][0]+means[i-k][0])/2.0
means[i][1] = (means[i+j][1]+means[i-k][1])/2.0
sigmas[i][0] = (sigmas[i+j][0]+sigmas[i-k][0])/2.0
sigmas[i][1] = (sigmas[i+j][1]+sigmas[i-k][1])/2.0
amps[i][0] = (amps[i+j][0]+amps[i-k][0])/2.0
amps[i][1] = (amps[i+j][1]+amps[i-k][1])/2.0
elif i<k and i<(len(means)-j):
print('Using {}'.format(i+j))
means[i][0] = means[i+j][0]
means[i][1] = means[i+j][1]
sigmas[i][0] = sigmas[i+j][0]
sigmas[i][1] = sigmas[i+j][1]
amps[i][0] = amps[i+j][0]
amps[i][1] = amps[i+j][1]
elif i>k and i>=(len(means)-j):
print('Using {}'.format(i-k))
means[i][0] = means[i-k][0]
means[i][1] = means[i-k][1]
sigmas[i][0] = sigmas[i-k][0]
sigmas[i][1] = sigmas[i-k][1]
amps[i][0] = amps[i-k][0]
amps[i][1] = amps[i-k][1]
else:
print('Nothing makes sense')
return means,sigmas,amps
def find_time_match(times,time,delta):
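    # Binary search: return the index of the entry in times within delta of time, or -1 if no match is found.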
first = 0
last = len(times)-1
found = False
index = -1
if not time.tzinfo:
time = time.replace(tzinfo=times[0].tzinfo)
while first<=last and not found:
midpoint = int((first + last)/2)
list_time = times[midpoint]
if not list_time.tzinfo:
list_time = list_time.replace(tzinfo=time.tzinfo)
if abs(list_time-time) < delta :
index = midpoint
found = True
else:
if time < list_time:
last = midpoint-1
else:
first = midpoint+1
return index
def SelectDataTimeRange(start_time,stop_time,data,times):
dataarray = np.array(data)
timesarray = np.array(times)
    indices = np.where((timesarray>=start_time)&(timesarray<=stop_time))
subdata = dataarray[indices]
subdatatimes = timesarray[indices]
return subdata, subdatatimes
def merge_data(times1,data1,times2,data2):
merged_data1 = []
merged_data2 = []
merged_times = []
for i in range(len(times1)):
time_index = find_time_match(times2,times1[i],timedelta(minutes=30))
if time_index >= 0:
merged_data1.append(data1[i])
merged_data2.append(data2[time_index])
merged_times.append(times1[i])
return merged_times,merged_data1,merged_data2
def inTimeRange(time_string,tstart,tstop):
time = tstart - timedelta(minutes=1)
if isinstance(time_string, str):
try:
time = parse(time_string)
except:
print('{} Not a time!'.format(time_string))
return False
elif isinstance(time_string, datetime):
time = time_string
# check that tzinfo is set for tz aware comparisons
if tstart.tzinfo==None:
tstart = tstart.replace(tzinfo=time.tzinfo)
if tstop.tzinfo==None:
tstop = tstop.replace(tzinfo=time.tzinfo)
#print('Checking {} > {} and < {} = {}'.format(time,tstart,tstop,(time > tstart and time < tstop)))
return (time > tstart and time < tstop)
def get_spectra(rows, nhours, tstart, tstop):
datatz = rows[-1][1].tzinfo
date_itr = tstart
times = []
spectra = []
counter = 0
# break data up into days to speed up range selection
while date_itr < tstop:
next_day = date_itr+timedelta(days=1)
daily_row = [row for row in rows if \
inTimeRange(row[1],date_itr,next_day)]
time_itr = date_itr
date_itr = next_day
while time_itr < date_itr:
time_next = time_itr+timedelta(hours=nhours)
integration = [row for row in rows if \
inTimeRange(row[1],time_itr,time_next)]
time_itr = time_next
if len(integration)==0:
continue
array_lst = []
for j in integration:
array_lst.append(make_array(j))
integrated = sum(array_lst)
spectra.append(integrated)
times.append(integration[int(len(integration)/2)][1])
return times, spectra
def get_calibrations(spectra, fit_function, fit_args):
counter = 0
calibrations = []
calibration_errs = []
energy_spectra = []
last_calib = 2.5 # default calibration
last_err = 0
for spectrum in spectra:
        mean,sigma,amp = fit_function(spectrum,counter,*fit_args)
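        # 1460 keV is the K-40 gamma line; dividing by its fitted peak channel gives the keV/channel calibration.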
calib = (1460)/(mean[0])
calib_err = (1460)/(mean[0])**2*np.sqrt(mean[1]**2)
if calib < 0 or calib > 10 or math.isnan(calib):
print('invalid calibration {}, using {}'.format(calib,last_calib))
calib = last_calib
calib_err = last_err
else:
last_calib = calib
last_err = calib_err
calibrations.append(calib)
calibration_errs.append(calib_err)
energy_spectrum = np.array(spectrum)*calib
energy_spectra.append(energy_spectrum)
counter += 1
return calibrations, calibration_errs
def calibrate_spectra(spectra, calibrations, times, nsum):
E_spectra = []
bin_times = []
spectra_sum = []
itimes = []
isum = 0
for i in range(len(spectra)):
# list of energies = channel number * calibration (assume linear)
energies = np.array(range(len(spectra[i])))*calibrations[i]
print(energies)
spectrum = np.zeros(600)
for j in range(len(spectra[i])):
count = spectra[i][j]
# energy bin width = 5keV
index = int(energies[j]/5)
spectrum[index] += count
if isum < nsum:
spectra_sum.append(spectrum)
itimes.append(times[i])
isum += 1
else:
E_spectra.append(sum(spectra_sum))
bin_times.append(itimes[int(len(itimes)/2)])
itimes = []
spectra_sum = []
isum = 0
return E_spectra, bin_times
def get_peak_fits(spectra, fit_function, fit_args):
means = []
sigmas = []
amps = []
counter = 0
for spectrum in spectra:
mean,sigma,amp = fit_function(spectrum,counter,*fit_args)
means.append(mean)
sigmas.append(sigma)
amps.append(amp)
counter += 1
means,sigmas,amps = varify_data(means,sigmas,amps)
return means,sigmas,amps
def get_peaks(rows, nhours, tstart, tstop, fit_function, fit_args):
'''
Applies double gaussian + expo fits to all data over some range of time
Arguments:
- full list of csv data input rows
- number of hours to integrate each calculation over
- start/stop times to run over
- peak fitting method
- arguments to be fed to the peak fitting method
Returns:
- lists of means,sigmas,amps from all gaussian fits
- each entry in list includes the value and uncertainty
'''
datatz = rows[-1][1].tzinfo
date_itr = tstart
times = []
means = []
sigmas = []
amps = []
counter = 0
# break data up into days to speed up range selection
while date_itr < tstop:
next_day = date_itr+timedelta(days=1)
daily_row = [row for row in rows if \
inTimeRange(row[1],date_itr,next_day)]
time_itr = date_itr
date_itr = next_day
while time_itr < date_itr:
time_next = time_itr+timedelta(hours=nhours)
integration = [row for row in rows if \
inTimeRange(row[1],time_itr,time_next)]
time_itr = time_next
if len(integration)==0:
continue
array_lst = []
for j in integration:
array_lst.append(make_array(j))
integrated = sum(array_lst)
mean,sigma,amp = fit_function(integrated,counter,*fit_args)
counter += 1
means.append(mean)
sigmas.append(sigma)
amps.append(amp)
times.append(integration[int(len(integration)/2)][1])
means,sigmas,amps = varify_data(means,sigmas,amps)
return times,means,sigmas,amps
def get_weather_data(location,nhours,start_day,stop_day):
tstart = parse(start_day)
tstop = parse(stop_day)
date_itr = tstart
times = []
temps = []
while date_itr < tstop:
data = weather.weather_station_data_scrape(location, date_itr)
time_itr = date_itr
date_itr = date_itr+timedelta(days=1)
if not data:
print('No weather data for {}'.format(date_itr))
while time_itr < date_itr:
time_next = time_itr+timedelta(hours=nhours)
integration = [row for row in data if \
inTimeRange(row[0],time_itr,time_next)]
time_itr = time_next
if len(integration)==0:
continue
times.append(integration[int(len(integration)/2)][0])
temps.append(np.mean(np.asarray([x[1] for x in integration])))
return times,temps
def cut_outliers(array):
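    # Replace any point more than 5 sigma from the mean with the average of its neighbours
    # (or a single neighbour/the mean at the edges).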
mean, sigma = get_stats(array)
for i in range(len(array)):
if (array[i]>mean+5*sigma) or (array[i]<mean-5*sigma):
if i > 0 and i < len(array)-1:
array[i] = (array[i-1] + array[i+1])/2
elif i==0:
                if (array[i+1]<mean+5*sigma) and (array[i+1]>mean-5*sigma):
array[i] = array[i+1]
else:
array[i] = mean
elif i==len(array)-1:
array[i] = array[i-1]
return array
def get_stats(array):
return np.mean(array), np.sqrt(np.var(array))
def make_plot(points,data,errs,xlbl,ylbl,tstr,style,clr,ymin=0,ymax=0):
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title(tstr)
plt.xlabel(xlbl)
plt.ylabel(ylbl)
if ymin and ymax:
plt.ylim(ymin,ymax)
ax.plot(points,data,style)
ax.errorbar(points,data,yerr=errs,fmt=style,ecolor=clr)
fig.autofmt_xdate()
def import_csv(url,start,stop):
print(url)
response = urlopen(url)
reader = csv.reader(io.TextIOWrapper(response))
rows = [row for row in reader if \
inTimeRange(row[1],parse(start),parse(stop))]
print('extracted {} entries from data url'.format(len(rows)))
# remove meta data
return rows
def select_data(rows,start_day,stop_day):
tstart = parse(start_day)
tstop = parse(stop_day)
for row in rows:
if isinstance(row[1], str):
row[1] = parse(row[1])
rows = [row for row in rows if \
inTimeRange(row[1],tstart,tstop)]
times, spectra = get_spectra(rows,1,tstart,tstop)
return times,spectra
def main(times,spectra,nhours,stationID=0,wtimes=[],temps=[]):
#---------------------------------------------------------------------#
# Get fit results for ndays integrating over nhours for Potassium
#---------------------------------------------------------------------#
# single_peak_fit args: channel lims, expo offset, plot flag
#args = [210,310,100,False]
#args = [180,280,100,True]
args = [360,780,7.0,100,False,'K']
calibs,calib_err = get_calibrations(spectra, fitter.single_peak_fit,args)
E_spectra, bin_times = calibrate_spectra(spectra,calibs,times,nhours)
args = [180,390,7.0,100,False,'K']
K_peaks, K_sigmas, K_amps = get_peak_fits(E_spectra, \
fitter.single_peak_fit,args)
#-------------------------------------------------------------------------#
    # Verify and break apart mean,sigma,amp values and uncertainties
#-------------------------------------------------------------------------#
K_ch, K_ch_errs = get_arrays(K_peaks)
K_sig = [i[0] for i in K_sigmas]
K_A = [i[0] for i in K_amps]
K_ch_ave, K_ch_var = get_stats(K_ch)
K_counts = fitter.get_peak_counts(K_ch,K_sig,K_A)
    K_counts = cut_outliers(K_counts)
K_mean, K_var = get_stats(np.asarray(K_counts))
for i in range(len(K_ch)):
if abs(K_ch[i]-K_ch_ave) > 3*K_ch_var:
print('Bad K-40 fit: peak channel = {}'.format(K_ch[i]))
#---------------------------------------------------------------------#
    # Do the same for Bismuth-214
#---------------------------------------------------------------------#
# double_peak_fit args: channel lims, gaus index, expo offset, plot flag
#args = [50,130,1,1,True]
if stationID==0:
args = [50,130,1,1,False,'Bi']
Bi_peaks,Bi_sigmas,Bi_amps = get_peak_fits(E_spectra, \
fitter.double_peak_fit,args)
if stationID==1:
args = [90,150,5.0,1,False,'Bi']
Bi_peaks,Bi_sigmas,Bi_amps = get_peak_fits(E_spectra, \
fitter.single_peak_fit,args)
Bi_ch, Bi_ch_errs = get_arrays(Bi_peaks)
Bi_sig = [i[0] for i in Bi_sigmas]
Bi_A = [i[0] for i in Bi_amps]
B_ch_ave,B_ch_var = get_stats(Bi_ch)
#-------------------------------------------------------------------------#
# Process channel data using fit results
#-------------------------------------------------------------------------#
Bi_counts = fitter.get_peak_counts(Bi_ch,Bi_sig,Bi_A)
Bi_counts = cut_outliers(Bi_counts)
Bi_mean, Bi_var = get_stats(np.asarray(Bi_counts))
print('K-40 <channel> = {} +/- {}'.format(K_ch_ave,K_ch_var))
print('K-40 <N> = {} +/- {}'.format(K_mean,K_var))
print('Bi-214 <channel> = {} +/- {}'.format(B_ch_ave,B_ch_var))
print('Bi-214 <N> = {} +/- {}'.format(Bi_mean,Bi_var))
#-------------------------------------------------------------------------#
# Process weather data
#-------------------------------------------------------------------------#
# LBL weather station
#location = 'KCABERKE89'
#location = 'KCABERKE86'
#wtimes,temps = get_weather_data(location,nhours,tstart,tstop)
times_both,counts,temps = merge_data(bin_times,Bi_counts,wtimes,temps)
#-------------------------------------------------------------------------#
# Plots of everything we are interested in!
#-------------------------------------------------------------------------#
make_plot(bin_times,K_counts,np.sqrt(K_counts), \
'Time','counts','K-40 counts vs Time','go','g')
fig_name = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/K_counts_{}_5-8.png'.format(stationID)
plt.savefig(fig_name)
make_plot(times,calibs,calib_err, \
'Time','keV/channel','keV/channel vs Time','bo','b', \
2.4,2.6)
fig_name = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/calibs_{}_5-8.png'.format(stationID)
plt.savefig(fig_name)
make_plot(bin_times,Bi_counts,np.sqrt(Bi_counts), \
'Time','counts','Bi-214 counts vs Time','go','g')
fig_name = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/Bi_counts_{}_5-8.png'.format(stationID)
plt.savefig(fig_name)
#make_plot(Ktimes,K_ch,K_ch_errs, \
# 'Time','1460 center channel','1460 channel vs Time','ro','r')
#make_plot(times,Bi_ch,Bi_ch_errs, \
# 'Time','609 center channel','609 channel vs Time','ro','r', \
# B_ch_ave-10*B_ch_var,B_ch_ave+10*B_ch_var)
make_plot(temps,counts,np.sqrt(counts), \
'Temp (F)','Bi-214 counts','Bi-214 counts vs Temp (F)','ro','r')
fig_name = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/Bi_counts_vs_T_{}_5-8.png'.format(stationID)
plt.savefig(fig_name)
plt.show()
if __name__ == '__main__':
url = 'https://radwatch.berkeley.edu/sites/default/files/dosenet/lbl_outside_d3s.csv'
#url = 'https://radwatch.berkeley.edu/sites/default/files/dosenet/etch_roof_d3s.csv'
    start = '2017-5-31'
    stop = '2017-6-6'
rows = import_csv(url,start,stop)
# number of days to look at and hours to integrate for each data point
nhours = 1
    times, spectra = select_data(rows, start, stop)
    main(times, spectra, nhours)
|
Audrey graduated from Franklin University with a BS in Accounting and joined the Keelsra team in February 2015. She handles a mixture of client needs including payroll, grant management, and full outsourced accounting. Audrey enjoys being able to support our clients so they can focus more on providing the important services needed within the community. When not supporting her clients, Audrey cherishes all time spent with her husband and two children, as well as travelling to visit relatives, camping, and football.
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class StockPickingType(models.Model):
_inherit = 'stock.picking.type'
code = fields.Selection(selection_add=[('mrp_operation', 'Manufacturing Operation')])
count_mo_todo = fields.Integer(compute='_get_mo_count')
count_mo_waiting = fields.Integer(compute='_get_mo_count')
count_mo_late = fields.Integer(compute='_get_mo_count')
def _get_mo_count(self):
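        # Note: these counts are computed globally (not per picking type) and the
        # same values are assigned to every MRP picking type below.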
mrp_picking_types = self.filtered(lambda picking: picking.code == 'mrp_operation')
if not mrp_picking_types:
return
MrpProduction = self.env['mrp.production']
count_mo_waiting = MrpProduction.search_count([('availability', '=', 'waiting')])
count_mo_todo = MrpProduction.search_count([('state', 'in', ('confirmed', 'planned', 'progress'))])
count_mo_late = MrpProduction.search_count(['&', ('date_planned_start', '<', fields.Date.today()), ('state', '=', 'confirmed')])
for picking in mrp_picking_types:
picking.count_mo_waiting = count_mo_waiting
picking.count_mo_todo = count_mo_todo
picking.count_mo_late = count_mo_late
|
This collection of photographs documents the Presidency of Ronald E. Carrier, 4th President of James Madison University. The images display both his personal and professional life, especially between the years 1960-1985.
[Identification of Item], Ronald E. Carrier Photograph Collection, 1950-1990, P 0003, Special Collections, Carrier Library, James Madison University, Harrisonburg, Va.
Donated by the Office of the President of James Madison University in 2009.
Created and accrued during the presidency of Ronald E Carrier from 1971-1998.
Photographs were removed mechanically from their paper mounts.
Ronald E. Carrier served as the fourth President of James Madison University from 1971 until 1998. He came to Harrisonburg from Memphis State University, where he served as the Vice President for Academic Affairs. The Carrier family, which includes Ronald, his wife Edith, and their children Angela, Michael and Linda, resided in Hillcrest House on campus prior to the donation to JMU of “Oakview Mansion” by Mrs. Lois Poster, which became the President’s house.
Dr. Carrier brought many new ideas to Madison College. Under his tenure, Madison College grew from a predominantly female institution holding 4,000 students to a co-ed university with 14,000 students. Dr. Carrier expanded the grounds of JMU by more than 100 acres, and he is largely credited with the vision of East Campus, and the resulting expansion of the university to the eastern side of I-81. A total of 40 new buildings were also constructed at a cost of 210 million dollars.
The name of Madison College changed in 1977 to James Madison University. Changing the name of the college was controversial. However, the students, faculty, and alumni all voted for what name they preferred, and decided on the name James Madison University. In October, the Board of Visitors went to the Virginia General Assembly with the name change proposal. Dr. Carrier and Dr. Ray Sonner, then the Vice President of Public Affairs, lobbied for the name change and in October the bill passed through the Virginia House and Senate, and was signed by Governor Mills Godwin Jr. The name change was well received, and a new athletic facility was built, dubbed “Godwin Hall”.
This collection consists of photographs documenting the career and Presidency of Ronald E Carrier. It contains an array of both black and white and color photographs of varying sizes from snapshots to 8x10s. These photographs largely document Dr. Carrier’s presence on campus, from his inauguration to graduations to sporting events. The photographs also document other individuals that made influential contributions to the development of JMU such as Zane Showker and former Governor Mills E. Godwin.
The collection is arranged by topic, with attention to how the original order was kept. There is only one series, with fourteen subseries. The subseries are Portraits of Carrier, Portraits of Carrier Family and Friends, JMU Graduation, Name Change to James Madison University, Parties and Formal Events, Awards, Dr. Carrier with other officials, Board of Visitors, Dr. Carrier with Students and Alumni, Dr. Carrier at other Universities, Carriers’ Oakview Garden Show, Miscellaneous Black and White Photographs, and Miscellaneous JMU Photographs.
"Chancellor's Biography." James Madison University - Home. http://www.jmu.edu/chancellor/bio.htm (accessed March 3, 2011).
"JMU - JMU Presidents ." James Madison University - Home . http://www.jmu.edu/centennialcelebration/presidents.shtml (accessed March 3, 2011).
"JMU - Madison College to JMU ." James Madison University - Home . http://www.jmu.edu/centennialcelebration/Madison_JMU.shtml (accessed March 3, 2011).
"JMU Be the Change Profile: Ronald E. Carrier ." James Madison University - Home . http://www.jmu.edu/bethechange/people/carrier.shtml (accessed March 3, 2011).
Roberds, Richard M.. A History of the Founding of the College of Integrated Science and Technology: a Venture in Education Reform. Harrisonburg, Va.: James Madison University, 2005.
For textual information on the Presidency of Dr. Carrier, see PR 2000-0516B, which contains the papers of Dr. Carrier’s presidency.
Other material that is relevant to the development of James Madison University and this time period are the Roop Collection and the Emily Lee Collection.
These photographs were separated from PR 2000-0516B in the interest of preservation and organization.
-P0003.07.14 Carrier with Dean William Hanlon, Dr. Jennings, and D.P. Davis, Jr.
-P0003.07.16 Carrier with Francis and D. P. Davis Jr.
-P0003.07.22 Dr. William Hanlon, Dr. Carrier, Dr. Jennings, and D.P. Davis, Jr.
|
#!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_san_manager_facts
short_description: Retrieve facts about one or more of the OneView SAN Managers
description:
- Retrieve facts about one or more of the SAN Managers from OneView
version_added: "2.5"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
provider_display_name:
description:
- Provider Display Name.
params:
description:
- List of params to delimit, filter and sort the list of resources.
- "params allowed:
- C(start): The first item to return, using 0-based indexing.
- C(count): The number of resources to return.
- C(query): A general query string to narrow the list of resources returned.
- C(sort): The sort order of the returned data set."
extends_documentation_fragment:
- oneview
'''
EXAMPLES = '''
- name: Gather facts about all SAN Managers
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=san_managers
- name: Gather paginated, filtered and sorted facts about SAN Managers
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
params:
start: 0
count: 3
sort: name:ascending
query: isInternal eq false
delegate_to: localhost
- debug: var=san_managers
- name: Gather facts about a SAN Manager by provider display name
oneview_san_manager_facts:
config: /etc/oneview/oneview_config.json
provider_display_name: Brocade Network Advisor
delegate_to: localhost
- debug: var=san_managers
'''
RETURN = '''
san_managers:
description: Has all the OneView facts about the SAN Managers.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class SanManagerFactsModule(OneViewModuleBase):
argument_spec = dict(
provider_display_name=dict(type='str'),
params=dict(type='dict')
)
def __init__(self):
super(SanManagerFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.resource_client = self.oneview_client.san_managers
def execute_module(self):
if self.module.params.get('provider_display_name'):
provider_display_name = self.module.params['provider_display_name']
san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
if san_manager:
resources = [san_manager]
else:
resources = []
else:
resources = self.oneview_client.san_managers.get_all(**self.facts_params)
return dict(changed=False, ansible_facts=dict(san_managers=resources))
def main():
SanManagerFactsModule().run()
if __name__ == '__main__':
main()
|
What would it be like having your The Colony business rank high on the search engines? You can easily achieve this with the help of The Colony SEO service. All you need to do is call Webmaster Change and we make your dream a reality. We offer personalized SEO services to our clients in The Colony and help them in leveraging the power of the Internet to enhance their brand value and also add to their revenue.
Over the years we have regularly involved our clients in The Colony SEO service to give them hands-on experience of the entire SEO process. Our core clientele comprises small and medium businesses in The Colony. What has raised the bar is our SEO service, with detailed analysis, need-based planning and clinical execution. We do not just make tall promises; we deliver results time and again.
At Webmaster Change we use white hat SEO techniques to rank your site high for targeted keywords. We have a team of experienced optimizers who have worked on all genres of websites, including high-end e-commerce solutions. We have always believed in building long-term relationships with our clients and see our growth in the growth of our clients.
Comprehensive Analysis of the website which will include analysis of website structure, competitor analysis, audience and keyword analysis.
Lead capture page/landing page optimization to attract visitors to the exact pages that you want which reduces the bounce rates.
Actionable suggestions offered with The Colony SEO service. Webmasters can easily implement these into their websites for better optimization.
Manage PPC (pay per click) campaigns for the website on highly targeted keywords. All AdWords accounts managed by Google certified managers.
Social media marketing for the website on platforms such as Twitter and Facebook and leverage their popularity.
We strive to achieve excellence in our daily work with our The Colony SEO Service. If you have any doubts or queries regarding our website please feel free to call us at (214) 799-1242. We will happily resolve your query and offer you a customized solution for your website.
|
# This program converts OpenFOAM raw data for the velocity field to a text file with
# both position and velocity vector
#
# Output format :
# position (x y z) and velocity vector
# THIS PROGRAM REQUIRES A DIRECTORY U in the main folder
#
#
# Author : Bruno Blais
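#
# Example invocation (the script and case directory names here are only
# placeholders, inferred from the os.chdir(sys.argv[1]) call below):
#   python convert_velocity.py path/to/sampled/case/directory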
#Python imports
#----------------
import os
import sys
import numpy
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#
readZ=False
readShear=True
readPseudo=True
#Initial time of simulation, final time and time increment must be specified by user
t0=0.4
tf=0.4
dT=0.4
#====================
# READERS
#====================
#This function reads an OpenFOAM raw for a scalar and extract a table of the data
def readfScalar(fname):
infile = open(fname,'r')
if (infile!=0):
#Clear garbage lines
for i in range(0,20,1):
infile.readline()
#Read number of cell centers
n=int(infile.readline())
#Pre-allocate memory
xu=numpy.zeros([n])
#Clear garbage line "("
infile.readline()
#read current property "xu"
for i in range(0,n,1):
number_str=infile.readline()
xu[i]=float(number_str)
else:
print "File %s could not be opened" %fname
infile.close();
return n,xu
#This function reads an OpenFOAM raw file for a vector and extracts a table of the data
def readfVector(fname):
infile = open(fname,'r')
if (infile!=0):
#Clear garbage lines
for i in range(0,20):
infile.readline()
#Read number of cell centers
n=int(infile.readline())
#Pre-allocate memory
x=numpy.zeros([n])
y=numpy.zeros([n])
z=numpy.zeros([n])
#Clear garbage line "("
infile.readline()
#read current property "xu"
for i in range(0,n,1):
number_str=infile.readline()
number2_str=number_str.split("(")
number3_str=number2_str[1].split(")")
number4_str=number3_str[0].split()
x[i]=float(number4_str[0])
y[i]=float(number4_str[1])
z[i]=float(number4_str[2])
else:
print "File %s could not be opened" %fname
infile.close();
return n,x,y,z
#======================
# MAIN
#======================
# Check if the destination folder exists
if not os.path.isdir("./U"):
print "********** Abort **********"
print "The folder particlesInfo does not exist, you must create it manually in the working folder"
#Name of the files to be considered
inname= ['ccx', 'ccy','ccz','p','U','cellVolumes']
if readPseudo:
inname.append('pseudoEq')
elif readShear:
inname.append('shearRate')
os.chdir(sys.argv[1]) # go to directory
nt=int((tf-t0)/dT)+1
t=t0
for i in range(0,nt):
#Current case
print "Post-processing time ", t
#Go to the directory corresponding to the timestep
if (t==0) : os.chdir("0")
elif ((numpy.abs(numpy.mod(t+0.00001,1)))<0.01): os.chdir(str(int(t)))
else :os.chdir(str(t))
[n,x] = readfScalar(inname[0])
[n,y] = readfScalar(inname[1])
if readZ :[n,z] = readfScalar(inname[2])
else : z=numpy.zeros([numpy.size(x)])
[n, p] = readfScalar(inname[3])
[n,u,v, w] = readfVector(inname[4])
[n, V] = readfScalar(inname[5])
if (readShear):
[n, shear] = readfScalar(inname[6])
#Create output file back in main folder
outname="../../U/U_%s" %str(i)
outfile=open(outname,'w')
for j in range(0,n):
if readShear:
outfile.write("%5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e \n" %(x[j],y[j],z[j],u[j],v[j],w[j],p[j],V[j],shear[j]))
else:
outfile.write("%5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e %5.5e\n" %(x[j],y[j],z[j],u[j],v[j],w[j],p[j],V[j]))
outfile.close()
t += dT
#Go back to CFD directory
os.chdir("..") #
print "Post-processing over"
|
With over 25 years of outstanding Leisure and Sport facilities in Essex, Stock Brook Country Club has long been established as the premier Country Club venue in Essex and is proud to offer its members exclusive use of our outstanding Swimming Pool and Spa facilities in Essex.
Take a plunge in one of the finest stand-alone swimming pools in the area. Stock Brook’s stunning 23 metre pool offers swimmers the option of a rigorous workout or a leisurely swim, whilst our retractable roof guarantees year-round swimming in ideal conditions; open-air in the summer, indoor during the winter.
But swimming is not just for children.
We offer extensive swim lessons for adults and hold regular swim club nights for all standards of swimming, including starter lessons (please go to our swim timetable for more details).
Stock Brook has extensive Spa Bath and Wet Side facilities, which include an eight-person Spa Bath, a six-person Sauna and a six-person Steam Room.
|
import math
import time
import cairo
from gi.repository import Gtk, GLib
# studio clock that displays a clock like mentioned in:
# https://masterbase.at/studioclock/#C3CD2D
class StudioClock(Gtk.ToolItem):
__gtype_name__ = 'StudioClock'
# set resolution of the update timer in seconds
timer_resolution = 0.1
# init widget
def __init__(self):
super().__init__()
# suggest size of widget
self.set_size_request(130, 50)
        # remember the last drawn time
self.time = time.localtime(0)
# set up timeout for periodic redraw
GLib.timeout_add_seconds(self.timer_resolution, self.do_timeout)
def do_timeout(self):
# get current time
t = time.localtime(time.time())
        # only redraw if the time has changed since the last draw
if self.time != t:
self.time = t
self.queue_draw()
        # schedule the next timeout callback
GLib.timeout_add_seconds(self.timer_resolution, self.do_timeout)
# override drawing of the widget
def do_draw(self, cr):
# get actual widget size
width = self.get_allocated_width()
height = self.get_allocated_height()
# calculate center and radius of the clock
center = (width / 2, height / 2)
radius = min(center)
# setup gradients for clock background to get a smooth border
bg_lg = cairo.RadialGradient(
center[0], center[1], 0, center[0], center[1], radius)
bg_lg.add_color_stop_rgba(0.0, 0, 0, 0, 1.0)
bg_lg.add_color_stop_rgba(0.9, 0, 0, 0, 1.0)
bg_lg.add_color_stop_rgba(1.0, 0, 0, 0, 0.0)
# paint background
cr.set_source(bg_lg)
cr.arc(center[0], center[1], radius, 0, 2 * math.pi)
cr.fill()
# draw ticks for every second
for tick in range(0, 60):
# fade out seconds in future and highlight past seconds
if tick > self.time.tm_sec:
cr.set_source_rgb(0.2, 0.3, 0.01)
else:
cr.set_source_rgb(0.764, 0.804, 0.176)
# calculate tick position
angle = tick * math.pi / 30
pos = (center[0] + math.sin(angle) * radius * 0.8,
center[1] - math.cos(angle) * radius * 0.8)
# draw tick
cr.arc(pos[0], pos[1], radius / 40, 0, 2 * math.pi)
cr.fill()
        # draw persistent ticks every five seconds
cr.set_source_rgb(0.764, 0.804, 0.176)
for tick in range(0, 12):
# calculate tick position
angle = tick * math.pi / 6
pos = (center[0] + math.sin(angle) * radius * 0.9,
center[1] - math.cos(angle) * radius * 0.9)
# draw tick
cr.arc(pos[0], pos[1], radius / 40, 0, 2 * math.pi)
cr.fill()
# set a reasonable font size
cr.set_font_size(cr.user_to_device_distance(0, height / 5)[1])
# format time into a string
text = time.strftime("%H:%M")
# get text drawing extents
(xbearing, ybearing,
textwidth, textheight,
xadvance, yadvance) = cr.text_extents(text)
# draw time
cr.move_to(center[0] - textwidth / 2, center[1] + textheight / 2)
cr.show_text(text)
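# A minimal usage sketch (assumes an existing Gtk.Toolbar named `toolbar`):
#   clock = StudioClock()
#   toolbar.insert(clock, -1)
#   clock.show()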
|
For over 150 years we have been baking biscuits on the Maltese Islands using recipes passed down from one generation to another. Throughout Malta and Gozo, our brand can be seen peeking from supermarket shelves.
Brinc’s Biscuits is a family run business, manufacturing Maltese Traditional Biscuits under the brand names Brinc’s and Brincsons.
Our products are consumed, at all times of the day, dipped in a hot beverage or spread with jam in the morning, a sweet treat during the day, or served alone or with a variety of platters in the evening. As tastes and trends evolve, our products still find their way into many homes, enjoyed by family and friends. Our products are more than just biscuits, they are our Maltese Traditional Biscuits and a part of our way of life.
|
from django.urls import reverse
from reversion import revisions as reversion
from reversion.models import Version
from reversion.revisions import create_revision
from workshops.models import Event, Person, Tag
from workshops.tests.base import TestBase
class TestRevisions(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
self._setUpOrganizations()
self.tag1, _ = Tag.objects.get_or_create(pk=1)
self.tag2, _ = Tag.objects.get_or_create(pk=2)
with create_revision():
self.event = Event.objects.create(host=self.org_alpha, slug="event")
self.event.tags.add(self.tag1)
self.event.save()
with create_revision():
self.event.slug = "better-event"
self.event.host = self.org_beta
self.event.tags.add(self.tag2)
self.event.save()
# load versions
versions = Version.objects.get_for_object(self.event)
assert len(versions) == 2
self.newer, self.older = versions
def test_showing_diff_event(self):
# get newer revision page
rv = self.client.get(reverse("object_changes", args=[self.newer.pk]))
self.assertEqual(rv.status_code, 200)
assert rv.context["version1"] == self.older
assert rv.context["version2"] == self.newer
assert rv.context["revision"] == self.newer.revision
assert rv.context["object"] == self.event
def test_diff_shows_coloured_labels(self):
# get newer revision page
rv = self.client.get(reverse("object_changes", args=[self.newer.pk]))
# Red label for removed host
self.assertContains(
rv,
'<a class="label label-danger" href="{}">-{}</a>'.format(
self.org_alpha.get_absolute_url(), self.org_alpha
),
html=True,
)
# Green label for assigned host
self.assertContains(
rv,
'<a class="label label-success" href="{}">+{}</a>'.format(
self.org_beta.get_absolute_url(), self.org_beta
),
html=True,
)
# Grey label for pre-assigned tag
self.assertContains(
rv,
'<a class="label label-default" href="#">{}</a>'.format(self.tag1),
html=True,
)
# Green label for additionally assigned tag
self.assertContains(
rv,
'<a class="label label-success" href="#">+{}</a>'.format(self.tag2),
html=True,
)
def test_diff_shows_PK_for_deleted_relationships(self):
# Delete the tag
self.tag1.delete()
self.tag2.delete()
# get newer revision page
rv = self.client.get(reverse("object_changes", args=[self.newer.pk]))
self.assertContains(
rv, '<a class="label label-default" href="#">1</a>', html=True
)
self.assertContains(
rv, '<a class="label label-success" href="#">+2</a>', html=True
)
class TestRegression1083(TestBase):
def setUp(self):
self._setUpUsersAndLogin()
def test_regression_1083(self):
with reversion.create_revision():
alice = Person.objects.create_user(
username="alice",
personal="Alice",
family="Jones",
email="[email protected]",
)
with reversion.create_revision():
bob = Person.objects.create_user(
username="bob", personal="Bob", family="Smith", email="[email protected]"
)
with reversion.create_revision():
alice.family = "Williams"
alice.save()
bob.family = "Brown"
bob.save()
res = self.app.get(reverse("person_details", args=[bob.pk]), user="admin")
revision = res.click("Last modified on")
self.assertIn("Smith", revision)
self.assertIn("Brown", revision)
back_to_person_view = revision.click("View newest")
self.assertIn("Brown", back_to_person_view)
|
James Clerk Maxwell was a Scottish mathematician and physicist. He is perhaps most famous for his extension and refinement of Faraday's equations describing magnetic and electric fields. He reduced the necessary set of equations to four simple partial differential equations, the eponymous Maxwell equations, publishing the work in 1873.
He also worked in thermodynamics, lending his name to another set of four key differential relationships (the Maxwell relations). He also independently derived the Boltzmann distribution of the kinetic energies of gas molecules. Maxwell's demon, a "finite being" who opened a door between the two halves of a box of molecules, letting them through depending on the amount of energy they had, was a rhetorical device Maxwell used to show that entropy and heat flow were, at their core, statistical phenomena.
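For reference, here are the four equations as they're usually written today in differential form (a modern presentation rather than Maxwell's original notation):

\nabla \cdot \mathbf{E} = \rho / \varepsilon_0
\nabla \cdot \mathbf{B} = 0
\nabla \times \mathbf{E} = -\partial \mathbf{B} / \partial t
\nabla \times \mathbf{B} = \mu_0 \mathbf{J} + \mu_0 \varepsilon_0 \, \partial \mathbf{E} / \partial t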
Lord Kelvin (aka William Thomson) applied the demon tag, Maxwell used the term "finite being"!
It should be mentioned that Maxwell originally had 20 equations, which were later condensed to 4.
I originally came across this blog when your posts on chirality were recent. You have a follower!
Ah...I didn't know that there were originally 20! It's nice to know there is someone out there reading, too!
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
hostname = os.uname()[1]
if hostname == "server1":
prepath = "/opt"
elif hostname == "octopussy":
prepath = "/home/xir/dev"
import sys, urllib, os,cStringIO
try:
import pycurl
except:
print "Intentando instalar pycurl"
try:
os.system('sudo apt-get install -y python-pycurl')
import pycurl
except:
print "No ha sido posible instalar la libreria necesaria *pycurl*"
print "Intentalo a mano"
sys.exit(254)
try:
from BeautifulSoup import BeautifulSoup
except:
print "Intentando instalar BeautifulSoap"
try:
os.system('sudo apt-get install -y python-beautifulsoup')
from BeautifulSoup import BeautifulSoup
except:
print "No ha sido posible instalar la libreria necesaria *BeautifulSoap*"
print "Intentalo a mano"
sys.exit(254)
sys.path.append(os.path.abspath(prepath + "/menu/conf/"))
from menuconf import *
sys.path.append(os.path.abspath(prepath + "/menu/lib/"))
from images import *
mes = [ 'zero','enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'septiembre', 'octubre', 'noviembre','diciembre' ]
if int(today) > 27:
mesADescargar = mes[int(month) + 1]
else:
mesADescargar = mes[int(month)]
def get_image_url( url ):
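    # Fetch the page with pycurl, then scan it for the menu section matching the
    # target month and return the first linked image URL found there.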
buf = cStringIO.StringIO()
d = pycurl.Curl()
d.setopt(d.URL, url)
d.setopt(d.WRITEFUNCTION, buf.write)
d.perform()
menu = False
encontrado = False
for p in buf.getvalue().split('>'):
if "Men" in p:
if mesADescargar in p.lower():
menu = True
if menu and not encontrado:
if "imageanchor" in p:
encontrado = True
img = p.split(' ')[1][6:-1]
buf.close()
try:
return img
except:
return ""
# if the marker file already exists, don't download again
if os.path.isfile(datapath + str(descargado)):
sys.exit()
else:
url = get_image_url(rss)
if url != "":
urllib.urlretrieve(url, tpath + filename)
create_images()
f = open (datapath + str(descargado), 'w')
f.close()
|
Originally hailing from the mid-16th century, the morion was a helmet that became fairly common, thanks to its efficient and effective protection. This Comb Morion recreates the form of this ages-old helm, as well as its protective strength. It should come as no surprise that the morion is a rather famous helmet design, as it has been widely depicted by film and fiction, and is often associated with Spanish explorers. It was also a popular helm of troops all throughout Europe, due in part to its ease of production, alongside its highly protective nature.
This combed morion helmet is made entirely from quality 18 gauge steel with a polished finish. The rounded top and brim serve to help keep the wearer's head protected and their face shielded from downward blows. The interior of the helmet features a leather liner, while a pair of cheek protectors hangs from the sides, providing extra facial coverage, as well as a place for the leather chin straps to connect. It is a full-sized replica helmet that is fully wearable, and when worn, it stands up rather well to the rigors and impacts experienced during the course of LARP combat and light reenactment.
It is available in two sizes, either medium or large. If you want to emulate the look of a conquistador, a comb morion helmet is definitely the way to go. More than that, though, this Comb Morion - Steel helmet is a solid choice if you want great protection, without having to restrict your vision or your face behind layers of steel.
|
# Copyright 2009 - 2011 Machinalis: http://www.machinalis.com/
#
# This file is part of Eff.
#
# Eff is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eff is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Eff. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
from django.contrib.auth.views import login, logout
from django.views.generic.simple import redirect_to
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
from eff_site.settings import CURRENT_ABS_DIR
from eff_site.eff.views import (update_hours, eff, eff_check_perms,
eff_previous_week, eff_current_week, eff_current_month, eff_horas_extras,
eff_chart, eff_next, eff_prev, eff_charts, eff_report, eff_update_db,
eff_administration, eff_client_report, eff_client_reports_admin,
UserProfileForm, eff_last_month, eff_admin_add_user,
eff_admin_change_profile, profile_detail, eff_dump_csv_upload,
eff_fixed_price_client_reports, eff_admin_users_association, eff_home,
eff_client_home, index, eff_client_projects, eff_client_summary,
eff_client_summary_period, add_attachment_custom, delete_attachment_custom)
from os.path import join
jscalendar_dir = join(CURRENT_ABS_DIR, 'addons/jscalendar-1.0/')
js_dir = join(CURRENT_ABS_DIR, 'addons/js/')
jscalendar_lang_dir = join(CURRENT_ABS_DIR, 'addons/jscalendar-1.0/lang/')
calendar_dir = join(CURRENT_ABS_DIR, 'addons/simple-calendar/')
sortable_dir = join(CURRENT_ABS_DIR, 'addons/sortable-table/')
templates_dir = join(CURRENT_ABS_DIR, 'templates/')
images_dir = join(CURRENT_ABS_DIR, 'templates/images/')
urlpatterns = patterns('',
url(r'^$', index, name='root'),
url(r'^clients/home/$', eff_client_home, name='client_home'),
url(r'^clients/projects/$', eff_client_projects, name='client_projects'),
url(r'^clients/summary/period/$', eff_client_summary_period,
name='client_summary_period'),
url(r'^clients/summary/$', eff_client_summary,
name='client_summary'),
# django-profiles
url(r'^accounts/login/$', login, {'template_name': 'login.html'},
name='login'),
url(r'^accounts/logout/$', logout, {'template_name': 'logout.html'},
name='logout'),
url(r'^accounts/profile/$', eff_home, name='eff_home'),
url(r'^login/$', redirect_to, {'url': '/accounts/login/'},
name='redir_login'),
url(r'^logout/$', redirect_to, {'url': '/accounts/logout/'},
name='redir_logout'),
url(r'^checkperms/([A-Za-z_0-9]*)/$', eff_check_perms, name='checkperms'),
url(r'^profiles/edit', 'eff.views.edit_profile',
{'form_class': UserProfileForm, }, name='profiles_edit'),
url(r'^profiles/(?P<username>[\w\._-]+)/$', profile_detail,
name='profiles_detail'),
url(r'^profiles/', include('profiles.urls'), name='profiles'),
# password reset
url(r'^accounts/password_reset/$',
'django.contrib.auth.views.password_reset',
{'template_name': 'password_reset.html',
'email_template_name': 'password_reset_email.html'},
name='password_reset'),
url(r'^password_reset/$', redirect_to,
{'url': '/accounts/password_reset/'}, name='redir_password_reset'),
url(r'^accounts/password_reset/done/$',
'django.contrib.auth.views.password_reset_done',
{'template_name': 'password_reset_done.html'},
name='password_reset_done'),
url(r'^accounts/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
'django.contrib.auth.views.password_reset_confirm',
{'template_name': 'password_reset_confirm.html'},
name='password_reset_confirm'),
url(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
redirect_to,
{'url': '/accounts/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/'},
name='redir_password_reset_confirm'),
url(r'^accounts/reset/done/$',
'django.contrib.auth.views.password_reset_complete',
{'template_name': 'password_reset_complete.html'},
name='password_reset_complete'),
# password change
url(r'^accounts/change_password/$',
'django.contrib.auth.views.password_change',
{'template_name': 'password_change.html',
'post_change_redirect': '/accounts/change_password/done/'},
name='password_change'),
url(r'^accounts/change_password/done/$',
'django.contrib.auth.views.password_change_done',
{'template_name': 'password_change_done.html'},
name='password_change_done'),
url(r'^password_change/$', redirect_to,
{'url': '/accounts/password_change/'},
name='redir_password_change'),
url(r'^updatehours/([A-Za-z_0-9]*)/$', update_hours, name='update_hours'),
url(r'^efi/$', eff, name='eff'),
url(r'^efi/semanaanterior/$', eff_previous_week, name='eff_previous_week'),
url(r'^efi/semanaactual/$', eff_current_week, name='eff_current_week'),
url(r'^efi/mesactual/$', eff_current_month, name='eff_current_month'),
url(r'^efi/mespasado/$', eff_last_month, name='eff_last_month'),
url(r'^efi/horasextras/$', eff_horas_extras, name='eff_extra_hours'),
url(r'^efi/next/$', eff_next, name='eff_next'),
url(r'^efi/prev/$', eff_prev, name='eff_prev'),
url(r'^efi/chart/([A-Za-z_0-9]*)/$', eff_chart, name='eff_chart'),
url(r'^efi/charts/$', eff_charts, name='eff_charts'),
url(r'^efi/reporte/([A-Za-z_0-9]*)/$', eff_report, name='eff_report'),
url(r'^efi/update-db/$', eff_update_db, name='eff_update_db'),
url(r'^efi/administration/users_password/$', eff_administration,
name='eff_administration'),
url(r'^efi/administration/users_profile/$', eff_admin_change_profile,
name='eff_admin_change_profile'),
url(r'^efi/administration/add_user/$', eff_admin_add_user,
name='eff_admin_add_user'),
url(r'^efi/administration/client_reports/$', eff_client_reports_admin,
name='eff_client_reports_admin'),
url(r'^efi/administration/fixed_price_client_reports/$',
eff_fixed_price_client_reports, name='eff_fixed_price_client_reports'),
url(r'^efi/administration/dump-csv-upload/$', eff_dump_csv_upload,
name='eff_dump_csv_upload'),
url(r'^efi/reporte_cliente/([-\w]+)/$', eff_client_report,
name='eff_client_report'),
url(r'^efi/administration/users_association/$',
eff_admin_users_association, name='eff_admin_users_association'),
url(r'^efi/administration/client_summary/$',
eff_client_summary_period,
name='eff_client_summary_period'),
url(r'^efi/administration/client_summary/([-\w]+)/$',
eff_client_summary,
name='eff_client_summary'),
url(r'^admin/', include(admin.site.urls)),
url(r'^comments/', include('django.contrib.comments.urls')),
url(r'^attachments/add-for/(?P<app_label>[\w\-]+)/(?P<module_name>[\w\-]+)/(?P<pk>\d+)/$',
add_attachment_custom, name="add_attachment_custom"),
url(r'^attachments/delete/(?P<attachment_pk>\d+)/$',
delete_attachment_custom, name="delete_attachment_custom"),
url(r'^attachments/', include('attachments.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT}),
)
|
If you own ‘Advent’ printers or copiers and are looking for the right ink and toner cartridges for your equipment, you’re in the right place. Firstinks carries ink and toner cartridges for all ‘Advent’ printer models, delivering rich print quality and long-lasting cartridge life. We stock highly compatible products with substantial savings and same-day shipping.
|
### Slave API Views ###
from django.db.models import F, Count
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from rest_framework import permissions
from rest_framework import pagination
from slave.models import Slave
from slave.serializers import SlaveSerializer, SlaveDetailSerializer
from slave.helpers import filter_by_attribute, filter_by_location_region
class API_SlaveList(generics.ListAPIView):
""" List Slaves. """
permission_classes = (permissions.IsAuthenticated,)
serializer_class = SlaveSerializer
def get_queryset(self):
""" Return Slaves of the current user. """
# Authorization check.
# We assume later that slave_list is already
# filtered with authorized slaves only so we may
# simply add some more filters.
slave_list = Slave.objects.filter(owner=self.request.user)
# Default filter alive slave only.
# Reversed order because alive are much more frequent requests.
        if 'dead' not in self.request.query_params:
            slave_list = slave_list.filter(date_death__isnull=True)
        else:
            slave_list = slave_list.filter(date_death__isnull=False)
# Filter by valid attributes
valid_params = ['location', 'sex']
for attr in valid_params:
if attr in self.request.query_params:
slave_list = filter_by_attribute(slave_list,\
attribute_name=attr,\
attribute=self.request.query_params.get(attr))
# Filter by Region
if 'region' in self.request.query_params:
slave_list = filter_by_location_region(slave_list, self.request.query_params.get('region'))
# Filter free Slaves
if 'free' in self.request.query_params:
            # FIXME: this is clumsy. We compare the total number of assignments
            # to the number of released ones; if they are equal, nothing is
            # currently running. Filtering directly on an annotation of
            # NON-released assignments has not worked yet.
slave_list = slave_list.annotate(assgns=Count('assignments')).\
annotate(rel_assgns=Count('assignments__date_released')).\
filter(assgns=F('rel_assgns'))
# Order By
# Should one day get the ordering from request.
slave_list = slave_list.order_by('location__region', 'date_birth')
        # Paginate
        # FIXME: the built-in "LimitOffsetPagination" didn't work here,
        # so limit/offset are handled directly in the view.
        if any(q in self.request.query_params for q in ('limit', 'offset')):
            offset = int(self.request.query_params.get('offset', 0))
            if 'limit' in self.request.query_params:
                limit = int(self.request.query_params.get('limit'))
                slave_list = slave_list[offset:offset + limit]
            else:
                slave_list = slave_list[offset:]
return slave_list
class API_SlaveDetail(APIView):
""" Slave Details. """
permission_classes = (permissions.IsAuthenticated,)
serializer_class = SlaveDetailSerializer
def get_object(self, pk):
""" Get already authorized Item."""
s = Slave.objects.get(pk=pk, owner=self.request.user)
# This updates available skills for the next time
s.get_available_skills()
return s
def get(self, request, pk, format=None):
# Get authorized Slave
try:
slave = self.get_object(pk)
except Slave.DoesNotExist:
return Response("Authorization error or wrong Slave id.",
status=status.HTTP_404_NOT_FOUND)
        return Response(self.serializer_class(slave).data)
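A hedged sketch of how these endpoints might be queried, assuming the list view is routed at /api/slaves/ (the URL prefix and the example values are made up; routing is defined outside this file):

# GET /api/slaves/?sex=F&region=north         -> alive Slaves matching the sex and region filters
# GET /api/slaves/?dead=1                     -> dead Slaves only
# GET /api/slaves/?free=1&limit=10&offset=20  -> 10 currently unassigned Slaves, skipping the first 20
# All results are restricted to Slaves owned by the authenticated user.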
|
The Holiday Open House is your downtown local restaurants', retail stores', and businesses' way of saying thank you to our customers and community for their support and patronage throughout the year. Come celebrate our annual Holiday Open House together with the City of Manhattan Beach Pier Lighting Ceremony. Downtown merchants will be open until 9:00 PM and are sure to offer some distinctive gifts and intriguing deals this holiday season. Our restaurants will be serving their original menus, and some sampling is sure to get your palate started for an evening of wonderful food. Be sure to check out the solar-powered Festival of Lights and the 12-foot living tree in the Metlox Plaza. Please note: The Living Trees on the pier will be up for one week, from November 14th to November 20th. Special thanks to Brett Zebrowski, owner of Palm Realty, for sponsoring the trees.
Download a copy of the schedule (PDF).
Please note the street closures that will occur on the day of the event, or download the map (PDF).
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import json
import dateutil.parser
import mock
from django.test import TestCase
from paying_for_college.models import School, Contact, Program, Alias, Nickname
from paying_for_college.models import ConstantCap, ConstantRate, Disclosure
from paying_for_college.models import Notification, print_vals
from paying_for_college.models import get_region
class SchoolRegionTest(TestCase):
def test_get_region(self):
school = School(school_id='123456', state='NE')
self.assertTrue(get_region(school) == 'MW')
def test_get_region_failure(self):
school = School(school_id='123456', state='')
self.assertTrue(get_region(school) == '')
class SchoolModelsTest(TestCase):
def create_school(self, ID=999999,
data_json='',
accreditor="Almighty Wizard",
city="Emerald City",
degrees_highest="3",
state="OZ",
ope6=5555,
ope8=555500):
return School.objects.create(school_id=ID,
data_json=data_json,
accreditor=accreditor,
degrees_highest=degrees_highest,
degrees_predominant=degrees_highest,
city=city,
state=state,
ope6_id=ope6,
ope8_id=ope8)
def create_alias(self, alias, school):
return Alias.objects.create(alias=alias,
is_primary=True,
institution=school)
def create_contact(self):
return Contact.objects.create(contact='[email protected]',
name='Hackey Sack')
def create_nickname(self, school):
return Nickname.objects.create(institution=school,
nickname='Hackers')
def create_program(self, school):
return Program.objects.create(institution=school,
program_name='Hacking',
level='3')
def create_disclosure(self, school):
return Disclosure.objects.create(institution=school,
name='Regional transferability',
text="Your credits won't transfer")
def create_notification(self,
school,
oid='f38283b5b7c939a058889f997949efa566c616c5',
time='2016-01-13T20:06:18.913112+00:00'):
return Notification.objects.create(institution=school,
oid=oid,
timestamp=dateutil.parser.parse(time),
errors='none')
def test_school_related_models(self):
s = self.create_school()
self.assertTrue(isinstance(s, School))
self.assertEqual(s.primary_alias, "Not Available")
d = self.create_disclosure(s)
self.assertTrue(isinstance(d, Disclosure))
self.assertTrue(d.name in d.__unicode__())
a = self.create_alias('Wizard U', s)
self.assertTrue(isinstance(a, Alias))
self.assertTrue(a.alias in a.__unicode__())
self.assertEqual(s.primary_alias, a.alias)
self.assertEqual(s.__unicode__(), a.alias + u" (%s)" % s.school_id)
c = self.create_contact()
self.assertTrue(isinstance(c, Contact))
self.assertTrue(c.contact in c.__unicode__())
n = self.create_nickname(s)
self.assertTrue(isinstance(n, Nickname))
self.assertTrue(n.nickname in n.__unicode__())
p = self.create_program(s)
self.assertTrue(isinstance(p, Program))
self.assertTrue(p.program_name in p.__unicode__())
self.assertTrue(p.program_name in p.as_json())
self.assertTrue('Bachelor' in p.get_level())
noti = self.create_notification(s)
self.assertTrue(isinstance(noti, Notification))
self.assertTrue(noti.oid in noti.__unicode__())
self.assertTrue(print_vals(s) is None)
self.assertTrue("Emerald City" in print_vals(s, val_list=True))
self.assertTrue("Emerald City" in print_vals(s, val_dict=True)['city'])
self.assertTrue("Emerald City" in print_vals(s, noprint=True))
self.assertTrue(s.convert_ope6() == '005555')
self.assertTrue(s.convert_ope8() == '00555500')
self.assertTrue('Bachelor' in s.get_highest_degree())
s.ope6_id = 555555
s.ope8_id = 55555500
self.assertTrue(s.convert_ope6() == '555555')
self.assertTrue(s.convert_ope8() == '55555500')
s.ope6_id = None
s.ope8_id = None
self.assertTrue(s.convert_ope6() == '')
self.assertTrue(s.convert_ope8() == '')
def test_constant_models(self):
cr = ConstantRate(name='cr test', slug='crTest', value='0.1')
self.assertTrue(cr.__unicode__() == u'cr test (crTest), updated None')
cc = ConstantCap(name='cc test', slug='ccTest', value='0')
self.assertTrue(cc.__unicode__() == u'cc test (ccTest), updated None')
@mock.patch('paying_for_college.models.send_mail')
def test_email_notification(self, mock_mail):
skul = self.create_school()
noti = self.create_notification(skul)
msg = noti.notify_school()
self.assertTrue('failed' in msg)
contact = self.create_contact()
skul.contact = contact
skul.save()
noti2 = self.create_notification(skul)
msg1 = noti2.notify_school()
self.assertTrue(mock_mail.call_count == 1)
self.assertTrue('email' in msg1)
@mock.patch('paying_for_college.models.requests.post')
def test_endpoint_notification(self, mock_post):
skul = self.create_school()
contact = self.create_contact()
contact.endpoint = 'fake-api.fakeschool.edu'
contact.save()
skul.contact = contact
skul.save()
noti = self.create_notification(skul)
msg = noti.notify_school()
# print("notification mock_post.call_count is {0}".format(mock_post.call_count))
# print("endpoint notification msg is {0}".format(msg))
self.assertTrue(mock_post.call_count == 1)
self.assertTrue('endpoint' in msg)
def test_endpoint_notification_blank_contact(self):
skul = self.create_school()
contact = self.create_contact()
contact.contact = ''
contact.endpoint = ''
contact.save()
skul.contact = contact
skul.save()
noti = self.create_notification(skul)
msg = noti.notify_school()
self.assertTrue('failed' in msg)
|
BIJU PATNAIK UNIVERSITY OF TECHNOLOGY, ODISHA
Lecture Notes on DBMS
Prepared by Dr. Subhendu Kumar Rath, BPUT, Odisha

DBMS: Basic Concepts
1. Introduction
2. Disadvantages of the file-oriented approach
3. Database
4. Why a database
5. Database Management System (DBMS)
6. Functions of a DBMS
7. Advantages and disadvantages of a DBMS
8. Database basics
9. Three-level architecture of a DBMS
10. Database users
11. Database languages
12. Database structure

Introduction: In a computerized information system, data is the basic resource of the organization, so proper organization and management of data is required for the organization to run smoothly. Database management systems deal with how data is stored and managed in a computerized information system. Any organization requires accurate and reliable data for better decision making, for ensuring the privacy of data, and for controlling data efficiently. Examples include a deposit to or withdrawal from a bank, a hotel, airline, or railway reservation, or a purchase from a supermarket; in all of these cases a database is accessed.

What is data: Data is known facts or figures that have implicit meaning. It can also be defined as the representation of facts, concepts, or instructions in a formal manner suitable for understanding and processing. Data can be represented using alphabets (A-Z, a-z), digits (0-9), and special characters (+, -, #, $, etc.), e.g. 25, "ajit".

Information: Information is processed data on which decisions and actions are based. It can be defined as data organized and classified to provide meaningful values, e.g. "The age of Ravi is 25".

File: A file is a collection of related data stored in secondary memory.
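As a loose illustration of the data/information distinction (this sketch is not part of the original notes; the records and the reference year are made up):

students = [
    {"name": "Ravi", "birth_year": 1999},   # stored data: raw facts with implicit meaning
    {"name": "Ajit", "birth_year": 2001},
]

def age_in(year, record):
    # Processing the stored fact yields information a decision can be based on.
    return year - record["birth_year"]

for s in students:
    print("The age of {} is {}".format(s["name"], age_in(2024, s)))
# -> "The age of Ravi is 25", matching the example sentence in the notes.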
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jörg Thalheim (Mic92)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
from libqtile.widget import base
def get_meminfo():
    """Parse /proc/meminfo into a dict of values in (rough) megabytes."""
    val = {}
    with open('/proc/meminfo') as f:
        for line in f:
            key, tail = line.split(':')
            uv = tail.split()
            # /proc/meminfo reports kilobytes; store approximate megabytes.
            val[key] = int(uv[0]) // 1000
    # MemUsed is not provided by the kernel, so derive it here.
    val['MemUsed'] = val['MemTotal'] - val['MemFree']
    return val
class Memory(base.InLoopPollText):
"""Displays memory usage"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("fmt", "{MemUsed}M/{MemTotal}M", "see /proc/meminfo for field names")
]
def __init__(self, **config):
super(Memory, self).__init__(**config)
self.add_defaults(Memory.defaults)
def poll(self):
return self.fmt.format(**get_meminfo())
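A minimal sketch of how this widget might be placed in a qtile bar, assuming the file above is importable (the module name custom_memory is made up for the example):

from libqtile import bar
from libqtile.config import Screen
from custom_memory import Memory  # hypothetical import path for the widget above

screens = [
    Screen(
        top=bar.Bar(
            # fmt fields come straight from /proc/meminfo plus the derived MemUsed
            [Memory(fmt="{MemUsed}M/{MemTotal}M")],
            24,  # bar height in pixels
        )
    )
]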
|
Macroecology is concerned with the statistical distributions of variables among large numbers of comparable ecological "particles." Usually these particles are either many individual organisms within species populations, or many species within local, regional or continental biotas. The individuals and species are not exactly identical; they vary in their characteristics. Macroecology seeks to discover, describe and explain the patterns of variation.
Much of the emphasis is on the shapes and boundaries of these statistical distributions, because they appear to reflect intrinsic evolutionary or extrinsic environmental constraints on the variation. In order to characterize and compare these distributions, it is desirable - but not always possible - to have samples of hundreds or thousands of particles.
The variables of macroecological study are ecologically relevant characteristics of organisms. The kinds of attributes that can be used are necessarily limited by the requirement for samples of large numbers of individuals, populations, or species. Most of my research has focused on variables, such as body mass, population density, and area of geographic range, that affect the use of space and nutritional resources. Body mass is correlated with the energetic, nutrient, and space requirements of individual organisms.
Local population density indicates the number of individuals that coexist in and are supported by a small area. The size and configuration of the geographic range shows the area of space and range of environmental conditions within which all populations of a species occur. Note that each of the above variables characterizes a different level of organization: individual, population, and species, respectively. Note also that this is by no means an exhaustive list; these are just examples of the variables I have most frequently used in my macroecological studies.
Macroecology tends to focus on phenomena at regional to global spatial scales and decadal to millennial temporal scales. This is a practical limitation imposed by the need for large samples, but it means macroecology is often concerned with patterns and processes at much larger scales than the small study plots and short field seasons of most experimental ecologists. Studies must often consider regional and global environmental variation, earth history, species dynamics (speciation, extinction, and geographic range shifts), and phylogenetic relationships. Macroecology explores the domain where ecology, biogeography, paleobiology, and macroevolution come together, and thus has the potential to forge synthetic links among these disciplines.
The macroecology research program is both empirical and theoretical, both inductive and deductive. It is concerned with the relationship between pattern and process. It is based on the assumption that some of the general processes regulating abundance, distribution and diversity of organisms are reflected in emergent patterns in the statistical distributions of individuals, populations and species.
Macroecological research seeks to discover and describe these patterns and to develop and test hypotheses to account for them. While much of the initial inspiration comes inductively, from the discovery of patterns in data, the validity of the ideas ultimately must be evaluated deductively, by casting them as hypotheses that make testable predictions.
Macroecology has not only discovered some intriguing patterns, but has also begun to develop and test mechanistic hypotheses.
Information contained in this document is © copyright James H. Brown, 2009. All rights reserved.
|