# start of python 3 support:
from __future__ import unicode_literals
'''
Copyright 2012, 2013 Jonathan Morgan
This file is part of http://github.com/jonathanmorgan/reddit_collect.
reddit_collect is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
reddit_collect is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with http://github.com/jonathanmorgan/reddit_collect. If not, see http://www.gnu.org/licenses/.
'''
#!/usr/bin/python
#================================================================================
# imports
#================================================================================
# base python libraries
import datetime
import gc
import sys
import time
# django imports
import django.db
# site-specific imports.
#site_path = '/home/socs/socs_reddit/'
#if site_path not in sys.path:
# sys.path.append( site_path )
#import myLib
import reddit_collect.models
# python_utilities
from python_utilities.email.email_helper import EmailHelper
from python_utilities.exceptions.exception_helper import ExceptionHelper
from python_utilities.logging.summary_helper import SummaryHelper
from python_utilities.rate_limited.basic_rate_limited import BasicRateLimited
from python_utilities.strings.string_helper import StringHelper
# ReddiWrapper
from reddiwrap.ReddiWrap import ReddiWrap
#================================================================================
# class RedditCollector
#================================================================================
class RedditCollector( BasicRateLimited ):
#============================================================================
# CONSTANTS-ish
#============================================================================
STATUS_SUCCESS = "Success!"
STATUS_PREFIX_ERROR = "ERROR: "
# DEBUG - changed to instance variable.
#DEBUG_FLAG = False
#============================================================================
# instance variables
#============================================================================
reddiwrap_instance = None
user_agent = ""
username = ""
password = ""
cookie_file_path = ""
# email helpers.
email_helper = None
email_status_address = ""
# rate limiting - in parent class BasicRateLimited.
#do_manage_time = True
#rate_limit_in_seconds = 2
#request_start_time = None
# response item limit - 200 when not logged in, 500 when logged in, 1500 when gold.
response_item_limit = 1500
# performance
do_bulk_create = True
# check for existing?
do_check_for_existing = True
# encoding - MySQL's utf8 character set only allows unicode characters of up
# to 3 bytes, so optionally convert 4-byte characters to entities.
convert_4_byte_unicode_to_entity = False
# error handling
error_limit_count = 10
exception_helper = ExceptionHelper()
# debug_flag - don't use this for outputting details, just for serious
# debugging information.
debug_flag = False
# output details?
do_output_details = False
# bulk comment processing.
bulk_comments_processed = 0
#---------------------------------------------------------------------------
# __init__() method
#---------------------------------------------------------------------------
def __init__( self ):
'''
Constructor
'''
# instance variables
self.reddiwrap_instance = None
self.user_agent = ""
self.username = ""
self.password = ""
self.cookie_file_path = ""
# flag to say if this instance should manage time.
self.do_manage_time = True
self.rate_limit_in_seconds = 2
self.request_start_time = None
# email
self.email_helper = None
self.email_status_address = ""
# response item limit - 200 when not logged in, 500 when logged in, 1500 when gold.
self.response_item_limit = 1500
# performance
self.do_bulk_create = True
# check for existing?
self.do_check_for_existing = True
# encoding
self.convert_4_byte_unicode_to_entity = True
# error handling
self.error_limit_count = 10
self.exception_helper = ExceptionHelper()
# debug_flag - don't use this for outputting details, just for serious
# debugging information.
self.debug_flag = False
# output details?
self.do_output_details = False
# bulk comment processing.
self.bulk_comments_processed = 0
#-- END constructor --#
#============================================================================
# instance methods
#============================================================================
def collect_comments( self,
posts_qs_IN = None,
do_update_existing_IN = True,
*args,
**kwargs ):
'''
This method accepts a QuerySet of django reddit_collect Post() instances
for which you want to collect comments. Uses ReddiWrapper to do the
actual retrieval, then stores them off in database using django.
Parameters:
- posts_qs_IN - defaults to None. QuerySet containing posts you want to collect comments for. If None, will collect for all posts in database whose comment status is not "done".
- do_update_existing_IN - Boolean, True if we want to update existing comments that are already in the database, false if not. Defaults to True.
Postconditions: Stores comments for each post to database using django
model classes. Returns a status message.
# Original Code
posts = myLib.posts_of_reddit(subreddit.name); # correct
print "saving Comments ... ";
i = 0;
for post in posts:
pst = myLib.make_post_obj(post);
reddit.fetch_comments(pst);
myLib.iterate_comments(pst.comments); # iterates and save comments
time.sleep(1);
i = i + 1;
print i;
'''
# return reference
status_OUT = self.STATUS_SUCCESS
# declare variables
me = "collect_comments"
my_summary_helper = None
reddiwrap = None
posts_to_process_qs = None
post_count = -1
current_post = None
post_counter = -1
continue_collecting = True
current_rw_post = None
do_update_existing = False
# variables for dealing with intermittent connection problems.
comments_collected = False
connection_error_count = -1
temp_exception_string = ""
exception_details = ""
exception_message = ""
error_email_subject = ""
error_email_message = ""
error_email_status = ""
# variables for storing comments in database.
django_do_bulk_create = True
django_comment_create_list = []
comment_create_count = -1
django_current_create_count = -1
# variables for updating post based on comment collection.
do_update_post = False
# variables for exception handling.
exception_type = ""
exception_value = ""
exception_traceback = ""
# variables for summary information
new_posts_processed = -1
first_reddit_id_processed = ""
start_dt = ""
temp_string = ""
summary_string = ""
summary_email_subject = ""
summary_email_message = ""
# initialize summary helper
my_summary_helper = SummaryHelper()
# get reddiwrap instance
reddiwrap = self.get_reddiwrap_instance()
# initialize variables
post_counter = 0
comment_create_count = 0
django_do_bulk_create = False
django_bulk_create_list = []
django_bulk_create_count = 0
start_dt = datetime.datetime.now()
# set bulk create flag
django_do_bulk_create = self.do_bulk_create
# updating existing?
do_update_existing = do_update_existing_IN
# check to see if we have a QuerySet
if ( ( posts_qs_IN ) and ( posts_qs_IN != None ) ):
# yes. Use QuerySet passed in.
posts_to_process_qs = posts_qs_IN
else:
# no - get all that are eligible to be processed.
posts_to_process_qs = reddit_collect.models.Post.objects.exclude( comment_collection_status = reddit_collect.models.Post.COMMENT_COLLECTION_STATUS_DONE )
#-- END check to see if posts passed in --#
# big outer try/except.
try:
# loop over posts.
post_counter = 0
do_update_post = False
django_current_create_count = 0
continue_collecting = True
post_count = len( posts_to_process_qs )
for current_post in posts_to_process_qs:
# see if it is OK to continue.
# call may_i_continue() if other than first post
if ( post_counter > 0 ):
# not first post. call may_i_continue()
continue_collecting = self.may_i_continue()
#-- END check to see if first post --#
# OK to continue?
if continue_collecting == True:
# reset variables
do_update_post = False
django_current_create_count = 0
# increment post counter
post_counter += 1
print( "- " + str( post_counter ) + " of " + str( post_count ) + " - " + str( datetime.datetime.now() ) + " - post " + str( current_post.id ) + " ( reddit ID: " + current_post.reddit_id + " ) by " + current_post.author_name + " - num_comments: " + str( current_post.num_comments ) + " - created on " + str( current_post.created_utc_dt ) )
# memory management.
gc.collect()
django.db.reset_queries()
# set request start time (OK to be a little inefficient)
self.start_request()
# populate a reddiwrap Post instance.
current_rw_post = current_post.create_reddiwrap_post()
# use reddiwrap to load comments.
# wrap in loop
comments_collected = False
connection_error_count = 0
while ( comments_collected == False ):
try:
reddiwrap.fetch_comments( current_rw_post, self.response_item_limit, "old" );
comments_collected = True
except Exception as e:
# set flag to False
comments_collected = False
# increment error count.
connection_error_count += 1
# make exception message
exception_message = "In " + me + ": reddiwrap.fetch_comments() threw exception, fetching comments for post " + str( current_post.id ) + " ( reddit ID: " + current_post.reddit_id + " ); post " + str( post_counter ) + " of " + str( post_count )
# are we up to error limit yet?
if ( connection_error_count >= self.error_limit_count ):
# yes - send email about problems
error_email_subject = "Connection problem with comment collector."
exception_message = "Comment collector failed to connect " + str( self.error_limit_count ) + " times. Details:\n" + exception_message
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception
raise( e )
else:
# haven't reached error limit yet. Process exception,
# no email, pause 10 seconds, then try again.
self.process_exception( e, exception_message, False )
time.sleep( 10 )
#-- END check to see if we've exceeded error limit. --#
#-- END try/except around collecting comments from reddit. --#
#-- END loop around collecting comments --#
# !update - bulk or not?
if ( django_do_bulk_create == True ):
# so we can get counts, set self.bulk_comments_processed to 0 before each call.
self.bulk_comments_processed = 0
# process comment list in bulk (recursive)
django_bulk_create_list = self.process_comments_bulk( post_IN = current_post,
comment_list_IN = current_rw_post.comments,
do_update_existing_IN = do_update_existing )
# get number of comments processed.
django_current_create_count = self.bulk_comments_processed
comment_create_count += django_current_create_count
else:
# process comment list (recursive)
django_current_create_count = self.process_comments( post_IN = current_post, comment_list_IN = current_rw_post.comments, do_update_existing_IN = do_update_existing )
# increment total count
comment_create_count += django_current_create_count
#-- END check to see if bulk or not. --#
# !Update post?
# update the post to show that it has been comment-harvested.
if ( current_post.comment_collection_status == reddit_collect.models.Post.COMMENT_COLLECTION_STATUS_NEW ):
# update status to "ongoing".
current_post.comment_collection_status = reddit_collect.models.Post.COMMENT_COLLECTION_STATUS_ONGOING
# we need to save updates.
do_update_post = True
#-- END check to see if first-time updating comments. --#
# check to see if more comments detected that reddiwrap
# couldn't pull in.
if ( current_rw_post.has_more_comments == True ):
# yes, more comments. Store off details.
current_post.has_more_comments = current_rw_post.has_more_comments
current_post.more_comments_details = current_rw_post.more_comments
# we need to save updates.
do_update_post = True
#-- END check to see if more comments detected. --#
# did we actually process any comments?
if ( django_current_create_count >= 0 ):
# we did. set number of comments processed.
current_post.num_comments_collected = django_current_create_count
# we need to save updates.
do_update_post = True
#-- END check to see if we have a valid comment count --#
# update post?
if ( do_update_post == True ):
# we do. call save() method.
current_post.save()
#-- END check to see if we update post. --#
else:
# may_i_continue() returned False. Once that happens once,
# unlikely it will return True ever again.
print( "====> In " + me + ": may_i_continue() returned False. This shouldn't be possible. Falling out of loop." )
break
#-- END check to see if we are OK to continue collecting. --#
print( " ==> In " + me + ": processed " + str( django_current_create_count ) + " comments." )
#-- END loop over posts. --#
except Exception as e:
# yes - send email about problems
error_email_subject = "Unexpected problem with comment collector."
exception_message = "Unexpected problem with comment collector. Details:\n"
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
#raise( e )
#-- END super try/except around entire loop.
# output overall summary
summary_string = ""
# add stuff to summary
my_summary_helper.set_stop_time()
my_summary_helper.set_prop_value( "post_count", post_count )
my_summary_helper.set_prop_desc( "post_count", "Posts passed in" )
my_summary_helper.set_prop_value( "post_counter", post_counter )
my_summary_helper.set_prop_desc( "post_counter", "Posts processed" )
my_summary_helper.set_prop_value( "comment_create_count", comment_create_count )
my_summary_helper.set_prop_desc( "comment_create_count", "Comments processed" )
summary_string += my_summary_helper.create_summary_string( item_prefix_IN = "==> " )
print( summary_string )
# email summary
summary_email_subject = "Comment collection complete - " + str( datetime.datetime.now() )
summary_email_message = "Comment collection summary:\n"
summary_email_message += summary_string
summary_email_status = self.email_send_status( summary_email_message, summary_email_subject )
print( "==> Summary email status: " + summary_email_status )
return status_OUT
#-- END method collect_comments() --#
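# Example (hypothetical usage, not from this project) - "collector" below is
# assumed to be an already-configured RedditCollector instance, and the
# reddit IDs are placeholders. Collect comments for just a few posts,
# without re-saving comments already in the database:
#
#     post_qs = reddit_collect.models.Post.objects.filter( reddit_id__in = [ "1a2b3c", "4d5e6f" ] )
#     collector.collect_comments( posts_qs_IN = post_qs, do_update_existing_IN = False )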
def collect_posts( self,
subreddit_IN = "all",
post_count_limit_IN = -1,
until_id_IN = "",
until_date_IN = None,
subreddit_in_list_IN = [],
after_id_IN = None,
before_id_IN = None,
do_update_existing_IN = True,
*args,
**kwargs ):
'''
This method collects posts from any subreddit you want, defaulting to the
/r/all subreddit, which allows access to the entire history of reddit.
Accepts parameters that let you collect from a given ID on (the
easiest way to collect starting at a certain date - find a post around
the date you want, collect from that ID on), to a certain date, until
you find a certain post ID, etc.
Parameters:
- subreddit_IN - defaults to "all". Subreddit you want to collect from.
- post_count_limit_IN - number of posts we want to collect.
- until_id_IN - value of ID we collect until we encounter in the stream (should include type - so begin with "t3_").
- until_date_IN - datetime instance of UTC/GMT date and time we want to collect to (will stop collecting once a date after this is encountered).
- subreddit_in_list_IN - list of subreddits to limit our collection to (each should begin with "t5_"). If you use this, in most cases, you should leave subreddit_IN = "all".
- after_id_IN - ID you want to get posts after. Must include type (start with "t3_").
- before_id_IN - ID before which you want posts. Must include type (start with "t3_").
- do_update_existing_IN - Boolean, True if we want to update existing posts that are already in the database, false if not. Defaults to True.
Parameters to come (TK):
- start_date_IN - datetime instance of date and time after which we want to collect (will ignore until a post is greater-than-or-equal to this date). For now, to collect from a certain date, find a post around the date you want, collect from that ID on using the after_id_IN parameter.
Postconditions: Stores each matching post to the database using django
model classes. Returns a status message.
'''
# return reference
status_OUT = self.STATUS_SUCCESS
# declare variables
me = "collect_posts"
my_summary_helper = None
reddiwrap = None
post_count = -1
api_url = ""
post_list = None
continue_collecting = True
current_rw_post = None
current_post_reddit_id = ""
current_post_created = ""
current_post_created_dt = None
current_post_subreddit_id = ""
do_update_existing = False
# variables for storing post in database.
django_do_bulk_create = True
django_post_create_list = []
django_bulk_create_count = -1
django_current_create_count = -1
django_post = None
is_post_in_database = False
do_call_post_save = False
# variables for exception handling.
exception_type = ""
exception_value = ""
exception_traceback = ""
# variables for summary information
new_posts_processed = -1
update_count = -1
first_reddit_id_processed = ""
start_dt = None
temp_dt = None
temp_string = ""
summary_string = ""
summary_email_subject = ""
summary_email_message = ""
# get reddiwrap instance
reddiwrap = self.get_reddiwrap_instance()
# initialize variables
post_count = 0
new_posts_processed = 0
update_count = 0
django_do_bulk_create = self.do_bulk_create
django_bulk_create_count = 0
# initialize summary helper
my_summary_helper = SummaryHelper()
# updating existing?
do_update_existing = do_update_existing_IN
# create URL - first, add in reddit, limit.
api_url = "/r/%s/new?limit=100" % subreddit_IN
# add ability to add parameterized limit to URL?
# after param?
if ( ( after_id_IN ) and ( after_id_IN != None ) and ( after_id_IN != "" ) ):
# yes. Add it to the URL.
api_url += "&after=" + after_id_IN
#-- END check to see if after ID passed in. --#
# before param?
if ( ( before_id_IN ) and ( before_id_IN != None ) and ( before_id_IN != "" ) ):
# yes. Add it to the URL.
api_url += "&before=" + before_id_IN
#-- END check to see if after ID passed in. --#
# big outer try/except.
try:
# loop until flag is false
while continue_collecting == True:
print( "In " + me + ": top of loop - " + str( datetime.datetime.now() ) + " - latest post = " + current_post_reddit_id + " ( " + str( current_post_created_dt ) + " ), number " + str( post_count ) + "." )
# memory management.
gc.collect()
django.db.reset_queries()
# set request start time (OK to be a little inefficient)
self.start_request()
# bulk create?
if ( django_do_bulk_create == True ):
# clear out the bulk create list.
django_post_create_list = []
#-- END check to see if doing bulk create. --#
# get first set of results, or grab next set of results.
if ( post_count == 0 ):
# get first set of results for /r/all
post_list = reddiwrap.get( api_url )
else:
# get next set of posts.
post_list = reddiwrap.get_next()
#-- END check to see how we grab more posts. --#
temp_dt = datetime.datetime.now()
print( "In " + me + ": after retrieving stuff from reddit - " + str( temp_dt ) + "; elapsed: " + str( temp_dt - self.request_start_time ) + " - latest post = " + current_post_reddit_id + " ( " + str( current_post_created_dt ) + " ), number " + str( post_count ) + "." )
#--------------------------------------------------------------------
# loop over posts.
#--------------------------------------------------------------------
for current_rw_post in post_list:
# increment post counter.
post_count += 1
# initialize variables
do_call_post_save = False
# get info. on current post.
current_post_reddit_id = current_rw_post.id
current_post_id_with_type = "t3_" + current_post_reddit_id
current_post_created = current_rw_post.created_utc
current_post_created_dt = datetime.datetime.fromtimestamp( int( current_post_created ) )
current_post_subreddit_id = current_rw_post.subreddit_id
current_post_subreddit_name = current_rw_post.subreddit
current_post_url = current_rw_post.url
# initialize variables
is_post_in_database = False
if ( self.do_output_details == True ):
print( "In " + me + ": reddit post " + current_post_id_with_type + " is post number " + str( post_count ) + ", subreddit = " + current_post_subreddit_name + ": URL = " + current_post_url )
#-- END DEBUG --#
# first post? (I know, couldn't think of a better way...)
if ( post_count == 1 ):
# store the first ID.
first_reddit_id_processed = current_post_id_with_type
#-- END check to see if post count = 1 --#
#----------------------------------------------------------------
# conditions for stopping collection
#----------------------------------------------------------------
# do we have a post count limit?
if ( ( post_count_limit_IN ) and ( post_count_limit_IN > 0 ) ):
# yes - has the post count exceeded this limit?
if ( post_count > post_count_limit_IN ):
# it is. stop.
continue_collecting = False
print( "In " + me + ": reddit post " + current_post_reddit_id + " is post number " + str( post_count ) + ", putting us over our limit of " + str( post_count_limit_IN ) + ". Stopping collection." )
#-- END check to see if current post puts us over our post limit. --#
#-- END check for post count limit. --#
# do we have an until ID?
if ( ( until_id_IN ) and ( until_id_IN != "" ) ):
# is current ID the until ID?
if ( current_post_reddit_id == until_id_IN ):
# it is. stop.
continue_collecting = False
print( "In " + me + ": reddit post " + current_post_reddit_id + " is our until post ( " + until_id_IN + " ). Stopping collection." )
#-- END check to see if current post is post at which we are to stop. --#
#-- END check for until ID. --#
# do we have an until date?
if ( ( until_date_IN ) and ( until_date_IN != None ) ):
#-- we have an until date... is current date less than until date?
if ( current_post_created_dt < until_date_IN ):
# it is. stop.
continue_collecting = False
print( "In " + me + ": reddit post " + current_post_reddit_id + " has date " + str( current_post_created_dt ) + " that is past our until date. Stopping collection." )
#-- END check to see if post's date is past the cutoff. --#
#-- END check to see if we have an until date --#
#----------------------------------------------------------------
# collection logic
#----------------------------------------------------------------
# do we continue collecting?
if ( continue_collecting == True ):
# Only process if either there is no subreddit list, or the
# subreddit is in the list.
if ( ( len( subreddit_in_list_IN ) <= 0 ) or ( current_post_subreddit_id in subreddit_in_list_IN ) ):
# ==> post already in database?
try:
# lookup post.
django_post = reddit_collect.models.Post.objects.get( reddit_id = current_post_reddit_id )
# post is in database
is_post_in_database = True
# print( "In " + me + ": reddit post " + current_post_reddit_id + " is already in database." )
except:
# Not found. Create new instance, set flag.
django_post = reddit_collect.models.Post()
is_post_in_database = False
#-- END - check for post in database --#
# ==> Got existing? (Could put this in except, still not
# sure how I feel about using exceptions for program
# flow)
# OLD - allowing for update now.
#if ( django_post == None ):
# set fields from reddiwrap post instance.
django_post.set_fields_from_reddiwrap( current_rw_post, self.convert_4_byte_unicode_to_entity )
# !==> How do we process this post?
# - First, check if in database or not.
# - We add to bulk list if bulk is turned on AND comment not in database.
if ( is_post_in_database == False ):
# not in database. Increment new post count.
new_posts_processed += 1
# new post - bulk or not?
if ( django_do_bulk_create == True ):
# bulk create. Add to list.
django_post_create_list.append( django_post )
# bulk. No save.
do_call_post_save = False
else:
# not bulk. Just save.
do_call_post_save = True
#-- END check to see if bulk or not. --#
# if in database, if also are updating, set save() flag.
elif ( ( is_post_in_database == True ) and ( do_update_existing == True ) ):
# in database - increment update count.
update_count += 1
# not bulk. Just save.
do_call_post_save = True
else:
# for all others (probably existing, but not
# updating), don't save.
do_call_post_save = False
#-- END check to see how we process post --#
# see if we need to call save()
if ( do_call_post_save == True ):
# exception handling around save, to deal with encoding (!).
try:
# save to database.
django_post.save()
except Exception as e:
# error saving. Probably encoding error.
# process exception.
error_email_subject = "Problem saving post."
exception_message = "In " + me + ": reddit post " + current_post_reddit_id + " threw exception on save()."
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try-except around save() --#
#-- END check to see if we call save() --#
#-- END check to see if subreddit list indicates we should process this post. --#
#-- END check to see if we continue collecting --#
#-- END loop over current set of posts. --#
#--------------------------------------------------------------------
# bulk create?
#--------------------------------------------------------------------
if ( django_do_bulk_create == True ):
# yes, bulk create. Anything in the create list?
django_current_create_count = len( django_post_create_list )
if ( django_current_create_count > 0 ):
# yes. Bulk create, then update count.
# exception handling around save, to deal with encoding (!).
try:
# save to database using bulk_create().
reddit_collect.models.Post.objects.bulk_create( django_post_create_list )
except Exception as e:
# error saving. Probably encoding error.
# process exception.
error_email_subject = "Problem bulk-saving posts."
exception_message = "In " + me + ": bulk_create() threw exception. Last reddit post ID processed: " + current_post_reddit_id + "; count of posts being bulk created = " + str( django_current_create_count )
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try-except around bulk_create() --#
# increment the total posts created counter
django_bulk_create_count += django_current_create_count
# could empty create list here, but doing it at top of loop,
# so no need to do it twice.
#-- END check to see if posts to create --#
#-- END check to see if bulk create --#
#--------------------------------------------------------------------
# if we haven't already decided to stop, check if we can continue.
#--------------------------------------------------------------------
if ( continue_collecting == True ):
# no reason to stop yet... Do we have more posts?
if ( reddiwrap.has_next() == False ):
# no - do not continue.
continue_collecting = False
else:
# see if we are allowed to continue.
continue_collecting = self.may_i_continue()
#-- END checks to see if we continue collecting. --#
#-- END check to see if we continue collecting. --#
#-- END outer reddit collection loop --#
except Exception as e:
# yes - send email about problems
error_email_subject = "Unexpected problem with post collector."
exception_message = "Unexpected problem with post collector. Details:\n"
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
#raise( e )
#-- END super try/except around entire loop.
# output overall summary
summary_string = ""
# add stuff to summary
my_summary_helper.set_stop_time()
my_summary_helper.set_prop_value( "post_count", post_count )
my_summary_helper.set_prop_desc( "post_count", "Posts processed" )
my_summary_helper.set_prop_value( "new_posts_processed", new_posts_processed )
my_summary_helper.set_prop_desc( "new_posts_processed", "New posts" )
if ( do_update_existing == True ):
my_summary_helper.set_prop_value( "update_count", update_count )
my_summary_helper.set_prop_desc( "update_count", "Updated posts" )
#-- END check to see if we are updating. --#
if ( django_do_bulk_create == True ):
my_summary_helper.set_prop_value( "django_bulk_create_count", django_bulk_create_count )
my_summary_helper.set_prop_desc( "django_bulk_create_count", "Posts bulk_create()'ed" )
#-- END check to see if bulk create --#
my_summary_helper.set_prop_value( "first_reddit_id_processed", first_reddit_id_processed )
my_summary_helper.set_prop_desc( "first_reddit_id_processed", "First reddit ID processed" )
my_summary_helper.set_prop_value( "current_post_reddit_id", current_post_reddit_id )
my_summary_helper.set_prop_desc( "current_post_reddit_id", "Last reddit ID processed" )
summary_string += my_summary_helper.create_summary_string( item_prefix_IN = "==> " )
print( summary_string )
# email summary
summary_email_subject = "Post collection complete - " + str( datetime.datetime.now() )
summary_email_message = "Post collection summary:\n"
summary_email_message += summary_string
summary_email_status = self.email_send_status( summary_email_message, summary_email_subject )
print( "==> Summary email status: " + summary_email_status )
return status_OUT
#-- END method collect_posts() --#
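# Example (hypothetical values, not from this project) - a date-bounded run,
# again assuming "collector" is an already-configured RedditCollector
# instance. Collection stops once posts older than the until date are seen:
#
#     until_dt = datetime.datetime( 2013, 1, 1 )
#     collector.collect_posts( subreddit_IN = "politics",
#         until_date_IN = until_dt,
#         do_update_existing_IN = False )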
def create_reddiwrap_instance( self, *args, **kwargs ):
'''
Creates and returns ReddiWrap instance for User Agent in this
instance, and if there is both a username and a password, also logs it
in using those credentials. If error, returns None.
'''
# return reference
instance_OUT = None
# declare variables
my_user_agent = ""
my_cookie_file_path = ""
my_username = ""
my_password = ""
do_login = False
login_result = -1
# create new instance.
my_user_agent = self.user_agent
instance_OUT = ReddiWrap( user_agent = my_user_agent )
# do we have a cookie file path? If so, try to load cookies.
my_cookie_file_path = self.cookie_file_path
if ( ( my_cookie_file_path ) and ( my_cookie_file_path != "" ) ):
instance_OUT.load_cookies( my_cookie_file_path )
#-- END check to see if cookie file path --#
# got username and password?
my_username = self.username
my_password = self.password
if ( ( ( my_username ) and ( my_username != "" ) ) and ( ( my_password ) and ( my_password != "" ) ) ):
# from cookie file, is this user already authenticated?
if ( instance_OUT.logged_in == False ):
# log in.
do_login = True
# logged in - same username? If not, log in again.
elif ( instance_OUT.user.lower() != my_username.lower() ):
# log in.
do_login = True
else:
# logged_in is True and it is the same user name. No need to
# log in again.
do_login = False
#-- END check to see if we need to log in. --#
# Do we need to log in?
if ( do_login == True ):
# yes, we need to login. Try it.
print('logging into %s' % my_username)
login_result = instance_OUT.login( user = my_username, password = my_password )
# success?
if ( login_result != 0 ):
# fail. Output message.
print( 'ERROR - unable to log in with username: %s; password: %s (error code %d where 1 = invalid password, 2 = over rate limit, -1 = unexpected error)' % ( my_username, my_password, login_result ) )
# return None?
# instance_OUT = None
else:
# success! If cookie path, update cookies.
if ( ( my_cookie_file_path ) and ( my_cookie_file_path != "" ) ):
# save cookies.
instance_OUT.save_cookies( my_cookie_file_path )
#-- END check to see if we have a cookie file path. --#
#-- END check to see if success --#
#-- END check to see if we need to log in. --#
#-- END check to see if we have a username and a password. --#
return instance_OUT
#-- END create_reddiwrap_instance() --#
def email_initialize( self, smtp_host_IN = "localhost", smtp_port_IN = -1, smtp_use_ssl_IN = False, smtp_username_IN = "", smtp_password_IN = "", *args, **kwargs ):
'''
Accepts properties that can be used to initialize an email helper
instance. Initializes object, stores it in instance variable.
'''
# declare variables
my_email_helper = None
my_exception_helper = None
# create email helper
my_email_helper = EmailHelper()
# set host.
my_email_helper.set_smtp_server_host( smtp_host_IN )
# set port?
if ( ( smtp_port_IN ) and ( smtp_port_IN != None ) and ( smtp_port_IN > 0 ) ):
my_email_helper.set_smtp_server_port( smtp_port_IN )
#-- END check to see if port passed in. --#
# use ssl?
my_email_helper.set_smtp_server_use_SSL( smtp_use_ssl_IN )
# set username?
if ( ( smtp_username_IN ) and ( smtp_username_IN != None ) and ( smtp_username_IN != "" ) ):
my_email_helper.set_smtp_server_username( smtp_username_IN )
#-- END check to see if username passed in --#
# set password?
if ( ( smtp_password_IN ) and ( smtp_password_IN != None ) and ( smtp_password_IN != "" ) ):
my_email_helper.set_smtp_server_password( smtp_password_IN )
#-- END check to see if password passed in --#
# store in instance variable.
self.email_helper = my_email_helper
# Do we have an Exception Helper?
my_exception_helper = self.exception_helper
if ( ( my_exception_helper ) and ( my_exception_helper != None ) ):
# we do. Add the email_helper to the exception helper, also.
my_exception_helper.email_helper = my_email_helper
#-- END check to see if exception helper --#
#-- END method email_initialize() --#
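# Example (hypothetical SMTP settings) - configure status and error email
# before starting a collection run; the host, credentials, and address below
# are placeholders:
#
#     collector.email_initialize( smtp_host_IN = "smtp.example.com",
#         smtp_port_IN = 465,
#         smtp_use_ssl_IN = True,
#         smtp_username_IN = "mailer_user",
#         smtp_password_IN = "mailer_password" )
#     collector.set_email_status_address( "status@example.com" )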
def email_send( self, message_IN = None, subject_IN = None, from_address_IN = None, to_address_IN = None, *args, **kwargs ):
'''
Uses nested email_helper instance to send email. Returns status message.
If status returned is email_helper.STATUS_SUCCESS, then success, if
anything else, it is an error message explaining why the email was not
sent.
'''
# return reference
status_OUT = ""
# declare variables
my_email_helper = None
# get email helper
my_email_helper = self.email_helper
# got a helper?
if ( ( my_email_helper ) and ( my_email_helper != None ) ):
# yes - send email
status_OUT = my_email_helper.send_email( message_IN, subject_IN, from_address_IN, to_address_IN )
else:
# no - error.
status_OUT = "ERROR - no email helper present, so can't send email."
#-- END check to see if we have an email helper. --#
return status_OUT
#-- END method email_send() --#
def email_send_status( self, message_IN = None, subject_IN = None, *args, **kwargs ):
'''
If email helper and status email are set, uses nested email_helper
instance to send email to status email. Returns status message.
If status returned is email_helper.STATUS_SUCCESS, then success, if
anything else, it is an error message explaining why the email was not
sent.
'''
# return reference
status_OUT = ""
# declare variables
my_email_helper = None
my_status_email = ""
# get email helper and status address
my_email_helper = self.email_helper
my_status_email = self.email_status_address
# got a helper?
if ( ( my_email_helper ) and ( my_email_helper != None ) ):
# yes. Got a status email address?
if ( ( my_status_email ) and ( my_status_email != None ) and ( my_status_email != "" ) ):
# yes - send email
status_OUT = my_email_helper.send_email( message_IN, subject_IN, my_status_email, my_status_email )
else:
# no status email address set.
status_OUT = "ERROR - no email address set for sending status messages. Can't send email status."
#-- END check to see if status email present --#
else:
# no - error.
status_OUT = "ERROR - no email helper present, so can't send email."
#-- END check to see if we have a mail helper. --#
return status_OUT
#-- END method email_send_status() --#
def get_reddiwrap_instance( self, *args, **kwargs ):
'''
If there is a reddiwrap instance already in this instance, returns it.
If not, creates and returns ReddiWrap instance for User Agent in this
instance, and if there is both a username and a password, also logs it
in using those credentials. Stores a newly created instance in this
object, so it can be re-used. If error, returns None.
'''
# return reference
instance_OUT = None
# declare variables
instance_OUT = self.reddiwrap_instance
if ( ( not instance_OUT ) or ( instance_OUT == None ) ):
# create new instance.
instance_OUT = self.create_reddiwrap_instance()
# store it.
self.reddiwrap_instance = instance_OUT
# retrieve from that variable, just so we make sure it got stored.
instance_OUT = self.reddiwrap_instance
#-- END check to see if we already have a reddiwrap instance. --#
return instance_OUT
#-- END get_reddiwrap_instance() --#
def process_comments( self,
post_IN = None,
comment_list_IN = [],
parent_comment_IN = None,
do_update_existing_IN = True,
*args,
**kwargs ):
'''
Accepts django reddit_collect.models.Post instance, list of reddiwrap
comment instances. Loops over all comments in the list, processing
each, then checking for child comments. If child(ren) found, calls
this routine again, also passing parent comment, so they reference
both root parent post and parent comment. Returns count of comments
created. This method creates all django relations as well as storing
IDs from reddit. The process_comments_bulk() method stores reddit IDs
so comment relations can be pieced together, but doesn't create django
relations, as well.
Parameters:
- post_IN - reddit_collect.models.Post instance, so we can relate comments to their post.
- comment_list_IN - list of reddiwrap Comment instances we are to store in the database.
- parent_comment_IN - reddit_collect.models.Comment instance of parent comment, so we can relate the child comment back to it.
- do_update_existing_IN - Boolean, True if we want to update existing comments that are already in the database, false if not. Defaults to True.
'''
# return reference
comment_count_OUT = 0
# declare variables
me = "process_comments"
do_update_existing = False
comment_count = -1
update_count = -1
new_comment_count = -1
current_rw_comment = None
comment_reddit_full_id = ""
django_comment = None
is_comment_in_database = False
django_do_bulk_create = False
comment_children = None
child_count = -1
# initialize variables
comment_count = 0
new_comment_count = 0
update_count = 0
# updating existing?
do_update_existing = do_update_existing_IN
if ( self.do_output_details == True ):
print( "In " + me + ": update existing?: " + str( do_update_existing ) )
#-- END check to see if outputting details --#
# do we have a comment list
if ( ( comment_list_IN ) and ( len( comment_list_IN ) > 0 ) ):
# we have comments. Loop over them.
for current_rw_comment in comment_list_IN:
# increment count
comment_count += 1
# get the full ID
comment_reddit_full_id = current_rw_comment.name
# ==> comment already in database?
try:
# lookup comment.
django_comment = reddit_collect.models.Comment.objects.get( reddit_full_id = comment_reddit_full_id )
# post is in database
is_comment_in_database = True
if ( self.do_output_details == True ):
print( "==> In " + me + ": reddit comment " + comment_reddit_full_id + " is in database." )
#-- END check to see if outputting details --#
except:
# Not found. Create new instance, set flag.
django_comment = reddit_collect.models.Comment()
is_comment_in_database = False
if ( self.do_output_details == True ):
print( "==> In " + me + ": reddit comment " + comment_reddit_full_id + " is not in database." )
#-- END check to see if outputting details --#
#-- END - check for comment in database --#
# ==> Got existing? (Could put this in except, still not
# sure how I feel about using exceptions for program
# flow)
# OLD - allowing for update now.
#if ( django_comment == None ):
# ==> Do we process this comment? We do if:
# - comment is not in database. - OR -
# - comment is in database, but update flag is true.
if ( ( is_comment_in_database == False ) or ( ( is_comment_in_database == True ) and ( do_update_existing == True ) ) ):
if ( self.do_output_details == True ):
print( "====> In " + me + ": processing reddit comment " + comment_reddit_full_id )
#-- END check to see if outputting details --#
# Update appropriate counter
if ( is_comment_in_database == True ):
# in database - increment update count.
update_count += 1
else:
# not in database. Increment new post count.
new_comment_count += 1
#-- END counter increment. --#
# create model instance.
# OLD - already have instance now.
#django_comment = reddit_collect.models.Comment()
# set fields from reddiwrap instance.
django_comment.set_fields_from_reddiwrap( current_rw_comment, self.convert_4_byte_unicode_to_entity )
# if post, set post (better be a post).
if ( ( post_IN ) and ( post_IN is not None ) ):
# store reference to post in comment.
django_comment.post = post_IN
# does the post reference a subreddit?
if ( ( post_IN.subreddit ) and ( post_IN.subreddit is not None ) ):
# yes - put reference to it in comment, as well.
django_comment.subreddit = post_IN.subreddit
#-- END check to see if post has a subreddit. --#
#-- END check to see if related post passed in. --#
# if parent comment, set it.
if ( ( parent_comment_IN ) and ( parent_comment_IN is not None ) ):
django_comment.parent = parent_comment_IN
#- END check to see if parent_comment_IN --#
# exception handling around save, to deal with encoding (!).
try:
# save to database.
django_comment.save()
except Exception as e:
# error saving. Probably encoding error.
# process exception.
error_email_subject = "Problem saving comment."
exception_message = "In " + me + ": reddit comment " + comment_reddit_full_id + " threw exception on save()."
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try-except around save() --#
#-- END check to see if already in database --#
# does current comment have children?
comment_children = current_rw_comment.children
if ( ( comment_children ) and ( len( comment_children ) > 0 ) ):
if ( self.do_output_details == True ):
print( "====> In " + me + ": processing children of reddit comment " + comment_reddit_full_id )
#-- END check to see if outputting details --#
# yes. Recurse!
child_count = self.process_comments( post_IN, comment_children, django_comment, do_update_existing_IN )
# add child count to comment_count
comment_count += child_count
#-- END check to see if there are comments --#
#-- END loop over comments. --#
#-- END check to see if comments. --#
# return comment_count
comment_count_OUT = comment_count
return comment_count_OUT
#-- END method process_comments --#
def process_comments_bulk( self,
post_IN = None,
comment_list_IN = [],
do_update_existing_IN = True,
level_IN = 0,
*args,
**kwargs ):
'''
Accepts django reddit_collect.models.Post instance, list of reddiwrap
comment instances. Loops over all comments in the list, processing
each, then checking for child comments. If child(ren) found, calls
this routine again, passing post and list of children, so they
reference root parent post. Returns list of comments
that need to be bulk saved. This method stores reddit IDs so comment
relations can be pieced together, but doesn't create django relations,
as well. The process_comments() method creates all django
relations as well as storing IDs from reddit. Lots more queries,
though.
Parameters:
- post_IN - reddit_collect.models.Post instance, so we can relate comments to their post.
- comment_list_IN - list of reddiwrap Comment instances we are to store in the database.
'''
# return reference
comment_list_OUT = []
# declare variables
me = "process_comments_bulk"
comment_count = -1
new_comment_count = -1
updated_comment_count = -1
current_rw_comment = None
comment_reddit_full_id = ""
django_comment = None
found_existing = False
django_do_bulk_create = False
comment_children = None
child_comment_list = []
django_bulk_create_count = -1
# initialize variables
comment_count = 0
new_comment_count = 0
updated_comment_count = 0
if ( self.do_output_details == True ):
print( "In " + me + ": at level " + str( level_IN ) + " - update existing?: " + str( do_update_existing_IN ) )
#-- END check to see if outputting details --#
# do we have a comment list
if ( ( comment_list_IN ) and ( len( comment_list_IN ) > 0 ) ):
# we have comments. Loop over them.
for current_rw_comment in comment_list_IN:
# increment count
comment_count += 1
# reset found flag
found_existing = False
# get the full ID
comment_reddit_full_id = current_rw_comment.name
# ==> comment already in database?
try:
# lookup comment.
django_comment = reddit_collect.models.Comment.objects.get( reddit_full_id = comment_reddit_full_id )
if ( self.do_output_details == True ):
print( "==> In " + me + ": reddit comment " + comment_reddit_full_id + " IS ALREADY in database." )
#-- END check to see if outputting details. --#
# increment updated count
updated_comment_count += 1
# set found flag.
found_existing = True
except:
# Not found. Make new instance.
django_comment = reddit_collect.models.Comment()
if ( self.do_output_details == True ):
print( "==> In " + me + ": reddit comment " + comment_reddit_full_id + " NOT in database." )
#-- END check to see if outputting details. --#
# not in database. Add it.
new_comment_count += 1
# set found flag.
found_existing = False
#-- END - check for comment in database --#
# set fields from reddiwrap instance.
django_comment.set_fields_from_reddiwrap( current_rw_comment, self.convert_4_byte_unicode_to_entity )
# if post, set post (better be a post).
if ( ( post_IN ) and ( post_IN != None ) ):
# store reference to post in comment.
django_comment.post = post_IN
# does the post reference a subreddit?
if ( ( post_IN.subreddit ) and ( post_IN.subreddit is not None ) ):
# yes - put reference to it in comment, as well.
django_comment.subreddit = post_IN.subreddit
#-- END check to see if post has a subreddit. --#
#-- END check to see if related post passed in. --#
# ==> Got existing? (Could put this in except, still not
# sure how I feel about using exceptions for program
# flow)
if ( found_existing == False ):
# append instance to list
comment_list_OUT.append( django_comment )
if ( self.do_output_details == True ):
print( "====> In " + me + ": new reddit comment " + comment_reddit_full_id + " ADDED to bulk list." )
#-- END check to see if outputting details. --#
# if existing, are we to update?
elif ( ( found_existing == True ) and ( do_update_existing_IN == True ) ):
# save updates to existing comment.
# exception handling around save, to deal with encoding (!).
try:
# save to database.
django_comment.save()
except Exception as e:
# error saving. Probably encoding error.
# process exception.
error_email_subject = "Problem saving comment."
exception_message = "In " + me + ": reddit comment " + comment_reddit_full_id + " threw exception on save()."
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try-except around save() --#
if ( self.do_output_details == True ):
print( "====> In " + me + ": existing reddit comment " + comment_reddit_full_id + " UPDATED." )
#-- END check to see if outputting details. --#
#-- END check to see if already in database --#
# does current comment have children?
comment_children = current_rw_comment.children
if ( ( comment_children ) and ( len( comment_children ) > 0 ) ):
if ( self.do_output_details == True ):
print( "======> In " + me + ": processing children of reddit comment " + comment_reddit_full_id )
#-- END check to see if outputting details --#
# yes. Recurse!
child_comment_list = self.process_comments_bulk( post_IN = post_IN, comment_list_IN = comment_children, do_update_existing_IN = do_update_existing_IN, level_IN = level_IN + 1 )
# add instances in child list to the return list.
comment_list_OUT.extend( child_comment_list )
#-- END check to see if there are child comments --#
#-- END loop over comments. --#
# update count of comments created or updated.
self.bulk_comments_processed += new_comment_count
self.bulk_comments_processed += updated_comment_count
# do bulk_create()? Must be at calling level 0, and must have
# something in our list.
if ( ( level_IN == 0 ) and ( comment_list_OUT ) and ( len( comment_list_OUT ) > 0 ) ):
# get count
django_bulk_create_count = len( comment_list_OUT )
if ( self.do_output_details == True ):
print( "In " + me + ": at level 0 - bulk creating " + str( django_bulk_create_count ) + " comments." )
#-- END check to see if outputting details --#
# try/except around saving.
try:
# try bulk create.
reddit_collect.models.Comment.objects.bulk_create( comment_list_OUT )
except Exception as e:
# error saving. Probably encoding error.
# send email about problems
error_email_subject = "Problem bulk-saving comments."
exception_message = "In " + me + ": bulk_create() threw exception"
if ( ( post_IN ) and ( post_IN != None ) ):
exception_message += ", processing comments for post " + str( post_IN.id ) + " ( reddit ID: " + post_IN.reddit_id + " )"
#-- END check to see if post passed in (there better be!) --#
exception_message += " - count of comments being bulk created = " + str( django_bulk_create_count )
self.process_exception( e, exception_message, True, error_email_subject )
# throw exception?
raise( e )
#-- END try/except around saving. --#
#-- END check to see if anything to bulk create. --#
#-- END check to see if comments. --#
return comment_list_OUT
#-- END method process_comments_bulk --#
def process_exception( self, exception_IN = None, message_IN = "", send_email_IN = False, email_subject_IN = "", *args, **kwargs ):
# return reference
status_OUT = self.STATUS_SUCCESS
# declare variables
my_exception_helper = ""
# get exception helper
my_exception_helper = self.exception_helper
if ( ( my_exception_helper ) and ( my_exception_helper != None ) ):
# process using exception_helper.
status_OUT = my_exception_helper.process_exception( exception_IN, message_IN, send_email_IN, email_subject_IN )
#-- END check to see if we have a helper --#
return status_OUT
#-- END method process_exception() --#
def set_email_status_address( self, value_IN, *args, **kwargs ):
'''
Accepts an email address, stores it internally, and in the nested
exception helper, so it can be used to email exception messages.
'''
# declare variables
my_exception_helper = None
# store the value
self.email_status_address = value_IN
# see if we have an exception helper.
my_exception_helper = self.exception_helper
if ( ( my_exception_helper ) and ( my_exception_helper != None ) ):
# we do. Set it there, too.
my_exception_helper.email_status_address = value_IN
#-- END check to see if we have an exception helper --#
#-- END method set_email_status_address() --#
#-- END class RedditCollector. --#
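# Minimal usage sketch, not part of the original module - all values below
# are placeholders, and this assumes a configured django project with the
# reddit_collect application installed and ReddiWrap available.
if __name__ == "__main__":

    # create and configure collector.
    collector = RedditCollector()
    collector.user_agent = "reddit_collect usage sketch"
    collector.username = "example_user"            # placeholder
    collector.password = "example_password"        # placeholder
    collector.cookie_file_path = "cookies.txt"

    # optional - email status and error reports (settings are placeholders).
    collector.email_initialize( smtp_host_IN = "localhost" )
    collector.set_email_status_address( "status@example.com" )

    # collect a bounded batch of posts, then comments for posts not yet done.
    collector.collect_posts( subreddit_IN = "all", post_count_limit_IN = 1000 )
    collector.collect_comments()

#-- END usage sketch --#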
'''
#================================================================================
# Original Code
#================================================================================
reddit = ReddiWrap(user_agent='ReddiWrap')
USERNAME = 'Mr_Boy'
PASSWORD = 'Iman1234'
SUBREDDIT_NAMES = ['POLITICS', 'FUNNY', 'PICS' , 'todayilearned'];
while True:
for MOD_SUB in SUBREDDIT_NAMES:
print "######### " + MOD_SUB + " ###########";
# Load cookies from local file and verify cookies are valid
reddit.load_cookies('cookies.txt')
# If we had no cookies, or cookies were invalid,
# or the user we are logging into wasn't in the cookie file:
if not reddit.logged_in or reddit.user.lower() != USERNAME.lower():
print('logging into %s' % USERNAME)
login = reddit.login(user=USERNAME, password=PASSWORD)
if login != 0:
# 1 means invalid password, 2 means rate limited, -1 means unexpected error
print('unable to log in: %d' % login)
print('remember to change USERNAME and PASSWORD')
exit(1)
# Save cookies so we won't have to log in again later
reddit.save_cookies('cookies.txt')
print('logged in as %s' % reddit.user)
# uinfo = reddit.user_info()
# print('\nlink karma: %d' % uinfo.link_karma)
# print('comment karma: %d' % uinfo.comment_karma)
# created = int(uinfo.created)
# print('account created on: %s' % reddit.time_to_date(created))
# print('time since creation: %s\n' % reddit.time_since(created))
# # # # # # # # Finding Subreddit
print "Finding Subreddit ..."
subreddit = "";
flag = False; # if we find the subreddit, this flag is going to be True
while True:
subreddits = reddit.get('/reddits');
for subred in subreddits:
if subred.display_name == MOD_SUB.lower():
subreddit = subred;
flag = True;
break
if (not reddit.has_next()) or flag:
break;
time.sleep(2);
subreddits = reddit.get_next()
# # # # # # # # saving subreddit in subreddit table
print "Saving Subreddit ... ";
over18 = 0;
if subreddit.over18 :
over18 = 1;
if not myLib.exsits_row(subreddit.id, "Subreddit"):
myLib.insert_row([subreddit.id, subreddit.name, subreddit.display_name, subreddit.title, subreddit.url, subreddit.description,
subreddit.created, over18, int(subreddit.subscribers), subreddit.header_title] , "Subreddit");
# # # # # # # # Saving Posts
print "saving Posts ... "
posts = reddit.get('/r/%s' % MOD_SUB)
while True:
for post in posts:
if not myLib.exsits_row(post.id, "Post"):
# add the post to the Post table
myLib.insert_row(myLib.retrieve_post_traits(post), 'Post');
if not reddit.has_next():
break
time.sleep(2);
posts = reddit.get_next()
# subreddit = myLib.retreive_subreddit(MOD_SUB.lower());
posts = myLib.posts_of_reddit(subreddit.name); # correct
print "saving Comments ... ";
i = 0;
for post in posts:
pst = myLib.make_post_obj(post);
reddit.fetch_comments(pst);
myLib.iterate_comments(pst.comments); # iterates and save comments
time.sleep(1);
i = i + 1;
print i;
'''
|
On February 25-27, 2016, Ligonier Ministries hosted its 29th annual National Conference. James Anderson, Tim Challies, W. Robert Godfrey, Ian Hamilton, Tim Keesee, Greg Koukl, Steven Lawson, Albert Mohler, Stephen Nichols, Michael Reeves, Derek Thomas, and William VanDoodewaard joined R.C. Sproul to consider the transforming power of the gospel of Jesus Christ.
|
# -*- encoding: utf-8 -*-
# pilas engine: an engine for making video games
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
class Output(object):
"Representación abstracta de un archivo de salida de datos."
def __init__(self, destino):
self.destino = destino
class ErrorOutput(Output):
"Representa la salida de error en la consola."
def write(self, linea):
self.destino.stdout_original.write(linea)
# Only show the error in the console if it is a useful message.
if "Traceback (most" in linea or 'File "<input>", line 1' in linea:
self.destino.ensureCursorVisible()
return
if linea.startswith(' File "'):
linea = linea.replace("File", "en el archivo")
linea = linea.replace('line', 'linea')
linea = linea[:linea.find(', in')]
if 'NameError' in linea:
linea = linea.replace('name', 'el nombre').replace('is not defined', 'no existe')
self.destino.insertar_error(linea.decode('utf-8'))
self.destino.ensureCursorVisible()
class NormalOutput(Output):
"Representa la salida estándar de la consola."
def write(self, linea):
self.destino.stdout_original.write(linea)
self.destino.imprimir_linea(linea.decode('utf-8'))
self.destino.ensureCursorVisible()
if '<bound method' in linea:
print "\n\n ... Hey, tal vez olvidaste poner () al final de la anterior sentencia no?"
|
CONCORD Mass., Nov. 1, 1999—StarBurst Software today unveiled AutoSync, a new content synchronization application module that automates content synchronization, making it easier and less time consuming to distribute content updates to thousands of distributed servers. Running on top of StarBurst’s flagship OmniCast platform, AutoSync replaces manual updating and greatly reduces the time it takes to perform content synchronization since synchronization is performed simultaneously and automatically to all receiving servers.
AutoSync enables network managers to schedule simultaneous synchronization of content such as video, audio, business data, databases and software. AutoSync automates server synchronization regardless of geographic location, maintains availability even during content updates, efficiently uses network bandwidth and places critical business content on distributed servers as close as possible to the information consumer.
Companies facing strict time constraints can use AutoSync to centrally manage and automatically distribute content to thousands of servers simultaneously. AutoSync also saves companies the manpower needed to perform and monitor point-to-point content distributions – a time-consuming process requiring constant human interaction and attention.
AutoSync scans server content directories for changed, deleted or new content or directory structure changes and synchronizes content and directories on distributed replica servers. As content changes at the source, AutoSync automatically distributes it to all of the replica servers simultaneously. AutoSync enables businesses, for example, to replicate the contents of a Microsoft Windows NT server to multiple Windows servers using standard NT features to browse and select files and directories for synchronization. Such directories could contain product inventories, computer aided design (CAD) files, presentations, videos, manuals, software libraries or any content a directory will store. Before OmniCast, information could only be sent via multiple point-to-point transmissions, which took days to complete. Before AutoSync, files had to be manually found to create a content distribution. The combination of AutoSync and OmniCast melds automatic scanning and updating with simultaneous content distribution, creating a powerful, automated content distribution product suite.
"Until now, most businesses have relied on multi-phased content distribution plans which required manual intervention," said StarBurst vice president of sales and marketing, Bill Andrews. "By adding the AutoSync feature to OmniCast, we can offer all of the benefits of OmniCast such as one-to-many, simultaneous, guaranteed distribution without requiring any changes to the existing network – and we can take that one step further, to fully automate the process."
StarBurst OmniCast with AutoSync is immediately available on NT Servers. AutoSync is bundled with StarBurst OmniCast, the content distribution application, and StarBurst AutoCast, the transport engine for simultaneous content distributions.
StarBurst Software is the leader in developing e-business content distribution solutions that provide scalable, simultaneous and guaranteed transmission of content such as video, audio, software and large data files to hundreds, thousands, or tens of thousands of remote sites, and distributed devices without changing the network. StarBurst customers include automotive companies such as General Motors, Ford, Chrysler and Honda; retail companies like Wal-Mart, Kmart, The Gap, Sherwin Williams and Rite Aid; hospitality companies including Choice Hotels and Promus Hotels; financial companies such as Optimark, Dow Jones, Nomura Securities and Thomson Securities Information Services; high tech companies such as Microsoft and WebTV; news and information providers Bloomberg and Bridge Information Systems, and many other industry leaders from a wide array of markets worldwide. More information can be found at StarBurst’s World Wide Web site: http://www.starburstsoftware.com.
Product names mentioned herein may be trademarks and/or registered trademarks of their respective companies. StarBurst, StarBurst MFTP and MFTP are registered trademarks of StarBurst Software.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PRIMO2 -- Probabilistic Inference Modules.
# Copyright (C) 2013-2017 Social Cognitive Systems Group,
# Faculty of Technology, Bielefeld University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import setuptools
import sys
from primo2 import __version__
if sys.argv[-1] == 'setup.py':
print("To install, run 'python setup.py install'")
print()
setuptools.setup(
name="primo2",
version=__version__,
description="PRIMO -- PRobabilistic Inference MOdules",
long_description="This project is a (partial) reimplementation of the original " \
"probabilistic inference modules which can be found at " \
"https://github.com/hbuschme/PRIMO. This reimplementation " \
"follows the same general idea, but restructured and unified the " \
"underlying datatypes to allow a more concise API and more efficient " \
"manipulation, e.g. by the inference algorithm. In turn the inference " \
"algorithms have been rewritten and partly extended. For most if not " \
"all use cases this implementation should be easier to use and more " \
"performant than the original.",
url='http://github.com/SocialCognitiveSystems/PRIMO/',
license='GNU Lesser General Public License v3 or later (LGPLv3+)',
maintainer="Jan Pöppel",
maintainer_email="[email protected]",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
packages=[
"primo2",
"primo2.inference"
],
install_requires = [
"lxml",
"numpy",
"networkx",
"six"
],
)
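# Usage note (not part of the original file): once installed, the version
# declared above is importable at runtime, e.g.
#   >>> import primo2
#   >>> primo2.__version__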
|
On June 25, Tristan Massie (40), visiting Oregon from Maryland, was free-solo climbing the spectacular talus of the class four summit of Mount Thielsen when he slipped, fell about 20 feet on volcanic blocks, and then slid about 50 feet on the steep snow field below the summit.
Tristan lay on the snow with painful and disabling injuries, unable to move more than a few feet, for the remainder of the day on Friday, when he heard a climber crossing the remote snowfield late in the afternoon. He was just barely able to attract the climber’s attention.
He had left his cell phone in his summit pack, stashed near his hiking boots at the foot of his proposed rock climb to the summit. The lone climber, Stewart Slay, had a cell phone and called 911 for Search and Rescue assistance at 5:07 p.m. Tristan was lying lightly clad, directly on the snow, under the threat of frostbite and hypothermia. Time passed and it grew very cold and dark before the Douglas County Sheriff’s Search and Rescue team could be mobilized and climb the snow-covered slopes to the two climbers at 12:30 a.m. During the night, Tristan was lowered on a stretcher down the steep snow and scree slope northwest of the summit to easier ground, where, at 10 a.m. Saturday, he was hoisted into an Oregon National Guard helicopter and flown to St. Charles Hospital in Bend, Oregon.
Experience tells us to climb new summits with known companions. From Maryland, Tristan Massie had scheduled a guided climb of some Cascades peaks near Bend, but remaining snow fields had put the peaks out of reasonable reach for the guided group. Mount Thielsen was suggested as an easy peak. He is a strong long-distance runner. He reached the summit blocks in just four hours from the trailhead, despite drifts of snow on the trail and the large snowfield below the summit. Local experience tells us that few people climb Mount Thielsen this early in the summer.
Tammy Massie notes that Tristan did not carry his cell phone in his pants pocket or their SPOT-2 “GPS satellite communicator,” and that he did not have a topo map of Mount Thielsen. He did not have a helmet, which is usually used when climbing peaks in the volcanic Oregon Cascades. Rather than carrying his small summit pack on the scramble, he had left it at the base of the rock face. He was unable to reach his summit pack and phone or his larger pack, which, however, did not have gear for an overnight stranding in the forecast conditions. He might not have survived the night, lying lightly dressed on the snow in subzero temperatures and summit winds, without that chance encounter.
|
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from xbrowse_server.base.models import Project
from xbrowse_server.mall import get_cnv_store
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('args', nargs='*')
    def handle(self, *args, **options):
        # args: <project_id> <directory containing per-sample *.bam.csv exome depth files>
        project_id = args[0]
        project = Project.objects.get(project_id=project_id)
        # Build a map from normalized sample names (extension, dots and dashes
        # stripped) to the absolute paths of their exome depth files.
        file_map = {}
        for path in os.listdir(args[1]):
            sample = path.replace('.bam.csv', '')
            sample = sample.replace('.', '')
            sample = sample.replace('-', '')
            file_map[sample] = os.path.abspath(os.path.join(args[1], path))
        # Attach each matching file to its individual and register it with the CNV store.
        for indiv in project.get_individuals():
            sample = indiv.indiv_id.replace('-', '')
            if sample in file_map:
                indiv.exome_depth_file = file_map[sample]
                indiv.save()
                get_cnv_store().add_sample(str(indiv.pk), open(indiv.exome_depth_file))
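# Usage sketch (not part of the original file; the actual command name is the
# filename of this module under management/commands/, which is not shown here):
#   python manage.py <command_name> <project_id> <directory_with_bam_csv_files>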
|
1] Is it safe to order online?
2] Is it real diamond & gold jewelry?
3] Are all watches you sell authentic?
4] Do you provide layaway or financing?
5] How long will it take to receive my order?
6] Do you ship to other countries?
7] Can I ship my order to a different address?
8] What shipping companies do you use and do you require signature upon delivery?
9] What type of warranty do you provide?
Yes, it is with soicyjewelry.com. All transactions done through us are encrypted using 128-bit encryption technology (the highest available) and go through SSL (Secure Sockets Layer). SSL is the industry-standard method for computers to communicate securely without risk of data interception, manipulation, or recipient impersonation.
Is it real diamond & gold jewelry?
We guarantee all our jewelry is 100% genuine and real. All gold is solid gold (not gold plated) and all diamonds are natural mined diamonds. We do not use lab created diamonds.
Yes! We only sell 100% authentic watches, guaranteed or your money back. All watches come with a lifetime warranty.
No, sorry, this option is not available at the moment.
For domestic shipments the standard shipping time for orders shipped via free USPS Priority is 3-5 business days; we also provide a USPS Express Mail shipping option for $23.00 and a FedEx Next Day Air shipping option for $55. Our regular order processing time is 1-3 days (in addition to the delivery time); however, if the item is currently out of stock it may take up to 5 weeks to ship.
International orders usually take 4-10 business days for delivery, depending on international customs. International packages are sent via USPS mail service. FedEx and UPS Express service is available for an additional fee. Customs taxes and duties must be paid by the recipient.
If you need your item by a certain date please contact our representatives for additional information.
Yes, we ship worldwide. Delivery is $30 to most international countries (includes insurance) via USPS Express Mail; delivery time is 3-8 business days. Customs taxes and duties must be paid by the recipient.
We use mostly USPS and FedEx for our deliveries. All our packages require a signature upon delivery and will not be left at the door or any other place unattended.
Our jewelry items come with a 5-month warranty which covers labor to fix all manufacturing defects. The warranty does not cover damage resulting from accidents or damage caused by unauthorized third parties; however, we can still repair such items for a very low cost.
|
# -*- coding: utf-8 -*-
"""
.. module:: network_dialog
:platform: Linux, Windows
    :synopsis: GUI for adjustment calculation
.. moduleauthor:: Zoltan Siki <[email protected]>
"""
import platform
import webbrowser
from PyQt4.QtGui import QDialog, QFont, QMessageBox
from PyQt4.QtCore import QSettings
import config
from network_calc import Ui_NetworkCalcDialog
from base_classes import *
from surveying_util import *
from gama_interface import *
class NetworkDialog(QDialog):
""" Class for network calculation dialog
"""
def __init__(self, log):
""" Initialize dialog data and event handlers
:param log: log instance for log messages
"""
super(NetworkDialog, self).__init__()
self.log = log
self.ui = Ui_NetworkCalcDialog()
self.ui.setupUi(self)
self.points = []
self.fix = []
self.adj = []
# event handling
self.ui.CloseButton.clicked.connect(self.onCloseButton)
self.ui.ResetButton.clicked.connect(self.onResetButton)
self.ui.AddFixButton.clicked.connect(self.onAddFixButton)
self.ui.AddAdjButton.clicked.connect(self.onAddAdjButton)
self.ui.RemoveFixButton.clicked.connect(self.onRemoveFixButton)
self.ui.RemoveAdjButton.clicked.connect(self.onRemoveAdjButton)
self.ui.CalcButton.clicked.connect(self.onCalcButton)
self.ui.HelpButton.clicked.connect(self.onHelpButton)
def showEvent(self, event):
""" Set up initial state of dialog widgets
:param event: NOT USED
"""
if platform.system() == 'Linux':
# change font
fontname = QSettings().value("SurveyingCalculation/fontname",config.fontname)
fontsize = int(QSettings().value("SurveyingCalculation/fontsize",config.fontsize))
self.ui.ResultTextBrowser.setFont(QFont(fontname, fontsize))
log_path = QSettings().value("SurveyingCalculation/log_path",config.log_path)
self.log.set_log_path(log_path)
self.reset()
def reset(self):
""" Reset dialog to initial state
"""
self.points = get_measured()
self.fix = []
self.adj = []
# clear lists
self.ui.PointsList.clear()
self.ui.FixList.clear()
self.ui.AdjustedList.clear()
self.ui.ResultTextBrowser.clear()
i = 0
if self.points is not None:
for p in self.points:
self.ui.PointsList.addItem(p[0])
if p[1]:
item = self.ui.PointsList.item(i)
itemfont = item.font()
itemfont.setWeight(QFont.Bold)
item.setFont(itemfont)
i += 1
def onCloseButton(self):
""" Close dialog after Close button pressed
"""
self.accept()
def onResetButton(self):
""" Reset dialog to initial state after Reset button pressed
"""
self.reset()
def onAddFixButton(self):
""" Move selected points to fix point list
"""
selected = self.ui.PointsList.selectedItems()
for item in selected:
i = self.ui.PointsList.row(item)
if self.points[i][1]:
self.ui.FixList.addItem(self.ui.PointsList.takeItem(i))
self.fix.append(self.points[i])
del self.points[i]
def onAddAdjButton(self):
""" Move selected points to adjusted list
"""
selected = self.ui.PointsList.selectedItems()
for item in selected:
i = self.ui.PointsList.row(item)
self.ui.AdjustedList.addItem(self.ui.PointsList.takeItem(i))
self.adj.append(self.points[i])
del self.points[i]
def onRemoveFixButton(self):
""" Move back selected points from fixed list
"""
selected = self.ui.FixList.selectedItems()
for item in selected:
i = self.ui.FixList.row(item)
self.ui.PointsList.addItem(self.ui.FixList.takeItem(i))
self.points.append(self.fix[i])
del self.fix[i]
def onRemoveAdjButton(self):
""" Move back selected points from adjusted list
"""
selected = self.ui.AdjustedList.selectedItems()
for item in selected:
i = self.ui.AdjustedList.row(item)
self.ui.PointsList.addItem(self.ui.AdjustedList.takeItem(i))
self.points.append(self.adj[i])
del self.adj[i]
def onCalcButton(self):
""" Collect observations and adjust network
"""
if len(self.adj):
dimension = int(self.ui.DimensionComboBox.currentText())
conf = float(self.ui.ConfidenceComboBox.currentText())
try:
stda = float(self.ui.AngleDevLineEdit.text())
stdd = float(self.ui.DistDevMMLineEdit.text())
stdd1 = float(self.ui.DistDevMMKMLineEdit.text())
except ValueError:
QMessageBox.warning(self, tr("Warning"), tr("Invalid standard deviation value"))
return
g = GamaInterface(dimension, conf, stda, stdd, stdd1)
# add points to adjustment
fix_names = []
adj_names = []
for fp in self.fix:
p = get_coord(fp[0])
g.add_point(p, 'FIX')
fix_names.append(fp[0])
for fp in self.adj:
p = get_coord(fp[0])
if p is None:
p = Point(fp[0])
g.add_point(p, 'ADJ')
adj_names.append(fp[0])
# add observations to adjustment
fb_list = get_fblist()
if fb_list is None:
return None
for fb in fb_list:
lay = get_layer_by_name(fb)
if lay is None:
continue
st = None
n_ori = 0 # number of orientation directions
n_adj = 0 # number of adjusted targets
#for feat in lay.getFeatures():
sorted_features = sorted(lay.getFeatures(), key=lambda x: x["id"])
for feat in sorted_features:
pid = feat['point_id']
if feat['station'] == 'station':
if st is not None and dimension in [2, 3]:
if (n_ori + n_adj == 0) or \
(st in fix_names and n_adj == 0):
# no adjusted point on known station, remove it
g.remove_last_observation(True)
st = None
n_ori = 0 # number of orientation directions
n_adj = 0 # number of adjusted targets
if pid in fix_names or pid in adj_names:
st = pid
o = PolarObservation(pid, feat['station'])
o.th = feat['th'] if type(feat['th']) is float else None
o.pc = feat['pc'] if type(feat['pc']) is str else None
g.add_observation(o)
else:
if st is not None and (pid in fix_names or pid in adj_names):
if dimension in [2, 3] and (type(feat['hz']) is float or \
type(feat['v']) is float and type(feat['sd']) is float) or \
dimension == 1 and type(feat['v']) is float and \
type(feat['sd']) is float:
o = PolarObservation(pid, None)
o.hz = Angle(feat['hz'], 'GON') if type(feat['hz']) is float else None
o.v = Angle(feat['v'], 'GON') if type(feat['v']) is float else None
if type(feat['v']) is float and \
(st in adj_names or pid in adj_names):
# add zenith if one end is unknown
o.v = Angle(feat['v'], 'GON')
if type(feat['sd']) is float and \
(st in adj_names or pid in adj_names):
# add distance if one end is unknown
o.d = Distance(feat['sd'], 'SD')
o.th = feat['th'] if type(feat['th']) is float else None
o.pc = feat['pc'] if type(feat['pc']) is str else None
if dimension in [2, 3] and (o.hz is not None or o.d is not None) or \
dimension == 1 and o.v is not None:
# direction or distance given
g.add_observation(o)
if pid in fix_names:
n_ori += 1
if pid in adj_names:
n_adj += 1
t = g.adjust()
if t is None:
# adjustment failed
QMessageBox.warning(self, tr("Warning"),
tr('gama-local not installed or other runtime error'))
else:
self.ui.ResultTextBrowser.append(t)
self.log.write_log(tr("Network adjustment"))
self.log.write(t)
else:
QMessageBox.warning(self, tr("Warning"),
tr('No points to adjust'))
def onHelpButton(self):
""" Open user's guide at Network Adjustment in the default web browser.
"""
webbrowser.open("http://www.digikom.hu/SurveyingCalculation/usersguide.html#network-adjustment")
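# Usage sketch (not part of the original module; assumes a running Qt/QGIS
# application and a log object exposing set_log_path(), write() and write_log()
# as used above):
#   dlg = NetworkDialog(log)
#   dlg.exec_()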
|
Tandem Contracts Ltd was appointed to carry out a package of dilapidation work including various repairs to the roof and rainwater goods. A number of external steel fire doors were overhauled and redecorated, and a new lintel was installed at the main entrance, where the previous one had suffered impact damage that had caused the existing lintel and adjacent blockwork to drop.
Repairs to the roof and rainwater goods included the erection of a full access scaffold and independent edge protection in order to treat all cut-edge corrosion to the steel profiled roof sheets using a Giromax Seamsil system. Access was also required to clean the existing box gutters and re-line them with a liquid plastic system.
|
########################################################################
#
# File Name: HTMLParamElement
#
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: [email protected]
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLParamElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="PARAM"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_name(self):
return self.getAttribute("NAME")
def _set_name(self, value):
self.setAttribute("NAME", value)
def _get_type(self):
return self.getAttribute("TYPE")
def _set_type(self, value):
self.setAttribute("TYPE", value)
def _get_value(self):
return self.getAttribute("VALUE")
def _set_value(self, value):
self.setAttribute("VALUE", value)
def _get_valueType(self):
return string.capitalize(self.getAttribute("VALUETYPE"))
def _set_valueType(self, value):
self.setAttribute("VALUETYPE", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"name" : _get_name,
"type" : _get_type,
"value" : _get_value,
"valueType" : _get_valueType
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"name" : _set_name,
"type" : _set_type,
"value" : _set_value,
"valueType" : _set_valueType
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
|
The Clarence Community’s Trusted Orthodontist!
Owl Orthodontics offers premier orthodontics for children, teens and adults throughout the Clarence, NY community. Our orthodontist, Dr. Doug Wright, and his experienced staff are dedicated to ensuring we are always providing our patients with quality care in a safe and comfortable environment. We strive to provide the latest technology in orthodontic treatment. From traditional braces to Invisalign, we are confident we can provide the most effective treatment to address your orthodontic problem.
Owl Orthodontics’ Clarence office has plenty of parking for patients right in front of the office. We are conveniently located near Jim’s Steakout, just north of Roll Road near Transit Middle School. We are also less than one mile from the local high school, Williamsville East High School. From the second you set foot in our office, you will surely feel at home.
Contact your Clarence, NY Orthodontic Office Today!
*Completely Free *Complete with photos & X-rays.
If you think you or someone in your family is a candidate for orthodontic treatment, we are here to help. Please feel free to call our Clarence orthodontic office. To schedule an appointment, give us a call or complete our simple online form. We look forward to adding you to the Owl Orthodontics family!
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os  # used below for os.path.split
import unittest
import autoconfig
import parser_test_case
import pygccxml
from pygccxml.utils import *
from pygccxml.parser import *
from pygccxml.declarations import *
class tester_t( parser_test_case.parser_test_case_t ):
def __init__(self, *args):
parser_test_case.parser_test_case_t.__init__(self, *args)
self.__files = [
'core_ns_join_1.hpp'
, 'core_ns_join_2.hpp'
, 'core_ns_join_3.hpp'
, 'core_membership.hpp'
, 'core_class_hierarchy.hpp'
, 'core_types.hpp'
, 'core_diamand_hierarchy_base.hpp'
, 'core_diamand_hierarchy_derived1.hpp'
, 'core_diamand_hierarchy_derived2.hpp'
, 'core_diamand_hierarchy_final_derived.hpp'
, 'core_overloads_1.hpp'
, 'core_overloads_2.hpp'
]
def test(self):
prj_reader = project_reader_t( self.config )
decls = prj_reader.read_files( self.__files
, compilation_mode=COMPILATION_MODE.ALL_AT_ONCE )
files = declaration_files( decls )
result = set()
for fn in files:
result.add( os.path.split( fn )[1] )
self.failUnless( set( self.__files ).issubset( result ) )
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite()
|
The government will “take back” land on roughly 30 islands from private companies that have not made progress on planned developments, Prime Minister Hun Sen said in a speech on Monday that also targeted land-grabbing villagers and idle provincial governors.
Speaking at the inauguration of the Royal Sands hotel on Koh Rong, the premier directed the Council for the Development of Cambodia to start the land reclamation with Koh Tonsay, or Rabbit Island.
“We need to take them back because they have not developed anything,” he added.
Aggressive island development plans have worried rights groups and conservationists in Cambodia for years.
According to documents compiled by Adhoc, more than 180,000 hectares of land on 28 of Cambodia’s 64 islands were reclassified as state private property for companies seeking land concessions between 2008 and 2010. The islands in question included Koh Tonsay, Koh Russey, Koh Rong and Koh Rong Samloem, with most of the projects overseen by the Council for the Development of Cambodia.
Cheang Sam Um, a manager at a small resort on Koh Tonsay, said several companies owned by tycoons Try Pheap and Wing Hour had started construction on the opposite side of the island several years ago but had since abandoned it.
Sam Um raised concerns that future development could result in the eviction of villagers who had occupied the land for years.
On Koh Rong, rapid development has already led to land disputes with more than 100 families. In his speech, however, Hun Sen raised suspicions that government officials and wealthy investors were paying villagers to grab land.
The prime minister also blamed unidentified officials and provincial governors for sending disputes to him to solve and being too lax on illegal logging on the islands, threatening to fire officials who punt problems to him.
Chap Sotheary, provincial coordinator for rights group Adhoc, said the group has received at least 30 complaints from villagers on Koh Rong over the last two years.
“Officials from multiple institutions have land in the area,” she said. “Sometimes, there is the systemic collusion as well. Public officials usually have more chance to get the land than the normal people. Normal villagers are afraid of the law, so they rarely dare to,” she said.
Deputy Provincial Governor Chhin Seng Nguon refused to comment beyond insisting that his administration regularly cracks down on illegal logging.
Thida Ann, director of real estate firm CBRE Cambodia, said she is in favour of finding “real investors to develop the island to bring Cambodia to the world”.
“Some people just want to take benefits from concession land . . . They just want to reinvest or transfer to other investors for the appreciation,” she said.
But Alejandro Gonzalez-Davidson, the founder of the local environmental movement Mother Nature, said he fears that environmental degradation awaits the islands.
“Either that, or they are kept as private fiefdoms for the benefit of the super rich and Cambodia’s elite, as we are already seeing with some of the smaller islands off Sihanoukville,” he said in a message.
|
import sys
from setuptools import find_packages
from setuptools import setup
import io
import os
VERSION = '0.0.4'
def fpath(name):
return os.path.join(os.path.dirname(__file__), name)
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(fpath(filename), encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
def get_requirements():
requires = [line.rstrip('\n') for line in open(fpath('requirements.txt'))]
if sys.version_info[:2] == (2, 6):
# For python2.6 we have to require argparse since it was not in stdlib until 2.7.
requires.append('argparse')
return requires
setup_args = dict(
name='askme',
description='AskMe Python Client',
url='https://github.com/pirsquare/askme-python',
version=VERSION,
license='MIT',
packages=find_packages(exclude=['tests']),
package_data={'askme': ['source/data/*.json']},
entry_points={
'console_scripts': [
'askme = askme.main:main',
],
},
include_package_data=True,
install_requires=get_requirements(),
author='Ryan Liao',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
if __name__ == '__main__':
setup(**setup_args)
|
As you know I love to doodle and draw. And I seem to have some eye-hand coordination, and some feel for making it look good.
What I DON’T have is a photographic memory. I need reference photos. I don’t always follow them perfectly, but I need them.
Reference photos are great! If you can’t draw – trace! Use a window or your phone as the light source. It’s such an easy way to add something pretty to your BuJo.
Now, I like to use my own photos. Why? Well, it’s my dogs, my babies. It’s my environment that I see every day. And it’s my original work; no one else will have the same image!
But I can’t photograph my dog… I actually have a good camera, but I don’t really know how to use it… And anyway my dog just leaves!
You know what? I can help you with that.
Can you? Let me put my ear up to really hear this!
Not ALL of you, sorry, but those who speak Swedish!
On 26/3 I start my online course in dog photography. It’s the fourth time I’ve given it, and I have seen such progress in the other three classes! Students have gone from “Where do I turn this on?” to “This looks fantastic, I’m putting it on my wall!”.
Imagine…You can finally use your own photos as reference photos for your illustrations. Or maybe print the photos and put them in your memory section? Portraits of your dogs on your phone, computer, wall. Sounds good?
And I have a bonus! Join before Monday, and you get an editing course for Lightroom for free!
Four lessons. You learn about light, portrait, settings, action, how to get your dog to cooperate, and much more. Personal feedback on your photos in a closed FB-group. Answers to all your questions.
This will be fun for both you and your dog! And you know, that camera was kind of expensive, right? Isn’t it a shame not to use it…?
And if you want to see some of my work, visit www.kelpiephoto.com.
|
# -*- coding: utf-8 -*-
"""
Crop background and transform perspective from the photo of page
"""
import numpy as np
import cv2
from .helpers import *
def detection(image, area_thresh = 0.5):
"""Finding Page."""
small = resize(image)
# Edge detection
image_edges = _edges_detection(small, 200, 250)
    # Close gaps between edges (double-page close => rectangular kernel)
closed_edges = cv2.morphologyEx(image_edges,
cv2.MORPH_CLOSE,
np.ones((5, 11)))
    # Contours
page_contour = _find_page_contours(closed_edges, small, area_thresh)
# Recalculate to original scale
page_contour = page_contour.dot(ratio(image, small.shape[0]))
    # Transform perspective
new_image = _persp_transform(image, page_contour)
return new_image
def _edges_detection(img, minVal, maxVal):
"""Preprocessing (gray, thresh, filter, border) + Canny edge detection."""
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.bilateralFilter(img, 9, 75, 75)
img = cv2.adaptiveThreshold(img, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 115, 4)
    # Median blur replaces the center pixel by the median of the pixels under
    # the kernel => removes thin details
img = cv2.medianBlur(img, 11)
# Add black border - detection of border touching pages
img = cv2.copyMakeBorder(img, 5, 5, 5, 5,
cv2.BORDER_CONSTANT,
value=[0, 0, 0])
return cv2.Canny(img, minVal, maxVal)
def _four_corners_sort(pts):
"""Sort corners in order: top-left, bot-left, bot-right, top-right."""
diff = np.diff(pts, axis=1)
summ = pts.sum(axis=1)
return np.array([pts[np.argmin(summ)],
pts[np.argmax(diff)],
pts[np.argmax(summ)],
pts[np.argmin(diff)]])
def _contour_offset(cnt, offset):
"""Offset contour because of 5px border."""
cnt += offset
cnt[cnt < 0] = 0
return cnt
def _find_page_contours(edges, img, area_thresh):
"""Finding corner points of page contour."""
    # NOTE: two-value unpacking matches OpenCV 2.x and 4.x; OpenCV 3.x returns
    # three values (image, contours, hierarchy) from findContours.
    contours, hierarchy = cv2.findContours(edges,
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
    # Find the biggest rectangular contour, otherwise fall back to the full-image corners
height = edges.shape[0]
width = edges.shape[1]
MIN_COUNTOUR_AREA = height * width * area_thresh
MAX_COUNTOUR_AREA = (width - 10) * (height - 10)
max_area = MIN_COUNTOUR_AREA
page_contour = np.array([[0, 0],
[0, height-5],
[width-5, height-5],
[width-5, 0]])
for cnt in contours:
perimeter = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.03 * perimeter, True)
# Page has 4 corners and it is convex
if (len(approx) == 4 and
cv2.isContourConvex(approx) and
max_area < cv2.contourArea(approx) < MAX_COUNTOUR_AREA):
max_area = cv2.contourArea(approx)
page_contour = approx[:, 0]
# Sort corners and offset them
page_contour = _four_corners_sort(page_contour)
return _contour_offset(page_contour, (-5, -5))
def _persp_transform(img, s_points):
"""Transform perspective from start points to target points."""
# Euclidean distance - calculate maximum height and width
height = max(np.linalg.norm(s_points[0] - s_points[1]),
np.linalg.norm(s_points[2] - s_points[3]))
width = max(np.linalg.norm(s_points[1] - s_points[2]),
np.linalg.norm(s_points[3] - s_points[0]))
# Create target points
t_points = np.array([[0, 0],
[0, height],
[width, height],
[width, 0]], np.float32)
# getPerspectiveTransform() needs float32
if s_points.dtype != np.float32:
s_points = s_points.astype(np.float32)
M = cv2.getPerspectiveTransform(s_points, t_points)
return cv2.warpPerspective(img, M, (int(width), int(height)))
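if __name__ == "__main__":  # pragma: no cover
    # Minimal demo (an assumption, not part of the original project): run as a
    # module, e.g. `python -m <package>.<this_module> photo.jpg`, to crop a
    # photographed page and save the result next to the input.
    import sys
    image = cv2.imread(sys.argv[1])
    cropped = detection(image)
    cv2.imwrite("cropped_page.png", cropped)
    print("Saved cropped_page.png with shape %s" % (cropped.shape,))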
|
When Pamela Talese traveled to Rome with the intention of painting places where new and old architecture intersect, she didn’t anticipate seeing America’s possible future in Italy’s past. But Talese, who has long captured New York’s outer boroughs, hopped on her bike and found a fascinating visual landscape along the periphery of Rome that is defined, in part, by the history of Fascism. Her latest exhibition, “The Third Rome,” (the title takes its name from Giuseppe Mazzini’s dictum that “After the Rome of the emperors, after the Rome of the Popes, there will come the Rome of the people”) now on display at Robert Simon Fine Art, Inc., on the UES, explores themes of pseudo-populism and fallen heroes in a series of evocative, small-scale oil paintings.
Talese, a lifelong New Yorker, spoke to Straus News about the similarities between Benito Mussolini and Robert Moses, how her famous parents influenced her approach to the artist’s life, and the disappearing Upper East Side of her youth.
Tell me about the inspiration behind “The Third Rome.” What drew you to the city?
I can’t really say why it was that Rome called. I had always loved Rome...Romans are very much like New Yorkers. They are funny, they’re a little abrupt, they can be rude, they’re moody, so it felt very familiar to me ... The more I was there, the more I stand in a place, the more I feel intuitively. Then I do some research and it’s like wow, there’s a really big story here.
How so? What was the story?
I began to spend more time in the Foro Italico [a sports complex intended for the Olympic games] where there are several Carrara marble statues of athletes ... these amazing, oversized, muscular, sort of these perfect male bodies are all around the Foro Italico [formerly Foro Mussolini, built during his regime] ... I thought that they were really campy, so I never paid much attention to them. But somehow that summer in the heat, the first summer of Trump, whatever I was going through at the time, made me look at these sculptures differently, and I began to think ... all of these young men were sent off to war completely under-equipped. Half of the Italian army ended up being imprisoned or in work camps. It was all propaganda, and no strategy. So the story of Italy in the 20th century is a sad one, it’s a really tragic one, and I think it’s that tragedy that I began to feel that summer.
That’s interesting to think of the parallels between the massive construction projects in our city today and these massive, early 20th century projects.
I was born and raised on the Upper East Side ... And I remember when there were three major companies. It was Turner, Tishman and Trump in the 70s, and they were tearing up the city that I knew, so I was acutely aware of urban development at a very young age. There’s a wonderful book by Nathan Silver, “Lost New York” ... maybe just seeing that book [as a young child] made me aware of preservation, and buildings, and the virtue of certain buildings ... another thing that I’ve said is that Benito Mussolini is the Robert Moses of Rome. They are very similar in sort of being master builders, they both were in love with the car and they both thought nothing of displacing thousands of residents for their big projects.
You sound like a true New Yorker.
I’m such a New Yorker. It’s sort of sad. We’re very provincial in that way.
So what do you miss about the Upper East Side you knew growing up?
I’m a little bit despairing about the amount of development that’s going on on the Upper East Side. I mean, you walk on a block, an early to mid-20th century block, and then all of a sudden ... it looks very surgical. To me, it looks like somebody pulled a tooth out. You’ll see a gap on the corner, especially everything that’s happened on Lexington and Third. That to me is a huge loss. I know that the world’s global population is expanding, and I understand why people want to live here. I mean, so do I. It’s a problem with scale, for me. We have some really beautiful, gracious streets ... there were these wonderful pastry shops — the Eclair — there were a couple of German or German-American pastry shops that made these kind of cookies that I have never seen again.
Did you parents encourage a career in the arts?
I should say I did everything I could not to be a painter. I’m no fool. My original plan was to be a magazine designer, and on and off for about eight years I was an interior designer. And I worked briefly for Parish-Hadley, and I worked for David Kleinberg [Design Associates], a job I loved ... There was one day I went up to Coney Island just to do another drawing of this building that was not a building of note, but I liked the composition, and it was gone. And I thought okay, I have to move on this. It wasn’t so much that I thought that I had a big art career, because I didn’t at the time. I didn’t even know it was my calling. I felt the urgency of documenting these things in the way I saw them before they disappeared. And that’s where it started.
I quit my job in 2000 ... my father [author Gay Talese] was not thrilled. He was not delighted. My mother [publisher Nan Talese] was much more encouraging. She had always thought that I was an artist, and I had been resisting it. And they are still kind of puzzled. Really, they’re wonderful, they’re nice, they’re concerned...I’m concerned, but it’s too late for me to become a paralegal.
I’ve always felt that way about writing. Did your parents give you any advice? I mean, you’re a storyteller in many ways.
I think that there’s a storytelling aspect in my family, and I guess while there wasn’t direct advice, they led by example. Both of my parents, who are in their 80s, are still working. I have a sister as well, and the four of us, we work all the time. That’s just the way we are oriented ... you know, my mother publishes amazing literary authors that are occasionally commercially successful, like Margaret Atwood or Ian McEwan or Pat Conroy, but she doesn’t do it because she can tell that they’re going to be a big seller. She does it because she believes in the work.
And my father very much goes off on his direction, sometimes against some persuasive advice. He is called in a certain way, and that’s the work he does, and certainly the work he does is not on trend. I’m a realist who paints mostly on location, and what’s been fascinating about this show is ... this work is transitional not only in my own career, but also in the ways people see Rome. I guess from my parents, I learned to just forge ahead.
|
from datetime import datetime
import posixpath
from storages.backends.s3boto3 import S3Boto3Storage
from django.conf import settings
# used only for suppressing INFO logging in S3Boto3Storage
import logging
class S3Storage(S3Boto3Storage):
# suppress boto3's INFO logging per https://github.com/boto/boto3/issues/521
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
def augmented_listdir(self, name):
path = self._normalize_name(self._clean_name(name))
# The path needs to end with a slash, but if the root is empty, leave
# it.
if path and not path.endswith('/'):
path += '/'
files = []
paginator = self.connection.meta.client.get_paginator('list_objects')
pages = paginator.paginate(Bucket=self.bucket_name, Delimiter='/', Prefix=path)
for page in pages:
for entry in page.get('Contents', ()):
last_modified = entry.get('LastModified')
age = datetime.now(last_modified.tzinfo) - last_modified
files.append({'file_name': posixpath.relpath(entry['Key'], path),
'last_modified': last_modified,
'age': age})
return files
def get_s3_storage(bucket_name='h2o.images'):
# We're planning on supporting multiple storage solutions. I'm adding this
# unnecessary layer of abstraction now, to hopefully encourage design decisions
# that will make it easier to support multiple and customer-specific storages later.
return S3Storage(
**settings.S3_STORAGE,
bucket_name=bucket_name
)
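# Usage sketch (not part of the original file; the bucket and prefix names are
# assumptions, and settings.S3_STORAGE must hold valid boto3/django-storages
# keyword arguments):
#   storage = get_s3_storage(bucket_name='h2o.images')
#   for entry in storage.augmented_listdir('uploads/'):
#       print(entry['file_name'], entry['last_modified'], entry['age'])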
|
JSP Implicit Objects: JSP has nine implicit objects created by the web container, and they are available in all JSP pages. Implicit objects can be used directly in scriptlets without being declared.
The page object is an implicit object that references the auto-generated servlet class. Type casting is required before the page object can be used.
The out object is used for writing data into the output buffer. It is an instance of JspWriter and is commonly used from JSP expressions and scriptlets.
The request object is an implicit instance of HttpServletRequest. It is used to access request parameters, character encoding, headers, content information, the remote address, the server name and the server port.
The exception object is typically used to produce a suitable response to an error condition.
The pageContext object represents the whole JSP page. It is an instance of javax.servlet.jsp.PageContext and is used as a way to access data about the page while hiding most of the implementation details.
The config object is used to get initialization parameters for a specific JSP page, and one is created for every JSP page.
The response object provides methods for working with the HTTP response headers. It is an implicit instance of javax.servlet.http.HttpServletResponse.
The session object is associated with the HTTP request. It is used to set, get or remove attributes, and to get session information.
The application object is an instance of javax.servlet.ServletContext and is created by the web container. It is also used to get a RequestDispatcher object.
JSP Implicit Objects – While writing a JSP program it is not necessary to remember the implementation class names of the implicit objects, because they vary with the server being used.
The implicit out object is supplied by the JSP container as an instance of a class that extends javax.servlet.jsp.JspWriter, not as a direct instance of the javax.servlet.jsp.JspWriter class itself.
For example, out.println is used to display data in the response.
JSP implicit objects can also be called predefined variables.
Implicit objects are Java objects and they are managed by the JSP container.
|
# This provides utilities for plotting things on a
# 1D or 2D mesh or a slice of a 3D mesh.
try :
import numpy as np
except ImportError :
print "Error importing Numpy."
try :
import matplotlib.pyplot as plt
import matplotlib
except ImportError :
print "Error importing matplotlib"
def plot_mesh_function(mesh, f, title="", colormap = "hot", edges = False, mybounds = [], myticks = []) :
""" Plot a mesh function.
"""
if mesh.dimension() == 1 :
# get the mesh points
x = mesh_axes(mesh)
# plot the map
plt.plot(x, f)
elif mesh.dimension() == 2 :
# Get the mesh axes and then make a grid of them for plotting.
x, y = mesh_axes(mesh)
X, Y = np.meshgrid(x, y)
# Reshape the function
f = f.reshape(mesh.number_cells_x(), mesh.number_cells_y())
if edges :
plt.pcolor(X, Y, f, cmap=colormap, edgecolors='k')
else :
plt.pcolor(X, Y, f, cmap=colormap)
plt.axis("scaled")
plt.xlabel("x [cm]")
plt.ylabel("y [cm]")
if len(myticks) :
cbar = plt.colorbar(boundaries=mybounds,ticks=myticks)
else :
cbar = plt.colorbar()
else :
print "not ready for 3d"
return
plt.title(title)
# show the plot
plt.show()
def plot_mesh_map(mesh, key, edges = False) :
""" Plot a mesh map, optionally with edges explicitly displayed.
"""
# no 3D for now
if mesh.dimension() == 3 :
print "not ready for 3d"
return
# Check that the map exists and return if missing
if not mesh.mesh_map_exists(key) :
print "Mesh map", key, " does not exist"
return
# Get the map
map = np.asarray(mesh.mesh_map(key))
if (mesh.dimension() == 2) :
# reshape the map
map = map.reshape(mesh.number_cells_x(), mesh.number_cells_y())
# Choose a random color map for 2D plots
unique_elements = np.unique(map)
#num_unique_elements = len(unique_elements)
num_unique_elements = max(unique_elements)+1
colormap = matplotlib.colors.ListedColormap(np.random.rand(num_unique_elements, 3))
bounds = np.linspace(-0.5, num_unique_elements - 0.5, num_unique_elements+1)
ticks = bounds[0:num_unique_elements]+0.5
# Plot
plot_mesh_function(mesh, map, key, colormap, edges, bounds, ticks)
def plot_multigroup_flux(mesh, state, edges = False) :
""" Plot the multigroup fluxes.
For 1D, they are superimposed on one plot. In 2D, they
are split into subfigures for the number of groups. Obviously,
this can get cumbersome for many groups, so we kill it at 5+.
"""
    # TODO: `f` and `colormap` are not defined in this function; the group-wise
    # flux needs to be extracted from `state` before plotting.
    if mesh.dimension() == 1 :
        # get the mesh points
        x = mesh_axes(mesh)
        # plot the map
        plt.plot(x, f)
elif mesh.dimension() == 2 :
# Get the mesh axes and then make a grid of them for plotting.
x, y = mesh_axes(mesh)
X, Y = np.meshgrid(x, y)
edgec = 'none'
if edges :
edgec = 'k'
plt.pcolor(X, Y, f, cmap=colormap, edgecolors=edgec)
else :
print "not ready for 3d"
return
# show the plot
plt.show()
def mesh_axes(mesh) :
""" Get the fine mesh points for plotting.
"""
if (mesh.dimension() == 1) :
# for 1D, we take the cell center points
x = np.zeros(mesh.number_cells_x())
x[0] = mesh.dx(0) * 0.5
for i in range(0, mesh.number_cells_x()-1) :
x[i + 1] = x[i] + 0.5*(mesh.dx(i) + mesh.dx(i+1))
return x
else :
# for 2D, we take the mesh edges
x = np.zeros(mesh.number_cells_x()+1)
y = np.zeros(mesh.number_cells_y()+1)
for i in range(0, mesh.number_cells_x()) :
x[i + 1] = x[i] + mesh.dx(i)
for j in range(0, mesh.number_cells_y()) :
y[j + 1] = y[j] + mesh.dy(j)
return (x, y)
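# Demo (not part of the original module). A tiny stand-in mesh implementing
# just the 1D interface used above (dimension, number_cells_x, dx); the real
# mesh class comes from the host code this module is written for.
class _DemoMesh1D(object) :
    def __init__(self, n, width) :
        self._n = n
        self._delta = float(width) / n
    def dimension(self) :
        return 1
    def number_cells_x(self) :
        return self._n
    def dx(self, i) :
        return self._delta

if __name__ == "__main__" :
    demo_mesh = _DemoMesh1D(50, 10.0)
    x = mesh_axes(demo_mesh)
    # Plot a smooth dummy "flux" over the demo mesh.
    plot_mesh_function(demo_mesh, np.sin(np.pi * x / 10.0), title = "demo function")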
|
Kingsley School provides remote access to Staff members only at this time. The remote desktop can be accessed via clicking on the link below, and saving to your local machine.
Once downloaded, double click on the file (“StaffRemoteDesktop.rdp”) and you will be presented with a login screen. You will need to enter YOUR school login details and click OK. This should then connect you to the school network.
To use the resource you must be running Windows 7, 8.1 or Windows 10 – Windows XP is not supported at present. For any other operating systems please contact IT and we will be happy to help.
|
'''
circular_oscillator.py: defines a ModelledRepr extension that begins with a
unit circle on the screen parametrized by (r, theta) = (1, [0,2pi))
This will be the test case for the initial extension of ModelledRepr. We will
update/expand/tweak ModelledRepr based on this implementation to make further
extensions easier.
'''
from musicvisualizer.pipeline.ir import ModelledRepr
class CircularOscillatorModel(ModelledRepr):
# TODO: include params for ModelledRepr
def __init__(self,
sampleRate = 24,
sampleRange = (None, None),
dataIn = None,
parameters = None,
number_of_points = 1024):
        # Initialize the ModelledRepr base class with the shared parameters.
        super(CircularOscillatorModel, self).__init__(sampleRate = sampleRate,
                                                      sampleRange = sampleRange,
                                                      dataIn = dataIn,
                                                      parameters = parameters)
self.number_of_points = number_of_points
self.points = [0.0 for i in range(self.number_of_points)]
def increment_time(self):
pass
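# Usage sketch (not part of the original module; the audio source handed to
# `dataIn` and the remaining ModelledRepr parameters depend on the rest of the
# musicvisualizer pipeline):
#   model = CircularOscillatorModel(sampleRate=48, number_of_points=2048)
#   model.increment_time()   # advance the model by one frame (currently a no-op)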
|
DC Entertainment (DCE) announced today an action-packed celebration of the most iconic Super Hero team in pop culture history with “Justice League Day” on Saturday, November 18. “Justice League Day” will take place during opening weekend of the highly anticipated big screen debut of Warner Bros. Pictures’ Justice League, and the massive event will honor DC’s legendary Super Hero team with epic worldwide festivities.
More than one thousand comic retailers, bookstores, libraries, Six Flags Theme Parks and other partners around the globe will host “Justice League Day” activations, including distributing free copies of JUSTICE LEAGUE DAY #1 SPECIAL EDITION, which reprints The New 52 JUSTICE LEAGUE #1 by superstar creative team Geoff Johns and Jim Lee, and features new cover art by Ivan Reis. Digital retailers will also offer free downloads of the comic.
Extending the adventure beyond the big screen, moviegoers who purchase a ticket to see Justice League in participating IMAX® at AMC Theatres® from Nov. 16-19 will receive a JUSTICE LEAGUE AMC IMAX SPECIAL EDITION comic book while supplies last – a reprinting of JUSTICE LEAGUE #15 by Geoff Johns and Ivan Reis. Fans who purchase tickets from Cinemark and Regal IMAX® theatres will also receive free digital comic download codes. Plus, limited-edition “Justice League Day” posters, temporary tattoos and fan-focused giveaways will be available at select retailers. DC All Access, DC’s award-winning original digital series, along with the DC Fans YouTube channel, will host special programming in support of the “Justice League Day” celebration. Fans can visit DCComics.com/JLDay to find more details on festivities and participating locations.
Justice League titles at Books-A-Million locations will be on sale Nov. 14-27 with a buy one, get one 50% off special offer. Digital readers can rediscover their favorite Justice League stories with DC’s “Join the League” sale, from Nov. 7-20. The two-week sale features over 160 e-books and periodical issues, including The New 52 and Rebirth collections for AQUAMAN, BATMAN, WONDER WOMAN, SUPERMAN, GREEN LANTERN, CYBORG and THE FLASH. DC will also offer over 100 discounted Justice League led e-books and periodicals with the “Justice League: All In” sale, from Nov. 14-20. DC’s digital sales are available via readdcentertainment.com, Amazon Kindle, comiXology, Google Play, iBooks, Nook, and Madefire.
DC Collectibles fans can also take home their favorite Justice League characters brought to life as stunning 12’’ statues based on the actor appearances in the film. Fans can find Gal Gadot’s Wonder Woman, Ben Affleck’s Batman, Jason Momoa’s Aquaman, Ray Fisher’s Cyborg, and Ezra Miller’s The Flash everywhere comics are sold.
Further spotlighting the groundbreaking Super Hero ensemble, DC has joined forces with an array of partners including, Books-a-Million, Comixology, Six Flags Theme Parks, Warner Bros. Studio Tour, PHR, Scholastic and more for exclusive in-store and digital promotions.
Fans can download the official “Justice League Day” activity kit, including cut out paper masks, word mazes, coloring pages and more. Visit DCAllAccess.com and download the DC All Access mobile app for more details on the “Justice League Day” festivities, participating stores, and in-depth coverage directly from original digital series DC All Access.
Join the celebration on November 18 and share on social media using #JusticeLeagueDay.
|
#!/usr/bin/python
import os
import time
from misc_lib import run_command_list
from commands import getstatusoutput
import stat
class Speed:
def __init__(self):
self.__max = 0
self.__min = -1
self.__avg = 0
def record_value(self, val):
if float(val) > float(self.__max):
self.__max = float(val)
if self.__min < 0:
self.__min = val
elif float(val) < float(self.__min):
self.__min = float(val)
if self.__avg == 0:
self.__avg = float(val)
else:
self.__avg = '%.2f' % ((float(self.__avg) + float(val)) / 2)
def get_values(self):
return self.__max, self.__avg, self.__min
class Raid_Util:
def __init__(self, cmd_dir, src_dir):
self.__cmd_dir = cmd_dir
self.__src_dir = src_dir
self.__raid_txn = True
self.raid_name = None
self.__sys_name = None
self.sub_dev_list = []
self.__sub_dev_cnt = 0
self.cmd_args = ''
self.__stripe_cache_size = 16384
self.raid_sub_dev_size_KB = 0
self.raid_level = 6
def set_raid_level(self, level):
self.raid_level = level
def get_raid_level(self):
return self.raid_level
def get_lest_sub_dev_cnt(self):
if self.raid_level == 6:
return 4
else:
return 3
def set_raid_sub_dev_size_KB(self, size_kb):
self.raid_sub_dev_size_KB = size_kb
def get_raid_sub_dev_size_KB(self):
return self.raid_sub_dev_size_KB
def set_stripe_cache_size(self, size):
self.__stripe_cache_size = size
def get_stripe_cache_size(self):
return str(self.__stripe_cache_size)
def set_cmd_args(self, args):
self.cmd_args = args
def get_cmd_args(self):
return self.cmd_args
def set_cmd_dir(self, path):
self.__cmd_dir = path
def get_cmd_dir(self):
return self.__cmd_dir
def set_src_dir(self, path):
self.__src_dir = path
def get_src_dir(self):
return self.__src_dir
def set_raid_txn(self, is_txn):
self.__raid_txn = is_txn
def get_raid_txn(self):
return self.__raid_txn
def get_cmd(self):
return self.__cmd_dir + '/mdadm'
def set_sub_dev_list(self, dev_list):
size_kb = find_min_dev_size_kb(dev_list)
size_kb /= 1024
if size_kb > 0:
size_kb *= 1024
self.set_raid_sub_dev_size_KB(size_kb - 4*1024)
self.sub_dev_list = dev_list
def get_sub_dev_list(self):
return self.sub_dev_list
def add_sub_dev(self, dev):
self.sub_dev_list.append(dev)
def del_sub_dev(self, dev):
self.sub_dev_list.remove(dev)
def get_sub_dev_cnt(self):
self.__sub_dev_cnt = len(self.sub_dev_list)
return self.__sub_dev_cnt
def set_raid_name(self, name):
self.raid_name = name
def search_raid_dev_path(self):
dev_paths = self.get_sub_dev_list()
status = 0
for dev in dev_paths:
dev_name = dev.split(os.sep)[-1]
            cmd = 'cat /proc/mdstat | grep ' + dev_name
(status, output) = getstatusoutput(cmd)
if not status:
break
if not status and output:
return ''.join(['/dev/', output.split()[0]])
return None
def get_raid_path(self):
path = None
if os.path.exists('/dev/' + self.raid_name):
path = ''.join(['/dev/', self.raid_name])
elif os.path.exists('/dev/md/' + self.raid_name):
path = ''.join(['/dev/md/', self.raid_name])
if path:
return path
return self.search_raid_dev_path()
def get_sys_name(self):
if self.__sys_name:
return self.__sys_name
path = self.get_raid_path()
if not path:
return
        # Use lstat() so symlinks such as /dev/md/<name> are detected;
        # stat() follows the link and S_ISLNK() would never be true.
        mode = os.lstat(path).st_mode
        if stat.S_ISLNK(mode):
            tgt = os.readlink(path)
        else:
            tgt = path
self.__sys_name = tgt.split(os.sep)[-1]
return self.__sys_name
def remove_raid(self):
path = self.get_raid_path()
if not path:
return
cmds = [' '.join([self.get_cmd(), '-S', path])]
(status, _) = run_command_list(cmds)
if not status:
self.__sys_name = None
def exit_raid(self):
cmds = ['rmmod raid456 md_mod',
'modprobe -r async_raid6_recov async_pq',
#'rmmod raid6_pq',
#'dmesg -C > /dev/null'
]
run_command_list(cmds)
def init_raid(self):
if self.get_raid_txn():
src_dir = self.get_src_dir()
cmds = ['insmod ' + src_dir + '/raid6_pq.ko',
'modprobe async_raid6_recov',
'insmod ' + src_dir + '/md-mod.ko',
'insmod ' + src_dir + '/raid456.ko']
else:
cmds = ['modprobe md_mod',
'modprobe raid456']
run_command_list(cmds)
def zero_raid_sub_dev(self, tgt = None):
raid_cmd = self.get_cmd()
if self.get_sub_dev_cnt() == 0:
return
if tgt:
devs = tgt
else:
devs = ' '.join(self.get_sub_dev_list())
cmds = [' '.join([raid_cmd, '--zero-superblock',
'--force', devs])]
run_command_list(cmds)
def change_raid_stripe_cache_size(self):
if not self.get_sys_name():
return
cmd_change = ''.join(['echo ', str(self.get_stripe_cache_size()),
' > /sys/block/', self.get_sys_name(),
'/md/stripe_cache_size'])
cmds = [cmd_change]
run_command_list(cmds)
def create_raid(self):
if self.get_sub_dev_cnt() < self.get_lest_sub_dev_cnt():
return
raid_cmd = self.get_cmd()
if self.get_raid_txn():
txn = '-T'
else:
txn = ''
devs = ' '.join(self.get_sub_dev_list())
cmd_create = ' '.join(['echo "y" |', raid_cmd,
'-C', '/dev/md/' + self.raid_name,
self.cmd_args,
'-n', str(self.get_sub_dev_cnt()),
'-l', str(self.get_raid_level()),
'-z', str(self.get_raid_sub_dev_size_KB()),
txn, devs])
cmds = [cmd_create]
(err, _) = run_command_list(cmds)
if err:
return
cmd_map = ' '.join(['cat /dev/md/md-device-map', '|',
'grep', self.raid_name])
(status, output) = getstatusoutput(cmd_map)
if not status:
dev_path = '/dev/' + output.split()[0]
else:
dev_path = self.search_raid_dev_path()
if dev_path:
cmd_link1 = ''.join(['ln -s ', dev_path,
' /dev/',self.raid_name])
cmd_link2 = ''.join(['ln -s ', dev_path,
' /dev/md/', self.raid_name])
cmds = [cmd_link1, cmd_link2]
run_command_list(cmds)
return
def assemble_raid(self):
raid_cmd = self.get_cmd()
devs = ' '.join(self.get_sub_dev_list())
cmds = [' '.join([raid_cmd, '-A', self.get_raid_path(), devs])]
run_command_list(cmds)
def show_raid_info(self):
if not self.get_sys_name():
return
cmds = ['cat /proc/mdstat',
''.join(['cat /sys/block/', self.get_sys_name(),
'/md/stripe_cache_size']),
'cat /proc/modules | grep raid456'
]
run_command_list(cmds)
def fail_one(self, index = 0):
if not self.get_sys_name():
return
tgt = self.get_sub_dev_list()[index]
cmd_fail = ' '.join([self.get_cmd(),
self.get_raid_path(),
'--fail', tgt
])
cmd_remove = ' '.join([self.get_cmd(),
self.get_raid_path(),
'--remove', tgt
])
cmds = [cmd_fail, cmd_remove]
(err, _) = run_command_list(cmds)
if not err:
self.del_sub_dev(tgt)
def fail_two(self, index1 = 0, index2 = 1):
self.fail_one(index1)
self.fail_one(index2)
def add_one(self, index = 0):
if not self.get_sys_name():
return
tgt = self.get_sub_dev_list()[index]
self.zero_raid_sub_dev(tgt)
cmd = ' '.join([self.get_cmd(),
self.get_raid_path(),
'--add', tgt
])
        cmds = [cmd]
        (err, _) = run_command_list(cmds)
if not err:
self.add_sub_dev(tgt)
def add_two(self, index1 = 0, index2 = 1):
self.add_one(index1)
self.add_one(index2)
def check_recovery_speed(self, speed_obj):
if not self.get_sys_name():
return 0
cmd = ' '.join(['cat /proc/mdstat | grep -A3', self.get_sys_name(),
'| grep speed'
])
cmds = [cmd]
(status, speed) = run_command_list(cmds)
if status:
return 0
speed_start = speed.find('speed=')
if speed_start < 0:
return 0
speed_start += len('speed=')
speed_end = -1
speed_units = ['K', 'M', 'G', 'B']
for unit in speed_units:
speed_end = speed[speed_start:].find(unit)
if speed_end >= 0:
break
if speed_end < 0:
print speed
return 0
speed_end += speed_start
speed_value = speed[speed_start: speed_end]
speed_obj.record_value(speed_value)
return 1
def wait_recovery_time(self, cnt = 100):
speed_obj = Speed()
for i in range(cnt):
time.sleep(1)
if i < 3:
continue
ret = self.check_recovery_speed(speed_obj)
if not ret:
break
print 'recovery speed (max avg min):', speed_obj.get_values()
    def wait_sync(self):
        speed_obj = Speed()
        sampled = False
        while self.check_recovery_speed(speed_obj):
            sampled = True
            time.sleep(5)
        # Only report if at least one speed sample was recorded; reading
        # speed_obj.__max from outside the Speed class would be rewritten by
        # Python's name mangling and raise AttributeError.
        if sampled:
            print 'resync speed (max avg min):', speed_obj.get_values()
def find_min_dev_size_kb(dev_path_list):
min_size_kb = -1
for dev_path in dev_path_list:
name = dev_path.split(os.sep)[-1]
cmds = ['cat /proc/partitions | grep ' + name]
(status, line) = run_command_list(cmds)
if status:
continue
size_kb = int(line.split()[2])
if (size_kb < min_size_kb) or (min_size_kb < 0):
min_size_kb = size_kb
return min_size_kb
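# For context: check_recovery_speed() above scrapes the "speed=" field that the
# md driver prints in /proc/mdstat while an array is recovering or resyncing.
# A minimal standalone sketch of the same extraction, assuming the usual mdstat
# status-line format (the sample line below is illustrative, not real output):
import re

_sample_mdstat_line = ("[==>.........]  recovery = 12.6% (123456/987654) "
                       "finish=8.3min speed=102400K/sec")

def parse_mdstat_speed(line):
    # Return (value, unit prefix) from an mdstat status line, or None when no
    # speed= field is present.
    match = re.search(r"speed=(\d+)([KMGB]?)", line)
    if not match:
        return None
    return match.group(1), match.group(2)

print parse_mdstat_speed(_sample_mdstat_line)  # ('102400', 'K')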
|
The registration is free of charge*. Before renting your first bike, we need to verify your payment method. You will be required to deposit a credit of 1 € which can be used in all fare types. You can change your preferred payment method at any time.
The annual rate includes 30 minutes per rental for 365 days and is valid in Augsburg. In all other cities the basic rate will apply. The subscription renews automatically.
Your advantage as a swa annual subscriber, swa Carsharer or swa City Card owner: the first 30 minutes of each rental in Augsburg are included. In other cities the basic rate applies.
|
from setuptools import setup
try:
with open('readme.rst') as f:
long_description = f.read()
except IOError:
with open('readme.md') as f:
long_description = f.read()
def read_version():
    # Execute version.py in an isolated namespace instead of relying on
    # exec() mutating locals(), which is not guaranteed inside a function
    # (and breaks under Python 3).
    with open('eralchemy/version.py') as f:
        code = f.readlines()[0]
    namespace = {}
    exec(code, namespace)
    assert 'version' in namespace
    return namespace['version']
setup(
name='ERAlchemy',
version=read_version(),
description='Simple entity relation (ER) diagrams generation',
long_description=long_description,
    # The project's main homepage.
url='https://github.com/Alexis-benoist/eralchemy',
# Author details
author='Alexis Benoist',
author_email='[email protected]',
# Choose your license
license='Apache License 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
        # Indicate which Python versions are supported: Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Database',
],
# What does your project relate to?
keywords='sql relational databases ER diagram render',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=[
'eralchemy',
],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'SQLAlchemy',
'pygraphviz'
],
entry_points={
'console_scripts': [
'eralchemy=eralchemy.main:cli',
],
},
)
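# Usage sketch (illustrative only, never executed during installation): the
# library's documented entry point is eralchemy.render_er, which accepts either
# a SQLAlchemy declarative Base or a database URI plus an output path whose
# extension selects the rendered format. The URI and file names below are
# placeholders.
def _example_render():
    from eralchemy import render_er
    # Draw the ER diagram straight from an existing database.
    render_er("sqlite:///example.db", "erd_from_sqlite.png")
    # Or draw it from a SQLAlchemy declarative Base defined in code:
    # render_er(Base, "erd_from_models.pdf")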
|
St. Benedict, Kansas, will be remembering their war dead on Memorial Day. But they won’t remember any from World War II, Korea, or Desert Storm.
The town has sent many young men to war, but none were lost — and local Catholics credit the Rosary.
Larry Buessing remembers St. Mary’s Church in the tiny town of St. Benedict as the church his grandfather helped build in 1893. “He was a little boy and he led the mule that raised the rock to put in place for the church,” he said.
The church, built by the Benedictines of Atchison, Kansas, is plain on the outside, but breathtakingly beautiful inside, such that it was a finalist for one of the “8 Wonders of Kansas” in a recent statewide contest.
The parishioners' faith included the Rosary, whose 15 original mysteries are referenced in art on the church walls. Outside, the parish added a grotto in 1936, using rocks from nearby farms.
Lilian Engelken, a parish secretary at St. Mary’s for 28 years, heard stories about how the grotto was built.
“The young men of the parish helped bring the rocks,” she said. They were the same young men who would later be sent to Europe and the Pacific to fight the Axis Powers.
And pray the Rosary they did, every night at 7:30, "after chores were done" – weather permitting. In inclement weather they prayed in the church instead.
Engelken remembers more than 50 young men headed off to war. Every single one came back.
Others suffered heavy losses. The town of Frankfort, also in Northeast Kansas, lost 32 young men in World War II.
“Based on records from local county newspapers of that time, it is concluded that the Frankfort community lost more men in World War II than any other town of similar size,” said Sen. Pat Roberts, upon entering all 32 names into the Congressional Record in 2005.
The miracle of the Rosary is remembered by parishioners to this day.
When the Korean War broke out, the town sent more men overseas, and prayed more rosaries. Again, none were lost. The practice was remembered in 1991 when Desert Storm brought troops to Kuwait. Again, the town gathered for prayers. Again, “St. Benedict boys” were spared.
There are other more dramatic miracles of the Rosary from World War II — there were the eight German missionaries who prayed the Rosary daily in Hiroshima, Japan, during World War II, and walked away unscathed after the atomic bomb destroyed everything around them on August 6, 1945. There were also Austria’s daily rosaries to fend off communist takeover after the war was over.
But the miracle of the Rosary at St. Benedict, Kansas, reminds us how Our Lady takes care of the humble, the dedicated and the devout who pray for peace, war after war.
|
# Volatility
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
@author: Edwin Smulders
@license: GNU General Public License 2.0 or later
@contact: [email protected]
"""
import volatility.plugins.linux.pslist as linux_pslist
from volatility.renderers.basic import Address
from volatility.renderers import TreeGrid
class linux_threads(linux_pslist.linux_pslist):
""" Prints threads of processes """
def unified_output(self, data):
return TreeGrid([("Offset",Address),
("NameProc",str),
("TGID",int),
("ThreadPid",str),
("ThreadName", str),
("thread_offset",Address),
("Addr_limit",Address),
("uid_cred",int),
("gid_cred",int),
("euid_cred",int)
],
self.generator(data))
def generator(self, data):
for task in data:
euidcred = task.euid
uidcred = task.uid
gidcred = task.gid
for thread in task.threads():
addr_limit = self.get_addr_limit(thread)
yield(0,[Address(task.obj_offset),
str(task.comm),
int(task.tgid),
str(thread.pid),
str(thread.comm),
Address(thread.obj_offset),
Address(addr_limit),
int(uidcred),
int(gidcred),
int(euidcred)
])
def get_addr_limit(self,thread, addrvar_offset = 8 ):
"""
        Read a thread's addr_limit by dereferencing the thread's offset plus
        the offset of the addr_limit field inside its thread_info.
:param thread: thread from which we want the information
:param addrvar_offset: offset of the addr_limit var in the thread_info
:return: the addr_limit
"""
addr_space = thread.get_process_address_space()
offset = thread.obj_offset + addrvar_offset
        # Compare against the class name; comparing the class object itself to
        # a string would never match.
        if addr_space.__class__.__name__ == "LinuxAMD64PagedMemory":
return addr_space.read_long_long_phys(offset)
else:
return addr_space.read_long_phys(offset)
def render_text(self, outfd, data):
for task in data:
outfd.write("\nProcess Name: {}\nProcess ID: {}\n".format(task.comm, task.tgid))
self.table_header(outfd, [('Thread PID', '13'), ('Thread Name', '16')])
for thread in task.threads():
self.table_row(outfd, str(thread.pid), thread.comm)
|
Bed Head Pajamas Home And Furniture | Chrisadamczykphotography bedhead pajamas flannel. bedhead pajamas flamingo. bedhead pajamas catalog.
Adorable Bed Head Pajamas Of BedHead Cancer Be Glammed. Appealing Bed Head Pajamas Of Bedhead Girl Snug Fit Classic 4T. Magnificent Bed Head Pajamas In BedHead Cherry Hearts Stretch L S Henley PJ Set 1097 SL7. Appealing Bed Head Pajamas In BHPJ By Bedhead Women S Classic Notch Collar Set. Sophisticated Bed Head Pajamas In BedHead Pink Polar Bear Stretch L S. Fabulous Bed Head Pajamas Of 16 Best Images On Pinterest Pjs And. Best Choice Of Bed Head Pajamas BedHead Women S Stretch Onesie Pajama Made In USA At Amazon. Eye Catching Bed Head Pajamas At Bedhead Coral Flamingo Stretch Classic PJ 1002 S 2319. Endearing Bed Head Pajamas At BHPJ By Bedhead Women S Soft Knit Button Front One Piece. Appealing Bed Head Pajamas In Shop For BedHead Front Close Sleepwear Women HerRoom. Cool Bed Head Pajamas At The Best Of Everything For You REVIEW BEDHEAD PAJAMAS. Attractive Bed Head Pajamas Of 25 Best Bedhead An Oprah Favorite Images On Pinterest. Cool Bed Head Pajamas On BedHead 2PC Women S Classic Woven Pajama Set Table Talk X. Magnificent Bed Head Pajamas In BHPJ By BedHead Bedhead Women S Soft Knit. Unique Bed Head Pajamas At Bedhead Unveils Collaboration With Paper Fashion West 3rd.
|
from CoreDomain import *
import argparse
import pylab
import glob
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--ourdoms',type=str)
parser.add_argument('--dixondoms',type=str)
parser.add_argument('--organism',type=str)
args = parser.parse_args()
if args.organism == 'mouse': chroms = chroms_mouse
elif args.organism == 'human': chroms = chroms_human
dixondoms = []
for chrom in chroms:
dixondoms += read_domains(args.dixondoms,chrom)
ourdoms = {}
for chrom in chroms:
for fname in glob.glob("{0}/*chr{1}.*.alpha*".format(args.ourdoms,chrom)):
gamma = float(fname.split("=")[1][0:-3])
if gamma not in ourdoms: ourdoms[gamma] = []
ourdoms[gamma] += read_domains(fname,chrom)
keys,values = zip(*sorted(ourdoms.items()))
pylab.plot(keys, [ max([d.len() for d in doms])
for doms in values],'o-')
pylab.plot(keys, [ pylab.mean([d.len() for d in doms])
for doms in values],'o-')
pylab.yscale('log')
#pylab.axhline(pylab.mean([d.len() for d in dixondoms]))
pylab.axhline(max([d.len() for d in dixondoms]))
pylab.show()
if __name__=="__main__": main()
|
GMO Z COM PAYMENT GATEWAY MALAYSIA SDN. BHD.
We are a team of experts in both the local and ASEAN markets, providing payment gateway and related supporting services to e-commerce businesses.
If you are looking for companies involved in the Payment System (Settlement System) industry in Malaysia, please visit the M-navi website. This page introduces companies that are developing business in Payment System (Settlement System). You can find information such as service contents, location and job openings for local companies and Japanese companies operating in Payment System (Settlement System).
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
from nupic.algorithms.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
from nupic.data.generators.sequence_machine import SequenceMachine
class AbstractTemporalMemoryTest(object):
__metaclass__ = ABCMeta
VERBOSITY = 1
@abstractmethod
def getTMClass(self):
"""
Implement this method to specify the Temporal Memory class.
"""
@abstractmethod
def getPatternMachine(self):
"""
Implement this method to provide the pattern machine.
"""
def getDefaultTMParams(self):
"""
Override this method to set the default TM params for `self.tm`.
"""
return {}
def setUp(self):
self.tm = None
self.patternMachine = self.getPatternMachine()
self.sequenceMachine = SequenceMachine(self.patternMachine)
def init(self, overrides=None):
"""
Initialize Temporal Memory, and other member variables.
:param overrides: overrides for default Temporal Memory parameters
"""
params = self._computeTMParams(overrides)
class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,
self.getTMClass()): pass
self.tm = MonitoredTemporalMemory(**params)
def feedTM(self, sequence, learn=True, num=1):
repeatedSequence = sequence * num
self.tm.mmClearHistory()
for pattern in repeatedSequence:
if pattern is None:
self.tm.reset()
else:
self.tm.compute(pattern, learn=learn)
# ==============================
# Helper functions
# ==============================
def _computeTMParams(self, overrides):
params = dict(self.getDefaultTMParams())
params.update(overrides or {})
return params
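# A minimal concrete subclass sketch (not part of the harness above), assuming
# the TemporalMemory and ConsecutivePatternMachine import paths used by recent
# NuPIC releases; adjust the imports and parameters to the installed version.
import unittest

from nupic.algorithms.temporal_memory import TemporalMemory
from nupic.data.generators.pattern_machine import ConsecutivePatternMachine


class BasicTemporalMemoryTest(AbstractTemporalMemoryTest, unittest.TestCase):

  def getTMClass(self):
    # The Temporal Memory implementation under test.
    return TemporalMemory

  def getPatternMachine(self):
    # 100-bit patterns with 5 active bits, laid out consecutively.
    return ConsecutivePatternMachine(100, 5)

  def getDefaultTMParams(self):
    # Illustrative parameters matching the 100-column pattern space.
    return {"columnDimensions": (100,), "cellsPerColumn": 4}

  def testFeedSimpleSequence(self):
    self.init()
    # Five consecutive patterns followed by a reset (None), fed twice.
    sequence = self.sequenceMachine.generateFromNumbers(range(5) + [None])
    self.feedTM(sequence, learn=True, num=2)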
|
OBAMA-PUTIN TALKS CANCELLED – DOES RUSSIA EVEN CARE?
The news that the Obama Administration has called off the scheduled talks with Russia’s President Putin is hardly likely to upset the Russian leader, who is chairing the G20 talks in St. Petersburg shortly. Accusations were made by Washington that Putin still has a “Cold War” mentality (true). But the fact is that the US does, too, with Washington caring far more than necessary about relations with a greatly diminished Russia.
It was President Clinton who invited Russia to join the G7, the seven richest nations in the world. Russia was hardly the eighth richest. Even now, it’s way down the list of the biggest economies in the world.
Clinton’s invitation was meant to encourage the development of Russian democracy. With their history, there was little hope of Russia ever forming a democracy anything like America’s. The country just doesn’t fit in the G8 or the G20.
That does not mean to say it’s isolated. Far from it.
Relations with China to the east and Germany to the west are far more important to Moscow than is its relationship with the United States. Also important are relations with neighboring countries that were once a part of the Soviet Union, providing Russia with a buffer around its own borders.
Additionally, Russia is doing very well in the Middle East, offering support to Syria’s President Assad, Iran, and Hezbollah. Russia’s commitment to Syria’s government thwarted western efforts to replace Assad with a more acceptable alternative. Al-Qaeda’s involvement with the rebels is another reason the West backed off.
Iran, Syria, and Hezbollah, together with the new Shia-dominated Iraq (thanks to the US and allies!) gives Shia Islam an arc of security it has never had before. Russia’s role only strengthens this. Moscow will continue to provide them all with arms.
Obama’s cancellation of the meeting was due to Russia giving temporary asylum to Edward Snowden, the former NSA employee who betrayed US secrets. It’s interesting that there was a definite hesitation in granting his request. Putin may be uncomfortable with Snowden – if a man will betray his own country, he’s also likely to betray any other country that might take him in! His asylum is conditional upon him not revealing any more secrets.
The United States can hardly criticize Russia for granting Snowden asylum. America has often granted Russians fleeing their government asylum in the US.
Obama has also been critical of Putin’s new anti-gay law, saying that the Russian leader is out of step with the rest of the world. This is untrue – most of Africa and all of the Islamic Mideast, plus some Asian countries, have similar laws. If anything, this shows that Washington is out of touch with the rest of the world.
It will be interesting to see how the G20 summit progresses.
|
# -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le masque <texte_libre>."""
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
class TexteLibre(Masque):
"""Masque <texte_libre>.
On attend un n'importe quoi en paramètre.
"""
nom = "texte_libre"
nom_complet = "texte libre"
def init(self):
"""Initialisation des attributs"""
self.texte = ""
def repartir(self, personnage, masques, commande):
"""Répartition du masque.
Le masque <texte_libre> prend tout le message.
"""
message = liste_vers_chaine(commande).lstrip()
self.a_interpreter = message
commande[:] = []
if not message:
raise ErreurValidation(
"Entrez quelque chose, au moins.")
masques.append(self)
return True
def valider(self, personnage, dic_masques):
"""Validation du masque"""
Masque.valider(self, personnage, dic_masques)
message = self.a_interpreter
self.texte = message
return True
|
Until I received an email from Swedish guitarist Nils Klöfver, I had never heard of an alto guitar. The instrument was developed 50 years ago in Sweden by luthier Georg Bolin and guitarist Per-Olof Johnson. It is an extended-range guitar with eleven strings (though Bolin sometimes made 13-string ones as well). It arose because Per-Olof Johnson was looking for a way to play lute music using guitar technique. The design introduced two main elements: conventional lute tuning, achieved by tuning the first six strings a minor third higher (hence the name "alto guitar"), and five extra strings to accommodate low notes. The name for the guitar in Swedish is Altgitarr.
This is the name that Nils Klöfver has given to his new disc of music for the alto guitar. Recorded to celebrate the instrument's 50th anniversary, the disc includes music by Bach and Weiss, including Bach's Lute Suite.
But the album has another twist too. It is released on vinyl, and Nils Klöfver has collaborated with artist Jenny Svenberg Bunnel, who has made drawings of Nils; the disc comes with one of three prints of the drawings as its cover. These are printed on a batch of art paper made in the 1970s. The discs are limited edition, individually numbered and signed by the artist. Something for the wall or something for the turntable. Don't worry, you get a digital download as well.
There is an introductory video on YouTube, with full information from the Altgitarr website.
|
#!/usr/bin/python
import sys
import string
def flash(numbers):
m = len(numbers[0])
atom = " \"prim\" : ["
for p in numbers:
atom += p[0] + ", "
atom = atom[:-2] + "],\n"
atom += " \"cont\" : ["
for p in range(1,m):
atom += "["
for q in numbers:
atom += q[p] + ", "
atom = atom[:-2] + "],\n"
atom = atom[:-2] + "]"
return atom
if len(sys.argv) < 2:
sys.exit("specify the filename")
fp = open(sys.argv[1], "r");
lines = fp.read().split("\n")
dict = ["s", "p", "d", "f", "g", "h", "i"]
dictn = []
for i in range(0,100):
dictn.append(str(i))
out = []
out.append("{")
numbers = []
set = ""
atommark = 0
first = 1
for l in lines:
if len(l) == 0:
continue
elif l[0] == "*" or l[0] == ".":
continue
elif l.strip() == "":
continue
if len(l) > 6:
s = l.strip()
if s[3] in dict and s[0] in dictn:
set = s[3]
continue
if atommark == 1:
if len(numbers) > 0:
#flash
out.append(flash(numbers))
out.append(" }")
numbers = []
if (first == 0):
out.append(" ],")
first = 0
chars = list(l.split(" ")[0])
str = " \""+chars[0].upper()
if len(chars) > 1: str += "".join(chars[1:])
str += "\" : [\n"
str += " {"
out.append(str)
atommark = 0
continue
if len(l) > 3:
if l == "$end":
continue
elif l[0] == "$":
atommark = 1
continue
#when does not start with number skip it
if not (l.strip()[0] in dictn): continue
if set != "":
if len(numbers) > 0:
#flash
out.append(flash(numbers))
str = " }, {"
numbers = []
out.append(str)
out.append(" \"angular\" : \"" + set + "\",")
numbers.append(l.split())
set = ""
else:
numbers.append(l.split())
#flash here too
out.append(flash(numbers))
out.append(" }")
out.append(" ]")
out.append("}")
# Convert Fortran-style exponents (e.g. 1.0D-01) to standard E notation. A list
# comprehension is used because rebinding the loop variable would leave 'out'
# unchanged.
out = [l.replace("D-", "E-").replace("D+", "E+") for l in out]
fp.close()
fp = open(sys.argv[1]+".json", "w")
fp.write("\n".join(out))
fp.close()
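# For orientation, the JSON written above has roughly the shape sketched below.
# The element symbol, angular labels and numbers are illustrative placeholders,
# not values from a real basis-set file: "prim" holds the primitive exponents
# (first column of each block) and "cont" holds one list per contraction
# column.
_example_output_shape = {
    "H": [
        {"angular": "s",
         "prim": [13.0107, 1.9620, 0.4446],
         "cont": [[0.0197, 0.1380, 0.4786]]},
        {"angular": "p",
         "prim": [0.8000],
         "cont": [[1.0000]]},
    ]
}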
|
Browse caught up with Blueleaf's Principal Strategist Jenny Lomax to gain her insights on the retail ecommerce world. Jenny, probably the most straight-talking member of the Blueleaf team, put it plainly: "if you're not going forward you're going backwards". It's that simple!
Continuous optimisation is your friend.
You’ve recently gone fully responsive with your ecommerce website and you are hitting your sales targets, with mobile and tablet users enjoying a better experience. Your marketing efforts are paying off and traffic to your website is higher than ever with the new blog and content strategy... things are going very well!
But the cycle continues – your competitors are replicating your successful new website features, vying for the attention of your site visitors and all the time your website revenue targets are on the up. So what do you do?
No matter how good your website is, there is always room for improvement. By implementing continuous optimisation, every single day is an opportunity to work towards increased revenue.
A key method we use is conversion rate optimisation – the concept is simple, we create two or more versions of a page, measure the performance by splitting traffic between the pages, and then implement the winning version.
The changes can range from small (tweaks to copy and button colour) to large (the roll-out of a new service and an entirely fresh product page layout).
Each is tested for the right amount of time to account for anomalies in the sales cycle, and the results reviewed. Are they conclusive? Have they been skewed by an event? What have we learned? Have we just moved the problem? Should the change be made permanent? Do we have new insight that requires a change to the testing plan?
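To make that concrete, here is a minimal sketch (in Python, with made-up visitor and conversion numbers) of the arithmetic behind declaring a winner in a two-variant split test: compare the two conversion rates and check that the gap is larger than the combined sampling noise, using a simple two-proportion z-test.

from math import sqrt

def z_score(conversions_a, visitors_a, conversions_b, visitors_b):
    # Two-proportion z-test: how many standard errors separate the two rates?
    rate_a = conversions_a / float(visitors_a)
    rate_b = conversions_b / float(visitors_b)
    pooled = (conversions_a + conversions_b) / float(visitors_a + visitors_b)
    se = sqrt(pooled * (1 - pooled) * (1.0 / visitors_a + 1.0 / visitors_b))
    return (rate_b - rate_a) / se

# Illustrative numbers only: variant B converts at 3.6% versus 3.0% for A.
z = z_score(300, 10000, 360, 10000)
print "z = %.2f (roughly 1.96 or more suggests significance at the 95%% level)" % z

A result below that threshold usually means the test needs to keep running, which is exactly why each change is left live for the right amount of time before a call is made.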
What would a 5% increase in revenue from your website mean to you?
|
from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _, logger
from bika.lims.config import *
from bika.lims.idserver import renameAfterCreation
from bika.lims.utils import t, tmpID, changeWorkflowState
from bika.lims.utils import to_utf8 as _c
from bika.lims.browser.fields import HistoryAwareReferenceField
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.interfaces import IWorksheet
from bika.lims.permissions import EditWorksheet, ManageWorksheets
from bika.lims.workflow import doActionFor
from bika.lims.workflow import skip
from DateTime import DateTime
from operator import itemgetter
from plone.indexer import indexer
from Products.Archetypes.config import REFERENCE_CATALOG
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.ATContentTypes.lib.historyaware import HistoryAwareMixin
from Products.ATExtensions.ateapi import RecordsField
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode, _createObjectByType
from zope.interface import implements
import re
@indexer(IWorksheet)
def Priority(instance):
priority = instance.getPriority()
if priority:
return priority.getSortKey()
schema = BikaSchema.copy() + Schema((
HistoryAwareReferenceField('WorksheetTemplate',
allowed_types=('WorksheetTemplate',),
relationship='WorksheetAnalysisTemplate',
),
ComputedField('WorksheetTemplateTitle',
searchable=True,
expression="context.getWorksheetTemplate() and context.getWorksheetTemplate().Title() or ''",
widget=ComputedWidget(
visible=False,
),
),
RecordsField('Layout',
required=1,
subfields=('position', 'type', 'container_uid', 'analysis_uid'),
subfield_types={'position': 'int'},
),
# all layout info lives in Layout; Analyses is used for back references.
ReferenceField('Analyses',
required=1,
multiValued=1,
allowed_types=('Analysis', 'DuplicateAnalysis', 'ReferenceAnalysis', 'RejectAnalysis'),
relationship = 'WorksheetAnalysis',
),
StringField('Analyst',
searchable = True,
),
# TODO Remove. Instruments must be assigned directly to each analysis.
ReferenceField('Instrument',
required = 0,
allowed_types = ('Instrument',),
relationship = 'WorksheetInstrument',
referenceClass = HoldingReference,
),
TextField('Remarks',
searchable = True,
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
macro="bika_widgets/remarks",
label=_("Remarks"),
append_only=True,
),
),
StringField('ResultsLayout',
default = '1',
vocabulary = WORKSHEET_LAYOUT_OPTIONS,
),
),
)
schema['id'].required = 0
schema['id'].widget.visible = False
schema['title'].required = 0
schema['title'].widget.visible = {'edit': 'hidden', 'view': 'invisible'}
class Worksheet(BaseFolder, HistoryAwareMixin):
security = ClassSecurityInfo()
implements(IWorksheet)
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def Title(self):
return safe_unicode(self.getId()).encode('utf-8')
def getFolderContents(self, contentFilter):
# The bika_listing machine passes contentFilter to all
# contentsMethod methods. We ignore it.
return list(self.getAnalyses())
security.declareProtected(EditWorksheet, 'addAnalysis')
def addAnalysis(self, analysis, position=None):
"""- add the analysis to self.Analyses().
- position is overruled if a slot for this analysis' parent exists
- if position is None, next available pos is used.
"""
workflow = getToolByName(self, 'portal_workflow')
analysis_uid = analysis.UID()
parent_uid = analysis.aq_parent.UID()
analyses = self.getAnalyses()
layout = self.getLayout()
# check if this analysis is already in the layout
if analysis_uid in [l['analysis_uid'] for l in layout]:
return
# If the ws has an instrument assigned for which the analysis
# is allowed, set it
instr = self.getInstrument()
if instr and analysis.isInstrumentAllowed(instr):
# Set the method assigned to the selected instrument
analysis.setMethod(instr.getMethod())
analysis.setInstrument(instr)
self.setAnalyses(analyses + [analysis, ])
# if our parent has a position, use that one.
if analysis.aq_parent.UID() in [slot['container_uid'] for slot in layout]:
position = [int(slot['position']) for slot in layout if
slot['container_uid'] == analysis.aq_parent.UID()][0]
else:
# prefer supplied position parameter
if not position:
used_positions = [0, ] + [int(slot['position']) for slot in layout]
position = [pos for pos in range(1, max(used_positions) + 2)
if pos not in used_positions][0]
self.setLayout(layout + [{'position': position,
'type': 'a',
'container_uid': parent_uid,
'analysis_uid': analysis.UID()}, ])
allowed_transitions = [t['id'] for t in workflow.getTransitionsFor(analysis)]
if 'assign' in allowed_transitions:
workflow.doActionFor(analysis, 'assign')
# If a dependency of DryMatter service is added here, we need to
# make sure that the dry matter analysis itself is also
# present. Otherwise WS calculations refer to the DB version
# of the DM analysis, which is out of sync with the form.
dms = self.bika_setup.getDryMatterService()
if dms:
dmk = dms.getKeyword()
deps = analysis.getDependents()
# if dry matter service in my dependents:
if dmk in [a.getService().getKeyword() for a in deps]:
# get dry matter analysis from AR
dma = analysis.aq_parent.getAnalyses(getKeyword=dmk,
full_objects=True)[0]
# add it.
if dma not in self.getAnalyses():
self.addAnalysis(dma)
security.declareProtected(EditWorksheet, 'removeAnalysis')
def removeAnalysis(self, analysis):
""" delete an analyses from the worksheet and un-assign it
"""
workflow = getToolByName(self, 'portal_workflow')
# overwrite saved context UID for event subscriber
self.REQUEST['context_uid'] = self.UID()
workflow.doActionFor(analysis, 'unassign')
# Note: subscriber might unassign the AR and/or promote the worksheet
# remove analysis from context.Analyses *after* unassign,
# (doActionFor requires worksheet in analysis.getBackReferences)
Analyses = self.getAnalyses()
if analysis in Analyses:
Analyses.remove(analysis)
self.setAnalyses(Analyses)
layout = [slot for slot in self.getLayout() if slot['analysis_uid'] != analysis.UID()]
self.setLayout(layout)
if analysis.portal_type == "DuplicateAnalysis":
self._delObject(analysis.id)
def addReferences(self, position, reference, service_uids):
""" Add reference analyses to reference, and add to worksheet layout
"""
workflow = getToolByName(self, 'portal_workflow')
rc = getToolByName(self, REFERENCE_CATALOG)
layout = self.getLayout()
wst = self.getWorksheetTemplate()
wstlayout = wst and wst.getLayout() or []
ref_type = reference.getBlank() and 'b' or 'c'
ref_uid = reference.UID()
if position == 'new':
highest_existing_position = len(wstlayout)
for pos in [int(slot['position']) for slot in layout]:
if pos > highest_existing_position:
highest_existing_position = pos
position = highest_existing_position + 1
# LIMS-2132 Reference Analyses got the same ID
refgid = self.nextReferenceAnalysesGroupID(reference)
for service_uid in service_uids:
# services with dependents don't belong in references
service = rc.lookupObject(service_uid)
calc = service.getCalculation()
if calc and calc.getDependentServices():
continue
ref_uid = reference.addReferenceAnalysis(service_uid, ref_type)
ref_analysis = rc.lookupObject(ref_uid)
# Set ReferenceAnalysesGroupID (same id for the analyses from
# the same Reference Sample and same Worksheet)
# https://github.com/bikalabs/Bika-LIMS/issues/931
ref_analysis.setReferenceAnalysesGroupID(refgid)
ref_analysis.reindexObject(idxs=["getReferenceAnalysesGroupID"])
# copy the interimfields
if calc:
ref_analysis.setInterimFields(calc.getInterimFields())
self.setLayout(
self.getLayout() + [{'position': position,
'type': ref_type,
'container_uid': reference.UID(),
'analysis_uid': ref_analysis.UID()}])
self.setAnalyses(
self.getAnalyses() + [ref_analysis, ])
workflow.doActionFor(ref_analysis, 'assign')
def nextReferenceAnalysesGroupID(self, reference):
""" Returns the next ReferenceAnalysesGroupID for the given reference
            sample. Gets the last reference analysis registered in the system
            for the specified reference sample and increments its suffix by
            one.
"""
bac = getToolByName(reference, 'bika_analysis_catalog')
ids = bac.Indexes['getReferenceAnalysesGroupID'].uniqueValues()
prefix = reference.id+"-"
        rr = re.compile(r"^" + prefix + r"\d+$")
ids = [int(i.split(prefix)[1]) for i in ids if i and rr.match(i)]
ids.sort()
_id = ids[-1] if ids else 0
suffix = str(_id+1).zfill(int(3))
return '%s%s' % (prefix, suffix)
security.declareProtected(EditWorksheet, 'addDuplicateAnalyses')
def addDuplicateAnalyses(self, src_slot, dest_slot):
""" add duplicate analyses to worksheet
"""
rc = getToolByName(self, REFERENCE_CATALOG)
workflow = getToolByName(self, 'portal_workflow')
layout = self.getLayout()
wst = self.getWorksheetTemplate()
wstlayout = wst and wst.getLayout() or []
src_ar = [slot['container_uid'] for slot in layout if
slot['position'] == src_slot]
if src_ar:
src_ar = src_ar[0]
if not dest_slot or dest_slot == 'new':
highest_existing_position = len(wstlayout)
for pos in [int(slot['position']) for slot in layout]:
if pos > highest_existing_position:
highest_existing_position = pos
dest_slot = highest_existing_position + 1
src_analyses = [rc.lookupObject(slot['analysis_uid'])
for slot in layout if
int(slot['position']) == int(src_slot)]
dest_analyses = [rc.lookupObject(slot['analysis_uid']).getAnalysis().UID()
for slot in layout if
int(slot['position']) == int(dest_slot)]
refgid = None
processed = []
for analysis in src_analyses:
if analysis.UID() in dest_analyses:
continue
# If retracted analyses, for some reason, the getLayout() returns
# two times the regular analysis generated automatically after a
# a retraction.
if analysis.UID() in processed:
continue
# Omit retracted analyses
# https://jira.bikalabs.com/browse/LIMS-1745
# https://jira.bikalabs.com/browse/LIMS-2001
if workflow.getInfoFor(analysis, "review_state") == 'retracted':
continue
processed.append(analysis.UID())
# services with dependents don't belong in duplicates
service = analysis.getService()
calc = service.getCalculation()
if calc and calc.getDependentServices():
continue
service = analysis.getService()
_id = self._findUniqueId(service.getKeyword())
duplicate = _createObjectByType("DuplicateAnalysis", self, _id)
duplicate.setAnalysis(analysis)
# Set ReferenceAnalysesGroupID (same id for the analyses from
# the same Reference Sample and same Worksheet)
# https://github.com/bikalabs/Bika-LIMS/issues/931
if not refgid and not analysis.portal_type == 'ReferenceAnalysis':
part = analysis.getSamplePartition().id
dups = [an.getReferenceAnalysesGroupID()
for an in self.getAnalyses()
if an.portal_type == 'DuplicateAnalysis'
and an.getSamplePartition().id == part]
dups = list(set(dups))
postfix = dups and len(dups) + 1 or 1
postfix = str(postfix).zfill(int(2))
refgid = '%s-D%s' % (part, postfix)
duplicate.setReferenceAnalysesGroupID(refgid)
duplicate.reindexObject(idxs=["getReferenceAnalysesGroupID"])
duplicate.processForm()
if calc:
duplicate.setInterimFields(calc.getInterimFields())
self.setLayout(
self.getLayout() + [{'position': dest_slot,
'type': 'd',
'container_uid': analysis.aq_parent.UID(),
'analysis_uid': duplicate.UID()}, ]
)
self.setAnalyses(self.getAnalyses() + [duplicate, ])
workflow.doActionFor(duplicate, 'assign')
def applyWorksheetTemplate(self, wst):
""" Add analyses to worksheet according to wst's layout.
Will not overwrite slots which are filled already.
If the selected template has an instrument assigned, it will
only be applied to those analyses for which the instrument
is allowed
"""
rc = getToolByName(self, REFERENCE_CATALOG)
bac = getToolByName(self, "bika_analysis_catalog")
bc = getToolByName(self, 'bika_catalog')
layout = self.getLayout()
wstlayout = wst.getLayout()
services = wst.getService()
wst_service_uids = [s.UID() for s in services]
wst_slots = [row['pos'] for row in wstlayout if row['type'] == 'a']
ws_slots = [row['position'] for row in layout if row['type'] == 'a']
nr_slots = len(wst_slots) - len(ws_slots)
positions = [pos for pos in wst_slots if pos not in ws_slots]
analyses = bac(portal_type='Analysis',
getServiceUID=wst_service_uids,
review_state='sample_received',
worksheetanalysis_review_state='unassigned',
cancellation_state = 'active',
sort_on='getDueDate')
# ar_analyses is used to group analyses by AR.
ar_analyses = {}
instr = self.getInstrument() if self.getInstrument() else wst.getInstrument()
for brain in analyses:
analysis = brain.getObject()
if instr and brain.getObject().isInstrumentAllowed(instr) is False:
# Exclude those analyses for which the ws selected
# instrument is not allowed
continue
ar_id = brain.getRequestID
if ar_id in ar_analyses:
ar_analyses[ar_id].append(analysis)
else:
if len(ar_analyses.keys()) < nr_slots:
ar_analyses[ar_id] = [analysis, ]
# Add analyses, sorted by AR ID
ars = sorted(ar_analyses.keys())
for ar in ars:
for analysis in ar_analyses[ar]:
self.addAnalysis(analysis, position=positions[ars.index(ar)])
# find best maching reference samples for Blanks and Controls
for t in ('b', 'c'):
form_key = t == 'b' and 'blank_ref' or 'control_ref'
ws_slots = [row['position'] for row in layout if row['type'] == t]
for row in [r for r in wstlayout if
r['type'] == t and r['pos'] not in ws_slots]:
reference_definition_uid = row[form_key]
samples = bc(portal_type='ReferenceSample',
review_state='current',
inactive_state='active',
getReferenceDefinitionUID=reference_definition_uid)
if not samples:
break
samples = [s.getObject() for s in samples]
if t == 'b':
samples = [s for s in samples if s.getBlank()]
else:
samples = [s for s in samples if not s.getBlank()]
complete_reference_found = False
references = {}
for reference in samples:
reference_uid = reference.UID()
references[reference_uid] = {}
references[reference_uid]['services'] = []
references[reference_uid]['count'] = 0
specs = reference.getResultsRangeDict()
for service_uid in wst_service_uids:
if service_uid in specs:
references[reference_uid]['services'].append(service_uid)
references[reference_uid]['count'] += 1
if references[reference_uid]['count'] == len(wst_service_uids):
complete_reference_found = True
break
if complete_reference_found:
supported_uids = wst_service_uids
self.addReferences(int(row['pos']),
reference,
supported_uids)
else:
# find the most complete reference sample instead
reference_keys = references.keys()
no_of_services = 0
reference = None
for key in reference_keys:
if references[key]['count'] > no_of_services:
no_of_services = references[key]['count']
reference = key
if reference:
reference = rc.lookupObject(reference)
supported_uids = [s.UID() for s in reference.getServices()
if s.UID() in wst_service_uids]
self.addReferences(int(row['pos']),
reference,
supported_uids)
# fill duplicate positions
layout = self.getLayout()
ws_slots = [row['position'] for row in layout if row['type'] == 'd']
for row in [r for r in wstlayout if
r['type'] == 'd' and r['pos'] not in ws_slots]:
dest_pos = int(row['pos'])
src_pos = int(row['dup'])
if src_pos in [int(slot['position']) for slot in layout]:
self.addDuplicateAnalyses(src_pos, dest_pos)
# Apply the wst instrument to all analyses and ws
if instr:
self.setInstrument(instr, True)
def exportAnalyses(self, REQUEST=None, RESPONSE=None):
""" Export analyses from this worksheet """
import bika.lims.InstrumentExport as InstrumentExport
instrument = REQUEST.form['getInstrument']
try:
func = getattr(InstrumentExport, "%s_export" % instrument)
except:
return
func(self, REQUEST, RESPONSE)
return
security.declarePublic('getWorksheetServices')
def getWorksheetServices(self):
""" get list of analysis services present on this worksheet
"""
services = []
for analysis in self.getAnalyses():
service = analysis.getService()
if service not in services:
services.append(service)
return services
security.declareProtected(EditWorksheet, 'resequenceWorksheet')
def resequenceWorksheet(self, REQUEST=None, RESPONSE=None):
""" Reset the sequence of analyses in the worksheet """
""" sequence is [{'pos': , 'type': , 'uid', 'key'},] """
old_seq = self.getLayout()
new_dict = {}
new_seq = []
other_dict = {}
for seq in old_seq:
if seq['key'] == '':
if seq['pos'] not in other_dict:
other_dict[seq['pos']] = []
other_dict[seq['pos']].append(seq)
continue
if seq['key'] not in new_dict:
new_dict[seq['key']] = []
analyses = new_dict[seq['key']]
analyses.append(seq)
new_dict[seq['key']] = analyses
new_keys = sorted(new_dict.keys())
rc = getToolByName(self, REFERENCE_CATALOG)
seqno = 1
for key in new_keys:
analyses = {}
if len(new_dict[key]) == 1:
new_dict[key][0]['pos'] = seqno
new_seq.append(new_dict[key][0])
else:
for item in new_dict[key]:
item['pos'] = seqno
analysis = rc.lookupObject(item['uid'])
service = analysis.Title()
analyses[service] = item
a_keys = sorted(analyses.keys())
for a_key in a_keys:
new_seq.append(analyses[a_key])
seqno += 1
other_keys = other_dict.keys()
other_keys.sort()
for other_key in other_keys:
for item in other_dict[other_key]:
item['pos'] = seqno
new_seq.append(item)
seqno += 1
self.setLayout(new_seq)
RESPONSE.redirect('%s/manage_results' % self.absolute_url())
security.declarePublic('current_date')
def current_date(self):
""" return current date """
return DateTime()
def setInstrument(self, instrument, override_analyses=False):
""" Sets the specified instrument to the Analysis from the
Worksheet. Only sets the instrument if the Analysis
allows it, according to its Analysis Service and Method.
If an analysis has already assigned an instrument, it won't
be overriden.
The Analyses that don't allow the instrument specified will
not be modified.
Returns the number of analyses affected
"""
analyses = [an for an in self.getAnalyses()
if (not an.getInstrument() or override_analyses)
and an.isInstrumentAllowed(instrument)]
total = 0
for an in analyses:
            # An analysis can be performed using different Methods.
            # A method can be supported by more than one Instrument,
            # but not every instrument supports every method.
            # We must also set the instrument's method explicitly; otherwise
            # the WS manage-results view would show the analysis' default
            # method and only list the instruments for that default method
            # in the picklist.
meth = instrument.getMethod()
if an.isMethodAllowed(meth):
an.setMethod(meth)
success = an.setInstrument(instrument)
if success is True:
total += 1
self.getField('Instrument').set(self, instrument)
return total
def getAnalystName(self):
""" Returns the name of the currently assigned analyst
"""
mtool = getToolByName(self, 'portal_membership')
analyst = self.getAnalyst().strip()
analyst_member = mtool.getMemberById(analyst)
if analyst_member != None:
return analyst_member.getProperty('fullname')
else:
return analyst
def workflow_script_submit(self):
# Don't cascade. Shouldn't be submitting WSs directly for now,
# except edge cases where all analyses are already submitted,
# but self was held back until an analyst was assigned.
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
can_attach = True
for a in self.getAnalyses():
if workflow.getInfoFor(a, 'review_state') in \
('to_be_sampled', 'to_be_preserved', 'sample_due',
'sample_received', 'attachment_due', 'assigned',):
# Note: referenceanalyses and duplicateanalyses can still
# have review_state = "assigned".
can_attach = False
break
if can_attach:
doActionFor(self, 'attach')
def workflow_script_attach(self):
if skip(self, "attach"):
return
self.reindexObject(idxs=["review_state", ])
# Don't cascade. Shouldn't be attaching WSs for now (if ever).
return
def workflow_script_retract(self):
if skip(self, "retract"):
return
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
if not "retract all analyses" in self.REQUEST['workflow_skiplist']:
# retract all analyses in this self.
# (NB: don't retract if it's verified)
analyses = self.getAnalyses()
for analysis in analyses:
state = workflow.getInfoFor(analysis, 'review_state', '')
if state not in ('attachment_due', 'to_be_verified',):
continue
doActionFor(analysis, 'retract')
def workflow_script_verify(self):
if skip(self, "verify"):
return
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
if not "verify all analyses" in self.REQUEST['workflow_skiplist']:
# verify all analyses in this self.
analyses = self.getAnalyses()
for analysis in analyses:
state = workflow.getInfoFor(analysis, 'review_state', '')
if state != 'to_be_verified':
continue
doActionFor(analysis, "verify")
def workflow_script_reject(self):
"""Copy real analyses to RejectAnalysis, with link to real
create a new worksheet, with the original analyses, and new
duplicates and references to match the rejected
worksheet.
"""
if skip(self, "reject"):
return
utils = getToolByName(self, 'plone_utils')
workflow = self.portal_workflow
def copy_src_fields_to_dst(src, dst):
# These will be ignored when copying field values between analyses
ignore_fields = ['UID',
'id',
'title',
'allowDiscussion',
'subject',
'description',
'location',
'contributors',
'creators',
'effectiveDate',
'expirationDate',
'language',
'rights',
'creation_date',
'modification_date',
'Layout', # ws
'Analyses', # ws
]
fields = src.Schema().fields()
for field in fields:
fieldname = field.getName()
if fieldname in ignore_fields:
continue
getter = getattr(src, 'get'+fieldname,
src.Schema().getField(fieldname).getAccessor(src))
setter = getattr(dst, 'set'+fieldname,
dst.Schema().getField(fieldname).getMutator(dst))
if getter is None or setter is None:
# ComputedField
continue
setter(getter())
analysis_positions = {}
for item in self.getLayout():
analysis_positions[item['analysis_uid']] = item['position']
old_layout = []
new_layout = []
# New worksheet
worksheets = self.aq_parent
new_ws = _createObjectByType('Worksheet', worksheets, tmpID())
new_ws.unmarkCreationFlag()
new_ws_id = renameAfterCreation(new_ws)
copy_src_fields_to_dst(self, new_ws)
new_ws.edit(
Number = new_ws_id,
Remarks = self.getRemarks()
)
# Objects are being created inside other contexts, but we want their
# workflow handlers to be aware of which worksheet this is occurring in.
# We save the worksheet in request['context_uid'].
# We reset it again below.... be very sure that this is set to the
# UID of the containing worksheet before invoking any transitions on
# analyses.
self.REQUEST['context_uid'] = new_ws.UID()
# loop all analyses
analyses = self.getAnalyses()
new_ws_analyses = []
old_ws_analyses = []
for analysis in analyses:
# Skip published or verified analyses
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state in ['published', 'verified', 'retracted']:
                # Look up this analysis' slot in the original layout;
                # 'position' is otherwise undefined (or stale) in this branch.
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({'position': position,
                                   'type': 'a',
                                   'analysis_uid': analysis.UID(),
                                   'container_uid': analysis.aq_parent.UID()})
continue
# Normal analyses:
# - Create matching RejectAnalysis inside old WS
# - Link analysis to new WS in same position
# - Copy all field values
# - Clear analysis result, and set Retested flag
if analysis.portal_type == 'Analysis':
reject = _createObjectByType('RejectAnalysis', self, tmpID())
reject.unmarkCreationFlag()
reject_id = renameAfterCreation(reject)
copy_src_fields_to_dst(analysis, reject)
reject.setAnalysis(analysis)
reject.reindexObject()
analysis.edit(
Result = None,
Retested = True,
)
analysis.reindexObject()
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(reject.UID())
old_layout.append({'position': position,
'type':'r',
'analysis_uid':reject.UID(),
'container_uid':self.UID()})
new_ws_analyses.append(analysis.UID())
new_layout.append({'position': position,
'type':'a',
'analysis_uid':analysis.UID(),
'container_uid':analysis.aq_parent.UID()})
# Reference analyses
# - Create a new reference analysis in the new worksheet
# - Transition the original analysis to 'rejected' state
if analysis.portal_type == 'ReferenceAnalysis':
service_uid = analysis.getService().UID()
reference = analysis.aq_parent
reference_type = analysis.getReferenceType()
new_analysis_uid = reference.addReferenceAnalysis(service_uid,
reference_type)
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type':reference_type,
'analysis_uid':analysis.UID(),
'container_uid':reference.UID()})
new_ws_analyses.append(new_analysis_uid)
new_layout.append({'position': position,
'type':reference_type,
'analysis_uid':new_analysis_uid,
'container_uid':reference.UID()})
workflow.doActionFor(analysis, 'reject')
new_reference = reference.uid_catalog(UID=new_analysis_uid)[0].getObject()
workflow.doActionFor(new_reference, 'assign')
analysis.reindexObject()
# Duplicate analyses
# - Create a new duplicate inside the new worksheet
# - Transition the original analysis to 'rejected' state
if analysis.portal_type == 'DuplicateAnalysis':
src_analysis = analysis.getAnalysis()
ar = src_analysis.aq_parent
service = src_analysis.getService()
duplicate_id = new_ws.generateUniqueId('DuplicateAnalysis')
new_duplicate = _createObjectByType('DuplicateAnalysis',
new_ws, duplicate_id)
new_duplicate.unmarkCreationFlag()
copy_src_fields_to_dst(analysis, new_duplicate)
workflow.doActionFor(new_duplicate, 'assign')
new_duplicate.reindexObject()
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type':'d',
'analysis_uid':analysis.UID(),
'container_uid':self.UID()})
new_ws_analyses.append(new_duplicate.UID())
new_layout.append({'position': position,
'type':'d',
'analysis_uid':new_duplicate.UID(),
'container_uid':new_ws.UID()})
workflow.doActionFor(analysis, 'reject')
analysis.reindexObject()
new_ws.setAnalyses(new_ws_analyses)
new_ws.setLayout(new_layout)
new_ws.replaces_rejected_worksheet = self.UID()
for analysis in new_ws.getAnalyses():
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state == 'to_be_verified':
changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")
self.REQUEST['context_uid'] = self.UID()
self.setLayout(old_layout)
self.setAnalyses(old_ws_analyses)
self.replaced_by = new_ws.UID()
def checkUserManage(self):
""" Checks if the current user has granted access to this worksheet
and if has also privileges for managing it.
"""
granted = False
can_access = self.checkUserAccess()
if can_access == True:
pm = getToolByName(self, 'portal_membership')
edit_allowed = pm.checkPermission(EditWorksheet, self)
if edit_allowed:
# Check if the current user is the WS's current analyst
member = pm.getAuthenticatedMember()
analyst = self.getAnalyst().strip()
if analyst != _c(member.getId()):
# Has management privileges?
if pm.checkPermission(ManageWorksheets, self):
granted = True
else:
granted = True
return granted
def checkUserAccess(self):
""" Checks if the current user has granted access to this worksheet.
Returns False if the user has no access, otherwise returns True
"""
# Deny access to foreign analysts
allowed = True
pm = getToolByName(self, "portal_membership")
member = pm.getAuthenticatedMember()
analyst = self.getAnalyst().strip()
if analyst != _c(member.getId()):
roles = member.getRoles()
restrict = 'Manager' not in roles \
and 'LabManager' not in roles \
and 'LabClerk' not in roles \
and 'RegulatoryInspector' not in roles \
and self.bika_setup.getRestrictWorksheetUsersAccess()
allowed = not restrict
return allowed
def setAnalyst(self,analyst):
for analysis in self.getAnalyses():
analysis.setAnalyst(analyst)
self.Schema().getField('Analyst').set(self, analyst)
security.declarePublic('getPriority')
def getPriority(self):
""" get highest priority from all analyses
"""
analyses = self.getAnalyses()
priorities = []
for analysis in analyses:
if not hasattr(analysis, 'getPriority'):
continue
if analysis.getPriority():
priorities.append(analysis.getPriority())
priorities = sorted(priorities, key = itemgetter('sortKey'))
if priorities:
return priorities[-1]
registerType(Worksheet, PROJECTNAME)
|
Footbed: Removable molded PU-like EVA footbed with textile topcover.
Women's Kinetic™ Sneaker is rated 4.6 out of 5 by 89.
Rated 5 out of 5 by Shay6 from So comfortable! Favorite pair of sneakers that I own. (I got them in black). These are my go-to pair...perfect for travel. The only slight issue that I have noticed is a stitch that started to unravel on the ankle after a few wears. Not a big enough issue for me to go through the exchange process. Overall they are great and I get tons of compliments!
Rated 5 out of 5 by Meljt from So comfortable I have very wide feet and can never find cute shoes that fit. I normally wear 7.5 but got these in 8. They fit perfectly in the length and width. They are the most comfortable shoes I have worn in forever. I’m on my feet all day and they felt great from the first time wearing them. And they are stylish; I get so many compliments on them. I think I will buy a pair in every color!
Rated 5 out of 5 by Seatea from Like glove! My narrow feet are difficult to find shoes for. That being said, these fit like a glove! The wrap around Velcro is the best! You can adjust these to any foot, narrow or wide or in between. Sizes run real to size. I am on my feet all day at work and these have been god sent!
Rated 5 out of 5 by sroy from Comfy and cute Love these! Very lightweight, comfortable, and super stylish. The green is nicely neutral and muted. Would wear them every day if it wouldn’t make my other shoes and sneaks jealous. My one complaint is that the pull tabs on the heels are too tiny to fit a finger in there, which you need to do to pull these on. Sorel, maybe fix that next time?
Rated 5 out of 5 by Miasarah from Amazing!!! This is the best sneakers I’ve owned so far. The most comfortable especially that I’m on my feet 8 hrs a day. I ordered a half size up so I can put the Dr Scholls gel support. I highly recommend it! I had lots of comments wearing it.
Rated 5 out of 5 by JenDogMom from Love these shoes so much! To be honest, I love ALL the shoes I've gotten from Sorel, but I liked the Kinetic Sneaker so much, I bought another pair after wearing them to work once. I must have had 10-15 compliments on the shoes, and I recommended them to anyone who would listen. Sorel's are the most comfortable shoes I have, and they are stylish, in great colors, and weather resistant. The Kinetic is also warm and didn't saturate when I walked in water -- it's been a wet winter and I have dogs who want to go on at least three walks a day. These shoes get me through the coldest, wettest and ugliest days. Thank you SOREL!
Rated 5 out of 5 by Kineticlover from Super comfortable and stylish! Shoes fit true to size and are very comfortable. They are my favorite tennis shoe and get lots of compliments! Is nice to a have a tennis shoe that’s unique, stylish and comfortable.
Rated 3 out of 5 by trinabean from Different shoe than online and box?? I own at least 10 pairs of Sorels and this makes me sad. Sorels are not cheap so I’m a little suprised. These were purchased online direct from Sorel.com. The shoe obviously has had some adjustments that do not match the box or online. Very disappointed. The shoe is all mesh and definitely not as dressy as the pic on box or online. The online product needs to match what is delivered.
Rated 1 out of 5 by Janice122 from Item sent, not what is advertised in pictures They are still showing the incorrect picture. Even the box the shoes came in show what's being shown online. However, the shoe they send is entirely the mesh material.
Rated 5 out of 5 by K8honea from Most compliments on a shoe ever! I love the Kinetic Sneaker by Sorel! Everytime I wear them out I get so many compliments. These sneakers are not only fashionable, but they are comfortable and functional. I got them in the copper blush color and they go with so many outfits. They are feminine and sporty. You can dress them up or down. I am a dancer so comfort is key, and these sneakers are so supportive. These shoes are my second purchase from Sorel, and I couldn't be more happy with the quality from this brand.
Rated 5 out of 5 by mademlivmom from Sorel does it again! Love love love these! I wear them everywhere. Perfect to dress up a jeans and tshirt look. Fit is true to size. Love how they can get messy and just wipe them off and good as new! Everything I love abut my kids shoes now in a super cool look for me!! YAY SOREL!
Rated 1 out of 5 by Katie33 from Not as pictured I’m a loyal Sorel customer. Unfortunately what I ordered and was pictured on the website was not what I received. Hopefully Sorel will correct this issue.
Rated 5 out of 5 by Shan70 from Fabulous shoes These shoes are fabulous; they are so comfortable and very stylish!! I'm on my feet 12 hours a day and the shoes make that a lot easier. Love, love these sneakers!!
Rated 5 out of 5 by JenKnut from I was unsure I love the look of these but I was unsure about the comfort factor. I have had plantar fasciitis in the past and if I am not careful with what I wear it will flare up. These are beyond supportive and comfortable. I highly recommend. They are super cute too!
Rated 5 out of 5 by TJZhappy from love these shoes!!! A great shoe! I was looking for a good travel and errand running shoe, something to run around town and through the airport. These fit the bill and more. They're great looking, super comfortable, light weight and fun to wear. They're so fabulous, in fact, that I just bought a pair for my daughter and she loves them too. They are true to your Sorel size, and you can adjust the width for thicker socks with the handy velcro straps. My go to shoe....thanks Sorel!
Rated 5 out of 5 by roadgait from Comfy stylish Love these stylish sneakers. Super comfortable. Always get compliments on my Sorel footwear.
Rated 5 out of 5 by Dawn01 from Love these Kinetic Sneakers! I love these shoes! As with my other Sorels, they are so incredibly comfy! I am a PE teacher, on my feet all day and these keep my feet feeling good all day. Also, they are so cute and I get lots of compliments! Many tennis shoes are comfortable but these are like ‘glam’ tennis shoes! I love them. My feet are pretty normal as far as width, I don’t have to get wide or narrow shoes, but these could be considered a little narrow so might want to size up.
Rated 5 out of 5 by Brit75 from Love them I really like these shoes. They are really comfortable and look really trendy.
Rated 5 out of 5 by Basinbabe from Sweet sneaks These are the most beautiful sneakers I’ve ever seen or worn. Stylish and comfort finally got married. And it’s a beautiful relationship! It has a short arch so if you need more height in the shoe, order up a half size. I usually wear an 8 and the size 8 kinetic sneaker fits well.
Rated 5 out of 5 by gcho from My go to sneaker!! I absolutely love these sneakers! So comfy, unique, stylish... did I say comfy?! I love them so much, bought them in all 3 colors!! There is no break-in period, comfortable on the first wear. I get compliments on them EVERY time I wear them. A splurge worth splurging on! Some had mentioned to size up... I am normally a 7, and am glad I stuck to ordering my size. The 7 is perfect. Love them for running around town... errands or casual days/nights out.
Rated 5 out of 5 by Abbey480 from So unique, stylish, and COMFORTABLE I wasn't sure I was going to like these, but when I saw them in person I was so in love. I wear them almost everyday. When I'm out I get all kinds of compliments and people ask me where I got them. Super comfy, I could wear them all day long from the moment I got them with no "break-in" issues. I read many comments that they run small, but were very true-to-size for me. Happy I didn't size up. If you're thinking about getting these, DO IT! Worth the splurge IMO.
Rated 4 out of 5 by Tory45 from My Go To Everyday Shoes I absolutely love these sneakers! They conveniently slip on, and I wear them with everything from athletic wear to casual outfits with jeans or dresses. I purchased the black and they have been so comfortable so far. I plan to wear them while traveling. I fluctuate between an 11 and 12 in Sorel. Some styles run larger, so I wear an 11. These are true to size at a 12. I did order an 11 originally and had to exchange. I wish Sorel offered free shipping/returns/exchanges. I would give 5 stars if they did. That component of customer service makes a big difference.
Rated 5 out of 5 by Mahutie from Exactly as Pictured First of all, order your true size. I'm a perfect 7.5 in all of Sorel's footwear and footwear of other leading brands, and these were no different. Secondly, these are very comfortable; I bought these for my daily 2 miles walking commute to and from work, and they do not disappoint. They look so cute with leggings! I get compliments every time I wear them, I do not see glue on the edges like another reviewer said, and they look (and feel) very high-quality. One woman stopped me the other day and said they looked like a certain high-end fashion designer shoe. The only con I have with these shoes is they aren't the most breathable, so I would highly recommend wearing smartwool socks with them because they keep my feet from getting sweaty.
Rated 1 out of 5 by Jamerican from Not Satisfied I was eager to receive these. Once I received them, they were not what is shown in the picture. In person they look dull, you can see the glue on the sides, and they were still too tight. I read the reviews before purchasing and went up 1/2 a size.
Rated 5 out of 5 by Monroe22 from Love these shoes! Not only do they look cool, they fit perfectly. I receive compliments every time I wear them from men, women and even kids!
Rated 5 out of 5 by RDNClaudia from Great Customer Service Love these stylish shoes! Thank you, Reese, for all of your assistance. I will definitely purchase from your site again in the future.
Rated 5 out of 5 by Corey from Comfortable & Versatile I always get compliments when wearing these shoes. They look great with any jeans and are comfortable for any workout. I bought the wine color and they’re amazing. Can’t wait to get them in black as well.
Rated 5 out of 5 by Aliese725 from Awesome! I have been eyeing the Kinetic sneaker for a while. I finally decided to buy them for a trip to the west coast and they are awesome. I've gotten so many compliments and people asking 'what are those?' Although I rated them as true to size, the Kinetic sneaker has a very roomy toe box, which is perfect for my foot.
Rated 5 out of 5 by DrGirlfriend from Great Sneaks! Based on other reviews, I ordered a size 8.5 vs size 9, which I had to return. Some of my Sorel shoes/boots run smaller, some run larger. (I DO wish Sorel sizing was a bit more consistent, as shipping AND return shipping is NEVER free). I re-ordered in a size 9, which I love. They are smaller than I expected but do fit well. I love the design, the contrast textures, the Velcro, the soles. SO I ended up paying full price, plus tax, plus 3 shipping charges for these sneakers. Great fun sneakers, but they run short!!
Rated 4 out of 5 by Parlenne from Cool Sneaker I love the style of the Kinetic sneaker and get lots of compliments. I ended up having to exchange for a larger size. I found the toe to be too pointed and it was just a little too short for my foot. I would order a half-size up from your normal street shoe size. Although I like the look of the higher slip-on style, they can be a pain to pull on.
Rated 5 out of 5 by StylishGal from Perfect street & travel shoes I purchased the camel brown/sea salt combination and couldn't be happier. They are comfortable and absolutely stylish! The velcro closure allows you to adjust the width slightly. I purchased the 8.0 and 8.5 and kept the latter. For reference, I'm usually a size 9 in most brands. Thanks Sorel!
Rated 5 out of 5 by Sassy30 from Comfortable I received these shoes as a gift from my nephew. I love them, they’re so comfortable and stylish. I ordered my normal size and the shoe fits a tad bit big. Overall, this style of shoe is perfect. Love, love, love the color.
Rated 5 out of 5 by Babsdaughter from Best running around shoe EVER! Fell in love with these at a Sorel special event at a local store. Everyone loves them; I recently wore them on Michigan Ave in Chicago and felt like a trendsetter. Very comfortable and a great style. I would buy these again in a heartbeat! They do fit just a tad large and my 1/2 size down fit well, but if you ordered your usual size, they would likely be just fine, too.
Rated 5 out of 5 by Cyndy7 from Love these! Funky and so comfortable. Make more like these.
Rated 5 out of 5 by Chelsea27 from Casual and comfortable I got these sneakers because I travel a lot and am on my feet but I wanted something more fashionable than my running sneakers. I got the size I usually wear for flats and they fit comfortably with socks, so I would say size a half-size down from your usual sneaker size.
Rated 5 out of 5 by BridgetMarie from Love these funky sneakers My only complaint is that they aren't as snug on the back of my foot with the only strap modestly tightened over the top of my foot. Otherwise I adore these shoes. The same size worked for me as the size I wear in Sorel leather boots.
|
#!/usr/bin/env python
import csv, sys
from os.path import splitext, dirname
def readFile(file):
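    """Read a CSV with 'result' and 'label' columns into a list of [score, label] pairs."""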
data = csv.reader(open(file))
# Read the column names from the first line of the file
fields = data.next()
results = []
for row in data:
# Zip together the field names and values
items = zip(fields, row)
item = {}
# Add the value to our dictionary
for (name, value) in items:
item[name] = value.strip()
result = float(item['result'])
label = int(item['label'])
results += [[result, label]]
return results
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
def getStats(results, threshold = 0.0):
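    """Count (tp, fp, tn, fn), treating result > threshold as a positive prediction."""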
fp = 0
tp = 0
fn = 0
tn = 0
for (result, label) in results:
if result > threshold and label == 1:
tp += 1
if result <= threshold and label == 1:
fn += 1
if result <= threshold and label == 0:
tn += 1
if result > threshold and label == 0:
fp += 1
return tp, fp, tn, fn
def getAPR(results, threshold = 0):
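    """Return (accuracy, precision, recall) at the given threshold."""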
tp, fp, tn, fn = getStats(results,threshold)
if tp == 0 and fp == 0:
precision = 0
else:
precision = float(tp) / (tp + fp)
recall = float(tp) / (tp + fn)
accuracy = float(tp + tn) / (tp + tn + fp + fn)
return accuracy, precision, recall
def ROC(results, t):
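    """Return (false positive rate, true positive rate) at threshold t."""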
tp, fp, tn, fn = getStats(results, t)
tpr = float(tp) / (tp + fn)
fpr = float(fp) / (fp + tn)
return fpr, tpr
def PR(results, t):
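    """Return (recall, precision) at threshold t."""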
tp, fp, tn, fn = getStats(results, t)
p = float(tp) / (tp + fp)
r = float(tp) / (tp + fn)
return r, p
def getBestThreshold(results):
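    """Sweep 100 thresholds between the min and max scores and return the one maximizing 2*precision + recall."""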
maxResult = max(map(lambda x: x[0], results))
minResult = min(map(lambda x: x[0], results))
r = maxResult - minResult
step = r / 100.0
score = 0.0
threshold = 0.0
for t in drange(minResult,maxResult,step):
a,p,r = getAPR(results,t)
s = 2.0 * p + r
if score < s:
score = s
threshold = t
return threshold
def getCurve(results, fn):
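    """Sweep 100 thresholds and collect the (x, y) points produced by fn (ROC or PR)."""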
maxResult = max(map(lambda x: x[0], results))
minResult = min(map(lambda x: x[0], results))
r = maxResult - minResult
step = r / 100.0
rates = []
for t in drange(minResult,maxResult,step):
x, y = fn(results, t)
rates += [[x, y]]
return rates
class GraphParams:
def __init__(self, title = "", ylabel = "True Positive Rate", xlabel = "False Positive Rate"):
self.title = title
self.ylabel = ylabel
self.xlabel = xlabel
def generateCurves(files, params = GraphParams()):
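    """Write each classifier's ROC points to a CSV file and emit a gnuplot script (curves.gp) that plots them."""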
curves = open("curves.gp", 'w')
curves.write('set xrange [0:1]; set yrange [0:1];\n')
curves.write('set xlabel "%s";\n' % params.xlabel)
curves.write('set ylabel "%s";\n' % params.ylabel)
curves.write('set title "%s";\n' % params.title)
curves.write('set datafile separator ",";\n')
curves.write('set key right center outside;\n')
curves.write('plot \\\n')
i = 1
for f, t in files:
results = readFile(f)
rates = getCurve(results, ROC)
f = splitext(f)[0]
outfile = f + "_roc.csv"
output = open(outfile, 'w')
for r in rates:
output.write("%s,%s\n" % (r[0], r[1]))
output.close()
curves.write(' "%s" u 1:2 title "%s" with lines' % (outfile,t))
if i == len(files):
curves.write(';\n')
else:
curves.write(', \\\n')
i += 1
curves.write("pause -1")
files = []
#files += [["hasHat_100w_10s_hog_rbf.csv", "HasHat with RBF"]]
#files += [["hasHat_poly3.csv", "HasHat with 3-Poly"]]
files += [["hasHat.csv", "Has Hat"]]
files += [["hasJeans.csv", "Has Jeans"]]
files += [["hasLongHair.csv", "Has Long Hair"]]
files += [["hasLongPants.csv", "Has Long Pants"]]
files += [["hasLongSleeves.csv", "Has Long Sleeves"]]
files += [["hasShorts.csv", "Has Shorts"]]
files += [["hasTShirt.csv", "Has T-Shirt"]]
files += [["isMale.csv", "Is Male"]]
files += [["hasGlasses.csv", "Has Glasses"]]
for f in files:
f[0] = "rbf_oneclass_100w_25s/" + f[0]
generateCurves(files, GraphParams(title = "Performance with RBF Kernels"))
for f in files:
results = readFile(f[0])
#t = getBestThreshold(results)
#print max(map(lambda x: x[0], results))
#print results
a, p, r = getAPR(results)
print "%s: A: %2.2f, P: %2.2f, R: %2.2f" % (f[1],a,p,r)
|
On the morning of March 15, 1848, revolutionaries in Budapest visited the printing presses of Landerer and Heckenast and printed Sándor Petőfi's poem Nemzeti Dal (National Song) together with the 12 demands formulated by the leaders of the revolution. The first demand on this list was: We demand the freedom of the press, the abolition of censorship. The question of freedom of the press is one that today is still being contested both in authoritarian regimes and in liberal democracies where civil liberties seem to be increasingly under threat. The notions of freedom of the press and freedom of speech also impact upon, and are interrelated with, other areas such as censorship and self-censorship, the internet, copyright, intellectual property, the privatization of knowledge, protest and public order, public space, and human rights issues in general. Therefore, freedom of the press is always among the demands of movements that fight for open, democratic societies. In the framework of the exhibition, Blinken OSA presents a selection from its various collections of Hungarian samizdat publications.
These “unofficial” publications, including periodicals, books, and ephemera, as well as audiovisual documents, signalled the first fissures in the communist system, cracks in which young intellectuals carved out space for a semi-autonomous cultural and political discourse.
|
from __future__ import absolute_import
from __future__ import print_function
import os
import keras.models as models
from keras.layers.core import Layer, Dense, Dropout, Activation, Flatten, Reshape, Permute
from keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, Cropping2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, Conv2DTranspose
from keras import backend as K
import cv2
import numpy as np
import json
K.set_image_dim_ordering('th')
# weight_decay = 0.0001
from keras.regularizers import l2
class Tiramisu():
def __init__(self):
self.create()
def DenseBlock(self, layers, filters):
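        """Append `layers` rounds of BatchNorm -> ReLU -> 3x3 Conv(filters) -> Dropout."""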
model = self.model
for i in range(layers):
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters, kernel_size=(3, 3), padding='same', init="he_uniform", W_regularizer = l2(0.0001)))
model.add(Dropout(0.2))
def TransitionDown(self,filters):
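        """BatchNorm -> ReLU -> 1x1 Conv(filters) -> Dropout -> 2x2 max pooling."""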
model = self.model
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters, kernel_size=(1, 1), padding='same', init="he_uniform", W_regularizer = l2(0.0001)))
model.add(Dropout(0.2))
model.add(MaxPooling2D(pool_size=(2, 2)))
def TransitionUp(self,filters, input_shape,output_shape):
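        """Upsample with a 3x3, stride-2 transposed convolution."""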
model = self.model
model.add(Conv2DTranspose(filters,kernel_size=(3, 3), strides=(2, 2),data_format='channels_first', output_shape=output_shape,
padding='same', input_shape=input_shape, init="he_uniform", W_regularizer = l2(0.0001)))
    def gfactorCounterDown(self,model_self,growth_factor,block_size,previous_conv_size,block_count=5):
        # Repeatedly apply a DenseBlock followed by a TransitionDown, growing the
        # running filter count by block_size * growth_factor at every step.
        m = previous_conv_size
        for i in range(block_count):
            m = block_size * growth_factor + m
            model_self.DenseBlock(block_size, m)
            model_self.TransitionDown(m)
def gfactorCounterUp(self,model_self,growth_factor,block_size,previous_block_size,previous_conv_size,block_count=5):
# previous_conv_size = 288, since:
# self.DenseBlock(4,288) # 4*12 = 48 + 288 = 336
# self.TransitionDown(288)
        for i in range(block_count):
            m = block_size * growth_factor + previous_block_size * growth_factor + previous_conv_size
            model_self.DenseBlock(block_size, m)
            model_self.TransitionDown(m)
def create(self):
model = self.model = models.Sequential()
# cropping
# model.add(Cropping2D(cropping=((68, 68), (128, 128)), input_shape=(3, 360,480)))
model.add(Conv2D(48, kernel_size=(3, 3), padding='same', input_shape=(3,224,224), init="he_uniform", W_regularizer = l2(0.0001)))
# (5 * 4)* 2 + 5 + 5 + 1 + 1 +1
# growth_m = 4 * 12
# previous_m = 48
        self.gfactorCounterDown(self,12,4,48,5)
# self.DenseBlock(4,96) # 4*12 = 48 + 48 = 96
# self.TransitionDown(96)
# self.DenseBlock(4,144) # 4*12 = 48 + 96 = 144
# self.TransitionDown(144)
# self.DenseBlock(4,192) # 4*12 = 48 + 144 = 192
# self.TransitionDown(192)
# self.DenseBlock(4,240)# 4*12 = 48 + 192 = 240
# self.TransitionDown(240)
# self.DenseBlock(4,288) # 4*12 = 48 + 288 = 336
# self.TransitionDown(288)
self.DenseBlock(15,336) # 4 * 12 = 48 + 288 = 336
        self.gfactorCounterUp(self,12,4,4,288,5)
# self.TransitionUp(384, (384, 7, 7), (None, 384, 14, 14)) # m = 288 + 4x12 + 4x12 = 384.
# self.DenseBlock(4,384)
# self.TransitionUp(336, (336, 14, 14), (None, 336, 28, 28)) #m = 240 + 4x12 + 4x12 = 336
# self.DenseBlock(4,336)
# self.TransitionUp(288, (288, 28, 28), (None, 288, 56, 56)) # m = 192 + 4x12 + 4x12 = 288
# self.DenseBlock(4,288)
# self.TransitionUp(240, (240, 56, 56), (None, 240, 112, 112)) # m = 144 + 4x12 + 4x12 = 240
# self.DenseBlock(4,240)
# self.TransitionUp(192, (192, 112, 112), (None, 192, 224, 224)) # m = 96 + 4x12 + 4x12 = 192
# self.DenseBlock(4,192)
model.add(Conv2D(12, kernel_size=(3, 3), padding='same', init="he_uniform", W_regularizer = l2(0.0001)))
model.add(Reshape((12, 224 * 224)))
model.add(Permute((2, 1)))
model.add(Activation('softmax'))
model.summary()
with open('tiramisu_fc_dense56_model.json', 'w') as outfile:
outfile.write(json.dumps(json.loads(model.to_json()), indent=3))
Tiramisu()
|
Cmes Corp is an Internet Services-Network Designers/Consultants business in Corpus Christi, TX.
Cmes Corp has been operational since 0 and provides B2B services in the industry of Internet Services-Network Designers/Consultants from its main office in Texas. The company functions with 1 to 4 employees and conducts business from its single location with a flagship found at 5541 Bear Lane # 202 in Corpus Christi, TX 78405.
From its inception in 0, Cmes Corp has seen growth reaching revenues of $1.000.000 to $2.499.999 per annum. This company of Internet Services-Network Designers/Consultants is classified under SIC code 7373 and NAICS number 5415121, which can be examined for more business details.
For inquiries or more information about , B2B market variables, and professional , please contact representative Peter Darst, Owner Full Name Report by phone number (361) 289-9278 Full Phone Report. To access Cmes Corp’s online presence, visit its website at or connect through social media via on Twitter or on Facebook.
|
import logging
from pajbot.managers.handler import HandlerManager
from pajbot.managers.redis import RedisManager
from pajbot.modules.base import ModuleSetting
from pajbot.modules.quest import QuestModule
from pajbot.modules.quests import BaseQuest
log = logging.getLogger(__name__)
class TypeMeMessageQuestModule(BaseQuest):
ID = "quest-" + __name__.split(".")[-1]
NAME = "Colorful chat /me"
DESCRIPTION = "Type X /me messages with X message length."
PARENT_MODULE = QuestModule
CATEGORY = "Quest"
SETTINGS = [
ModuleSetting(
key="quest_limit",
label="How many messages does the user needs to type?",
type="number",
required=True,
placeholder="",
default=100,
constraints={"min_value": 1, "max_value": 200},
),
ModuleSetting(
key="quest_message_length",
label="How many letters minimum should be in the message?",
type="number",
required=True,
placeholder="",
default=15,
constraints={"min_value": 1, "max_value": 500},
),
]
def get_limit(self):
return self.settings["quest_limit"]
def get_quest_message_length(self):
return self.settings["quest_message_length"]
def on_message(self, source, message, event, **rest):
if len(message) < self.get_quest_message_length() or event.type != "action":
return
user_progress = self.get_user_progress(source, default=0)
if user_progress >= self.get_limit():
return
user_progress += 1
redis = RedisManager.get()
if user_progress == self.get_limit():
self.finish_quest(redis, source)
self.set_user_progress(source, user_progress, redis=redis)
def start_quest(self):
HandlerManager.add_handler("on_message", self.on_message)
redis = RedisManager.get()
self.load_progress(redis=redis)
def stop_quest(self):
HandlerManager.remove_handler("on_message", self.on_message)
redis = RedisManager.get()
self.reset_progress(redis=redis)
def get_objective(self):
return f"Type {self.get_limit()} /me messages with a length of minimum {self.get_quest_message_length()} letters KappaPride "
|
A massage pad is known for its ability to help release tension in your back muscles and spine. Many people face back pain, or a tense feeling in the neck and shoulders, every day because of long hours of sitting, so we need a solution for that. To me, a massage pad for your chair can be of help. A massage pad can do wonders for spine disorders, or for other parts of your body, while you are sitting and working in your chair. These heating massage pads can help relieve the pain in that part of your body. A massage pad can be used with a floor chair or an office chair as well. As they all come with their own role and functionality, please check them out to see which is best for you.
Too many people complain every day of having back pain, of feeling tense from the neck down the whole spine, and even of muscle tension. The list of similar health disorders can go on, and in time these problems cause others, as a badly positioned spine, for instance, puts too much pressure on some internal organs (e.g. the heart, causing palpitations or difficulty breathing). Usually the main cause of back pain or spine issues is that individuals spend most of their time at a desk, in front of a computer, because of the nature of their job, or even worse, they need to stand for long hours in the same position, or to carry heavy objects. Ok… so until now we had only bad news for you: these negative effects of your daily activities can harm your physical health. But there is good news also… and the good news is that we have the solution for all your problems: the massage chair pad. A massage chair pad can do wonders for your spine disorders, or for other parts of your body where you have pain. Not only will it make the pain disappear, it also helps treat the cause of your problems, which means that when used on a regular basis, the massage chair (or massage chair pad) can heal for good, so that you will never feel any pain in that part of your body again.
This article walks through the best models of massage chair pads. We outline the features of each product in our list of top 10 massage chairs, explaining why anyone should buy such a product and what the main benefits are.
Our first chair massage pad is the Homedics QRM-400H Therapist Select Quad-Roller Shiatsu & Rolling Massaging Cushion with Heat. The adjustable heating controls allow the chair pad to fit the size and weight of your body, and its controlling microprocessor offers six massage programmes. It has an easy-to-follow manual and is ideal for anyone who needs to relax after a busy day, relieve back pain, release tension in the spine muscles, or obtain other similar benefits. If you always come home with tension in your back, this is the right one for you.
This chair massage pad provides massage relief for many parts of your body, from the back to the neck, waist and abdomen, including the hips and thighs. It allows you to concentrate the massage on a specific part of your body, and it can move the massage nodes from top to bottom and back again for a total relaxation effect. In addition, the product offers shiatsu-style mechanisms, rolling motions, extra soothing, vibration massage on three levels, a high degree of comfort, safe usage, and the possibility to set the heat level. Furthermore, the heating system of this back massager pad allows the different massage nodes to be switched on and off. The product can be adapted to your desk chair, home sofa, dining chair, etc. The automatic 20-minute shut-off programme makes it more resistant to damage and extends the working life of the device. Last, the massage chair pad has a 90-day warranty, during which you are able to ask for a refund of its price.
The Motor Vibration Massage Chair Pad with Heat is designed to provide massage and relaxation for various parts of your body: back, thighs, shoulders and neck. The three-speed massage system, the heating mechanism, and the four massage zones are managed by a remote control. This heating back massager is suitable for your car, home chairs, and also office chairs. It also shuts down automatically 30 minutes after use.
The Naipo Back Massager Chair is a full-body massage cushion seat with pad. Its main characteristics are the air compressor, the massage system based on the shiatsu kneading method, its vibration mechanisms, the soothing heat system, and the options that let you customize the massage program to your liking.
Specially designed to be flexible and adaptable to different parts of your body, the CNHIDEE Massage Therapy Travel model provides great comfort and anti-stress relief for people of all ages. What makes it different from other similar products is the combination of a heating system with an automatic timer and a hot stone massage technique. Its ergonomic design, as well as the shiatsu massage technique, makes this product very popular among many categories of people, regardless of their gender, occupation, or age.
The Sunbeam 730-811 Heating Pad plus Massage tool is the best remedy for arthritis symptoms or sore muscles. Working through vibration and soothing warmth, the model has basically two main mechanisms: massage and heating. It allows an automatic shut-off after one hour, it is not difficult to handle, and it can be cleaned and maintained quite easily.
This one is known for providing deep relaxation for your muscles. It contains multiple massage nodes and works through a heating system that provides warmth and comfort. It can be valuable for maintaining your good health and spirits and filling you with energy, but also for treating injuries or other problems of your body.
It is considered a revolutionary massage pillow. With dual deep-kneading rollers that use the shiatsu technique and an efficient heat mechanism, the product becomes a great asset for anyone who needs to relax after a busy and stressful day, or to release muscle tension. It can be placed anywhere in your home, but is also suitable for your car seat.
The 2019 BHealth Premium Massage Chair w/body scan is ideal for a home or salon shop. It provides you or your customers with a complete relaxation session. With no less than six massage modes, the chair lets you select between vibration, knock, shiatsu, flap, knead, and roll. Its special features are the infrared heat provided by the jade heating elements, the heat therapy module with adjustable temperature, and finally the recliner, foot and calf rest elements, as well as the buttons and commands for adjusting and improving the position.
Last, but not least, we have the Elite Robo Pad Massage Chair (Brown) model. Made of synthetic leather, with foot rollers, Android technology, and a rolling stroke, this chair is suitable for anyone in need of a relaxation zone. Its modern design makes it comfortable and easy to handle. The product comes with a five-year warranty.
|
from coverage.report import Reporter
import os
class JsonFileCoverageCounters(object):
def __init__(self):
self.hits = 0
self.misses = 0
def jsonify(self):
return dict(
hits=self.hits,
misses=self.misses
)
class JsonFileCoverage(object):
def __init__(self, path):
self.path = path
self.lines = {}
self.counters = JsonFileCoverageCounters()
def jsonify(self):
return dict(
path=self.path,
lines=self.lines,
counters=self.counters.jsonify()
)
class JsonCoverageCounters(object):
def __init__(self):
self.files = 0
self.hits = 0
self.misses = 0
def jsonify(self):
return dict(
files=self.files,
hits=self.hits,
misses=self.misses
)
class JsonCoverageReport(Reporter):
def __init__(self, cov, config):
super(JsonCoverageReport, self).__init__(cov, config)
#self.arcs = cov.data.has_arcs()
#self.packages = {}
self.path_strip_prefix = os.getcwd() + os.sep
self.files = []
self.counters = JsonCoverageCounters()
def report(self, morfs=None, outfile=None):
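        """Collect the coverage data, build per-file entries, and total the hit/miss counters."""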
#self.packages = {}
self.coverage._harvest_data()
#self.coverage.config.from_args(
# ignore_errors=None, omit=None, include=None,
# show_missing=None
#)
self.report_files(self.json_file, morfs)
self.counters.files = len(self.files)
self.counters.misses = sum([f.counters.misses for f in self.files])
self.counters.hits = sum([f.counters.hits for f in self.files])
def json_file(self, cu, analysis):
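        """Record line-level hit/miss data and counters for a single measured file."""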
filename = cu.file_locator.relative_filename(cu.filename).replace('\\', '/')
cfile = JsonFileCoverage(filename)
for line in sorted(analysis.statements):
cfile.lines[line] = int(line not in analysis.missing)
cfile.counters.misses = len(analysis.missing)
cfile.counters.hits = len(analysis.statements) - cfile.counters.misses
self.files.append(cfile)
def jsonify(self):
return dict(
counters=self.counters.jsonify(),
files=[f.jsonify() for f in self.files]
)
|
Hair Removal 411 makes it easy to find a Beautician in Carthage, NY. We are a trusted source for finding information on electrologists, full body waxing, epen, eyebrow threading and back hair removal. If you're unable to find the information you're looking for, we invite you to contact one of our listed Beauticians for more information on Laser Hair Removal and Electrolysis.
|
import numpy as np
import pandas as pd
import pandas._testing as tm
def test_data_frame_value_counts_unsorted():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(sort=False)
expected = pd.Series(
data=[1, 2, 1],
index=pd.MultiIndex.from_arrays(
[(2, 4, 6), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_ascending():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(ascending=True)
expected = pd.Series(
data=[1, 1, 2],
index=pd.MultiIndex.from_arrays(
[(2, 6, 4), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_default():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays(
[(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_normalize():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(normalize=True)
expected = pd.Series(
data=[0.5, 0.25, 0.25],
index=pd.MultiIndex.from_arrays(
[(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_single_col_default():
df = pd.DataFrame({"num_legs": [2, 4, 4, 6]})
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays([[4, 2, 6]], names=["num_legs"]),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty():
df_no_cols = pd.DataFrame()
result = df_no_cols.value_counts()
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty_normalize():
df_no_cols = pd.DataFrame()
result = df_no_cols.value_counts(normalize=True)
expected = pd.Series([], dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_dropna_true(nulls_fixture):
# GH 41334
df = pd.DataFrame(
{
"first_name": ["John", "Anne", "John", "Beth"],
"middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"],
},
)
result = df.value_counts()
expected = pd.Series(
data=[1, 1],
index=pd.MultiIndex.from_arrays(
[("Beth", "John"), ("Louise", "Smith")], names=["first_name", "middle_name"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_dropna_false(nulls_fixture):
# GH 41334
df = pd.DataFrame(
{
"first_name": ["John", "Anne", "John", "Beth"],
"middle_name": ["Smith", nulls_fixture, nulls_fixture, "Louise"],
},
)
result = df.value_counts(dropna=False)
expected = pd.Series(
data=[1, 1, 1, 1],
index=pd.MultiIndex(
levels=[
pd.Index(["Anne", "Beth", "John"]),
pd.Index(["Louise", "Smith", nulls_fixture]),
],
codes=[[0, 1, 2, 2], [2, 0, 1, 2]],
names=["first_name", "middle_name"],
),
)
tm.assert_series_equal(result, expected)
|
Specifically, if we can get at least the JavaScript log messages, it would be beneficial.
Dear Musaffir, the logs are accessible only after the test.
If you want to get information on a running automated test, then you can use NeoLoad Web. Everyone has access to a free SaaS plan to evaluate or for a limited usage.
After changing the Wi-Fi network connection and a change in the IPv4 address, NeoLoad is not launching the browsers. Kindly find the error log attached and provide a solution.
|
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013-2015, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Methods for handling mimetypes."""
__all__ = (
'COLLECTION_MIMETYPE',
'SUBCOLLECTION_MIMETYPE',
'FOLDER_MIMETYPE',
'MIMETYPES',
'MODULE_MIMETYPE',
'COMPOSITE_MODULE_MIMETYPE',
'PORTALTYPE_TO_MIMETYPE_MAPPING',
'portaltype_to_mimetype',
)
MODULE_MIMETYPE = 'application/vnd.org.cnx.module'
COMPOSITE_MODULE_MIMETYPE = 'application/vnd.org.cnx.composite-module'
COLLECTION_MIMETYPE = 'application/vnd.org.cnx.collection'
SUBCOLLECTION_MIMETYPE = 'application/vnd.org.cnx.subcollection'
FOLDER_MIMETYPE = 'application/vnd.org.cnx.folder'
MIMETYPES = (MODULE_MIMETYPE, COLLECTION_MIMETYPE, FOLDER_MIMETYPE,)
PORTALTYPE_TO_MIMETYPE_MAPPING = {
'Module': MODULE_MIMETYPE,
'CompositeModule': COMPOSITE_MODULE_MIMETYPE,
'Collection': COLLECTION_MIMETYPE,
'SubCollection': SUBCOLLECTION_MIMETYPE,
}
def portaltype_to_mimetype(portal_type):
"""Map the given ``portal_type`` to a mimetype."""
return PORTALTYPE_TO_MIMETYPE_MAPPING[portal_type]
|
J Bailey by the Bailey Boys Saxony Roscoe Shirt. This cute button down features a green, blue and white plaid. Pair this shirt with the marine blue or khaki pants also by J Bailey.
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Command processor for GRIT. This is the script you invoke to run the various
GRIT tools.
'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import getopt
from grit import util
import grit.exception
import grit.tool.build
import grit.tool.count
import grit.tool.diff_structures
import grit.tool.menu_from_parts
import grit.tool.newgrd
import grit.tool.resize
import grit.tool.rc2grd
import grit.tool.test
import grit.tool.transl2tc
import grit.tool.unit
# Copyright notice
_COPYRIGHT = '''\
GRIT - the Google Resource and Internationalization Tool
Copyright (c) Google Inc. %d
''' % util.GetCurrentYear()
# Keys for the following map
_CLASS = 1
_REQUIRES_INPUT = 2
_HIDDEN = 3 # optional key - presence indicates tool is hidden
# Maps tool names to the tool's module. Done as a list of (key, value) tuples
# instead of a map to preserve ordering.
_TOOLS = [
['build', { _CLASS : grit.tool.build.RcBuilder, _REQUIRES_INPUT : True }],
['newgrd', { _CLASS : grit.tool.newgrd.NewGrd, _REQUIRES_INPUT : False }],
['rc2grd', { _CLASS : grit.tool.rc2grd.Rc2Grd, _REQUIRES_INPUT : False }],
['transl2tc', { _CLASS : grit.tool.transl2tc.TranslationToTc,
_REQUIRES_INPUT : False }],
['sdiff', { _CLASS : grit.tool.diff_structures.DiffStructures,
_REQUIRES_INPUT : False }],
['resize', { _CLASS : grit.tool.resize.ResizeDialog, _REQUIRES_INPUT : True }],
['unit', { _CLASS : grit.tool.unit.UnitTestTool, _REQUIRES_INPUT : False }],
['count', { _CLASS : grit.tool.count.CountMessage, _REQUIRES_INPUT : True }],
['test', { _CLASS: grit.tool.test.TestTool, _REQUIRES_INPUT : True, _HIDDEN : True }],
['menufromparts', { _CLASS: grit.tool.menu_from_parts.MenuTranslationsFromParts,
_REQUIRES_INPUT : True, _HIDDEN : True }],
]
def PrintUsage():
print _COPYRIGHT
tool_list = ''
for (tool, info) in _TOOLS:
if not _HIDDEN in info.keys():
tool_list += ' %-12s %s\n' % (tool, info[_CLASS]().ShortDescription())
# TODO(joi) Put these back into the usage when appropriate:
#
# -d Work disconnected. This causes GRIT not to attempt connections with
# e.g. Perforce.
#
# -c Use the specified Perforce CLIENT when talking to Perforce.
print '''Usage: grit [GLOBALOPTIONS] TOOL [args to tool]
Global options:
-i INPUT Specifies the INPUT file to use (a .grd file). If this is not
specified, GRIT will look for the environment variable GRIT_INPUT.
If it is not present either, GRIT will try to find an input file
named 'resource.grd' in the current working directory.
-v Print more verbose runtime information.
-x Print extremely verbose runtime information. Implies -v
-p FNAME Specifies that GRIT should profile its execution and output the
results to the file FNAME.
Tools:
TOOL can be one of the following:
%s
For more information on how to use a particular tool, and the specific
arguments you can send to that tool, execute 'grit help TOOL'
''' % (tool_list)
class Options(object):
'''Option storage and parsing.'''
def __init__(self):
self.disconnected = False
self.client = ''
self.input = None
self.verbose = False
self.extra_verbose = False
self.output_stream = sys.stdout
self.profile_dest = None
def ReadOptions(self, args):
'''Reads options from the start of args and returns the remainder.'''
(opts, args) = getopt.getopt(args, 'g:dvxc:i:p:')
for (key, val) in opts:
if key == '-d': self.disconnected = True
elif key == '-c': self.client = val
elif key == '-i': self.input = val
elif key == '-v':
self.verbose = True
util.verbose = True
elif key == '-x':
self.verbose = True
util.verbose = True
self.extra_verbose = True
util.extra_verbose = True
elif key == '-p': self.profile_dest = val
if not self.input:
if 'GRIT_INPUT' in os.environ:
self.input = os.environ['GRIT_INPUT']
else:
self.input = 'resource.grd'
return args
def __repr__(self):
return '(disconnected: %d, verbose: %d, client: %s, input: %s)' % (
self.disconnected, self.verbose, self.client, self.input)
def _GetToolInfo(tool):
'''Returns the info map for the tool named 'tool' or None if there is no
such tool.'''
matches = filter(lambda t: t[0] == tool, _TOOLS)
if not len(matches):
return None
else:
return matches[0][1]
def Main(args):
'''Parses arguments and does the appropriate thing.'''
util.ChangeStdoutEncoding()
if not len(args) or len(args) == 1 and args[0] == 'help':
PrintUsage()
return 0
elif len(args) == 2 and args[0] == 'help':
tool = args[1].lower()
if not _GetToolInfo(tool):
print "No such tool. Try running 'grit help' for a list of tools."
return 2
print ("Help for 'grit %s' (for general help, run 'grit help'):\n"
% (tool))
print _GetToolInfo(tool)[_CLASS].__doc__
return 0
else:
options = Options()
args = options.ReadOptions(args) # args may be shorter after this
tool = args[0]
if not _GetToolInfo(tool):
print "No such tool. Try running 'grit help' for a list of tools."
return 2
try:
if _GetToolInfo(tool)[_REQUIRES_INPUT]:
os.stat(options.input)
except OSError:
print ('Input file %s not found.\n'
'To specify a different input file:\n'
' 1. Use the GRIT_INPUT environment variable.\n'
' 2. Use the -i command-line option. This overrides '
'GRIT_INPUT.\n'
' 3. Specify neither GRIT_INPUT or -i and GRIT will try to load '
"'resource.grd'\n"
' from the current directory.' % options.input)
return 2
toolobject = _GetToolInfo(tool)[_CLASS]()
if options.profile_dest:
import hotshot
prof = hotshot.Profile(options.profile_dest)
prof.runcall(toolobject.Run, options, args[1:])
else:
toolobject.Run(options, args[1:])
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
Free Business App by FORTINET TECHNOLOGIES CANADA INC.
You are downloading the FortiToken Mobile XAP file v3.0.1.8 for Windows Phone. FortiToken Mobile is a free and useful Business app: FortiToken Mobile is an OATH compliant, time-based One Time Password (OTP) generator application for the mobile device. It is the client component of ... You can now download the offline XAP file to install & try it.
Note: To install it manually from an SD card, you should choose one of the server locations above to get the offline FortiToken Mobile XAP file, move the file to your phone's SD card and then tap 'Install local apps' in the phone's App list.
Appx4Fun.com only shares the original XAP installer for FortiToken Mobile v3.0.1.8.
All the Windows Phone apps & games here are free for home or personal use ONLY. FortiToken Mobile is the property and trademark of the developer FORTINET TECHNOLOGIES CANADA INC., all rights reserved.
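For context, the one-time passwords generated by such an app follow the standard OATH TOTP scheme (RFC 6238): an HMAC-SHA1 of the current 30-second time step, truncated to a short numeric code. The snippet below is only a generic Python illustration of that scheme with a made-up example secret; it is not Fortinet's code and makes no claims about how FortiToken Mobile is implemented.
import base64, hashlib, hmac, struct, time

def totp(secret_b32, digits=6, period=30, now=None):
    # Decode the shared secret (base32, as commonly provisioned for OATH tokens).
    key = base64.b32decode(secret_b32, casefold=True)
    # Number of `period`-second steps since the Unix epoch.
    counter = int((now if now is not None else time.time()) // period)
    digest = hmac.new(key, struct.pack(">Q", counter), hashlib.sha1).digest()
    # Dynamic truncation per RFC 4226: take 4 bytes at an offset derived from the last digest byte.
    offset = digest[-1] & 0x0F
    code = struct.unpack(">I", digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return str(code % (10 ** digits)).zfill(digits)

print(totp("JBSWY3DPEHPK3PXP"))  # example secret; prints a 6-digit code that changes every 30 seconds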
|
import unittest
from streamlink.plugins.schoolism import Schoolism
class TestPluginSchoolism(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
'https://www.schoolism.com/watchLesson.php',
]
for url in should_match:
self.assertTrue(Schoolism.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
'https://www.schoolism.com',
]
for url in should_not_match:
self.assertFalse(Schoolism.can_handle_url(url))
def test_playlist_parse_subs(self):
with_subs = """var allVideos=[
{sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/44/2/part1.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Digital Painting - Lesson 2 - Part 1",playlistTitle:"Part 1",}], subtitles: [{
"default": true,
kind: "subtitles", srclang: "en", label: "English",
src: "https://s3.amazonaws.com/schoolism-encoded/44/subtitles/2/2-1.vtt",
}],
},
{sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/44/2/part2.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Digital Painting - Lesson 2 - Part 2",playlistTitle:"Part 2",}], subtitles: [{
"default": true,
kind: "subtitles", srclang: "en", label: "English",
src: "https://s3.amazonaws.com/schoolism-encoded/44/subtitles/2/2-2.vtt",
}]
}];
"""
data = Schoolism.playlist_schema.validate(with_subs)
self.assertIsNotNone(data)
self.assertEqual(2, len(data))
def test_playlist_parse(self):
without_subs = """var allVideos=[
{sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/14/1/part1.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Gesture Drawing - Lesson 1 - Part 1",playlistTitle:"Part 1",}],},
{sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/14/1/part2.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Gesture Drawing - Lesson 1 - Part 2",playlistTitle:"Part 2",}]}
];
"""
data = Schoolism.playlist_schema.validate(without_subs)
self.assertIsNotNone(data)
self.assertEqual(2, len(data))
|
Image gallery of black and white soap dispensers: blank plastic pump bottles, automatic and wall-mounted commercial dispensers, frosted glass, white geometric and ceramic designs, lacquer and stainless steel slimline models, and black or white refillable pump dispensers.
|
from app.domain.constants import CONSTANTS
from app.models import EventSearchMixin, WeightclassEventSearchMixin, DataInterface
from app.models.database import DBObject
class Bot(DBObject, EventSearchMixin, WeightclassEventSearchMixin):
bid = None
registered_ind = None
event_id = None
name = None
team_name = None
team_email = None
team_city = None
team_state = None
category = None
weightclass = None
primary_freq = None
secondary_freq = None
multibot_ind = None
notes = None
photo_url = None
seed_number = None
bracket_id = None
@classmethod
def get_by_bracket_seed(cls, event_id, bracket_id, seed):
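        """Return the Bot holding the given seed in this event and bracket, or None."""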
db = DataInterface(CONSTANTS.DB_NAME)
sql = "SELECT * FROM %s WHERE event_id = %d AND seed_number = %d AND bracket_id = %d" % (cls.__name__, int(event_id), seed, bracket_id)
result = db.fetch_one(sql)
return cls(**(result)) if result else None
@classmethod
def get_by_bracket(cls, bracket_id):
"""
alternative to the method available in BracketSearchMixin, parses through Matches instead
"""
from app.models.match import Match
matches = Match.get_by_bracket_round(bracket_id, 'A')
bots = []
for match in matches:
bots += [match.bot1_id, match.bot2_id]
bots = list(set(bots))
return [cls.get_by_id(bot_id) for bot_id in bots if bot_id]
@classmethod
def bye(cls):
"""
placeholder bot object for a bye
"""
params = {
'name': '--bye--',
'id': 0
}
return cls(**(params))
def register(self):
"""
registers the bot
"""
sql = "UPDATE %s SET registered_ind = 'Y' WHERE id = %d" % (self.__class__.__name__, self.id)
return self.db.execute(sql)
def unregister(self):
"""
unregisters the bot
"""
sql = "UPDATE %s SET registered_ind = 'N' WHERE id = %d" % (self.__class__.__name__, self.id)
return self.db.execute(sql)
def to_dict(self):
return {
'id': self.id,
'botName': self.name,
'teamName': self.team_name,
'teamEmail': self.team_email,
'teamCity': self.team_city,
'teamState': self.team_state,
'category': self.category,
'weightclass': self.weightclass,
'photoUrl': self.photo_url,
'multibot': True if self.multibot_ind == 'Y' else False,
'isRegistered': True if self.registered_ind == 'Y' else False
}
|
This is a three book series, though the third book is the one that is now being released. I was given the opportunity to read all three. Though “A Mending at the Edge” can be read on its own, I highly recommend reading the first two in the series first.
These books are fiction based on historical records of a real woman, Emma Wagner Giesy. It reminded me a lot of the Little House on the Prairie books I read over and over as a child. Emma, however, was a member of a religious group that lived communally.
Now that I have finished the full series, it was a really good story. I had a hard time through the middle book (not unlike Empire Strikes Back) because the middle of any good story is where all the real struggle is. Because I have the tendency to carry into real life the emotions of the books I am reading, I was somewhat depressed while reading the middle book. Emma had a very hard life. She made many decisions I wouldn’t have, which made it harder for me to read as well. Overall, though I am very glad to have read it.
For the official summary, please read summaries of each book down below.
A lot of historical information, written in a way that I enjoyed. That says something.
I’ll hang on to these books, mostly so I can share them. I might re-read them someday. While she did so many things that were contrary to my way of thinking, I did learn a lot about service and servanthood that I know I need to take to heart.
Not horrible, but really does warrant the time to be able to sit down and at least absorb a chapter at a time. I did read most of it in a page here and there, but I felt almost disrespectful. It’s so much better when you can fully get the word pictures in each scene, rather than having to scramble back a few pages or paragraphs to pick up momentum again.
Jane Kirkpatrick is the best-selling author of two nonfiction books and fourteen historical novels, including the popular Kinship and Courage series. Her award-winning writing has appeared in more than fifty publications, including Sports Afield and Decision. She’s won the coveted Western Heritage Wrangler Award, an honor shared by such writers as Larry McMurtry and Barbara Kingsolver. Jane is a licensed clinical social worker as well as an internationally recognized speaker. She and her husband, Jerry, ranch 160 acres in eastern Oregon.
|
# coding: utf-8
"""
Sample code for the bitstring module.
Part 1: working with BitArray.
"""
import bitstring as bs
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
# ---------------------------------------------------------------
# [link]
# http://pythonhosted.org/bitstring/walkthrough.html
# ---------------------------------------------------------------
        # BitArray is a container that holds binary data.
        # BitStream is a stream that also tracks a position and supports read operations.
        # ---------------------------------------------------------------
        # BitArray is mutable.
        # Its value can be read back from an instance in various forms, such as binary or hexadecimal.
        # ---------------------------------------------------------------
        # hex raises an error unless the bit length is a multiple of 4.
        # oct raises an error unless the bit length is a multiple of 3.
        # ---------------------------------------------------------------
        # There are several ways to initialize:
        # BitArray(bin='0b11111111')
        # BitArray(hex='0xff')
        # BitArray(uint=255, length=8)
        # All of the above create the same data.
# ---------------------------------------------------------------
ba01 = bs.BitArray('0xff01')
pr('ba01', ba01)
pr('ba01.bin', ba01.bin)
pr('ba01.hex', ba01.hex)
pr('ba01.int', ba01.int)
pr('ba01.uint', ba01.uint)
pr('ba01.bytes', ba01.bytes)
try:
            # oct fails unless the number of bits is a multiple of 3
pr('ba01.oct', ba01.oct)
except bs.InterpretError as e:
pr('ba01.oct', e)
ba02 = ba01 + '0b00'
pr('ba02.oct', ba02.oct)
ba03 = bs.BitArray(bin='0b11111111')
pr('ba03', ba03)
pr('ba03.uint', ba03.uint)
ba04 = bs.BitArray(hex='0xff')
pr('ba04', ba04)
pr('ba04.uint', ba04.uint)
ba05 = bs.BitArray(uint=255, length=8)
pr('ba05', ba05)
pr('ba05.uint', ba05.uint)
def go():
obj = Sample()
obj.exec()
|
Metal Boxes There are 88 products.
Discover this selection of exclusive metal boxes, illustrated and accompanied by the best products of the House!
Sugared almonds, chocolate sweets, fancy dragees: there is something for everyone!
Reproduction of a document written by Léon Braquier (who died in December 1937) and signed in his hand. In order to guarantee all its customers protection against counterfeiting, the sugared almonds are placed in an envelope bearing his trademark and sealed with a stamp bearing the initials "L B". Shown on this document is the bouquet of dragees offered to the President of the Republic, Mr. LOUBET.
A nod to the sweetness of the dragee, evoked by old-fashioned haymaking, and to the sweetness of love, represented by a soldier and his beloved. This box was created to celebrate the anniversary of the Battle of Verdun.
Discover our products in an exclusive round box presenting the monuments of the region. Dive into the heart of history and let your sweet tooth carry you away!
Nougatine coated with milk chocolate.
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1VsphereVirtualDiskVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, fs_type=None, volume_path=None):
"""
V1VsphereVirtualDiskVolumeSource - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'fs_type': 'str',
'volume_path': 'str'
}
self.attribute_map = {
'fs_type': 'fsType',
'volume_path': 'volumePath'
}
self._fs_type = fs_type
self._volume_path = volume_path
@property
def fs_type(self):
"""
Gets the fs_type of this V1VsphereVirtualDiskVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.
:return: The fs_type of this V1VsphereVirtualDiskVolumeSource.
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""
Sets the fs_type of this V1VsphereVirtualDiskVolumeSource.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.
:param fs_type: The fs_type of this V1VsphereVirtualDiskVolumeSource.
:type: str
"""
self._fs_type = fs_type
@property
def volume_path(self):
"""
Gets the volume_path of this V1VsphereVirtualDiskVolumeSource.
Path that identifies vSphere volume vmdk
:return: The volume_path of this V1VsphereVirtualDiskVolumeSource.
:rtype: str
"""
return self._volume_path
@volume_path.setter
def volume_path(self, volume_path):
"""
Sets the volume_path of this V1VsphereVirtualDiskVolumeSource.
Path that identifies vSphere volume vmdk
:param volume_path: The volume_path of this V1VsphereVirtualDiskVolumeSource.
:type: str
"""
if volume_path is None:
raise ValueError("Invalid value for `volume_path`, must not be `None`")
self._volume_path = volume_path
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
I have lost count of the number of times I’ve read and heard of celebrity marriages failing. It seems strange that we often see TV and movie stars as perfect people, living the fairytale life of riches and glamour. I suppose we all have to stop sticking our heads in the clouds and face reality.
So what does it take to keep your own sense of self? Here are 7 things to think and improve on for the next 7 days.
Are you wandering through life – hoping that you’ll find happiness, health and wealth? Identify your life purpose and create a mission statement and you will have your own unique compass that will lead you to your true north every time.
This may seem like a daunting task, especially if you see yourself in a tight spot or at a dead end. However, it is important to remember that there is always a small light within you that can shine bright and help you turn things around and make a big difference to yourself.
What do you value? Make a list of your top 5 values. Some examples are security, freedom, family, spiritual development, learning. As you set your goals – check your goals against your values. If the goal doesn’t align with any of your top five values – you may want to reconsider it or revise it.
Unmet needs can keep you from living realistically. Take care of yourself. You cannot fully take care of anyone else if you do not take care of yourself first. Do you have a need to be acknowledged, to be right, to be in control, to be loved? There are so many people who live their lives without realizing their dreams. Most people who do not realize their dreams end up stressed or even depressed. List your top four needs and get them met before it's too late!
You know who you are and what you truly enjoy in life. Obstacles like self-doubt and a lack of enthusiasm may hinder you, but they will not derail your chance to become the person you ought to be. Express yourself and honor the people who have inspired you to become the person you want to be.
Increase the awareness of your inner wisdom by reflecting in silence on a regular basis. Be in touch with nature. Breathe deeply to quiet your preoccupied mind. For most of us it's hard to find the peace and quiet we want, even in our own homes. In my case I often just sit in a dimly lit room and play classical music. There's sound, but music can focus your awareness.
What are your positive characteristics? What special talents do you have? List five – if you get stuck, ask those closest to you to help identify these. Are you imaginative, witty, good with your hands? Find ways to express your authentic self through your strengths. You can increase your self-confidence when you can share what you know with others.
When you live authentically, you may find that you develop an interconnected sense of being. When you are true to yourself, living your purpose and giving of your talents to others, you give back what you came to share with others: your spirit, your essence. Sharing your gift with those close to you is rewarding, but it is even more rewarding when a stranger appreciates what you have done for them.
Self improvement is work that is worth it. It doesn't have to happen within the confines of an office building, or within the four corners of your own room. The difference lies within ourselves and how much we want to change for the better.
|
from django.db import models
# Create your models here.
class Category(models.Model):
name=models.CharField(max_length=50)
slug=models.SlugField(max_length=50,unique=True,
help_text='Unique value for product page URL, created from name.')
description=models.TextField()
is_active=models.BooleanField(default=True)
meta_keywords=models.CharField("Meta Keywords",max_length=255,
help_text='Comma-delimited set of SEO keywords for meta tag')
meta_description=models.CharField("Meta Description",max_length=255,
help_text='Content for description meta tag')
created_at=models.DateTimeField(auto_now_add=True)
updated_at=models.DateTimeField(auto_now=True)
    parent_category=models.ForeignKey('self',blank=True,null=True)  # optional, so top-level categories can exist
#created_by (to be added after the user model is defined)
#updated_by (to be added after the user model is defined)
class Meta:
db_table='categories'
ordering=['-created_at']
verbose_name_plural='Categories'
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('catalog_category',(),{'category_slug':self.slug})
class Product(models.Model):
name=models.CharField(max_length=255,unique=True)
slug=models.SlugField(max_length=255,unique=True,
help_text='Unique value for product-deal page URL, created from name.')
#shop (to be added after shop model is defined)
brand=models.CharField(max_length=50)
price=models.DecimalField(max_digits=9,decimal_places=2)
old_price=models.DecimalField(max_digits=9,decimal_places=2,
blank=True,default=0.00)
image=models.CharField(max_length=50)
is_active=models.BooleanField(default=True)
#quantity=models.IntegerField()
product_description=models.TextField()
deal_description=models.TextField()
deal_weeknumber=models.IntegerField()
meta_keywords=models.CharField("Meta Keywords",max_length=255,
help_text='Comma-delimited set of SEO keywords for meta tag')
meta_description=models.CharField("Meta Description",max_length=255,
help_text='Content for description meta tag')
created_at=models.DateTimeField(auto_now_add=True)
updated_at=models.DateTimeField(auto_now=True)
categories=models.ManyToManyField(Category)
#created_by (to be added after the user model is defined)
#updated_by (to be added after the user model is defined)
class Meta:
db_table='products'
ordering=['-created_at']
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('catalog_product',(),{'product_slug':self.slug})
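# --- URLconf sketch (not part of models.py) --------------------------------
# The @models.permalink decorators above reverse URL patterns named
# 'catalog_category' and 'catalog_product'. A minimal old-style urls.py that
# would satisfy them might look like this; the app label 'catalog' and the
# view names 'show_category'/'show_product' are assumptions for illustration.
#
# from django.conf.urls import patterns, url
#
# urlpatterns = patterns('catalog.views',
#     url(r'^category/(?P<category_slug>[-\w]+)/$', 'show_category',
#         name='catalog_category'),
#     url(r'^product/(?P<product_slug>[-\w]+)/$', 'show_product',
#         name='catalog_product'),
# )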
|
Mission of the Organization The Minnesota Music Coalition supports and connects Minnesota's diverse community of musicians. Through education, communication, and advocacy, we make Minnesota the best place for all musicians to live and work.
- Share your gifts, resources and assets in your service to the organization.
- Participate in monthly Board Meetings and relevant committee meetings.
- Invest in the organization by making a financial contribution (of any size).
- Utilize your own personal connections on behalf of the MMC.
- Support the work of the MMC by regularly attending events and programs.
- Advocate on behalf of the organization and its mission to serve all Minnesota musicians.
The MMC Board is an active working Board that is very involved in the oversight of this small but growing organization. Board Members participate in at least one committee--Executive, Finance, Governance, Development, Communications or Membership.
|
/**
* Definition for binary tree
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
import java.util.Hashtable;
public class Solution {
public TreeNode buildTree(int[] preorder, int[] inorder) {
        int len = preorder.length;
        if(len == 0) return null;
        // Map each value to its index in the inorder traversal so we can decide
        // in O(1) whether a value belongs in the left or right subtree of a node.
        Hashtable<Integer, Integer> intb = new Hashtable<Integer, Integer>();
        int root = preorder[0];
        TreeNode tree_root = new TreeNode(root);
        for(int i = 0; i < len; i++){
            intb.put(inorder[i], i);
        }
for(int j = 1; j < len; j++){
int num = preorder[j];
int inloc = intb.get(num);
TreeNode next = tree_root;
boolean flag = false;
while(true){
if(inloc < intb.get(next.val)){
if(next.left == null){
next.left = new TreeNode(num);
flag = true;
}
else{
next = next.left;
}
}
else{
if(next.right == null){
next.right = new TreeNode(num);
flag = true;
}
else{
next = next.right;
}
}
if(flag) break;
}
}
return tree_root;
}
}
|
Hellenismos is a contemporary revival movement of the ancient Greek polytheistic religion and culture. Followers celebrate the ancient gods, culture and philosophy.
After years of praying underground and in other hidden worship sites, Muslims in Athens eagerly await the opening of the city's first official mosque.
An American Mormon goes to Greece for a short-term volunteer stint and ends up running a refugee camp in Athens.
Jews in Thessaloniki rescue tombstones that were scattered when Nazis destroyed their cemetery; used for construction projects, the fragments find a resting place in a new Jewish burial site.
In Spring 2017, USC Annenberg’s J585 Reporting on Religion course focused on Greece’s economic, political and spiritual crises. In preparation for our trip to Greece, we reported on the local Greek, Orthodox and Muslim communities since we planned to cover, among other stories, the plight of Syrian refugees in Athens, the rise of a Neo-Nazi party, and the future of Greek Orthodoxy. During our eight days away, we visited Athens and Thessaloniki, Greece’s second-largest city and a longtime crossroads for Christians, Muslims and Jews. Among our stories were reports on Greece’s opioid problem, Athens’ first drag club, the resurgence of the “evil eye,” and one musician’s campaign for the environment. This year’s partnerships with the GroundTruth Project and KPCC were augmented by publications in outlets ranging from Public Radio International to the Daily Beast and from Killing the Buddha to Bustle.
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Base URL used to get all the required files.
BASE_DOWNLOAD_URL = 'http://www1.ncdc.noaa.gov/pub/data/ghcn/daily/'
# List of required files for a build.
FILE_NAMES = []
FILE_NAMES.append('ghcnd-countries.txt')
FILE_NAMES.append('ghcnd-inventory.txt')
FILE_NAMES.append('ghcnd-states.txt')
FILE_NAMES.append('ghcnd-stations.txt')
FILE_NAMES.append('ghcnd-version.txt')
FILE_NAMES.append('ghcnd_all.tar.gz')
FILE_NAMES.append('ghcnd_gsn.tar.gz')
FILE_NAMES.append('ghcnd_hcn.tar.gz')
FILE_NAMES.append('readme.txt')
FILE_NAMES.append('status.txt')
# Store the row details here.
# Index positions of the name, start, end and type entries in each field definition.
FIELD_INDEX_NAME = 0
FIELD_INDEX_START = 1
FIELD_INDEX_END = 2
FIELD_INDEX_TYPE = 3
DLY_FIELD_ID = 0
DLY_FIELD_YEAR = 1
DLY_FIELD_MONTH = 2
DLY_FIELD_ELEMENT = 3
DLY_FIELD_DAY_OFFSET = 4
DLY_FIELD_DAY_FIELDS = 4
DLY_FIELDS = []
# Details about the row.
DLY_FIELDS.append(['ID', 1, 11, 'Character'])
DLY_FIELDS.append(['YEAR', 12, 15, 'Integer'])
DLY_FIELDS.append(['MONTH', 16, 17, 'Integer'])
DLY_FIELDS.append(['ELEMENT', 18, 21, 'Character'])
# Days in each row.
for i in range(1, 32):
start = 22 + ((i - 1) * 8)
DLY_FIELDS.append(['VALUE' + str(i), (start + 0), (start + 4), 'Integer'])
DLY_FIELDS.append(['MFLAG' + str(i), (start + 5), (start + 5), 'Character'])
DLY_FIELDS.append(['QFLAG' + str(i), (start + 6), (start + 6), 'Character'])
DLY_FIELDS.append(['SFLAG' + str(i), (start + 7), (start + 7), 'Character'])
# Details about the row.
STATIONS_FIELDS = {}
STATIONS_FIELDS['ID'] = ['ID', 1, 11, 'Character']
STATIONS_FIELDS['LATITUDE'] = ['LATITUDE', 13, 20, 'Real']
STATIONS_FIELDS['LONGITUDE'] = ['LONGITUDE', 22, 30, 'Real']
STATIONS_FIELDS['ELEVATION'] = ['ELEVATION', 32, 37, 'Real']
STATIONS_FIELDS['STATE'] = ['STATE', 39, 40, 'Character']
STATIONS_FIELDS['NAME'] = ['NAME', 42, 71, 'Character']
STATIONS_FIELDS['GSNFLAG'] = ['GSNFLAG', 73, 75, 'Character']
STATIONS_FIELDS['HCNFLAG'] = ['HCNFLAG', 77, 79, 'Character']
STATIONS_FIELDS['WMOID'] = ['WMOID', 81, 85, 'Character']
# Details about the row.
COUNTRIES_FIELDS = {}
COUNTRIES_FIELDS['CODE'] = ['CODE', 1, 2, 'Character']
COUNTRIES_FIELDS['NAME'] = ['NAME', 4, 50, 'Character']
# Details about the row.
STATES_FIELDS = {}
STATES_FIELDS['CODE'] = ['CODE', 1, 2, 'Character']
STATES_FIELDS['NAME'] = ['NAME', 4, 50, 'Character']
# Details about the row.
INVENTORY_FIELDS = {}
INVENTORY_FIELDS['ID'] = ['ID', 1, 11, 'Character']
INVENTORY_FIELDS['LATITUDE'] = ['LATITUDE', 13, 20, 'Real']
INVENTORY_FIELDS['LONGITUDE'] = ['LONGITUDE', 22, 30, 'Real']
INVENTORY_FIELDS['ELEMENT'] = ['ELEMENT', 32, 35, 'Character']
INVENTORY_FIELDS['FIRSTYEAR'] = ['FIRSTYEAR', 37, 40, 'Integer']
INVENTORY_FIELDS['LASTYEAR'] = ['LASTYEAR', 42, 45, 'Integer']
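# --- Usage sketch (not part of the original constants) ---------------------
# Slices one fixed-width .dly record into named fields using DLY_FIELDS.
# Column positions in the field lists are 1-based and inclusive, so the
# corresponding Python slice is [start - 1:end].
def parse_dly_line(line):
    record = {}
    for field in DLY_FIELDS:
        name = field[FIELD_INDEX_NAME]
        start = field[FIELD_INDEX_START]
        end = field[FIELD_INDEX_END]
        record[name] = line[start - 1:end].strip()
    return record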
|
Play, sing, dance, and learn with the little one you love this winter! Lake City Music Together offers mixed-age music and movement classes for children (birth-kindergarten) and their caregivers.
Registration information is available at www.lakecitymusictogether.com.
Classes are held at Winona Lake Grace Brethren Church.
Class size is limited. Additional class times may be made available with sufficient interest.
The nine-week session is $150 for one child, $260 for two siblings, with a household maximum of $330. Tuition also includes 2 copies of the professionally-recorded Sticks song collection with an accompanying songbook with lyrics, music, and activities, as well as a download code for the Family Music Zone which links to the free "Hello Everybody!" app so your music can go with you wherever you are.
After December 15, an additional $15 charge for new families is added to tuition; use the code CHAMBER for a registration fee waiver!
|
"""Tests for display of certificates on the student dashboard. """
import unittest
import ddt
import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from mock import patch
from django.test.utils import override_settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from certificates.tests.factories import GeneratedCertificateFactory # pylint: disable=import-error
from certificates.api import get_certificate_url # pylint: disable=import-error
from course_modes.models import CourseMode
from student.models import LinkedInAddToProfileConfiguration
# pylint: disable=no-member
def _fake_is_request_in_microsite():
"""
Mocked version of microsite helper method to always return true
"""
return True
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTest(ModuleStoreTestCase):
"""Tests display of certificates on the student dashboard. """
USERNAME = "test_user"
PASSWORD = "password"
DOWNLOAD_URL = "http://www.example.com/certificate.pdf"
def setUp(self):
super(CertificateDisplayTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(result, msg="Could not log in")
self.course = CourseFactory()
self.course.certificates_display_behavior = "early_with_info"
self.update_course(self.course, self.user.username)
@ddt.data('verified', 'professional')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
def test_display_verified_certificate(self, enrollment_mode):
self._create_certificate(enrollment_mode)
self._check_can_download_certificate()
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
def test_display_verified_certificate_no_id(self):
"""
        Confirm that if we get a certificate with a no-id-professional mode,
        we can still download our certificate.
"""
self._create_certificate(CourseMode.NO_ID_PROFESSIONAL_MODE)
self._check_can_download_certificate_no_id()
@ddt.data('verified', 'honor')
@override_settings(CERT_NAME_SHORT='Test_Certificate')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_display_download_certificate_button(self, enrollment_mode):
"""
        Tests that if CERTIFICATES_HTML_VIEW is True,
        the course has enabled web certificates via the cert_html_view_enabled setting,
        and no active certificate configuration is available,
        then none of the download certificate buttons should be visible.
"""
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
self._create_certificate(enrollment_mode)
self._check_can_not_download_certificate()
@ddt.data('verified')
@override_settings(CERT_NAME_SHORT='Test_Certificate')
@patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
def test_linked_student_to_web_view_credential(self, enrollment_mode):
certificates = [
{
'id': 0,
'name': 'Test Name',
'description': 'Test Description',
'is_active': True,
'signatories': [],
'version': 1
}
]
self.course.certificates = {'certificates': certificates}
self.course.cert_html_view_enabled = True
self.course.save() # pylint: disable=no-member
self.store.update_item(self.course, self.user.id)
cert = self._create_certificate(enrollment_mode)
test_url = get_certificate_url(course_id=self.course.id, uuid=cert.verify_uuid)
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'View Test_Certificate')
self.assertContains(response, test_url)
def test_post_to_linkedin_invisibility(self):
"""
        Verifies that the "Add Certificate to LinkedIn Profile" button
        does not appear by default (when config is not set)
"""
self._create_certificate('honor')
# until we set up the configuration, the LinkedIn action
# button should not be visible
self._check_linkedin_visibility(False)
def test_post_to_linkedin_visibility(self):
"""
        Verifies that the "Add Certificate to LinkedIn Profile" button appears
        as expected
"""
self._create_certificate('honor')
config = LinkedInAddToProfileConfiguration(
company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
enabled=True
)
config.save()
# now we should see it
self._check_linkedin_visibility(True)
@mock.patch("microsite_configuration.microsite.is_request_in_microsite", _fake_is_request_in_microsite)
def test_post_to_linkedin_microsite(self):
"""
Verifies behavior for microsites which disables the post to LinkedIn
feature (for now)
"""
self._create_certificate('honor')
config = LinkedInAddToProfileConfiguration(
company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
enabled=True
)
config.save()
# now we should not see it because we are in a microsite
self._check_linkedin_visibility(False)
def _check_linkedin_visibility(self, is_visible):
"""
Performs assertions on the Dashboard
"""
response = self.client.get(reverse('dashboard'))
if is_visible:
self.assertContains(response, u'Add Certificate to LinkedIn Profile')
else:
self.assertNotContains(response, u'Add Certificate to LinkedIn Profile')
def _create_certificate(self, enrollment_mode):
"""Simulate that the user has a generated certificate. """
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, mode=enrollment_mode)
return GeneratedCertificateFactory(
user=self.user,
course_id=self.course.id,
mode=enrollment_mode,
download_url=self.DOWNLOAD_URL,
status="downloadable",
grade=0.98,
)
def _check_can_download_certificate(self):
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'Download Your ID Verified')
self.assertContains(response, self.DOWNLOAD_URL)
def _check_can_download_certificate_no_id(self):
"""
Inspects the dashboard to see if a certificate for a non verified course enrollment
is present
"""
response = self.client.get(reverse('dashboard'))
self.assertContains(response, u'Download')
self.assertContains(response, u'(PDF)')
self.assertContains(response, self.DOWNLOAD_URL)
def _check_can_not_download_certificate(self):
"""
Make sure response does not have any of the download certificate buttons
"""
response = self.client.get(reverse('dashboard'))
self.assertNotContains(response, u'View Test_Certificate')
self.assertNotContains(response, u'Download Your Test_Certificate (PDF)')
self.assertNotContains(response, u'Download Test_Certificate (PDF)')
self.assertNotContains(response, self.DOWNLOAD_URL)
|
This is something you do not see every day… This photo of a weasel and a woodpecker looks like a still from a movie, but the sad truth is that the weasel was actually attacking the bird. The woodpecker fell to the ground when the weasel jumped on its back, yet it was able to escape, though not before the photographer captured this remarkable image. "The woodpecker landed right in front of us and I feared the worst," said the photographer.
|
#
# Copyright (C) 2013 - 2015 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
"""configobj backend.
- Format to support: configobj, http://goo.gl/JbP2Kp (readthedocs.org)
- Requirements: configobj (https://pypi.python.org/pypi/configobj/)
- Limitations: None obvious
- Special options:
- All options except for 'infile' passed to configobj.ConfigObj.__init__
should work.
- See also: http://goo.gl/LcVOzZ (readthedocs.org)
"""
from __future__ import absolute_import
import configobj
import anyconfig.backend.base
def make_configobj(cnf, **kwargs):
"""
    Make a configobj.ConfigObj initialized with given config `cnf`.
:param cnf: Configuration data :: Parser.container
:param kwargs: optional keyword parameters passed to ConfigObj.__init__
:return: An initialized configobj.ConfigObj instance
"""
cobj = configobj.ConfigObj(**kwargs)
cobj.update(cnf)
return cobj
class Parser(anyconfig.backend.base.LParser, anyconfig.backend.base.D2Parser):
"""
Parser for Ini-like config files which configobj supports.
"""
_type = "configobj"
_priority = 10
_load_opts = ["cls", "configspec", "encoding", "interpolation",
"raise_errors", "list_values", "create_empty", "file_error",
"stringify", "indent_type", "default_encoding", "unrepr",
"_inspec", ]
_dump_opts = ["cls", "encoding", "list_values", "indent_type",
"default_encoding", "unrepr", "write_empty_values", ]
_open_flags = ('rb', 'wb')
load_from_path = anyconfig.backend.base.to_method(configobj.ConfigObj)
load_from_stream = anyconfig.backend.base.to_method(configobj.ConfigObj)
def dump_to_string(self, cnf, **kwargs):
"""
Dump config `cnf` to a string.
:param cnf: Configuration data to dump :: self.container
:param kwargs: backend-specific optional keyword parameters :: dict
:return: string represents the configuration
"""
return '\n'.join(make_configobj(cnf, **kwargs).write())
def dump_to_stream(self, cnf, stream, **kwargs):
"""
:param cnf: Configuration data to dump :: self.container
:param stream: Config file or file-like object
:param kwargs: backend-specific optional keyword parameters :: dict
"""
make_configobj(cnf, **kwargs).write(stream)
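
# --- Usage sketch (not part of the backend module) -------------------------
# Shows that keyword arguments flow straight through make_configobj into
# configobj.ConfigObj.__init__; with no output file, ConfigObj.write()
# returns the rendered lines, exactly as dump_to_string relies on above.
if __name__ == "__main__":
    _cnf = {"main": {"host": "localhost", "port": "8080"}}
    print('\n'.join(make_configobj(_cnf, indent_type="    ").write()))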
# vim:sw=4:ts=4:et:
|
It is with acknowledgment to Garry Loveridge for his tremendous support, with his two digger machines, to the Society during the days of track work both in the old Waitara Railway Yards and at the Waitara Road station Yards, Brixton.
This work continued through the month of June working both Saturdays and Sundays with the odd days in the week/s.
Garry has now signed on as a member, which was most welcome.
Our Team Leader Mike's first real big project 2013, with the guidance from Denyse.
The replacement of some roads in the Waitara railway Yards.
Jim Blyde the Scribe as usual.
Noel and I left New Plymouth about 08.30am on the first two days. When we arrived at Brixton the team had almost completed their task of loading track-sets on the Us wagon. All that was left to do was to secure the load and load the digger on the Eww. The 12 tonne digger, along with the time of its owner Garry Loveridge, was provided free of charge (this was most welcomed by the Society). The Society had to pay the transport and fuel costs.
There was a good number of members during the first two days that I was in attendance. We all tried to the best of our ability to complete what was required by the Team Leader.
Of course it goes without saying that the Ladies did their part as well by providing nourishment to the team. Hot drinks were brought down to the Waitara Yard along with freshly baked cake, for smoko. Lunch was curried mince with mashed potatoes with a hint of parsley, with buttered buns and hot drinks. (Sometimes we get really spoilt).
The next day's smoko was hot drinks at the Waitara Road Yard. Lunch was veggie soup with bacon hocks, again with hot drinks, and fresh fruit. Thank you Ladies.
I did not attend on Sunday. I will endeavor to get some information as to how the day went.
All the photos are copyright to me, except those identified otherwise.
Just after Noel and I arrived. Three sets of rail already on board. Mike, Ben, Trevor, Mark, Noel, Garry in the cab, Denyse, Bill on the Us and Jim H.
Below are some photos which were taken by Margo Zeier, Mike's Sister.
Thank you Margo. It is nice to get someone else's perspective in photos. I was not available for the Sunday working bee.
Below are photos from Ben, one of our younger members. As above, it is nice to see another member's photos.
The team continued with collecting the rail sets that were not required at the Waitara Yard.
The day was cloudy and cool, then cleared to a nice day, but as the afternoon wore on it got very cloudy; eventually we had light rain, enough to make one wet.
A good team of members worked well, once again.
Garry Loveridge was asked to assist with his little digger. Again he was of tremendous help.
Three lengths of rail were laid.
|
from hc.api.models import Channel
from hc.test import BaseTestCase
from django.test.utils import override_settings
@override_settings(APPRISE_ENABLED=True)
class AddAppriseTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.url = "/projects/%s/add_apprise/" % self.project.code
def test_instructions_work(self):
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertContains(r, "Integration Settings", status_code=200)
def test_it_works(self):
form = {"url": "json://example.org"}
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, form)
self.assertRedirects(r, self.channels_url)
c = Channel.objects.get()
self.assertEqual(c.kind, "apprise")
self.assertEqual(c.value, "json://example.org")
self.assertEqual(c.project, self.project)
@override_settings(APPRISE_ENABLED=False)
    def test_it_requires_apprise_enabled(self):
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 404)
def test_it_requires_rw_access(self):
self.bobs_membership.rw = False
self.bobs_membership.save()
self.client.login(username="[email protected]", password="password")
r = self.client.get(self.url)
self.assertEqual(r.status_code, 403)
|
This Challenge is sponsored by the Paris Saint-Germain (53 avenue Émile Zola CS 60065 92650 Boulogne Billancourt Cedex) as well as l’École Polytechnique (the “Sponsors”).
The aim of these Rules is to set out the terms and conditions governing your participation in the Challenge. By participating in this Challenge, you fully and unconditionally agree to comply with these Rules. If you do not agree with any of these Rules, do not register for this Challenge and do not submit an entry.
In case of non-respect of these Rules, the Participant will be immediately disqualified from the Challenge and no prize will be awarded.
To take part in the Challenge, we must receive your completed registration by the 15 April 2019 at 11.59pm.
Any registration based on inaccurate, false or incomplete information will result in the Participant’s disqualification. Refusing collection, recording and use of their personal data that is strictly necessary to performing the Challenge will result in the Participant’s disqualification. The Participant is solely responsible for the information he/she provides when registering. Any intentional or non-intentional mistake, anomaly or inconsistency, regarding this information, may result in the Participant’s disqualification. The Sponsor reserves the right to proceed all necessary verifications regarding the Participant’s identity, postal and/or email address.
Deletion of a Participant’s user account on the Challenge website will be deemed as a withdrawal from the Challenge. In this case, the Participant, regardless of whether the deletion results from the Participant’s personal action or not, will not participate in the Challenge, and will not obtain any compensation.
Students. Any individual aged eighteen (18) years and/or with full legal capacity, who (i) is currently enrolled at a post-secondary institution or (ii) has recently graduated from a post-secondary institution less than 2 years before the current academic year, with a student card as verification. Each Student may only participate once in each Challenge. If the participant is a minor, he/she must provide this authorization signed by a parent or guardian.
Participation in the Challenge must be individual (Individual Participant).
Individual Participants must enter the Challenge in their individual capacities.
To be eligible, Deliverables must (i) address the specific issue set out in the Brief, (ii) be in a common digital format, such as, DOC, DOCX, PDF, PPT, PPTX, KEY, ODT, MP3, MPEG, MOV, MP4, and (iii) be in English or French, and in general terms must comply with these Rules.
The Deliverable includes exclusive contributions from an Individual Participant.
The content of the Deliverable does not and will not infringe or violate any rights of any third party or entity, including, without limitation, intellectual property rights, privacy, competition law, confidentiality, or any contractual or extracontractual right. All deliverables suspected of violating any law(s) and/or any third party's rights will be ineligible.
Participants are responsible for and shall bear any costs or expenses associated with preparing and submitting Deliverables. Participants assume all risk for damaged, lost, late, incomplete, invalid, incorrect or misdirected Deliverables.
Round One: Deliverables are accepted from March 4, 2019, 12:00 pm, to April 15, 2019, 11:59 pm. At the end of this stage, a maximum of 11 participants will be selected and will access the second stage.
Round Two: This stage will take place from 22 to 23 April 2019. Participants will need to have a Skype interview with the challenge organizers to clarify the design of their algorithm. At the end of this stage, a maximum of 5 participants will be selected and will proceed to the third stage.
Round Three: Deliverables are accepted from April 24, 2019, 12:00 pm, to May 20, 2019, 11:59 pm. At the end of this stage, the 5 selected participants will reach the final.
The Final: Participants are not required to post a Deliverable on the Challenge Site for this final step.
Deliverable. If a Participant does not upload a Deliverable on the Challenge website before the deadline, this will be considered as a withdrawal from the Challenge. The Participant may not join the Challenge, and may not obtain any compensation from the Sponsor.
Notification to Participants. Once selection has been completed, Participants will be notified by email, phone, or by other means of the results of each Round. The Sponsor will be free to alter the dates on which results are released should this become necessary due to the number of projects to be assessed. The ranking decided by the final jury will be announced on the day that presentations are made and will identify the Participants that are finalists.
Downloading the learning database consisting of Opta F24 files.
Submitting an algorithm in Python or R programming language applicable to this test database. The submitted folder must contain 3 files (in Python: main_psgx.py; install_psgx.py; readme.text OR in R: main_psgx.r; install_psgx.r; readme.text).
The Algorithm Vote will take place from April 15, 2019, 12:00 to April 17, 2019, 11:59 pm.
Performance: Candidates will be required to create an algorithm to optimize athletic performance based on a set of data provided. The most efficient algorithms will be selected for the next step.
Except as provided below, the 11 Participants who received the highest scores at the end of the voting stage win this vote and proceed to the next stage of selection.
Participants will be informed of the results of the first selection stage on 18 April 2019.
The Criteria Vote will take place on April 23, 2019.
Relevance: to what extent does the Deliverable meet the specific need expressed in the Brief?
Clarity: Is the proposed solution and explanation of its design well-articulated?
Except as provided below, the 5 Participants who received the highest scores at the end of the voting stage win this vote and proceed to the next stage of selection.
Participants will be informed of the results of the second selection stage on 24 April 2019.
All Deliverables of this second stage must have been received by the Organizing Companies before or on May 20, 2019, 11:59pm. Deliverables must meet the specifications defined in section 6 above.
The 5 finalists selected at the end of the interview will be given a data set to test the good design of their algorithm. The finalists will have to put themselves in the shoes of an analyst from Paris Saint-Germain confronted with a problem. Participants will be able to be assisted in their project design.
The Participants selected to participate in this Final will be contacted directly by the Organising Companies. In the absence of confirmation of participation from them when the Organising Companies have tried to contact them by email and telephone, the latter reserve the right, at their sole discretion, to disqualify the Participants concerned. Such disqualification, if any, shall not give rise to any compensation from the Organizing Companies.
This final phase will consist of an oral presentation of the projects selected by the Participants. Participants will be asked to present the results obtained in the third phase on May 21, 2019, at 4:30 pm.
Performance: could the proposed algorithm best support the Paris Saint-Germain team and enable it to increase its sporting performance?
At this stage of the selection, Participants are not required to submit any Deliverables via the Challenge Website before the Final.
This final phase will consist of an oral presentation of the Final Deliverable by the Project Participants on May, 21, 2019.
A ranking of the 5 Projects participating in the final phase will be carried out on the basis of this oral presentation of the Final Deliverable. An institutional film presenting the prize-giving ceremony of the Competition may be produced by the Organizers, and the selected Participants undertake to make their best efforts to participate.
At the end of the Final, the jury will be in charge of electing the winning projects and will proceed to a ranking of the winners.
Finalists who are unable to travel to Paris to participate in the final will be disqualified.
The Participant acknowledges and consents that the Sponsor may, at an international level, for the duration of the Challenge and for a period of one (1) year following the Final Round, use elements relating to the Challenge for publicity, including for the Sponsor's advertising or other marketing purposes, by any means and through any format (website, advertising banners, social networks, newsletter, press release) now known or unknown to date, free of charge or for a consideration.
In particular, you consent to the use, by the Sponsor, of your name and surname, your city and region of residence, the name of the institution in which you studied or in which you obtained your diplomas and other biographical information, your image, the information regarding the Prize (if you are a winner) and any other personal data that you submit with your Deliverables, as well as the content of your Deliverables relating to the Challenge.
As an example, each Participant authorizes the Sponsor to use the photographs taken during the Final and to disseminate them via any communication medium.
Such use does not entitle the winner to any other payment than the Prize he/she received. The Sponsor agrees to cease using the aforementioned elements in connection with the Participant at the end of the aforementioned period.
Sponsor has no obligation to keep the information contained in the Deliverables confidential. In general terms, when submitting any Deliverables, the Participants understand, agree and accept that any information contained therein may be publicly disclosed by the Sponsor.
If you are a prize winner, Sponsor may request that you execute a confidentiality/non-disclosure agreement for the purpose of entering into negotiations regarding the further development of your project, as described in your Deliverable. Such confidentiality/non-disclosure agreement will be directed to your Deliverable and all intellectual property that it may contain.
‘Creations’ refers to any software (including source and object code software), database, technical specifications, text, design, model, information, knowledge, method, process or product, as well as any resulting elements and/or processes likely or otherwise to be protected according to national and/or international laws or conventions on intellectual property developed by any Participant as a part of any submitted deliverable throughout the Challenge (“Creations”).
General provisions. The Participants acknowledge and accept that their participation in the Challenge does not imply any transfer of intellectual property to them by the Organizing Companies, which remain the sole owners of all their trademarks and distinctive signs. Consequently, the Participants refrain from reproducing in any way whatsoever the trademarks and distinctive signs of the Organizing Companies. The Participants shall in particular take care not to create confusion in the minds of the public as to the existence of a relationship as an employee, or as a "partner", "supplier" or "sponsor" of the Organizing Companies. In the event of failure to comply with these provisions, the Organizing Companies reserve the right to disqualify a Participant from the Challenge under the conditions provided for in Article 12 and to assert their rights in any way they deem appropriate.
By submitting a Deliverable and participating in this Challenge, you are not granting Sponsor any rights to any intellectual property supporting all of part of the Deliverable and Sponsor makes no claim to ownership of your Deliverable or any intellectual property that it may contain.
Challenge Cancellation or Suspension. The Sponsor reserves the right in its discretion, to (i) cancel, terminate, modify or suspend the Challenge and these Rules, for any reason, at any time and without any liability, and (ii) to limit or restrict participation in the Challenge. The Sponsor will not be held liable for the modification, cancellation or suspension of the Challenge and no compensation or remuneration will be due to the Participants.
Participants agree that company, its affiliates and all of their respective officers, directors, employees, contractors, representatives and agents (“released parties”) will have no liability whatsoever for, and will be released and held harmless by participants for any claims, liabilities, or causes of action of any kind or nature for any injury, loss or damages of any kind including direct, indirect, incidental, consequential or punitive damages to persons, including without limitation disability or death. Without limiting the foregoing, everything on the challenge website and in connection with the challenge is provided “as is” without warranty of any kind, either express or implied, including but not limited to, the implied warranties of merchantability, fitness for a particular purpose, and non-infringement. Some jurisdictions may not allow the limitations or exclusion of liability for incidental or consequential damages or exclusion of implied warranties, in which case such limitation or exclusion shall apply only to the extent permitted by the law in the relevant jurisdiction.
Participant’s personal data is subject to processing within the meaning of the regulations on the protection of personal data (The EU 2016/679 General Data Protection Regulation and the Council of 27 April 2016, known as GDPR) for which the Sponsor defines the purposes and means and is, as such, “Data controller” within the meaning of the GDPR.
In accordance with the provisions of the GDPR, the Sponsor undertakes to implement organizational and technical security measures in order to protect all Participant’s Personal Data. The Sponsor undertakes to allow the exercise of their rights from GDPR.
Any Participant’s claims arising from the Challenge, should be addressed no later than thirty (30) days following the Challenge end date to Mathieu LACOME ([email protected]). All claims must include: (i) the Participant’s complete contact details (name, address, email address and phone); (ii) the name of the Challenge; and (iii) a clear and detailed explanation for the claim.
The Challenge and any action related thereto shall be governed by the French applicable law.
In case of persistent disagreement on the application or interpretation of the Rules, and in the absence of an amicable settlement, any dispute that may arise between the parties, will be subject to the exclusive jurisdiction of the French courts to which the parties expressly grant jurisdiction, even in the case of multiple defendants, in summary proceedings, appeal by guarantee or by motion or any other jurisdiction.
(2) the Participant (the "Recipient").
In this Agreement the Disclosing Party and the Recipient are each individually referred to as a "Party" and together as the "Parties".
Information relating to the Disclosing Party, which is confidential in nature, may be disclosed to the Recipient by the Disclosing Party in connection with the Recipient’s participation in the Event (as defined below) and upon such disclosure the Disclosing Party wishes to ensure that the Recipient maintains the confidentiality of such information. In consideration of the benefits to the Parties from disclosing and receiving such confidential information, the Parties have agreed to comply with the following provisions in connection with the use and disclosure of such information.
"Affiliate" means in relation to a body corporate, any subsidiary, subsidiary undertaking or holding company of such body corporate, and any subsidiary or subsidiary undertaking of any such holding company for the time being as defined in section 1159 of the Companies Act 2006.
(d) the Parties agree in writing is not confidential and can be disclosed to third parties free from any obligations of confidentiality.
"Purpose" means participation at the Sports Analytics Challenge.
References to Clauses are to the Clauses of this Agreement.
1.2 Words in the singular shall include words in the plural and vice versa and a person includes a natural person, corporate or unincorporated body (whether or not having separate legal personality).
1.3 A reference to a statute or statutory provision is a reference to it as it is in force for the time being, taking account of any amendment, extension, or re-enactment, and includes any subordinate legislation for the time being in force made under it.
Confidential Information, save as strictly necessary for the achievement of the Purpose.
2.2 The Recipient may disclose Confidential Information only to the extent required by law, by any governmental or other regulatory authority or by a court or other authority of competent jurisdiction provided that, (i) to the extent it is legally permitted to do so, he/she gives the Disclosing Party as much notice of such disclosure as possible and, where notice of disclosure is not prohibited and is given in accordance with this Clause 2.2, he/she takes into account the reasonable requests of the Disclosing Party in relation to the content of such disclosure and (ii) such Confidential Information is disclosed only to the extent necessary.
and to the extent strictly necessary to permit the Recipient to keep evidence that he has performed his obligations under this Agreement. The provisions of this Agreement shall continue to apply to any such documents and materials retained by the Recipient, except that the Recipient shall not make any further use or disclosure of the Confidential Information.
3.2 If the Recipient develops or uses a product or a process which, in the reasonable opinion of the Disclosing Party, might have involved the use of any of the Confidential Information, the Recipient shall, at the written request of the Disclosing Party, supply to the Disclosing Party information reasonably necessary to establish that the Confidential Information has not been used or disclosed in order to develop or use that product or process.
3.3 The Recipient shall not make, or permit any person to make, any public announcement concerning this Agreement or the relationship between the Parties without the prior written consent of the Disclosing Party (such consent not to be unreasonably withheld or delayed), except as required by law or any governmental or regulatory authority or by any court or other authority of competent jurisdiction.
4.1 The Disclosing Party reserves all rights in the Confidential Information. The Recipient acknowledges and agrees that, as between the Disclosing Party and the Recipient, the Confidential Information and any intellectual property rights pertaining thereto, are and shall remain the property of the Disclosing Party or its Affiliates or commercial partners as the case may be. No rights in respect of the Confidential Information are granted to the Recipient and no obligations are imposed on the Disclosing Party other than those expressly stated in this Agreement. In particular, nothing in this Agreement shall be construed or implied as obliging the Disclosing Party to disclose any specific type of information under this Agreement, whether Confidential Information or not.
4.2 Any intellectual property rights arising from the Recipient’s projects, concepts and innovations during the Event, are and shall remain the property of the Recipient (providing such intellectual property rights do not contain any or all of the Disclosing Party’s Confidential Information).
4.3 In the event the intellectual property arising pursuant to clause 4.2 contains any or all of the Disclosing Party’s Confidential Information, such intellectual property rights are and shall remain (as between the Disclosing Party and the Recipient) the property of the Disclosing Party. If the Recipient wishes to pursue any commercial development in respect of this intellectual property (the “Commercial Pursuit”), the Recipient may request a license from the Disclosing Party (such consent to be in the sole discretion of the Disclosing Party) and the Disclosing Party shall have a right of first refusal to collaborate with the Recipient in respect of such Commercial Pursuit.
4.4 Except as expressly stated in this Agreement, the Disclosing Party does not make any express or implied warranty or representation concerning the Confidential Information, or the accuracy or completeness of the Confidential Information.
4.5 The disclosure of the Confidential Information by the Disclosing Party shall not form any offer by, or representation or warranty on the part of, the Disclosing Party to enter into any further agreement with the Recipient.
4.6 Without prejudice to any other rights or remedies which the Disclosing Party may have, the Recipient acknowledges and agrees that damages would not be an adequate remedy for any breach by him/her of the provisions of this Agreement and the Disclosing Party shall be entitled to seek the remedies of injunction, specific performance and other equitable relief for any threatened or actual breach of any such provisions by the Recipient and no proof of special damages shall be necessary for the enforcement by the Disclosing Party of its rights hereunder.
5.1 The obligations under this Agreement shall continue indefinitely unless and until the parties have entered into a binding agreement with each other to the contrary.
5.2 This Agreement constitutes the whole agreement between the Parties. Each Party acknowledges that, in entering into this Agreement, it has not relied on and shall have no right or remedy in respect of, any statement, representation, assurance or warranty (whether made negligently or innocently) other than as expressly set out in this Agreement. Nothing in this Clause shall limit or exclude any liability for fraud or for fraudulent misrepresentation.
5.3 The Parties may terminate, rescind or vary this Agreement without the consent of any person who is not a party to this Agreement. For the avoidance of doubt, any termination, rescission or variation of this Agreement is not subject to the consent of any Affiliate of the Disclosing Party. No variation of this Agreement shall be effective unless it is in writing and signed by each of the Parties, or their authorised representatives.
5.4 No failure or delay by the Disclosing Party in exercising any of its rights under this Agreement shall operate as a waiver thereof, nor shall any single or partial exercise preclude any other or further exercise of such rights.
5.5 Nothing in this Agreement is intended to, or shall be deemed to establish any partnership or joint venture between the Parties, constitute any Party the agent of the other Party, nor authorise any Party to make or enter into any commitments for or on behalf of the other Party.
5.6 This Agreement is made for the benefit of the Parties to it and is not intended to benefit, or be enforceable by, anyone else, with the exception of the Disclosing Party’s Affiliates from time to time, and each Affiliate may enforce this Agreement as if they were the Disclosing Party and a party to this Agreement.
5.7 This Agreement and any dispute claim or non-contractual obligation arising out of or in connection with this Agreement, shall be governed by, and construed in accordance with, English law. Each Party hereby submits to the exclusive jurisdiction of the courts of England and Wales in respect of any dispute, claim or matter arising out of or in connection with this Agreement.
|
"""Helpers for use with type annotation.
Use the empty classes in this module when annotating the types of Pyrsistent
objects, instead of using the actual collection class.
For example,
from pyrsistent import pvector
from pyrsistent.typing import PVector
myvector: PVector[str] = pvector(['a', 'b', 'c'])
"""
from __future__ import absolute_import
try:
from typing import Container
from typing import Hashable
from typing import Generic
from typing import Iterable
from typing import Mapping
from typing import Sequence
from typing import Sized
from typing import TypeVar
__all__ = [
'CheckedPMap',
'CheckedPSet',
'CheckedPVector',
'PBag',
'PDeque',
'PList',
'PMap',
'PSet',
'PVector',
]
T = TypeVar('T')
KT = TypeVar('KT')
VT = TypeVar('VT')
class CheckedPMap(Mapping[KT, VT], Hashable):
pass
# PSet.add and PSet.discard have different type signatures than that of Set.
class CheckedPSet(Generic[T], Hashable):
pass
class CheckedPVector(Sequence[T], Hashable):
pass
class PBag(Container[T], Iterable[T], Sized, Hashable):
pass
class PDeque(Sequence[T], Hashable):
pass
class PList(Sequence[T], Hashable):
pass
class PMap(Mapping[KT, VT], Hashable):
pass
# PSet.add and PSet.discard have different type signatures than that of Set.
class PSet(Generic[T], Hashable):
pass
class PVector(Sequence[T], Hashable):
pass
class PVectorEvolver(Generic[T]):
pass
class PMapEvolver(Generic[KT, VT]):
pass
class PSetEvolver(Generic[T]):
pass
except ImportError:
pass
|
The Persons detectives investigate all major felony crimes such as homicide, robbery, kidnapping, elder abuse, neglect, and exploitation. Detectives cover all aspects of an investigation. They must work closely with prosecutors, forensic investigators, lab personnel, and other law enforcement officials.
The Persons Unit has a Victim/Witness Advocate. The advocate works closely with victims, witnesses, and family members to keep them apprised of case development as well as assisting in meeting any special needs they may have. The Victim/Witness Advocate position ensures that the needs of the survivors and victims of violent crime are not overlooked.
Within the Persons Unit are four highly trained detectives that investigate crimes targeting the juvenile population of the unincorporated areas of Washoe County. These detectives investigate incidents such as sexual molestation, physical abuse, missing/abducted juveniles, runaway juveniles and other crimes that endanger juveniles.
These detectives work closely with other local law enforcement agencies, regional school systems, community services agencies, and national groups such as the National Center for Missing and Exploited Children in an effort to ensure the well being of our children.
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2016 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "http://malwaredomains.lehigh.edu/files/domains.txt"
__check__ = "safebrowsing.clients.google.com"
__reference__ = "malwaredomains.com"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for line in content.split('\n'):
line = line.strip('\r').replace('\xa0', "")
if not line or line.startswith('#'):
continue
items = line.split('\t')
if len(items) > 4:
info = items[3]
for _ in ("andromeda", "banjori", "banload", "bedep", "bhek", "bhek2", "blackvine", "browlock", "citadel", "corebot", "cridex", "cryptowall", "darkcomet", "dexter", "dircrypt", "dridex", "dyre", "fareit", "geinimi", "gh0st", "gorynych", "goz", "gozi", "gumblar", "hesperbot", "kaixin", "katrina", "kazy", "keitaro", "kelihos", "kins", "koobface", "kryptik", "matsnu", "napolar", "necurs", "neurevt", "njrat", "nymaim", "passwordstealer", "pkybot", "pony", "p0ny", "posmalware", "poweliks", "pushdo", "pykspa", "qakbot", "ramnit", "ranbyus", "rbn", "rovnix", "runforestrun", "russiandoll", "shiotob", "shylock", "simda", "soaksoak", "sofacy", "suppobox", "teslacrypt", "tinba", "vawtrak", "waledac", "yigido", "zemot", "zeus"):
if re.search(r"(?i)\b%s\b" % _, info):
info = "%s (malware)" % _
break
retval[items[2]] = (info.replace('_', ' '), __reference__)
return retval
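# --- Usage sketch (not part of the feed module) -----------------------------
# fetch() returns a dict mapping a domain to an (info, reference) tuple; this
# just prints a handful of entries (requires network access to the feed URL).
if __name__ == "__main__":
    for domain, (info, reference) in list(fetch().items())[:5]:
        print("%s -> %s (%s)" % (domain, info, reference))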
|
It’s time to create a custom checker, but what kind?
Security Issues with Apple iOS?
What’s the Right Iteration Length?
Is Pure Agile Always an Option?
|
import inspect
import sys
import time
from django.conf import settings
from django.core import cache
from django.core.cache import get_cache as base_get_cache
from django.core.cache.backends.base import BaseCache
from django.dispatch import Signal
from django.template import Node
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _, ungettext
from debug_toolbar.panels import DebugPanel
from debug_toolbar.utils import (tidy_stacktrace, render_stacktrace,
get_template_info, get_stack)
cache_called = Signal(providing_args=["time_taken", "name", "return_value", "args", "kwargs", "trace"])
def send_signal(method):
def wrapped(self, *args, **kwargs):
t = time.time()
value = method(self, *args, **kwargs)
t = time.time() - t
enable_stacktraces = getattr(settings,
'DEBUG_TOOLBAR_CONFIG', {}).get('ENABLE_STACKTRACES', True)
if enable_stacktraces:
stacktrace = tidy_stacktrace(reversed(get_stack()))
else:
stacktrace = []
template_info = None
cur_frame = sys._getframe().f_back
try:
while cur_frame is not None:
if cur_frame.f_code.co_name == 'render':
node = cur_frame.f_locals['self']
if isinstance(node, Node):
template_info = get_template_info(node.source)
break
cur_frame = cur_frame.f_back
        except Exception:
            pass
del cur_frame
cache_called.send(sender=self.__class__, time_taken=t,
name=method.__name__, return_value=value,
args=args, kwargs=kwargs, trace=stacktrace,
template_info=template_info, backend=self.cache)
return value
return wrapped
class CacheStatTracker(BaseCache):
"""A small class used to track cache calls."""
def __init__(self, cache):
self.cache = cache
def __repr__(self):
return u"<CacheStatTracker for %s>" % self.cache.__repr__()
def _get_func_info(self):
frame = sys._getframe(3)
info = inspect.getframeinfo(frame)
return (info[0], info[1], info[2], info[3])
def __contains__(self, key):
return self.cache.__contains__(key)
def make_key(self, *args, **kwargs):
return self.cache.make_key(*args, **kwargs)
def validate_key(self, *args, **kwargs):
self.cache.validate_key(*args, **kwargs)
def clear(self):
return self.cache.clear()
@send_signal
def add(self, *args, **kwargs):
return self.cache.add(*args, **kwargs)
@send_signal
def get(self, *args, **kwargs):
return self.cache.get(*args, **kwargs)
@send_signal
def set(self, *args, **kwargs):
return self.cache.set(*args, **kwargs)
@send_signal
def delete(self, *args, **kwargs):
return self.cache.delete(*args, **kwargs)
@send_signal
def has_key(self, *args, **kwargs):
return self.cache.has_key(*args, **kwargs)
@send_signal
def incr(self, *args, **kwargs):
return self.cache.incr(*args, **kwargs)
@send_signal
def decr(self, *args, **kwargs):
return self.cache.decr(*args, **kwargs)
@send_signal
def get_many(self, *args, **kwargs):
return self.cache.get_many(*args, **kwargs)
@send_signal
def set_many(self, *args, **kwargs):
self.cache.set_many(*args, **kwargs)
@send_signal
def delete_many(self, *args, **kwargs):
self.cache.delete_many(*args, **kwargs)
@send_signal
def incr_version(self, *args, **kwargs):
return self.cache.incr_version(*args, **kwargs)
@send_signal
def decr_version(self, *args, **kwargs):
return self.cache.decr_version(*args, **kwargs)
class CacheDebugPanel(DebugPanel):
"""
Panel that displays the cache statistics.
"""
name = 'Cache'
template = 'debug_toolbar/panels/cache.html'
has_content = True
def __init__(self, *args, **kwargs):
super(CacheDebugPanel, self).__init__(*args, **kwargs)
self.total_time = 0
self.hits = 0
self.misses = 0
self.calls = []
self.counts = SortedDict((
('add', 0),
('get', 0),
('set', 0),
('delete', 0),
('get_many', 0),
('set_many', 0),
('delete_many', 0),
('has_key', 0),
('incr', 0),
('decr', 0),
('incr_version', 0),
('decr_version', 0),
))
cache_called.connect(self._store_call_info)
def _store_call_info(self, sender, name=None, time_taken=0,
return_value=None, args=None, kwargs=None, trace=None,
template_info=None, backend=None, **kw):
if name == 'get':
if return_value is None:
self.misses += 1
else:
self.hits += 1
elif name == 'get_many':
for key, value in return_value.iteritems():
if value is None:
self.misses += 1
else:
self.hits += 1
self.total_time += time_taken * 1000
self.counts[name] += 1
self.calls.append({
'time': time_taken,
'name': name,
'args': args,
'kwargs': kwargs,
'trace': render_stacktrace(trace),
'template_info': template_info,
'backend': backend
})
def nav_title(self):
return _('Cache')
def nav_subtitle(self):
cache_calls = len(self.calls)
return ungettext('%(cache_calls)d call in %(time).2fms',
'%(cache_calls)d calls in %(time).2fms',
cache_calls) % {'cache_calls': cache_calls,
'time': self.total_time}
def title(self):
count = len(getattr(settings, 'CACHES', ['default']))
return ungettext('Cache calls from %(count)d backend',
'Cache calls from %(count)d backends',
count) % dict(count=count)
def url(self):
return ''
def process_response(self, request, response):
self.record_stats({
'total_calls': len(self.calls),
'calls': self.calls,
'total_time': self.total_time,
'hits': self.hits,
'misses': self.misses,
'counts': self.counts,
})
def get_cache_debug(*args, **kwargs):
base_cache = base_get_cache(*args, **kwargs)
return CacheStatTracker(base_cache)
cache.cache = CacheStatTracker(cache.cache)
cache.get_cache = get_cache_debug
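# Illustrative only (not part of debug_toolbar): once this module is imported,
# cache.cache above is wrapped by CacheStatTracker, so every tracked call fires
# the cache_called signal defined at the top of the file. A hypothetical
# receiver could observe calls like this:
def _log_cache_call(sender, name=None, time_taken=0, **kwargs):
    # time_taken is in seconds (see send_signal); report it in milliseconds.
    print "cache.%s took %.2f ms" % (name, time_taken * 1000)
# Example wiring (commented out to avoid side effects on import):
#   cache_called.connect(_log_cache_call)
#   cache.cache.set('greeting', 'hello', 30)  # fires cache_called
#   cache.cache.get('greeting')               # fires cache_called again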
|
Add £240 to this order to pay in easy instalments.
Sleek and stylish, the Kim Minchin Baby Roxy Disc Silver Bracelet is made from sterling silver, with a chain adjustable from 15.5 to 17.5cm.
|
#!/usr/bin/env python3
"""
Common routines for pulling useful examples of Quechua adjectives whose
generation requires lexical choice.
Used by extractbibles and extractelicitation, at least.
"""
import yaml
import l3
es_adj = set()
def load_spanish_adjectives():
spanish_adj_fn = "../l3xdg/languages/es/es_adj.yaml"
with open(spanish_adj_fn) as infile:
        adjs = yaml.safe_load(infile)
for entry in adjs:
if "word" in entry:
es_adj.add(entry["word"])
quechua_cache = {}
def quechuaMA(word):
"""Caching wrapper around the l3 Quechua morphological analyzer."""
if word not in quechua_cache:
quechua_cache[word] = l3.anal_word("qu", word, raw=True)
return quechua_cache[word]
def get_pos(word):
"""Given a word in Spanish, return its POS tag."""
pass
## try looking up the word in a dictionary.
## which dictionaries of Spanish do we have?
## runasimi, spanish wordnet, dicAML...
## how do we deal with morphology in Spanish? What if we're looking for
## "alta", but only "alto" is in the dictionary? Do stemming?
## can morphological analysis with AntiMorfo help?
## failing that, return "OOV".
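## A minimal illustrative sketch (not part of the original module): one way
## get_pos could answer the questions above, assuming es_adj has been filled by
## load_spanish_adjectives() and using a naive gender/number-stripping fallback
## in place of real Spanish morphology (AntiMorfo and the dictionaries named
## above are left out).
def get_pos_sketch(word):
    """Hypothetical POS lookup: 'ADJ' if the word (or a crude masculine
    singular form of it) is a known Spanish adjective, otherwise 'OOV'."""
    if word in es_adj:
        return "ADJ"
    # e.g. "altas" -> "alta" -> "alto"
    stripped = word.rstrip("s")
    for candidate in (stripped, stripped[:-1] + "o"):
        if candidate in es_adj:
            return "ADJ"
    return "OOV"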
|
Public Participation There will be 15 minutes set aside at the beginning of the meeting for the public to ask questions or make comments on items on the Agenda if it is required. Once the Council Meeting is in session there will be no further public participation.
3. Minutes of the Council Meeting held on 25th February 2019 to be approved and signed.
4.2 DM/19/0781 - Freckborough Manor Ditchling Road Ditchling Common Ditchling Burgess Hill East Sussex RH15 0SE Demolition of existing garage and flat and the provision of a newbuild four bedroom detached house with garage and the provision of a new garage and store for existing dwelling.
14. Correspondence The list to be circulated prior to the meeting.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Pedro Arroyo M <[email protected]>
# Copyright (C) 2015 Mall Connection(<http://www.mallconnection.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class hr_fixed_allocation(osv.osv):
'''
Open ERP Model
'''
_name = 'hr.fixed.allocation'
_description = 'hr.fixed.allocation'
_columns = {
'name':fields.char('Description', size=64, required=True, readonly=False),
#'code':fields.char('Code', size=64, required=False, readonly=False),
#'type':fields.selection([
# ('collation','Collation'),
# ('mobilization','Mobilization'),
# ('cash_loss','Cash loss'),
# ('tool_wear','Tool wear')
# ('bonification','Bonification')
# ], 'Type'),
'amount': fields.float('Amount', digits=(3,2),required=True),
'allocation_type_id':fields.many2one('hr.fixed.allocation.type', 'Allocation type', required=True),
'contract_id':fields.many2one('hr.contract', 'Contract', required=False),
#'taxable':fields.boolean('Taxable', required=False),
}
hr_fixed_allocation()
class hr_fixed_allocation_type(osv.osv):
'''
Open ERP Model
'''
_name = 'hr.fixed.allocation.type'
_description = 'hr.fixed.allocation type'
_columns = {
'name':fields.char('Description', size=64, required=True, readonly=False),
'code':fields.char('Code', size=64, required=False, readonly=False),
'type':fields.selection([
('collation','Collation'),
('mobilization','Mobilization'),
('cash_loss','Cash loss'),
('tool_wear','Tool wear'),
('bonification','Bonification')
], 'Type'),
'taxable':fields.boolean('Taxable', required=False),
}
hr_fixed_allocation_type()
|
The Federal Court has ordered the CFMEU and a delegate to pay almost $100,000 in penalties for the coercion involved when he prevented a subcontractor's employee from working on a job because he wasn't a union member.
A court has found a delegate liable as an accessory for adverse action after he stood by and failed to correct the record when an organiser told workers they would be removed from a construction site if they refused to join the union.
A tribunal has ordered an employer to allow the CFMEU entry to a major freeway construction site to investigate suspected breaches of OHS laws amid claims of threats directed towards its "stressed and anxious" members.
An Australia Post employee who two decades ago won the support of then shadow IR minister John Howard in postal union elections has failed to win his job back after an FWC full bench rejected his appeal.
The Federal Court has today fined a Melbourne painting & decorating firm and its director almost $20,000 for texting workers and telling them they must be members of the CFMEU before starting on-site work.
Despite securing almost $2 million in penalties against non-compliant players in the construction industry over 12 months, the FWBC's director says that it is losing the fight to restore law and order on building sites.
A contracts manager and a team leader of a construction company that took adverse action against a subcontractor it refused to hire because its enterprise agreement wasn't endorsed by the CFMEU have been fined almost $2,000 each for the part they played in their employer's contraventions.
FWBC pursues CFMEU over alleged "closed shop practices"; Document request no "fishing expedition": FWC; WGEA challenges workplace gender stereotypes.
The FWC has knocked back an application for orders preventing three union officials entering the Ichthys LNG project, as well as the organisation of combined union meetings on site.
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
'colander',
'pyramid',
'pyramid_debugtoolbar',
'WebHelpers',
]
setup(name='formhelpers2',
version='0.0',
description='formhelpers2',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="formhelpers2",
entry_points = """\
[paste.app_factory]
main = formhelpers2:main
""",
paster_plugins=['pyramid'],
)
|
Jones: Mr. Cole did you find him?
Cole: Yeah I found him.
Jones: Then you failed your mission. You were to eliminate him before he could release the plague.
Cole: Leland's dead. I killed him just like you wanted me to. You were wrong. Killing him didn't change anything. There were others. There were always others.
Jones: Others? Others who? Mr. Cole... who?
Cole: The Army of the 12 Monkeys.
Cassie: I said your name.
Cole: You gave me this mission. You're the reason I'm here.
|
import math
import model
import cython_verlet
import threading
import numpy
import matplotlib
import matplotlib.pyplot as plot
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
def pure_python_verlet(particles):
    # One velocity-Verlet step with dt = 1. Note that the pairwise term below
    # divides by r rather than r**3, i.e. a simplified force law rather than
    # strict inverse-square gravity.
    G = 6.67 * (10 ** -11)
    for p in particles:
new_a = sum(map(lambda e:
G * e.mass * (e.position['x'] - p.position['x'])
/ (math.sqrt(
(e.position['x'] - p.position['x'])**2 +
(e.position['y'] - p.position['y'])**2
)) , filter(lambda e: not e is p, particles)))
new_b = sum(map(lambda e:
G * e.mass * (e.position['y'] - p.position['y'])
/ (math.sqrt(
(e.position['x'] - p.position['x'])**2 +
(e.position['y'] - p.position['y'])**2
)), filter(lambda e: not e is p, particles)))
if p.time > 0:
p.position['x'] += p.velocity['u'] + 0.5 * p.acceleration['a']
p.position['y'] += p.velocity['v'] + 0.5 * p.acceleration['b']
p.time += 1
p.velocity['u'] += 0.5 * (new_a + p.acceleration['a'])
p.velocity['v'] += 0.5 * (new_b + p.acceleration['b'])
p.acceleration['a'] = new_a
p.acceleration['b'] = new_b
return [p.position for p in particles]
def verlet_worker(particles, begin, end):
    # The same simplified-force Verlet step as pure_python_verlet, applied only
    # to particles[begin:end] so the work can be split across threads.
    G = 6.67 * (10 ** -11)
    for p in particles[begin : end]:
new_a = sum(map(lambda e:
G * e.mass * (e.position['x'] - p.position['x'])
/ (math.sqrt(
(e.position['x'] - p.position['x'])**2 +
(e.position['y'] - p.position['y'])**2
)) , filter(lambda e: not e is p, particles)))
new_b = sum(map(lambda e:
G * e.mass * (e.position['y'] - p.position['y'])
/ (math.sqrt(
(e.position['x'] - p.position['x'])**2 +
(e.position['y'] - p.position['y'])**2
)), filter(lambda e: not e is p, particles)))
if p.time > 0:
p.position['x'] += p.velocity['u'] + 0.5 * p.acceleration['a']
p.position['y'] += p.velocity['v'] + 0.5 * p.acceleration['b']
p.time += 1
p.velocity['u'] += 0.5 * (new_a + p.acceleration['a'])
p.velocity['v'] += 0.5 * (new_b + p.acceleration['b'])
p.acceleration['a'] = new_a
p.acceleration['b'] = new_b
def multiprocess_verlet(particles):
    # Despite the name, this spawns one thread per particle (threading, not
    # multiprocessing), so the computation is still serialized by the GIL.
    jobs = []
for i in range(len(particles)):
job = threading.Thread(target = verlet_worker, args = (particles, i, i + 1))
job.start()
jobs.append(job)
for j in jobs:
j.join()
return [p.position for p in particles]
class ParticlePlot(FigureCanvasQTAgg):
def __init__(self, parent, width, height, dpi, size_policy):
figure = matplotlib.figure.Figure(figsize = (width, height), dpi = dpi,
facecolor = 'white')
self.axes = figure.add_axes([0.005,0.005,0.990,0.990], frameon=True, aspect=1)
FigureCanvasQTAgg.__init__(self, figure)
self.setParent(parent)
FigureCanvasQTAgg.setSizePolicy(self, size_policy, size_policy)
FigureCanvasQTAgg.updateGeometry(self)
self.figure.canvas.draw()
def update_plot(self, particles, updater):
self.axes.cla()
self.axes.set_xlim(-45, 45), self.axes.set_xticks([])
self.axes.set_ylim(-25, 25), self.axes.set_yticks([])
data = updater(particles)
mass_data = [ p.mass / 1000 for p in particles ]
color_data = [ p.color for p in particles ]
x_data = [ p['x'] for p in data ]
y_data = [ p['y'] for p in data ]
self.scatter = self.axes.scatter(x_data, y_data, s = mass_data, lw = 0.5,
c = color_data)
self.figure.canvas.draw()
class ParticleController:
defaults = {
'mass': 1250 * 1000,
'lifetime': 4,
'velocity': { 'u': 5, 'v': 7 },
'position': { 'x': 0, 'y': 0 },
'color': (0, 1, 0),
'method': 0
}
    def __init__(self):
        # Copy the mutable default dicts so that editing one controller's
        # position or velocity does not mutate the shared class-level defaults.
        self.__mass = __class__.defaults['mass']
        self.__lifetime = __class__.defaults['lifetime']
        self.__velocity = dict(__class__.defaults['velocity'])
        self.__position = dict(__class__.defaults['position'])
        self.__color = __class__.defaults['color']
        self.method = __class__.defaults['method']
self.particles = []
self.updaters = [
pure_python_verlet,
cython_verlet.cython_verlet,
multiprocess_verlet,
]
self.methods = [
"Pure Python Verlet algorithm implementation",
"Cython Verlet algorithm implementation",
"Multiprocess Verlet algorithm implementation",
]
def __add_particle(self):
self.particles.append(model.WildParticle(
self.position['x'], self.position['y'],
self.velocity['u'], self.velocity['v'],
self.mass, self.color, self.lifetime
) )
@property
def position(self):
return self.__position
@position.setter
def position(self, value):
self.__position = value
@property
def velocity(self):
return self.__velocity
@velocity.setter
def velocity(self, value):
self.__velocity = value
@property
def acceleration(self):
return self.__acceleration
@acceleration.setter
def acceleration(self, value):
self.__acceleration = value
@property
def mass(self):
return self.__mass
@mass.setter
def mass(self, value):
self.__mass = value
@property
def lifetime(self):
return self.__lifetime
@lifetime.setter
def lifetime(self, value):
self.__lifetime = value
@property
def color(self):
return self.__color
    @color.setter
    def color(self, value):
        self.__color = value
|
It's going to be a pleasant day with mostly sunny skies and highs hovering near 50.
Winds will be light out of the west at 10 mph.
Tonight expect clear skies and lows of 32.
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import math
from pyramid.view import view_config
from dace.objectofcollaboration.principal.util import get_current
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView
from lac.views.filter import find_entities
from lac.content.interface import ISiteFolder
from lac.content.processes.admin_process.behaviors import (
SeeSiteFolders)
from lac.content.lac_application import (
CreationCulturelleApplication)
from lac import _
CONTENTS_MESSAGES = {
'0': _(u"""No element found"""),
'1': _(u"""One element found"""),
    '*': _(u"""${number} elements found""")
}
@view_config(
name='seesitefolders',
context=CreationCulturelleApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeSiteFoldersView(BasicView):
title = ''
name = 'seesitefolders'
behaviors = [SeeSiteFolders]
template = 'lac:views/admin_process/templates/see_sitefolders.pt'
viewid = 'seesitefolders'
def update(self):
self.execute(None)
# root = getSite()
# folders = root.site_folders
folders = find_entities(
user=get_current(),
interfaces=[ISiteFolder],
sort_on='modified_at', reverse=True)
result = {}
len_result = len(folders)
index = str(len_result)
if len_result > 1:
index = '*'
        self.title = _(CONTENTS_MESSAGES[index],
                       mapping={'number': len_result})
        values = {'folders': list(folders),
                  'row_len': math.ceil(len_result / 6.0)}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
DEFAULTMAPPING_ACTIONS_VIEWS.update({SeeSiteFolders: SeeSiteFoldersView})
|
What is the airport code for Halim Perdanakusuma International Airport?
What is the ICAO code for Halim Perdanakusuma International Airport?
What is the IATA code for Halim Perdanakusuma International Airport?
|
from django.db import models
from django.core.exceptions import ValidationError
from mozdns.validation import validate_ip_type
from mozdns.ip.models import ipv6_to_longs
from core.utils import IPFilter, one_to_two, to_a
from core.vlan.models import Vlan
from core.site.models import Site
from core.mixins import ObjectUrlMixin
from core.keyvalue.base_option import CommonOption
import ipaddr
class Network(models.Model, ObjectUrlMixin):
id = models.AutoField(primary_key=True)
vlan = models.ForeignKey(Vlan, null=True,
blank=True, on_delete=models.SET_NULL)
site = models.ForeignKey(Site, null=True,
blank=True, on_delete=models.SET_NULL)
# NETWORK/NETMASK FIELDS
IP_TYPE_CHOICES = (('4', 'ipv4'), ('6', 'ipv6'))
ip_type = models.CharField(max_length=1, choices=IP_TYPE_CHOICES,
editable=True, validators=[validate_ip_type])
ip_upper = models.BigIntegerField(null=False, blank=True)
ip_lower = models.BigIntegerField(null=False, blank=True)
# This field is here so ES can search this model easier.
network_str = models.CharField(max_length=49, editable=True,
help_text="The network address of this "
"network.")
prefixlen = models.PositiveIntegerField(null=False,
help_text="The number of binary "
"1's in the netmask.")
dhcpd_raw_include = models.TextField(
null=True, blank=True, help_text="The config options in this box "
"will be included *as is* in the dhcpd.conf file for this "
"subnet."
)
network = None
def details(self):
details = [
('Network', self.network_str),
]
if self.vlan:
details.append(
('Vlan',
to_a("{0}:{1}".format(self.vlan.name, self.vlan.number),
self.vlan)))
if self.site:
details.append(('Site', to_a(self.site.full_name, self.site)))
return details
class Meta:
db_table = 'network'
unique_together = ('ip_upper', 'ip_lower', 'prefixlen')
def delete(self, *args, **kwargs):
if self.range_set.all().exists():
raise ValidationError("Cannot delete this network because it has "
"child ranges")
super(Network, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
self.clean()
super(Network, self).save(*args, **kwargs)
def clean(self):
self.update_network()
        # Look at all ranges that claim to be in this subnet; are they
        # actually in the subnet?
for range_ in self.range_set.all():
"""
I was writing checks to make sure that subnets wouldn't orphan
ranges. IPv6 needs support.
"""
fail = False
# Check the start addresses.
if range_.start_upper < self.ip_upper:
fail = True
elif (range_.start_upper > self.ip_upper and range_.start_lower <
self.ip_lower):
fail = True
elif (range_.start_upper == self.ip_upper and range_.start_lower
< self.ip_lower):
fail = True
if self.ip_type == '4':
brdcst_upper, brdcst_lower = 0, int(self.network.broadcast)
else:
brdcst_upper, brdcst_lower = ipv6_to_longs(str(
self.network.broadcast))
# Check the end addresses.
if range_.end_upper > brdcst_upper:
fail = True
elif (range_.end_upper < brdcst_upper and range_.end_lower >
brdcst_lower):
fail = True
elif (range_.end_upper == brdcst_upper and range_.end_lower
> brdcst_lower):
fail = True
if fail:
raise ValidationError("Resizing this subnet to the requested "
"network prefix would orphan existing "
"ranges.")
def update_ipf(self):
"""Update the IP filter. Used for compiling search queries and firewall
rules."""
self.update_network()
self.ipf = IPFilter(self.network.network, self.network.broadcast,
self.ip_type, object_=self)
def update_network(self):
"""This function will look at the value of network_str to update other
fields in the network object. This function will also set the 'network'
attribute to either an ipaddr.IPv4Network or ipaddr.IPv6Network object.
"""
if not isinstance(self.network_str, basestring):
raise ValidationError("ERROR: No network str.")
try:
if self.ip_type == '4':
self.network = ipaddr.IPv4Network(self.network_str)
elif self.ip_type == '6':
self.network = ipaddr.IPv6Network(self.network_str)
else:
raise ValidationError("Could not determine IP type of network"
" %s" % (self.network_str))
except (ipaddr.AddressValueError, ipaddr.NetmaskValueError):
raise ValidationError("Invalid network for ip type of "
"'{0}'.".format(self, self.ip_type))
# Update fields
self.ip_upper, self.ip_lower = one_to_two(int(self.network))
self.prefixlen = self.network.prefixlen
def __str__(self):
return self.network_str
def __repr__(self):
return "<Network {0}>".format(str(self))
class NetworkKeyValue(CommonOption):
obj = models.ForeignKey(Network, related_name='keyvalue_set', null=False)
aux_attrs = (
('description', 'A description of the site'),
)
class Meta:
db_table = 'network_key_value'
unique_together = ('key', 'value', 'obj')
"""The NetworkOption Class.
"DHCP option statements always start with the option keyword, followed
by an option name, followed by option data." -- The man page for
dhcpd-options
    In this class, options are stored without the 'option' keyword. If it
    is an option, is_option should be set.
"""
def save(self, *args, **kwargs):
self.clean()
super(NetworkKeyValue, self).save(*args, **kwargs)
def _aa_description(self):
"""
        A description of this network.
"""
pass
def _aa_filename(self):
"""
filename filename;
The filename statement can be used to specify the name of the
initial boot file which is to be loaded by a client. The filename
should be a filename recognizable to whatever file transfer
protocol the client can be expected to use to load the file.
"""
self.is_statement = True
self.is_option = False
self.has_validator = True
# Anything else?
def _aa_next_server(self):
"""
The next-server statement
next-server server-name;
The next-server statement is used to specify the host address
of the server from which the initial boot file (specified in
the filename statement) is to be loaded. Server-name should be
a numeric IP address or a domain name. If no next-server
parameter applies to a given client, the DHCP server's IP
address is used.
"""
self.has_validator = True
self.is_statement = True
self.is_option = False
self._single_ip(self.obj.ip_type)
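    # Illustrative only: in a generated dhcpd.conf, the two statements
    # documented above would appear roughly as (hypothetical values):
    #
    #   filename "pxelinux.0";
    #   next-server 10.0.0.5;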
def _aa_dns_servers(self):
"""
A list of DNS servers for this network.
"""
self.is_statement = False
self.is_option = False
self._ip_list(self.obj.ip_type)
def _aa_routers(self):
self._routers(self.obj.ip_type)
def _aa_ntp_servers(self):
self._ntp_servers(self.obj.ip_type)
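# Illustrative usage sketch (not part of the original module), assuming the
# surrounding Django project is configured; field names come from the Network
# model above. save() calls clean(), which calls update_network() to derive
# ip_upper, ip_lower and prefixlen from network_str:
#
#   net = Network(network_str="10.0.0.0/8", ip_type='4')
#   net.save()
#   assert net.prefixlen == 8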
|
Grand 1920 Federation Hotel in WA's most beautiful country town!
The Bridgetown Hotel is a grand 1920 Federation-style country hotel in arguably WA's most beautiful country town. The hotel encompasses 8 spa suites accessed directly from the hotel's first-floor balcony; a restaurant, cafe and bar on the ground floor, open for lunch and dinner every day; an outdoor garden deck; and a boutique bottleshop, all within a short stroll of the town's shops, galleries, breathtaking scenery and the magnificent Blackwood River.
|
"""Image manipulation tools."""
__all__ = [
'ImageError',
'Unsupported',
'ImageFormat',
'detect_format',
'resize',
'resize_unsafe',
]
import enum
import os
import tempfile
from . import _imagetools
from ._imagetools import ImageError
class Unsupported(ImageError):
pass
@enum.unique
class ImageFormat(enum.Enum):
UNKNOWN = _imagetools.FORMAT_UNKNOWN
GIF = _imagetools.FORMAT_GIF
JPEG = _imagetools.FORMAT_JPEG
PNG = _imagetools.FORMAT_PNG
def detect_format(image):
"""Detect image format."""
return ImageFormat(_imagetools.detect_format(image))
def resize(image, desired_width, output_path):
"""Resize an image to the desired_width.
    It writes to a temporary file while processing, so it does not clobber
    the output file on error. If clobbering the output is not an issue, you
    may use resize_unsafe, which is faster.
"""
tmp_path = None
try:
fd, tmp_path = tempfile.mkstemp()
os.close(fd) # Close fd immediately (don't leak it!).
dimension = resize_unsafe(image, desired_width, tmp_path)
os.rename(tmp_path, output_path)
tmp_path = None
finally:
if tmp_path is not None:
os.remove(tmp_path)
return dimension
def resize_unsafe(image, desired_width, output_path):
"""Unsafe version of resize."""
image_format = detect_format(image)
if image_format is ImageFormat.JPEG:
return _imagetools.resize_jpeg(image, desired_width, output_path)
else:
raise Unsupported
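# Illustrative usage (not part of the module), assuming `image` is the raw
# image bytes and that JPEG is currently the only supported input format:
#
#   with open('photo.jpg', 'rb') as f:
#       data = f.read()
#   if detect_format(data) is ImageFormat.JPEG:
#       dimension = resize(data, 640, 'out.jpg')  # temp file + rename, so
#                                                 # out.jpg is never clobbered
#   else:
#       pass  # resize()/resize_unsafe() would raise Unsupported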
|
PIAA Xtreme White Plus A8 D2 at ModBargains.com. Get Expert Help!
Stock light output not strong enough for you? Looking for that brighter, whiter look? Upgrade your stock B5 A4 bulbs with PIAA's very popular Xtreme White Plus technology bulb. Rated at 4000K and providing 110 watts' worth of light from only 55 watts, these bulbs deliver a brilliant cool white light that illuminates road hazards and street signs long before a normal halogen bulb would, giving you a classier look and better road safety.
Star White bulbs are significantly whiter and brighter than normal halogen bulbs. The Star White bulbs operate at 3800K while normal halogen bulbs operate at 3200K. Star White bulbs feature PIAA’s exclusive XTRA Technology, which increases bulb performance without overtaxing your car's electrical system. PIAA Star White Bulbs can be used to upgrade performance in headlamps, fog lights, driving lamps and many other applications throughout the vehicle.
Stock foglight output not strong enough for you? Looking for a brighter, more vivid look? Upgrade your stock foglight bulbs with PIAA's very popular Xtreme White Plus technology bulb. Rated at 4000K and providing 110 watts' worth of light from only 55 watts, these bulbs deliver a brilliant cool white light that illuminates road hazards long before a normal halogen bulb would, giving you a classier look and better road safety.
Star White bulbs are significantly whiter and brighter than normal halogen bulbs. The Star White bulbs operate at 3800K while normal halogen bulbs operate at 3200K. Star White bulbs feature PIAA’s exclusive XTRA Technology, which increases bulb performance without using more of your car's electrical system. PIAA Star White Bulbs can be used to upgrade performance in your fog lights.
PIAA lamps are intended for use solely as auxiliary lighting. Lighting laws vary from state to state. PIAA makes no representation or warranty, either expressed or implied, as to the legality of its products for street use on any vehicle or in any location. PIAA lamps are designed to improve visibility during night time motoring and inclement weather conditions. However, irresponsible use of any auxiliary light can be dangerous and illegal.
How do I mount this light on my vehicle?
All PIAA lamps come complete with installation instructions. If you've lost your installation instructions, simply e-mail Customer Service for a replacement set.
1) The lens may be upside down. The PIAA name must read right side up so the vent on the bottom can drain.
2) The lens/reflector unit may not be mounted tight against the lamp housing. Loosen, then re-tighten the screws.
3) The gasket may be twisted or pinched. Some gaskets have a vent gap; make sure it's on the bottom.
4) Lamps with a "Snorkel" type wiring grommet in the back of the housing must have the opening facing down.
5) 9O Pro lamps have a vent on the back of the housing in the center. The opening must be facing down. Water leaks through an obvious gap in the sealant or from a loose lens are defects covered under warranty.
Can I mount my lamps upside down?
PIAA lamps are designed to function properly mounted upright or upside down, as long as a few details are taken into consideration. When mounting your PIAA lamps opposite of their intended installation, remember that lamps with a "Snorkel" type wiring grommet in the back of the housing must have the opening facing down. Also, the PIAA name must read right side up so the vent on the bottom can drain. 9O Pro lamps have a vent on the back of the housing in the center. The opening must be facing down. Proper auxiliary lamp aiming is the most important part of an installation. Refer to the Light Aiming Instructions on this web site for proper alignment.
What is the difference between a fog light & a driving light?
Fog lights provide illumination forward of the vehicle that enhances visibility in fog, rain, snow, or dust. Fog lights are intended to be used in conjunction with standard headlamp low beams. Driving lights are mounted to provide illumination forward of the vehicle and intended to supplement the high beam of a standard headlamp system. It is not intended for use alone or with the low beam of a standard headlamp system.
If my product is covered by warranty how do I get it processed?
If a PIAA product is suspected of being defective, it must be submitted freight prepaid, to either an authorized dealer or distributor for warranty inspection. The receipt or other proof of purchase and a description of the problem must be included. The returned product will be inspected. If the product is found to be defective and covered by this Limited Warranty, the sole remedy is repair or replacement, at PIAA's option. A repaired or replacement product will be shipped back at no charge, and will be warranted to be free from defects in workmanship and materials under normal use for as long as the original purchaser owns it. Removal, installation, or reinstallation costs are not covered by this Limited Warranty. Read the Warranty page on the web site for additional information regarding your warranty.
PIAA Bulbs are DOT/SAE compliant and are street legal in all 50 states and Canada.
This is the third bulb to blow out in four months; why does this keep happening?
Bulb failures caused by voltage surges have become more common as new vehicles have placed greater demands on factory electrical systems. The main lamp ground wire must be connected to the battery negative terminal or to a cable leading to it. If a voltage surge occurs, the bulb will look like it was struck by lightning! The metal base & glass will be burnt or the filament will blow up!
This Limited Warranty specifically excludes defects resulting from misuse, abuse, neglect, alteration, modification, improper installation, unauthorized repairs, submersion, theft, vehicle crash, or by any other type of impact. Except for the Limited Warranty stated above, there are no warranties of PIAA products or any part thereof, whether express or implied. Any implied warranty of merchantability or any warranty of fitness for a particular purpose is expressly disclaimed. Some states do not permit the disclaimer of implied warranties.
In no event shall PIAA be liable for any damages whatsoever (including, without limitation, consequential damages, incidental damages, or damages for loss of use, loss of business profits, business interruption, loss of business information, loss of time, inconvenience, or other losses) arising out of the use, misuse, or inability to use a PIAA product. PIAA reserves the right to change the design of its products without any obligation to modify any previous product. This warranty gives you specific legal rights. You may also have other rights that vary from state to state.
|
#!/usr/bin/env python
# MINI PYGOL - Game of Life - Nico Nabholz ([email protected])
gen = 1
cell = {}
import sys, os, random, time
os.system('clear')
rows, columns = os.popen('stty size', 'r').read().split()
y = int(rows) - 2
x = int(columns)
while 1:
print chr(27) + "[H" + "Game Of Life (Generation: %d)" % gen
for r in range(y):
for c in range(x):
if (r * c == 0 or r == y-1 or c == x-1): cell[c, r, gen] = 0
elif gen == 1: cell[c, r, gen] = int(round(random.randrange(0,4,1)/3))
else:
homies = (cell[c-1, r-1, gen-1] + cell[c, r-1, gen-1] + cell[c+1, r-1, gen-1] +
cell[c-1, r, gen-1] + cell[c+1, r, gen-1] +
cell[c-1, r+1, gen-1] + cell[c, r+1, gen-1] + cell[c+1, r+1, gen-1])
                # Conway's rules: a dead cell with exactly three live neighbours
                # is born; a live cell with two or three survives; otherwise dead.
                if (cell[c, r, gen-1] == 0 and homies == 3): cell[c, r, gen] = 1
                elif (cell[c, r, gen-1] == 1 and (homies == 2 or homies == 3)): cell[c, r, gen] = 1
                else: cell[c, r, gen] = 0
if cell[c, r, gen] == 1: sys.stdout.write('O')
else: sys.stdout.write(' ')
print
time.sleep(.1)
gen += 1
|
Prescription glasses Diesel DL5271 067 are part of the latest Diesel collection carefully crafted for men. This elegant full-rim model reflects the latest trends in contemporary designer eyewear and its wayfarer shape makes Diesel DL5271 the perfect choice especially for round, oval and heart-shaped faces.
Buy your Diesel DL5271 067 on eyerim right now and it will be delivered free of charge directly to your door in its original protective packaging.
|
from rtorrent.rpc import RPCObject, BaseMulticallBuilder
from rtorrent.rpc.processors import *
class File(RPCObject):
def __init__(self, context, info_hash, index):
super().__init__(context)
self.rpc_id = "{0}:f{1}".format(info_hash, index)
def rpc_call(self, key, *args):
call = super().rpc_call(key, *args)
call.get_args().insert(0, self.rpc_id)
return call
class FileMetadata(object):
def __init__(self, results: dict):
self.results = results
def __getattr__(self, item):
return lambda: self.results[item]
class FileMulticallBuilder(BaseMulticallBuilder):
__metadata_cls__ = FileMetadata
__rpc_object_cls__ = File
__multicall_rpc_method__ = 'f.multicall'
def __init__(self, context, torrent):
super().__init__(context)
self.args.extend([torrent.get_info_hash(), ''])
_VALID_FILE_PRIORITIES = ['off', 'normal', 'high']
File.register_rpc_method('get_size_bytes',
['f.get_size_bytes', 'f.size_bytes'])
File.register_rpc_method('get_size_chunks',
['f.get_size_chunks', 'f.size_chunks'])
File.register_rpc_method('get_path', ['f.get_path', 'f.path'])
File.register_rpc_method('get_priority', ['f.get_priority', 'f.priority'],
post_processors=[lambda x:
_VALID_FILE_PRIORITIES[x]])
File.register_rpc_method('set_priority', ['f.set_priority', 'f.priority.set'],
pre_processors=[valmap(_VALID_FILE_PRIORITIES,
range(0, 3), 1)],
post_processors=[check_success])
File.register_rpc_method('get_completed_chunks', 'f.completed_chunks')
File.register_rpc_method('get_frozen_path', 'f.frozen_path')
File.register_rpc_method('get_last_touched',
['f.get_last_touched', 'f.last_touched'],
post_processors=[to_datetime])
File.register_rpc_method('get_offset', ['f.get_offset', 'f.offset'])
File.register_rpc_method('get_path_components',
['f.get_path_components', 'f.path_components'])
File.register_rpc_method('get_path_depth',
['f.get_path_depth', 'f.path_depth'])
File.register_rpc_method('get_range_first',
['f.get_range_first', 'f.range_first'])
File.register_rpc_method('get_range_second',
['f.get_range_second', 'f.range_second'])
File.register_rpc_method('is_create_queued', 'f.is_create_queued',
boolean=True)
File.register_rpc_method('is_open', 'f.is_open',
boolean=True)
File.register_rpc_method('get_completed_chunks',
['f.get_completed_chunks', 'f.completed_chunks'])
File.register_rpc_method('get_match_depth_next',
['f.get_match_depth_next', 'f.match_depth_next'])
File.register_rpc_method('get_match_depth_prev',
['f.get_match_depth_prev', 'f.match_depth_prev'])
File.register_rpc_method('is_prioritized_first', 'f.prioritize_first',
boolean=True)
File.register_rpc_method('enable_prioritize_first',
                         'f.prioritize_first.enable',
                         post_processors=[check_success])
File.register_rpc_method('disable_prioritize_first',
                         'f.prioritize_first.disable',
                         post_processors=[check_success])
File.register_rpc_method('is_prioritized_last', 'f.prioritize_last',
boolean=True)
File.register_rpc_method('enable_prioritize_last',
                         'f.prioritize_last.enable',
                         post_processors=[check_success])
File.register_rpc_method('disable_prioritize_last',
                         'f.prioritize_last.disable',
                         post_processors=[check_success])
File.register_rpc_method('is_created', 'f.is_created',
boolean=True)
|
These statistics are generated based on the current listing's property type and its location in Skyview Ranch. Values are derived using median calculations.
Welcome to Aurora at Sky Pointe. Executive TOWNHOME with NO CONDO FEES, a DOUBLE DETACHED GARAGE, landscaped FENCED YARD and QUALITY FINISHINGS inside and out. Set in an ultra-convenient location just steps from amenities and a future C-train station, and only minutes from the airport with easy access to the major roadways Stoney Trail and Deerfoot nearby. The open-concept main floor boasts a front great room with a huge picture window. The rear kitchen showcases stainless steel appliances, a tiled backsplash, and a large island with eating bar and quartz countertops. The adjacent mudroom has a rear door leading out to the west-facing private landscaped yard with the double detached garage beyond. The upper level offers three bedrooms, with the master bedroom having an ensuite bathroom and walk-in closet. Laundry is conveniently located on the upper floor. The unspoiled basement is ready for the future development of your choice. An amazing townhome in a fantastic location; be the first to call it home!
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Apple System Log file parser."""
import unittest
from plaso.formatters import asl # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import asl
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class ASLParserTest(test_lib.ParserTestCase):
"""Tests for Apple System Log file parser."""
@shared_test_lib.skipUnlessHasTestFile([u'applesystemlog.asl'])
def testParse(self):
"""Tests the Parse function."""
parser_object = asl.ASLParser()
storage_writer = self._ParseFile(
[u'applesystemlog.asl'], parser_object)
self.assertEqual(len(storage_writer.events), 2)
event_object = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-25 09:45:35.705481')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.record_position, 442)
self.assertEqual(event_object.message_id, 101406)
self.assertEqual(event_object.computer_name, u'DarkTemplar-2.local')
self.assertEqual(event_object.sender, u'locationd')
self.assertEqual(event_object.facility, u'com.apple.locationd')
self.assertEqual(event_object.pid, 69)
self.assertEqual(event_object.user_sid, u'205')
self.assertEqual(event_object.group_id, 205)
self.assertEqual(event_object.read_uid, 205)
self.assertEqual(event_object.read_gid, 0xffffffff)
self.assertEqual(event_object.level, 4)
# Note that "compatiblity" is spelt incorrectly in the actual message being
# tested here.
expected_message = (
u'Incorrect NSStringEncoding value 0x8000100 detected. '
u'Assuming NSASCIIStringEncoding. Will stop this compatiblity '
u'mapping behavior in the near future.')
self.assertEqual(event_object.message, expected_message)
expected_extra = (
u'CFLog Local Time: 2013-11-25 09:45:35.701, '
u'CFLog Thread: 1007, '
u'Sender_Mach_UUID: 50E1F76A-60FF-368C-B74E-EB48F6D98C51')
self.assertEqual(event_object.extra_information, expected_extra)
expected_msg = (
u'MessageID: 101406 '
u'Level: WARNING (4) '
u'User ID: 205 '
u'Group ID: 205 '
u'Read User: 205 '
u'Read Group: ALL '
u'Host: DarkTemplar-2.local '
u'Sender: locationd '
u'Facility: com.apple.locationd '
u'Message: {0:s} {1:s}').format(expected_message, expected_extra)
expected_msg_short = (
u'Sender: locationd '
u'Facility: com.apple.locationd')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
|
GREENHILL Consulting • Transformation or Transaction?
What is your leadership style? If you are not sure, we can help you find it through our products.
|