from lxml import html
import re
import requests

SEP_URL = "http://plato.stanford.edu/"


class ArticleListScraper():
    query = None
    results = None

    def __init__(self, query):
        self.set_query(query)

    def set_query(self, query):
        # query_no_accents = remove_accents(query)
        query_no_possessives = re.sub("'s", '', query)
        pattern = re.compile('[^a-zA-Z\d\s]')
        stripped_query = re.sub(pattern, ' ', query_no_possessives)
        # stop_word_filter = StopWordFilter()
        # self.query = stop_word_filter.filter(str(stripped_query).lower().split())
        # Stop-word filtering is disabled above; fall back to the bare
        # tokenized query so self.query is actually set.
        self.query = str(stripped_query).lower().split()

    @property
    def url(self):
        url = SEP_URL + "search/searcher.py?query="
        for word in self.query:
            url += word + "+"
        print url
        return url

    def request_results(self):
        page = requests.get(self.url)
        # Remove bold tags
        text_no_bold = re.sub('</? ?b>', '', page.text)
        text_no_newlines = re.sub('\n', '', text_no_bold)
        tree = html.fromstring(text_no_newlines.encode('utf-8'))
        titles = tree.xpath("//div[@class='result_title']/a/text()")
        urls = tree.xpath("//div[@class='result_title']/a/@href")

        # Return at most five results
        result_length = min(len(titles), 5)

        # Build the output dicts
        output = []
        for i in range(result_length):
            output.append({
                "title": titles[i],
                "url": SEP_URL + urls[i].lstrip("../")
            })
        self.results = output
        return output
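A minimal usage sketch for the scraper above — the query is an arbitrary example and a live connection to the SEP search endpoint is assumed:

# Hypothetical usage of ArticleListScraper (Python 2, matching the class above).
scraper = ArticleListScraper("Hegel's dialectics")
for result in scraper.request_results():
    print result["title"], result["url"]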
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Verify the messages in the en .po files after merging.

Usage: From the root directory: tools/verify_translation.py

verify_translation takes no arguments.

To use:
1. Run find_missing_translations to generate a template .po file:
   find_missing_translations --format=po
   This will generate a .po file with just the translated messages in order.
2. Use the english person_finder.xml file and the template from step 1 to
   'merge' the english translations into the english .po file. This should
   generate a .po file with the msg string set to the msg id value for each
   newly translated string. Example command:
   merge_translations translations/en/person_finder.xml app/locale/en/LC_MESSAGES/django
3. Run verify_translations to verify that the strings are actually the same.
   Command: verify_translation
4. Revert the app/locale/en changes (e.g., don't check in the msgstrs in the
   english files).

PO file format:
http://www.gnu.org/software/hello/manual/gettext/PO-Files.html
"""

from babel.messages import pofile
from find_missing_translations import get_po_filename
from test_pfif import text_diff

if __name__ == '__main__':
    filename = get_po_filename('en')
    english = pofile.read_po(open(filename))
    count = 0

    def printsep():
        if count > 0:
            print '-------------------------------------'

    for msg in english:
        # Each newly translated string will have msg.string set
        # to the 'translated' english value.
        if msg.id and msg.string and msg.string != msg.id:
            if isinstance(msg.id, tuple):
                # TODO(lschumacher): deal with plurals properly.
                if msg.string[0] or msg.string[1]:
                    printsep()
                    print 'msg id: %s\nmsgstr: %s' % (msg.id, msg.string)
                    count += 1
            else:
                printsep()
                print text_diff(msg.id, msg.string)
                count += 1
    if count:
        printsep()
        print 'Found %s bad translations' % count
    else:
        print 'Translation OK'
Bring some garden charm to your indoor or outdoor space with this beautiful watering can. Suitable for use as a decorative item or as a practical addition to your garden, this product measures 21cm high, 25cm wide and 18cm deep. Made from powder-coated steel with a subtle front-facing design.
#!/usr/bin/env python
# Python Network Programming Cookbook -- Chapter - 5
# This program requires Python 2.7 or any later version

import argparse
import os
import getpass
import re
import sys
import smtplib

from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

SMTP_SERVER = 'smtp.gmail.com'
SMTP_PORT = 587


def send_email(sender, recipient):
    """ Send email message """
    msg = MIMEMultipart()
    msg['Subject'] = 'Python Email Test'
    msg['To'] = recipient
    msg['From'] = sender
    message = 'Images attached.'

    # Attach any .gif files found in the working directory
    files = os.listdir(os.getcwd())
    gifsearch = re.compile(".gif", re.IGNORECASE)
    files = filter(gifsearch.search, files)
    for filename in files:
        path = os.path.join(os.getcwd(), filename)
        if not os.path.isfile(path):
            continue
        img = MIMEImage(open(path, 'rb').read(), _subtype="gif")
        img.add_header('Content-Disposition', 'attachment', filename=filename)
        msg.attach(img)

    part = MIMEText('text', "plain")
    part.set_payload(message)
    msg.attach(part)

    # Create SMTP session
    session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
    session.ehlo()
    session.starttls()
    session.ehlo()  # original was missing the call parentheses
    password = getpass.getpass(prompt="Enter your Google password: ")
    session.login(sender, password)
    session.sendmail(sender, recipient, msg.as_string())
    print "Email sent."
    session.quit()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Email Sending Example')
    parser.add_argument('--sender', action="store", dest="sender")
    parser.add_argument('--recipient', action="store", dest="recipient")
    given_args = parser.parse_args()
    send_email(given_args.sender, given_args.recipient)
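A hypothetical invocation of the script above, assuming it is saved as send_email.py and the working directory contains .gif files; both addresses are placeholders:

python send_email.py --sender you@gmail.com --recipient friend@example.com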
Nestled against the Capital Beltway, Arlington is one of the nation's most prosperous regions. NVHomes provides a generous selection of luxury homes with elegant designs and spacious interiors for sale in Arlington County, VA, in some of the most affluent neighborhoods in the country. As the heart of America's defense and aerospace industry, Arlington is one of the nation's most commercially successful areas. Boasting an outstanding average income level and a broad collection of major corporations, Arlington contains centers of operation for major Fortune 500 players ranging from Lockheed Martin to Deloitte. With Forbes recently applauding Arlington County as among the top places in America for secondary education, the region has a wide offering of potential schools. The Arlington Public Schools system has three major high schools, and Arlington residents have the additional option of enrollment in the prestigious Thomas Jefferson High School for Science and Technology in nearby Fairfax. All of our new homes in Arlington County are within easy reach of the surrounding education outlets, which include a full roster of local private academies in addition to public school choices. Arlington was a focal point of early colonial development. During the antebellum period leading up to the Civil War, Arlington and the surrounding counties were areas of deep contention, and Arlington still bears preserved fortifications and barricades from the War Between the States. NVHomes' Arlington real estate offerings are close to the region's myriad historical sites and monuments, including the Armed Forces Memorial at Arlington National Cemetery.
##
# Copyright (C) 2017 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##

from django.conf import urls

from inboxen.cms import views

urlpatterns = [
    urls.re_path(r'^$', views.index, name='index'),
    urls.re_path(r'^(?P<page_pk>\d+)/$', views.index, name='index'),
    urls.re_path(r'^choose_new_page/(?P<parent_pk>\d+)/$', views.choose_page_type, name='choose-page-type'),
    urls.re_path(r'^create_page/(?P<model>[A-Za-z]+)/(?P<parent_pk>\d+)/$', views.create_page, name='create-page'),
    urls.re_path(r'^edit_page/(?P<page_pk>\d+)/$', views.edit_page, name='edit-page'),
    urls.re_path(r'^delete_page/(?P<page_pk>\d+)/$', views.delete_page, name='delete-page'),
    urls.re_path(r'^blog/', urls.include(("inboxen.blog.admin_urls", "blog"), namespace="blog")),
    urls.re_path(r'^questions/', urls.include(("inboxen.tickets.admin_urls", "tickets"), namespace="tickets")),
    urls.re_path(r'^domains/', urls.include(("inboxen.admin_urls.domains", "inboxen"), namespace="domains")),
]
After 11 months of aggressive treatment that included chemotherapy, radiation and multiple surgeries, Tedeschi was declared cancer-free. Last week, Trebek, the 78-year-old longtime host of Jeopardy!, revealed he had stage 4 pancreatic cancer, and that the prognosis was ‘not very encouraging’. Tedeschi told Florida Today that she began experiencing back pain in 2011. She tried epidurals, muscle relaxers and acupuncture, but nothing worked. Tedeschi said that despite the grim odds she was given – just nine months to live – she was determined to fight. ‘The decision to fight was an easy one, because there was no way that I was leaving my little boys and no way that anyone else was going to marry my husband!’ she wrote. Tedeschi wrote that she went through 12 rounds of chemotherapy, 25 rounds of radiation with a 24-hour chemotherapy pump, and multiple surgeries. In one of the procedures, done in November 2012, she underwent a minimally invasive treatment known as the NanoKnife, which delivers high-voltage electrical pulses to the tumor. ‘This procedure is supposed to poke holes in all the cells and essentially they die and dissolve in your body over time,’ she wrote on the Pancreatic Cancer Action Network website. Trebek revealed his diagnosis in a video message posted to the Jeopardy! YouTube page last week. She says she maintained a positive outlook because she ‘didn’t choose to get cancer…but I do get to choose my attitude’. Pictured: Tedeschi with her sons after she came home from the hospital.
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-20 23:48
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('operation_finance', '0032_auto_20170720_1815'),
    ]

    operations = [
        migrations.CreateModel(
            name='VendorConflict',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(blank=True, max_length=150)),
                ('origin', models.DateTimeField(auto_now_add=True, null=True)),
                ('last_modified', models.DateTimeField(auto_now=True, null=True)),
                ('conflict_description', models.CharField(max_length=300, verbose_name='Conflict description')),
                ('conflict_resolution', models.CharField(blank=True, max_length=300, null=True, verbose_name='Conflict resolution')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RemoveField(
            model_name='invoice',
            name='conflict',
        ),
        migrations.RemoveField(
            model_name='invoice',
            name='conflict_description',
        ),
        migrations.AddField(
            model_name='invoice',
            name='file',
            field=models.FileField(blank=True, null=True, upload_to='uploads/operations_invoice/', verbose_name='File'),
        ),
        migrations.AddField(
            model_name='vendorconflict',
            name='invoice',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='conflict', to='operation_finance.Invoice'),
        ),
    ]
Plunderful is one of the few pieces that remain of an unproduced play I wrote called “Pirates v. Ninjas.” I wrote it for a contact who ran the late-night division of his local community theater, but by the time I completed the play, the relationship between my contact and the theater had dissolved. The story followed a modern-day media pirate (music, movies, etc.) who is sentenced on a reality show called Time-Court; his punishment is to be sent back to the time of his pirate ancestors. He joins a ragtag group of pirates on a ship led by Captain Colorbeard, among others (including a resident pirate ghost). Unbeknownst to our hero, the pirates are in a war with their mortal enemies, the Ninjas. I think the rest sort of writes itself. I’ve certainly been mining the script for jokes or lines that can be turned into comics. So, there’s a good chance of more pirate-themed jokes similar to “Plunderful” turning up on mousebearcomedy in the future.
# -*- coding: utf-8 -*- from __future__ import absolute_import import mock import six from datetime import timedelta from django.utils import timezone from mock import patch from sentry.api.serializers import serialize from sentry.api.serializers.models.group import StreamGroupSerializer from sentry.models import ( Environment, GroupResolution, GroupSnooze, GroupStatus, GroupSubscription, UserOption, UserOptionValue ) from sentry.testutils import TestCase class GroupSerializerTest(TestCase): def test_is_ignored_with_expired_snooze(self): now = timezone.now().replace(microsecond=0) user = self.create_user() group = self.create_group( status=GroupStatus.IGNORED, ) GroupSnooze.objects.create( group=group, until=now - timedelta(minutes=1), ) result = serialize(group, user) assert result['status'] == 'unresolved' assert result['statusDetails'] == {} def test_is_ignored_with_valid_snooze(self): now = timezone.now().replace(microsecond=0) user = self.create_user() group = self.create_group( status=GroupStatus.IGNORED, ) snooze = GroupSnooze.objects.create( group=group, until=now + timedelta(minutes=1), ) result = serialize(group, user) assert result['status'] == 'ignored' assert result['statusDetails']['ignoreCount'] == snooze.count assert result['statusDetails']['ignoreWindow'] == snooze.window assert result['statusDetails']['ignoreUserCount'] == snooze.user_count assert result['statusDetails']['ignoreUserWindow'] == snooze.user_window assert result['statusDetails']['ignoreUntil'] == snooze.until assert result['statusDetails']['actor'] is None def test_is_ignored_with_valid_snooze_and_actor(self): now = timezone.now().replace(microsecond=0) user = self.create_user() group = self.create_group( status=GroupStatus.IGNORED, ) GroupSnooze.objects.create( group=group, until=now + timedelta(minutes=1), actor_id=user.id, ) result = serialize(group, user) assert result['status'] == 'ignored' assert result['statusDetails']['actor']['id'] == six.text_type(user.id) def test_resolved_in_next_release(self): release = self.create_release(project=self.project, version='a') user = self.create_user() group = self.create_group( status=GroupStatus.RESOLVED, ) GroupResolution.objects.create( group=group, release=release, type=GroupResolution.Type.in_next_release, ) result = serialize(group, user) assert result['status'] == 'resolved' assert result['statusDetails'] == {'inNextRelease': True, 'actor': None} def test_resolved_in_release(self): release = self.create_release(project=self.project, version='a') user = self.create_user() group = self.create_group( status=GroupStatus.RESOLVED, ) GroupResolution.objects.create( group=group, release=release, type=GroupResolution.Type.in_release, ) result = serialize(group, user) assert result['status'] == 'resolved' assert result['statusDetails'] == {'inRelease': 'a', 'actor': None} def test_resolved_with_actor(self): release = self.create_release(project=self.project, version='a') user = self.create_user() group = self.create_group( status=GroupStatus.RESOLVED, ) GroupResolution.objects.create( group=group, release=release, type=GroupResolution.Type.in_release, actor_id=user.id, ) result = serialize(group, user) assert result['status'] == 'resolved' assert result['statusDetails']['actor']['id'] == six.text_type(user.id) @patch('sentry.models.Group.is_over_resolve_age') def test_auto_resolved(self, mock_is_over_resolve_age): mock_is_over_resolve_age.return_value = True user = self.create_user() group = self.create_group( status=GroupStatus.UNRESOLVED, ) result = 
serialize(group, user) assert result['status'] == 'resolved' assert result['statusDetails'] == {'autoResolved': True} def test_subscribed(self): user = self.create_user() group = self.create_group() GroupSubscription.objects.create( user=user, group=group, project=group.project, is_active=True, ) result = serialize(group, user) assert result['isSubscribed'] assert result['subscriptionDetails'] == { 'reason': 'unknown', } def test_explicit_unsubscribed(self): user = self.create_user() group = self.create_group() GroupSubscription.objects.create( user=user, group=group, project=group.project, is_active=False, ) result = serialize(group, user) assert not result['isSubscribed'] assert not result['subscriptionDetails'] def test_implicit_subscribed(self): user = self.create_user() group = self.create_group() combinations = ( # ((default, project), (subscribed, details)) ((None, None), (True, None)), ((UserOptionValue.all_conversations, None), (True, None)), ((UserOptionValue.all_conversations, UserOptionValue.all_conversations), (True, None)), ((UserOptionValue.all_conversations, UserOptionValue.participating_only), (False, None)), ((UserOptionValue.all_conversations, UserOptionValue.no_conversations), (False, {'disabled': True})), ((UserOptionValue.participating_only, None), (False, None)), ((UserOptionValue.participating_only, UserOptionValue.all_conversations), (True, None)), ((UserOptionValue.participating_only, UserOptionValue.participating_only), (False, None)), ((UserOptionValue.participating_only, UserOptionValue.no_conversations), (False, {'disabled': True})), ((UserOptionValue.no_conversations, None), (False, {'disabled': True})), ((UserOptionValue.no_conversations, UserOptionValue.all_conversations), (True, None)), ((UserOptionValue.no_conversations, UserOptionValue.participating_only), (False, None)), ((UserOptionValue.no_conversations, UserOptionValue.no_conversations), (False, {'disabled': True})), ) def maybe_set_value(project, value): if value is not None: UserOption.objects.set_value( user=user, project=project, key='workflow:notifications', value=value, ) else: UserOption.objects.unset_value( user=user, project=project, key='workflow:notifications', ) for options, (is_subscribed, subscription_details) in combinations: default_value, project_value = options UserOption.objects.clear_local_cache() maybe_set_value(None, default_value) maybe_set_value(group.project, project_value) result = serialize(group, user) assert result['isSubscribed'] is is_subscribed assert result.get('subscriptionDetails') == subscription_details def test_global_no_conversations_overrides_group_subscription(self): user = self.create_user() group = self.create_group() GroupSubscription.objects.create( user=user, group=group, project=group.project, is_active=True, ) UserOption.objects.set_value( user=user, project=None, key='workflow:notifications', value=UserOptionValue.no_conversations, ) result = serialize(group, user) assert not result['isSubscribed'] assert result['subscriptionDetails'] == { 'disabled': True, } def test_project_no_conversations_overrides_group_subscription(self): user = self.create_user() group = self.create_group() GroupSubscription.objects.create( user=user, group=group, project=group.project, is_active=True, ) UserOption.objects.set_value( user=user, project=group.project, key='workflow:notifications', value=UserOptionValue.no_conversations, ) result = serialize(group, user) assert not result['isSubscribed'] assert result['subscriptionDetails'] == { 'disabled': True, } def 
test_no_user_unsubscribed(self): group = self.create_group() result = serialize(group) assert not result['isSubscribed'] class StreamGroupSerializerTestCase(TestCase): def test_environment(self): group = self.group environment = Environment.get_or_create(group.project, 'production') from sentry.api.serializers.models.group import tsdb with mock.patch( 'sentry.api.serializers.models.group.tsdb.get_range', side_effect=tsdb.get_range) as get_range: serialize( [group], serializer=StreamGroupSerializer( environment_func=lambda: environment, stats_period='14d', ), ) assert get_range.call_count == 1 for args, kwargs in get_range.call_args_list: assert kwargs['environment_id'] == environment.id def get_invalid_environment(): raise Environment.DoesNotExist() with mock.patch( 'sentry.api.serializers.models.group.tsdb.make_series', side_effect=tsdb.make_series) as make_series: serialize( [group], serializer=StreamGroupSerializer( environment_func=get_invalid_environment, stats_period='14d', ) ) assert make_series.call_count == 1
• Michael Annett’s best NXS finish at Daytona International Speedway is third, in the summer of 2012. His best season opening finish is 12th, in 2010. • Last year, Annett was running in the top eight in the closing laps before getting caught in a crash that put him 37th in the running order. He started the race last year in seventh, his best NXS career start on the 2.5-mile track. • This is the first year Annett will drive the No. 1 Chevrolet Camaro for JR Motorsports, a number with significance to the Annett family. He previously ran the No. 5 for the organization throughout his first two seasons with the team. • Heading into 2019, Justin Allgaier and the BRANDT Professional Agriculture team are looking to build off of a successful 2018 season that culminated in five wins and the NXS regular-season championship. • In 16 NXS starts at DIS, Allgaier has three top fives and seven top 10s, with a best finish of second (July 2016). • Last year at traditional restrictor-plate tracks, the 32-year old driver earned a third-place finish at Talladega and ninth in the July Daytona race. • In the 2018 season opener in Daytona, Allgaier’s chances at victory ended when he got caught up in a multi-car incident after the race went into overtime. • Chase Elliott won the season-opening race for JRM in 2016. His NXS Daytona stats include one victory, two top fives and three top 10s along with 42 laps led. • Elliott has made eight NXS starts at Daytona, with seven of those being for JRM, and four in the season opener. • Elliott will be the first of multiple drivers – including team owner Dale Earnhardt Jr. – to run the iconic No. 8 Chevrolet for JRM. • Noah Gragson will make his JRM debut this weekend in the season-opening NXS event at DIS. • Gragson previously made three starts in the NXS in 2018, earning an overall average finish of 4.3. • Prior to joining JRM, Gragson ran two full seasons in the NASCAR Truck Series, earning a combined two wins, 12 top fives, 30 top 10s and a second-place finish in the 2018 championship standings. Gragson was also named the 2018 Most Popular Driver in the Truck Series. • Gragson has made four previous starts at Daytona across the Truck Series and the ARCA Racing Series, earning a best finish of seventh in the 2018 ARCA event. • JRM at Daytona: In 68 NXS starts at Daytona International Speedway, JR Motorsports has scored five wins, 21 top fives and 32 top 10s. The organization is not only the defending race winner with Tyler Reddick taking the checkered flag in last year’s season opener, but JRM has also driven into Victory Lane at Daytona once every season for the last three years. • Allgaier Autograph Session: Justin Allgaier, driver of the No. 7 BRANDT Professional Agriculture Chevrolet, will be signing autographs at the JR Motorsports merchandise trailer on Friday, Feb. 15 from 5:30-6 p.m. ET. • Celebrating American Heart Month: The No. 1 Chevrolet is running a special paint scheme this week and at Atlanta in honor of American Heart Month. Pilot Flying J is proud to unite with its guests across the country to support the American Heart Association “Life is Why We Give™” campaign in their mission to be a relentless force for a world of longer, healthier lives by raising awareness and inspiring heart healthy lifestyles for our team members, guests and their loved ones.
import os import socket from dtest import Tester, debug from tools import since @since('2.2.5') class TestGossipingPropertyFileSnitch(Tester): # Throws connection refused if cannot connect def _test_connect(self, address, port): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(0.1) s.connect((address, port)) s.close() def test_prefer_local_reconnect_on_listen_address(self): """ @jira_ticket CASSANDRA-9748 @jira_ticket CASSANDRA-8084 Test that it's possible to connect over the broadcast_address when listen_on_broadcast_address=true and that GossipingPropertyFileSnitch reconnect via listen_address when prefer_local=true """ NODE1_LISTEN_ADDRESS = '127.0.0.1' NODE1_BROADCAST_ADDRESS = '127.0.0.3' NODE2_LISTEN_ADDRESS = '127.0.0.2' NODE2_BROADCAST_ADDRESS = '127.0.0.4' STORAGE_PORT = 7000 cluster = self.cluster cluster.populate(2) node1, node2 = cluster.nodelist() cluster.seeds = [NODE1_BROADCAST_ADDRESS] cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.GossipingPropertyFileSnitch', 'listen_on_broadcast_address': 'true'}) node1.set_configuration_options(values={'broadcast_address': NODE1_BROADCAST_ADDRESS}) node2.auto_bootstrap = True node2.set_configuration_options(values={'broadcast_address': NODE2_BROADCAST_ADDRESS}) for node in cluster.nodelist(): with open(os.path.join(node.get_conf_dir(), 'cassandra-rackdc.properties'), 'w') as snitch_file: snitch_file.write("dc=dc1" + os.linesep) snitch_file.write("rack=rack1" + os.linesep) snitch_file.write("prefer_local=true" + os.linesep) node1.start(wait_for_binary_proto=True) node1.watch_log_for("Starting Messaging Service on /{}:{}".format(NODE1_LISTEN_ADDRESS, STORAGE_PORT), timeout=60) node1.watch_log_for("Starting Messaging Service on /{}:{}".format(NODE1_BROADCAST_ADDRESS, STORAGE_PORT), timeout=60) self._test_connect(NODE1_LISTEN_ADDRESS, STORAGE_PORT) self._test_connect(NODE1_BROADCAST_ADDRESS, STORAGE_PORT) # write some data to node1 node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8']) session = self.patient_cql_connection(node1) stress_table = 'keyspace1.standard1' original_rows = list(session.execute("SELECT * FROM {}".format(stress_table))) node2.start(wait_for_binary_proto=True, wait_other_notice=False) node2.watch_log_for("Starting Messaging Service on /{}:{}".format(NODE2_LISTEN_ADDRESS, STORAGE_PORT), timeout=60) node2.watch_log_for("Starting Messaging Service on /{}:{}".format(NODE2_BROADCAST_ADDRESS, STORAGE_PORT), timeout=60) self._test_connect(NODE2_LISTEN_ADDRESS, STORAGE_PORT) self._test_connect(NODE2_BROADCAST_ADDRESS, STORAGE_PORT) node1.watch_log_for("Intiated reconnect to an Internal IP /{} for the /{}".format(NODE2_LISTEN_ADDRESS, NODE2_BROADCAST_ADDRESS), filename='debug.log', timeout=60) node2.watch_log_for("Intiated reconnect to an Internal IP /{} for the /{}".format(NODE1_LISTEN_ADDRESS, NODE1_BROADCAST_ADDRESS), filename='debug.log', timeout=60) # read data from node2 just to make sure data and connectivity is OK session = self.patient_exclusive_cql_connection(node2) new_rows = list(session.execute("SELECT * FROM {}".format(stress_table))) self.assertEquals(original_rows, new_rows) out, err = node1.nodetool('gossipinfo') self.assertEqual(0, len(err), err) debug(out) self.assertIn("/{}".format(NODE1_BROADCAST_ADDRESS), out) self.assertIn("INTERNAL_IP:6:{}".format(NODE1_LISTEN_ADDRESS), out) self.assertIn("/{}".format(NODE2_BROADCAST_ADDRESS), out) self.assertIn("INTERNAL_IP:6:{}".format(NODE2_LISTEN_ADDRESS), out)
John’s grip on the gun was steady, the pistol trained on Santino, not wavering in the least even when Winston stepped forward, trying to calm him down. Taunting John Wick had been juvenile—possibly even suicidal—but in this moment, balanced between life and death and ruin, Santino could not begin to care. He smiled. John turned, a faint spark of surprise actually filtering through his fatigue. Filthy and thin as his mortal shell was now, he should still be invisible to humans if he wanted to be - and the shorter, stocky middle-aged human with as fussy a face as his voice looked and felt human. He was impeccably if unassumingly dressed, in a three piece dove gray suit and a pale blue tie, knotted under a shirt collar pressed to razor edges. Wire-rimmed spectacles perched high over the bridge of his nose, behind which sharp, pale blue eyes regarded John with disapproval. Had John run into him on a street, he would’ve walked by without a second glance. Six months, no yellow cape, and one and a half thankfully harmless gun accidents after, Harold and Nathan had settled into a routine. The Machine would send them a number, and Harold would try his damnedest to resolve it through a computer. Usually, this endeavour would end in failure, because despite the advent of technology, the idiocy of certain members of humanity was so fundamental that not even computers could provide any sort of real solution. This meant that Nathan often had to intervene. Sometimes they hired help: which had worked out with varying degrees of disaster. Sometimes they tipped off the police. Sometimes Nathan and Harold ventured out, bickering all the time, and the success rate of this latter tactic was usually at around 24%. It also usually served to further erode Harold’s already falling opinion of humanity. Clark had no idea why he had been sent to cover the fundraiser. After all, events like these were usually private affairs, where fortunes were pooled together to be gambled on politics, with the press informed later through leaks, if at all. The Daily Planet hadn’t been the only newspaper invited along: Clark recognised Julie from the New York Times, and Dean from CNN, among others, and they all seemed equally bewildered. Wayne Enterprises, usually notoriously neutral in national politics, was forming a SuperPAC. After Ground Zero in Metropolis, Bruce read a lot of science fiction. He’d never particularly bothered with the genre before: even after retiring as the Batman, it wasn’t as though Bruce had very many hours in the day to devote to stories about imaginary alien civilisations and spaceship battles and evil robots. Life was strange enough, or so he had thought. Then life itself had turned into science fiction, and standing outside the blast zone, comforting a child, Bruce had felt small for the second time in his lifetime, small and powerless and utterly helpless. Finn lowered his vibroblade, turning on his heel, and he grinned as Poe ambled over in his measured flyboy swagger. Damn but Finn sometimes wished that he could walk like that, like he could take any punches that the ‘verse wanted to throw and still come up tops. Finn had tried practicing once, in the room that the Resistance had given him after he’d woken up in the med-bay, but he had only managed to awkwardly trip over his own feet and had bruised his hip against his desk. Napoleon refuses the CIA deal and ends up in prison, as the infamous Russian's cellmate. Super good story building with the developing relationship as a sort of side-plot. 
Illya raised an eyebrow, then straightened up sharply, nearly hitting his head against the top bunk, when Napoleon pulled out a small, battered chess box. It was a tiny set, not even as wide as Napoleon’s hands were long, and it was scratched alarmingly at the flanks, but when Napoleon tossed it over, Illya rubbed his fingers reverently over the black and white lacquer top before he remembered himself. Napoleon sat, if on the edge of Illya’s bunk, and grinned at Illya, clearly pleased with himself. And the little jackal should be pleased. He had seen a glimpse of Illya’s levers and had quite quickly done his best. The jackal, skulking carefully at the heels of a wolf, trying to seem useful enough to avoid its jaws. New York was having a swelteringly hot and humid summer. Napoleon had tried to beg leave from Waverly to escape northwards, maybe as far as Canada, but it had been vetoed - apparently, a mission was imminent. Bored, Napoleon spent his days driving around aimlessly in the morning in his Bentley and sleeping off the rest of the day in his air-conditioned penthouse, and so had been cruising down Brighton Beach, wondering whether he had the energy to park somewhere and walk down the shorefront, when he saw a familiar, tall blonde figure duck into a shop. Surprised, Napoleon slowed down. It was Illya: that height and cap were unmistakable. Other than Napoleon and the CIA watchdog assigned to him, the prison bus was empty. It was once a schoolbus, but had since been rudely repurposed, painted a dark charcoal gray with the words Virginia Department of Corrections printed in neat white paint under the murky windows. Outside, flat, baked farmland went past, acres of it, dotted sparsely with trees. There was no air conditioning in the bus, and the windows were open only in varying slivers; Napoleon felt like he was baking, himself, in clothes that were three days old. His shirt stuck to his back, and his jeans felt uncomfortably stiff on his legs. Napoleon watched the trees go past in a daze. His hands were cuffed to the chrome ring attached to the seat in front of him, and his legs were hobbled. It was hard to believe that only four days ago he had wandered through MoMa with a gorgeous leggy blonde on his arm, then wined and dined her at a nice French bistro. Strange how quickly the worm turned, just on the back of a single mistake. "In the first month of being co-opted into working with Illya Kuryakin full time at U.N.C.L.E., Napoleon stole Illya’s father’s watch four times, twice out of spite, once out of drunken curiosity, and once out of sheer boredom. After the fourth time, Gaby sprained one of Napoleon’s fingers and threatened to do worse if he did it again." Mad Max / Inception fusion WIP. You could say that I first got a Clue that my governor maybe, kind of, had a sort of Thing for me when Molly started methodically oversalting all my food and drink. Including the hot cocoas. “This is your next target,” Merlin said into Harry’s earpiece, as Harry leaned forward to look at his laptop screen. It blacked out, for a moment, then a clip out of some interview began to play. The video was in black and white, crisply and tightly shot, its subject a young man shown seated from the waist up, against a pale gray background, grinning at the camera. He was probably in his mid twenties, dressed down in a black leather jacket over a pale t-shirt, loose over denim jeans, his hair long enough to feather slightly over his forehead, a hint of stubble over his chin. 
He was also, quite possibly, the most beautiful young man Harry had ever seen, and disturbingly… familiar, somehow. As the young man laughed noiselessly at the camera, elegant serif type faded over the lower third of the screen: Gary Unwin, by Vanity Fair. Harry blinked, and studied the young man’s pretty face more closely, the crinkling around his eyes, the joyous curl to his mouth, the way he sat, relaxed yet alert, like a hunting hound, waiting to come to heel.
import time
import uuid

from splinter.browser import Browser
from django.contrib.auth.models import User

from webparticipation.apps.ureporter.models import Ureporter
from webparticipation.apps.ureport_auth.models import PasswordReset


def before_all(context):
    context.browser = Browser('chrome')
    time.sleep(5)


def before_scenario(context, scenario):
    email = '[email protected]'
    username = 'user999999999'
    password = 'password'
    email1 = '[email protected]'
    username1 = 'user999999991'
    uid = uuid.uuid4()
    uid1 = uuid.uuid4()
    Ureporter.objects.create(uuid=uid,
                             user=User.objects.create_user(username=username, email=email, password=password))
    Ureporter.objects.create(uuid=uid1,
                             user=User.objects.create_user(username=username1, email=email1, password=password))


def after_scenario(context, scenario):
    User.objects.all().delete()
    Ureporter.objects.all().delete()
    PasswordReset.objects.all().delete()


def after_all(context):
    context.browser.quit()
    context.browser = None
    context.server = None
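These hooks form a behave environment.py, so a hypothetical run such as the following (the feature path is a placeholder) would launch Chrome once, create the two test ureporters before each scenario, and delete them afterwards:

behave features/password_reset.feature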
The term ‘Speech Graphics’ or ‘us’ or ‘we’ refers to the owner of the website whose registered office is Citypoint, 65 Haymarket Terrace, Edinburgh, Scotland, EH12 5HD. Our company registration number is SC388915. The term ‘you’ refers to the user or viewer of our website.
"""The tests for the sun automation.""" from datetime import datetime from unittest.mock import patch import pytest from homeassistant.components import sun import homeassistant.components.automation as automation from homeassistant.const import ( ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF, SERVICE_TURN_ON, SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET, ) from homeassistant.setup import async_setup_component import homeassistant.util.dt as dt_util from tests.common import async_fire_time_changed, async_mock_service, mock_component from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401 ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE @pytest.fixture def calls(hass): """Track calls to a mock service.""" return async_mock_service(hass, "test", "automation") @pytest.fixture(autouse=True) def setup_comp(hass): """Initialize components.""" mock_component(hass, "group") hass.config.set_time_zone(hass.config.time_zone) hass.loop.run_until_complete( async_setup_component(hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}) ) def teardown(): """Restore.""" dt_util.set_default_time_zone(ORIG_TIME_ZONE) async def test_sunset_trigger(hass, calls, legacy_patchable_time): """Test the sunset trigger.""" now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC) trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "sun", "event": SUN_EVENT_SUNSET}, "action": { "service": "test.automation", "data_template": {"id": "{{ trigger.id}}"}, }, } }, ) await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 0 with patch("homeassistant.util.dt.utcnow", return_value=now): await hass.services.async_call( automation.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_MATCH_ALL}, blocking=True, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["id"] == 0 async def test_sunrise_trigger(hass, calls, legacy_patchable_time): """Test the sunrise trigger.""" now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC) trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": {"platform": "sun", "event": SUN_EVENT_SUNRISE}, "action": {"service": "test.automation"}, } }, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 1 async def test_sunset_trigger_with_offset(hass, calls, legacy_patchable_time): """Test the sunset trigger with offset.""" now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC) trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": { "platform": "sun", "event": SUN_EVENT_SUNSET, "offset": "0:30:00", }, "action": { "service": "test.automation", "data_template": { "some": "{{ trigger.%s }}" % "}} - {{ trigger.".join(("platform", "event", "offset")) }, }, } }, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 1 assert calls[0].data["some"] == "sun - sunset - 0:30:00" async def 
test_sunrise_trigger_with_offset(hass, calls, legacy_patchable_time): """Test the sunrise trigger with offset.""" now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC) trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC) with patch("homeassistant.util.dt.utcnow", return_value=now): await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: { "trigger": { "platform": "sun", "event": SUN_EVENT_SUNRISE, "offset": "-0:30:00", }, "action": {"service": "test.automation"}, } }, ) async_fire_time_changed(hass, trigger_time) await hass.async_block_till_done() assert len(calls) == 1
Powerpoint Template Classic powerpoint template classic letter shaped savon powerpoint template templates. powerpoint template classic classic book powerpoint template ideas. powerpoint template classic free classic circle patterns backgrounds for powerpoint abstract free. powerpoint template classic free classic powerpoint template pptmag ideas. powerpoint template classic classic simple frame free ppt backgrounds for your powerpoint ideas. powerpoint template classic classic ornament powerpoint template backgrounds id 0000008420. powerpoint template classic classic book template for powerpoint download.
# -*- coding: utf-8 -*-

# Unix SMB/CIFS implementation.
# Copyright © Jelmer Vernooij <[email protected]> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

"""Network Data Representation (NDR) marshalling and unmarshalling."""


def ndr_pack(object):
    """Pack a NDR object.

    :param object: Object to pack
    :return: String object with marshalled object.
    """
    ndr_pack = getattr(object, "__ndr_pack__", None)
    if ndr_pack is None:
        raise TypeError("%r is not a NDR object" % object)
    return ndr_pack()


def ndr_unpack(cls, data, allow_remaining=False):
    """NDR unpack an object.

    :param cls: Class of the object to unpack
    :param data: Buffer to unpack
    :param allow_remaining: allows remaining data at the end (default=False)
    :return: Unpacked object
    """
    object = cls()
    ndr_unpack = getattr(object, "__ndr_unpack__", None)
    if ndr_unpack is None:
        raise TypeError("%r is not a NDR object" % object)
    ndr_unpack(data, allow_remaining=allow_remaining)
    return object


def ndr_print(object):
    ndr_print = getattr(object, "__ndr_print__", None)
    if ndr_print is None:
        raise TypeError("%r is not a NDR object" % object)
    return ndr_print()


def ndr_pack_in(object, bigendian=False, ndr64=False):
    """Pack the input of an NDR function object.

    :param object: Object to pack
    :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
    :param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
    :return: String object with marshalled object.
    """
    ndr_pack_in_fn = getattr(object, "__ndr_pack_in__", None)
    if ndr_pack_in_fn is None:
        raise TypeError("%r is not a NDR function object" % object)
    return ndr_pack_in_fn(bigendian=bigendian, ndr64=ndr64)


def ndr_unpack_in(object, data, bigendian=False, ndr64=False, allow_remaining=False):
    """Unpack the input of an NDR function object.

    :param object: Function object to unpack into
    :param data: Buffer to unpack
    :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
    :param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
    :param allow_remaining: allows remaining data at the end (default=False)
    :return: Unpacked object
    """
    ndr_unpack_in_fn = getattr(object, "__ndr_unpack_in__", None)
    if ndr_unpack_in_fn is None:
        raise TypeError("%r is not a NDR function object" % object)
    ndr_unpack_in_fn(data, bigendian=bigendian, ndr64=ndr64, allow_remaining=allow_remaining)
    return object


def ndr_print_in(object):
    ndr_print_in_fn = getattr(object, "__ndr_print_in__", None)
    if ndr_print_in_fn is None:
        raise TypeError("%r is not a NDR function object" % object)
    return ndr_print_in_fn()


def ndr_pack_out(object, bigendian=False, ndr64=False):
    """Pack the output of an NDR function object.

    :param object: Object to pack
    :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
    :param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
    :return: String object with marshalled object.
    """
    ndr_pack_out_fn = getattr(object, "__ndr_pack_out__", None)
    if ndr_pack_out_fn is None:
        raise TypeError("%r is not a NDR function object" % object)
    return ndr_pack_out_fn(bigendian=bigendian, ndr64=ndr64)


def ndr_unpack_out(object, data, bigendian=False, ndr64=False, allow_remaining=False):
    """Unpack the output of an NDR function object.

    :param object: Function object to unpack into
    :param data: Buffer to unpack
    :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
    :param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
    :param allow_remaining: allows remaining data at the end (default=False)
    :return: Unpacked object
    """
    ndr_unpack_out_fn = getattr(object, "__ndr_unpack_out__", None)
    if ndr_unpack_out_fn is None:
        raise TypeError("%r is not a NDR function object" % object)
    ndr_unpack_out_fn(data, bigendian=bigendian, ndr64=ndr64, allow_remaining=allow_remaining)
    return object


def ndr_print_out(object):
    ndr_print_out_fn = getattr(object, "__ndr_print_out__", None)
    if ndr_print_out_fn is None:
        raise TypeError("%r is not a NDR function object" % object)
    return ndr_print_out_fn()
At Exile Studio we pride ourselves on having all the skills you need under one roof. We don't outsource any web design or development work. Since we first opened our doors back in 2007, we have wanted our products to sell - and that principle still holds true today. At Exile Studio, we believe in the personal touch, so templates are a no-no. Our customers receive only customised web designs tailored to their every preference and need. It doesn't matter what your budget is or how complex your requirements are: where there's a will, there's a way, and we have the will to do anything for you! We are also technicians specialised in web design overhauls. We are experts at transforming boring, dull websites into exciting, eye-catching ones that will play a bigger role in your business. So put your existing websites in our hands and we assure you a brand new look that will represent your business with style and sophistication. SINCE 2007, WE'VE WORKED WITH COMPANIES BIG AND SMALL TO BRING THEIR BUSINESS CONCEPT TO THE WEB. Web products don't sit on a desk anymore. We craft them to work on devices of any size and shape. Our designers are long-time experts in making websites adapt. Let's put it this way: we are designers and experts in what we do. All projects start with gathering information and requirements from our clients; we then propose solutions and ideas to ensure your idea works.
#! /usr/bin/python import sys import math from math import sin, cos import cPickle as pickle import struct import numpy from numpy import matrix, array from itertools import izip from support import * BITSET_TEMPLATE = \ " %s = (%s & ~(1L << %d)) | (((%s >> %d) & 1L) << %d);" def build_transform_func(i, cube, pattern): output = [ 'inline unused cube_t *', 'cube_transform_%i(cube_t *dst, cube_t *src)' % (i,), '{', ' uint64_t low = src->low;', ' uint64_t high = src->high;', ' dst->low = low;', ' dst->high = high;'] for c1, c2 in izip(cube, pattern): if c1 == c2: continue src = coord_bit(*c1) if src < 64: srcs = "low" else: srcs = "high" src -= 64 dst = coord_bit(*c2) if dst < 64: dsts = "dst->low" else: dsts = "dst->high" dst -= 64 output.append(BITSET_TEMPLATE % \ (dsts, dsts, dst, srcs, src, dst)) output.extend([' return dst;', '}', '']) return '\n'.join(output) def build_transform_group(group, isomorphs, stable, refs=None): output = [ '', 'inline unused cube_t *', 'cube_transform_group%d(cube_t *dst, cube_t *src, int index)' % \ (group,), '{', ' switch(index) {'] cube = list(isomorphs[0]) stable = cube.index(stable) index = 0 for i, isomorph in enumerate(isomorphs): if cube[stable] != isomorph[stable]: continue output.extend([ ' case %d:' % (index,), ' return cube_transform_%d(dst, src);' % (i,)]) index += 1 output.insert(0, '#define CUBE_GROUP%d_TRANSFORMS (%d)' % (group, index)) output.extend([ ' default:', ' return NULL;', ' }', '}', '']) if refs: ref1s = cube.index(refs[0]) ref1d = cube.index(refs[1]) ref2s = cube.index(refs[2]) ref2d = cube.index(refs[3]) for i, isomorph in enumerate(isomorphs): if cube[ref1s] != isomorph[ref1d] or \ cube[ref2s] != isomorph[ref2d]: continue output.extend([ '#define cube_reflect_group%d cube_transform_%d' % (group, i), '']) break return '\n'.join(output) def main(argv=sys.argv): shift = matrix([2, 2, 2]) cube = [] mirror = [] for x, y, z in product(xrange(MAXX), repeat=3): cube.append((x, y, z)) mirror.append(((MAXX - 1) - x, y, z)) base1 = matrix(cube) - shift base2 = matrix(mirror) - shift isomorphs = set() for x, y, z in product(ANGLES, repeat=3): for base in (base1, base2): r = (base * rot_matrix(x, y, z)).round(0) r += shift r = tuple(tuple(int(i) for i in j) for j in r.tolist()) isomorphs.add(r) isomorphs = sorted(isomorphs) print "#ifndef TRANSFORMS_H" print "#define TRANSFORMS_H\n" for i, pattern in enumerate(isomorphs): print build_transform_func(i, cube, pattern) print build_transform_group(3, isomorphs, (0, 0, 0), [(0, 2, 0), (0, 2, 4), (4, 2, 0), (4, 2, 4)]) print build_transform_group(6, isomorphs, (0, 0, 2), [(0, 0, 2), (4, 0, 2), (0, 4, 2), (4, 4, 2)]) print build_transform_group(12, isomorphs, (2, 0, 2), [(0, 0, 2), (0, 4, 2), (4, 0, 2), (4, 4, 2)]) print build_transform_group(24, isomorphs, (2, 2, 2)) print "#endif /* TRANSFORMS_H */" return 0 if __name__ == '__main__': sys.exit(main())
In recent years there has been a growing recognition that, in the long run, effective management of cash flow is more important than profit. Long-term cash flow is the real value of a business. It has also been recognised that there is significant potential for improved financial performance from more effective management of working capital - both directly, from immediate cash gains and reduced net interest costs, and indirectly, through its impact on increased profitability and return on capital employed. A key challenge in achieving these performance improvements is that actual levels of working capital and delivery of cash flow are determined by the day-to-day actions of a great many managers and staff, which in large corporations often run into tens of thousands. Acquire an understanding of the fundamentals of effective management of cash flow, including the optimisation of the level of working capital. Develop practical experience of how to manage cash flow and optimise working capital to facilitate such delivery in real-life business situations. Develop confidence through understanding the major drivers of successful financial performance. Learn a number of technical skills, all of which lead to the ability to calculate the required figures and implement them in value-adding business decisions. This seminar is strongly participative, with a learning-by-doing style that makes extensive use of examples, team-based exercises, case studies (both numeric/analytical and non-numeric/descriptive), and open discussion. A fair level of numeracy is an advantage on this course, as is some prior familiarity with the financial basics of income statements, balance sheets, and cash flow statements (although this is not essential). The course will include refresher sessions on accounting and the basics of discounted cash flow (DCF). Each day will comprise several sessions, introducing new material to build on the learning from previous sessions. The programme is designed to support the company's organisational values, enabling employees to learn key finance skills: better budgeting and planning of cash and working capital, and spreadsheet modelling tools and approaches. Day 3 covers optimisation of working capital. The course suits members of the treasury departments of companies within the oil and gas, petrochemical, and other industries who have responsibilities for managing cash flow and working capital; business professionals, strategic and business development planners, and project professionals who wish to refresh their understanding and enhance their skills in managing and improving cash flow and working capital performance; personnel employed in banking, insurance and the general financial services sector, where the management of cash is of prime importance; and any operational, engineering, commercial, marketing, technical, or financial personnel whose work impacts cash flow or working capital and who wish to develop their expertise in these areas to support their current roles or for career development.
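Since the course includes a refresher on discounted cash flow, a small illustration of the underlying arithmetic may help; this Python sketch simply discounts an invented series of annual cash flows at a fixed 10% rate (all figures are hypothetical, not course material):

# Net present value of hypothetical annual cash flows at a 10% discount rate.
cash_flows = [-1000.0, 300.0, 400.0, 500.0]  # year-0 outlay, then inflows
rate = 0.10
npv = sum(cf / (1 + rate) ** t for t, cf in enumerate(cash_flows))
print(round(npv, 2))  # -21.04: slightly value-destroying at this rate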
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    # Django uses this attribute to recognize squashed migrations, but we are
    # abusing it to tell Django that this migration replaces a migration
    # already run and recorded with a different app name.
    replaces = [(b'evernote_reports', '0001_initial')]

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='PaidTask',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('task_type', models.PositiveSmallIntegerField(default=0, db_index=True, verbose_name='Type', choices=[(0, 'Translation'), (1, 'Review'), (2, 'Hourly Work'), (3, 'Correction')])),
                ('amount', models.FloatField(default=0, verbose_name='Amount')),
                ('rate', models.FloatField(default=0)),
                ('datetime', models.DateTimeField(verbose_name='Date', db_index=True)),
                ('description', models.TextField(null=True, verbose_name='Description')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
If you are looking for carport installation and repair in Bettendorf, Iowa, we can help. With years of carport installation and repair experience in Bettendorf, our top rated Carport Construction and Repair Services professionals are the pros for your job. We offer carport construction, kit installations and repairs in the Bettendorf area. We can help with any type of carport need that you may have in mind. From building clear-span carports to custom carport builds we are the pros for the job. We only work with top quality materials and our Bettendorf based contractors are great at what they do. Fill out the short form, tell us about your carport needs, and we will get right back to you with estimates and the best options. We are the leading installation and repair professional on quality carports, garages, RV carports, carports for boats, workshops, and more in the Bettendorf area. If you need a Carport Expert in Bettendorf, Williams Construction is ready to help. We can handle just about everything Carport Construction and Repair Services related. Contact us above and we will get back to you with a free quote in no time.
import sys import os import re from subprocess import call from config import xdm_py, xhm_py, xvm_py, xas_py, xda_py, xga_py, xdg_py, xbas_py # Utility functions def ordw(word): """word ord""" return (word[0] << 8) | word[1] def chrw(word): """word chr""" return bytes((word >> 8, word & 0xff)) def xint(s): """return hex or decimal value""" return int(s.lstrip('>'), 16 if s[:2] == '0x' or s[:1] == '>' else 10) def sinc(s, i): """return string increased by i""" return s[:-1] + chr(ord(s[-1]) + i) # Test management functions def xdm(*args, **kargs): """invoke Disk Manager""" print('DM:', args) if kargs.get('shell'): rc = call(' '.join(xdm_py + list(args)), shell=True) else: rc = call(xdm_py + list(args), stdin=kargs.get('stdin'), stdout=kargs.get('stdout'), stderr=kargs.get('stderr')) if rc != kargs.get('rc', 0): error('OS', 'xdm99 call returned with failure code ' + str(rc)) def xhm(*args, **kargs): """invoke HFE Manager""" print('HM:', args) if kargs.get('shell'): rc = call(' '.join(xhm_py + list(args)), shell=True) else: rc = call(xhm_py + list(args), stdout=kargs.get('stdout'), stderr=kargs.get('stderr')) if rc != kargs.get('rc', 0): error('OS', 'xhm99 call returned with failure code ' + str(rc)) def xvm(*args, **kargs): """invoke Volume Manager""" print('VM:', args) if kargs.get('shell'): rc = call(' '.join(xvm_py + list(args)), shell=True) else: rc = call(xvm_py + list(args), stdout=kargs.get('stdout'), stderr=kargs.get('stderr')) if rc != kargs.get('rc', 0): error('OS', 'xvm99 call returned with failure code ' + str(rc)) def xas(*args, **kargs): """invoke assembler""" print('AS:', args) rc = call(xas_py + list(args), stdout=kargs.get('stdout'), stderr=kargs.get('stderr')) if rc != kargs.get('rc', 0): error('OS', 'xas99 call returned with failure code ' + str(rc)) def xda(*args, **kargs): """invoke disassembler""" print('DA:', args) rc = call(xda_py + list(args), stdout=kargs.get('stdout'), stderr=kargs.get('stderr')) if rc != kargs.get('rc', 0): error('OS', 'xda99 call returned with failure code ' + str(rc)) def xga(*args, **kargs): """invoke GPL assembler""" print('GA:', args) rc = call(xga_py + list(args), stdout=kargs.get('stdout'), stderr=kargs.get('stderr')) if rc != kargs.get('rc', 0): error('OS', 'xga99 call returned with failure code ' + str(rc)) def xdg(*args, **kargs): """invoke GPL disssembler""" print('DG:', args) rc = call(xdg_py + list(args), stdout=kargs.get('stdout'), stderr=kargs.get('stderr')) if rc != kargs.get('rc', 0): error('OS', 'xdg99 call returned with failure code ' + str(rc)) def xbas(*args, **kargs): """invoke TI BASIC tool""" print('BAS:', args) rc = call(xbas_py + list(args), stdout=kargs.get('stdout'), stderr=kargs.get('stderr')) if rc != kargs.get('rc', 0): error('OS', 'xbas99 call returned with failure code ' + str(rc)) def error(tid, msg): """report test error""" sys.exit('ERROR: ' + tid + ': ' + msg) # Common check functions def content(fn, mode='rb'): """return contents of file""" with open(fn, mode) as f: data = f.read() return data def content_lines(fn): """return lines of file""" with open(fn, 'r') as f: lines = ' '.join(f.readlines()) return lines def content_len(fn): """return length of file""" return os.path.getsize(fn) def check_file_exists(fn): """check if given file exists""" return os.path.isfile(fn) def check_file_empty(fn): """return if file is empty""" return os.path.getsize(fn) == 0 def concat(flist, out): """concatenate file contents""" with open(out, 'wb') as fout: for fn in flist: with open(fn, 'rb') as fin: data = fin.read() 
                fout.write(data)


# Common check functions: xdm99

def check_files_eq(tid, infile, reffile, fmt, mask=None):
    """check file equality, as text or binary depending on record format"""
    if fmt[0] == 'D':
        if 'V' in fmt:
            check_text_files_eq(tid, infile, reffile)
        else:
            check_binary_files_eq(tid, infile, reffile, [])
    else:
        check_binary_files_eq(tid, infile, reffile, mask or [])


def check_text_files_eq(tid, infile, reffile, skip=0):
    """check if file matches reference file"""
    with open(infile, 'r') as fin, open(reffile, 'r') as fref:
        if fin.readlines()[skip:] != fref.readlines()[skip:]:
            error(tid, '%s: File contents mismatch' % infile)


def check_text_lines_eq(tid, infile, reffile, fmt):
    """check if text files are equal modulo trailing spaces"""
    reclen = int(re.search(r'\d+', fmt).group(0))
    with open(infile, 'r') as fin, open(reffile, 'r') as fref:
        reflines = [line[:-1] + ' ' * (reclen - len(line) + 1) + '\n' for line in fref.readlines()]
        if fin.readlines() != reflines:
            error(tid, '%s: File contents mismatch' % infile)


def check_binary_files_eq(tid, infile, reffile, mask=()):
    """check if binary files are equal modulo mask"""
    with open(infile, 'rb') as fin, open(reffile, 'rb') as fref:
        indata = fin.read()
        refdata = fref.read()
        cutlen = 0
        for i, j in mask:
            assert cutlen <= i <= j
            indata = indata[:i - cutlen] + indata[j - cutlen:]
            refdata = refdata[:i - cutlen] + refdata[j - cutlen:]
            cutlen += j - i
        if indata != refdata:
            error(tid, '%s: File contents mismatch' % infile)


def check_bin_text_eq(tid, infile, reffile):
    """check if DISPLAY files with binary parts are equal"""
    with open(infile, 'rb') as fin, open(reffile, 'rb') as fref:
        indata = fin.read()
        refdata = fref.read()
        if indata == refdata:
            return
        # replace line separators by 0xff
        indata_norm = indata.replace(b'\x0d\x0a', b'\xff').replace(b'\x0a', b'\xff').replace(b'\x0d', b'\xff')
        refdata_norm = refdata.replace(b'\x0d\x0a', b'\xff').replace(b'\x0a', b'\xff').replace(b'\x0d', b'\xff')
        if indata_norm != refdata_norm:
            error(tid, 'Normalized file contents mismatch')


def check_file_matches(infile, matches):
    """check if text file contents match regular expressions"""
    try:
        with open(infile, 'r') as f:
            contents = f.readlines()
    except IOError:
        error('CLI', '%s: File not found' % infile)
    for line, pattern in matches:
        try:
            if not re.search(pattern, contents[line]):
                error('CLI', '%s: Line %d does not match' % (infile, line))
        except IndexError:
            error('CLI', '%s: Line %d missing' % (infile, line))


# Common check functions: xas99

def _tags(objcode, compressed, filter=None):
    taglen = 3 if compressed else 5
    refdefs = []
    if compressed:
        objcode = b''.join([objcode[i:i + 80].rstrip() for i in range(0, len(objcode) - 80, 80)])  # ignore :id line
    else:
        # ignore line numbers and :id line
        objcode = b''.join([objcode[i:i + 76].rstrip() for i in range(0, len(objcode) - 80, 80)])
    yield objcode[:taglen]  # rec count
    objcode = objcode[taglen + 8:].lstrip()  # skip to first tag
    while objcode:
        if objcode[:1] in b'3456':  # those might not be in the same order for xas99
            refdefs.append(objcode[:taglen + 6])  # include name
            objcode = objcode[taglen + 6:]
        elif objcode[:1] == b'7' and refdefs:
            # ignore checksum in refdefs
            objcode = objcode[taglen:].lstrip()
        elif objcode[:1] == b'F':
            objcode = objcode[1:]  # just skip end of record marker
        else:
            if filter is None or objcode[0] in filter:
                yield objcode[:taglen]
            objcode = objcode[taglen:]
    for tag in sorted(refdefs):
        yield tag


def check_obj_code_eq(infile, reffile, compressed=False, tagfilter=None):
    """check if object code files are equal modulo id tag"""
    with open(infile, 'rb') as fin, open(reffile, 'rb') as fref:
        indata = fin.read()
        refdata = fref.read()
    for i, (intag, reftag) in enumerate(zip(_tags(indata, compressed, tagfilter),
                                            _tags(refdata, compressed, tagfilter))):
        if intag != reftag:
            error('Object code', 'Mismatch for tag no. {:d}: {}/{}'.format(i, str(intag), str(reftag)))


def check_image_files_eq(genfile, reffile, ignore=()):
    """check if non-zero bytes in binary files are equal"""
    with open(genfile, 'rb') as fg, open(reffile, 'rb') as fr:
        genimage = fg.read()
        refimage = fr.read()
    for imin, imax in ignore:  # must be in decreasing order!
        genimage = genimage[:imin] + genimage[imax:]
        refimage = refimage[:imin] + refimage[imax:]
    if not 0 <= len(genimage) - len(refimage) <= 1:
        error('Image', 'Image length mismatch')
    if (genimage[:2] != refimage[:2] or
            not (0 <= ordw(genimage[2:4]) - ordw(refimage[2:4]) <= 1) or
            genimage[4:6] != refimage[4:6]):
        error('Image', 'Image header mismatch')
    # TI-generated images may contain arbitrary bytes in BSS segments
    for i in range(4, len(refimage)):
        if genimage[i] and genimage[i] != refimage[i]:
            error('Image', 'Image contents mismatch @ ' + hex(i))


def check_image_set_eq(gendata, refdata):
    """check if generated image set matches reference set, modulo image padding"""
    if any(data[:2] != b'\xff\xff' for data in gendata[:-1]):
        error('image', 'Bad continuation marker')
    if gendata[-1][:2] != b'\x00\x00':
        error('image', 'Missing >0000 end marker')
    sortedgendata = sorted(gendata, key=lambda d: ordw(d[4:6]))
    sortedrefdata = sorted(refdata, key=lambda d: ordw(d[4:6]))
    if len(sortedgendata) != len(sortedrefdata):
        error('image', 'Image file count mismatch')
    for genimage, refimage in zip(sortedgendata, sortedrefdata):
        padlen = len(refimage) - len(genimage)
        if not 0 <= padlen <= 1:
            error('Image', 'Image length mismatch')
        if not (ordw(refimage[2:4]) - ordw(genimage[2:4]) == padlen) or genimage[4:6] != refimage[4:6]:
            error('Image', 'Image header mismatch')
        # TI-generated images may contain arbitrary bytes in BSS segments
        for i in range(4, len(refimage) - padlen):
            if genimage[i] and genimage[i] != refimage[i]:
                error('Image', 'Image contents mismatch @ ' + hex(i))


def check_list_files_eq(genfile, reffile, ignore_lino=False):
    """check if listing files are equivalent

    ignores the symbol listing at the end of reffile by checking only up to the end of genfile
    """
    with open(genfile, 'r') as fg, open(reffile, 'r') as fr:
        genlist = [(l[:16] + l[19:]).rstrip() for l in fg.readlines()
                   if l.strip() and l[5:9] != '****' and l[19] != '<']
        reflist = [l[2:].rstrip() for l in fr.readlines() if l[:2] == '  ']
    gi, ri = 1, 0  # skip assembler header note
    min_col, max_col = 4 if ignore_lino else 0, 74
    while gi < len(genlist):
        gl, rl = genlist[gi], reflist[ri]
        # ignore deliberate changes
        try:
            if gl[10] in '.X':
                rl = rl[:10] + gl[10:15] + rl[15:]  # no data
            if gl[14] == 'r' and rl[14] == "'":  # reloc
                rl = rl[:14] + 'r' + rl[15:]
            if gl[14] == 'e':  # reloc or external (REF)
                rl = rl[:10] + '0000e' + rl[15:]  # remove REF chain
            if 'ORG' in rl[16:] or 'BES' in rl[16:]:
                rl = rl[:5] + gl[5:9] + rl[9:]  # no address
            gl = gl.replace(';', '*')  # unify comments
            # ignore listing directives
            if 'TITL' in gl[16:] or 'PAGE' in gl[16:] or 'UNL' in gl[16:] or 'LIST' in gl[16:]:
                gi += 1
                continue
            # ignore BYTE sections
            if gl[16:] == rl[16:] and ('BYTE' in gl[16:] or 'TEXT' in gl[16:]):
                gi, ri = gi + 1, ri + 1
                while genlist[gi][:4] == '    ':
                    gi += 1
                while reflist[ri][:4] == '    ':
                    ri += 1
                continue
        except IndexError:
            pass
        if gl[min_col:max_col] != rl[min_col:max_col]:
            error('List file', f'Line mismatch in {gi}/{ri}:\n{gl}\n{rl}')
        gi, ri = gi + 1, ri + 1
def check_list_against_binary(listfile, binfile):
    """check that the memory words in a list file match the binary"""
    with open(listfile, 'r') as fl, open(binfile, 'rb') as fb:
        lines = fl.readlines()
        blob = fb.read()
    mem = {}
    for line in lines:
        try:
            addr = int(line[5:9].strip(), 16)
        except ValueError:
            continue
        word = line[10:14].strip()
        try:
            value = int(word, 16)
        except ValueError:
            if word == 'XXXX':
                value = 0
            else:
                continue
        if len(word) == 2:
            mem[addr] = value
        elif len(word) == 4:
            mem[addr] = value >> 8
            mem[addr + 1] = value & 0xff
    lc = min(mem)
    for b in blob:
        listb = mem.get(lc, 0)
        if listb != b:
            error('bin list', f'Unexpected byte @>{lc:04X}: >{listb:02X}, expected >{b:02X}')
        lc += 1


def check_dat_file_eq(datfile, binfile):
    """check that textual representation matches binary file"""
    with open(datfile, 'r') as fd, open(binfile, 'rb') as fb:
        dat = ''.join(fd.readlines()[1:])
        bindata = fb.read()  # renamed from 'bin' to avoid shadowing the builtin
    values = [xint(x) for x in re.findall(r'>\w{2}', dat)]
    if bytes(values) != bindata:
        error('dat', 'Values and binary file mismatch')


# Common check functions: xda99/xdg99

def check_bytes(outfile, reffile):
    """check that outfile has no more data than reffile"""
    outbytes, cntbytes = count_bytes(outfile), count_bytes(reffile)
    if outbytes > cntbytes:
        error('BYTEs', 'Too many BYTEs/DATAs: %d instead of %d' % (outbytes, cntbytes))


def count_bytes(fn):
    """count bytes declared by directives in source"""
    byte_count = 0
    with open(fn, 'r') as fin:
        source = fin.readlines()
    for line in source:
        # get rid of quoted single quotes ''
        line = re.sub(r"'(?:[^']|'')*'",
                      lambda x: ','.join(['z'] * (len(x.group(0)) - 2 - x.group(0)[1:-1].count("''"))),
                      line)
        # get instruction parts
        parts = re.split(r'\s+', line, maxsplit=2)
        if len(parts) > 2 and parts[1].lower() in ('byte', 'data', 'stri', 'text'):
            # get all args
            args = [x.strip() for x in parts[2].split(',') if x.strip()]
            # know what you count
            if parts[1].lower() == 'data':
                byte_count += len(args) * 2
            elif parts[1].lower() == 'text':
                byte_count += sum([len(a) // 2 if a[0] == '>' else 1 for a in args])
            elif parts[1].lower() == 'stri':
                byte_count += sum([len(a) // 2 if a[0] == '>' else 1 for a in args]) + 1  # len byte
            else:
                byte_count += len(args)
    return byte_count


def check_indent(fn, blocks):
    """check if first lines are indented correctly"""
    with open(fn, 'r') as fin:
        source = fin.readlines()
    indents = []
    for line in source:
        if not line:
            continue
        if line[0] == ' ':
            indents.append(re.match(r'\s+(\w)', line).start(1))
        else:
            try:
                indents.append(re.match(r'(?:[\w?!~]+\s+){%d}(\w)' % blocks, line).start(1))
            except AttributeError:
                pass
    if len(indents) < 3:
        error('indent', 'Too few indent values: %d' % len(indents))
    return all([i == indents[0] for i in indents[1:]])


def count_mnemonics(fn, offset=0, wanted=None):
    """build dict of all occurring mnemonics"""
    with open(fn, 'r') as fin:
        source = [l[offset:] for l in fin.readlines()]
    mnems = {}
    for line in source:
        parts = re.split(r'\s+', line.rstrip(), maxsplit=2)
        if len(parts) < 2:
            continue
        mnem = parts[1].lower()
        if wanted is not None and wanted != mnem:
            continue
        n = mnems.setdefault(mnem, 0)
        mnems[mnem] = n + 1
    return mnems.get(wanted, 0) if wanted is not None else mnems


def check_source(outfile, reffile):
    """compare sources"""
    with open(outfile, 'r') as fout, open(reffile, 'r') as fref:
        out = fout.readlines()
        ref = fref.readlines()
    j = -1
    for i, oline in enumerate(out):
        # split output instruction (generated source)
        oinstr = re.split(r'\s+', re.sub(';.*$', '', oline.rstrip()).lower(), 2)
        if len(oinstr) < 2 or oinstr[1] == 'equ':
            continue  # no instruction
        oargs = [a.strip().upper() for a in oinstr[2].split(',')] if len(oinstr) > 2 else []
        rline, rinstr, urargs = '', (), ()
        while True:
            j += 1
            rline = re.sub(';.*$', '', ref[j]).rstrip()
            if rline[:1] == '*':
                continue  # ignore comments
            if 'IGNORE' in rline:
                break  # don't compare two corresponding lines
            # split reference instruction (original source)
            rinstr = re.split(r'\s+', rline.lower(), 2)
            rargs = [a.strip().upper() for a in rinstr[2].split(',')] if len(rinstr) > 2 else []
            # uniform numerical arguments >XXXX, except if they're
            # already >XX (for xdg99)
            urargs = [('>%04X' % xint(a)) if (a[0] == '>' and len(a) != 3) or a.isdigit() else a
                      for a in rargs]
            if rline and rinstr[0][-1:] != ':' and rinstr[1] != 'equ':
                break
        if 'IGNORE' not in rline and (oinstr[1] != rinstr[1] or oargs != urargs):
            error('source', 'Mismatch in line %d:\n(R) %s\n(O) %s' % (i, rline, oline))


def check_origins(fn, origins):
    """check origins in source"""
    with open(fn, 'r') as fin:
        source = fin.readlines()
    ocnt = 0
    for line in source:
        m = re.match(r'^(\w+)\s[^;]*; <-(.*)$', line)
        if m:
            addr = int(m.group(1), 16)
            anns = [int(a.strip()[1:], 16) for a in m.group(2).split(',')]
            if addr in origins:
                if origins[addr] == anns:
                    ocnt += 1
                else:
                    error('origin', 'Origin mismatch @%04X' % addr)
    if ocnt != len(origins):
        error('origin', 'Origin count mismatch: %d/%d' % (ocnt, len(origins)))


def read_stderr(fn, include_warnings=False):
    """read stderr output"""
    errors = []
    with open(fn, 'r') as f:
        lines = f.readlines()
    for err_msg, line in zip(lines[::2], lines[1::2]):  # renamed from 'error' to avoid shadowing error()
        if 'Warning' in line:
            if include_warnings:
                warn = re.search(r'<.>\s+(\d+|\*+)\s+-', err_msg)
                if warn:
                    errors.append(warn.group(1))
            else:
                continue  # ignore warnings
        else:
            err = re.search(r'<.>\s+(\d+)', err_msg)
            if err:
                errors.append(err.group(1))
    return errors


def get_source_markers(source, tag):
    """collect line numbers (or explicit markers) of tagged source lines"""
    ref_errors = []
    with open(source, 'r') as f:
        for i, line in enumerate(f):
            m = re.search(tag, line)
            if m:
                try:
                    if m.group(1):
                        ref_errors.append(m.group(1)[1:])
                        continue
                except IndexError:
                    pass
                ref_errors.append(f'{i + 1:04d}')
    return ref_errors


def check_errors(ref, actual):
    """compare two error lists for matching line numbers"""
    for err in ref:
        if err not in actual:
            error('Error messages', 'Missing error of line ' + err)
    for err in actual:
        if err[0] == '*':
            continue
        if err not in ref:
            error('Error messages', 'Extraneous error in line ' + err)


def check_ellipsis(fn, skip=0):
    """check that '....' markers separate non-contiguous address segments"""
    with open(fn, 'r') as fin:
        addrs = [None if line[0] == '.' else int(line[:4], 16) for line in fin.readlines()[skip:]]
    for i, a in enumerate(addrs):
        if a is None:
            continue
        try:
            if addrs[i + 1] is None:
                if addrs[i + 2] - a <= 2:
                    error('concise', "Badly placed '....' within address segment")
            else:
                if addrs[i + 1] - a > 2:
                    error('concise', "Missing '....' between two address segments")
        except IndexError:
            pass


# Common check functions: xga99

def check_gbc_files_eq(name, genfile, reffile, offset=0):
    """check if non-zero bytes in binary files are equal"""
    with open(genfile, 'rb') as fgen, open(reffile, 'rb') as fref:
        genimage = fgen.read()
        refimage = fref.read()[6:]
    if genimage[offset:] != refimage and genimage[offset:] != refimage[:-1]:
        error('GPL image', 'Image mismatch: ' + name)
    if genimage[:offset] != bytes(offset):
        error('GPL image', 'Non-zero offset prefix')
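A hedged usage sketch of how a test case might drive the helpers above (the file names and assembler options here are made up; the real test suites supply their own):

# hypothetical test snippet
xas('ashello.a99', '-R', '-o', 'it.obj')        # assemble a source file (names/options assumed)
check_obj_code_eq('it.obj', 'ref/ashello.obj')  # compare against an assumed reference file
if not check_file_exists('it.obj'):
    error('demo', 'object file missing')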
These are just a handful of 1970s Weight Watchers recipe cards, as seen on Candyboots.com. Perhaps trends have changed in the last 30 years, or nutritional ideas have been redefined, but for me the thought of eating any kind of log or jellied food would scare the weight right off of me. I think some people call it starvation. The remaining cards in the collection are definitely worth a look, accompanying commentary included. It's clear that this diet program was concurrent with the age of disco biscuits and nose candy. Lucid thoughts were not necessarily flowing. Thanks again Bannister for another gem!
# Serial1 Class
import logging
import serial  # pyserial, needed for serial.Serial in run()
from threading import Thread


class Serial1(Thread):
    def __init__(self, serialPort, serialBaud):
        Thread.__init__(self)
        self.daemon = True
        self.running = True
        self.name = 'Serial Thread'
        # store the port settings for use in run(); the original
        # referenced the bare names there, which was a NameError
        self.serialPort = serialPort
        self.serialBaud = serialBaud
        self.start()

    def kill(self):
        self.running = False

    def write(self, data):
        try:
            self.port.write(data)
        except serial.SerialException:
            pass

    def readline(self):
        # accumulate characters until a complete 'BEG...END\r' frame is seen
        buf = ''
        while True:
            char = self.port.read()
            if char == 'B':
                buf = char
            else:
                buf += char
            # parenthesized: the original relied on operator precedence and
            # returned on any '\r', not only on complete frames
            if (char == '\r' or char == '') and buf[:3] == 'BEG' and buf[-4:] == 'END\r':
                return buf

    def run(self):
        try:
            self.port = serial.Serial(self.serialPort, self.serialBaud)
            logging.debug('starting')
        except serial.SerialException:
            return  # no port, nothing to read
        while self.running:
            try:
                data = self.readline()
                logging.debug(data)
            except serial.SerialException:
                pass
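A minimal usage sketch, assuming a POSIX-style port name and a device that frames its replies as BEG...END (every name below is made up):

# hypothetical usage of the Serial1 thread
link = Serial1('/dev/ttyUSB0', 9600)  # port and baud rate are assumptions
link.write('BEGSTATUS?END\r')         # the device protocol is assumed
# complete BEG...END frames are logged via logging.debug in run()
link.kill()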
Every holiday has its specialty items: conversation hearts, egg-shaped Reese’s peanut butter cups, cookies shaped like Christmas trees. Halloween certainly has its own signature items, especially in terms of candy, but it also has plenty of other spooky celebrations, tasty treats, and dastardly decorations exclusive to the season. As someone who’s a fan of all things Halloween, I’ve come to have a few favorite things about the Halloween season, things that bring out the fun and the fear of the holiday. I share those things with you here in the hope that you may find a new book or dessert or candle to help put you in the Halloween spirit.

Black Flame Candle (The Melting Library) – For all us Hocus Pocus lovers out there! This candle smells sweet (but not overwhelmingly so), with hints of spice and mustiness, although the mustiness may just be in my head, as I imagine the black flame candle has gathered quite a bit of dust as it’s waited 300 years to be relit. All of the candles from this shop are related to a fandom of some sort, and all the ones I’ve purchased have smelled absolutely wonderful! Plus, the labels are cleverly designed as old library checkout cards with details specific to that candle. An all-around great shop and a perfect Halloween scent.

Haunted Manor (Anthology Candles) – Buy this candle if you dare! But seriously, anyone who loves this famous Disney attraction will want this candle. It smells just like Gracey Manor: old and dusty, with a hint of dead flowers and graveyard dirt. It’s quite a unique scent (and far better smelling than it sounds), and the purple coloring evokes the muted shades of the ride as well. A hauntingly great candle and just one of the great Disney scents available from this shop.

Foggy Knight (Novelly Yours) – Earl Grey, wood smoke, and blood spatter: what better to remind you of London’s most notorious serial killer? Not only does this candle evoke shades of a night in Victorian England, but it looks amazing as well. Some might find it a bit macabre, but isn’t that what you want from a Halloween candle? This is another shop with book-themed candles, and they have plenty of villainous candles to get you more into the darker side of Halloween.

The Night Circus by Erin Morgenstern – This book isn’t scary by any means, but the fantastical world of the circus embodies all the magic and wonder of a childhood Halloween. Celia and Marco are both wonderfully complex and clever characters, and although theirs is ultimately a love story, there’s quite a bit of murder, misdirection, and mayhem before they get to their happy ending. This book is easily one of my all-time favorites, and it will likely become an annual Halloween read.

Miss Peregrine’s Home for Peculiar Children by Ransom Riggs – Don’t let the movie put you off reading this book. The original story, which takes place over the course of three books, is much more sinister than the film makes it out to be. The antique photos that served as inspiration for the main characters are both fascinating and eerie to examine, and the premise of the story overall is a lot of fun. Perhaps the most terrifying aspect of the books is that the bad guys are out to kill and eat children; this isn’t something you see often in books (the killing, maybe, but not so much the eating), and it certainly adds a more visceral level of horror. The Peculiar abilities are unique and well written, and the settings often provide dangers and a sense of creepiness that keeps you perpetually uneasy.
Even if you just look at the pictures, you should check out this book.

The Devil in the White City by Erik Larson – Serial killers are always good fodder for a scare, and real-life ones are twice as horrifying. This nonfiction account of the Chicago World’s Fair cleverly combines the story of H. H. Holmes, America’s first known serial killer, and his murder house with the story of the men in charge of designing and building the fair. I found myself loving both storylines, but I’d be lying if I didn’t say that part of the joy in reading about the various challenges of constructing this or that building came from getting a break from the crazed escapades of Holmes. He was good at what he did (very, very good), and the fact that he got away with the murders for so long is both horrifying and a bit awe-inspiring. Definitely not in a good way, though; I just can’t imagine how someone could kill that many people in such horribly ingenious fashions and not get caught for years. It’s terrifying to know that there are people like this out in the world, that they could be someone you see every day, and you may never know. That’s what’s so truly terrifying about this book: we believe that people are really decent human beings until we are proven horribly, horribly wrong. There are a number of documentaries and accounts of Holmes, but none of them set the stage for his murder spree or give you a sense of Chicago and the fair quite as well as this book. It’s also being made into a movie, with Leonardo DiCaprio supposedly starring as Holmes, so read the book before you see it on the big screen.

Apothecary Jars – Any proper poisoner, er, toxicologist needs her own stock to experiment with at home, but Halloween calls for some special ingredients. To make your own apothecary jars, find bottles of all shapes and sizes, fill them with whatever interesting things you can find around your house, and come up with a label to tell the world what’s stored inside. Or find some labels you like online and fill your jars accordingly. The older and weirder the bottles, the better to give them that mysteriously dangerous look. Then just set them out and watch people stare!

Pumpkins – Okay, I’ll admit that I’d never carved a pumpkin until last year. I can’t decide if that’s pitiful or just ridiculous, but whatever. I’d rather enjoy pumpkins that other people (people who actually know what they’re doing) have carved. There’s no end to how you can carve a pumpkin, and you can make it as scary or funny or exciting as you want. Or, if you’re like me and lacking carving skills, you can decorate your pumpkin another way. Paint on a design, maybe even using chalkboard paint so you can write your own spooky message. Glue on some sequins, tie on some ribbon, or wrap it in toilet paper and make it a mummy. Whether you go for traditional or for something new, you can’t go wrong with a few pumpkins to put you in the Halloween mood.

Garlands – When aren’t garlands a fun but simple decoration? Garlands can be different colors and different shapes, they can have words or paper creatures, they can be short or long, hung or draped, high or low. They work in small spaces and around furniture, and they look clean and classic while still being noticeable. Garlands are an easy, all-purpose decoration that can fit in anyone’s home.

People Are Strange by Johnny Hollow – The original recording by The Doors has an upbeat, distinctly 60s feel that keeps it from being taken too seriously.
This cover slows things down and adds in some creepy strings, making you believe the lead singer when she croons about the extra complexities that come with being strange.

House of the Rising Sun by Lauren O’Connell – There have been a number of covers of this song, but this version sounds drastically different from the most popular recording by The Animals. That version of the song relies heavily on the organ to evoke an old-timey feel, and the famous guitar arpeggio is definitely memorable, but the vocals are too loud and overstated, meant to grab attention rather than to truly relay the desperation of the narrator’s plight. Not so with O’Connell’s version. Once again, a slower tempo brings in the creepy, and the soulful vocals over the simple drums and the grooving guitar give true voice to the story of this lost soul.

The Beautiful People by Scala & Kolacny Brothers – Yet another cover, but the two versions sound so different that they may as well be different songs. Marilyn Manson’s original sounds like typical Marilyn Manson: screaming and growling, pounding drums, heavy on the guitar. This version is the exact opposite; it’s a slow, stately dirge, sung by a women’s choir with only a piano and a bass drum for accompaniment. Any slow, mostly a cappella song has the potential to sound creepy, but this one truly does, especially because they sound so good as they sing about such terrible things. It’s an eerily beautiful cover, but I try not to listen to it right before bed lest it end up as the soundtrack to my nightmares.

Poison Apples – These look even deadlier than the Evil Queen’s infamous apple, but they require much less spellwork to produce. These deviously deadly treats need just a few ingredients, and they don’t take very long to make. Choose your own evil color scheme, and use real sticks as skewers to add a more sinister look.

Bloody Truffles – These truffles are bloody looking and bloody delicious! They take a little longer to make, as they have to be refrigerated lest they melt, and the gory designs take a steady hand, but really, these are as complicated as you do or don’t want to make them. You could just dribble on the red food coloring and create blood spatter if you don’t have the patience to paint on all the blood. Finding the edible weapons may be a challenge, but bloody truffles sans weapons would look just as gruesome.

Butterbeer – This Harry Potter treat is delicious any time of the year, but it’s especially good for washing down the rest of your Halloween snacks. There are dozens of recipes for this drink (hot, cold, alcoholic, virgin), but the simplest one is pretty darn good. With just some marshmallow fluff, a bit of heavy whipping cream, and as much butterscotch sauce as you want, you have a fluffy topping to put on top of a tall glass of cream soda. Stir it in a bit (but be careful; it’ll fizz!) and you have a truly magical drink. It may not taste exactly like what they sell at the Wizarding World of Harry Potter, but it’s still pretty delicious.

Villain Appearances – This is the baddies’ time to shine! Maleficent, Dr. Facilier, and Oogie Boogie are just a few of the infamous Disney villains that can be found celebrating Halloween in the parks. But regardless of what mischief they get up to during the day, they all gather to take part in the Hocus Pocus Villain Spelltacular, hosted by none other than the Sanderson sisters, who are making another appearance in the mortal world to run amok.
If you have a soft spot for the bad guys, you don’t want to miss your chance to meet all these dastardly divas and ghastly gentlemen.

These wickedly sweet treats are only available at Mickey’s Not-So-Scary Halloween Party, so you’ll have to make a special trip to try one of them.

There are so many wonderful things that are exclusive to or evoke the feel of the Halloween season. Whether you prefer the magical, the mundane, or the macabre, everyone can find something to love about this spooky time of year. I hope that you, too, have some favorite Halloween things and that you’ve had the chance to enjoy them this year. If not, I hope you find some; maybe something from this list is just what you’re missing! So until the clock strikes midnight and the witching hour is gone, enjoy your sweets and ghoulish frights, and let the Halloween spirit live on!

My name is Jordan Finch: PhD candidate, former forensic scientist, writer, extreme bibliophile, introvert, and all-around fangirl. I love macarons, Netflix, and hot tea. Any purse I own must be big enough to fit at least one book and my writing journal.
"""Interface module for the H5md core implementation.""" import sys from six import iteritems from ...script_interface import PScriptInterface # pylint: disable=import class H5md(object): """H5md file object. Used for accessing the H5MD core implementation via the PScriptInterface. .. note:: Bonds will be written to the file automatically if they exist. Parameters ---------- filename : :obj:`str` Name of the trajectory file. write_pos : :obj:`bool`, optional If positions should be written. write_vel : :obj:`bool`, optional If velocities should be written. write_force : :obj:`bool`, optional If forces should be written. write_species : :obj:`bool`, optional If types (called 'species' in the H5MD specification) should be written. write_mass : :obj:`bool`, optional If masses should be written. write_charge : :obj:`bool`, opional If charges should be written. write_ordered : :obj:`bool`, optional If particle properties should be ordered according to ids. """ def __init__(self, write_ordered=True, **kwargs): self.valid_params = ['filename', "write_ordered"] if 'filename' not in kwargs: raise ValueError("'filename' parameter missing.") self.what = {'write_pos': 1 << 0, 'write_vel': 1 << 1, 'write_force': 1 << 2, 'write_species': 1 << 3, 'write_mass': 1 << 4, 'write_charge': 1 << 5} self.valid_params.append(self.what.keys()) self.what_bin = 0 for i, j in iteritems(kwargs): if i in self.what.keys(): if isinstance(j, bool): if j: self.what_bin += self.what[i] else: raise ValueError("{} has to be a bool value.".format(i)) elif i not in self.valid_params: raise ValueError( "Unknown parameter {} for H5MD writer.".format(i)) self.h5md_instance = PScriptInterface( "ScriptInterface::Writer::H5mdScript") self.h5md_instance.set_params(filename=kwargs['filename'], what=self.what_bin, scriptname=sys.argv[0], write_ordered=write_ordered) self.h5md_instance.call_method("init_file") def get_params(self): """Get the parameters from the scriptinterface.""" return self.h5md_instance.get_params() def write(self): """Call the H5md write method.""" self.h5md_instance.call_method("write") def flush(self): """Call the H5md flush method.""" self.h5md_instance.call_method("flush") def close(self): """Close the H5md file.""" self.h5md_instance.call_method("close")
This is not my first experience with Techart adapters. I have been and continue to be impressed by the Contax G lens to Sony E mount adapter. While I appreciate that they built it, I was always perplexed as to why. Why such a massive effort for such a niche product? Are there really enough Contax G nuts who also happen to own a Sony A7 out there? Now I realize they were not done. Once a screw-drive AF motor was accomplished, they built upon their Sony AF code-cracking experience with a universal AF drive.

Lens and adapter together weigh less than 700g. The flange distance is short enough to accommodate an adapter, meaning nearly any 35mm lens in existence will work, provided someone has made an adapter for it. Evidently there are some mad-persons already out there adapting non-Leica mount lenses to Leica M mount cameras. I do not get why you would want to mount a non-rangefinder lens to a manual focus rangefinder camera, but thank goodness they do.

Warning: one must have nerves of steel to mount an M39 screw mount adapter. Lens and adapter went on fine. Worked fine. Lens came off fine, but the adapter decided to hang on for a while. While well-built, I would not recommend wrenching on the front of the LM-EA7. After some physical cajoling and words not fit for print, it came off. But trust, there will not be a second attempt.

The Techart Contax G adapter successfully gave me an "as close to a digital Contax G" camera as I can ever realistically expect to have. But that was an AF system to start out with. Same for when I adapted legacy Minolta/Sony A mount AF lenses to Sony digital full frame AF via the LA-EA4. Same for when I adapted Canon digital EF AF lenses to Sony digital AF using the Sigma MC-11. All of these lenses were designed for AF from the start.

Face and eye detect focusing work. For normal operation, you set the lens to infinity and go. Thanks to the nature of adjustable lens adapters, it also adds close-focus, pseudo-macro capabilities: just focus closer with the lens, as you would with a helicoid lens adapter like the one I previously reviewed.

Not for sports or fast-moving objects. Not a ding. Not a surprise. Not what these MF lenses were meant for to begin with. Video? No. Not what this is for.

Is there value here? Admittedly, at just north of $350 this is a little pricey. I picked up mine for about $100 less on eBay. But I still say yes, there is value to be had here if you fit into one or more categories. If you already have a digital Leica and more than one lens, an A7III plus an LM-EA7 is a drop in the bucket (comparatively speaking, half the cost of a few Leica lenses or less) for a giggle or the occasional instance where AF (dare I say) might be advantageous with your existing lenses. Sony lenses are great, but many are pricey. Once I purchased the LM-EA7, I was able to use nearly any wide to short-tele MF lens: I gained AF for the price of a $19 whatever-mount-to-M-mount adapter. And there are plenty of great MF lenses for not a lot of money.

I don't know; digital and AF may not be your thing if you are enjoying what you are doing. The optical qualities of these lenses are a known variable, unchanged by focus method. What is not known is how sharp they can be with a proper automated focus system. I definitely saw an increase in keepers over manual focusing. Of course, those who regularly zone focus will not reap such benefits. Being able to routinely nail eye focus on an f/1.1 or similarly fast MF lens is a wonderful thing. As a result, I realized some lenses were sharper wide open than I had thought they were.
I began with no adapter and needed M mount lenses. I have a film M mount body, so I had a couple of lenses to try out. 7Artisans 50mm f/1.1 (M mount)… autofocus at f/1.1! You may have noticed that I have not talked a lot about the actual act of focusing. That is because it is just like using any Sony AF lens. It just works! You retain only AF-S and AF-C focus modes, but these are the only two I use anyway. Verdict? Two thumbs up from this guy. Here is a link to an ongoing gallery.
# -*- coding:utf-8 -*-
import MySQLdb
import MySQLdb.cursors  # for DictCursor below
import conf


class newdb(object):
    '''Database access helper (docstring translated: "data operations")'''

    def __init__(self, returnDict=False):
        '''Return a data-access object; with returnDict=True the cursor yields dicts'''
        self.conn = MySQLdb.connect(
            host=conf.db_host,
            user=conf.db_user,
            passwd=conf.db_pwd,
            db=conf.db_name,
            port=conf.db_port,
            charset='utf8'
        )
        if returnDict:
            self.cursor = self.conn.cursor(cursorclass=MySQLdb.cursors.DictCursor)
        else:
            self.cursor = self.conn.cursor()

    def commit(self):
        self.conn.commit()

    def close(self):
        self.conn.close()

    def fetchone(self, sql, args=None):
        self.cursor.execute(sql, args)
        result = self.cursor.fetchone()
        self.cursor.close()
        self.close()
        return result

    def fetchall(self, sql, args=None):
        self.cursor.execute(sql, args)
        result = self.cursor.fetchall()
        self.cursor.close()
        self.close()
        return result

    def query(self, sql, args=None):
        self.cursor.execute(sql, args)
        _row = self.cursor.rowcount  # read rowcount before closing the cursor (the original closed it first)
        self.commit()
        self.cursor.close()
        self.close()
        return _row


# ********* SQLAlchemy *****************#
'''
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker


class SA:
    'SQLAlchemy'
    engine = create_engine('mysql://%s:%s@%s:%s/%s?charset=utf8' % (
        conf.db_user,
        conf.db_pwd,
        conf.db_host,
        conf.db_port,
        conf.db_name
    ), echo=True)
    Session = sessionmaker(bind=engine)
'''
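A hedged usage sketch (the table and queries are made up; connection settings come from conf; note that each helper closes its own connection, so a fresh newdb is needed per call):

# hypothetical usage
db = newdb(returnDict=True)
row = db.fetchone("SELECT id, name FROM users WHERE id = %s", (1,))

affected = newdb().query("UPDATE users SET name = %s WHERE id = %s", ('mk', 1))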
Much of neuroscientist Shannon Gourley’s work focuses on the idea that adolescence is a vulnerable time for the developing brain. She and graduate student Lauren DePoy recently published a paper in Frontiers in Pharmacology showing that in adolescent rodents, cocaine exposure can cause the loss of dendritic arbors in a part of the brain important for decision-making. The researchers examined neurons in the orbitofrontal cortex, a region of the brain thought to be important for “linking reward to hedonic experience.” It was already known that stimulants such as cocaine can cause the loss of dendritic spines: small protrusions that are critical for communication and interaction between neurons. The mice were exposed to cocaine over the course of five days in early adolescence, and their behavior was then studied in adulthood. This level of cocaine exposure leads to impairments in instrumental task reversal, a test in which mice must change their habits (which chamber they poke their noses into) to continue receiving food pellets. The findings suggest a partial explanation for the increased risk of dependence in people who start using cocaine during adolescence.
import asyncio
from fnmatch import fnmatch
import logging

from obrbot import hook
from obrbot.plugin import HookType

plugin_info = {
    "plugin_category": "core",
    "command_category_name": "Administration"
}

logger = logging.getLogger("obrbot")


@asyncio.coroutine
@hook.sieve()
def ignore_sieve(event, hook_event):
    """ blocks events from ignored channels/hosts
    :type event: obrbot.event.Event
    :type hook_event: obrbot.event.HookEvent
    """
    bot = event.bot
    # don't block event hooks
    if hook_event.hook.type is HookType.event or hook_event.hook.type is HookType.irc_raw:
        return event

    # don't block server messages
    if event.mask is None:
        return event

    # don't block an event that could be un-ignoring
    if hook_event.hook.type is HookType.command and hook_event.hook.function_name == 'unignore':
        return event

    ignore_list = yield from event.async(bot.db.smembers, 'plugins:ignore:ignored')
    mask = event.mask.lower()
    for pattern in ignore_list:
        pattern = pattern.decode()
        if pattern.startswith('#'):
            if fnmatch(event.chan_name, pattern):
                logger.info("Ignoring {}: Skipping hook {}".format(event.chan_name, hook_event.hook.description))
                return None
        else:
            if fnmatch(mask, pattern):
                logger.info("Ignoring {}: Skipping hook {}".format(event.mask, hook_event.hook.description))
                return None

    return event


@asyncio.coroutine
@hook.command(autohelp=False, permissions=['ignored.view'])
def ignored(notice, async, db):
    """- lists all channels and users I'm ignoring"""
    ignore_list = yield from async(db.smembers, 'plugins:ignore:ignored')
    if ignore_list:
        notice("Ignored users: {}".format(", ".join(b.decode() for b in ignore_list)))
    else:
        notice("No users are currently ignored.")
    return


@asyncio.coroutine
@hook.command(permissions=['ignored.manage'])
def ignore(text, async, db):
    """<nick|user-mask> - adds <channel|nick> to my ignore list
    :type db: redis.StrictRedis
    """
    target = text.lower()
    if ('!' not in target or '@' not in target) and not target.startswith('#'):
        target = '{}!*@*'.format(target)

    added = yield from async(db.sadd, 'plugins:ignore:ignored', target)
    if added > 0:
        return "{} has been ignored.".format(target)
    else:
        return "{} is already ignored.".format(target)


@asyncio.coroutine
@hook.command(permissions=['ignored.manage'])
def unignore(text, async, db):
    """<nick|user-mask> - removes <nick|user-mask> from my ignore list
    :type db: redis.StrictRedis
    """
    target = text.lower()
    if ('!' not in target or '@' not in target) and not target.startswith('#'):
        target = '{}!*@*'.format(target)

    removed = yield from async(db.srem, 'plugins:ignore:ignored', target)
    if removed > 0:
        return "{} has been unignored.".format(target)
    else:
        return "{} was not ignored.".format(target)
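A quick sketch of how the sieve's fnmatch patterns behave (the masks below are made up):

# hypothetical pattern checks mirroring ignore_sieve's logic
from fnmatch import fnmatch
assert fnmatch('spammer!spam@host.example.com', 'spammer!*@*')  # a bare nick is expanded to nick!*@*
assert fnmatch('#badchannel', '#bad*')                          # channel patterns start with '#'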
With nefarious groups raising millions of dollars by looting and selling antiquities, the crafting billionaire and ardent evangelical might have inadvertently financed their activities. Left, ancient clay artifacts from Iraq that were recovered and returned to the Republic of Iraq after a smuggling investigation by the Department of Homeland Security last March. Right, Steve Green, the current Hobby Lobby president. By Mandel Ngan (Green), by Mark Wilson (Artifacts), both from Getty Images.

On Tuesday morning, the Daily Beast reported that Hobby Lobby’s C.E.O., Steve Green, whose company successfully challenged Obamacare’s contraception mandate on the grounds that it violated the owning family’s religious beliefs, was being investigated for the allegedly illicit importation of biblical-era Assyrian and Babylonian artifacts into the United States. The four-year investigation involves nearly 300 “small clay tablets” from what is now modern-day Iraq and Syria, bound for the Museum of the Bible, a multi-million-dollar complex in Washington, D.C., scheduled to open in 2017 and largely financed by the Green family, which owns Hobby Lobby and is worth an estimated $4.5 billion. But when the family tried to get the collections through U.S. Customs (having declared them, according to the Daily Beast, as “hand-crafted clay tiles” worth a collective $300), their actions triggered an F.B.I. inquiry. To date, the investigation is still ongoing, while a representative from the Museum of the Bible characterized it as a problem spawned from “incomplete paperwork.”

But antiquities experts raise another concern: that by purchasing this art in the first place, the Greens may have unknowingly sponsored military groups and terrorist networks like al-Qaeda, which has sold antiquities for more than a decade. “Anyone who purchases an antiquity without being 100 percent sure it is a legitimate piece is risking funding organized criminals, armed insurgents, and even terrorist networks, whether they be al-Qaeda or ISIS,” says Tess Davis, the executive director of the Antiquities Coalition, an organization devoted to combating the illegal trading of artifacts. Last February, the United Nations adopted a resolution stating that its members would seek to prevent terrorist groups, including ISIS, from profiting off these sales. According to Col. Matt Bogdanos, a single four-inch cylinder seal from ancient Babylonia can sell for nearly $250,000, and the U.N. ambassador from Iraq, Mohamed Ali Alhakim, estimated that ISIS earns more than $100 million per year from antiquities trading.

Though he couldn’t comment on the specifics of the Hobby Lobby case, Bogdanos says that in a hypothetical case, “if you have an antiquity that you can trace to Iraq and Syria, the question of where the money went is a fair, legitimate question, as long as you take into account two things.

“One is: there were intermediaries. There’s no [art] owner in the U.S. that’s giving to ISIS. They’re giving money to the dealer, and the dealer gave money to the person who got it out of Lebanon, [for instance,] and that person gave money to the person who got it out of Syria, and that person gave it to ISIS. Money’s money. It went through three or four sets of hands in between.

“Second, just because it’s Babylonian doesn’t mean it came from Iraq,” he said.
“A lot of conquerors have been through the area and have done their own looting over millennia.” Citing the case of the Parthenon temple, the remnants of which currently reside in three different countries, Bogdanos says it’s possible that a shipment of tablets could have been looted hundreds of years ago. Even if terrorist networks weren’t involved in the sale, there’s also the possibility of organized gangs: “When you’re starting to see these major seizures in the U.S., it tends to be that they’re working with organized gangs in-country to go out and look for specific items,” says Deborah Lehr, chair and founder of the Antiquities Coalition.
from multiprocessing import Process, Queue
import sys
import os
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from casexml.apps.stock.models import StockTransaction, StockReport, DocDomainMapping
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.database import get_db, iter_docs
from corehq.apps.domainsync.config import DocumentTransform, save
from couchdbkit.client import Database
from optparse import make_option
from datetime import datetime

# doctypes we want to be careful not to copy, which must be explicitly
# specified with --include
DEFAULT_EXCLUDE_TYPES = [
    'ReportNotification',
    'WeeklyNotification',
    'DailyNotification'
]

NUM_PROCESSES = 8


class Command(BaseCommand):
    help = "Copies the contents of a domain to another database."
    args = '<sourcedb> <domain>'
    option_list = BaseCommand.option_list + (
        make_option('--include',
                    action='store',
                    dest='doc_types',
                    default='',
                    help='Comma-separated list of Document Types to copy'),
        make_option('--exclude',
                    action='store',
                    dest='doc_types_exclude',
                    default='',
                    help='Comma-separated list of Document Types to NOT copy.'),
        make_option('--since',
                    action='store',
                    dest='since',
                    default='',
                    help='Only copy documents newer than this date. Format: yyyy-MM-dd. Only '),
        make_option('--list-types',
                    action='store_true',
                    dest='list_types',
                    default=False,
                    help='Don\'t copy anything, just list all the available document types.'),
        make_option('--simulate',
                    action='store_true',
                    dest='simulate',
                    default=False,
                    help='Don\'t copy anything, print what would be copied.'),
        make_option('--id-file',
                    action='store',
                    dest='id_file',
                    default='',
                    help="File containing one document ID per line. Only docs with these ID's will be copied"),
        make_option('--postgres-db',
                    action='store',
                    dest='postgres_db',
                    default='',
                    help="Name of postgres database to pull additional data from. This should map to a "
                         "key in settings.DATABASES. If not specified no additional postgres data will be "
                         "copied. This is currently used to pull CommTrack models."),
        make_option('--postgres-password',
                    action='store',
                    dest='postgres_password',
                    default='',
                    help="Password for postgres database to pull additional data from. If not specified will "
                         "default to the value in settings.DATABASES")
    )
    def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError('Usage is copy_domain %s' % self.args)

        sourcedb = Database(args[0])
        domain = args[1].strip()
        simulate = options['simulate']
        since = datetime.strptime(options['since'], '%Y-%m-%d').isoformat() if options['since'] else None

        if options['list_types']:
            self.list_types(sourcedb, domain, since)
            sys.exit(0)

        if simulate:
            print "\nSimulated run, no data will be copied.\n"

        if options['postgres_db'] and options['postgres_password']:
            settings.DATABASES[options['postgres_db']]['PASSWORD'] = options['postgres_password']

        self.targetdb = get_db()

        domain_doc = Domain.get_by_name(domain)
        if domain_doc is None:
            self.copy_domain(sourcedb, domain)

        if options['doc_types']:
            doc_types = options['doc_types'].split(',')
            for type in doc_types:
                startkey = [x for x in [domain, type, since] if x is not None]
                endkey = [x for x in [domain, type, {}] if x is not None]
                self.copy_docs(sourcedb, domain, simulate, startkey, endkey, type=type, since=since,
                               postgres_db=options['postgres_db'])
        elif options['id_file']:
            path = options['id_file']
            if not os.path.isfile(path):
                print "Path '%s' does not exist or is not a file" % path
                sys.exit(1)
            with open(path) as input_file:  # renamed from 'input' to avoid shadowing the builtin
                doc_ids = [line.rstrip('\n') for line in input_file]
            if not doc_ids:
                print "Path '%s' does not contain any document ID's" % path
                sys.exit(1)
            self.copy_docs(sourcedb, domain, simulate, doc_ids=doc_ids, postgres_db=options['postgres_db'])
        else:
            startkey = [domain]
            endkey = [domain, {}]
            exclude_types = DEFAULT_EXCLUDE_TYPES + options['doc_types_exclude'].split(',')
            self.copy_docs(sourcedb, domain, simulate, startkey, endkey, exclude_types=exclude_types,
                           postgres_db=options['postgres_db'])

    def list_types(self, sourcedb, domain, since):
        doc_types = sourcedb.view("domain/docs", startkey=[domain],
                                  endkey=[domain, {}], reduce=True, group=True, group_level=2)
        doc_count = dict([(row['key'][1], row['value']) for row in doc_types])
        if since:
            for doc_type in sorted(doc_count.iterkeys()):
                num_since = sourcedb.view("domain/docs", startkey=[domain, doc_type, since],
                                          endkey=[domain, doc_type, {}], reduce=True).all()
                num = num_since[0]['value'] if num_since else 0
                print "{0:<30}- {1:<6} total {2}".format(doc_type, num, doc_count[doc_type])
        else:
            for doc_type in sorted(doc_count.iterkeys()):
                print "{0:<30}- {1}".format(doc_type, doc_count[doc_type])

    def copy_docs(self, sourcedb, domain, simulate, startkey=None, endkey=None, doc_ids=None,
                  type=None, since=None, exclude_types=None, postgres_db=None):

        if not doc_ids:
            doc_ids = [result["id"] for result in sourcedb.view("domain/docs", startkey=startkey,
                                                                endkey=endkey, reduce=False)]
        total = len(doc_ids)
        count = 0
        msg = "Found %s matching documents in domain: %s" % (total, domain)
        msg += " of type: %s" % (type) if type else ""
        msg += " since: %s" % (since) if since else ""
        print msg

        err_log = self._get_err_log()

        queue = Queue(150)
        for i in range(NUM_PROCESSES):
            Worker(queue, sourcedb, self.targetdb, exclude_types, total, simulate, err_log).start()

        for doc in iter_docs(sourcedb, doc_ids, chunksize=100):
            count += 1
            queue.put((doc, count))

        # shutdown workers
        for i in range(NUM_PROCESSES):
            queue.put(None)

        err_log.close()
        if os.stat(err_log.name)[6] == 0:
            os.remove(err_log.name)
        else:
            print 'Failed document IDs written to %s' % err_log.name

        if postgres_db:
            self.copy_postgres_data(sourcedb, domain, postgres_db, doc_ids=doc_ids, simulate=simulate)
doc" result = sourcedb.view( "domain/domains", key=domain, reduce=False, include_docs=True ).first() if result and 'doc' in result: domain_doc = Domain.wrap(result['doc']) dt = DocumentTransform(domain_doc, sourcedb) save(dt, self.targetdb) else: print "Domain doc not found for domain %s." % domain def copy_postgres_data(self, sourcedb, domain, postgres_slug, simulate, doc_ids): # can make this more configurable or less hard coded eventually # also note that ordering here is important for foreign key dependencies postgres_models = [ (StockReport, 'form_id'), (StockTransaction, 'case_id'), (DocDomainMapping, 'doc_id'), # StockState objects are "derived" and get created by StockTransaction post_save signal. # We may want to directly port these over in the future. # (StockState, 'case_id'), ] for model, doc_field in postgres_models: query_set = model.objects.using(postgres_slug).filter( **{'{}__in'.format(doc_field): doc_ids} ) count = query_set.count() print "Copying {} models ({})".format(model.__name__, count) if not simulate: for i, item in enumerate(query_set): # this can cause primary key conflicts to overwrite local data I think. Oh well? item.save(using='default') print 'Synced {}/{} {}'.format(i, count, model.__name__) def _get_err_log(self): name = 'copy_domain.err.%s' for i in range(1000): # arbitrarily large number candidate = name % i if not os.path.isfile(candidate): return open(candidate, 'a', buffering=1) class Worker(Process): def __init__(self, queue, sourcedb, targetdb, exclude_types, total, simulate, err_log): super(Worker, self).__init__() self.queue = queue self.sourcedb = sourcedb self.targetdb = targetdb self.exclude_types = exclude_types self.total = total self.simulate = simulate self.err_log = err_log def run(self): for doc, count in iter(self.queue.get, None): try: if self.exclude_types and doc["doc_type"] in self.exclude_types: print " SKIPPED (excluded type: %s). Synced %s/%s docs (%s: %s)" % \ (doc["doc_type"], count, self.total, doc["doc_type"], doc["_id"]) else: if not self.simulate: dt = DocumentTransform(doc, self.sourcedb) save(dt, self.targetdb) print " Synced %s/%s docs (%s: %s)" % (count, self.total, doc["doc_type"], doc["_id"]) except Exception, e: self.err_log.write('%s\n' % doc["_id"]) print " Document %s failed! Error is: %s" % (doc["_id"], e)
Myrtle Graves was a woman born in 1902 in West Virginia. At the time of the 1940 US Census she was 38 years old and lived in Macon, Alabama, where she appears as the wife of the head of her household.
"""Module used for dealing with database stuff""" from pymongo import Connection """Json Object Values""" nameKey = 'name' passwordKey = 'password' authenticatedKey = 'authenticated' phoneKey = 'phone' def login(name, password, dbname="users", dbCollectionName="people"): """string name, string password, string dbname="users", string collection="people" sets authenticated to True for a given user""" success = False conn = Connection() db = conn[dbname] people = db[dbCollectionName] if (isInDatabase(name, dbname, dbCollectionName)): # should only loop through once for user in people.find({nameKey: name}): if (user[passwordKey] == password): success = updateUser(name, True, dbname, dbCollectionName) return success def logout(name, dbname="users", dbCollectionName="people"): """sets authenticated to False for a given user""" success = updateUser(name, False, dbname, dbCollectionName) return success def updateUser(name, authenticated, dbname="users", dbCollectionName="people"): """string name, Boolean authenticated, string dbname, string dbCollectioName Logs the user in if authenticated is True Logs the user out if authenticated is False Returns True if successful or False if not successful""" success = True conn = Connection() db = conn[dbname] people = db[dbCollectionName] if (isInDatabase(name, dbname, dbCollectionName)): people.update({nameKey: name}, {"$set": {authenticatedKey: authenticated}}, False) else: success = False return success def addUser(name, password, phone, dbname="users", dbCollectionName="people"): """string name, string password, string phone, string dbname, string dbCollectionName adds user to the database and returns False is username already exists automatically logs the user in after creating the account""" success = True conn = Connection() db = conn[dbname] if (not isInDatabase(name, dbname, dbCollectionName)): # Jsonifies the User, authenticated True means the user is logged in user = {nameKey: name, phoneKey: phone, passwordKey: password, authenticatedKey: True} people = db[dbCollectionName] people.insert(user) else: success = False return success def isInDatabase(name, dbname="users", dbCollectionName="people"): """takes string name, string dbname, string dbCollectionName checks if user is already in the database and returns False if username already exists""" conn = Connection() db = conn[dbname] # returns collection of users people = db[dbCollectionName] # there should be at most one instance of the user in the database success = (people.find({nameKey: name}).count() == 1) return success def main(): pass if __name__ == '__main__': main()
This entry was posted in 分享介紹 (Shares and Introductions) and 技術文章 (Technical Articles) and tagged security by mk. TrueCrypt finally supports OS X!
import pandas as pd
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.externals import joblib
import argparse
import os

# command line arguments
parser = argparse.ArgumentParser(description='Train a model for iris classification.')
parser.add_argument('indir', type=str, help='Input directory containing the training set')
parser.add_argument('outdir', type=str, help='Output directory for the trained model')
args = parser.parse_args()

# training set column names
cols = [
    "Sepal_Length",
    "Sepal_Width",
    "Petal_Length",
    "Petal_Width",
    "Species"
]
features = [
    "Sepal_Length",
    "Sepal_Width",
    "Petal_Length",
    "Petal_Width"
]

# import the iris training set
irisDF = pd.read_csv(os.path.join(args.indir, "iris.csv"), names=cols)

# fit the model
lda = LinearDiscriminantAnalysis().fit(irisDF[features], irisDF["Species"])

# output a text description of the model
with open(os.path.join(args.outdir, 'model.txt'), 'w') as f:
    f.write(str(lda))

# persist the model
joblib.dump(lda, os.path.join(args.outdir, 'model.pkl'))
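A hedged follow-up sketch showing how the persisted model might be loaded and used elsewhere (the path and the sample measurements are made up):

# hypothetical scoring snippet
from sklearn.externals import joblib

lda = joblib.load('model.pkl')   # path assumed relative to the output directory
sample = [[5.1, 3.5, 1.4, 0.2]]  # Sepal_Length, Sepal_Width, Petal_Length, Petal_Width
print(lda.predict(sample))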
Battle in the skies in this World War I era flying game. You control one lone aircraft against a team of two or more enemy drones; navigate carefully to shoot them out of the skies. How To Play: Steam Birds is mouse-controlled and played in turns. Use the mouse to set the direction of your Steam Bird and click the arrow in the top right. Game developed by Andy Moore.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from util.ObsFile import ObsFile
from util.FileName import FileName
from util import utils
from util import hgPlot
from cosmic.Cosmic import Cosmic
import tables
from scipy.optimize import curve_fit
from hotpix import hotPixels
import pickle
from interval import interval, inf, imath
from cosmic import tsBinner
import os
import sys
import logging


class CosmicRun:

    def __init__(self, path):
        print "begin path=", path
        os.chdir(path)
        settings_file = open("settings.par", 'r')  # renamed from 'file' to avoid shadowing the builtin
        self.s = {}
        for line in settings_file:
            temp = line.split("=")
            if len(temp) > 1:
                self.s[temp[0].strip()] = temp[1].strip()
                print temp[0].strip(), '=', temp[1].strip()
        settings_file.close()

    def findv1(self):
        populationMax = 2000
        ySum = np.zeros(populationMax)
        frameSum = 'none'
        seq5 = self.s['seq5'].split()
        for seq in seq5:
            print "seq=", seq
            outfileName = "cosmicTimeList-" + seq + ".pkl"
            if not os.path.exists(outfileName):
                fn = FileName(self.s['run'], self.s['sundownDate'], self.s['obsDate'] + "-" + str(seq))
                cosmic = Cosmic(fn, beginTime=self.s['beginTime'],
                                endTime=self.s['endTime'],
                                loggingLevel=logging.INFO)
                fc = cosmic.findCosmics(stride=int(self.s['stride']),
                                        threshold=int(self.s['threshold']),
                                        populationMax=populationMax,
                                        nSigma=float(self.s['nSigma']))
                outfile = open(outfileName, "wb")
                pickle.dump(fc['cosmicTimeList'], outfile)
                pickle.dump(fc['binContents'], outfile)
                outfile.close()
                cfn = "cosmicMask-%s.h5" % seq
                ObsFile.writeCosmicIntervalToFile(fc['interval'], 1.0e6, cfn,
                                                  self.s['beginTime'], self.s['endTime'],
                                                  int(self.s['stride']), int(self.s['threshold']),
                                                  float(self.s['nSigma']), populationMax)
                del cosmic

    def makemovie1(self):
        run = self.s['run']
        sundownDate = self.s['sundownDate']
        obsDate = self.s['obsDate']
        stride = int(self.s['stride'])
        seq5 = self.s['seq5'].split()
        for seq in seq5:
            inFile = open("cosmicTimeList-%s.pkl" % (seq), "rb")
            cosmicTimeList = pickle.load(inFile)
            binContents = pickle.load(inFile)
            cfn = "cosmicMask-%s.h5" % seq
            intervals = ObsFile.readCosmicIntervalFromFile(cfn)
            for interval in intervals:
                print "interval=", interval
                fn = FileName(run, sundownDate, obsDate + "-" + seq)
                obsFile = ObsFile(fn.obs())
                obsFile.loadTimeAdjustmentFile(fn.timeAdjustments())
                i0 = interval[0]
                i1 = interval[1]
                intervalTime = i1 - i0
                dt = intervalTime / 2
                beginTime = max(0, i0 - 0.000200)
                endTime = beginTime + 0.001
                integrationTime = endTime - beginTime
                nBins = int(np.round(obsFile.ticksPerSec * (endTime - beginTime) + 1))
                timeHgValues = np.zeros(nBins, dtype=np.int64)
                ymax = sys.float_info.max / 100.0
                for iRow in range(obsFile.nRow):
                    for iCol in range(obsFile.nCol):
                        gtpl = obsFile.getTimedPacketList(iRow, iCol, beginTime, integrationTime)
                        ts = (gtpl['timestamps'] - beginTime) * obsFile.ticksPerSec
                        ts64 = np.round(ts).astype(np.uint64)
                        tsBinner.tsBinner(ts64, timeHgValues)
                plt.clf()
                plt.plot(timeHgValues, label="data")
                x0 = (i0 - beginTime) * obsFile.ticksPerSec
                x1 = (i1 - beginTime) * obsFile.ticksPerSec
                plt.fill_between((x0, x1), (0, 0), (ymax, ymax), alpha=0.2, color='red')
                plt.yscale("symlog", linthreshy=0.9)
                plt.xlim(0, 1000)
                plt.ylim(-0.1, 300)
                tick0 = int(np.round(i0 * obsFile.ticksPerSec))
                plotfn = "cp-%05d-%s-%s-%s-%09d" % (timeHgValues.sum(), run, obsDate, seq, tick0)
                plt.title(plotfn)
                plt.legend()
                # set the label before saving so it actually appears in the output file
                plt.xlabel("nSigma=%d stride=%d threshold=%d" %
                           (int(self.s['nSigma']), int(self.s['stride']), int(self.s['threshold'])))
                plt.savefig(plotfn + ".png")
                print "plotfn=", plotfn
        os.system("convert -delay 0 `ls -r cp*png` cp.gif")


if __name__ == '__main__':
    if len(sys.argv) > 1:
        path = sys.argv[1]
    else:
        path = "."
cosmicRun = CosmicRun(path) cosmicRun.findv1() print "now call makemovie1" cosmicRun.makemovie1() print "glorious success"
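The constructor above parses settings.par as flat key = value pairs. For reference, a hypothetical settings.par exercising the keys the script reads (every value here is invented, not taken from the source):

# settings.par (hypothetical example; all values invented)
# run = PAL2012
# sundownDate = 20121208
# obsDate = 20121209
# seq5 = 121229 121329 121429
# beginTime = 0
# endTime = 300
# stride = 10
# threshold = 100
# nSigma = 7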
Contains no added sugars or artificial ingredients such as sweeteners, colorings or flavorings. Select ingredients in Super Seed are fermented using Garden of Life's proprietary Poten-Zyme process to make nutrients more available to the body. Supports healthy elimination of toxins: because the fiber is certified organic, it does not reintroduce toxins to the digestive tract. Helps maintain healthy blood sugar levels and healthy cholesterol levels that are already in the normal range. Legal Disclaimer: These statements have not been evaluated by the FDA. This product is not intended to diagnose, treat, cure or prevent any disease.
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import datetime, timedelta
from airflow.operators import PythonOperator
from airflow.hooks import RedisHook
from airflow.models import Variable
import logging
import itertools
import traceback
import socket
import sys  # needed by the socket-timeout exit path below (missing in the original)
from etl_tasks_functions import get_time
from etl_tasks_functions import subtract_time
import time

default_args = {
    'owner': 'wireless',
    'depends_on_past': False,
    'start_date': datetime.now() - timedelta(minutes=2),
    'email': ['[email protected]'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=1),
    'provide_context': True,
    'catchup': False,
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}

OKGREEN = '\033[92m'
NC = '\033[0m'

redis_hook_4 = RedisHook(redis_conn_id="redis_hook_4")

def service_etl(parent_dag_name, child_dag_name, start_date, schedule_interval, celery_queue):
    config = eval(Variable.get('system_config'))
    dag_subdag = DAG(
        dag_id="%s.%s" % (parent_dag_name, child_dag_name),
        schedule_interval=schedule_interval,
        start_date=start_date,
    )

    # TODO: Create hook for using socket with Pool
    def get_from_socket(site_name, query, socket_ip, socket_port):
        """
        Function_name : get_from_socket (collect the query data from the socket)

        Args: site_name (poller on which monitoring data is to be collected)

        Kwargs: query (query for which data to be collected from nagios.)

        Return : None

        raise Exception: SyntaxError, socket error
        """
        #socket_path = "/omd/sites/%s/tmp/run/live" % site_name
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        machine = site_name[:-8]
        s.connect((socket_ip, socket_port))
        #s.connect(socket_path)
        s.send(query)
        s.shutdown(socket.SHUT_WR)
        output = ''
        wait_string = ''
        while True:
            try:
                out = s.recv(100000000)
            except socket.timeout, e:
                err = e.args[0]
                print 'socket timeout ..Exiting'
                if err == 'timed out':
                    sys.exit(1)
            if not len(out):
                break
            output += out
        return output

    def extract_and_distribute(*args, **kwargs):
        st = get_time()
        try:
            service_query = Variable.get('service_query')  # to get LQL to extract service
            device_slot = Variable.get("device_slot_service")  # the number of devices to be made into 1 slot
            site_ip = kwargs.get('params').get("ip")
            site_port = kwargs.get('params').get("port")
        except ValueError:
            logging.info("Unable to fetch Service Query Failing Task")
            return 1
        task_site = kwargs.get('task_instance_key_str').split('_')[4:7]
        site_name = "_".join(task_site)
        start_time = float(Variable.get("data_service_extracted_till_%s" % site_name))
        end_time = time.time()
        # NOTE: this hard-coded LQL overrides the 'service_query' Variable fetched above
        service_query = "GET services\nColumns: host_name host_address service_description service_state " + \
            "last_check service_last_state_change host_state service_perf_data\nFilter: service_description ~ _invent\n" + \
            "Filter: service_description ~ _status\n" + \
            "Filter: service_description ~ Check_MK\n" + \
            "Filter: service_description ~ PING\n" + \
            "Filter: service_description ~ .*_kpi\n" + \
            "Filter: service_description ~ wimax_ss_port_params\n" + \
            "Filter: service_description ~ wimax_bs_ss_params\n" + \
            "Filter: service_description ~ wimax_aggregate_bs_params\n" + \
            "Filter: service_description ~ wimax_bs_ss_vlantag\n" + \
            "Filter: service_description ~ wimax_topology\n" + \
            "Filter: service_description ~ cambium_topology_discover\n" + \
            "Filter: service_description ~ mrotek_port_values\n" + \
            "Filter: service_description ~ rici_port_values\n" + \
            "Filter: service_description ~ rad5k_topology_discover\n" + \
            "Or: 14\nNegate:\n" + \
            "Filter: last_check >= %s\n" % start_time + \
            "Filter: last_check < %s\n" % end_time + \
            "OutputFormat: python\n"
        try:
            start_time = get_time()
            service_data = eval(get_from_socket(site_name, service_query, site_ip, site_port))
            logging.info("Fetch Time %s" % subtract_time(start_time))
            Variable.set("data_service_extracted_till_%s" % site_name, end_time)
            #for x in service_data:
            #    if x[1] == '10.175.161.2':
            #        print x
        except Exception:
            logging.error(OKGREEN + "Unable to fetch the data from Socket")
            logging.error('SITE:' + str(site_name) + "\n PORT : " + str(site_port) + "\n IP: " + str(site_ip) + NC)
            service_data = []
            traceback.print_exc()
        if len(service_data) > 0:
            logging.info("The length of Data received " + str(len(service_data)))
            group_iter = [iter(service_data)] * int(device_slot)
            device_slot_data = list(([e for e in t if e != None]
                                     for t in itertools.izip_longest(*group_iter)))
            i = 1
            logging.info("Service Slot created in redis -> " + str(len(device_slot_data)))
            for slot in device_slot_data:
                redis_hook_4.rpush("sv_" + site_name + "_slot_" + str(i), slot)
                logging.info("Pushing %s" % ("sv_" + site_name + "_slot_" + str(i)))
                i += 1
            Variable.set("sv_%s_slots" % (site_name), str(len(device_slot_data)))
            logging.info("Total Time %s" % subtract_time(st))
        else:
            logging.info("Unable to extract data for time %s to %s " % (start_time, end_time))

    for machine in config:
        for site in machine.get('sites'):
            PythonOperator(
                task_id="Service_extract_%s" % (site.get('name')),
                provide_context=True,
                python_callable=extract_and_distribute,
                params={"ip": machine.get('ip'), "port": site.get('port')},
                dag=dag_subdag,
                queue=celery_queue
            )
    return dag_subdag
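service_etl is a subdag factory. A hedged sketch of how it might be wired into a parent DAG with Airflow 1.x's SubDagOperator; the parent DAG name, schedule, and queue below are assumptions, not taken from the source:

# Hypothetical wiring of the factory above into a parent DAG.
from airflow.models import DAG
from airflow.operators.subdag_operator import SubDagOperator
from datetime import datetime

PARENT = 'etl_pipeline'  # made-up parent DAG id
parent_dag = DAG(dag_id=PARENT, start_date=datetime(2016, 1, 1),
                 schedule_interval='*/5 * * * *')

service_subdag = SubDagOperator(
    task_id='service_etl',
    subdag=service_etl(PARENT, 'service_etl',
                       parent_dag.start_date, parent_dag.schedule_interval,
                       celery_queue='default'),
    dag=parent_dag,
)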
This cable is designed to extend an existing 1/8" (3.5mm) connection from a digital audio player (such as an iPod®) or similar device to a stereo system. It is ideal for connecting an iPod®, iPhone®, laptop, or similar device to a hi-fi receiver using a 1/8" to 1/8" cable. It can also be used to hook up your digital audio player to guitar amplifiers and power amplifiers that have RCA connections but no 1/8" input. With your purchase you will receive one 6-inch stereo 3.5mm female to dual RCA female splitter audio cable, as pictured and described above. Order splitter cables from Seismic Audio for trouble-free, crystal clear sound.
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import *
from ti import models
import facebook
import logging
from dateutil import parser
import urllib
import urlparse
import cgi
import subprocess
import warnings
import time
import random
import string
import datetime

# hide facebook deprecation warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)

# global logging setup
logging.basicConfig(level=logging.INFO)

class PageCrawler(object):

    def __init__(self, graph):
        self._log = logging.getLogger('crawler')
        self._log.info("Initializing")
        self.maxpages = 20
        self.pagecount = 0
        self.graph = graph
        self.posts = []

    def retrievePageContent(self, pageid, anon):
        self.abort = False
        self.anon = anon
        graph = self.graph
        log = self._log
        pageinfo = graph.get_object(pageid)
        log.info("Processing page \"%s\" (id %s, category %s, likes: %s)" %
                 (pageinfo["username"], pageinfo["id"], pageinfo["category"], pageinfo["likes"]))
        try:
            pagefeed = graph.get_object(pageid + "/feed")
            self.processFeed(pagefeed)
        except Exception, e:
            self._log.warn(e)
            raise e
        log.info("Pages processed: %s" % self.pagecount)
        log.info("Posts: %s" % len(self.posts))
        texts = []
        types = []
        ccount = 0
        clikes = 0
        for post in self.posts:
            ccount = ccount + len(post.comments)
            clikes = clikes + post.likecount
            for comment in post.comments:
                texts.append(comment.content)
                clikes = clikes + comment.likecount
            if not post.type in types:
                types.append(post.type)
        log.info("Comments: %s" % ccount)
        log.info("Post types: %s" % ','.join(types))
        textcharcount = 0
        wordcount = 0
        to_be_removed = ".,:!"
        for text in texts:
            textcharcount = textcharcount + len(text)
            s = text
            for c in to_be_removed:
                s = s.replace(c, '')
            wordcount = wordcount + len(s.split())
        log.info("Average comment length: %s" % (float(textcharcount) / float(len(texts))))
        log.info("Average words per comment: %s" % (float(wordcount) / float(len(texts))))
        log.info("Unique commenters: %s" % len(anon.usedaliases))
        log.info("Trying to populate db")
        # page owner
        p_owner, created = models.User.objects.get_or_create(
            id=long(pageinfo["id"]),
            defaults={'fullname': pageinfo["name"],
                      'alias': ("page-%s" % pageinfo["username"])})
        p_owner.save()
        if created:
            log.info("Created user entry for the page. %s" % pageinfo["id"])
        else:
            log.info("Using existing page user entry. %s" % pageinfo["id"])
        # page
        p = None
        try:
            p = models.Page.objects.get(fb_page_id=pageinfo["id"])
            log.info("Page entry already exists.")
        except ObjectDoesNotExist:
            log.info("New page entry required. Creating.")
            p = models.Page.objects.create(fb_page_id=pageinfo["id"],
                                           fb_page_name=pageinfo["name"],
                                           last_updated=datetime.datetime.today(),
                                           owner=p_owner)
            p.save()
        # users
        for user_id in self.anon.userlist:
            userinfo = self.anon.userlist[user_id]
            userobj, created = models.User.objects.get_or_create(
                id=long(user_id),
                defaults={'fullname': userinfo["name"], 'alias': userinfo["alias"]})
            if created:
                userobj.save()
                log.info("Created new user #%s (alias: %s)" % (userobj.id, userobj.alias))
        # posts
        for post in self.posts:
            postts = parser.parse(post.timestamp).replace(tzinfo=None)
            postuser = models.User.objects.get(id=long(post.user["id"]))
            postobj = None
            created = False
            try:
                postobj, created = models.Post.objects.get_or_create(
                    fb_post_id=post.id,
                    defaults={'posttype': post.type, 'text': post.content,
                              'createtime': postts, 'parent': None, 'page': p,
                              'createuser': postuser, 'likes': post.likecount})
            except Exception, e:
                # ignore UTF-(>8) postings
                log.warn("Failed to import post")
                log.warn(e)
            if created:
                postobj.save()
                log.info("Post %s saved to database" % post.id)
            else:
                log.info("Post %s already stored" % post.id)
            for comment in post.comments:
                commentts = parser.parse(comment.timestamp).replace(tzinfo=None)
                commentuser = models.User.objects.get(id=long(comment.user["id"]))
                commentobj = None
                created = False
                try:
                    commentobj, created = models.Post.objects.get_or_create(
                        fb_post_id=comment.id,
                        defaults={'posttype': comment.type, 'text': comment.content,
                                  'createtime': commentts, 'parent': postobj, 'page': p,
                                  'createuser': commentuser, 'likes': comment.likecount})
                except Exception, e:
                    # ignore UTF-(>8) postings
                    log.warn("Failed to import comment")
                    log.warn(e)
                if created:
                    commentobj.save()
                    log.info("Comment %s saved to database" % comment.id)
                else:
                    log.info("Comment %s already stored" % comment.id)

    def processComments(self, pagedata, targetlist, postdata, isPage=True):
        graph = self.graph
        log = self._log
        if isPage:
            if len(postdata['comments']) == 0:
                # fixed: the original referenced post['id'], which is undefined here
                log.info("Post %s does not have comments. Aborting." % postdata['id'])
                return
            # add comments that are already contained in the page feed
            self.addData(postdata["comments"]["data"], targetlist)
            log.info("Added comments from page feed (length: %s)" % len(targetlist))
            if len(postdata['comments']) == 0:
                return
        else:
            if 'data' in pagedata:
                self.addData(pagedata['data'], targetlist)
                log.info("Added data for comment page (new length: %s)" % len(targetlist))
        log.info("Post %s contains %s comments." % (postdata['id'], len(postdata['comments'])))
        if isPage:
            parent = postdata['comments']
        else:
            parent = pagedata
        if 'paging' in parent and 'next' in parent['paging']:
            nextpage = parent['paging']['next']
            nextpage, nextpage_args = self.getGraphRequest(nextpage)
            log.info('Found comment paging link: %s' % nextpage)
            commentfeed = graph.request(nextpage, nextpage_args)
            time.sleep(1)
            self.processComments(commentfeed, targetlist, postdata, isPage=False)

    def getGraphRequest(self, nextpage):
        # guard added: the original left nextpage_args unset when the URL did
        # not match the graph.facebook.com prefix
        nextpage_args = None
        if nextpage.startswith("https://graph.facebook.com/"):
            print nextpage
            nextpage = urlparse.urlparse(nextpage)
            qs = cgi.parse_qs(nextpage.query)
            print qs
            #del qs['access_token']
            nextpage = nextpage.path  #+ "?" + urllib.urlencode(qs, True)
            nextpage = nextpage[1:]
            nextpage_args = qs
        return nextpage, nextpage_args

    def processFeed(self, pagefeed):
        graph = self.graph
        log = self._log
        self.maxpages = self.maxpages - 1
        if self.maxpages <= 0:
            self.abort = True
            log.info("Not fetching more pages. Maximum exceeded.")
        self.pagecount = self.pagecount + 1
        try:
            nextpage = pagefeed["paging"]["next"]
            nextpage, nextpage_args = self.getGraphRequest(nextpage)
        except KeyError:
            # no next page
            log.info("Hit last page. Aborting.")
            self.abort = True
        pagedata = pagefeed["data"]
        lpd = len(pagedata)
        log.info("Processing %s feed items" % lpd)
        self.addData(pagedata, self.posts)
        if lpd == 0:
            log.info("Hit empty data response. Aborting.")
            self.abort = True
        if not self.abort:
            log.info("Requesting next page of data <%s>" % nextpage)
            pagefeed = graph.request(nextpage, nextpage_args)
            time.sleep(1)
            self.processFeed(pagefeed)

    def addData(self, data, target):
        for postdata in data:
            id = postdata["id"]
            try:
                type = postdata["type"]
            except:
                type = "comment"
            user = dict(id=postdata["from"]["id"], name=postdata["from"]["name"])
            self.anon.getUserId(user)  # add to userlist
            content = ""
            try:
                content = postdata["message"]
            except:
                pass
            try:
                content = postdata["story"]
            except:
                pass
            timestamp = postdata["created_time"]
            likecount = 0
            try:
                likecount = len(postdata["likes"]["data"])
            except:
                pass
            p = Post(id, type, user, content, timestamp, likecount, self.anon)
            comments = None
            try:
                comments = postdata["comments"]["data"]
            except:
                pass
            if comments is not None:
                self.processComments(data, p.comments, postdata)
            for comment in p.comments:
                comment.post = p
            target.append(p)

    def gatherUserData(self, user):
        log = self._log
        graph = self.graph
        # NOTE: kept from the original; this also skips users whose gender is
        # still None, not only those already gathered
        if user.gender is None or user.gender != '':
            return  # already gathered
        try:
            userinfo = graph.get_object("/" + str(user.id))
            if 'gender' in userinfo:
                user.gender = userinfo['gender']
            else:
                user.gender = 'unknown'
            if 'locale' in userinfo:
                user.locale = userinfo['locale']
        except ValueError:
            print "Invalid data."
        user.save()

    def retrievePageUsers(self, pageid):
        self.abort = False
        graph = self.graph
        log = self._log
        page = models.Page.objects.get(id=pageid)
        pageinfo = graph.get_object("/" + page.fb_page_id)
        log.info("Processing page \"%s\" (id %s, category %s, likes: %s)" %
                 (pageinfo["username"], pageinfo["id"], pageinfo["category"], pageinfo["likes"]))
        pageuser_ids = models.Post.objects.filter(page__exact=page).values('createuser').distinct()
        pageusers = models.User.objects.filter(id__in=pageuser_ids)
        idx = 0
        pageusercount = pageusers.count()
        for user in pageusers:
            idx = idx + 1
            print "[%s/%s] User id %s" % (idx, pageusercount, user.id)
            self.gatherUserData(user)

class AnonymizeUsers(object):

    def __init__(self):
        self.userlist = dict()
        self.usedaliases = []

    def getUserById(self, user_id):
        if user_id in self.userlist:
            return self.userlist[user_id]

    def getUserByName(self, user_name):
        for user_key in self.userlist:
            user = self.userlist[user_key]
            if user["name"] == user_name:
                return user
        return None

    def getUserId(self, user):
        if not user["id"] in self.userlist:
            self.userlist[user["id"]] = dict(id=user["id"], name=user["name"], alias=None)
            newalias = None
            while newalias is None or newalias in self.usedaliases:
                newalias = self.generateAlias()
            self.userlist[user["id"]]["alias"] = newalias
            self.usedaliases.append(newalias)
        return self.userlist[user["id"]]["alias"]

    def generateAlias(self):
        #http://stackoverflow.com/questions/2257441/python-random-string-generation-with-upper-case-letters-and-digits
        newalias = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(7))
        return newalias

class Post(object):

    def __init__(self, id, type, user, content, timestamp, likecount, anoninstance):
        self.id = id
        self.user = user
        self.type = type
        self.content = content
        self.timestamp = timestamp
        self.likecount = likecount
        self.comments = []
        self.post = None
        self.anon = anoninstance

    def __str__(self):
        return "Post[id=%s;type=%s;user=%s(%s):%s:;ts=%s;likes=%s;comments=%s]:\r\n%s" % (
            self.id, self.type, self.user["name"], self.user["id"],
            self.anon.getUserId(self.user), self.timestamp, self.likecount,
            len(self.comments), self.content)
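A hedged driver sketch for the crawler above, assuming the old facebook-sdk GraphAPI client; the access token and page name are made up:

# Hypothetical driver (token and page name are invented).
import facebook

graph = facebook.GraphAPI(access_token='YOUR_ACCESS_TOKEN')
anon = AnonymizeUsers()
crawler = PageCrawler(graph)
crawler.retrievePageContent('someopenpage', anon)
print "Crawled %s posts" % len(crawler.posts)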
The new Chevrolet Trax range will kick off with a 1.6-litre petrol engine, along with a 1.4-litre turbocharged petrol and a 1.7-litre diesel. For those who live in rural areas or have to cope with tough conditions, the all-wheel-drive version is offered with the 1.4 turbo, which produces a useful 140PS, and with the 130PS 1.7-litre diesel. All engines are combined with manual transmissions and are fitted with stop/start to save fuel and reduce emissions – which start at 120g/km for the diesel. Practicality should be reasonable – the boot is 358 litres, which isn't bad, and there are 60/40 folding rear seats, while the front passenger seat can be folded flat to carry long items. Folding the seats increases maximum load space to 1,370 litres. Two trim levels will be offered – LS and LT – but even LS models get a decent level of kit, including alloy wheels, air con, parking sensors, automatic headlamps and Bluetooth. Higher-specification LT versions get larger wheels, a parking camera, an auto-dimming rear-view mirror and MyLink infotainment, which brings data from your smartphone onto the in-car screen. Exact pricing details are to be announced, along with more detailed information on emissions and economy, nearer the on-sale date.
import yt
import numpy as np
from galaxy_analysis import Galaxy
from galaxy_analysis.utilities import utilities
from astroML.time_series import ACF_EK
from astroML.time_series import ACF_scargle

from matplotlib import rc

fsize = 17
rc('text', usetex=False)
rc('font', size=fsize)  #, ftype=42)
line_width = 3
point_size = 30

import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt

def simple_plot(x, y, name):
    fig, ax = plt.subplots()
    ax.plot(x, y, lw=3, color='black')
    plt.minorticks_on()
    plt.tight_layout()
    fig.set_size_inches(8, 8)
    plt.savefig(name)
    plt.close()
    return

#
# testing script for now
#
gal = Galaxy('DD0126')

# select a random SF region
n = gal.df['number_density']
T = gal.df['temperature']
select = (n >= 200.0) * (T < 200.0)
x, y, z = gal.df['x'][select], gal.df['y'][select], gal.df['z'][select]
pos = np.array([x[0], y[0], z[0]])

rmax = 50.0
dr = 5.0
sphere = gal.ds.sphere(pos, (rmax, 'pc'))  # do small for now

x = sphere['spherical_radius'].convert_to_units('pc').value
y = sphere['Fe_Fraction'].value

p = yt.ProfilePlot(sphere, "radius", ["Fe_Fraction", 'Fe_over_H', 'O_over_Fe'],
                   weight_field='cell_volume', accumulation=False)
p.set_unit('radius', 'pc')
p.save()

bins = np.arange(0.0, rmax + dr, dr)
#acf, bins = ACF_scargle(x, y, dy = 0.0000001, n_omega = 2**12, omega_max = np.pi/5.0) #, bins = bins)
acf, err, bins = ACF_EK(x, y, dy=1.0E-8, bins=bins)
print(acf)
print(bins)
simple_plot(0.5 * (bins[1:] + bins[:-1]), acf, 'Fe_Fraction_acf.png')

print('----------------------------------------------------------')
print('----------------------------------------------------------')

x = sphere['spherical_radius'].convert_to_units('pc').value
y = sphere['Fe_over_H'].value
bins = np.arange(0.0, rmax + dr, dr)
#acf, bins = ACF_scargle(x, y, dy = 0.0001, n_omega = 2**12, omega_max = np.pi/5.0) #, bins = bins)
acf, err, bins = ACF_EK(x, y, dy=0.00001, bins=bins)
simple_plot(0.5 * (bins[1:] + bins[:-1]), acf, 'Fe_over_H_acf.png')
print(acf)
print(bins)
print("-------------------------------------------------------------------")
print("-------------------------------------------------------------------")
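ACF_EK estimates an autocorrelation function from irregularly sampled data with errors. A minimal self-contained sketch on synthetic data; the sampling, noise level, and bin edges below are arbitrary choices, not taken from the script:

# Synthetic demo of astroML's ACF_EK (all parameters invented).
import numpy as np
from astroML.time_series import ACF_EK

t = np.sort(100.0 * np.random.random(500))            # irregular sample positions
y = np.sin(2 * np.pi * t / 20.0) + 0.1 * np.random.randn(500)
dy = 0.1 * np.ones_like(y)                            # per-point uncertainties

bins = np.linspace(0.0, 50.0, 11)                     # lag bins
acf, err, bins = ACF_EK(t, y, dy, bins=bins)          # ACF, its error, bin edges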
Free Car Insurance Quotes from the BEST Insurance Agencies in Vacaville, California (CA)! It is important to understand how insurers calculate your mileage. Your out-of-pocket cost (deductible) and factors such as your occupation can both affect the type of cheap car insurance in CA policies available to you. New cars are considered a higher risk, and careless habits count against you. Insurance is essentially risk management, and carrying unpaid credit card debt does not help. You want a provider that handles that responsibility well, works with you on any vehicle claim, and is prompt. One more thing: you can avoid redundant coverage, which keeps a policy affordable. Comprehensive coverage pays when your car is damaged or stolen, and a good number of years of clean driving helps too. No two insurance company policies are identical. It is difficult, but worth sorting out before purchasing a new car. Sometimes, as you can imagine, all of this is taken into consideration by insurance companies, so it pays to get a lot of different quotes from potential insurers; choosing the right one is probably the trickiest part. Although prices vary from person to person, it is fairly easy to get some quotes before you purchase your insurance. Take the time to research before you sign: incorrect or incomplete information can leave you under-covered after an accident, while the Internet makes it easy to locate car insurance quotes in California and essentially shield the family from financial burden. Whatever type of vehicle you drive, get quotes from multiple A-rated companies. For uninsured/underinsured motorist coverage, $25,000/$50,000 is the typical starting point. Nationwide insurance carriers will ask for some personal information, and comparing 10 different insurance companies may reveal that at this point you do not have sufficient insurance. A little bit of comparison after a car accident can actually help you save, since states cap the maximum rate insurers can charge. If your car is stolen or totaled, those costs matter, so it is imperative that your Vacaville broker gives you a full list of quotes; you can start saving now. Without adequate coverage, being involved in a crash may leave you facing a lifetime of debt. Remember, your actions speak louder than your words.
import sublime, sublime_plugin

class SettingsWrapper():
    SETTINGS_FILE_NAME = 'SwitchSettings.sublime-settings'
    SS_CURRENT_SETTINGS_NAME = 'current_settings_name'
    SS_SETTINGS_NAMES = 'settings_names'
    SS_SETTINGS_CONTENTS = 'settings_contents'

    def __init__(self):
        self._buf = None
        self.settings = sublime.load_settings(SettingsWrapper.SETTINGS_FILE_NAME)
        self.settings.add_on_change(SettingsWrapper.SS_CURRENT_SETTINGS_NAME, self.on_change)

    def __save(self):
        import os
        if self._buf is None:
            return False
        preferences = os.path.join(sublime.packages_path(), 'User', 'Preferences.sublime-settings')
        if not os.path.exists(preferences):
            with open(preferences, mode='w') as f:
                pass
        with open(preferences, mode='r') as f:
            preferences_settings = sublime.decode_value(f.read())
        contents = self.get_settings_contents()
        contents[self._buf] = preferences_settings
        self.settings.set(SettingsWrapper.SS_SETTINGS_CONTENTS, contents)
        self._buf = None
        return True

    def __overwrite(self):
        import os
        cur_name = self.get_current_settings()
        contents = self.get_settings_contents()
        current_content = contents[cur_name]
        preferences = os.path.join(sublime.packages_path(), 'User', 'Preferences.sublime-settings')
        with open(preferences, mode='w') as f:
            f.write(sublime.encode_value(current_content, True))
        return True

    def on_change(self):
        from . import switch_settings_util as ss_util
        if self.__save() and self.__overwrite():
            self.save_ss_settings()
            ss_util.reboot()

    def save_ss_settings(self):
        sublime.save_settings(SettingsWrapper.SETTINGS_FILE_NAME)

    def set_buffer(self, buf):
        self._buf = buf

    def get_current_settings(self):
        return self.settings.get(SettingsWrapper.SS_CURRENT_SETTINGS_NAME, 'Switch_Settings_Default')

    def set_current_settings(self, name):
        self.settings.set(SettingsWrapper.SS_CURRENT_SETTINGS_NAME, name)

    def get_settings(self):
        return self.settings.get(SettingsWrapper.SS_SETTINGS_NAMES, [])

    def add_settings(self, name):
        names = self.get_settings()
        names.append(name)
        self.settings.set(SettingsWrapper.SS_SETTINGS_NAMES, names)

    def remove_settings(self, name):
        names = self.get_settings()
        names.remove(name)
        self.settings.set(SettingsWrapper.SS_SETTINGS_NAMES, names)

    def get_settings_contents(self):
        return self.settings.get(SettingsWrapper.SS_SETTINGS_CONTENTS, {})

    def add_settings_content(self, name, item):
        contents = self.get_settings_contents()
        contents[name] = item
        self.settings.set(SettingsWrapper.SS_SETTINGS_CONTENTS, contents)

    def pop_settings_content(self, name):
        contents = self.get_settings_contents()
        tmp = contents.pop(name)
        self.settings.set(SettingsWrapper.SS_SETTINGS_CONTENTS, contents)
        return tmp
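A hypothetical command showing how the wrapper might be driven; the command name and flow below are assumptions for illustration, not part of the plugin:

# Hypothetical Sublime Text command using SettingsWrapper.
import sublime_plugin

class SwitchSettingsExampleCommand(sublime_plugin.ApplicationCommand):
    def run(self, name):
        wrapper = SettingsWrapper()
        # snapshot the current Preferences under the current settings name
        wrapper.set_buffer(wrapper.get_current_settings())
        if name not in wrapper.get_settings():
            wrapper.add_settings(name)
        # triggers on_change, which saves the old set and writes the new one
        wrapper.set_current_settings(name)
        wrapper.save_ss_settings()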
Domino's is running a carryout deal where you can get large two-topping hand-tossed pizzas for $5.99 each through Sunday, February 21, 2016. A two-topping large pizza normally goes for $14.49 at my local Domino's. Unlike some of their previous deals, the $5.99 carryout deal is not an "online only" deal: you can still take advantage of it when you order by phone or in person.
'''
The MIT License (MIT)

Copyright (c) 2014 NTHUOJ team

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import getpass

# Setting nthuoj.ini
host = raw_input("Mysql host: ")
db = raw_input("Mysql database: ")
user = raw_input("Please input your mysql user: ")
pwd = getpass.getpass()

# Re-write nthuoj.ini file
iniFile = open("nthuoj.ini", "w")
iniFile.write("[client]\n")
iniFile.write("host = %s\n" % host)
iniFile.write("database = %s\n" % db)
iniFile.write("user = %s\n" % user)
iniFile.write("password = %s\n" % pwd)
iniFile.write("default-character-set = utf8\n")
iniFile.close()

# Install needed library
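For reference, with example answers (host "localhost", database "nthuoj", user "oj", password "secret" — all invented), the script above would write an nthuoj.ini like:

# nthuoj.ini (hypothetical output; every value invented)
#
# [client]
# host = localhost
# database = nthuoj
# user = oj
# password = secret
# default-character-set = utf8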
Gig Harbor Literary Society: "Sarah Canary" The September meeting will be held on Tuesday, September 4th at 6:00 p.m. at the Harbor History Museum. The book for the September meeting is Sarah Canary by Karen Joy Fowler. For those looking ahead... In October we will be discussing The Sasquatch Hunter's Almanac by Sharma Shields.
# -*- coding: utf-8 -*-
__author__ = 'Ostico <[email protected]>'

from pyorient.exceptions import PyOrientBadMethodCallException
from ..constants import CLUSTER_TYPE_PHYSICAL, DATA_CLUSTER_ADD_OP, \
    DATA_CLUSTER_COUNT_OP, FIELD_BOOLEAN, FIELD_BYTE, FIELD_LONG, FIELD_SHORT, \
    FIELD_STRING, DATA_CLUSTER_DATA_RANGE_OP, DATA_CLUSTER_DROP_OP, CLUSTER_TYPES
from ..utils import need_db_opened
from .base import BaseMessage


#
# DATACLUSTER ADD
#
# Add a new data cluster.
#
# Request: (name:string)(cluster-id:short - since 1.6 snapshot)
# Response: (new-cluster:short)
#
# Where: type is one of "PHYSICAL" or "MEMORY". If cluster-id is -1
# (recommended value) new cluster id will be generated.
#
class DataClusterAddMessage(BaseMessage):

    def __init__(self, _orient_socket):
        super( DataClusterAddMessage, self ).__init__(_orient_socket)

        self._cluster_name = ''
        self._cluster_type = CLUSTER_TYPE_PHYSICAL
        self._cluster_location = 'default'
        self._datasegment_name = 'default'
        self._new_cluster_id = -1

        # order matters
        self._append( ( FIELD_BYTE, DATA_CLUSTER_ADD_OP ) )

    @need_db_opened
    def prepare(self, params=None):
        try:
            # mandatory if not passed by method
            self._cluster_name = params[0]

            # mandatory if not passed by method
            self.set_cluster_type( params[1] )

            self._cluster_location = params[2]
            self._datasegment_name = params[3]
        except( IndexError, TypeError ):
            # Use default for non existent indexes
            pass
        except ValueError:
            raise PyOrientBadMethodCallException(
                params[1] + ' is not a valid data cluster type', [] )

        if self.get_protocol() < 24:
            self._append( ( FIELD_STRING, self._cluster_type ) )
            self._append( ( FIELD_STRING, self._cluster_name ) )
            self._append( ( FIELD_STRING, self._cluster_location ) )
            self._append( ( FIELD_STRING, self._datasegment_name ) )
        else:
            self._append( ( FIELD_STRING, self._cluster_name ) )

        if self.get_protocol() >= 18:
            self._append( ( FIELD_SHORT, self._new_cluster_id ) )

        return super( DataClusterAddMessage, self ).prepare()

    def fetch_response(self):
        self._append( FIELD_SHORT )
        return super( DataClusterAddMessage, self ).fetch_response()[0]

    def set_cluster_name(self, _cluster_name):
        self._cluster_name = _cluster_name
        return self

    def set_cluster_type(self, _cluster_type):
        if _cluster_type in CLUSTER_TYPES:
            # user choice storage if present
            self._cluster_type = _cluster_type
        else:
            raise PyOrientBadMethodCallException(
                _cluster_type + ' is not a valid cluster type', [] )
        return self

    def set_cluster_location(self, _cluster_location):
        self._cluster_location = _cluster_location
        return self

    def set_datasegment_name(self, _datasegment_name):
        self._datasegment_name = _datasegment_name
        return self

    def set_cluster_id(self, _new_cluster_id):
        self._new_cluster_id = _new_cluster_id
        return self


#
# DATA CLUSTER COUNT
#
# Returns the number of records in one or more clusters.
#
# Request: (cluster-count:short)(cluster-number:short)*(count-tombstones:byte)
# Response: (records-in-clusters:long)
#
# Where:
#   cluster-count       the number of requested clusters
#   cluster-number      the cluster id of each single cluster
#   count-tombstones    the flag which indicates whether deleted records
#                       should be taken in account. It is applicable for
#                       autosharded storage only, otherwise it is ignored.
#   records-in-clusters is the total number of records found in the
#                       requested clusters
#
class DataClusterCountMessage(BaseMessage):

    def __init__(self, _orient_socket):
        super( DataClusterCountMessage, self ).__init__(_orient_socket)

        self._cluster_ids = []
        self._count_tombstones = 0

        # order matters
        self._append( ( FIELD_BYTE, DATA_CLUSTER_COUNT_OP ) )

    @need_db_opened
    def prepare(self, params=None):
        if isinstance( params, tuple ) or isinstance( params, list ):
            try:
                # mandatory if not passed by method
                # raise Exception if None
                if isinstance( params[0], tuple ) or isinstance( params[0], list ):
                    self._cluster_ids = params[0]
                else:
                    raise PyOrientBadMethodCallException(
                        "Cluster IDs param must be an instance of Tuple or List.", [] )

                self._count_tombstones = params[1]
            except( IndexError, TypeError ):
                # Use default for non existent indexes
                pass

        self._append( ( FIELD_SHORT, len(self._cluster_ids) ) )
        for x in self._cluster_ids:
            self._append( ( FIELD_SHORT, x ) )
        self._append( ( FIELD_BOOLEAN, self._count_tombstones ) )

        return super( DataClusterCountMessage, self ).prepare()

    def fetch_response(self):
        self._append( FIELD_LONG )
        return super( DataClusterCountMessage, self ).fetch_response()[0]

    def set_cluster_ids(self, _cluster_ids):
        self._cluster_ids = _cluster_ids
        return self

    def set_count_tombstones(self, _count_tombstones):
        self._count_tombstones = _count_tombstones
        return self


#
# DATA CLUSTER DATA RANGE
#
# Returns the range of record ids for a cluster.
#
# Request: (cluster-number:short)
# Response: (begin:long)(end:long)
#
class DataClusterDataRangeMessage(BaseMessage):

    def __init__(self, _orient_socket):
        super( DataClusterDataRangeMessage, self ).__init__(_orient_socket)

        self._cluster_id = 0
        self._count_tombstones = 0

        # order matters
        self._append( ( FIELD_BYTE, DATA_CLUSTER_DATA_RANGE_OP ) )

    @need_db_opened
    def prepare(self, params=None):
        if isinstance( params, int ):
            # mandatory if not passed by method
            self._cluster_id = params

        self._append( ( FIELD_SHORT, self._cluster_id ) )

        return super( DataClusterDataRangeMessage, self ).prepare()

    def fetch_response(self):
        self._append( FIELD_LONG )
        self._append( FIELD_LONG )
        return super( DataClusterDataRangeMessage, self ).fetch_response()

    def set_cluster_id(self, _cluster_id):
        self._cluster_id = _cluster_id
        return self


#
# DATA CLUSTER DROP
#
# Remove a cluster.
#
# Request: (cluster-number:short)
# Response: (delete-on-clientside:byte)
#
class DataClusterDropMessage(BaseMessage):

    def __init__(self, _orient_socket):
        super( DataClusterDropMessage, self ).__init__(_orient_socket)

        self._cluster_id = 0
        self._count_tombstones = 0

        # order matters
        self._append( ( FIELD_BYTE, DATA_CLUSTER_DROP_OP ) )

    @need_db_opened
    def prepare(self, params=None):
        if isinstance( params, int ):
            # mandatory if not passed by method
            self._cluster_id = params

        self._append( ( FIELD_SHORT, self._cluster_id ) )

        return super( DataClusterDropMessage, self ).prepare()

    def fetch_response(self):
        self._append( FIELD_BOOLEAN )
        return super( DataClusterDropMessage, self ).fetch_response()[0]

    def set_cluster_id(self, _cluster_id):
        self._cluster_id = _cluster_id
        return self


class Information(object):

    def __iter__(self):
        return self

    def next(self):  # Python 3: def __next__(self)
        if self._indexPosition >= len( self.dataClusters ):
            raise StopIteration
        else:
            self._indexPosition += 1
            return self.dataClusters[ self._indexPosition - 1 ]

    def __next__(self):
        return self.next()

    def __init__( self, params ):
        self._indexPosition = 0
        self._reverseMap = {}
        self._reverseIDMap = {}
        self.orientRelease = None
        self.version_info = {
            'major': None,
            'minor': None,
            'build': None
        }

        self.dataClusters = params[0]
        for ( position, cluster ) in enumerate( self.dataClusters ):
            if not isinstance( cluster[ 'name' ], str ):
                cluster[ 'name' ] = cluster[ 'name' ].decode()
            self._reverseMap[ str( cluster[ 'name' ] ) ] = [ position, cluster[ 'id' ] ]
            self._reverseIDMap[ cluster[ 'id' ] ] = [ position, str( cluster[ 'name' ] ) ]

        self.hiAvailabilityList = params[1][0]
        self._parse_version( params[1][1] )

    def _parse_version( self, param ):
        if not isinstance(param, str):
            param = param.decode()
        self.orientRelease = param
        try:
            version_info = self.orientRelease.split( "." )
            self.version_info[ 'major' ] = int( version_info[0] )
            self.version_info[ 'minor' ] = version_info[1]
            self.version_info[ 'build' ] = version_info[2]
        except IndexError:
            pass
        if "-" in self.version_info[ 'minor' ]:
            _temp = self.version_info[ 'minor' ].split( "-" )
            self.version_info[ 'minor' ] = int( _temp[0] )
            self.version_info[ 'build' ] = _temp[1]
        self.version_info[ 'build' ] = \
            self.version_info[ 'build' ].split( " ", 1 )[0]

    def get_class_position( self, cluster_name ):
        return self._reverseMap[ cluster_name.lower() ][1]

    def get_class_name( self, position ):
        return self._reverseIDMap[ position ][1]

    def __len__( self ):
        return len( self.dataClusters )
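These message classes are normally driven through pyorient's high-level client rather than instantiated directly. A hedged usage sketch; the server address, credentials, and cluster name below are made up:

# Hypothetical client session exercising the cluster messages above.
import pyorient

client = pyorient.OrientDB("localhost", 2424)
client.db_open("GratefulDeadConcerts", "admin", "admin")

cluster_id = client.data_cluster_add("my_cluster", pyorient.CLUSTER_TYPE_PHYSICAL)
count = client.data_cluster_count([cluster_id])   # records in the new cluster
client.data_cluster_drop(cluster_id)              # remove it again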
If you're on the hunt for last minute flight deals to Puerto Vallarta from Comox, look no further than WestJet. You may be pleased to know Licenciado Gustavo Díaz Ordaz International Airport isn't the only place we land; we're also proud to offer flights to more than 150 exciting destinations in Canada, the U.S., Mexico, Central America, the Caribbean and Europe. Beyond the irresistible destinations on offer, countless travellers choose WestJet time and again for our remarkable customer care, as well as our commitment to safety and comfort. So, when service and value are important factors for your next trip, choose WestJet. Are you past due for a vacation? Book last minute flight deals with WestJet today and you'll be in enchanting, astounding Puerto Vallarta in no time at all. We're here to get you there swiftly and comfortably, whether it's for business purposes or to enjoy fantastic attractions like Malecon, day spas or snorkelling. Fly to Puerto Vallarta from Comox with WestJet and make the most of a great guest experience.
""" A simple scene system. This implements a simple scene system, which combines different scenes or screens and allows you to switch between them. """ from .events import EventHandler __all__ = ["Scene", "SceneManager", "SCENE_ENDED", "SCENE_RUNNING", "SCENE_PAUSED" ] SCENE_ENDED = 0 SCENE_RUNNING = 1 SCENE_PAUSED = 2 class SceneManager(object): """A scene management system. The SceneManager takes care of scene transitions, preserving scene states and everything else to maintain and ensure the control flow between different scenes. """ def __init__(self): """Creates a new SceneManager.""" self.scenes = [] self.next = None self.current = None self.switched = EventHandler(self) def push(self, scene): """Pushes a new scene to the scene stack. The current scene will be put on the scene stack for later execution, while the passed scene will be set as current one. Once the newly pushed scene has ended or was paused, the previous scene will continue its execution. """ self.next = scene if self.current: self.scenes.append(self.current) def pop(self): """Pops a scene from the scene stack, bringing it into place for being executed on the next update.""" if len(self.scenes) == 0: return self.next = self.scenes.pop() def pause(self): """Pauses the currently running scene.""" if self.current: self.current.pause() def unpause(self): """Unpauses the current scene.""" if self.current: self.current.unpause() def update(self): """Updates the scene state. Updates the scene state and switches to the next scene, if any has been pushed into place. """ if self.next: # A scene is about to be started, finish the old one if self.current and self.current.is_running: self.current.end() self.current.manager = None self.current = self.next self.current.manager = self self.next = None self.switched() if self.current and self.current.has_ended: self.current.start() class Scene(object): """A simple scene state object used to maintain the application workflow based on the presentation of an application. """ def __init__(self, name=None): """Creates a new Scene.""" self.name = name self.manager = None self.state = SCENE_ENDED self.started = EventHandler(self) self.paused = EventHandler(self) self.unpaused = EventHandler(self) self.ended = EventHandler(self) def __repr__(self): states = ("ENDED", "RUNNING", "PAUSED") return "Scene(name='%s', state='%s')" % (self.name, states[self.state]) def start(self): """Executed, whenever the scene starts. This is usually invoked by the SceneManager and will update the scene's internal state and executes the started event. """ if self.state not in (SCENE_RUNNING, SCENE_PAUSED): self.state = SCENE_RUNNING self.started() def pause(self): """Executed, whenever the scene is paused. This is usually invoked by the SceneManager and will update the scene's internal state and executes the paused event. """ if self.state == SCENE_RUNNING: self.state = SCENE_PAUSED self.paused() def unpause(self): """Executed, whenever the scene is unpaused. This is usually invoked by the SceneManager and will update the scene's internal state and executes the unpaused event. """ if self.state == SCENE_PAUSED: self.state = SCENE_RUNNING self.unpaused() def end(self): """Executed, whenever the scene ends. This is usually invoked by the SceneManager and will update the scene's internal state and executes the ended event. 
""" if self.state != SCENE_ENDED: self.state = SCENE_ENDED self.ended() @property def is_running(self): """True, if the scene is currently running, False otherwise.""" return self.state == SCENE_RUNNING @property def is_paused(self): """True, if the scene is currently paused, False otherwise.""" return self.state == SCENE_PAUSED @property def has_ended(self): """True, if the scene has ended, False otherwise.""" return self.state == SCENE_ENDED
Is the plug-in motor better than other motors? I was wondering if it was better than the others, since I might buy it in a set. Please answer fast. That depends on the use. The plug-in motor is stronger and won't click under pressure, while the red and clear ones will. It is also good for gearing. In the long run I would only use a plug-in motor in rides and big models. By the way, what set are you going to get? I feel that the plug-in 12V motor is better because it is much quieter than the other motors and is very robust. If you are seeing this in an Instructable, go back to the page you were looking at, scroll down towards the bottom and click on Add Comment in the lower left hand area. Oh, go to knex.com and you should see a K'nex plug-in motor. It's a decent motor, and will work for most things. I recommend getting one if you don't have one already.
from core import *
import os
import unittest


class TestPaths(unittest.TestCase):

    def test_paths(self):
        self.assertEqual(clean_path("."), os.getcwd())
        self.assertEqual(clean_path("$HOME"), os.environ["HOME"])
        self.assertEqual(clean_path("~"), os.environ["HOME"])


class TestTemplates(unittest.TestCase):

    def test_file_template(self):
        pwd = os.path.abspath(os.path.dirname(__file__))
        for license in LICENSES:
            path = os.path.join(pwd, "template-%s.txt" % license)
            with open(path) as infile:
                self.assertEqual(infile.read(), load_file_template(path))

    def test_package_template(self):
        pwd = os.path.abspath(os.path.dirname(__file__))
        for license in LICENSES:
            path = os.path.join(pwd, "template-%s.txt" % license)
            with open(path) as infile:
                self.assertEqual(infile.read(), load_package_template(license))

    def test_extract_vars(self):
        for license in LICENSES:
            template = """Oh hey, {{ this }} is a {{ template }} test."""
            var_list = extract_vars(template)
            self.assertEquals(var_list, ["this", "template"])

    def test_license(self):
        context = {
            "year": "1981",
            "project": "lice",
            "organization": "Awesome Co.",
        }
        for license in LICENSES:
            template = load_package_template(license)
            rendered = template.replace("{{ year }}", context["year"])
            rendered = rendered.replace("{{ project }}", context["project"])
            rendered = rendered.replace("{{ organization }}", context["organization"])
            self.assertEqual(rendered, generate_license(template, context))

    def test_license_header(self):
        context = {
            "year": "1981",
            "project": "lice",
            "organization": "Awesome Co.",
        }
        for license in LICENSES:
            try:
                template = load_package_template(license, header=True)
                rendered = template.replace("{{ year }}", context["year"])
                rendered = rendered.replace("{{ project }}", context["project"])
                rendered = rendered.replace("{{ organization }}", context["organization"])
                self.assertEqual(rendered, generate_license(template, context))
            except IOError:
                pass  # it's okay to not find templates


if __name__ == '__main__':
    unittest.main()
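The tests assume a {{ var }} placeholder convention. Illustrative stand-ins for the two helpers exercised above, written to match the behavior the tests expect (these are sketches, not the actual core implementations):

# Sketch of the behavior the tests rely on (not lice's real code).
import re

def extract_vars_sketch(template):
    # pull out placeholder names in order of appearance
    return re.findall(r"\{\{ (\w+) \}\}", template)

def generate_license_sketch(template, context):
    # substitute every context key into its placeholder
    for key, value in context.items():
        template = template.replace("{{ %s }}" % key, value)
    return template

assert extract_vars_sketch("Oh hey, {{ this }} is a {{ template }} test.") == ["this", "template"]
assert generate_license_sketch("(c) {{ year }}", {"year": "1981"}) == "(c) 1981"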
Rowan John Harrington (born 28 January 1987), better known by his stage name Secondcity, is an American-born British DJ and producer, best known for his song "I Wanna Feel", which peaked at number 1 on the UK Singles Chart. Harrington was part of the house duo Taiki & Nulight before leaving to focus on his solo career. His production partner, Erka Chinbayar, has since changed the alias to Taiki Nulight and continues to produce under that moniker. In May 2014 he released his debut single "I Wanna Feel"; the song topped the official midweek BBC Radio 1 singles chart after being released on 25 May 2014 and has over 14,000,000 views on YouTube. On 1 June 2014 the song entered the UK Singles Chart and the UK Dance Chart at number 1. He has since been championed by Disclosure and has collaborated with Route 94 for the third time. The follow-up single to "I Wanna Feel", titled "What Can I Do", premiered on 26 July 2014 and features vocals from Ali Love. "What Can I Do" is also the title of a single by The Black Belles, released on Third Man Records as part of the label's 'Blue Series'; it features a cover of The Knickerbockers song "Lies".
# MIT License
#
# Copyright (c) 2017 Derek Selander
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import lldb
import shlex
import optparse
import os
import textwrap
from stat import *


def __lldb_init_module(debugger, internal_dict):
    debugger.HandleCommand(
        'command script add -f pmodule.pmodule pmodule -h "Generates DTrace script to profile module"')


def pmodule(debugger, command, exe_ctx, result, internal_dict):
    '''Creates a custom dtrace script that profiles modules in an executable
    based upon its memory layout and ASLR. Provide no arguments w/ '-a' if you
    want a count of all the modules firing. Provide a module if you want to
    dump all the methods as they occur.

    pmodule [[MODULENAME]...]

    You have the option to use objc or non-objc (i.e. objc$target or pid$target)
    through the -n argument.

    Examples:

    # Trace all Objective-C code in UIKit
    pmodule UIKit

    # Trace all non-Objective-C code in libsystem_kernel.dylib (i.e. pid$target:libsystem_kernel.dylib::entry)
    pmodule -n libsystem_kernel.dylib

    # Dump errrything. Only displays count of function calls from modules after you end the script. Warning slow
    pmodule -a
    '''
    command_args = shlex.split(command)
    parser = generate_option_parser()
    target = exe_ctx.target
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        result.SetError("option parsing failed")
        return
    pid = exe_ctx.process.id
    # module_parirs = get_module_pair(, target)
    is_cplusplus = options.non_objectivec
    if not args and not (options.all_modules or options.all_modules_output):
        result.SetError('Need a module or use the -a option. You can list all modules by "image list -b"')
        return

    dtrace_script = generate_dtrace_script(target, options, args)
    if options.debug:
        source = '\n'.join(['# ' + format(idx + 1, '2') + ': ' + line
                            for idx, line in enumerate(dtrace_script.split('\n'))])
        result.AppendMessage(source)
        return

    filename = '/tmp/lldb_dtrace_pmodule_' + ''.join(args)
    create_or_touch_filepath(filename, dtrace_script)
    copycommand = 'echo \"sudo {0} -p {1} 2>/dev/null\" | pbcopy'
    os.system(copycommand.format(filename, pid))
    result.AppendMessage("Copied to clipboard. Paste in Terminal.")
    # 10.12.3 beta broke AppleScript's "do script" API. Dammit. Using pbcopy instead...
    # dtraceCommand = 'osascript -e \'tell application \"Terminal\" to activate & do script \"sudo {0} -p {1} \"\' 2>/dev/null'
    # os.system(dtraceCommand.format(filename, pid))
    # result.AppendMessage("Continuing in different Terminal tab...")
    result.SetStatus(lldb.eReturnStatusSuccessFinishNoResult)


def generate_conditional_for_module_name(module_name, target, options):
    pair = get_module_pair(module_name, target)

    if not options.non_objectivec and options.root_function:
        template = '/ ({0} > *(uintptr_t *)copyin(uregs[R_SP], sizeof(uintptr_t)) || *(uintptr_t *)copyin(uregs[R_SP], sizeof(uintptr_t)) > {1}) && {0} <= uregs[R_PC] && uregs[R_PC] <= {1} /\n'
    elif options.non_objectivec and not options.root_function:
        template = '\n'
    elif not options.non_objectivec and not options.root_function:
        template = '/ {0} <= uregs[R_PC] && uregs[R_PC] <= {1} /\n'
    elif options.non_objectivec and options.root_function:
        template = '/ ({0} > *(uintptr_t *)copyin(uregs[R_SP], sizeof(uintptr_t)) || *(uintptr_t *)copyin(uregs[R_SP], sizeof(uintptr_t)) > {1}) /\n'
    return template.format(pair[0], pair[1])


def generate_dump_all_module_script(target):
    dtrace_script = r'''
 this->method_counter = \"Unknown\";
 program_counter = uregs[R_PC];
'''
    dtrace_template = "this->method_counter = {} <= program_counter && program_counter <= {} ? \"{}\" : this->method_counter;\n"
    dtrace_template = textwrap.dedent(dtrace_template)
    for module in target.modules:
        section = module.FindSection("__TEXT")
        lower_bounds = section.GetLoadAddress(target)
        upper_bounds = lower_bounds + section.file_size
        module_name = module.file.basename
        if "_lldb_" not in module_name:
            dtrace_script += dtrace_template.format(lower_bounds, upper_bounds, module_name)
    return dtrace_script


def create_or_touch_filepath(filepath, dtrace_script):
    file = open(filepath, "w")
    file.write(dtrace_script)
    file.flush()
    st = os.stat(filepath)
    os.chmod(filepath, st.st_mode | S_IEXEC)
    file.close()


def generate_dtrace_script(target, options, args):
    is_cplusplus = options.non_objectivec
    dtrace_script = '''#!/usr/sbin/dtrace -s
#pragma D option quiet
'''
    if options.flow_indent:
        dtrace_script += '#pragma D option flowindent'
    dtrace_script += '''
dtrace:::BEGIN
{{
    printf("Starting... Hit Ctrl-C to end. Observing {} functions in {}\\n");
}}
'''.format('straight up, normal' if is_cplusplus else 'Objective-C', (', ').join(args))
    dtrace_template = ''
    pid = target.process.id
    is_cplusplus = options.non_objectivec
    query_template = '{}$target:{}::entry\n'
    if options.all_modules or options.all_modules_output:
        if is_cplusplus:
            dtrace_script += query_template.format('pid', '')
        else:
            dtrace_script += query_template.format('objc', '')

        if options.all_modules_output and not options.non_objectivec:
            dtrace_script += '{\nprintf("0x%012p %c[%s %s]\\n", uregs[R_RDI], probefunc[0], probemod, (string)&probefunc[1]);\n}'
        elif options.all_modules_output and options.non_objectivec:
            dtrace_script += '{\nprintf("0x%012p %s, %s\\n", uregs[R_RDI], probemod, probefunc);\n}'
        else:
            dtrace_script += '{\nprogram_counter = uregs[R_PC];\nthis->method_counter = \"Unknown\";'
            # TODO 64 only change to universal arch
            dtrace_template += "this->method_counter = {} <= program_counter && program_counter <= {} ? \"{}\" : this->method_counter;\n"
            dtrace_template = textwrap.dedent(dtrace_template)
            for module in target.modules:
                section = module.FindSection("__TEXT")
                lower_bounds = section.GetLoadAddress(target)
                upper_bounds = lower_bounds + section.file_size
                module_name = module.file.basename
                if "_lldb_" not in module_name:
                    dtrace_script += dtrace_template.format(lower_bounds, upper_bounds, module_name)
            dtrace_script += "\n@num[this->method_counter] = count();\n}\n"
    else:
        for module_name in args:
            # uregs[R_RDI]
            # Objective-C logic: objc$target:::entry / {} <= uregs[R_PC] && uregs[R_PC] <= {} / { }
            if not is_cplusplus:
                dtrace_script += query_template.format('objc', '')
                dtrace_script += generate_conditional_for_module_name(module_name, target, options)
            # Non-Objective-C logic: pid$target:Module::entry { }
            if is_cplusplus:
                dtrace_script += query_template.format('pid', module_name)
                dtrace_script += generate_conditional_for_module_name(module_name, target, options)
                if options.timestamp:
                    dtrace_script += '{\n    printf("%Y [%s] %s\\n", walltimestamp, probemod, probefunc);\n'
                else:
                    dtrace_script += '{\n    printf("[%s] %s\\n", probemod, probefunc);\n'
            else:
                if options.timestamp:
                    dtrace_script += '{\n    printf("%Y 0x%012p %c[%s %s]\\n", walltimestamp, uregs[R_RDI], probefunc[0], probemod, (string)&probefunc[1]);\n'
                else:
                    dtrace_script += '{\n    printf("0x%012p %c[%s %s]\\n", uregs[R_RDI], probefunc[0], probemod, (string)&probefunc[1]);\n'
            # Logic to append counting at the termination of script
            if options.count:
                dtrace_script += '    @numWrites{}[probefunc] = count();\n'.format(os.path.splitext(module_name)[0])
            dtrace_script += '}\n'
    return dtrace_script


def get_module_pair(module_name, target):
    module = target.FindModule(lldb.SBFileSpec(module_name))
    if not module.file.exists:
        # NOTE: the original referenced an undefined 'result' here; callers
        # should validate the module name before calling this helper
        result.SetError(
            "Unable to open module name '{}', to see list of images use 'image list -b'".format(module_name))
        return
    section = module.FindSection("__TEXT")
    lower_bounds = section.GetLoadAddress(target)
    upper_bounds = lower_bounds + section.file_size
    return (lower_bounds, upper_bounds)


def generate_option_parser():
    usage = "usage: %prog [options] arg1 [arg2...]"
    parser = optparse.OptionParser(usage=usage, prog='pmodule')
    parser.add_option("-n", "--non_objectivec",
                      action="store_true",
                      default=False,
                      dest="non_objectivec",
                      help="Use pid$target instead of objc$target")
    parser.add_option("-c", "--count",
                      action="store_true",
                      default=False,
                      dest="count",
                      help="Count method calls for framework")
    parser.add_option("-a", "--all_modules",
                      action="store_true",
                      default=False,
                      dest="all_modules",
                      help="Profile all modules. If this is selected, specific modules are ignored and counts are returned when script finishes")
    parser.add_option("-A", "--all_modules_output",
                      action="store_true",
                      default=False,
                      dest="all_modules_output",
                      help="Dumps EVERYTHING. Only execute single commands with this one in lldb")
    parser.add_option("-r", "--root_function",
                      action="store_true",
                      default=False,
                      dest="root_function",
                      help="Only prints the root functions if it's called from another module")
    parser.add_option("-f", "--flow_indent",
                      action="store_true",
                      default=False,
                      dest="flow_indent",
                      help="Adds the flow indent flag")
    parser.add_option("-t", "--timestamp",
                      action="store_true",
                      default=False,
                      dest="timestamp",
                      help="Prints out an approximate timestamp of when the calls were made")
    parser.add_option("-g", "--debug",
                      action="store_true",
                      default=False,
                      dest="debug",
                      help="Doesn't copy the script, just prints it out to stderr")
    return parser
ETCO-EDC is the section of the European Society of Organ Transplantation (ESOT) dedicated to all aspects of deceased and living donation, clinical coordination and procurement. ETCO-EDC was created in September 2011 as a result of the merger between the European Transplant Coordinators Organization (ETCO) and the European Donation Committee (EDC), with the objective of creating a strong, visible and active section focused on an area that the two organizations had previously addressed separately. Its objectives include optimizing potential recipient care and follow-up from the perspective of clinical coordination. These specific objectives are to be achieved by supporting and promoting all those professionals involved in the process of organ and tissue donation, clinical coordination and procurement, and by helping to establish high professional standards in the field. ETCO-EDC serves all those professionals who are directly involved in, or who have an interest in, deceased and living organ and tissue donation, clinical coordination and procurement activities. Target groups include transplant coordinators (in the wider sense of the function, with their different roles and responsibilities, such as deceased donation, living donation and recipient clinical coordination, pre- and post-transplantation), procurement professionals, and related professional groups. ETCO-EDC offers several educational and training activities within its area of expertise and knowledge, including the organization of the European Organ Donation Congress, held every two years, and periodic workshops on topics critical to the daily practice of professionals involved in donation and procurement activities. ETCO-EDC has contributed to the development of the Certification for European Transplant Coordinators (CETC) under the auspices of the UEMS, with the aim of harmonizing professional standards in the field at the European level.
# -*- coding: utf-8 -*-

import re

from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo


class EuroshareEu(SimpleHoster):
    __name__ = "EuroshareEu"
    __type__ = "hoster"
    __version__ = "0.26"

    __pattern__ = r'http://(?:www\.)?euroshare\.(eu|sk|cz|hu|pl)/file/.*'

    __description__ = """Euroshare.eu hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "[email protected]")]


    INFO_PATTERN = r'<span style="float: left;"><strong>(?P<N>.+?)</strong> \((?P<S>.+?)\)</span>'
    OFFLINE_PATTERN = ur'<h2>S.bor sa nena.iel</h2>|Požadovaná stránka neexistuje!'

    FREE_URL_PATTERN = r'<a href="(/file/\d+/[^/]*/download/)"><div class="downloadButton"'
    ERR_PARDL_PATTERN = r'<h2>Prebieha s.ahovanie</h2>|<p>Naraz je z jednej IP adresy mo.n. s.ahova. iba jeden s.bor'
    ERR_NOT_LOGGED_IN_PATTERN = r'href="/customer-zone/login/"'

    URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]


    def setup(self):
        self.multiDL = self.resumeDownload = self.premium
        self.req.setOption("timeout", 120)


    def handlePremium(self):
        if self.ERR_NOT_LOGGED_IN_PATTERN in self.html:
            self.account.relogin(self.user)
            self.retry(reason=_("User not logged in"))

        self.download(self.pyfile.url.rstrip('/') + "/download/")

        check = self.checkDownload({"login": re.compile(self.ERR_NOT_LOGGED_IN_PATTERN),
                                    "json": re.compile(r'\{"status":"error".*?"message":"(.*?)"')})
        if check == "login" or (check == "json" and self.lastCheck.group(1) == "Access token expired"):
            self.account.relogin(self.user)
            self.retry(reason=_("Access token expired"))
        elif check == "json":
            self.fail(self.lastCheck.group(1))


    def handleFree(self):
        if re.search(self.ERR_PARDL_PATTERN, self.html) is not None:
            self.longWait(5 * 60, 12)

        m = re.search(self.FREE_URL_PATTERN, self.html)
        if m is None:
            self.error(_("FREE_URL_PATTERN not found"))
        parsed_url = "http://euroshare.eu%s" % m.group(1)
        self.logDebug("URL", parsed_url)
        self.download(parsed_url, disposition=True)

        check = self.checkDownload({"multi_dl": re.compile(self.ERR_PARDL_PATTERN)})
        if check == "multi_dl":
            self.longWait(5 * 60, 12)


getInfo = create_getInfo(EuroshareEu)
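A quick self-contained check of the URL_REPLACEMENTS normalization above, which rewrites the national domains to the .eu one before the plugin processes a link (the example URL is invented):

# Demo of the URL normalization rule (example URL invented).
import re

URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]

url = "http://euroshare.sk/file/12345/some-file/"
for pattern, repl in URL_REPLACEMENTS:
    url = re.sub(pattern, repl, url)
assert url == "http://euroshare.eu/file/12345/some-file/"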
HONOLULU, HAWAII, USA, April 17, 2019 /EINPresswire.com/ — Pacific Telecommunications Council, otherwise recognized as PTC, today announced its Call for Participation (CFP) for its 42nd Annual Conference, seeking submissions for PTC’20: Vision 2020 and Beyond, to be held from 19 to 22 January 2020 in Honolulu, Hawaii. “Vision” is both the “act of sensing” present reality and the “power to anticipate that which may come to be.” PTC’20: Vision 2020 and Beyond will investigate multiple dimensions of the telecommunications sector, technologies, applications, and benefits in 2020, and also explore trends and discontinuities in the years beyond. 2020 will be an eventful year, with 8K and 5G rollouts gaining steam; further developments in AI, blockchain, AR and VR; an exponentially increasing set of “things” being deployed globally, measuring in the tens of billions on its way to a trillion; computing migrating to the edge; and an increasing number of people coming online, even as the global population continues to grow. At the nexus of these are subsea, satellite, wireline, and wireless networks, along with the data center and interconnection facilities connecting them, all growing at double-digit percentages for the foreseeable future. PTC’20: Vision 2020 and Beyond will bring into focus what would otherwise be a blur of disruptive technologies, emerging applications, shifting regulatory policies, dynamically changing cultural norms, and new business models. PTC invites industry executives, business strategists, financial analysts, technologists, innovators, policymakers, regulatory and legal experts, and consultants to submit proposals offering forward-looking views and implications on topics representing the breadth and depth of the industry. These include applications, technology, and policy issues for network-centric or network-enabled products, services, and uses. New to the upcoming PTC Annual Conference are the PTC HUB presentations and cross-discipline/cross-sector sessions. The PTC HUB will be the core of the conference and an opportunity to conduct brief and lively 10-minute talks, tutorials, debates, presentations, or interactive sessions on a variety of key issues. The sessions will demonstrate how industry functions and developments interact across ecosystems to create value, and how changes contribute to use cases. The conference program will also incorporate a variety of formats, including presentations, interviews, and moderated discussion panels. Proposals can be submitted for topical sessions, workshops, tutorials, or “managed” sessions. The deadline for submission is 12 July 2019. Academics and researchers are invited to submit research paper abstracts by 12 July 2019, either on one of the provided topics of interest or on another that fits the conference theme. Students may submit full papers by 15 September 2019. Accepted research papers are also eligible for PTC’s research awards, the Meheroo Jussawalla Research Award and the Yale M. Braunstein Student Award. For more information on the PTC’20 Call for Participation and details regarding proposal options and a complete listing of topics, visit www.ptc.org/ptc20/cfp.
# The definitions of the input and output directories to control where the
# images are being written. Defines some imports for matplotlib as well.
import os
import logging

logging.getLogger().setLevel(logging.INFO)

# Fall back to the current directory so save_path()/data_path() never hit an
# undefined name when the environment variables are missing.
__path_savefig = os.environ.get('SAVE_DIR', '.')
__path_data = os.environ.get('DATA_DIR', '.')
logging.info("Saving path: %s", __path_savefig)
logging.info("Data root path: %r", __path_data)

if "MATPLOTLIB_HEADLESS" in os.environ:
    logging.info("Configuring matplotlib to headless")
    import matplotlib
    matplotlib.use("Agg")
    from matplotlib import rc
    # rc('font', **{'family': 'serif', 'serif': ['Times New Roman']})
    matplotlib.rcParams['ps.useafm'] = True
    matplotlib.rcParams['pdf.use14corefonts'] = True
    # matplotlib.rcParams['text.usetex'] = True
    rc('font', **{'family': 'serif'})


def save_path():
    return __path_savefig


def data_path():
    return __path_data


def data_name(name):
    return "%s/%s" % (data_path(), name)


def save_name(name, ensure_dir=True):
    fname = "%s/%s" % (save_path(), name)
    fdir = os.path.dirname(fname)
    logging.debug("Asking for save name %s, (%s)", fname, fdir)
    if ensure_dir and not os.path.exists(fdir):
        logging.info("Creating directory %r", fdir)
        os.makedirs(fdir)
    return fname


def save_figure(fig, name, save_pdf=True, save_svg=True):
    # Write the figure under SAVE_DIR in the requested formats.
    if save_pdf:
        fig.savefig("%s.pdf" % save_name(name), bbox_inches='tight')
    if save_svg:
        fig.savefig("%s.svg" % save_name(name), bbox_inches='tight')
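A minimal usage sketch for the helpers above, assuming they live in a module named plotconfig (the module name is hypothetical) and that the environment is prepared before import:

import os
# Illustrative defaults; in practice these would be exported in the shell.
os.environ.setdefault("SAVE_DIR", "/tmp/figures")
os.environ.setdefault("DATA_DIR", "/tmp/data")
os.environ["MATPLOTLIB_HEADLESS"] = "1"   # force the Agg backend on a server

import plotconfig                          # hypothetical name for the file above
from matplotlib import pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
plotconfig.save_figure(fig, "demo/quadratic")  # writes SAVE_DIR/demo/quadratic.pdf and .svg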
We asked for books from family and friends at Hilde’s showers just because both Bruce and I love to read. We really wanted to encourage and inspire learning in our little girl’s life. We thought building up her book collection from the beginning would be so fun. Plus her room has so many nods to other great novels – Harry Potter, Lord of the Rings, and the Chronicles of Narnia. We bought three shelves and got so many books for kids that they are already overflowing (see photographic evidence)… so maybe we need to add more storage somewhere as she grows! haha! I wanted to share 9 of the books we received that you can add to your little one’s collection! I’ve loved BabyLit books since I discovered them! I cannot tell you how many showers & birthdays I’ve brought these to as gifts! It is so fun to now be able to collect them for our little girl as well! I love how they take classic stories and make them into beautiful books for babes! This adorable book was a gift from my BFF. She sent me the sweetest package (since we are long distance) filled with things her little girl loves! This book is one of them and I know Hilde will love it too! When I was in college, I had several friends who were Children’s Ministry majors. I remember when this book was on their syllabus one semester and they started reading to us from the book before bed. I loved it and knew from that moment that we would have this Bible in our house for our kiddos one day. I also am obsessed with this gift version (for $15) – it is so pretty! I just might buy another copy because I love the way it looks! This is a book that came from one of Bruce’s close college friends! We couldn’t love it more! First off, it is just fun to have a book with my husband’s name in it, but the story is also just so fun! It is about this grumpy bear named Bruce who finds himself suddenly the mother of some young goslings! When I was looking it up for this post, I saw that there is a whole series of these! Hotel Bruce, Bruce’s Big Move, and Santa Bruce just to name a few. I am totally buying the Christmas one for a family tradition! This one came from my SIL and nieces! It is a family favorite in their home and they wanted us to be able to enjoy it as well! Plus, it is about the bear that inspired Winnie the Pooh, which is one of my all-time favorites. I had like 3 versions of Winnie the Pooh themes in my bedroom growing up… and countless t-shirts! This book is historical and so sweet! There are even pictures in the back from WWII (I’m a huge history nerd!) so it combines so many things that I love! Okay, this is the perfect book for any child of mine/the perfect book for me! I love princesses and I love the outdoors. They totally go hand in hand and I want my daughter to know that she can just be herself! Also adding Do Princesses Make Happy Campers? to my list! I saw this one on Joanna Gaines’ baby picks once so you know it is good! My BFF gifted this one to me and before my shower told me, and I quote: “Just read the book I got for Hilde and now I’m crying. You’re going to love it!” end quote. haha! It is so sweet. A gift from my little sister, this is such a fun and inspirational book! It is really cute and inspires creative thinking and dreaming big! One of our good friends picked this one out with Bruce in mind! Bruce is an engineer and we love fun books about math, science, and curiosity! It is such a cute book that talks about not quitting but always learning!
We may just need to add Ada Twist, Scientist and Iggy Peck, Architect to our collection as well! What are some of your favorite books on your shelves or your kids’ shelves?
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2016, Silvio Peroni <[email protected]> # # Permission to use, copy, modify, and/or distribute this software for any purpose # with or without fee is hereby granted, provided that the above copyright notice # and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, # OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, # DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS # SOFTWARE. __author__ = 'essepuntato' from datetime import datetime import re import os from rdflib import Graph, Namespace, URIRef from rdflib.namespace import XSD, RDF, RDFS from support import create_literal, create_type from graphlib import GraphSet from storer import Storer from reporter import Reporter class DatasetHandler(object): DCTERMS = Namespace("http://purl.org/dc/terms/") DCAT = Namespace("http://www.w3.org/ns/dcat#") VOID = Namespace("http://rdfs.org/ns/void#") MTT = Namespace("https://w3id.org/spar/mediatype/text/") DBR = Namespace("http://dbpedia.org/resource/") dataset = DCAT.Dataset datafile = DCAT.Distribution title = DCTERMS.title description = DCTERMS.description issued = DCTERMS.issued modified = DCTERMS.modified keyword = DCAT.keyword subject = DCAT.theme landing_page = DCAT.landingPage subset = VOID.subset sparql_endpoint = VOID.sparqlEndpoint distribution = DCAT.distribution license = DCTERMS.license download_url = DCAT.downloadURL media_type = DCAT.mediaType byte_size = DCAT.byte_size label = RDFS.label a = RDF.type turtle = MTT.turtle bibliographic_database = DBR.Bibliographic_database open_access = DBR.Open_access scholary_communication = DBR.Scholarly_communication citations = DBR.Citation def __init__(self, tp_url_real, context_path, context_file_path, base_iri, base_dir, info_dir, dataset_home, tmp_dir, triplestore_url=None): self.tp_url = triplestore_url self.base_iri = base_iri self.base_dir = base_dir self.info_dir = info_dir self.context_path = context_path self.dataset_home = URIRef(dataset_home) self.tmp_dir = tmp_dir self.tp_res = URIRef(tp_url_real) self.repok = Reporter(prefix="[DatasetHandler: INFO] ") self.reperr = Reporter(prefix="[DatasetHandler: ERROR] ") self.st = Storer(context_map={context_path: context_file_path}, repok=self.repok, reperr=self.reperr) self.st.set_preface_query( u"DELETE { ?res <%s> ?date } WHERE { ?res a <%s> ; <%s> ?date }" % (str(DatasetHandler.modified), str(DatasetHandler.dataset), str(DatasetHandler.modified))) # /START Create Literal def create_label(self, g, res, string): return create_literal(g, res, RDFS.label, string) def create_publication_date(self, g, res, string): return create_literal(g, res, self.issued, string, XSD.dateTime) def update_modification_date(self, g, res, string): g.remove((res, self.modified, None)) return create_literal(g, res, self.modified, string, XSD.dateTime) def create_title(self, g, res, string): return create_literal(g, res, self.title, string) def create_description(self, g, res, string): return create_literal(g, res, self.description, string) def create_keyword(self, g, res, string): return create_literal(g, res, self.keyword, string) def create_byte_size(self, g, res, string): return 
create_literal(g, res, self.byte_size, string, XSD.decimal) # /END Create Literal # /START Create Complex Attributes def has_subject(self, g, res, obj): g.add((res, self.subject, obj)) def has_landing_page(self, g, res, obj): g.add((res, self.landing_page, obj)) def has_subset(self, g, res, obj): g.add((res, self.subset, obj)) def has_sparql_endpoint(self, g, res, obj): g.add((res, self.sparql_endpoint, obj)) def has_distribution(self, g, res, obj): g.add((res, self.distribution, obj)) def has_license(self, g, res, obj): g.add((res, self.license, obj)) def has_download_url(self, g, res, obj): g.add((res, self.download_url, obj)) def has_media_type(self, g, res, obj): g.add((res, self.media_type, obj)) # /END Create Complex Attributes # /START Types def dataset_type(self, g, res): create_type(g, res, self.dataset) def distribution_type(self, g, res): create_type(g, res, self.datafile) # /END Types def update_dataset_info(self, graph_set): cur_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S') subgraphs_to_update = set() all_graphs = [] for g in graph_set.graphs(): cur_id = g.identifier if cur_id not in subgraphs_to_update: subgraphs_to_update.add(cur_id) cur_dataset_res = URIRef(cur_id) cur_dataset = self.get_dataset_graph(cur_dataset_res, cur_time) self.update_modification_date(cur_dataset, cur_dataset_res, cur_time) all_graphs += [cur_dataset] if subgraphs_to_update: cur_occ_res = URIRef(self.base_iri) cur_occ = self.get_dataset_graph(cur_occ_res, cur_time) self.update_modification_date(cur_occ, cur_occ_res, cur_time) for subgraph_id in subgraphs_to_update: self.has_subset(cur_occ, cur_occ_res, URIRef(subgraph_id)) all_graphs += [cur_occ] if all_graphs: # Store everything and upload to triplestore if self.tp_url is None: self.st.store_all( self.base_dir, self.base_iri, self.context_path, self.tmp_dir, all_graphs, True) else: self.st.upload_and_store( self.base_dir, self.tp_url, self.base_iri, self.context_path, self.tmp_dir, all_graphs, True) def get_dataset_graph(self, res, cur_time): dataset_path = self.get_metadata_path_from_resource(res) if os.path.exists(dataset_path): return list(self.st.load(dataset_path, tmp_dir=self.tmp_dir).contexts())[0] else: dataset_label = "OCC" dataset_title = "The OpenCitations Corpus" dataset_description = "The OpenCitations Corpus is an open repository of scholarly " \ "citation data made available under a Creative Commons public " \ "domain dedication, which provides in RDF accurate citation " \ "information (bibliographic references) harvested from the " \ "scholarly literature (described using the SPAR Ontologies) " \ "that others may freely build upon, enhance and reuse for any " \ "purpose, without restriction under copyright or database law." if re.search("/../$", str(res)) is not None: g = Graph(identifier=str(res)) dataset_short_name = str(res)[-3:-1] dataset_name = GraphSet.labels[dataset_short_name] dataset_title += ": %s dataset" % dataset_name.title() dataset_description += " This sub-dataset contains all the '%s' resources." 
% \ dataset_name dataset_label += " / %s" % dataset_short_name self.create_keyword(g, res, dataset_name) else: g = Graph() self.has_landing_page(g, res, self.dataset_home) self.has_sparql_endpoint(g, res, self.tp_res) self.dataset_type(g, res) self.create_label(g, res, dataset_label) self.create_title(g, res, dataset_title) self.create_description(g, res, dataset_description) self.create_publication_date(g, res, cur_time) self.create_keyword(g, res, "OCC") self.create_keyword(g, res, "OpenCitations") self.create_keyword(g, res, "OpenCitations Corpus") self.create_keyword(g, res, "SPAR Ontologies") self.create_keyword(g, res, "bibliographic references") self.create_keyword(g, res, "citations") self.has_subject(g, res, self.bibliographic_database) self.has_subject(g, res, self.scholary_communication) self.has_subject(g, res, self.open_access) self.has_subject(g, res, self.citations) return g def get_metadata_path_from_resource(self, dataset_res): return self.get_metadata_path_from_iri(str(dataset_res)) def get_metadata_path_from_iri(self, dataset_iri): return re.sub("^%s" % self.base_iri, self.base_dir, dataset_iri) + "index.json"
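A condensed, standalone sketch of the DCAT description the handler above assembles, using plain rdflib calls in place of the project's create_literal/create_type helpers; the dataset IRI and date below are illustrative, not taken from the code.

from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, RDFS, XSD

DCAT = Namespace("http://www.w3.org/ns/dcat#")
DCTERMS = Namespace("http://purl.org/dc/terms/")

g = Graph()
res = URIRef("https://w3id.org/oc/corpus/")   # illustrative dataset IRI

g.add((res, RDF.type, DCAT.Dataset))
g.add((res, RDFS.label, Literal("OCC")))
g.add((res, DCTERMS.title, Literal("The OpenCitations Corpus")))
g.add((res, DCTERMS.issued, Literal("2016-07-01T00:00:00", datatype=XSD.dateTime)))
g.add((res, DCAT.keyword, Literal("citations")))

print(g.serialize(format="turtle"))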
A destination in its own right, the Westin Austin is at the center of the city’s lively downtown. Just around the corner from the famous Sixth Street entertainment district, the hotel is a short walk to the convention center, state Capitol, and many of Austin’s restaurants and music venues. The Westin’s rooftop pool and bar Azul, recognized by Travel Agent Central as the Cool Pool of the World for 2017, boasts commanding views and some of the best craft cocktails around. Guests and locals alike flock to Stella San Jac, the hotel’s regional American kitchen and bar, which is committed to local, artisan ingredients and features a contemporary eclectic design. With 366 rooms and suites and more than 20,000 square feet of meeting and event space, the LEED-certified Westin Austin Downtown has quickly become a favorite of leisure and business travelers alike and a can’t-miss place to be during festivals like SXSW.
from django.core.management.base import NoArgsCommand, CommandError from django.conf import settings #from wt_articles.models import ArticleOfInterest, SourceArticle, TranslationRequest from wt_translation.models import ServerlandHost from wt_translation.models import UnsupportedLanguagePair, UndefinedTranslator, ServerlandConfigError # #from pootle_project.models import Project #from pootle_translationproject.models import TranslationProject #from pootle_language.models import Language # #import xmlrpclib #import json class Command(NoArgsCommand): help = "Looks for completed translation requests from all Serverland hosts and updates their corresponding .po files." def handle_error(self, host, error): print "An error occurred with Serverland host '%s' (%s):" % (host.shortname, host.url) print error #.exc_info() def handle_noargs(self, **options): # Fetch all of the hosts hosts = ServerlandHost.objects.all() for host in hosts: try: host.fetch_translations() except UnsupportedLanguagePair as ex: self.handle_error(host, ex) except UndefinedTranslator as ex: self.handle_error(host, ex) except ServerlandConfigError as ex: self.handle_error(host, ex) except Exception as ex: self.handle_error(host, ex) # token = settings.SERVERLAND_TOKEN # # print "Using token " + token # print "xmlrpclib.ServerProxy(%s)" % settings.SERVERLAND_XMLRPC # proxy = xmlrpclib.ServerProxy(settings.SERVERLAND_XMLRPC) # print "Connected!" # # # Fetch a list of the serverland workers # # for worker in proxy.list_workers(token): # # workerName = worker['shortname'] # # # Check serverland for completed translations # print "proxy.list_requests('%s')" % token # requests = proxy.list_requests(token) # # print "Call finished" # # If only one result is retrieved, the dict is not in a list # if not isinstance(requests, list): # print "Retrieved only one result" # requests = [requests] # else: # print "Retrieved multiple results" # # # Display the translation requests that are "ready" # completedRequests = [request for request in requests if request['ready']] # # print "Showing the completed requests" # # Process the completed requests # for completedRequest in completedRequests: # # Get the result # result = proxy.list_results(token, completedRequest['request_id']) # # # TODO: Save the result # print result['shortname'], result['request_id'] # print result['result'] # # # TODO: Delete the request # # proxy.delete_translation(completedRequest['request_id'])
What factors affect providers' medical decision making? What are providers' strategic priorities for 2019? What is driving growth in the medical supply market? HIDA surveys executives in major healthcare markets on an annual basis to understand these questions and more. The data is shared with HIDA members in a new format comparing the data across the care continuum. Provider survey insights are assembled into eight streamlined charts that reveal trends and priorities. HIDA’s benchmark study of distributor members’ financial and operating performance presents an analysis of aggregate member data. This study tracks revenue growth, inventory, operating characteristics, and sales force management by market. Participating members receive a complimentary copy; please contact Justin Waters. **PDF Download** Technology Drives ASC Expansion | Advances in operating room technology and equipment are enabling ambulatory surgery centers to increase the types of surgeries they offer. ASC decision makers look for medical products that deliver results, with improving outcomes cited as the most important factor in their organization's supply chain strategy. This report describes how ASCs make medical product decisions, and what new services they plan to offer in the future. **PDF Download** Managing Growth | Health systems are expanding their community footprints. As they grow, they are evaluating how to approach the non-acute supply chain, and dealing with issues such as delivery to offsite locations, value analysis, standardization, and technology integration. This report describes the level of integration that exists between non-acute and acute settings, and what factors drive health system supply chain decision making. **PDF Download** The Impact Of Five-Star | The CMS Five-Star Rating system is having a big impact on skilled nursing facility (SNF) operational priorities. Reputation is key to cultivating strong relationships with local hospitals, and keeping occupancy up. Learn directly from SNF owners and executives what actions they are taking to reduce hospital readmissions and what medical supplies can help them achieve this goal. A majority of facilities anticipate they will spend more on medical supplies and capital equipment in 2018. In addition, many SNFs have plans to expand into other post-acute settings such as home health or independent living. **PDF Download** Hospital Laboratories Scrutinize Operations | The market for laboratory testing continues to grow, and many laboratories have invested in expanding their capacity to meet increased demand. However, cuts in government reimbursement have presented a significant challenge to labs, causing many laboratory decision makers to reevaluate staffing, budgets, and what tests their facilities perform in-house. This report describes how hospital labs plan to navigate the challenges they face, and what impact they will have on equipment and supply budgets. **PDF Download** The Evolving Physician Office | Physician practices continue to expand to meet the needs of an aging population, and many practices will increase their medical supply budgets in the next year. Reimbursement decreases and collecting out-of-pocket expenses are top-of-mind challenges for practices, and the need to control costs due to declining revenue is changing how practices operate. Learn in this report what practices are prioritizing, and what services they are offering to appeal to patients, in order to maintain a successful practice.
# This file is part of browser, and contains miscellaneous functions.
#
# Copyright (C) 2009-2010 Josiah Gordon <[email protected]>
#
# browser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import warnings
import subprocess
from time import strftime
from contextlib import contextmanager

import gtk
import glib

debug = True


def print_message(message, color=None, data_color=None):
    if debug:
        date = strftime('%h %e %H:%M:%S')
        if color:
            if not data_color:
                message_list = message.split(':', 1)
                # '\033[0;%sm' is the ANSI color escape; the literal escape
                # character was lost in transit and is restored here.
                messagestr = "%s \033[0;%sm%s:%s" % (date, color,
                        message_list[0], ''.join(message_list[1:]))
            else:
                message_list = message.split(':', 2)
                messagestr = "%s \033[0;%sm%s:\033[%sm%s:%s" % (date, color,
                        message_list[0], data_color, message_list[1],
                        ''.join(message_list[2:]))
        else:
            # Was "%s" % (date, message), which raises TypeError: the format
            # string needs one placeholder per argument.
            messagestr = "%s %s" % (date, message)
        print(messagestr)


@contextmanager
def redirect_warnings(warning_func):
    """ redirect_warnings(warning_func) -> Setup warning redirector to
    redirect warnings to warning_func.  Use this function with 'with'
    statements.

    """

    # Save old warning function
    old_showwarning = warnings.showwarning

    # Override default warning function
    warnings.showwarning = warning_func

    try:
        # Run commands in 'with' statement
        yield
    finally:
        # After 'with' block exits restore showwarning function
        warnings.showwarning = old_showwarning


def extern_load_uri(uri):
    """ extern_load_uri(uri) -> First attempts to load the uri with
    gtk.show_uri.  If that fails it tries xdg-open, gnome-open, and exo-open.

    """

    try:
        # Try gtk.show_uri.
        ret = gtk.show_uri(gtk.gdk.screen_get_default(), uri,
                int(glib.get_current_time()))
        if ret:
            return True
    except Exception as err:
        print("Error (%s) while loading uri: %s" % (err, uri))

    app_list = ['xdg-open', 'gnome-open', 'exo-open']

    for app in app_list:
        try:
            proc_tup = glib.spawn_async([app, uri],
                    flags=glib.SPAWN_SEARCH_PATH)
        except Exception as err:
            print("Error (%s) while loading uri (%s) with app (%s)" %
                    (err, uri, app))
            # Go to the next app if there was an error.
            continue
        # If it gets here then it spawned without error.
        return True

    return False
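A short usage sketch for redirect_warnings() above: any warning raised inside the with block is routed to the custom handler instead of Python's default machinery. The handler signature matches warnings.showwarning.

import warnings

def my_warning_handler(message, category, filename, lineno, file=None, line=None):
    # Route the warning through this module's own logger.
    print_message("Warning: %s (%s:%d)" % (message, filename, lineno), color='33')

with redirect_warnings(my_warning_handler):
    warnings.warn("this goes to my_warning_handler")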
Weasel, Weasel, Weasel, Watch That Doggie Weasel, RAWHIDE! | HAIL DUBYUS!
from Screen import Screen from Components.ConfigList import ConfigListScreen from Components.ActionMap import NumberActionMap from Components.config import config, getConfigListEntry, ConfigNothing, NoSave, configfile from Components.Sources.StaticText import StaticText from Screens.MessageBox import MessageBox from Screens.InputBox import PinInput from Tools.BoundFunction import boundFunction class ProtectedScreen: def __init__(self): if self.isProtected() and config.ParentalControl.servicepin[0].value: self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.pinEntered, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the correct pin code"), windowTitle=_("Enter pin code"))) def isProtected(self): return (config.ParentalControl.servicepinactive.value or config.ParentalControl.setuppinactive.value) def pinEntered(self, result): if result is None: self.closeProtectedScreen() elif not result: self.session.openWithCallback(self.closeProtectedScreen, MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR, timeout=3) def closeProtectedScreen(self, result=None): self.close(None) class ParentalControlSetup(Screen, ConfigListScreen, ProtectedScreen): def __init__(self, session): Screen.__init__(self, session) ProtectedScreen.__init__(self) # for the skin: first try ParentalControlSetup, then Setup, this allows individual skinning self.skinName = ["ParentalControlSetup", "Setup" ] self.setup_title = _("Parental control setup") self.setTitle(self.setup_title) self.onChangedEntry = [ ] self.list = [] ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry) self.createSetup(initial=True) self["actions"] = NumberActionMap(["SetupActions", "MenuActions"], { "cancel": self.keyCancel, "save": self.keySave, "menu": self.closeRecursive, }, -2) self["key_red"] = StaticText(_("Cancel")) self["key_green"] = StaticText(_("Save")) self.recursive = False self.onLayoutFinish.append(self.layoutFinished) def layoutFinished(self): self.setTitle(self.setup_title) def isProtected(self): return (not config.ParentalControl.setuppinactive.value and config.ParentalControl.servicepinactive.value) or\ (not config.ParentalControl.setuppinactive.value and config.ParentalControl.config_sections.configuration.value) or\ (not config.ParentalControl.config_sections.configuration.value and config.ParentalControl.setuppinactive.value and not config.ParentalControl.config_sections.main_menu.value) def createSetup(self, initial=False): self.reloadLists = None self.list = [] if config.ParentalControl.servicepin[0].value or config.ParentalControl.servicepinactive.value or config.ParentalControl.setuppinactive.value or not initial: if config.ParentalControl.servicepin[0].value: pin_entry_text = _("Change PIN") + _(": 0000 - default (disabled)") else: pin_entry_text = _("Set PIN") self.changePin = getConfigListEntry(pin_entry_text, NoSave(ConfigNothing())) self.list.append(self.changePin) self.list.append(getConfigListEntry(_("Protect services"), config.ParentalControl.servicepinactive)) if config.ParentalControl.servicepinactive.value: self.list.append(getConfigListEntry(_("Remember service PIN"), config.ParentalControl.storeservicepin)) if config.ParentalControl.storeservicepin.value != "never": self.list.append(getConfigListEntry(_("Hide parentel locked services"), config.ParentalControl.hideBlacklist)) self.list.append(getConfigListEntry(_("Protect on epg 
age"), config.ParentalControl.age)) self.reloadLists = getConfigListEntry(_("Reload blacklists"), NoSave(ConfigNothing())) self.list.append(self.reloadLists) self.list.append(getConfigListEntry(_("Protect Screens"), config.ParentalControl.setuppinactive)) if config.ParentalControl.setuppinactive.value: self.list.append(getConfigListEntry(_("Protect main menu"), config.ParentalControl.config_sections.main_menu)) self.list.append(getConfigListEntry(_("Protect timer menu"), config.ParentalControl.config_sections.timer_menu)) self.list.append(getConfigListEntry(_("Protect plugin browser"), config.ParentalControl.config_sections.plugin_browser)) self.list.append(getConfigListEntry(_("Protect configuration"), config.ParentalControl.config_sections.configuration)) self.list.append(getConfigListEntry(_("Protect standby menu"), config.ParentalControl.config_sections.standby_menu)) self.list.append(getConfigListEntry(_("Protect software update screen"), config.ParentalControl.config_sections.software_update)) self.list.append(getConfigListEntry(_("Protect manufacturer reset screen"), config.ParentalControl.config_sections.manufacturer_reset)) self.list.append(getConfigListEntry(_("Protect movie list"), config.ParentalControl.config_sections.movie_list)) self.list.append(getConfigListEntry(_("Protect context menus"), config.ParentalControl.config_sections.context_menus)) if config.usage.menu_sort_mode.value == "user": self.list.append(getConfigListEntry(_("Protect menu sort"), config.ParentalControl.config_sections.menu_sort)) else: self.changePin = getConfigListEntry(_("Enable parental protection"), NoSave(ConfigNothing())) self.list.append(self.changePin) self["config"].list = self.list self["config"].setList(self.list) def keyOK(self): if self["config"].l.getCurrentSelection() == self.changePin: if config.ParentalControl.servicepin[0].value: self.session.openWithCallback(self.oldPinEntered, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the old PIN code"), windowTitle=_("Enter pin code")) else: self.oldPinEntered(True) elif self["config"].l.getCurrentSelection() == self.reloadLists: from Components.ParentalControl import parentalControl parentalControl.open() self.session.open(MessageBox, _("Lists reloaded!"), MessageBox.TYPE_INFO, timeout=3) else: ConfigListScreen.keyRight(self) self.createSetup() def keyLeft(self): ConfigListScreen.keyLeft(self) self.createSetup() def keyRight(self): ConfigListScreen.keyRight(self) self.createSetup() def cancelCB(self, value): self.keySave() def keyCancel(self): if self["config"].isChanged(): self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?")) else: self.close() def cancelConfirm(self, answer): if answer: for x in self["config"].list: x[1].cancel() self.close() def keySave(self): if self["config"].isChanged(): for x in self["config"].list: x[1].save() configfile.save() from Components.ParentalControl import parentalControl parentalControl.hideBlacklist() self.close(self.recursive) def closeRecursive(self): self.recursive = True self.keySave() def keyNumberGlobal(self, number): pass # for summary: def changedEntry(self): for x in self.onChangedEntry: x() def getCurrentEntry(self): return self["config"].getCurrent()[0] def getCurrentValue(self): return str(self["config"].getCurrent()[1].getText()) def createSummary(self): from Screens.Setup import SetupSummary return SetupSummary def oldPinEntered(self, answer): 
if answer: self.session.openWithCallback(self.newPinEntered, PinInput, title=_("Please enter the new PIN code"), windowTitle=_("Enter pin code")) elif answer == False: self.session.open(MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR, timeout=3) def newPinEntered(self, answer): if answer is not None: self.session.openWithCallback(boundFunction(self.confirmNewPinEntered, answer), PinInput, title=_("Please re-enter the new PIN code"), windowTitle=_("Enter pin code")) def confirmNewPinEntered(self, answer1, answer2): if answer2 is not None: if answer1 == answer2: warning_text = "" if not answer2: warning_text = _("Your PIN code is 0000. This is the default PIN code and it disables parental control!\n") self.session.open(MessageBox, warning_text + _("The PIN code has been changed successfully."), MessageBox.TYPE_INFO, timeout=3) config.ParentalControl.servicepin[0].value = answer1 config.ParentalControl.servicepin[0].save() self.createSetup() else: self.session.open(MessageBox, _("The PIN codes you entered are different."), MessageBox.TYPE_ERROR, timeout=3)
"The Stageprompter gives you the most natural prompting experience so the audience will never know you are using one. Your performance will instantly improve as your confidence grows!" Staffords on Stage Stageprompters provide singers and musicians with lyrics and guitar chords on stage. Have you been using scraps of paper on a music stand, or maybe peering at a complicated ipad lyric software app' on a tiny screen? Our Stageprompters have been designed especially for artists performing up on stage, that want a larger screen, covert teleprompter, in front of them. Unlike any other prompting device on the market, our Stageprompters are computerless, therefore they are reliable, easy to operate and require no maintenance. From the moment you switch them on you are accessing all your prompts. You can view anything you wish to see, including images as well as text. They are operated by a simple foot switch and because they look like a standard fold back audio wedge monitor, the audience is unaware you have an autocue device up on stage. You can forget the panic you used to feel when losing your place in a song, a quick glance down at your Stageprompter Telemonitor and you are back on track. Your performance will instantly improve as your confidence grows. You can now concentrate on your fans with the knowledge that your Stageprompter is always there for you. Unlike small screen alternatives such as iPad’s, the Wedge-03 Stageprompter has a 22inch screen. This means that for most of your songs, you can fit all your information on one screen with no scrolling needed. This gives you the advantage of being able to look ahead and see prompts that are coming up and not just a line at a time. It has been proven that performers reading/singing from a scrolling autocue, cueprompter or teleprompter are prone to staring at the screen and not performing to the audience. The Stageprompter gives you the most natural prompting experience so the audience will never know.. Please continue to scroll down this page to find out all you need to know or fee free to contact us for more information. Prices here or contact us for more information. Overview - Hidden in a wedge style audio floor monitor case, the Wedge-03 Stageprompter is virtually undetectable by the audience. It gives the user and fellow performers a massive confidence boost by displaying what they need to see such as chords, lyrics, music scores, tabs, structure or whatever is required. This teleprompter autocue monitor is typically used by musicians, singers, presenters, ministers, performers, speakers and choirs. Using a 22″ colour flat screen in portrait orientation, the Stageprompter is built of solid birch plywood to stand up to the knocks of live performance. This new model is designed to be compact and highly portable. It features a high density foam cover for the recessed screen as well as an optional, removable acrylic plastic anti glare screen protector. Additionally, an optional high quality fabric cover or flight case is available to protect it during transportation. Operation – The Stageprompter displays PDF or JPG files of your songs / lyrics / guitar chords etc that are saved onto a USB Memory Stick. The USB memory stick is inserted into the connector panel and a menu of your images (songs) is displayed. Using the foot switch you can then move through the images in forward or reverse order or select a specific image you require from the menu. 
There is no on-stage or off-stage setup; the Stageprompter teleprompter autocue monitor is fully operational from the moment it is turned on. How to get your lyrics / chords etc onto the Stageprompter - We have produced a purpose-made web application that allows you to cut and paste from your existing song sheets. You can format your text, group and order your songs into sets, and can back up or restore your work. Controller-04 Stageprompter - A lower cost, versatile, DIY solution! The Controller-04 has all the same computerless features and operation as our flagship Wedge-03 Stageprompter, but at a much lower cost. Great for the DIY enthusiast that may wish to build their own wedge case or design a bespoke system where maybe multiple screens are used up on stage. Your screen can be positioned up on stage, on the floor in front of you, or can be mounted higher using our optional stand and mount, ideal for keyboards, percussion and choirs. Your Controller-04 Stageprompter gives the user and fellow performers a massive confidence boost by displaying whatever the user wants to see. This could be chords, lyrics, music score, tabs, structure or whatever is required. Operation - The Stageprompter displays PDF or JPG files of your songs / lyrics / guitar chords etc that are saved onto a USB memory stick. The USB memory stick is inserted into the rear of the unit and a menu of your images (songs) is displayed. Using the foot switch, you can then move through the images in forward or reverse order, or select a specific image you require from the menu. There is no on-stage or off-stage setup; the Stageprompter teleprompter / autocue monitor is fully operational from the moment it is turned on. Used By - Any on-stage artist wanting a low cost, computerless teleprompter or autocue.
#!/usr/bin/env python3 # -*- encoding: utf-8 -*- from casting.casting import Casting from const.const import Messenger as Msg from const.const import Queries from date_tools.date_tools import DateTools from logger.logger import Logger class Dropper: # An object with connection parameters to connect to PostgreSQL connecter = None logger = None # Logger to show and log some messages dbnames = [] # List of databases to be removed def __init__(self, connecter=None, dbnames=[], logger=None): if logger: self.logger = logger else: self.logger = Logger() if connecter: self.connecter = connecter else: self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS) if isinstance(dbnames, list): self.dbnames = dbnames else: self.dbnames = Casting.str_to_list(dbnames) msg = Msg.DROPPER_VARS.format(server=self.connecter.server, user=self.connecter.user, port=self.connecter.port, dbnames=self.dbnames) self.logger.debug(Msg.DROPPER_VARS_INTRO) self.logger.debug(msg) def drop_pg_db(self, dbname, pg_superuser): ''' Target: - remove a database in PostgreSQL. Parameters: - dbname: the PostgreSQL database's name which is going to be removed. - pg_superuser: a flag which indicates whether the current user is PostgreSQL superuser or not. ''' delete = False try: self.connecter.cursor.execute(Queries.PG_DB_EXISTS, (dbname, )) result = self.connecter.cursor.fetchone() if result: pg_pid = self.connecter.get_pid_str() formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format( pg_pid=pg_pid, target_db=dbname) self.connecter.cursor.execute(formatted_sql) result = self.connecter.cursor.fetchone() # If there are not any connections to the target database... if not result: # Users who are not superusers will only be able to drop # the databases they own if not pg_superuser: self.connecter.cursor.execute(Queries.GET_PG_DB_OWNER, (dbname, )) db = self.connecter.cursor.fetchone() if db['owner'] != self.connecter.user: msg = Msg.DROP_DB_NOT_ALLOWED.format( user=self.connecter.user, dbname=dbname) self.logger.highlight('warning', msg, 'yellow') else: delete = True else: delete = True if delete: # Get the database's "datallowconn" value datallowconn = self.connecter.get_datallowconn(dbname) # If datallowconn is allowed, change it temporarily if datallowconn: # Disallow connections to the database during the # process result = self.connecter.disallow_db_conn(dbname) if not result: msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format( dbname=dbname) self.logger.highlight('warning', msg, 'yellow') fmt_query_drop_db = Queries.DROP_PG_DB.format( dbname=dbname) start_time = DateTools.get_current_datetime() # Drop the database self.connecter.cursor.execute(fmt_query_drop_db) end_time = DateTools.get_current_datetime() # Get and show the process' duration diff = DateTools.get_diff_datetimes(start_time, end_time) msg = Msg.DROP_DB_DONE.format(dbname=dbname, diff=diff) self.logger.highlight('info', msg, 'green') # If datallowconn was allowed, leave it as it was if datallowconn: # Allow connections to the database at the end of # the process result = self.connecter.allow_db_conn(dbname) if not result: msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format( dbname=dbname) self.logger.highlight('warning', msg, 'yellow') else: msg = Msg.ACTIVE_CONNS_ERROR.format(dbname=dbname) self.logger.highlight('warning', msg, 'yellow') else: msg = Msg.DB_DOES_NOT_EXIST.format(dbname=dbname) self.logger.highlight('warning', msg, 'yellow') except Exception as e: self.logger.debug('Error en la función "drop_pg_db": ' '{}.'.format(str(e))) self.logger.highlight('warning', Msg.DROP_DB_FAIL.format( 
dbname=dbname), 'yellow') def drop_pg_dbs(self, dbnames): ''' Target: - remove a list of databases in PostgreSQL. ''' self.logger.highlight('info', Msg.BEGINNING_DROPPER, 'white') # Check if the role of the user connected to PostgreSQL is superuser pg_superuser = self.connecter.is_pg_superuser() if dbnames: for dbname in dbnames: msg = Msg.PROCESSING_DB.format(dbname=dbname) self.logger.highlight('info', msg, 'cyan') self.drop_pg_db(dbname, pg_superuser) else: self.logger.highlight('warning', Msg.DROPPER_HAS_NOTHING_TO_DO, 'yellow', effect='bold') self.logger.highlight('info', Msg.DROP_DBS_DONE, 'green', effect='bold')
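A hypothetical invocation sketch for the Dropper above; the Connecter constructor, its module path, and its signature are assumptions, since they live elsewhere in the project.

from logger.logger import Logger
from connecter import Connecter   # assumed module; signature below is illustrative

logger = Logger()
connecter = Connecter(server='localhost', port=5432, user='postgres', logger=logger)
dropper = Dropper(connecter=connecter, dbnames=['stale_db_a', 'stale_db_b'], logger=logger)
dropper.drop_pg_dbs(dropper.dbnames)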
• Every person has cancer cells in the body. These cancer cells do not show up in standard tests until they have multiplied to a few billion. When doctors tell cancer patients that there are no more cancer cells in their bodies after treatment, it just means the tests are unable to detect the cancer cells because they have not reached a detectable size. Through Vibrational Healing Therapy, cancer cells may be regenerated into healthy functional cells in the early stages of development. • Chemotherapy and radiation can cause cancer cells to mutate and become resistant and difficult to destroy. Surgery can also cause cancer cells to spread to other sites. Chemotherapy involves poisoning the rapidly growing cancer cells, but it also destroys rapidly growing healthy cells in the bone marrow, gastrointestinal tract, etc., and can cause damage to organs such as the liver, kidneys, heart, and lungs. Radiation, while destroying cancer cells, also burns, scars, and damages healthy cells, tissues, and organs. Initial treatment with chemotherapy and radiation will often reduce tumor size; however, prolonged use of chemotherapy and radiation does not result in more tumor destruction. When the body carries too much toxin burden from chemotherapy and radiation, the immune system is either compromised or destroyed, and hence the person can succumb to various kinds of infections and complications.
from typing import Callable, Union

from asyncio import Future

from rx import throw, from_future
from rx.core import Observable
from rx.core.typing import Scheduler
from rx.internal.utils import is_future


def _defer(factory: Callable[[Scheduler], Union[Observable, Future]]
           ) -> Observable:
    """Returns an observable sequence that invokes the specified factory
    function whenever a new observer subscribes.

    Example:
        >>> res = defer(lambda scheduler: of(1, 2, 3))

    Args:
        factory: Observable factory function to invoke for each observer
            that subscribes to the resulting sequence. It receives the
            scheduler (possibly None) as its only argument.

    Returns:
        An observable sequence whose observers trigger an invocation
        of the given observable factory function.
    """

    def subscribe(observer, scheduler=None):
        try:
            result = factory(scheduler)
        except Exception as ex:  # By design. pylint: disable=W0703
            return throw(ex).subscribe(observer)

        result = from_future(result) if is_future(result) else result
        return result.subscribe(observer, scheduler=scheduler)

    return Observable(subscribe)
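A brief usage sketch against RxPY 3's public API, where this operator is exposed as rx.defer: the factory runs once per subscription, so each observer receives a freshly built sequence.

import rx

source = rx.defer(lambda scheduler: rx.of(1, 2, 3))

# Each subscription triggers its own factory invocation.
source.subscribe(lambda value: print("first observer:", value))
source.subscribe(lambda value: print("second observer:", value))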
Have the skater stand on a sheet of paper. Trace the outline of the foot, making sure your pen/pencil remains straight up and down at all times. You will need to send tracings of both feet. Draw a line with a ruler from the longest toe to the end of the heel. Write the exact measurement of the line. Be sure your measurements are precise, as half an inch can make a difference of up to half a size. Secondly, take a fabric or paper tape measure and wrap it snugly around the ball (widest part near the toes) of the foot, then have the skater stand with even pressure on both feet. This measurement of the circumference of the ball of the foot will determine the width of the skate boot. If you do not have a fabric or paper tape measure, you can use a ribbon or string and then lay it out on a ruler for the measurement. 1. Contact information including your full name and daytime phone number with area code. 3. Current skate size and width plus the brand and style of boots. 4. Is your heel narrow or fleshy? 5. Would the skater prefer a perfect fit or does the skater need room to grow? 6. Current level of skating. Email your tracing to [email protected], fax your tracing to 224-303-0373, or mail it to Rainbo Sports, 2665 Shermer Rd, Northbrook, IL 60062. We will do our best to help you choose a size based on the information we receive; however, we cannot guarantee fit based on measurements that are not made in person. Keep in mind that boots will stretch and the heel will settle back after some use. Some discomfort is common in new boots. Boots cannot be returned if they are mounted or if any change from their original condition has been made. Please see our return policy.
# May 18, 2017:: 9:59am # This was the lone regressor extracted from the original classifier in # the regressor code. We have phased this in to the trainClassifierRegressor # code in /manikin/main.py def trainRegressor(args, resnet, bbox_loader): r""" Following the interpretable learning from self-driving examples: https://arxiv.org/pdf/1703.10631.pdf we can extract the last feature cube x_t from the resnet model as a set of L = W x H vectors of depth D, and stack a regressor module to obtain bounding boxes """ #hyperparameters numLayers, seqLength = 2, 5 noutputs, lr = 12, args.rnnLR inputSize, nHidden = 128, [64, 32] batchSize, maxIter = args.cbatchSize, args.cmaxIter #extract feture cube of last layer and reshape it res_classifier, feature_cube = None, None if args.classifier: #use pre-trained classifier res_classifier = ResNet(ResidualBlock, [3, 3, 3]) res_classifier.load_state_dict(torch.load('models225/' + args.classifier)) #freeze optimized layers for param in res_classifier.parameters(): param.requires_grad = False else: res_classifier = resnet #extract last convolution layer last_layer, feat_cube = res_classifier.layer3, [] for param in last_layer.parameters(): if param.dim() > 1: # extract only conv cubes feat_cube.append(param) # for i in range(len(feat_cube)): # print(feat_cube[i].size()) # print('b4 softmax: ', len(feat_cube)) ''' get the soft attention mechanism's x_t vector:: see this: https://arxiv.org/pdf/1511.04119.pdf ''' xt = [] for x in xrange(len(feat_cube)): temp = softmax(feat_cube[x]) xt.append(temp) # print(xt[x].size()) # print('after softmax: ', len(xt)) time.sleep(100) #accummulate all the features of the fc layer into a list for p in res_classifier.fc.parameters(): params_list.append(p) #will contain weights and biases params_weight, params_bias = params_list[0], params_list[1] #reshape params_weight params_weight = params_weight.view(128) X_tr = int(0.8*len(params_weight)) X_te = int(0.2*len(params_weight)) X = len(params_weight) #reshape inputs train_X = torch.unsqueeze(params_weight, 0).expand(seqLength, 1, X) test_X = torch.unsqueeze(params_weight[X_tr:], 0).expand(seqLength, 1, X_te+1) # Get regressor model and predict bounding boxes regressor = StackRegressive(res_cube=res_classifier, inputSize=128, nHidden=[64,32,12], noutputs=12,\ batchSize=args.cbatchSize, cuda=args.cuda, numLayers=2) #initialize the weights of the network with xavier uniform initialization for name, weights in regressor.named_parameters(): #use normal initialization for now init.uniform(weights, 0, 1) if(args.cuda): train_X = train_X.cuda() test_X = test_X.cuda() # regressor = regressor.cuda() #define optimizer optimizer = optim.SGD(regressor.parameters(), lr) # Forward + Backward + Optimize targ_X = None for _, targ_X in bbox_loader: targ_X = targ_X if args.cuda: targ_X = targ_X.cuda() for epoch in xrange(maxIter): for i in xrange(targ_X.size(1)*10): inputs = train_X targets = Variable(targ_X[:,i:i+seqLength,:]) optimizer.zero_grad() outputs = regressor(inputs) #reshape targets for inputs targets = targets.view(seqLength, -1) loss = regressor.criterion(outputs, targets) loss.backward() optimizer.step() if epoch % 5 == 0 and epoch >0: lr *= 1./epoch optimizer = optim.SGD(regressor.parameters(), lr) print('Epoch: {}, \tIter: {}, \tLoss: {:.4f}'.format( epoch, i, loss.data[0])) if i+seqLength >= int(targ_X.size(1)): break # #initialize the weights of the network with xavier uniform initialization # for name, weights in regressor.named_parameters(): # #use normal initialization for now 
# init.uniform(weights, 0, 1) # #extract last convolution layer # last_layer, feat_cube = res_classifier.layer3, [] # for param in last_layer.parameters(): # if param.dim() > 1: # extract only conv cubes # feat_cube.append(param) # ''' # get the soft attention mechanism's x_t vector:: # see this: # https://arxiv.org/pdf/1511.04119.pdf # ''' # lt = [] # this contains the soft max # for x in xrange(len(feat_cube)): # temp = softmax(feat_cube[x]) # lt.append(temp) # # # find xt = Sum_i^(KxK) l_t_i X_t_i # xt = [] # for i in xrange(len(feat_cube)): # temp = torch.mul(lt[i], feat_cube[i]) # xt.append(temp) # print(xt[i].size()) # # # Now feed each tensor in xt through LSTM layers # ''' # feat cube is of shape # 64L, 32L, 3L, 3L # 64L, 64L, 3L, 3L # 64L, 32L, 3L, 3L # 64L, 64L, 3L, 3L # 64L, 64L, 3L, 3L # 64L, 64L, 3L, 3L # 64L, 64L, 3L, 3L # '''
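The commented block above sketches the soft-attention step in prose; a compact, self-contained PyTorch version of the same computation, with illustrative shapes not tied to the classifier above, looks like this:

import torch
import torch.nn.functional as F

# Feature cube shaped like one of the conv weight tensors listed above.
feat_cube = torch.randn(64, 32, 3, 3)                            # (out_ch, in_ch, K, K)

flat = feat_cube.view(feat_cube.size(0), feat_cube.size(1), -1)  # (..., K*K)
l_t = F.softmax(flat, dim=-1)       # attention weights over the K*K positions
x_t = (l_t * flat).sum(dim=-1)      # x_t = sum_i l_t_i * X_t_i -> (64, 32)

print(x_t.shape)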
Discography of American Historical Recordings, s.v. "Recordings made on Saturday, November 3, 1928," accessed April 20, 2019, https://adp.library.ucsb.edu/index.php/date/browse?date=1928-11-03.
from datetime import datetime from loguru import logger from flexget import plugin from flexget.components.backlog.db import BacklogEntry, clear_entries, get_entries from flexget.event import event from flexget.manager import Session from flexget.utils.database import with_session from flexget.utils.serialization import serialize from flexget.utils.tools import parse_timedelta logger = logger.bind(name='backlog') class InputBacklog: """ Keeps task history for given amount of time. Example:: backlog: 4 days Rarely useful for end users, mainly used by other plugins. """ schema = {'type': 'string', 'format': 'interval'} @plugin.priority(plugin.PRIORITY_LAST) def on_task_input(self, task, config): # Get a list of entries to inject injections = self.get_injections(task) if config: # If backlog is manually enabled for this task, learn the entries. self.learn_backlog(task, config) # Return the entries from backlog that are not already in the task return injections @plugin.priority(plugin.PRIORITY_FIRST) def on_task_metainfo(self, task, config): # Take a snapshot of any new entries' states before metainfo event in case we have to store them to backlog for entry in task.entries: entry['_backlog_snapshot'] = serialize(entry) def on_task_abort(self, task, config): """Remember all entries until next execution when task gets aborted.""" if task.entries: logger.debug('Remembering all entries to backlog because of task abort.') self.learn_backlog(task) @with_session def add_backlog(self, task, entry, amount='', session=None): """Add single entry to task backlog If :amount: is not specified, entry will only be injected on next execution.""" snapshot = entry.get('_backlog_snapshot') if not snapshot: if task.current_phase != 'input': # Not having a snapshot is normal during input phase, don't display a warning logger.warning( 'No input snapshot available for `{}`, using current state', entry['title'] ) snapshot = serialize(entry) expire_time = datetime.now() + parse_timedelta(amount) backlog_entry = ( session.query(BacklogEntry) .filter(BacklogEntry.title == entry['title']) .filter(BacklogEntry.task == task.name) .first() ) if backlog_entry: # If there is already a backlog entry for this, update the expiry time if necessary. if backlog_entry.expire < expire_time: logger.debug('Updating expiry time for {}', entry['title']) backlog_entry.expire = expire_time else: logger.debug('Saving {}', entry['title']) backlog_entry = BacklogEntry() backlog_entry.title = entry['title'] backlog_entry.entry = snapshot backlog_entry.task = task.name backlog_entry.expire = expire_time session.add(backlog_entry) def learn_backlog(self, task, amount=''): """Learn current entries into backlog. 
All task inputs must have been executed.""" with Session() as session: for entry in task.entries: self.add_backlog(task, entry, amount, session=session) @with_session def get_injections(self, task, session=None): """Insert missing entries from backlog.""" entries = [] for backlog_entry in get_entries(task=task.name, session=session): entry = backlog_entry.entry # this is already in the task if task.find_entry(title=entry['title'], url=entry['url']): continue logger.debug('Restoring {}', entry['title']) entries.append(entry) if entries: logger.verbose('Added {} entries from backlog', len(entries)) # purge expired purged = clear_entries(task=task.name, all=False, session=session) logger.debug('{} entries purged from backlog', purged) return entries @event('plugin.register') def register_plugin(): plugin.register(InputBacklog, 'backlog', builtin=True, api_ver=2)
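A stripped-down, dependency-free sketch of the bookkeeping the plugin performs: remember each entry with an expiry deadline, extend the deadline when the entry is seen again, and purge expired entries when injecting. FlexGet's parse_timedelta('4 days') is reduced to a plain timedelta here.

from datetime import datetime, timedelta

backlog = {}

def remember(title, amount=timedelta(days=4)):
    deadline = datetime.now() + amount
    # Extend the deadline if the entry is already known, as add_backlog does.
    backlog[title] = max(backlog.get(title, deadline), deadline)

def injections():
    now = datetime.now()
    live = [title for title, expires in backlog.items() if expires > now]
    # Purge expired entries, mirroring clear_entries(all=False).
    for title in set(backlog) - set(live):
        del backlog[title]
    return live

remember('Some.Show.S01E01')
print(injections())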
Any content or material published or available for download on this website is for illustration and discussion purposes only and is not an offering or marketing of investments. An offering may be made only by delivery of a confidential offering memorandum to appropriate investors by a brokerage appointed by us and approved by the regulator. Copyright © 2018 Albatris. All rights reserved.
import datetime import logging import re import time import urllib2 from django.db import transaction from BeautifulSoup import BeautifulSoup import lxml.html from parliament.committees.models import (Committee, CommitteeMeeting, CommitteeActivity, CommitteeActivityInSession, CommitteeReport, CommitteeInSession) from parliament.core.models import Session from parliament.hansards.models import Document logger = logging.getLogger(__name__) COMMITTEE_LIST_URL = 'http://www2.parl.gc.ca/CommitteeBusiness/CommitteeList.aspx?Language=E&Parl=%d&Ses=%d&Mode=2' @transaction.commit_on_success def import_committee_list(session=None): if session is None: session = Session.objects.current() def make_committee(namestring, parent=None): #print namestring match = re.search(r'^(.+) \(([A-Z0-9]{3,5})\)$', namestring) (name, acronym) = match.groups() try: return Committee.objects.get_by_acronym(acronym, session) except Committee.DoesNotExist: committee, created = Committee.objects.get_or_create(name=name.strip(), parent=parent) if created: logger.warning(u"Creating committee: %s, %s" % (committee.name_en, committee.slug)) CommitteeInSession.objects.get_or_create( committee=committee, session=session, acronym=acronym) return committee soup = BeautifulSoup(urllib2.urlopen(COMMITTEE_LIST_URL % (session.parliamentnum, session.sessnum))) for li in soup.findAll('li', 'CommitteeItem'): com = make_committee(li.find('a').string) for sub in li.findAll('li', 'SubCommitteeItem'): make_committee(sub.find('a').string, parent=com) return True def _docid_from_url(u): return int(re.search(r'DocId=(\d+)&', u).group(1)) def _12hr(hour, ampm): hour = int(hour) hour += 12 * bool('p' in ampm.lower()) if hour % 12 == 0: # noon, midnight hour -= 12 return hour def _parse_date(d): """datetime objects from e.g. 
March 11, 2011""" return datetime.date( *time.strptime(d, '%B %d, %Y')[:3] ) def import_committee_documents(session): for comm in Committee.objects.filter(sessions=session).order_by('-parent'): # subcommittees last import_committee_meetings(comm, session) import_committee_reports(comm, session) time.sleep(1) COMMITTEE_MEETINGS_URL = 'http://www2.parl.gc.ca/CommitteeBusiness/CommitteeMeetings.aspx?Cmte=%(acronym)s&Language=E&Parl=%(parliamentnum)d&Ses=%(sessnum)d&Mode=1' @transaction.commit_on_success def import_committee_meetings(committee, session): acronym = committee.get_acronym(session) url = COMMITTEE_MEETINGS_URL % {'acronym': acronym, 'parliamentnum': session.parliamentnum, 'sessnum': session.sessnum} resp = urllib2.urlopen(url) tree = lxml.html.parse(resp) root = tree.getroot() for mtg_row in root.cssselect('.MeetingTableRow'): number = int(re.sub(r'\D', '', mtg_row.cssselect('.MeetingNumber')[0].text)) assert number > 0 try: meeting = CommitteeMeeting.objects.select_related('evidence').get( committee=committee,session=session, number=number) except CommitteeMeeting.DoesNotExist: meeting = CommitteeMeeting(committee=committee, session=session, number=number) meeting.date = _parse_date(mtg_row.cssselect('.MeetingDate')[0].text) timestring = mtg_row.cssselect('.MeetingTime')[0].text_content() match = re.search(r'(\d\d?):(\d\d) ([ap]\.m\.)(?: - (\d\d?):(\d\d) ([ap]\.m\.))?\s\(', timestring, re.UNICODE) meeting.start_time = datetime.time(_12hr(match.group(1), match.group(3)), int(match.group(2))) if match.group(4): meeting.end_time = datetime.time(_12hr(match.group(4), match.group(6)), int(match.group(5))) notice_link = mtg_row.cssselect('.MeetingPublicationIcon[headers=thNoticeFuture] a') if notice_link: meeting.notice = _docid_from_url(notice_link[0].get('href')) minutes_link = mtg_row.cssselect('.MeetingPublicationIcon[headers=thMinutesPast] a') if minutes_link: meeting.minutes = _docid_from_url(minutes_link[0].get('href')) evidence_link = mtg_row.cssselect('.MeetingPublicationIcon[headers=thEvidencePast] a') if evidence_link: evidence_id = _docid_from_url(evidence_link[0].get('href')) if meeting.evidence_id: if meeting.evidence.source_id != evidence_id: raise Exception("Evidence docid mismatch for %s %s: %s %s" % (committee.acronym, number, evidence_id, meeting.evidence.source_id)) else: # Evidence hasn't changed; we don't need to worry about updating continue else: if Document.objects.filter(source_id=evidence_id).exists(): raise Exception("Found evidence source_id %s, but it already exists" % evidence_id) meeting.evidence = Document.objects.create( source_id=evidence_id, date=meeting.date, session=session, document_type=Document.EVIDENCE) meeting.webcast = bool(mtg_row.cssselect('.MeetingStatusIcon img[title=Webcast]')) meeting.in_camera = bool(mtg_row.cssselect('.MeetingStatusIcon img[title*="in camera"]')) if not meeting.televised: meeting.televised = bool(mtg_row.cssselect('.MeetingStatusIcon img[title*="televised"]')) if not meeting.travel: meeting.travel = bool(mtg_row.cssselect('.MeetingStatusIcon img[title*="travel"]')) meeting.save() for study_link in mtg_row.cssselect('.MeetingStudyActivity a'): name = study_link.text.strip() study = get_activity_by_url(study_link.get('href')) meeting.activities.add(study) return True COMMITTEE_ACTIVITY_URL = 'http://www.parl.gc.ca/CommitteeBusiness/StudyActivityHome.aspx?Stac=%(activity_id)d&Language=%(language)s&Parl=%(parliamentnum)d&Ses=%(sessnum)d' def get_activity_by_url(activity_url): activity_id = int(re.search(r'Stac=(\d+)', 
activity_url).group(1)) session = Session.objects.get_from_parl_url(activity_url) try: return CommitteeActivityInSession.objects.get(source_id=activity_id).activity except CommitteeActivityInSession.DoesNotExist: pass activity = CommitteeActivity() url = COMMITTEE_ACTIVITY_URL % { 'activity_id': activity_id, 'language': 'E', 'parliamentnum': session.parliamentnum, 'sessnum': session.sessnum } root = lxml.html.parse(urllib2.urlopen(url)).getroot() acronym = re.search(r'\(([A-Z][A-Z0-9]{2,4})\)', root.cssselect('div.HeaderTitle span')[0].text).group(1) activity.committee = CommitteeInSession.objects.get(acronym=acronym, session=session).committee activity_type = root.cssselect('span.StacTitlePrefix')[0] activity.study = 'Study' in activity_type.text activity.name_en = activity_type.tail.strip()[:500] # See if this already exists for another session try: activity = CommitteeActivity.objects.get( committee=activity.committee, study=activity.study, name_en=activity.name_en ) except CommitteeActivity.DoesNotExist: # Get the French name url = COMMITTEE_ACTIVITY_URL % { 'activity_id': activity_id, 'language': 'F', 'parliamentnum': session.parliamentnum, 'sessnum': session.sessnum } root = lxml.html.parse(urllib2.urlopen(url)).getroot() activity_type = root.cssselect('span.StacTitlePrefix')[0] activity.name_fr = activity_type.tail.strip()[:500] activity.save() if CommitteeActivityInSession.objects.exclude(source_id=activity_id).filter( session=session, activity=activity).exists(): logger.warning("Apparent duplicate activity ID for %s %s %s: %s" % (activity, activity.committee, session, activity_id)) return activity CommitteeActivityInSession.objects.create( session=session, activity=activity, source_id=activity_id ) return activity COMMITTEE_REPORT_URL = 'http://www2.parl.gc.ca/CommitteeBusiness/ReportsResponses.aspx?Cmte=%(acronym)s&Language=E&Mode=1&Parl=%(parliamentnum)d&Ses=%(sessnum)d' @transaction.commit_on_success def import_committee_reports(committee, session): # FIXME rework to parse out the single all-reports page? 
acronym = committee.get_acronym(session) url = COMMITTEE_REPORT_URL % {'acronym': acronym, 'parliamentnum': session.parliamentnum, 'sessnum': session.sessnum} tree = lxml.html.parse(urllib2.urlopen(url)) def _import_report(report_link, parent=None): report_docid = _docid_from_url(report_link.get('href')) try: report = CommitteeReport.objects.get(committee=committee, session=session, source_id=report_docid, parent=parent) if report.presented_date: # We can consider this report fully parsed return report except CommitteeReport.DoesNotExist: if CommitteeReport.objects.filter(source_id=report_docid).exists(): if committee.parent and \ CommitteeReport.objects.filter(source_id=report_docid, committee=committee.parent).exists(): # Reference to parent committee report return None else: raise Exception("Duplicate report ID %s on %s" % (report_docid, url)) report = CommitteeReport(committee=committee, session=session, source_id=report_docid, parent=parent) report_name = report_link.text.strip() match = re.search(r'^Report (\d+) - (.+)', report_name) if match: report.number = int(match.group(1)) report.name_en = match.group(2).strip() else: report.name_en = report_name report.government_response = bool(report_link.xpath("../span[contains(., 'Government Response')]")) match = re.search(r'Adopted by the Committee on ([a-zA-Z0-9, ]+)', report_link.tail) if match: report.adopted_date = _parse_date(match.group(1)) match = re.search(r'Presented to the House on ([a-zA-Z0-9, ]+)', report_link.tail) if match: report.presented_date = _parse_date(match.group(1)) report.save() return report for item in tree.getroot().cssselect('.TocReportItemText'): report_link = item.xpath('./a')[0] report = _import_report(report_link) for response_link in item.cssselect('.TocResponseItemText a'): _import_report(response_link, parent=report) return True
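The small parsing helpers above are easy to sanity-check in isolation; the following standalone snippet (standard library only, helpers copied from the module above) verifies the noon/midnight handling and the optional end-time group of the meeting-time regex:

# Standalone check of the parsing helpers above (standard library only).
import datetime
import re
import time

def _12hr(hour, ampm):
    hour = int(hour)
    hour += 12 * bool('p' in ampm.lower())
    if hour % 12 == 0:  # noon, midnight
        hour -= 12
    return hour

def _parse_date(d):
    return datetime.date(*time.strptime(d, '%B %d, %Y')[:3])

assert _12hr('12', 'p.m.') == 12   # noon stays 12
assert _12hr('12', 'a.m.') == 0    # midnight becomes 0
assert _12hr('3', 'p.m.') == 15
assert _parse_date('March 11, 2011') == datetime.date(2011, 3, 11)

# The meeting-time regex makes the end time optional:
m = re.search(r'(\d\d?):(\d\d) ([ap]\.m\.)(?: - (\d\d?):(\d\d) ([ap]\.m\.))?\s\(',
              '11:00 a.m. - 1:00 p.m. (Room 237-C)')
assert m.group(1) == '11' and m.group(4) == '1'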
"""Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000. Example: Input: "babad" Output: "bab" Note: "aba" is also a valid answer. Example: Input: "cbbd" Output: "bb" """ def is_palindrome(string, low, high): i = low j = high while j > i: if string[i] != string[j]: return False i += 1 j -= 1 return True def longest_palindrome(string): if len(string) < 2: return string n = len(string) longest_palindrome_size = 0 longest_palindrome_start = 0 longest_palindrome_end = 0 # TC: O(N^3), where N = len(string) for i in range(n): # TC: O(N) for j in range(i, n): # TC: O(N) if is_palindrome(string, i, j) and j - i + 1 > longest_palindrome_size: # TC: O(N) longest_palindrome_size = j - i + 1 longest_palindrome_start = i longest_palindrome_end = j return string[longest_palindrome_start:longest_palindrome_end + 1] def longest_palindrome_dp(string): n = len(string) if n < 2: return string dp = [[False] * n for _ in range(n)] # All substring of length 1 are palindromes # TC: O(N) for i in range(n): dp[i][i] = True # Check for substrings of length 2 # TC: O(N) for i in range(n - 1): dp[i][i + 1] = string[i] == string[i + 1] # Check the rest of the substrings m = 2 # TC: O(N^2), where N = len(string) while m < n: # TC: O(N) for i in range(n - m): # TC: O(N / 2) = O(N) j = i + m dp[i][j] = string[i] == string[j] and dp[i + 1][j - 1] m += 1 longest_palindrome_size = 1 longest_palindrome_start = longest_palindrome_end = 0 # TC: O(N^2), where N = len(string) for i in range(n): # TC: O(N) for j in range(i + 1, n): # TC: O(N / 2) = O(N) if dp[i][j] and j - i + 1 > longest_palindrome_size: longest_palindrome_size = j - i + 1 longest_palindrome_start = i longest_palindrome_end = j return string[longest_palindrome_start:longest_palindrome_end + 1] def _expand_around_center(string, low, high): l, r = low, high # TC: O(N) while l >= 0 and r < len(string) and string[l] == string[r]: l -= 1 r += 1 return r - l - 1 def longest_palindrome_expand_around_center(string): start = end = 0 # TC: O(N^2), where N = len(string) for i in range(len(string) - 1): # TC: O(N) * O(N + N) = O(N^2) lp1 = _expand_around_center(string, i, i) # TC: O(N) lp2 = _expand_around_center(string, i, i + 1) # TC: O(N) max_length = max(lp1, lp2) if max_length > end - start + 1: start = i - ((max_length - 1) // 2) end = i + (max_length // 2) return string[start:end + 1] if __name__ == "__main__": test_cases = [ ("", ""), ("a", "a"), ("aa", "aa"), ("abaabc", "baab"), ("babad", "bab"), ("cbbd", "bb"), ("abaabc", "baab"), ("madama", "madam"), ("jklollolkidding", "klollolk") ] for string, expected_result in test_cases: result = longest_palindrome_expand_around_center(string) print(string, result) assert result == expected_result
001095: Parent version <value> does not exist. The parent version specified in the tool does not exist or is not accessible to the connected user. Specify a version that exists or is accessible to the connected user.
#coding=utf-8
from django.db import models

# Create your models here.

# create protocol_type
class Ssh_protocol(models.Model):
    ssh_protocol_type = models.CharField(max_length=10)

    def __unicode__(self):
        return self.ssh_protocol_type


# create Ip
class Ip(models.Model):
    ip_address = models.IPAddressField(max_length=20, unique=True)
    port = models.CharField(max_length=10)
    user = models.CharField(max_length=30)
    passwd = models.CharField(max_length=50)
    protocol_type = models.ForeignKey(Ssh_protocol)
    hostname = models.CharField(max_length=50, blank=True)
    publish_time = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.ip_address


# create grouplist
class Group(models.Model):
    group_name = models.CharField(max_length=20, blank=True)
    ips = models.ManyToManyField(Ip, blank=True)
    publish_time = models.DateTimeField(auto_now_add=True)
    iplist = []

    def save(self, *args, **kwargs):
        super(Group, self).save()
        for i in self.iplist:
            p, created = Ip.objects.get_or_create(ip_address=i)
            self.ips.add(p)

    def __unicode__(self):
        return u'%s %s' % (self.group_name, self.publish_time)


# create system_resource
class System_resource(models.Model):
    system_ver = models.CharField(max_length=50, blank=True)
    digit_number = models.CharField(max_length=10, blank=True)
    cpu = models.CharField(max_length=50, blank=True)
    cpu_number = models.CharField(max_length=50, blank=True)
    physics_mem = models.CharField(max_length=50, blank=True)
    swap_mem = models.CharField(max_length=50, blank=True)
    disk = models.CharField(max_length=50, blank=True)
    network_card = models.CharField(max_length=50, blank=True)
    ip = models.ForeignKey(Ip)

    def __unicode__(self):
        return self.system_ver


# create System_command
class System_command(models.Model):
    text = models.TextField(max_length=200, blank=True)
    input_time = models.DateTimeField(auto_now_add=True)
    ip = models.ForeignKey(Ip)

    def __unicode__(self):
        return self.text


# create System_stat
class System_stat(models.Model):
    user_stat = models.CharField(max_length=200, blank=True)
    time = models.DateTimeField(auto_now_add=True)
    server_stat = models.CharField(max_length=200, blank=True)
    system_resource = models.ForeignKey(System_resource)
    ip = models.ForeignKey(Ip)

    def __unicode__(self):
        return self.user_stat


# create System_task
class System_task(models.Model):
    file_name = models.CharField(max_length=50, blank=True)
    time = models.DateTimeField(auto_now_add=True)
    path = models.FilePathField(max_length=50, blank=True)
    comm = models.CharField(max_length=50, blank=True)
    processing_time = models.DateTimeField(auto_now_add=True)
    back_state = models.CharField(max_length=50, blank=True)
    ip = models.ForeignKey(Ip)
    # send_mail =

    def __unicode__(self):
        return self.file_name


# create Server
#class Server(models.Model):
#    http =
#    mysql =
#    cache =
#    ip =


# create Network
class Network(models.Model):
    input = models.CharField(max_length=50, blank=True)
    time = models.DateTimeField(auto_now_add=True)
    output = models.CharField(max_length=50, blank=True)
    ip = models.ForeignKey(Ip)

    def __unicode__(self):
        return self.input


# create Syslog
class Syslog(models.Model):
    system_log = models.TextField(max_length=300, blank=True)
    time = models.DateTimeField(auto_now_add=True)
    server_log = models.TextField(max_length=300, blank=True)
    ip = models.ForeignKey(Ip)

    def __unicode__(self):
        return self.system_log


# create System_use
class System_use(models.Model):
    mem = models.CharField(max_length=50, blank=True)
    time = models.DateTimeField(auto_now_add=True)
    cpu = models.CharField(max_length=50, blank=True)
    swap = models.CharField(max_length=50, blank=True)
    disk = models.CharField(max_length=50, blank=True)
    system_load = models.CharField(max_length=50, blank=True)
    ip = models.ForeignKey(Ip)

    def __unicode__(self):
        return self.mem


# create System_monitoring
class System_monitoring(models.Model):
    online_user = models.CharField(max_length=50, blank=True)
    time = models.DateTimeField(auto_now_add=True)
    # server = models.ForeignKey(Server)
    networK = models.ForeignKey(Network)
    syslog = models.ForeignKey(Syslog)
    system_use = models.ForeignKey(System_use)
    ip = models.ForeignKey(Ip)

    def __unicode__(self):
        return self.online_user


# create upload_file
class Document(models.Model):
    docfile = models.FileField(upload_to='documents/%Y-%m-%d')


# create System_servermanager
class System_servermanager(models.Model):
    servername = models.CharField(max_length=20, blank=True)
    scriptname = models.CharField(max_length=20, blank=True)
    time = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return self.servername


# create envname_ver
class Envname_ver(models.Model):
    envver = models.FloatField(blank=True, null=True)
    time = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        return unicode(self.envver)


# create Deployment_environment
class Deployment_Environment(models.Model):
    envname = models.CharField(max_length=20)
    scriptname = models.CharField(max_length=20)
    scriptpath = models.CharField(max_length=255)
    env_ver = models.ForeignKey(Envname_ver)

    def __unicode__(self):
        return '%s %s %s ' % (self.envname, self.scriptname, self.env_ver)
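A short sketch of how the custom Group.save() hook above is presumably meant to be used from a Django shell (a hypothetical session, not from the original source):

# Hypothetical Django shell usage of the Group model above.
# Group.save() first persists the row, then get_or_create()s an Ip
# for every address in iplist and attaches it to the M2M relation.
g = Group(group_name='web-servers')
g.iplist = ['10.0.0.1', '10.0.0.2']
g.save()
print(g.ips.count())  # 2 -- both Ip rows are now linked to the group

# Caveat: as written, iplist is a *class* attribute shared by every
# Group instance; setting an instance attribute in __init__ would be
# the safer design.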
Click the link below to access the Bosworth Academy UCAS Progress Logon Site. If your current school has access to UCAS Progress, please apply using this online system. All students intending to take A-Levels at Bosworth Academy will be expected to have at least grade 5 in both GCSE English Language and Maths as well as 3 other GCSEs at grade 5 or equivalent. Students who have completed the one year BTEC Course must have achieved a Merit or Distinction. In addition, some departments require particular grades as detailed on the subject summary sheet which is found on our Prospectus page. Please note that in line with all Sixth Form providers some courses may not run, if the numbers do not allow it. For level 3, choose at least 4 A/AS Level courses. When choosing courses for Sixth Form at the Academy, please ensure that they are listed in order of preference. In order to secure offers on your chosen courses it is strongly recommended that you apply by the end of January 2019.
# -*- coding: utf-8 -*-
# @Author: Cody Kochmann
# @Date:   2019-05-01 07:54:28
# @Last Modified by:   Cody Kochmann
# @Last Modified time: 2019-05-02 13:24:38

""" This demonstrates how you can use generator pipelines to implement tail in pure python. """

import os
from typing import Iterable

# this example does require you to have inotify installed
try:
    import inotify.adapters
except ImportError:
    raise ImportError('''
this example requires that you have "inotify" installed so python
can watch for file events. If you're using pip,

    "pip install inotify"

is all you need!
''')

from generators import Generator as G


def tail(file_path: str) -> Iterable[str]:
    assert os.path.isfile(file_path)
    notifier = inotify.adapters.Inotify()
    with open(file_path, 'r') as f:
        # the original registered this watch twice; once is enough
        notifier.add_watch(file_path)
        yield from G(  # create a Generator fed by the notifier
            notifier.event_gen(yield_nones=False)
        ).filter(  # filter for IN_MODIFY events (mask equals 2)
            lambda i: i[0].mask == 2
        ).map(  # when the file is modified, get the new size
            lambda i: os.path.getsize(i[2])
        ).uniq(  # filter duplicates, just in case nothing was added to the file
        ).window(  # window the (previous_size, current_size)
            2
        ).side_task(  # seek the file descriptor and pass the input since f.seek returns None
            lambda i: f.seek(i[0])
        ).map(  # read in the newly added data
            lambda i: f.read(i[1] - i[0])
        ).chain(  # chain the incoming chunks since they might not be single lines
        ).groupby(  # separate groups by lines
            lambda i: i == '\n'
        ).filter(  # exclude groups that are just '\n', since they are the delimiters
            lambda i: i[0] == False
        ).map(  # join the characters to construct each line as a string
            lambda i: ''.join(i[1])
        #).print('-', use_repr=True  # uncomment this line to see the constructed lines
        )


if __name__ == '__main__':
    from sys import argv
    for line in tail(argv[-1]):
        print(line.strip())
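For contrast with the pipeline style, here is a rough plain-Python version of the same tail loop, assuming the same inotify package (a sketch, not from the original source):

# Roughly equivalent plain-Python tail, without the generators library.
# Assumes the same "inotify" package; IN_MODIFY events carry mask 2.
import os
import inotify.adapters


def tail_plain(file_path):
    notifier = inotify.adapters.Inotify()
    notifier.add_watch(file_path)
    with open(file_path, 'r') as f:
        f.seek(0, os.SEEK_END)  # start at the current end of the file
        pending = ''
        for event in notifier.event_gen(yield_nones=False):
            header = event[0]
            if header.mask != 2:  # only care about IN_MODIFY
                continue
            pending += f.read()   # read() resumes from the current offset
            # emit only complete lines; keep a partial trailing line buffered
            while '\n' in pending:
                line, pending = pending.split('\n', 1)
                yield line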
import sys
from xml.sax.handler import ContentHandler
import xml.sax
import xml.parsers.expat
import ConfigParser
from collections import defaultdict


class Exact(xml.sax.handler.ContentHandler):
    def __init__(self):
        self.state = 0
        self.lens = []

    def startElement(self, name, attrs):
        if self.state == 0:
            if name == "field" and attrs['name'] == "usb.endpoint_number" and attrs['show'] == "0x81":
                self.state = 1
            elif name == "field" and attrs['name'] == "usb.endpoint_number" and attrs['show'] == "0x02":
                self.state = 2
        elif self.state == 1 and name == "field" and attrs['name'] == "usb.iso.data":
            print "In: " + attrs['show']
            self.state = -1
        elif self.state == 2 and name == "field" and attrs['name'] == "usb.win32.iso_data_len":
            self.lens.append(int(attrs['show'], 16))

    def endElement(self, name):
        if name == 'packet':
            if self.lens:
                d = defaultdict(int)
                s = ""
                for l in self.lens:
                    s += str(l / 12) + " "
                    d[l] += 1
                print "Out: " + str(d) + s
                self.lens = []
            self.state = 0


if __name__ == '__main__':
    parser = xml.sax.make_parser()
    handler = Exact()
    parser.setContentHandler(handler)
    parser.parse(open(sys.argv[1]))
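For reference, a minimal PDML-like fragment that exercises the handler above (a hypothetical input; real files would come from a USB capture exported as PDML):

# Hypothetical PDML fragment driving the state machine above: the first
# <field> arms state 2, the second triggers the iso_data_len bookkeeping,
# and </packet> prints the length histogram.
sample = '''<packet>
  <field name="usb.endpoint_number" show="0x02"/>
  <field name="usb.win32.iso_data_len" show="0x40"/>
</packet>'''

import xml.sax
xml.sax.parseString(sample, Exact())
# prints something like: Out: defaultdict(<type 'int'>, {64: 1})5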
"I thought we did some good things on both sides of the ball, but we know that we have a long ways to go to accomplish our goals. Our young guys got some time on the field and we were pleasantly surprised with their progress. We need to play more physical, better fundamentally, and have less miscues in execution," Gray said. "Yuma has some very athletic players, and they run a different scheme than what we traditionally see. It will be a big test for us on both sides of the ball. We have to be very sound fundamentally and tackle very well in the open field to give ourselves a chance." Pine Bluffs football assistant coach Nick Yelton runs over plays with members of the team Tuesday.
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# |             ____ _               _        __  __ _  __           |
# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
# |                                                                  |
# | Copyright Mathias Kettner 2012             [email protected]      |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.

import defaults, config

def page_index():
    start_url = html.var("start_url", config.start_url)
    html.req.headers_out.add("Cache-Control", "max-age=7200, public");

    if "%s" in config.page_heading:
        heading = config.page_heading % (defaults.omd_site or _("Multisite"))
    else:
        heading = config.page_heading

    html.write("""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<html>
<head>
<title>%s</title>
<link rel="shortcut icon" href="images/favicon.ico" type="image/ico">
</head>
<frameset cols="280,*" frameborder="0" framespacing="0" border="0">
<frame src="side.py" name="side" noresize scrolling="no">
<frame src="%s" name="main" noresize>
</frameset>
</html>
""" % (heading, start_url))

# This function does nothing. The sites have already
# been reconfigured according to the variable _site_switch,
# because that variable is processed by connect_to_livestatus()
def ajax_switch_site():
    pass
Burhani Trust Andhra Pradesh, a charitable trust founded by His Holiness Dr. Syedna Mohammad Burhanuddin Saheb (RA), spiritual leader of the Dawoodi Bohra community, sponsored and donated a Mobile Medical Unit to the Government of A.P. in Hyderabad. Mr. Anwar Saudagar, Trustee - Burhani Trust AP and Vice Chairman - AP Administrative Tribunal, handed over the keys of the medical unit to Dr. Balaji Utla, CEO - HMRI and Vice-Chairman, Satyam Foundation. Health Management & Research Institute (HMRI) will provide the medical personnel and run the Unit. Speaking on the occasion, Mr. Balaji appreciated the efforts of the Trust. He informed the audience that the Mobile Medical Unit is a precursor to the 100 vehicles that will be utilised by HMRI from 1st June 2008. A total of 475 units will be functioning by the year end. Mobile units like this will go a long way in overcoming the challenges of health care delivery, solving the last-mile problem and acting as a platform to deliver medical services, he added. The unit was launched amidst the gathering of Burhani Trust A.P. members, HMRI staff and dignitaries including Justice C.Y. Somayajulu, Justice P. Swaroop Reddy, Justice V.V.S. Rao, Justice B. Seshasayana Reddy, Justice T. Ch. Surya Rao, former minister Basheeruddin Babukhan, etc. The Mobile Medical Unit, costing Rs. 14 lakhs, will provide easy and affordable access to medical services to the needy in and around Hyderabad.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import datetime
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('goals', '0139_reset_montly_frequencies'),
    ]

    operations = [
        migrations.AddField(
            model_name='useraction',
            name='updated_on',
            field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 18, 4, 58, 692802, tzinfo=utc), auto_now=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userbehavior',
            name='updated_on',
            field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 18, 5, 2, 173238, tzinfo=utc), auto_now=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='usercategory',
            name='updated_on',
            field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 18, 5, 6, 229293, tzinfo=utc), auto_now=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='usergoal',
            name='updated_on',
            field=models.DateTimeField(default=datetime.datetime(2016, 4, 26, 18, 5, 11, 404946, tzinfo=utc), auto_now=True),
            preserve_default=False,
        ),
    ]
from __future__ import absolute_import

import pandas as pd
import numpy as np

import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds


def pickleModel(iModel):
    import pickle
    output = pickle.dumps(iModel)
    lReloadedObject = pickle.loads(output)
    output2 = pickle.dumps(lReloadedObject)
    assert(iModel.to_json() == lReloadedObject.to_json())
    return lReloadedObject;


b1 = tsds.load_ozone()
df = b1.mPastData

#df.tail(10)
#df[:-10].tail()
#df[:-10:-1]
#df.describe()

lEngine = autof.cForecastEngine()
lEngine

H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
lEngine.train(df, b1.mTimeVar, b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());

lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution

lEngine.standardPlots("outputs/my_ozone");

lEngine2 = pickleModel(lEngine)

dfapp_in = df.copy();
dfapp_in.tail()

#H = 12
dfapp_out = lEngine2.forecast(dfapp_in, H);
#dfapp_out.to_csv("outputs/ozone_apply_out.csv")
dfapp_out.tail(2 * H)
print("Forecast Columns ", dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar, b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n", Forecast_DF.tail(H));

print("\n\n<ModelInfo>")
print(lEngine2.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2 * H).to_json(date_format='iso'))
print("</Forecast>\n\n")
FVNERALS is a band based in Glasgow, UK, mixing elements of doom, post-rock, shoegaze and dark ambient. Balancing a heavy backing with delicate but equally powerful vocals, Fvnerals' latest album 'Wounds' is a captivating listening experience. Heading to the Black Heart as part of a larger headlining European tour, this performance promises to be as utterly bleak as it is hauntingly beautiful.
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# ***** BEGIN LICENSE BLOCK *****
# This file is part of Shelter Database.
# Copyright (c) 2016 Luxembourg Institute of Science and Technology.
# All rights reserved.
#
#
#
# ***** END LICENSE BLOCK *****

__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.2 $"
__date__ = "$Date: 2016/06/07 $"
__revision__ = "$Date: 2016/07/12 $"
__copyright__ = "Copyright 2016 Luxembourg Institute of Science and Technology"
__license__ = ""

#
# Views generated by Flask-Admin for the database administration.
#

from flask_login import current_user
from flask import current_app

from flask_admin import Admin, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.menu import MenuLink

from bootstrap import db
from web.models import User, Shelter, Value, Translation


class TranslationView(ModelView):
    column_searchable_list = ("original", "translated")
    column_filters = ["language_code"]
    column_editable_list = ["translated"]

    def is_accessible(self):
        return current_user.is_authenticated and current_user.is_admin


class ValueView(ModelView):
    column_searchable_list = ("name",)
    column_filters = ["attribute_id"]

    def is_accessible(self):
        return current_user.is_authenticated and current_user.is_admin


class UserView(ModelView):
    column_exclude_list = ["pwdhash"]
    column_editable_list = ["email", "name"]

    def is_accessible(self):
        return current_user.is_authenticated and current_user.is_admin


class ShelterView(ModelView):
    column_exclude_list = ["properties"]
    form_excluded_columns = ["properties"]

    def is_accessible(self):
        return current_user.is_authenticated and current_user.is_admin


menu_link_back_dashboard = MenuLink(name="Back to dashboard", url="/admin/dashboard")
menu_link_back_home = MenuLink(name="Back to home", url="/")

admin = Admin(
    current_app,
    name="Management of data",
    template_mode="bootstrap3",
    index_view=AdminIndexView(name="Home", url="/admin/data_management"),
)

admin.add_view(UserView(User, db.session))
admin.add_view(ShelterView(Shelter, db.session))
admin.add_view(ValueView(Value, db.session))
admin.add_view(TranslationView(Translation, db.session))
admin.add_link(menu_link_back_home)
admin.add_link(menu_link_back_dashboard)
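All four views repeat the same is_accessible() check; a small mixin would remove the duplication (a refactoring sketch, not in the original source):

# Refactoring sketch (not in the original source): factor the repeated
# admin-only check into a mixin that every view inherits from.
class AdminRequiredMixin:
    def is_accessible(self):
        return current_user.is_authenticated and current_user.is_admin


class TranslationView(AdminRequiredMixin, ModelView):
    column_searchable_list = ("original", "translated")
    column_filters = ["language_code"]
    column_editable_list = ["translated"]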
A small plane landed on Interstate 580 near the 164th Avenue exit in Ashland, in unincorporated San Leandro, Calif., Saturday night. SAN LEANDRO, Calif. (KGO) -- A small plane landed on Interstate 580 near the 164th Avenue exit in Ashland, in unincorporated San Leandro, Calif., Saturday night. "It's just crazy to see something like this. It's just unbelievable," said witness Rob Castro. At 6:46 p.m. on Saturday, the pilot, a man, and his passenger, a woman, made an emergency landing. The pilot told the California Highway Patrol that the fuel line snapped and caused the plane to lose pressure. Neighbors told ABC7 News they heard just that before witnessing the bizarre landing. "It was like, boom, like, a car was going out of gas or something," said neighbor Michelle Williams. "Then it hit the ground and all I can do was just run and scream and say, 'A plane is on our freeway!'" CHP Sgt. Michael Novosel said he's never seen anything like it. The agency released aerial footage of the aircraft near the right lanes. "Not every day, it's a first in my career," Novosel said. "You know, I've seen it across the state, but the first I know of in the Bay Area." The plane is registered in Reno, Nevada, according to the Federal Aviation Administration. It was on its way from Lake Tahoe to Hayward. The two inside the plane walked away uninjured, and no one on the ground was hurt. There was no damage to the freeway, to surrounding structures, or to the plane itself. But of course, the Saturday evening commute through Ashland was snarled as crews worked to remove the aircraft from the interstate. "You can have all the traffic you want in the world, but just long as nobody gets hurt. And you know, that's about it," Castro said.
from os import listdir, path
import json
import re


class Logger:
    def info(self, msg):
        print(msg)


def scan_directory(scan_path, allowed_extensions):
    r"""
    scan_path            path to scan
    allowed_extensions   extensions to consider
    """
    files = list()
    for f in listdir(scan_path):
        full_path = path.join(scan_path, f)
        if path.isdir(full_path):
            subfiles = scan_directory(full_path, allowed_extensions)
            for sf in subfiles:
                files.append(sf)
        else:
            correct_extension = False
            for ext in allowed_extensions:
                if f.endswith(ext):
                    correct_extension = True
                    break
            if correct_extension:
                files.append(full_path)
    return files


def build_dependency_tree(scan_path, includes, output):
    r"""
    scan_path   path to scan
    includes    directories to find includes
    output      filename of the output
    """
    logger = Logger()

    # Get files to analyse
    logger.info("List of files to analyse:")
    allowed_extensions = [".c", ".cpp", ".c++", ".cxx",
                          ".h", ".hpp", ".h++", ".hxx",
                          ".r"]
    files = scan_directory(scan_path, allowed_extensions)
    del allowed_extensions
    logger.info("> %d potential source files" % (len(files),))

    # Filter files on blacklist criteria
    include_files = list()
    dependency_tree = list()
    blacklist_criteria = [re.compile(r'sources\/others'), ]
    for f in files:
        blacklisted = False
        for criteria in blacklist_criteria:
            if criteria.search(f.replace('\\', '/')):
                blacklisted = True
                break
        if not blacklisted:
            include_files.append(f)
            dependency_tree.append({"file": f[len(scan_path):].replace('\\', '/'),
                                    "includes": list(),
                                    "used_by": list(), })
    del blacklist_criteria
    del files
    logger.info("> %d non-blacklisted source files" % (len(dependency_tree),))

    # Read source files for includes
    logger.info("Read and parse all files")
    include_regex = re.compile(r'#include\s+(["<])([^">]+)([">])')
    for source in include_files:
        with open(source, 'r') as f:
            # include_files already holds absolute paths, so look up the
            # source directly (the original re-joined scan_path here, which
            # produced a path that was never in the list)
            source_id = include_files.index(source)
            for line in f:
                # Is the line corresponding to an include?
                m = include_regex.search(line)
                if m and (m.group(1) == m.group(3) or (m.group(1) == '<' and m.group(3) == '>')):
                    include_name = m.group(2)
                    # What is the related file?
                    for include in includes:
                        # Build the path corresponding to <include>
                        include_path = include
                        for subdir in include_name.split('/'):
                            include_path = path.join(include_path, subdir)
                        # Known file?
                        if include_path in include_files:
                            include_id = include_files.index(include_path)
                            dependency_tree[source_id]["includes"].append(include_id)
                            dependency_tree[include_id]["used_by"].append(source_id)
                            break
        logger.info("> %d include(s)\tfor %s" % (len(dependency_tree[source_id]["includes"]), source,))

    with open(output, 'w') as f:
        f.write(json.dumps(dependency_tree))


def load_dependency_tree(output):
    with open(output, 'r') as f:
        return json.loads(f.read())


def who_is_using(scan_path, output, filename):
    if not filename.startswith(scan_path):
        raise Exception("Filename does not correspond to the scan path")

    dependency_tree = load_dependency_tree(output)

    include_files = list()
    for dep in dependency_tree:
        include_files.append(path.join(scan_path, dep["file"][1:].replace('/', path.sep)))

    if not filename in include_files:
        raise Exception("Filename has not been scanned")

    using_this_file = [filename, ]
    to_analyse = [include_files.index(filename), ]
    while len(to_analyse) > 0:
        for f_id in dependency_tree[to_analyse[0]]["used_by"]:
            if not include_files[f_id] in using_this_file:
                using_this_file.append(include_files[f_id])
                to_analyse.append(f_id)
        # clear the list so a file is never traversed twice
        dependency_tree[to_analyse[0]]["used_by"] = list()
        del to_analyse[0]
    return using_this_file


if __name__ == '__main__':
    scan_path = "/path/to/source/files"
    includes = ["/path/to/source", ]  # include "files/toto" can mean /path/to/source/files/toto
    output = "/path/to/output.json"
    test_path = "/path/to/source/files/foo.hpp"

    build_dependency_tree(scan_path, includes, output)
    print(who_is_using(scan_path, output, test_path))
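For orientation, the JSON written by build_dependency_tree is a flat list indexed by position; a hypothetical two-file project would serialize roughly like this (illustrative values only):

# Illustrative shape of output.json for a hypothetical two-file project:
# index 0 = "/a.cpp", which includes "/b.hpp" (index 1); the reverse edge
# is recorded in "used_by", which is what who_is_using() walks.
example = [
    {"file": "/a.cpp", "includes": [1], "used_by": []},
    {"file": "/b.hpp", "includes": [], "used_by": [0]},
]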