text
stringlengths 29
850k
|
---|
#!/usr/bin/env python3
# pylint: disable=C0103
"""This module does nothing ...."""
import math
from document.utils import printProgressBar
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from mlcore import TextService
import progressbar
import pyorient
# Connect to a local OrientDB server and open the 'datascience' database.
# NOTE(review): credentials are hard-coded — consider moving them to
# configuration or environment variables.
orientdb = pyorient.OrientDB("localhost", 2424)
orientdb.connect("root", "root")
orientdb.db_open('datascience', "root", "root")
# Shared tokenizer service used by the __main__ block below.
text_service = TextService()
def bag_of_bigrams_words(words, n=100, score_fn=BigramAssocMeasures.chi_sq):
    """Return the top *n* bigrams of *words* as a bag-of-bigrams feature dict.

    Args:
        words: iterable of tokens to scan for collocations.
        n: number of best-scoring bigrams to keep (default 100).
        score_fn: association measure used to rank bigrams
            (default chi-squared).

    Returns:
        dict mapping each selected bigram tuple to True.
    """
    bigram_finder = BigramCollocationFinder.from_words(words)
    bigrams = bigram_finder.nbest(score_fn, n)
    # Dict comprehension instead of dict([...]) — same result, idiomatic.
    return {bigram: True for bigram in bigrams}
def save_bigrams_as_many(bigrams):
    """Persist bigram counts into the OrientDB 'Bigrams' class.

    Currently a dry run: the record_create call is commented out, so each
    would-be record is only printed.

    Args:
        bigrams: mapping of bigram -> count (or truthy marker).
    """
    # Look up the default cluster id of the 'Bigrams' class from the schema.
    default_cluster = (orientdb.command("SELECT FROM ( SELECT expand( classes ) FROM metadata:schema ) WHERE name = 'Bigrams'")[0]).oRecordData['defaultClusterId']
    #orientdb.command("delete from Bigrams")
    # Iterate items() directly instead of keys() + per-key get() lookups.
    for key, qtd in bigrams.items():
        w = dict(id=key, qtd=qtd)
        # orientdb.record_create(default_cluster, w)
        print(w)
if __name__ == "__main__":
    print("bigramming..")
    # Pull a sample of documents to bigram over.
    result = orientdb.query("select * from Documento LIMIT 100")
    total = len(result)
    bar = progressbar.ProgressBar(max_value=total)
    # Collect document bodies in a list and join once at the end: repeated
    # string += concatenation in a loop is quadratic.
    texts = []
    for current, each in enumerate(result, start=1):
        bar.update(current)
        texts.append(each.texto_arquivo)
    bar.finish()
    all_texts = "".join(texts)
    bigrams = bag_of_bigrams_words(text_service.tokenize(all_texts))
    save_bigrams_as_many(bigrams)
|
You can order wholesale custom-designed antique copper metal coins with your own logo; we provide good-quality products at a competitive price.
1. Q: Can I get Metal Coin samples?
3. Q: What guarantee do I have that assures me I will get my order from you since I have to pay in advance? What happens if the Metal Coin you shipped are wrong or poorly made?
A: Artigifts has been in business since 2007. We do not only believe that our job consists in making good products but also building strong and long-term relationship with our customers. Our reputation among customers and their satisfaction are the main reasons for our success.Furthermore, whenever a customer makes an order, we can make approval samples on request. It is also in our own interest to get approval from the customer first before starting production. This is how we can afford a "Full After-Sales Service". If the Metal Coin does not meet your strict requirements, we can provide either immediate refund or immediate remakes at no extra cost to you.We have set up this model in order to set customers in a position of confidence and reliability.
|
from glob import glob
import os, sys
# When run directly, put the project root (two levels up from this file)
# on sys.path so the 'planet' package imports below resolve without an
# installed copy.
if __name__ == '__main__':
    rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.insert(0, rootdir)
from planet.spider import filename
from planet import config
def open():
    """Open the id->source dbhash index for writing.

    Returns the open dbhash handle, or None when the cache index
    directory does not exist or opening fails (the failure is logged).
    NOTE: shadows the builtin open() within this module.
    """
    try:
        cache = config.cache_directory()
        index=os.path.join(cache,'index')
        if not os.path.exists(index): return None
        import dbhash
        return dbhash.open(filename(index, 'id'),'w')
    except Exception, e:
        # dbhash wraps bsddb errors; a DBError carries (code, message) —
        # unwrap to the human-readable part before logging.
        if e.__class__.__name__ == 'DBError': e = e.args[-1]
        from planet import logger as log
        log.error(str(e))
def destroy():
    """Delete the id index file and remove the now-empty index directory.

    No-op (returns None) when the index directory does not exist.
    """
    from planet import logger as log
    cache = config.cache_directory()
    index=os.path.join(cache,'index')
    if not os.path.exists(index): return None
    idindex = filename(index, 'id')
    if os.path.exists(idindex): os.unlink(idindex)
    # NOTE(review): removedirs raises OSError if the index directory still
    # contains other files — presumably the 'id' file is the only content.
    os.removedirs(index)
    log.info(idindex + " deleted")
def create():
    """Build the id->source index from every cached entry document.

    Parses each file in the cache directory (with libxml2 when available,
    falling back to minidom) and records entry id -> source feed id for
    entries that carry both. Files that fail to parse are logged and
    skipped.

    Returns:
        The reopened index handle from open().
    """
    from planet import logger as log
    cache = config.cache_directory()
    index=os.path.join(cache,'index')
    if not os.path.exists(index): os.makedirs(index)
    import dbhash
    index = dbhash.open(filename(index, 'id'),'c')
    try:
        import libxml2
    except ImportError:
        libxml2 = False
        from xml.dom import minidom
    for file in glob(cache+"/*"):
        if os.path.isdir(file):
            continue
        elif libxml2:
            try:
                doc = libxml2.parseFile(file)
                ctxt = doc.xpathNewContext()
                ctxt.xpathRegisterNs('atom','http://www.w3.org/2005/Atom')
                entry = ctxt.xpathEval('/atom:entry/atom:id')
                source = ctxt.xpathEval('/atom:entry/atom:source/atom:id')
                if entry and source:
                    index[filename('',entry[0].content)] = source[0].content
                doc.freeDoc()
            except:
                log.error(file)
        else:
            try:
                doc = minidom.parse(file)
                doc.normalize()
                ids = doc.getElementsByTagName('id')
                entry = [e for e in ids if e.parentNode.nodeName == 'entry']
                source = [e for e in ids if e.parentNode.nodeName == 'source']
                if entry and source:
                    index[filename('',entry[0].childNodes[0].nodeValue)] = \
                        source[0].childNodes[0].nodeValue
                # BUG FIX: minidom documents are released with unlink(), not
                # freeDoc() (that is the libxml2 API). The old freeDoc() call
                # raised AttributeError, which the bare except turned into a
                # spurious log.error for every minidom-parsed file.
                doc.unlink()
            except:
                log.error(file)
    log.info(str(len(index.keys())) + " entries indexed")
    index.close()
    return open()
if __name__ == '__main__':
    # argv[1] is the planet config file; optional argv[2] selects an action.
    # NOTE(review): the usage string omits the required config argument —
    # presumably it should read 'Usage: %s config [-c|-d]'.
    if len(sys.argv) < 2:
        print 'Usage: %s [-c|-d]' % sys.argv[0]
        sys.exit(1)
    config.load(sys.argv[1])
    if len(sys.argv) > 2 and sys.argv[2] == '-c':
        # Rebuild the index from the cache.
        create()
    elif len(sys.argv) > 2 and sys.argv[2] == '-d':
        # Remove the index entirely.
        destroy()
    else:
        # Default action: report how many entries are currently indexed.
        from planet import logger as log
        index = open()
        if index:
            log.info(str(len(index.keys())) + " entries indexed")
            index.close()
        else:
            log.info("no entries indexed")
|
Find a person by email with Bermuda reverse email lookup.
Search businesses in Bermuda Yellow Pages - bermuda.com by business name, category or key word. The local directories include ratings and reviews. City locations, Hamilton, St. George and Sandys.
Find the businesses you are looking for in the Bermuda Yellow Pages - bermudayp.com. Search by name or category.
From USA dial: 011 + 1 + 441 + phone number.
From abroad dial: exit code + 1 + 441 + local number.
Bermuda Phone books gives you access to people lookup, business yellow book and reverse email lookup in Bermuda and worldwide. Find a Bermudian company phone number and we will show you how to call abroad. Need to call someone in Bermuda? No worries! See how to dial landline and mobile numbers in Bermuda. Simply enter the phone number in the correct international format. Find businesses by category, keyword, company name or business number. Locate people by email or search the local phonebook by first name, last name and location. Our free Bermudian yellowbook directory enquiries include white pages, yellow pages and email address lookup.
Search yellow pages for a business address or find a person in the white pages. Get instant results with Bermuda email lookup.
|
import sublime, sublime_plugin
from Javadoc import *
class JavadocCommand(sublime_plugin.TextCommand):
    """Sublime Text command that inserts Javadoc stubs for the class
    declaration and every public method signature in the current view."""

    def determineIndentation(self, region):
        """Return the indentation depth (in 4-space units) of the line
        containing *region*."""
        (row, col) = self.view.rowcol(region.begin())
        indent_region = self.view.find('^\s+', self.view.text_point(row, 0))
        # NOTE(review): assumes 4-space indentation — tab-indented files
        # would be miscounted.
        indent_level = len(self.view.substr(indent_region))/4
        return indent_level

    def alreadyCommented(self, region):
        """True when the line above *region* ends a block comment ("*/"),
        i.e. a Javadoc already precedes this declaration."""
        (row,col)= self.view.rowcol(region.begin())
        previous_line = self.view.line(self.view.text_point(row-1,0))
        if "*/" in self.view.substr(previous_line):
            return True
        else:
            return False

    def run(self, edit):
        """Insert a class-level Javadoc, then one per public method."""
        #check if it's a java file
        #fileName = self.view.file_name()[-4:]
        # NOTE(review): find() may return an empty/None region when no class
        # declaration matches — alreadyCommented would then misbehave.
        classSignature = self.view.find("""(public|private|protected) (abstract )?(class|interface|enum)""",0)
        #indentation_level = self.determineIndentation(classSignature)
        #maybe do this better?
        javadocer = Javadoc()
        if not self.alreadyCommented(classSignature):
            self.view.insert(edit,classSignature.begin(), javadocer.createClassJavadoc())
        startSearchPoint = 0
        foundPublicsCount = self.view.find_all("public.*\\)")
        #use the [region] as a counter of how many comments we're inserting
        for methodSignatures in foundPublicsCount:
            #find from startSearchPoint because everytime we insert comments,
            #all characters move so we have to continually keep searching for
            #the next method signature
            methodSignature = self.view.find("public.*\\)", startSearchPoint)
            methodSignatureString = self.view.substr(methodSignature)
            indentation_level = self.determineIndentation(methodSignature)
            javadocer = Javadoc(indentation_level, methodSignatureString)
            if not self.alreadyCommented(methodSignature):
                self.view.insert(edit,methodSignature.begin(),javadocer.createMethodJavadoc())
            startSearchPoint = methodSignature.end()+javadocer.charactersAdded
|
Precision Painting is the top choice of Pacific Palisades house painting contractors. When it comes to choosing a Pacific Palisades HOA painter, commercial painter, or if you’re even just looking for residential painters in Pacific Palisades, look no further than the pros at Precision Painting.
Pacific Palisades is home to a mixture of affluent homeowners, trendy apartments, and a tremendous community spirit. Whether you hike the hills, spend time with the kids in the large parklands, or snooze in the California sun, the last thing that you want to be doing on a Sunday is (attempting) to paint the whole house. With Pacific Palisades house painting contractors, you can rest easy knowing that your painting projects are taken care of. For Pacific Palisades commercial painting, trust the professionals from Precision Painting. We have the most experience as local house painters in Pacific Palisades!
As a homeowner, there are certain responsibilities that you need to do to keep the house at its best. Corrective and protective paint helps your home to stay strong and weather well, and a fresh lick of paint helps your home’s exterior to shine and stay on trend. However, all of this work is easier said than done.
Let the professional team at Precision Painting step in to handle your home painting projects. As Pacific Palisades house painting contractors, we have expertise working on high-end homes with unexpected challenges. Luckily, we keep our cool every single time.
Anyone can throw some paint on a wall. But you invested in your home, and it’s only right that it looks its best. With our team, there are no runs, no drips, no uneven coverage, no peeling or splitting after we leave. You get a paint job that you absolutely love and get to enjoy for years to come. We’re so confident that you’ll love our results that every client receives a 2-year guarantee. Now you don’t have to worry about painting again for 730 days straight!
Precision Painting works on a level that far exceeds our competition and your expectations, without blowing past your budget. If you’re looking for Pacific Palisades house painting contractors who get the job done on time, on budget, and honestly, Precision Painting is exactly who you should call!
|
from setuptools import setup # find_packages
from codecs import open
from os import path
# Read the long description from the README that sits next to this file.
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='django-rest-api',
    version='0.1.5',
    description='Django REST API',
    long_description=long_description,
    url='https://github.com/skies-io/django-rest-api',
    author='Skies',
    author_email='[email protected]',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        # 'Programming Language :: Python :: 2',
        # 'Programming Language :: Python :: 2.6',
        # 'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='django rest api',
    # packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    packages=['rest_api'],
    install_requires=['Django>=1.6'],
)
|
• 晶莹美颜霜 0.5 Oz. Net Wt.
• 手部修护霜 1 Oz. Net Wt.
• 纤颜紧致精华乳 0.3 Oz. Net Wt.
• 花样年华眼部修护霜 0.2 Oz. Net Wt.
• 迷你装Supra Volume睫毛膏 01 Intense Black 0.1 Oz. Net Wt.
*Receive a 6-Piece FREE Gift + a Beauty Pouch+ 3 FREE samples of your choice with any $100. Expires on 04/30/17 at 11:59PM PST. Standard offer terms and conditions apply: valid at Clarins.com only, while supplies last, available samples may vary. We reserve the right to substitute samples in the event a sample goes out of stock. Offers available to US residents only. All offers are only 1 gift per order and per customer unless otherwise noted and while supplies last. All purchases are subject to bank authorization before shipment. Packages will not be shipped without proper authorization. We reserve the right to cancel any order due to unauthorized use of offer and to modify or cancel this promotion due to technical error or unforeseen problems. We reserve the right to substitute any free item offered with an item of equal or greater value. Promotional offers and discounts may not be applied to the purchase of Clarinsusa.com e-Gift Certificates. The value of Clarinsusa.com e-Gift Certificates purchases are not used to determine promotional thresholds. Applicable purchase amount does not include shipping, handling or sales tax charges. Promotions may not be applied to past purchases. Any promotion will not be granted and may not be redeemed for a future purchase if the initial transaction is cancelled or item(s) are returned.
|
#!/usr/local/bin/python3
"""Script to watch Adobe Security page
This script looks at https://helpx.adobe.com/security.html and parses the
html file for all the products. It then determines if the page was updated
from the last check.
Design Spec
- Create 2 tables with the following schemas
Table 1
- Product Name
- Product URL
- Published Date
Table 2
- Product Name
- Link
- Title
- Posted
- Updated
- collection_date
"""
import urllib.request
from urllib.parse import urljoin
import bs4 as bs
url = "https://helpx.adobe.com/security.html"


def main():
    """Fetch the Adobe security page and print each bulletin's details.

    Prints the page-level publish date, then for every row of the bulletin
    table emits link, title, posted and updated fields, and finally fetches
    the bulletin page itself.
    """
    print(url)
    sauce = urllib.request.urlopen(url).read()
    soup = bs.BeautifulSoup(sauce, "html.parser")
    # The page-level publish date lives in a <meta name="publishDate"> tag.
    for meta in soup.find_all('meta', attrs={'name': 'publishDate'}, content=True):
        print("Published Date : {}".format(meta.get('content')))
    bulletin_items = soup.find(class_="noHeader")
    # Skip the header row; each remaining <tr> is one security bulletin.
    # (Removed an unused find_all("a") result and a duplicated 'link ='
    # assignment from the original.)
    for t_row in bulletin_items.find_all("tr")[1:]:
        t_data = t_row.find_all("td")
        link = t_row.find_all("a")[0].get("href")
        href = urljoin(url, link)
        print("Link : {}\n"
              "Title : {}\n"
              "Posted : {}\n"
              "Updated : {}\n".format(href, t_data[0].text, t_data[1].text, t_data[2].text))
        f_href = urllib.request.urlopen(href).read()
        print(f_href)
    # NOTE(review): exit(0) looks like a debug artifact; kept for behavior.
    exit(0)


if __name__ == "__main__":
    main()
|
Paypal Offer – Get 50% Cashback Upto Rs 200 at Netmeds, Do you want discount on purchase of medicines? Now your wait is over for the reason that Netmeds has come up with a new offer. Netmeds is offering flat 50% Cashback Upto Rs 200 when you purchase medicines at Netmeds. This offer is valid from 22nd January to 28th February 2019. So follow below steps and avail this offer.
Now login or register a new account on Netmeds.
verify your email and mobile number.
Then purchase any products on Netmeds.
On Payment page select Paypal as payment method.
The PayPal Cashback offer entitles you to get up to Rs. 200 PayPal instant cashback (Flat 50%) when you purchase a minimum of Rs. 50 worth of ANY products using the PayPal payment option at Netmeds.com/Netmeds App.
The PayPal Cashback offer period is valid from 22nd January to 28th February 2019.
The 50% PayPal cashback offer is valid ONLY on the FIRST–EVER PayPal transaction.
This means that the customers who have previously made the transaction using the PayPal India account on any other PayPal merchant network ARE NOT eligible for this offer.
The maximum PayPal Cashback amount is capped at Rs. 200 per user.
The minimum order value to be eligible for the PayPal cashback is Rs. 50.
The 50% PayPal Cashback of up to Rs. 200, under this Offer, will be credited INSTANTLY to the eligible customer’s PayPal account upon successful completion of a transaction, which will also be notified via email.
The PayPal Cashback (or voucher), credited in to your PayPal account, will expire 60 days from the date of credit.
To view your PayPal Cashback (or voucher amount), log in to your PayPal India account and select the ‘Payment Methods’ tab. Your PayPal voucher will appear under ‘Saved Offers’.
|
#!/usr/bin/env python2.7
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import multiprocessing
import os
import shutil
import subprocess
import sys
# Make run_tests/python_utils importable relative to this script's location.
sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import check_on_pr
# Command-line interface: optional base ref to diff against, and a job count
# for parallel make (defaults to all CPUs).
argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
argp.add_argument(
    '-d',
    '--diff_base',
    type=str,
    help='Commit or branch to compare the current one to')
argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
args = argp.parse_args()
# Shared libraries whose size is measured by bloaty below.
LIBS = [
    'libgrpc.so',
    'libgrpc++.so',
]
def build(where):
    """Compile the tree and set the resulting libs/ directory aside.

    The output is renamed to bloat_diff_<where> so that two builds
    ('old' and 'new') can later be compared by bloaty.
    """
    target = 'bloat_diff_%s' % where
    subprocess.check_call('make -j%d' % args.jobs, shell=True, cwd='.')
    shutil.rmtree(target, ignore_errors=True)
    os.rename('libs', target)
# Build the current tree first.
build('new')
# When a base ref is given, check it out, build it too, and always restore
# the original ref afterwards (Python 2 syntax: except X, e / print stmt).
if args.diff_base:
    old = 'old'
    where_am_i = subprocess.check_output(
        ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
    subprocess.check_call(['git', 'checkout', args.diff_base])
    subprocess.check_call(['git', 'submodule', 'update'])
    try:
        try:
            build('old')
        except subprocess.CalledProcessError, e:
            # A dirty tree can break the build; clean once and retry.
            subprocess.check_call(['make', 'clean'])
            build('old')
    finally:
        subprocess.check_call(['git', 'checkout', where_am_i])
        subprocess.check_call(['git', 'submodule', 'update'])
# Build the bloaty size-analysis tool itself.
subprocess.check_call(
    'make -j%d' % args.jobs, shell=True, cwd='third_party/bloaty')
# Assemble a per-library size report: a diff against the old build when it
# exists, otherwise an absolute breakdown of the new build.
text = ''
for lib in LIBS:
    text += '****************************************************************\n\n'
    text += lib + '\n\n'
    old_version = glob.glob('bloat_diff_old/opt/%s' % lib)
    new_version = glob.glob('bloat_diff_new/opt/%s' % lib)
    assert len(new_version) == 1
    cmd = 'third_party/bloaty/bloaty -d compileunits,symbols'
    if old_version:
        assert len(old_version) == 1
        text += subprocess.check_output(
            '%s %s -- %s' % (cmd, new_version[0], old_version[0]), shell=True)
    else:
        text += subprocess.check_output(
            '%s %s' % (cmd, new_version[0]), shell=True)
    text += '\n\n'
print text
# Post the report as a PR status/comment.
check_on_pr.check_on_pr('Bloat Difference', '```\n%s\n```' % text)
|
Foundry is a free, open-source solution for managing a Science Olympiad Tournament.
Hosting your tournament with Foundry is quick, easy, and will always be free.
Keep track of all participating teams online. Team management integrates with online scoring and event registration to provide a seamless experience for participating schools.
Foundry features online registration for events, so competing teams can resolve conflicts before arriving at the tournament. It's easier on coaches and event supervisors, but, most importantly, online registration gives the best opportunity for competing students to actually compete in the events they have prepared long and hard for!
Distribute scores the fastest way. No more custom Excel spreadsheets, Foundry will rank teams automatically and according to Science Olympiad tie-breaking rules.
Instantly create a presentation for your awards ceremony. No downloads required!
Case Western Reserve University's tournament has used Foundry for three years. Click here to see the 2013 tournament.
The Akron Regional Science Olympiad uses Foundry! Click here to see the 2013 tournament.
Foundry was written by a Science Olympiad alum to provide logistical support for tournaments.
|
import GNUCapSimulationData as gc
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
# NOTE(review): plotly credentials are hard-coded here — move to a config.
py.sign_in('squarebracket', '6edn8gin4t')
# Generate characterization plots for each of the three circuit variants.
for x in [1, 2, 3]:
    sim_data = None
    sim_data = gc.GNUCapSimulationData('circuit%s.csv' % x, index_col='Time',
                                       delim_whitespace=True)
    # Extract DC levels, supply current/power, and average propagation delay.
    (vol, voh, total_current, total_power, avg_prop) = sim_data.get_properties(vout_node='sum', signal_node='a', psu_name='VDD')
    print('Parameters for Circuit %s' % x)
    print('VOL: %f' % vol)
    print('VOH: %f' % voh)
    print('total current: %s' % total_current)
    print('total power: %s' % total_power)
    print('average tprop: %s' % avg_prop)
    # Voltage timing diagram: each row pairs a signal with its complement.
    subplot_data = [
        ['V(a)', 'V(a_inv)'],
        ['V(b)', 'V(b_inv)'],
        ['V(cin)', 'V(cin_inv)'],
        ['V(h)', 'V(i)'],
        ['V(j)', 'V(k)'],
        ['V(x)', 'V(y)'],
        ['V(sum)', 'V(sum_inv)'],
    ]
    f = sim_data.plot_timing_diagram(subplot_data, y1_label=r'$\mathrm{Voltage}$',
                                     line_style={'alpha': 1},
                                     sec_line_style={'linestyle': '--', 'alpha': 1})
    f.savefig('vtd_%s.pdf' % x)
    # Current timing diagram (this first assignment is overwritten below —
    # kept as in the original).
    subplot_data = ['I(VDD)', 'I(CH)', 'I(CI)', 'I(CJ)', 'I(CK)', 'I(CX)', 'I(CY)', 'I(CSum)', 'I(CSum_inv)']
    # Aggregate the four PMOS drain currents into one derived column.
    sim_data.df['I(PMOS)'] = sim_data.df['ID(M1P)'] + sim_data.df['ID(M5P)'] + \
                             sim_data.df['ID(M9P)'] + sim_data.df['ID(M13P)']
    subplot_data = [
        ['I(VDD)',],
        # ['I(CH)', 'ID(M1P)', 'ID(M2N)',],
        # ['I(CI)', 'ID(M5P)', 'ID(M6N)',],
        # ['I(CJ)', 'ID(M9P)', 'ID(M10N)',],
        # ['I(CK)', 'ID(M13P)', 'ID(M14N)',],
        # ['I(CX)'],
        # ['I(CY)'],
        # ['I(CH)', 'ID(M1P)', 'ID(M2N)', 'ID(M3N)', 'ID(M4N)'],
        # ['I(CI)', 'ID(M5P)', 'ID(M6N)', 'ID(M7N)', 'ID(M8N)'],
        # ['I(CJ)', 'ID(M9P)', 'ID(M10N)', 'ID(M11N)', 'ID(M12N)'],
        # ['I(CK)', 'ID(M13P)', 'ID(M14N)', 'ID(M15N)', 'ID(M16N)'],
        # ['ID(M1P)', 'ID(M2N)', 'ID(M3N)', 'ID(M4N)'],
        # ['ID(M5P)', 'ID(M6N)', 'ID(M7N)', 'ID(M8N)'],
        # ['ID(M9P)', 'ID(M10N)', 'ID(M11N)', 'ID(M12N)'],
        # ['ID(M13P)', 'ID(M14N)', 'ID(M15N)', 'ID(M16N)'],
        ['ID(M1P)', 'ID(M5P)', 'ID(M9P)', 'ID(M13P)'],
        ['ID(M2N)', 'ID(M6N)', 'ID(M10N)', 'ID(M14N)'],
        ['I(CH)', 'I(CI)', 'I(CJ)', 'I(CK)'],
    ]
    f = sim_data.plot_timing_diagram(subplot_data, y1_label=r'$\mathrm{Current}$',
                                     line_style={'alpha': 0.5}, unit='A', yscale=1,
                                     sec_line_style={'alpha': 0.5}, y1_lim=(-0.0006, 0.0006), hspace=None)
    f.savefig('itd_%s.pdf' % x)
    # Combined voltage/current plots: (voltage node, [current traces]).
    subplot_data = [
        ('V(h)', ['I(CH)', 'ID(M1P)', 'ID(M2N)']),
        ('V(i)', ['I(CI)', 'ID(M5P)', 'ID(M6N)']),
        ('V(j)', ['I(CJ)', 'ID(M9P)', 'ID(M10N)']),
        ('V(k)', ['I(CK)', 'ID(M13P)', 'ID(M14N)']),
        ('V(x)', ['I(CX)', 'ID(M3N)', 'ID(M4N)', 'ID(M7N)', 'ID(M12N)']),
        ('V(y)', ['I(CY)', 'ID(M8N)', 'ID(M11N)', 'ID(M15N)', 'ID(M16N)']),
        ('V(sum)', ['I(CSum)', 'ID(M4N)', 'ID(M8N)'])
    ]
    # Circuit 3 presumably has an extra transistor on the sum node.
    if x == 3:
        subplot_data[6][1].append('ID(M17P)')
    # Zoom windows around the input transitions (5/10/15/20 ns edges).
    time_slices=(
        (np.float64('5E-9'), np.float64('7E-9')),
        (np.float64('10E-9'), np.float64('12E-9')),
        (np.float64('15E-9'), np.float64('17E-9')),
        (np.float64('20E-9'), np.float64('22E-9')),
    )
    f = sim_data.plot3(subplot_data, right_line_style={'alpha': 0.75}, left_unit='V', right_unit='A', yscale=1.2,
                       left_line_style={'alpha': 0.2}, hspace=0, time_slices=time_slices)
    f.savefig('itd3_%s.pdf' % x)
    f = sim_data.plot2(subplot_data, right_line_style={'alpha': 0.75}, left_unit='V', right_unit='A', yscale=1.2,
                       left_line_style={'alpha': 0.2}, hspace=0)
    f.savefig('itd2_%s.pdf' % x)
    # ax = sim_data.df[['ID(M1P)', 'ID(M5P)', 'ID(M9P)', 'ID(M13P)']].plot(kind='area', figsize=(8,1.2))
    # ax.figure.savefig('../report/test.pdf')
    subplot_data = (
        (['V(h)'], ['I(CH)', 'ID(M1P)', 'ID(M2N)', 'ID(M3N)']),
        (['V(x)', 'V(h)', 'V(i)'], ['ID(M3N)', 'ID(M4N)', 'ID(M7N)', 'ID(M12N)', 'I(CH)', 'I(CI)', 'I(CX)']),
    )
    f = sim_data.plot2(subplot_data, line_style={'alpha': 0.7}, left_unit='V', right_unit='A')
    f.savefig('current_%s.pdf' % x)
    # Threshold-voltage comparison on node x.
    f = sim_data.plot_timing_diagram(['V(x)', 'VTH(M3N)', 'VTH(M4N)', 'VTH(M7N)', 'VTH(M12N)'])
    f.savefig('a%s.pdf' % x)
    # Close-up of the output glitch between 4 ns and 9 ns.
    f = sim_data.plot_timing_diagram([['V(sum)', 'V(sum_inv)'], ['I(VDD)']],
                                     line_style={'alpha': 0.8},
                                     sec_line_style={'alpha': 0.8, 'linestyle': '--'},
                                     start_time=np.float64('4.E-9'), end_time=np.float64('9.E-9'),
                                     sharey=False)
    f.savefig('blip_%s.pdf' % x)
|
Source cheap and high quality products of power tools, hand tools online from Chinese tools manufacturers & suppliers. Our knowledgeable buyer service will help you choose just the right Diamond Jewelry factory in reliable price. Welcome to discuss about the 2018 newest trends associated with tooling industries in our communities. When purchasing hardware and tools from China, quality control, logistic organization, and delivery control are all very important factors to take into consideration. If you are looking for Diamond Jewelry for sale, check out the above products choices that you can't miss along with other options such as fashion jewelry, costume jewelry, fashion jewellery. Find out what's trending and get fast access to industry content all in one location. Inquire & Get Quotation Now!
|
import re
from django.db import models
from django.core.validators import RegexValidator
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
#==============================================================================
# Matches "#RGB" or "#RRGGBB" hex color strings. Raw string avoids the
# invalid '\#' escape sequence (DeprecationWarning in Python 3).
color_re = re.compile(r'^\#([a-fA-F0-9]{6}|[a-fA-F0-9]{3})$')
validate_color = RegexValidator(color_re, _("Enter a valid color."), 'invalid')
#==============================================================================
class RGBColorField(models.CharField):
    """CharField storing an RGB color as '#RRGGBB' (or '#RGB').

    A missing leading '#' is added automatically during cleaning.
    """
    default_validators = [validate_color]

    def __init__(self, *args, **kwargs):
        # Force max_length: '#' plus up to six hex digits.
        kwargs['max_length'] = 7
        super(RGBColorField, self).__init__(*args, **kwargs)

    def clean(self, value, model_instance):
        """Normalize to a leading '#', then run the standard field clean.

        Guard against empty/None values: the original indexed value[0],
        which raised IndexError (or TypeError for None) before validation
        could report a proper error.
        """
        if value and not value.startswith('#'):
            value = '#' + value
        value = super(RGBColorField, self).clean(value, model_instance)
        return smart_text(value)

    def deconstruct(self):
        # max_length is re-imposed by __init__, so keep it out of migrations.
        name, path, args, kwargs = super(RGBColorField, self).deconstruct()
        del kwargs["max_length"]
        return name, path, args, kwargs
|
These Sock Shop Gentle Grip® Plain Socks are both comfortable and stylish with the unique HoneyComb Top that combats uncomfortable pressure or tightness whilst maintaining grip to your legs.
Sock Shop Gentle Grip® Socks are perfect for those who are susceptible to poor circulation. These Gentle Grip socks are made from cotton rich yarn keeping you dry and warm all day.
Free UK Postage You will receive 6 pairs of socks Gentle Grip with Comfortable Honeycomb Top Comfort Toe Seam Machine Washable at 40° Plain: 69% Cotton 19% Polyester 10% Nylon 2% Elastane Marl: 75% Cotton 20% Polyester 3% Nylon 2% Elastane These Sock Shop Gentle Grip® Plain Socks are both comfortable and stylish with the unique HoneyComb Top that combats uncomfortable pressure or tightness whilst maintaining grip to your legs. Sock Shop Gentle Grip® Socks are perfect for those who are susceptible to poor circulation. These Gentle Grip socks are made from cotton rich yarn keeping you dry and warm all day.
|
import asyncio
from common.config import config
from common import rpc
from common import storm
from common import twitch
from common import utils
import dateutil.parser
# Seconds between polls of the Twitch followers list.
FOLLOWER_CHECK_DELAY = 60

class TwitchFollows:
    """Periodically polls Twitch followers and fires 'twitch-follow' events
    for newly seen followers."""

    def __init__(self, lrrbot, loop):
        self.lrrbot = lrrbot
        self.loop = loop
        # Timestamp of the newest follow seen so far (None until first poll).
        self.last_timestamp = None
        # User ids already counted at/after last_timestamp, to deduplicate
        # followers sharing the same created_at value.
        self.last_users = set()
        self.schedule_check()

    def schedule_check(self):
        """Kick off one poll and reschedule itself FOLLOWER_CHECK_DELAY later."""
        asyncio.ensure_future(self.check_follows(), loop=self.loop).add_done_callback(utils.check_exception)
        self.loop.call_later(FOLLOWER_CHECK_DELAY, self.schedule_check)

    async def check_follows(self):
        if self.last_timestamp is None:
            # First poll: record the newest timestamp and every user sharing
            # it, without firing events (followers arrive newest-first).
            async for follower in twitch.get_followers():
                if self.last_timestamp is None or self.last_timestamp == follower['created_at']:
                    self.last_timestamp = follower['created_at']
                    self.last_users.add(follower['user']['_id'])
                else:
                    break
        else:
            # Subsequent polls: collect follows at/after last_timestamp that
            # were not already counted last time.
            last_users = self.last_users
            self.last_users = set()
            events = []
            async for follower in twitch.get_followers():
                if follower['created_at'] >= self.last_timestamp:
                    if follower['user']['_id'] not in last_users:
                        events.append((
                            follower['user'].get('display_name') or follower['user']['name'],
                            follower['user'].get('logo'),
                            follower['created_at'],
                        ))
                    self.last_users.add(follower['user']['_id'])
                else:
                    break
            if not events:
                # Nothing new: keep the previous dedup set intact.
                self.last_users = last_users
            # Fire events oldest-first, advancing last_timestamp as we go.
            for name, avatar, timestamp in events[::-1]:
                self.last_timestamp = timestamp
                timestamp = dateutil.parser.parse(timestamp)
                event = {
                    'name': name,
                    'avatar': avatar,
                    'count': storm.increment(self.lrrbot.engine, self.lrrbot.metadata, 'twitch-follow'),
                }
                await rpc.eventserver.event('twitch-follow', event, timestamp)
|
Nearby Messages script for an Android app. (1) Publish a message from the app. (2) Subscribe to messages and connect to the other device. (3) Unpublish the message. (4) Unsubscribe and stop receiving device messages. API Details: [kirjaudu nähdäksesi URL:n] ---- This job is to write the script using the Messages API for Android.
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Main URL routes for the site.
urlpatterns = patterns(
    '',
    url(r'^$', TemplateView.as_view(template_name='index.html'), name="home"),
    url(r'^detectors/', include('detectors.urls')),
    url(r'^leaderboard/', include('leaderboards.urls')),
    url(r'^videostream/', include('videostream.urls')),
    url(r'^how_it_works/$', TemplateView.as_view(template_name='how_it_works.html'),
        name="how_it_works"),
    url(r'^api_documentation/$', TemplateView.as_view(template_name='api_documentation.html'),
        name="api_documentation"),
    url(r'^about/$', TemplateView.as_view(template_name='about.html'),
        name="about"),
    url(r'^contact/', include('envelope.urls'), name="contact"),
    # create account over api
    (r'^accounts/api/', include('accounts.urls')),
    # userena app
    (r'^accounts/', include('userena.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
# Include the login and logout views for the API.
urlpatterns += patterns('',
    url(r'^api-token-auth/', 'rest_framework.authtoken.views.obtain_auth_token')
)
# Metrics and profiling endpoints.
urlpatterns += patterns('',
    url(r'^metrics/', include('redis_metrics.urls')),
    url(r'^profiler/', include('profiler.urls')),
)
# (Removed a stray duplicate `url(r'^metrics/', ...)` expression that sat at
# module level outside any patterns() call and therefore had no effect.)
# Allow access to the Media folder from the browser
if settings.DEBUG:
    urlpatterns += patterns(
        '',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
        url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.STATIC_ROOT,
        }),
    )
|
Discuss topics related to the health of our cats and advice on how to help treat common health problems and issues including cat nutrition.
Can My Cat Get The Flu From Me?
Does Your Cat Have Whisker Stress?
Welcome to Petsr4life. We are a friendly community of people that promote the welfare of our pets, by sharing our experience and knowledge.
[email protected] Community Software by Invision Power Services, Inc.
|
from django.contrib.auth.decorators import login_required, user_passes_test, \
permission_required
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import loader
from django.core.urlresolvers import reverse, reverse_lazy
from django.views import generic
from django.utils.decorators import method_decorator
from django.core.exceptions import ValidationError
from django.db.models import Avg, Value, Sum, Count
from django.db.models.functions import Coalesce
from .forms import MealForm, DishForm, TicketForm, TicketFormSet, \
InstAmtForm, InstPriceForm, \
NewInstForm, NewInstStdForm, NewStdInstForm
import json, datetime, time
from .models import Resource_Type, Resource_Inst, Resource_Ticket, \
TicketManager, Meal, Dish, Standard_Inst
from .admin import other_checks
# Create your views here.
# Decorators applied to views: require login plus the extra account checks
# defined in admin.other_checks.
decs = [ login_required, user_passes_test(other_checks)]
# Threshold passed to templates as 'semi_open' — NOTE(review): exact
# semantics are defined template-side; confirm before changing.
SEMI_OPEN_STATE_THRESHOLD = 10
# Toggle for rendering progress bars in the calendar view.
ENABLE_CAL_PROGRESS_BARS = True
def iso_to_gregorian(iso_year, iso_week, iso_day):
    """Return the Gregorian date for the given ISO year, week and day.

    Anchors on January 5th, which always falls within the first ISO
    weeks of its year, then offsets by whole weeks and days from that
    anchor's own ISO coordinates.
    """
    anchor = datetime.date(iso_year, 1, 5)
    _anchor_year, anchor_week, anchor_day = anchor.isocalendar()
    week_delta = iso_week - anchor_week
    day_delta = iso_day - anchor_day
    return anchor + datetime.timedelta(weeks=week_delta, days=day_delta)
@login_required
@user_passes_test(other_checks)
def index(request):
    """Render the calendar overview of the current user's meals.

    Meals are grouped by (ISO year, ISO week) into a 7-day grid; each
    day cell holds (label, detail-link, open_cost, total_cost) tuples,
    and a trailing summary column carries the weekly total (T) and
    per-meal average (A).
    """
    # We want to know what meals there are
    meal_list = Meal.objects.filter(
        meal_owner=request.user.id).order_by('-cons_time')
    # cal maps (iso_year, iso_week) -> {iso_weekday: [meals]}
    cal = {}
    for meal in meal_list:
        iy, im, iw = meal.cons_time.isocalendar()
        if (iy, im) not in cal:
            cal[(iy, im)] = {}
        if iw not in cal[(iy, im)]:
            cal[(iy, im)][iw] = []
        cal[(iy, im)][iw].append(meal)
    for e in cal:
        # e is a week, e.g. (2016, 1)
        # cal[e] is a dict of meals by day {iw: [meal]} that week
        weekMeals = [[] for i in range(7)]
        tot, mc, opensum = 0, 0, 0
        # BUGFIX: was xrange(7), which does not exist on Python 3;
        # range() is behaviorally identical for this 7-element loop.
        for w in range(7):
            weekMeals[w] = cal[e][w+1] if w+1 in cal[e] else []
            for meal in weekMeals[w]:
                tot += meal.get_meal_cost()
                mc += 1
                opensum += meal.open_cost
            # Costs appear to be stored in hundredths of a pound;
            # NOTE(review): confirm units of get_meal_cost/open_cost.
            weekMeals[w] = [ (u"%s \xA3%.2f" % (meal.meal_type[0],
                                meal.get_meal_cost()/100),
                              reverse("mealy:meal_detail", args=(meal.id,)),
                              meal.open_cost, meal.get_meal_cost())
                                for meal in weekMeals[w]]
            # weekMeals[0] = (monday) [ ("L 2.77", det_link, 0.56, 2.77), ... ]
            weekMeals[w] = [iso_to_gregorian(e[0], e[1], w+1).strftime("%b %d"),
                            weekMeals[w]]
            # weekMeals[0] = [ "Mar 14", [("L...", det_link, 0.56, 2.77), ...] ]
            weekMeals[w][1].sort()
        # Trailing summary column: week total and per-meal average.
        weekMeals.append(["", [(u"T \xA3%.2f (%.2f)" % (tot/100, opensum/100),
                                False, opensum, tot),
                               (u"A \xA3%.2f (%.2f)" %
                        (tot/100/mc, opensum/100/mc), False, opensum/mc, tot/mc)]])
        cal[e] = weekMeals
    # Most recent week first.
    cal = sorted(list(cal.items()), reverse=True)
    template = loader.get_template("mealy/meals.html")
    contDict = { 'meal_list': meal_list,
                 'mtypes': Meal.MEAL_TYPES,
                 'meal_form': MealForm,
                 'user': request.user,
                 'cal_meals': cal,
                 'semi_open': SEMI_OPEN_STATE_THRESHOLD,
                 'prog_bars': ENABLE_CAL_PROGRESS_BARS,
                 }
    return HttpResponse(template.render(contDict, request))
@method_decorator(decs, name='dispatch')
class MealView(generic.DetailView):
    """Detail page for a single meal owned by the requesting user."""

    def get_queryset(self):
        # Scope the lookup to the current user so foreign meals 404.
        owner = self.request.user
        return Meal.objects.filter(id=self.kwargs['pk'], meal_owner=owner)

    def get_context_data(self, **kwargs):
        # Expose the dish-creation form alongside the meal details.
        ctx = super(MealView, self).get_context_data(**kwargs)
        ctx['dish_form'] = DishForm
        return ctx
@method_decorator(decs, name='dispatch')
class NewMeal(generic.edit.CreateView):
    """Create a meal owned by the current user, then return to the index."""
    form_class = MealForm
    success_url = reverse_lazy("mealy:index")
    def form_invalid(self, form):
        # Fail loudly on bad input instead of re-rendering the form.
        raise ValidationError("Invalid form value", code='invalid')
    def form_valid(self, form):
        # Ownership is stamped server-side; the form never exposes it.
        form.instance.meal_owner = self.request.user
        return super(NewMeal, self).form_valid(form)
@method_decorator(decs, name='dispatch')
class DeleteMeal(generic.edit.DeleteView):
    """Delete an empty meal (no dishes) owned by the current user.

    GET requests bounce back to the meal detail page so deletion can
    only happen via POST.
    """
    # BUGFIX: was misspelled `models`, an attribute Django ignores.
    model = Meal
    def get_queryset(self):
        # Only the owner's meals, and only meals without dishes.
        return Meal.objects.filter( id=self.kwargs['pk'],
                                    meal_owner=self.request.user,
                                    dish__isnull=True)
    def get(self, *args, **kwargs):
        return HttpResponseRedirect(reverse("mealy:meal_detail",
                                            args=[self.get_object().id]))
    def get_success_url(self):
        return reverse("mealy:index")
@method_decorator(decs, name='dispatch')
class NewDish(generic.edit.CreateView):
    """Add a dish to one of the current user's meals."""

    form_class = DishForm

    def form_invalid(self, form):
        raise ValidationError("Invalid form value", code='invalid')

    def form_valid(self, form):
        # Attach the dish to the meal from the URL, verifying ownership.
        parent_meal = Meal.objects.get(id=self.kwargs['meal_id'],
                                       meal_owner=self.request.user)
        form.instance.par_meal = parent_meal
        return super(NewDish, self).form_valid(form)

    def get_success_url(self):
        return reverse("mealy:meal_detail", args=[self.kwargs['meal_id']])
@method_decorator(decs, name='dispatch')
class DeleteDish(generic.edit.DeleteView):
    """Delete a dish with no tickets; GETs bounce to the dish page."""
    # BUGFIX: was misspelled `models`, an attribute Django ignores.
    model = Dish
    def get_queryset(self):
        # Only the owner's dishes, and only dishes without tickets.
        return Dish.objects.filter( id=self.kwargs['pk'],
                                    par_meal__meal_owner=self.request.user,
                                    resource_ticket__isnull=True)
    def get(self, *args, **kwargs):
        return HttpResponseRedirect(reverse("mealy:dish_detail",
                                            args=[self.get_object().id]))
    def get_success_url(self):
        return reverse("mealy:meal_detail", args=[self.object.par_meal.id])
@method_decorator(decs, name='dispatch')
class DishView(generic.DetailView):
    """Detail page for a dish, with a form to attach resource tickets."""

    def get_queryset(self):
        return Dish.objects.filter( id=self.kwargs['pk'],
                                    par_meal__meal_owner=self.request.user)

    def get_context_data(self, **kwargs):
        context = super(DishView, self).get_context_data(**kwargs)
        # The form is user-scoped so it only offers the user's instances.
        context['tkt_form'] = TicketForm(self.request.user)
        return context

    def post(self, *args, **kwargs):
        """Create a Resource_Ticket linking an instance to this dish."""
        form = TicketForm(self.request.user, self.request.POST)
        if not form.is_valid():
            raise Http404("Invalid form")
        self.object = self.get_object()
        # The manager handles unit accounting and optional exhaustion.
        # BUGFIX: the returned ticket was bound to an unused local `nt`;
        # the binding has been dropped.
        Resource_Ticket.objects.create_ticket(
            form.cleaned_data['resource_inst'],
            form.cleaned_data['units_used'],
            self.object,
            form.cleaned_data['exhausted'])
        return HttpResponseRedirect(
            reverse("mealy:dish_detail", args=[self.object.id]))
@method_decorator(decs, name='dispatch')
class TypesOverview(generic.ListView):
    """List every resource type, ordered by parent type."""
    queryset = Resource_Type.objects.order_by('r_parent')
@method_decorator(decs, name='dispatch')
class TypesView(generic.DetailView):
    """Detail page for a resource type, looked up by its name."""
    # Resolve the URL slug against the type's r_name field.
    slug_field = "r_name"
    queryset = Resource_Type.objects.all()
@method_decorator(decs, name='dispatch')
class StdInstListView(generic.ListView):
    """List standard instances with a creation form in the context."""

    queryset = Standard_Inst.objects.order_by('inst_type')

    def get_context_data(self, **kwargs):
        ctx = super(StdInstListView, self).get_context_data(**kwargs)
        ctx['nsiForm'] = NewStdInstForm
        return ctx
@method_decorator(decs, name='dispatch')
class StdInstDetailView(generic.DetailView):
    """Detail page for a single standard (template) instance."""
    queryset = Standard_Inst.objects.all()
@method_decorator(decs + [permission_required('mealy.can_add_standard_resource_instance')], name='dispatch')
class NewStdInst(generic.edit.CreateView):
    """Create a standard instance; requires an extra model permission."""
    form_class = NewStdInstForm
    success_url = reverse_lazy("mealy:std_insts")
    def form_invalid(self, form):
        # Fail loudly on bad input instead of re-rendering the form.
        raise ValidationError("Invalid form value", code='invalid')
@method_decorator(decs, name='dispatch')
class InventView(generic.ListView):
    """Inventory listing for the current user.

    The ``showAll`` URL kwarg controls whether exhausted instances are
    included in the listing.
    """

    def get_queryset(self):
        queryset = Resource_Inst.objects.filter(
            inst_owner=self.request.user)
        if not self.kwargs['showAll']:
            # The default view hides instances that are fully used up.
            queryset = queryset.filter(exhausted=False)
        return queryset.order_by('res_type', 'purchase_date')

    def get_context_data(self, **kwargs):
        ctx = super(InventView, self).get_context_data(**kwargs)
        # Distinct auto_id keeps this form's element ids from clashing
        # with the plain NewInstForm rendered on the same page.
        ctx.update({
            'types': Resource_Type.objects.all(),
            'showAll': self.kwargs['showAll'],
            'niForm': NewInstForm,
            'nisForm': NewInstStdForm(auto_id='newinststdform_%s'),
        })
        return ctx
@method_decorator(decs, name='dispatch')
class DeleteTicket(generic.edit.DeleteView):
    """Delete a resource ticket; GETs bounce to the instance page."""
    # BUGFIX: was misspelled `models`, an attribute Django ignores.
    model = Resource_Ticket
    def get_queryset(self):
        # Only tickets whose underlying instance belongs to the user.
        return Resource_Ticket.objects.filter( id=self.kwargs['pk'], resource_inst__inst_owner=self.request.user)
    def get(self, *args, **kwargs):
        return HttpResponseRedirect(reverse("mealy:inv_detail",
                                            args=[self.get_object().resource_inst.id]))
    def get_success_url(self):
        return reverse("mealy:inv_detail", args=[self.object.resource_inst.id])
@login_required
@user_passes_test(other_checks)
def invent_detail(request, inst_id):
    """Show one resource instance; handle its finalise/price/amount forms.

    POSTs carry a 'formtype' discriminator:
      * "finalise"    -- finalise or de-finalise the instance
      * "pricechange" -- update the purchase price
      * "amtchange"   -- update the original amount
    Unknown form types raise 404. Successful POSTs redirect back here.
    """
    inst = get_object_or_404(Resource_Inst, id=inst_id, inst_owner=request.user)
    if request.method == "POST":
        formType = request.POST['formtype']
        if formType == "finalise":
            defin = request.POST['finalisation']
            if defin == "final":
                inst.finalise()
            elif defin == "definal":
                inst.definalise()
            else:
                raise Http404("Finalisation invalid")
        elif formType == "pricechange":
            # A finalised (exhausted) instance must be temporarily
            # de-finalised so the price change recomputes correctly.
            initf = inst.exhausted
            if initf:
                inst.definalise()
            newPrice = int(request.POST['price'])
            inst.change_price(newPrice)
            if initf:
                inst.finalise()
        elif formType == "amtchange":
            newAmt = float(request.POST['orig_amt'])
            inst.change_amt(newAmt)
        else:
            raise Http404("We're not sure what form you submitted")
        # Post/redirect/get: re-render the detail page after any change.
        return HttpResponseRedirect(reverse("mealy:inv_detail", args=[inst.id]))
    tickets = Resource_Ticket.objects.filter(resource_inst=inst).order_by('par_dish')
    similar_insts = inst.similar_set()
    similar_att = inst.similar_attrs()
    template = loader.get_template("mealy/inv_detail.html")
    contDict = { 'inst': inst,
                    'price_form': InstPriceForm,
                    'amt_form': InstAmtForm,
                    'tickets': tickets,
                    'sim_list': similar_insts,
                    'sim_att': similar_att,
                }
    return HttpResponse(template.render(contDict, request))
@method_decorator(decs, name='dispatch')
class NewInst(generic.edit.CreateView):
    """Create a free-form resource instance owned by the current user."""

    form_class = NewInstForm
    success_url = reverse_lazy("mealy:inventory")

    def form_invalid(self, form):
        raise ValidationError("Invalid form value", code='invalid')

    def form_valid(self, form):
        new_inst = form.instance
        new_inst.inst_owner = self.request.user
        # Free-form instances always use informal units.
        new_inst.unit_use_formal = False
        return super(NewInst, self).form_valid(form)
@method_decorator(decs, name='dispatch')
class DeleteInst(generic.edit.DeleteView):
    """Delete a never-ticketed resource instance; GETs redirect back."""
    # BUGFIX: was misspelled `models`, an attribute Django ignores.
    model = Resource_Inst
    success_url = reverse_lazy("mealy:inventory")
    def get_queryset(self):
        # Only the owner's instances, and only those never ticketed.
        return Resource_Inst.objects.filter( id=self.kwargs['pk'],
                                             inst_owner=self.request.user,
                                             resource_ticket__isnull=True)
    def get(self, *args, **kwargs):
        # BUGFIX: was reverse("mealy:invent_detail", ...); every other
        # view reverses this page as "mealy:inv_detail".
        return HttpResponseRedirect(reverse("mealy:inv_detail",
                                            args=[self.get_object().id]))
@method_decorator(decs, name='dispatch')
class NewInstStd(generic.edit.CreateView):
    """Create a resource instance by copying fields from a standard one."""
    form_class = NewInstStdForm
    success_url = reverse_lazy("mealy:inventory")
    def form_invalid(self, form):
        raise ValidationError("Invalid form value", code='invalid')
    def form_valid(self, form):
        # Owner and unit mode are set server-side, never by the form.
        form.instance.inst_owner = self.request.user
        form.instance.unit_use_formal = False
        # Copy the template's descriptive fields onto the new instance.
        form.instance.res_name = form.cleaned_data['std_inst'].inst_name
        form.instance.res_type = form.cleaned_data['std_inst'].inst_type
        form.instance.orig_unit = form.cleaned_data['std_inst'].orig_unit
        form.instance.amt_original = form.cleaned_data['std_inst'].orig_amt
        form.instance.best_before = form.cleaned_data['std_inst'].use_bestbef
        # "Relative" templates take their amount from the user's input
        # instead of the template's fixed amount (overrides the copy above).
        if form.cleaned_data['std_inst'].is_relative:
            form.instance.amt_original = form.cleaned_data['amt_dummy']
        return super(NewInstStd, self).form_valid(form)
@login_required
@user_passes_test(other_checks)
def getStandardInst(request):
    """AJAX endpoint: return the field summary of one standard instance."""
    std_inst = get_object_or_404(Standard_Inst, id=request.GET['id'])
    return HttpResponse(std_inst.show_fields())
|
Играй всеки ден нови забавни игри за деца, игри за момичета, спортни игри, игри с карти. Play the free online game 8 Ball Pool multiplayer at Y8.com ! Click to play 8 Ball Pool multiplayer free game! We have also selected the best free games. Play the free online game Drift Challenge at Y8.com ! Click to play Drift Challenge free game! We have also selected the best free games like Drift Challenge.
Gamersenterprise.com offers free flash arcade games including Action games, Adventure games, Racing games, Sports games, Shoot Скачать приложения о Игры для android похожие на dream league soccer 2017, clash royale, clash of clans. Reviews “MASTERPIECE! Inside is a 2D puzzle platformer that builds upon what made Limbo great, and in fact builds something greater.” 10/10 We provide healthy, nonviolent games that are suitable Play Online games, free Online games and best games online. Collection of 20,000 free Online games including Puzzle games, Action games, Mario Games, Shooting Games.
Your zone to play free games online! Play free games online including racing games, sports games and more at GamesGames.com. UpdateStar is compatible with Windows platforms. UpdateStar has been tested to meet all of the technical requirements to be compatible with Windows 10, 8.1, Windows. Outlook.com is a free, personal email service from Microsoft. Keep your inbox clutter-free with powerful organizational tools, and collaborate easily with OneDrive. Star Wars: Knights of the Old Republic (KotOR) is a roleplaying game made originally for the Microsoft Xbox and later released for the PC and is also avaliable. Bresplatne igrice na internetu, ima vise od tisucu besplatnih igrica. Ako je moguce ocjenite igrice. Igrice Solitaire: Igrice su podeljene u kategorije Игри - белот, сантасе, бридж, блато, табла, не се сърди човече, окей, свара, домино, монопол. Free online Chess server. Play Chess now in a clean interface. No registration, no ads, no plugin required. Play Chess with the computer, friends or random opponents. An alternate reality game (ARG) is an interactive networked narrative that uses the real world as a platform and employs transmedia storytelling to deliver a story.
Play free online Action games, Racing games, Sports games, Adventure games, War games and more at AGAME.COM. Aurora, a young girl from 1895 Austria, awakens on the lost fairytale continent of Lemuria. To return home she must fight against the dark creatures of the Queen. A webbased naruto online tactical multiplayer game with more then 100.000 members. Featuring ladders, missions and hundreds of characters and skills. Skill7 - real winnings and real opponents! The best online skill games out there. У нас представлены лучшие онлайн настольные игры бесплатно. Мы собрали огромную коллекцию.
|
#!/usr/bin/python2
"""
initialize the redis databases and create and store a random maze recursively
"""
import redis
import random
import sys
import math
# NOTE(review): 'global' at module scope has no effect; kept from original.
global gMaze, sx, sy, it
# gMaze is a flat, row-major list of rooms; each room maps direction
# (0=north, 1=east, 2=south, 3=west) -> 'w' (wall) or 'd' (door),
# plus a 'visited' flag used by the depth-first carver.
gMaze = []
maxX = 6
maxY = 6
sx = 6
sy = 6
# Iteration counter used only for progress output in recurse().
it = 0
# Depth-first carving recurses once per room; keep the limit
# comfortably above maxX*maxY.
sys.setrecursionlimit( 2000)
# Redis database indices used by createStoreMaze().
db_MazeLayout = 0
db_Entities = 1
random.seed()
# Start with every room fully walled and unvisited.
for i in range( 0, maxX*maxY):
    gMaze.append({0:'w',1:'w',2:'w',3:'w','visited':False})
def recurse(x, y):
    """Depth-first maze carving starting from room (x, y).

    Visits unvisited neighbours in random order and knocks a door ('d')
    through the shared wall in both rooms' wall tables
    (0=north, 1=east, 2=south, 3=west).
    """
    # BUGFIX: the global statement previously read "gMaze, sy, sy, it",
    # repeating sy and omitting sx.
    global gMaze, sx, sy, it
    # Parenthesised single-arg print works identically on Python 2 and 3.
    print("iteration: "+str(it)+" Room @ "+str(x)+" : "+str(y))
    it = it + 1
    gMaze[(y*sx) + x]['visited'] = True
    # Randomise the order in which the four neighbours are tried.
    seq = [0,1,2,3]
    random.shuffle( seq)
    for i in range(0,4):
        neighbour = seq[i]
        if neighbour == 0:
            # North
            nx = x
            ny = y-1
            if ny < 0 or gMaze[(ny*sx) + nx]['visited']:
                continue
            else:
                gMaze[(y*sx) + x][0] = 'd'
                gMaze[(ny*sx) + nx][2] = 'd'
                recurse(x, ny)
        elif neighbour == 1:
            # East
            nx = x+1
            ny = y
            if nx >= sx or gMaze[(ny*sx) + nx]['visited']:
                continue
            else:
                gMaze[(y*sx) + x][1] = 'd'
                gMaze[(ny*sx) + nx][3] = 'd'
                recurse(nx, y)
        elif neighbour == 2:
            # South (BUGFIX: indexed with x instead of nx; equal here,
            # but nx is used for consistency with the other branches)
            nx = x
            ny = y+1
            if ny >= sy or gMaze[(ny*sx) + nx]['visited']:
                continue
            else:
                gMaze[(y*sx) + x][2] = 'd'
                gMaze[(ny*sx) + nx][0] = 'd'
                recurse(x, ny)
        elif neighbour == 3:
            # West
            nx = x-1
            ny = y
            if nx < 0 or gMaze[(ny*sx) + nx]['visited']:
                continue
            else:
                gMaze[(y*sx) + x][3] = 'd'
                gMaze[(ny*sx) + nx][1] = 'd'
                recurse(nx,y)
def createStoreMaze():
    """Carve the maze from room (0,0) and store every room in Redis.

    Rooms are stored under keys "x<col>y<row>" in db_MazeLayout; the
    bottom-right room gets the "stairs" item. Both databases are
    flushed first.
    """
    global gMaze, sx, sy
    recurse( 0,0)
    r = redis.StrictRedis( host='localhost', port=6379, db=db_Entities)
    r.flushdb()
    r = redis.StrictRedis( host='localhost', port=6379, db=db_MazeLayout)
    r.flushdb()
    for i in range( 0, sy):
        for s in range( 0, sx):
            items=""
            # The bottom-right room holds the stairs.
            if s==(sx-1) and i==(sy-1):
                items = "stairs"
            # NOTE(review): the dict is stored via its str() repr; readers
            # must parse it back -- confirm the intended serialisation.
            data = {'walls':( gMaze[(i*sx)+s][0], gMaze[(i*sx)+s][1], gMaze[(i*sx)+s][2], gMaze[(i*sx)+s][3]), 'entities':[], 'items':[items], 'grafiti':""};
            k = "x"+str(s)+"y"+str(i)
            r.set( k, data)
    print "\ndone\n"
createStoreMaze();
|
I always appreciate a unique patina. And while this one is not so out of this world different in terms of skill, it is unique to what you commonly see among Patina artists. The Shine, a Polish patina/shoe shine service that also sells shoes, is becoming for me one of Europe’s more exciting Patina artists. If you take a gander through the Instagram account you can see the growth of their skill and the range of patinas that they offer, and it is becoming more and more exciting to see what they will come up with next — but more so to see if they will expand their range of shoes, as most of them are simply semi brogues, derbies, wholecut oxfords and your classic double monks. It would be great to see some boots on there, whether it be a chelsea or a balmoral boot. But I guess with the rise in popularity will come demand for new products, so time should tell soon!
|
import pytest
import matplotlib
matplotlib.use('AGG') # use a non-interactive backend
from matplotlib import pyplot as plt
from lifetimes import plotting
from lifetimes import BetaGeoFitter, ParetoNBDFitter, ModifiedBetaGeoFitter
from lifetimes.datasets import load_cdnow_summary, load_transaction_data
from lifetimes import utils
# Module-level fixture: fit a BG/NBD model once on the CDNOW summary
# data and reuse it across the plotting tests below.
bgf = BetaGeoFitter()
cd_data = load_cdnow_summary()
bgf.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
@pytest.mark.plottest
class TestPlotting():
    """Image-comparison tests for lifetimes.plotting.

    Most tests reuse the module-level `bgf` fitted above. The two
    calibration tests now fit a dedicated model instead of re-fitting
    the shared `bgf` in place, which made results depend on test
    execution order.
    """

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_period_transactions(self):
        plt.figure()
        plotting.plot_period_transactions(bgf)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_period_transactions_parento(self):
        pnbd = ParetoNBDFitter()
        pnbd.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
        plt.figure()
        plotting.plot_period_transactions(pnbd)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_period_transactions_mbgf(self):
        mbgf = ModifiedBetaGeoFitter()
        mbgf.fit(cd_data['frequency'], cd_data['recency'], cd_data['T'], iterative_fitting=1)
        plt.figure()
        plotting.plot_period_transactions(mbgf)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_period_transactions_max_frequency(self):
        plt.figure()
        plotting.plot_period_transactions(bgf, max_frequency=12)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_period_transactions_labels(self):
        plt.figure()
        plotting.plot_period_transactions(bgf, label=['A', 'B'])
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_frequency_recency_matrix(self):
        plt.figure()
        plotting.plot_frequency_recency_matrix(bgf)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_frequency_recency_matrix_max_recency(self):
        plt.figure()
        plotting.plot_frequency_recency_matrix(bgf, max_recency=100)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_frequency_recency_matrix_max_frequency(self):
        plt.figure()
        plotting.plot_frequency_recency_matrix(bgf, max_frequency=100)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_frequency_recency_matrix_max_frequency_max_recency(self):
        plt.figure()
        plotting.plot_frequency_recency_matrix(bgf, max_frequency=100, max_recency=100)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_probability_alive_matrix(self):
        plt.figure()
        plotting.plot_probability_alive_matrix(bgf)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_probability_alive_matrix_max_frequency(self):
        plt.figure()
        plotting.plot_probability_alive_matrix(bgf, max_frequency=100)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_probability_alive_matrix_max_recency(self):
        plt.figure()
        plotting.plot_probability_alive_matrix(bgf, max_recency=100)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_probability_alive_matrix_max_frequency_max_recency(self):
        plt.figure()
        plotting.plot_probability_alive_matrix(bgf, max_frequency=100, max_recency=100)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_expected_repeat_purchases(self):
        plt.figure()
        plotting.plot_expected_repeat_purchases(bgf)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_expected_repeat_purchases_with_label(self):
        plt.figure()
        plotting.plot_expected_repeat_purchases(bgf, label='test label')
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_customer_alive_history(self):
        plt.figure()
        transaction_data = load_transaction_data()
        # yes I know this is using the wrong data, but I'm testing plotting here.
        # BUGFIX: renamed from `id`, which shadowed the builtin.
        customer_id = 35
        days_since_birth = 200
        sp_trans = transaction_data.loc[transaction_data['id'] == customer_id]
        plotting.plot_history_alive(bgf, days_since_birth, sp_trans, 'date')
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_calibration_purchases_vs_holdout_purchases(self):
        transaction_data = load_transaction_data()
        summary = utils.calibration_and_holdout_data(transaction_data, 'id', 'date', '2014-09-01', '2014-12-31')
        # BUGFIX: fit a fresh model rather than re-fitting the shared
        # module-level `bgf`, which leaked state into later tests.
        cal_bgf = BetaGeoFitter()
        cal_bgf.fit(summary['frequency_cal'], summary['recency_cal'], summary['T_cal'])
        plt.figure()
        plotting.plot_calibration_purchases_vs_holdout_purchases(cal_bgf, summary)
        return plt.gcf()

    @pytest.mark.mpl_image_compare(tolerance=30)
    def test_plot_calibration_purchases_vs_holdout_purchases_time_since_last_purchase(self):
        transaction_data = load_transaction_data()
        summary = utils.calibration_and_holdout_data(transaction_data, 'id', 'date', '2014-09-01', '2014-12-31')
        # BUGFIX: same isolation fix as above.
        cal_bgf = BetaGeoFitter()
        cal_bgf.fit(summary['frequency_cal'], summary['recency_cal'], summary['T_cal'])
        plt.figure()
        plotting.plot_calibration_purchases_vs_holdout_purchases(cal_bgf, summary, kind='time_since_last_purchase')
        return plt.gcf()
|
Earthship Fife is now a Learning Destination for the Children’s University!
The Earthship Fife Visitor Centre is now a registered Learning Destination, so once your child is registered, come along and do the Earthship Close-up Quiz and our staff will put a stamp in your child’s Children’s University Passport To Learning. Your child will learn how this off-grid, self-sustaining building works. There is no entry fee, but we are a registered charity and would appreciate a small donation towards Earthship running costs. Check out our opening hours here.
May 25, 2015 in Announcement.
|
#!/usr/bin/python3
import docker
import docker.utils
from .core import Docker
from .image import Image
__author__ = 'Ryan Clarke - [email protected]'
class Container:
    """Base Container class for docker containers managed by Hivemined."""
    label = 'hivemined.container'

    def __init__(self, name, image, command='', volumes=None, port=None, memory=None, swap=None, cpu=None, **kwargs):
        """Create (and register with Docker) a managed container.

        BUGFIX: ``volumes`` previously defaulted to ``list()``, a single
        list shared by every call (classic mutable default); it now
        defaults to a fresh list per call.
        """
        self.name = str(name)
        self.command = str(command)

        # Type checking for image
        if isinstance(image, Image):
            self.image = image
        else:
            raise TypeError('Parameter must be an Image', image)

        if volumes is None:
            volumes = []
        # Type checking for volumes
        if next((False for v in volumes if not isinstance(v, Container)), True):
            self.volumes = volumes
        else:
            raise TypeError('Parameter must be a list of Containers.', volumes)

        # Set network port and resource limits.
        # BUGFIX: keys are now lower-case to match the
        # .get('memory'|'swap'|'cpu') lookups in create(); the old
        # capitalised keys meant the limits were silently never applied.
        self.port = port
        self.limits = {}
        if memory:
            self.limits['memory'] = str(memory)
        if swap:
            self.limits['swap'] = str(swap)
        if cpu:
            self.limits['cpu'] = int(cpu)

        # BUGFIX: renamed from ``self.restart``, which shadowed the
        # restart() method and made it uncallable on instances.
        self.restart_policy = {
            'Name': 'always',   # 'always' | 'on-failure' | 'no'
            'MaximumRetryCount': 0
        }
        self.container = None
        self.create(**kwargs)

    def list(self, show_all=False, quiet=False):
        """List all containers managed by the calling class (respects inheritance)."""
        return Docker.containers(all=show_all, quiet=quiet, filters={'label': type(self).label})

    def exists(self, running=False):
        """Return True if the container referenced by this object exists, or False otherwise.

        If running==True, check if the container is running instead.
        """
        # BUGFIX: guard against self.container being None (as it is before
        # the first create() call); calling .get() on None raised
        # AttributeError on every construction.
        if self.container is None or not self.container.get('Id'):
            return False
        containers = self.list(show_all=(not running))
        return next((True for c in containers if c.get('Id') == self.container.get('Id')), False)

    def create(self, force=False, **kwargs):
        """Create a new managed docker container.

        If force==True, create new a container even if one already exists.
        Propagates LookupError from self.image.get() if the image does not
        exist and cannot be pulled or built.
        Raises Warning if container creation resulted in warnings from Docker.
        """
        labels = {type(self).label: None, 'name': self.name}
        if self.exists() and not force:
            return
        try:
            self.image.get()    # Ensure that the specified Image exists.
        except LookupError as e:
            print(e)
            raise
        volume_list = [v.container.get("Id") for v in self.volumes]
        if self.port:
            host_cfg = docker.utils.create_host_config(
                volumes_from=volume_list, restart_policy=self.restart_policy,
                port_bindings={25565: int(self.port)})
        else:
            host_cfg = docker.utils.create_host_config(
                volumes_from=volume_list, restart_policy=self.restart_policy,
                publish_all_ports=True)
        self.container = Docker.create_container(
            host_config=host_cfg, labels=labels, image=self.image.name, command=self.command,
            mem_limit=self.limits.get('memory'), memswap_limit=self.limits.get('swap'),
            cpu_shares=self.limits.get('cpu'), **kwargs)
        if self.container.get('Warnings'):
            raise Warning("Container creation warning.", self)

    def delete(self, volumes=True):
        """Stop the container if running, then remove it (and volumes)."""
        if self.exists(running=True):
            self.stop()
        Docker.remove_container(self.container.get('Id'), v=volumes)

    def update(self):
        """Recreate the container from a freshly pulled/built image."""
        self.image.get(update=True)
        old_container = self.container
        self.create(force=True)
        Docker.remove_container(old_container.get('Id'))

    def start(self, tty=False):
        """Start the container, creating it first if necessary."""
        if not self.exists():
            self.create()
        Docker.start(self.container.get('Id'), tty=tty)

    def stop(self):
        """Stop the running container."""
        Docker.stop(self.container.get('Id'))

    def restart(self):
        """Restart the container.

        Callable again now that the restart-policy dict no longer
        shadows this method (see __init__).
        """
        Docker.restart(self.container.get('Id'))

    def execute(self, command, tty=False):
        """Run a command inside the running container.

        BUGFIX: renamed from ``command``, which was shadowed by the
        ``self.command`` string attribute and therefore uncallable.
        """
        exec_str = Docker.exec_create(self.container.get('Id'), cmd=command, tty=tty)
        Docker.exec_start(exec_str, tty=tty)
|
Best reviews of health paradise organic tartary buckwheat tea 200g 2 tins, this product is a popular item this year. this product is a new item sold by LIFEWINNERS ORGANIC & FINE FOODS store and shipped from Singapore. Health Paradise Organic Tartary Buckwheat Tea 200g (2 Tins) can be purchased at lazada.sg which has a really cheap cost of SGD17.80 (This price was taken on 04 June 2018, please check the latest price here). what are features and specifications this Health Paradise Organic Tartary Buckwheat Tea 200g (2 Tins), let's wait and watch the important points below.
For detailed product information, features, specifications, reviews, and guarantees or another question which is more comprehensive than this Health Paradise Organic Tartary Buckwheat Tea 200g (2 Tins) products, please go straight to the owner store that is due LIFEWINNERS ORGANIC & FINE FOODS @lazada.sg.
LIFEWINNERS ORGANIC & FINE FOODS is a trusted seller that already is skilled in selling Tea products, both offline (in conventional stores) an internet-based. most of the clientele are incredibly satisfied to purchase products from your LIFEWINNERS ORGANIC & FINE FOODS store, that may seen together with the many five star reviews written by their clients who have bought products in the store. So you don't have to afraid and feel focused on your product not up to the destination or not prior to what is described if shopping in the store, because has numerous other clients who have proven it.
Moreover LIFEWINNERS ORGANIC & FINE FOODS provide discounts and product warranty returns if the product you acquire doesn't match whatever you ordered, of course together with the note they provide. Including the product that we are reviewing this, namely "Health Paradise Organic Tartary Buckwheat Tea 200g (2 Tins)", they dare to offer discounts and product warranty returns if your products they sell do not match what exactly is described.
So, if you wish to buy or seek out Health Paradise Organic Tartary Buckwheat Tea 200g (2 Tins) however recommend you buy it at LIFEWINNERS ORGANIC & FINE FOODS store through marketplace lazada.sg.
Why should you buy Health Paradise Organic Tartary Buckwheat Tea 200g (2 Tins) at LIFEWINNERS ORGANIC & FINE FOODS shop via lazada.sg?
Obviously there are many benefits and advantages available when shopping at lazada.sg, because lazada.sg is really a trusted marketplace and also have a good reputation that can present you with security from all types of online fraud. Excess lazada.sg in comparison to other marketplace is lazada.sg often provide attractive promotions such as rebates, shopping vouchers, free postage, and often hold flash sale and support which is fast and that is certainly safe. and what I liked is really because lazada.sg can pay on the spot, which has been not there in a other marketplace.
|
from __future__ import print_function, absolute_import
from six.moves import filter
from io import open # to use encoding kw in Python 2
import os
from collections import defaultdict
from crowfood.utils import is_subdir
import re
import sys
import itertools
def get_roots_and_include_paths(args):
    """Partition scan roots and -I include paths for the dependency scan.

    Convention:
      * input roots are the directories of the files to scan
      * include roots are the directories given by -I

    Returns (input_roots, input_include_paths, external_roots,
    external_include_paths); the *_include_paths values are dicts mapping
    each root to its include paths, defaulting to the root itself.
    """
    input_roots = set()
    for path in args.path:
        if os.path.isfile(path):
            path = os.path.dirname(path)
        input_roots.add(path)

    external_roots = set(args.external_roots)
    # Any include path lying outside every known root becomes an
    # additional external root.
    known_roots = input_roots.union(external_roots)
    external_roots.update(
        include_path for include_path in args.include_paths
        if not any(is_subdir(include_path, root) for root in known_roots))

    input_include_paths = defaultdict(list)     # input root -> include paths
    external_include_paths = defaultdict(list)  # external root -> include paths
    for include_path in args.include_paths:
        matching_inputs = [root for root in input_roots
                           if is_subdir(include_path, root)]
        if matching_inputs:
            input_include_paths[matching_inputs[0]].append(include_path)
        else:
            matching_externals = [root for root in external_roots
                                  if is_subdir(include_path, root)
                                  or include_path == root]
            external_include_paths[matching_externals[0]].append(include_path)

    # A root with no explicit include paths searches itself.
    for root in input_roots:
        if root not in input_include_paths:
            input_include_paths[root].append(root)
    for root in external_roots:
        if root not in external_include_paths:
            external_include_paths[root].append(root)

    return input_roots, input_include_paths, external_roots, external_include_paths
def run(args):
input_roots, input_include_paths, external_roots, external_include_paths =\
get_roots_and_include_paths(args)
# for every found directory and file we need to output:
#((root, 'relative/path/to/root'), (None, None))
# We scan all requested files and directories and stop at the outer
# level of any dependencies found at the include search paths.
# Files in the include paths are not scanned for #include's.
# Get a list of all files with .c/.cc/.cpp/.cxx/.h/.hpp/.hxx extension
# from the directories to scan for, if any.
exts = ['c', 'h', 'cc', 'cpp', 'cxx', 'hpp', 'hxx'] + args.additional_exts
files = defaultdict(list) # input root -> file paths relative to root
def get_input_root(path):
return next(filter(lambda root: root in path, input_roots))
if args.fuzzy:
filemap = defaultdict(list) # filename -> (root,relpath)
for path in args.path:
if os.path.isfile(path):
root = get_input_root(path)
files[root].append(os.path.relpath(path, root))
else:
for base, _, filenames in os.walk(path):
if base in args.ignore_paths:
continue
root = get_input_root(base)
filenames = list(filter(lambda f: any(f.endswith('.' + ext) for ext in exts), filenames))
filepaths = map(lambda f: os.path.join(base, f), filenames)
filepaths = list(map(lambda p: os.path.relpath(p, root), filepaths))
files[root].extend(filepaths)
if args.fuzzy:
for filename, filepath in zip(filenames,filepaths):
filemap[filename].append((root,filepath))
# parse the #include's of all files
quotes = dict({'both': ('["|<]', '["|>]'),
'angle': ('<', '>'),
'quote': ('"', '"')
})[args.quotetypes]
include_re = re.compile(r'#include {}([a-zA-Z0-9_\-\.\/]+){}'.format(*quotes))
includes = dict() # (root,relpath) -> [include paths]
for root, filepaths in files.items():
for filepath in filepaths:
print('parsing', filepath, file=sys.stderr)
with open(os.path.join(root, filepath), encoding='utf8') as fp:
includes[(root,filepath)] = include_re.findall(fp.read())
# for each include, find the root it belongs to
includes_roots = dict() # include path -> root
includes_unique = set(itertools.chain.from_iterable(includes.values()))
def find_in_root(include, root, include_paths, cache=None):
for include_path in include_paths:
full_path = os.path.join(include_path, include)
rel = os.path.relpath(full_path, root)
if cache:
if rel in cache[root]:
return rel
elif os.path.exists(full_path):
return rel
return False
def find_in_roots(include, root_includepaths, cache=False):
for root, include_paths in root_includepaths:
rel = find_in_root(include, root, include_paths, cache)
if rel:
return root, rel
return False, False
for include in includes_unique:
# first we search within the input roots, then in the external roots
root, relpath = find_in_roots(include, input_include_paths.items(), files)
if not root:
root, relpath = find_in_roots(include, external_include_paths.items())
if root:
includes_roots[include] = root, relpath
not_found = defaultdict(list)
for (root, filepath), includepaths in list(includes.items()):
includes[(root, filepath)] = []
for include in includepaths:
root_path = False
if not args.no_include_current:
# look in current folder and prefer this over the other results
rel = find_in_root(include, root,
[os.path.join(root, os.path.dirname(filepath))], files)
if rel:
root_path = root, rel
if not root_path:
root_path = includes_roots.get(include)
if not root_path and args.fuzzy:
filename = os.path.basename(include)
if filename in filemap:
res = filemap[filename]
if len(res) > 1:
print('WARNING: ignoring fuzzy result as multiple '
'{} candidates were found (from {}): {}'.\
format(filename, filepath, [p for _,p in res]),
file=sys.stderr)
else:
root_path = res[0]
if root_path:
includes[(root, filepath)].append((root_path[0],root_path[1]))
else:
not_found[include].append(filepath)
if not_found:
print('\nWARNING: some includes could not be found:\n', file=sys.stderr)
for include in sorted(not_found.keys()):
print('{} not found'.format(include), file=sys.stderr)
if args.verbose:
for filepath in sorted(not_found[include]):
print(' from {}'.format(filepath), file=sys.stderr)
# Unify roots when a file was found over multiple roots.
# This happens when an include search path is given that is above
# an internal root.
roots = input_roots.union(external_roots)
nested_roots = list(filter(lambda r: is_subdir(*r), itertools.product(roots, roots)))
if nested_roots:
print('going to unify paths as there are nested roots', file=sys.stderr)
def move_root(subroot, root, filepath):
full = os.path.join(root, filepath)
if is_subdir(full, subroot) or os.path.dirname(full) == subroot:
rel = os.path.relpath(full, subroot)
print('moving root: {} -> {} for {}'.format(root, subroot, filepath), file=sys.stderr)
return (subroot, rel)
else:
return (root, filepath)
for subroot,root in nested_roots:
# the strategy is to move all includes from root to the subroot if they
# are actually within the subroot
for rf,includepaths in includes.items():
includes[rf] = [move_root(subroot,root,filepath) if root_ == root else (root_,filepath)
for root_,filepath in includepaths]
# merge .h/.c files if requested
if args.merge == 'module':
# The tricky part is: how do we know which files belong together?
# Obviously this is only possible if there is a 1-1 relationship
# in naming of the .c/.h files, that is the base is the same.
# Also, the .h file must be included in the matching .c file.
# We merge transitive dependencies of the same base name
# into the including .c file entry, thereby collapsing
# the dependencies of the matching files.
def find_matches(base, includepaths):
''' returns a list of (root,filepath) items '''
if not includepaths:
return []
matches = ((root,filepath) for (root,filepath) in includepaths
if os.path.splitext(os.path.basename(filepath))[0] == base)
return itertools.chain(matches,
itertools.chain.from_iterable(
find_matches(base, includes[match])
for match in matches)
)
for (root,filepath),includepaths in list(includes.items()):
if (root,filepath) not in includes:
# already merged
continue
filename = os.path.basename(filepath)
base,ext = os.path.splitext(filename)
if not ext.startswith('.c'):
continue
# Recursively get all includes with matching base name
# starting from the current c file.
# This becomes the set of files to merge into the including .c entry.
# Recursion only follows paths where the base name matches,
# that is, a.c -> a.h -> a.inc will be picked up, but not
# a.c -> b.h -> a.inc.
# Cyclic imports will lead to an error.
matches = set(find_matches(base, includepaths))
deps = itertools.chain.from_iterable(includes.get(match, []) for match in matches)
includes[(root,filepath)] = list((set(includepaths) | set(deps)) - matches)
for match in matches:
if match in includes:
del includes[match]
# move old references to the merged module
for k, includepaths in includes.items():
if match in includepaths:
includes[k] = [(root,filepath) if dep == match else dep
for dep in includepaths]
# remove file extensions as these don't make sense anymore now
newincludes = dict()
for (root1,path1),includepaths in includes.items():
newincludes[(root1,os.path.splitext(path1)[0])] =\
[(root2,os.path.splitext(path2)[0]) for (root2,path2) in includepaths]
includes = newincludes
# return dependencies as ((root,path),(root,path)) tuples
deps = []
dirs = set()
for (root,filepath),includepaths in includes.items():
deps.append(((root,filepath),(None,None)))
directory = os.path.dirname(os.path.join(root, filepath))
if directory not in dirs:
dirs.add(directory)
deps.append(((root,os.path.dirname(filepath)),(None,None)))
for root_,filepath_ in includepaths:
deps.append(((root,filepath),(root_,filepath_)))
directory = os.path.dirname(os.path.join(root_, filepath_))
if directory not in dirs:
dirs.add(directory)
deps.append(((root_,os.path.dirname(filepath_)),(None,None)))
return deps
|
Boutique consulting firm BIC Group is looking for a Senior Consultant to join our team; this is the third-level position in our firm. As a Senior Consultant, you’ll be responsible for one or several streams of a project, and we expect you to share our goals and culture.
Senior Consultant is the third level position in our firm.
As a Senior Consultant, you’ll join our consulting team of Roland Berger alumni on a range of operational and strategic projects. We expect you to have deep expertise and experience in management consulting or in a specific industry that correlates with our main industries.
Our main industries are Engineering, Metallurgy, Mining, Retail etc.
As a Senior Consultant you’ll be responsible for one or several streams of the project (depending on the project's complexity and size), and we expect you to share our goals and culture, to translate them to your junior peers, and to mentor them.
Senior Consultants are key assets of our Firm, because we consider you for further development and for taking part in creating the future of our Firm. You should be a responsible, self-sufficient professional able to work in an uncertain environment and under tight deadlines; strong leadership skills and reliability are also required.
You’ll be like a respected member of the Team, who can influence the future of consulting and our Firm.
|
#!/usr/bin/env python
import cPickle as pickle
import os
import re  # used by extractExtension; previously referenced without being imported
import subprocess

import cv2
import numpy as np
from six.moves.urllib import request
picBase = "/mnt/s3pic/cifar10/"
def save_object(obj, filename):
    """Pickle *obj* to *filename* using the highest available protocol."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, pickle.HIGHEST_PROTOCOL)
def checkExistance(path):
    """Return True when *path* exists on disk (file or directory)."""
    # os.path.exists already returns a bool; the previous
    # if/else-return-True/False ladder was redundant.
    return os.path.exists(path)
def makeDirectory(path):
    """Create *path* (including parent directories) unless it already exists."""
    if checkExistance(path):
        return
    os.makedirs(path)
def downloadPic(url, name):
    """Quietly download *url* into local file *name* via wget.

    Uses an argument list with shell=False so characters in the URL or
    file name cannot be interpreted by a shell (the previous string-built
    command was vulnerable to shell injection and broke on spaces).
    """
    subprocess.call(["wget", url, "-O", name, "-q"])
def extractExtension(name):
    """Return the last extension of *name* including the dot, e.g. '.png'.

    Raises IndexError when *name* contains no dot.
    """
    # NOTE: `re` was used here without ever being imported in this file
    # (a guaranteed NameError); the import is added to the module's
    # import block.
    return re.findall(r"^.*(\..*)$", name)[0]
def moveFile(path, name):
    """Move file *name* into *path* (a directory or target file name).

    Uses an argument list with shell=False so special characters in the
    paths are passed through literally instead of being shell-expanded.
    """
    subprocess.call(["mv", name, path])
def cifar100Extract():
    """Download and unpack the CIFAR-100 python archive (first run only),
    prepare the output directory layout under picBase, rename a few
    multi-word fine labels, and return the raw pickled dictionaries.

    Returns:
        (train, test, tag): the dicts loaded from the CIFAR-100 pickles;
        see the module-level comment below this function for their keys.
    """
    # Output directory skeleton: train/test images plus label pickles.
    makeDirectory(picBase)
    makeDirectory(picBase + "train")
    makeDirectory(picBase + "test")
    makeDirectory(picBase + "label")
    # Fetch and unpack the dataset only when it is not already on disk.
    if not os.path.exists("cifar-100-python"):
        request.urlretrieve(
            "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz",
            "./cifar-100-python.tar.gz"
        )
        cmd = "tar -xvzf cifar-100-python.tar.gz"
        subprocess.call(cmd, shell=True)
        cmd = "rm -r cifar-100-python.tar.gz"
        subprocess.call(cmd, shell=True)
    def unpickle(file):
        # Load one CIFAR-100 pickle (Python 2 cPickle format).
        import cPickle
        fo = open(file, 'rb')
        dict = cPickle.load(fo)
        fo.close()
        return dict
    train = unpickle("cifar-100-python/train")
    test = unpickle("cifar-100-python/test")
    tag = unpickle("cifar-100-python/meta")
    # Fine labels containing underscores are renamed to single words
    # (e.g. 'pickup_truck' -> 'truck'); the two lists below are parallel.
    tagWith_ = ['aquarium_fish',
                'lawn_mower',
                'maple_tree',
                'oak_tree',
                'palm_tree',
                'pickup_truck',
                'pine_tree',
                'sweet_pepper',
                'willow_tree']
    tagAlter = ["fish",
                "lawnmower",
                "maple",
                "oak",
                "palm",
                "truck",
                "pine",
                "paprika",
                "willow"]
    index = [tag["fine_label_names"].index(with_) for with_ in tagWith_]
    count = 0
    for i in index:
        tag["fine_label_names"][i] = tagAlter[count]
        count = count + 1
    y_train = {}
    y_test = {}
    # Reshape flat 3072-value rows into (channels, height, width) images.
    x_test = test['data']
    x_test = x_test.reshape(len(x_test),3,32,32)
    x_train = train['data']
    x_train= x_train.reshape(len(x_train),3,32,32)
    # The image/label export below is currently disabled; the raw dicts are
    # returned instead (the commented-out call in __main__ reflects the
    # older signature that returned arrays and label dicts).
    # for x in zip(x_test, test["filenames"], test["fine_labels"]):
    #     cv2.imwrite(picBase + "test/" + x[1], x[0].transpose(1,2,0)[:,:,::-1].copy())
    #     y_test[x[1]] = x[2]
    # for x in zip(x_train, train["filenames"], train["fine_labels"]):
    #     cv2.imwrite(picBase + "train/" + x[1], x[0].transpose(1,2,0)[:,:,::-1].copy())
    #     y_train[x[1]] = x[2]
    # save_object(y_test, picBase + "label/y_test.pkl")
    # save_object(y_train, picBase + "label/y_train.pkl")
    return (train, test, tag)
"""
In [38]: tag.keys()
Out[38]: ['fine_label_names', 'coarse_label_names']
In [40]: train.keys()
Out[40]: ['data', 'batch_label', 'fine_labels', 'coarse_labels', 'filenames'
In [41]: len(train["data"])
Out[41]: 50000
In [42]: len(train["data"][0])
Out[42]: 3072 // it means 32*32*3
"""
if __name__ == '__main__':
    # x_train, y_train, x_test, y_test, tag = cifar100Extract()
    # NOTE(review): the commented call above reflects an older return
    # signature; cifar100Extract now returns the three raw pickle dicts.
    train, test, tag = cifar100Extract()
|
Last year, we looked at the amazing embedded design achievements of student teams in Microsoft’s Windows Embedded Student Challenge. On impossibly short schedules, and with almost no existing infrastructure and with no previous experience in most of the development tools, these student teams put together complete working systems with custom hardware and software components, as well as marketing and product plans to assess the viability of their projects as products.
The dedication, creativity, and energy required to pull off such a feat are well beyond the capacity of most commercial product development teams. The world of benefits packages, balancing work and family life, vacation time, office politics, and industrial regulations and standards preclude the kind of fast-paced, get-the-job-done-at-all-costs, wind-in-your-face effort that these student teams (and some startup companies) are capable of generating.
Once the competition was down to 200 teams, it was time to roll up the sleeves, pull out the slide rules – OK, wait, I dropped back a few decades on the education system – set up the project team website, and get down to real work. Each of the selected top 200 teams was provided a single-board computer, a Phidgets LCD and interface, a full version of Windows Embedded CE 6.0, Visual Studio 2005, and cables. All these tools allowed them to move their idea from proposal into working prototype.
The single-board computer is an ICOP eBox 2300 – a small-form-factor, low-cost, low-power platform with a 200Mhz Vortex86 SoC processor, 128MB SDRAM, an IDE interface for hard disk, bootable IDE and other storage, VGA output with support for resolutions up to 1280X1024, Compact Flash slot, 2 RS-232 ports, 3 USB ports (1.1), Ethernet, PS2 keyboard and mouse connections, AC97 audio inputs and outputs, and a simple, 5V supply requirement. This platform offered student teams a robust set of options for I/O and custom peripheral connection with enough computing power to handle most applications except those requiring things like digital signal processing or high-speed/high-resolution video.
On the software side, teams were required to build, debug, and deploy a Windows Embedded CE 6.0 operating system image. With the OS in place, any custom hardware developed for the project would also require custom drivers to be integrated, and then the final application could be deployed on that base. The Platform Builder IDE for Windows CE allows the operating system image to be configured specifically for the requirements of each project. Teams (and you too for that matter) can maintain a lean image with exactly the features required for a particular application.
With development tools in hand, the 200 “Second Round” teams had just two months to get their ideas working in real hardware and their presentations ready for the judges. This time, the cut was much deeper – only 15 of the top 200 teams would be selected for the finals next week – August 5-11 in Seoul, South Korea. The 15 chosen teams will have to demonstrate working systems and make final presentations to the judging panel.
“Our greatest motivation was the fact that one-fifth of the world’s population is illiterate,” says Andre Furtado of Trivent Dreams – a finalist team from Universidade Federal de Pernambuco in Recife, Brazil, “That makes about 800 million people who cannot do basic reading, writing, and calculation. In some countries, the illiteracy rate approaches 90%.” Trivent Dreams set about creating a low-cost, easy to deploy learning platform that could be used both in the school and at home to educate and motivate young students.
Their project, “E-du Box” uses the ICOP box connected to an ordinary TV, a pen input device, and a custom piece of hardware the team calls their “External Agent” – an avatar constructed of a hand-puppet made into a robotic animatronic figure that aids in communicating with and motivating the student. The External Agent is certainly the centerpiece of the team’s well-conceived application – controlled wirelessly via Bluetooth, it agrees, disagrees, says yes or no, and has a repertoire of other simple movements that can reinforce the lesson being presented.
The Pangea system uses no custom hardware and is designed to be deployed in public places such as schools and libraries. The software searches for compatible students and matches them up, and then it facilitates the eTandem learning process via internet connections. Already, local institutions are expressing interest in deploying Pangea in their environments – a testament to the relevance and utility of the students’ solution.
The other 13 finalists field similarly compelling entries – which should make for an engaging presentation next week when the final winners are determined. Those of us developing embedded products for a living can learn a lot from the energy, creativity, resourcefulness, and focus of these students.
|
#!/bin/env python2
"""
Automate powered loudspeakers power with EATON UPS from Denon DN-500AV pre-amplifier state
"""
from twisted.internet import reactor
from twisted.internet.protocol import ClientFactory
from twisted.conch.telnet import TelnetTransport, TelnetProtocol # Unavailable in Python3, yet
from PyNUT import PyNUTClient
class DenonProtocol(TelnetProtocol):
    """Telnet client protocol for a Denon DN-500AV pre-amplifier.

    On connection it queries the amplifier's power state ('PW?'); for every
    reply containing PWON / PWSTANDBY it writes the UPS outlet's
    'switchable' variable through NUT so the powered loudspeakers follow
    the amplifier's power state.
    """
    ups_name = 'nutdev1' # TODO: store in a configuration file
    ups_var = "outlet.2.switchable" # on means power down or off power up
    ups_username = 'admin' # TODO: store in a configuration file
    ups_userpass = 'ups' # TODO: store securely? in a configuration file
    def connectionMade(self):
        # Subscribe to the power state
        self.transport.write("PW?\n")
    def dataReceived(self, bytes):
        # NOTE(review): the 'bytes' parameter shadows the builtin of the
        # same name; Twisted passes the raw data received on the socket.
        ups = PyNUTClient(login=self.ups_username, password=self.ups_userpass)
        if 'PWON' in bytes:
            # Enable UPS sockets
            # (presumably switchable='no' keeps outlet 2 powered while the
            # amp is on -- confirm against the EATON/NUT variable docs).
            ups.SetRWVar(ups=self.ups_name, var=self.ups_var, value='no')
        if 'PWSTANDBY' in bytes:
            # Disable UPS sockets
            ups.SetRWVar(ups=self.ups_name, var=self.ups_var, value='yes')
class TelnetFactory(ClientFactory):
    """Builds a Telnet transport wrapping DenonProtocol for each connection."""
    def buildProtocol(self, addr):
        # addr is the peer address; unused because the protocol is fixed.
        return TelnetTransport(DenonProtocol)
if __name__ == '__main__':
    """
    The amplifier uses a Telnet interface (port 23) to send and receive serial commands
    We can subscribe to the power state using PW?\r
    The reply can be either PWON or PWSTANDBY
    The UPS is an EATON powerstation
    It exposes an interface through NUT to control 2 power sockets
    We want them to follow the amp's state
    """
    # NOTE(review): the string above is a no-op expression statement kept
    # as inline documentation of the amplifier/UPS protocol.
    host = '192.168.1.10' # TODO: store in a configuration file
    port = 23
    # Connect to the amplifier and run the Twisted event loop forever.
    reactor.connectTCP(host, port, TelnetFactory())
    reactor.run()
|
Feather Canoes | Contact Feather Canoes Wee Lassie plans and patterns for a classic cedar strip solo canoe you can build yourself.
For all questions, comments etc. please email us by clicking here and we will do our best to help out.
If you prefer, please mail us at the address below.
|
# Delete Middle Node: Implement an algorithm to delete a node in the middle of
# singly linked list, given only access to that node
from LinkedList import Node, LinkedList
def delete_middle_node(node_to_delete):
    """Remove *node_to_delete* from its singly linked list in O(1).

    Works by copying the successor's payload into this node and unlinking
    the successor. The last node of a list has no successor and cannot be
    removed this way, so an Exception is raised for it.
    """
    successor = node_to_delete.next
    if successor is None:
        raise Exception("Invalid node to delete")
    node_to_delete.data = successor.data
    node_to_delete.next = successor.next
if __name__ == '__main__':
    # Interactively build a linked list and pick one node to remove.
    ll = LinkedList()
    continue_ans = raw_input("Do you want to add new node? (y/n): ")
    to_delete = "n"
    while continue_ans == 'y':
        data = raw_input("Enter data for the new node: ")
        data_node = Node(data)
        ll.append_node(data_node)
        # Keep asking until the user has marked exactly one node to delete.
        if to_delete == 'n':
            to_delete = raw_input("Is this the one you want to remove? (y/n): ")
            if to_delete == 'y':
                node_to_delete = data_node
        continue_ans = raw_input("Do you want to add new node? (y/n)?")
    # NOTE(review): if the user never answers 'y' to the removal prompt,
    # node_to_delete is never bound and the print below raises NameError.
    print "Initial linked list: {}".format(ll)
    print "Middle node to delete: {}".format(node_to_delete)
    delete_middle_node(node_to_delete)
    print "Linked list after deletion: {}".format(ll)
|
Wisconsinites know you don’t have to be on one of the Great Lakes to find a great public beach. With literally thousands of lakes, Wisconsin is known far and wide for having some of the most pristine and beautiful inland beaches around. These inland beaches offer clean calm water, attractive atmospheres, and are truly great places to relax on sunny summer afternoons. Here are eight awesome options to explore with family and friends.
Located on picturesque Nagawicka Lake, Naga-Waukee Park offers 130 feet of sandy beach, a beach house with restrooms, and a great concession stand. The 414 acre park also features an 18-hole golf course, 8 miles of trails, and endless opportunities for fun. All just 25 miles west of Milwaukee!
With a great sandy beach and crystal clear water, Fireman’s Beach is one of the best in the state. This fun park features a concession stand, picnic tables, grills, sand volleyball courts and shelters for rent. All for a bargain gate admission of $3 for adults and $1 for those under 12.
Located in Adams County, Friendship Lake Public Beach is the perfect place to spend an afternoon. With an average depth of just 6 feet, Friendship Lake is great for family swimming.
Who would’ve thought that there is a beautiful sandy beach just minutes from downtown Janesville? Lions Beach, located on a 5-acre spring fed pond next to the Rotary Botanical gardens, features a grassy picnic area and restrooms.
Come play at the Memorial Park Beach on the 2,600 acre Shell Lake in Washburn County. The beach is located right downtown and offers a dock, swimming rafts, and lifeguards during the summer months. If the kids want to take a break from swimming, they can play on the playground equipment right within view of the water.
Hattie Sherwood is known for its beautiful scenery, sandy swimming beaches, and great fishing. When visitors aren’t enjoying the pristine waters of Green Lake, they can be found monkey-ing around on the jungle gym and exploring the many hiking and biking trails.
With its crystal clear waters and world-class fishing, Keyes Lake Park is a true Northwoods treasure. The park itself features picnic facilities, a sandy beach, swimmers raft, and a water slide.
Known far and wide as a great swimming beach, Shawano Lake County Park is just an hour drive from Green Bay, Appleton, and Wausau. Swim in the calm refreshing water, or rent a canoe and paddle boat to explore with a little more speed.
|
# http://flask.pocoo.org/docs/patterns/fileuploads/
import os
from flask import Flask, request, redirect, url_for, send_from_directory
from werkzeug import secure_filename
UPLOAD_FOLDER = 'uploads'  # relative to the working directory the app runs in
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])  # lowercase extensions accepted
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS.

    The previous check compared only the last three characters of the
    name, so the four-character 'jpeg' extension listed in
    ALLOWED_EXTENSIONS could never match ('x.jpeg'[-3:] is 'peg'), and
    any name whose last three letters happened to spell an allowed
    extension (e.g. 'nopng') was wrongly accepted.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
print '**found file', file.filename
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# for browser, add 'redirect' function on top of 'url_for'
return url_for('uploaded_file',
filename=filename)
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file back from the upload folder."""
    upload_dir = app.config['UPLOAD_FOLDER']
    return send_from_directory(upload_dir, filename)
if __name__ == '__main__':
    # debug=True enables the interactive debugger and reloader;
    # never use it on a production deployment.
    app.run(debug=True)
|
Mastabol is a synthetic derivative of dihydrotestosterone, displaying a potent androgenic effect that is responsible for increases in muscle density and hardness and a moderate anabolic effect that creates a positive nitrogen balance in humans and promotes protein synthesis. Since it is a derivative of dihydrotestosterone, dromastolone does not aromatize in any dosage and thus it cannot be converted into estrogen. Therefore, estrogen-related water retention is eliminated. Mastabol 200 Depot combines the fast-acting propionate form with the longer acting enanthate form.
|
import six
assert six.PY3, "Please run me with Python3"
import ply.lex as lex
import ply.yacc as yacc
import readline
import urllib.parse
import requests
import sys
class Node:
    """One node of the parsed bracket query: *dtype* is the dependency
    label, *children* mixes plain strings (words/macros) and sub-Nodes."""
    def __init__(self,dtype,children):
        self.dtype=dtype
        self.children=children
    def dsearch_ex_lin(self):
        """Build a dep_search expression for a flat [dep w1 w2 ...] node by
        trying every word as the root and constraining all other words with
        linear-order operators (>lin@L / >lin@R)."""
        #cases like [dep xxx xxx xxx xxx]
        # All children must be plain strings for this expansion.
        assert sum(1 for c in self.children if isinstance(c,str))==len(self.children)
        exprs=[]
        for root_idx,root in enumerate(self.children):
            expr=['"'+root+'"']
            for other_idx,other in enumerate(self.children):
                if other_idx<root_idx:
                    # Words preceding the candidate root linearize to its left.
                    expr.append('>lin@L "{}"'.format(other))
                elif other_idx>root_idx:
                    # Words following it linearize to its right.
                    expr.append('>lin@R "{}"'.format(other))
            exprs.append("("+(" ".join(expr))+")")
        # Disjunction over every possible root choice.
        return "("+(" | ".join(exprs))+")"
    def dsearch_ex(self):
        """Translate this subtree into a dep_search query string.

        Exactly one string child acts as the head; the other string
        children become plain dependents and Node children become typed
        (or bare '>') dependency restrictions. Macros are expanded via
        the module-level `macros` table.
        """
        global macros
        #Now I guess I pick one of my STRING children to be the root or what?
        possible_roots=[c for c in self.children if isinstance(c,str)]
        if len(possible_roots)==len(self.children) and len(self.children)>1:
            # Purely lexical node: use the linear-order expansion instead.
            return self.dsearch_ex_lin()
        elif len(possible_roots)>1:
            # Several string heads alongside subtrees would be ambiguous.
            raise ValueError("Unsupported")
        assert len(possible_roots)==1
        for r in possible_roots:
            bits=["(",macros.get(r,'"'+r+'"')] #Bits of the expression
            for c in self.children:
                if c==r:
                    continue
                if isinstance(c,str):
                    bits.extend(['>',macros.get(c,'"'+c+'"')])
                elif isinstance(c,Node):
                    if c.dtype=="dep" or c.dtype=="_":
                        # Unlabelled dependency: match any relation.
                        bits.append(' > ')
                    else:
                        bits.append(' >'+c.dtype)
                    bits.append(c.dsearch_ex())
                else:
                    assert False, repr(c)
            bits.append(")")
            # Returns on the first (and, by the assert, only) candidate.
            return " ".join(bits)#I guess I should then generate the others too?
### ---------- lexer -------------
# List of token names. This is always required
tokens = ('LBRAC','RBRAC','STRING')
# NOTE: in ply, the raw-string docstring of each t_* function IS the
# token's regular expression -- the strings below are behavior, not
# documentation, and must not be edited casually.
def t_LBRAC(t):
    r'\['
    return t
def t_RBRAC(t):
    r'\]'
    return t
def t_STRING(t):
    r'[^\s\[\]]+'
    return t
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
    # Report the offending character and resynchronize one char ahead.
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
lexer = lex.lex()
### --------- grammar -----------
# NOTE: in ply, each p_* function's docstring is the BNF rule it parses;
# the strings below are behavior, not documentation.
def p_expressions(p):
    '''expressions : expression
                   | expression expressions
    '''
    # Collect the expressions into a plain Python list.
    if len(p)==2:
        p[0]=[p[1]]
    elif len(p)==3:
        p[0]=[p[1]]+p[2]
    else:
        assert False
def p_expr(p):
    '''expression : tree
                  | STRING
    '''
    p[0]=p[1]
def p_tree(p):
    'tree : LBRAC STRING expressions RBRAC'
    # A bracketed tree: dependency type followed by its children.
    p[0]=Node(p[2],p[3])
def p_error(p):
    print("Syntax error in input!")
parser = yacc.yacc()
def get_query_url(q):
    """Build the interactive dep_search URL for query string *q*."""
    params = {"search": q, "db": "RU160M",
              "case_sensitive": "False", "hits_per_page": "50"}
    base = "http://bionlp-www.utu.fi/dep_search/query"
    return base + "?" + urllib.parse.urlencode(params)
def download(qry, maxnum, fname):
    """Run *qry* against the dep_search web API and save the result text.

    Args:
        qry: dep_search query string.
        maxnum: maximum number of hits to retrieve (API 'retmax').
        fname: output file path the response text is written to.

    Raises:
        requests.HTTPError: when the API responds with an error status.
            Previously the HTML error page was silently written to *fname*
            as if it were valid CoNLL-U data.
    """
    data = {"search": qry, "db": "RU160M", "case": "False", "retmax": maxnum}
    result = requests.get("http://epsilon-it.utu.fi/dep_search_webapi", params=data)
    print(result.url)
    result.raise_for_status()
    with open(fname, "w") as f:
        print(result.text, file=f)
### ---------- run this ------------
# * NP-Nom = NOUN Case=Nom
# * XP = any phrasal category = NOUN, ADJ, ADV, PRON, VERB
# * PRON-Dat = PRON Case=Dat
# * NOUN-Nom = NOUN Case=Nom
# * VP = VERB
# * AP = ADJ
# * VP-Inf = VERB VerbForm=Inf
# * Imper = Mood=Imp
# * dep = any dependency label
macros_def="""
NP-Nom : (NOUN&Nom)
NP-Dat : (NOUN&Dat)
XP : (NOUN|ADJ|ADV|PRON|VERB)
PRON-Dat : (PRON&Dat)
NOUN-Nom : (NOUN&Nom)
VP : VERB
AP : ADJ
VP-Inf : (VERB&Inf)
VP-Imper : (VERB&Mood=Imp)
V-Past : (VERB&Past)
Imper : (Mood=Imp)
Cl : (VERB >nsubj _)
_ : _
"""
# Parse the macro table: each line maps "NAME : replacement-expression".
macros={} #macro -> replacement
for repl in macros_def.strip().split("\n"):
    src,trg=repl.split(" : ",1)
    macros[src]=trg
# Read constructions from stdin: a name line starts a new construction and
# the bracketed "[...]" lines that follow belong to it.
expressions={} #filename -> list of expressions
for line in sys.stdin:
    line=line.strip()
    if not line:
        continue
    if line.startswith("["):
        #an expression
        # NOTE(review): if the very first non-blank input line is an
        # expression, expression_list is unbound here -> NameError.
        expression_list.append(line)
    else: #construction name
        line=line.replace(" ","_")
        expression_list=[]
        expressions[line]=expression_list
# Translate every expression into a dep_search query and download its hits.
for fname,expression_list in sorted(expressions.items()):
    for expression in expression_list:
        print("Parsing expression", expression, file=sys.stderr, flush=True)
        # parser.parse returns a list of expressions; only the first is used.
        node = parser.parse(expression)
        qry=node[0].dsearch_ex()
        print(qry)
        download(qry,5,"dl/"+fname+".conllu")
|
Adam Parsons, 26, Russell, court commitment.
Jon Bergeron, 21, Hillsboro, probation violation.
Ashley Johnson, 19, Hillsboro, battery.
Michael Rosner, 55, Wichita, driving under the influence.
Donald Peel, 39, Herington, failure to appear.
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Handles assignment processing."""
import enum
import json
import time
import traceback
import uuid
from learner import data_fetcher
from learner import file_system
from learner import model_exporter
from learner import model_manager
from learner import stats_collector
from learner import storage
from learner.brains import brain_cache as brain_cache_module
from learner.brains import continuous_imitation_brain
from learner.brains import demonstration_buffer
from log import falken_logging
# pylint: disable=g-bad-import-order
import common.generate_protos # pylint: disable=unused-import
import action_pb2
import episode_pb2
import data_store_pb2 as falken_schema_pb2
# How long to work on a single assignment in a single learner at most.
_MAX_ASSIGNMENT_WORK_TIME_SECS = 60*60
# Maps an episode chunk's terminal state to the step phase recorded for
# the chunk's final step (see _step_generator).
_CHUNK_STATE_TO_STEP_PHASE = {
    episode_pb2.UNSPECIFIED: demonstration_buffer.StepPhase.UNSPECIFIED,
    episode_pb2.IN_PROGRESS: demonstration_buffer.StepPhase.IN_PROGRESS,
    episode_pb2.SUCCESS: demonstration_buffer.StepPhase.SUCCESS,
    episode_pb2.FAILURE: demonstration_buffer.StepPhase.FAILURE,
    episode_pb2.ABORTED: demonstration_buffer.StepPhase.ABORTED,
    episode_pb2.GAVE_UP: demonstration_buffer.StepPhase.GAVE_UP,
}
# Learner-level hyperparameter defaults; merged with (and required to be
# disjoint from) the brain's own hyperparameters in
# populate_hparams_with_defaults_and_validate.
_DEFAULT_LEARNER_HPARAMS = {
    # Should learning continue or restart when new data is received?
    'continuous': True,
    # The minimum interval between model saves, measured in batches trained.
    # If set to None, the model is never saved.
    'save_interval_batches': 20_000,
    # Minimum number of (steps * batch_size) before finishing training (or
    # restarting). If None, we don't require a minimum amount of steps to train.
    'min_train_examples': None,
    # Maximum number of (steps * batch_size). If None, we don't require
    # a maximum amount of steps to train.
    'max_train_examples': None,
    # Export models in the main thread.
    'synchronous_export': False,
}
class Error(Exception):
  """Base class for exceptions."""


class HParamError(Error):
  """Raised if there is an unknown hyperparameter in the assignment.

  Derives from this module's Error base so callers catching Error also see
  hyperparameter failures; it previously subclassed Exception directly,
  unlike every other exception in this module.
  """


class NoDataError(Error):
  """Learner could not find new data to train on."""


class ExceededMaxWorkTimeError(Error):
  """Learner exceeded maximum work time on a single assignment."""
class AssignmentStats:
  """Aggregate counters describing the processing of a whole assignment.

  Unlike StatsCollector, which tracks statistics for each trained model,
  these counters accumulate over the lifetime of one assignment.

  Attributes:
    queries_completed: Number of Spanner fetch chunk queries completed.
    frames_added: Number of frames added to the brain from fetched
      demonstration data.
    models_recorded: Number of models saved during this assignment.
    brain_train_steps: Number of calls to brain.train() during the
      assignment.
    num_restarts: Number of times training has been restarted from scratch
      with a newly initialized model.
    brain_global_step: Value of the brain's global step variable (resets
      together with restarts).
  """

  def __init__(self):
    # All counters start at zero for a fresh assignment.
    self.queries_completed = 0
    self.frames_added = 0
    self.models_recorded = 0
    self.brain_train_steps = 0
    self.num_restarts = 0
    self.brain_global_step = 0
# Statuses that ProcessAssignmentUntil can wait for / handle.
class ProcessAssignmentStatus(enum.Enum):
  """Possible stopping points for assignment processing.

  Can be used in calls to ProcessAssignmentUntil.
  """
  # A step has been processed in the current assignment.
  PROCESSED_STEP = 1
  # A step has been processed in the current assignment, and training should
  # restart before processing the next step.
  PROCESSED_STEP_NEEDS_RESTART = 2
  # A model has been saved.
  SAVED_MODEL = 3
  # The assignment is about to fetch data. Useful for making sure more data
  # is available when it fetches, during a test.
  WILL_FETCH_DATA = 4
  # The assignment finished processing.
  FINISHED = 5
def _step_generator(episode_chunks):
  """Yields steps from EpisodeChunks.

  Args:
    episode_chunks: Iterable of episode chunk protos to flatten into steps.

  Yields:
    (episode_id, chunk_id, observation, reward_value, step_phase, action,
    created_micros) tuples, one per step.

  Raises:
    ValueError: If a chunk's final episode_state maps to an UNSPECIFIED
      step phase.
  """
  for chunk in episode_chunks:
    last_index = len(chunk.data.steps) - 1
    for index, step in enumerate(chunk.data.steps):
      if chunk.chunk_id == 0 and index == 0:
        # The first step of the first chunk opens the episode.
        phase = demonstration_buffer.StepPhase.START
      elif index == last_index:
        # The final step of a chunk carries the chunk's terminal state.
        phase = _CHUNK_STATE_TO_STEP_PHASE.get(
            chunk.data.episode_state,
            demonstration_buffer.StepPhase.UNSPECIFIED)
        if phase == demonstration_buffer.StepPhase.UNSPECIFIED:
          raise ValueError(
              f'Unexpected chunk state: {chunk.data.episode_state}.')
      else:
        phase = demonstration_buffer.StepPhase.IN_PROGRESS
      yield (chunk.episode_id, chunk.chunk_id, step.observation,
             step.reward.reward_value, phase, step.action,
             chunk.created_micros)
def _get_hparams(assignment_id):
  """Parse a hyperparameters dictionary from an assignment ID.

  Args:
    assignment_id: Assignment ID to parse. If this is "default" an empty
      dictionary is returned.

  Returns:
    Dictionary containing the parsed hyperparameters.

  Raises:
    HParamError: If the assignment is malformed.
  """
  falken_logging.info(f'GetHParams got assignment_id {assignment_id}')
  if assignment_id == 'default':
    return {}
  try:
    return json.loads(assignment_id)
  except json.decoder.JSONDecodeError as error:
    error_message = (f'Failed to parse assignment ID: {error}\n' +
                     assignment_id)
    falken_logging.error(error_message)
    # Chain the original JSON error so tracebacks show the root cause.
    raise HParamError(error_message) from error
def populate_hparams_with_defaults_and_validate(hparams):
  """Construct hyperparameters for brain creation.

  Starts from the brain's default hyperparameters, layers in the default
  learner hyperparameters, and finally applies the caller's overrides.

  Args:
    hparams: Hyperparameters that override the default learner hyperparameters.

  Returns:
    Hyperparameters dictionary that can be used to create a brain.

  Raises:
    HParamError: If the provided hyperparameters overlap with default learner
      parameters or they're unknown.
  """
  result_hparams = continuous_imitation_brain.BCAgent.default_hparams()
  overlapping = [name for name in _DEFAULT_LEARNER_HPARAMS
                 if name in result_hparams]
  if overlapping:
    raise HParamError(
        f'Learner HParam overlaps with brain HParam: {overlapping[0]}')
  result_hparams.update(_DEFAULT_LEARNER_HPARAMS)
  unknown = [name for name in hparams if name not in result_hparams]
  if unknown:
    raise HParamError(f'Unknown hparam in assignment: {unknown[0]}')
  result_hparams.update(hparams)
  return result_hparams
class AssignmentProcessor:
  """Trains models based on incoming Assignments.

  Fetches episode chunks from storage, feeds them to a brain, trains,
  and periodically exports evaluated models. Intended to be used as a
  context manager; temporary checkpoints are wiped on exit.
  """
  # How often to check the DB for new data.
  _DB_QUERY_INTERVAL_SECS = 10.0
  # How long to wait for training data
  _WAIT_FOR_DATA_BRAIN_SECS = 60

  def __init__(self,
               read_assignment: falken_schema_pb2.Assignment,
               filesys_helper: file_system.FileSystem,
               storage_helper: storage.Storage,
               brain_cache: brain_cache_module.BrainCache,
               get_session_state=None,
               write_assignment=None,
               always_block_when_fetching=False):
    """Create a new assignment processor.

    Args:
      read_assignment: The Assignment proto received from the queue.
      filesys_helper: A filesystem.Filesystem object.
      storage_helper: A storage.Storage helper.
      brain_cache: BrainCache instance.
      get_session_state: Callable which takes no arguments and returns
        storage.SessionState for the assignment. When this is None
        the session state is retrieved from the session associated with
        read_assignment in the database.
      write_assignment: Assignment proto used to write assignment results.
        If this is None, results are written to read_assignment.
      always_block_when_fetching: If True, always block during fetching. Useful
        for removing racing conditions in tests.
    """
    self._brain_cache = brain_cache
    self._read_assignment = read_assignment
    self._write_assignment = (
        write_assignment if write_assignment else read_assignment)
    falken_logging.info(f'Reading from {self._read_assignment}, '
                        f'writing to {self._write_assignment}')
    # Most recently processed episode/chunk; recorded in exported models.
    self._episode_id = ''
    self._episode_chunk_id = 0
    # Timestamp (micros) of the newest human demonstration seen so far.
    self._most_recent_demo_micros = 0
    self._file = filesys_helper
    self._storage = storage_helper
    # Brain, hparams and model manager are created lazily in _process_step.
    self._brain = None
    self._hparams = None
    self._model_manager = None
    self._always_block_when_fetching = always_block_when_fetching
    self._stats = stats_collector.StatsCollector(
        self._write_assignment.project_id,
        self._write_assignment.brain_id,
        self._write_assignment.session_id,
        self._write_assignment.assignment_id)
    self._assignment_stats = AssignmentStats()
    if get_session_state:
      self._get_session_state = get_session_state
    else:
      # Default: look the session state up in the database on each call.
      def _get_session_state():
        return self._storage.get_session_state(
            self._read_assignment.project_id,
            self._read_assignment.brain_id,
            self._read_assignment.session_id)
      self._get_session_state = _get_session_state

  def __enter__(self):
    """Start processing an assignment."""
    return self

  def __exit__(self, *unused_args):
    """Stop processing an assignment and clean up temporary storage."""
    self._file.wipe_checkpoints(self._write_assignment)

  @property
  def stats(self):
    """Returns StatsCollector about the processing task."""
    return self._stats

  @property
  def assignment_stats(self):
    """Returns AssignmentStats statistics about the processing task."""
    return self._assignment_stats

  @property
  def _min_train_batches(self):
    """Min amount of batches to train (or None if unrestricted)."""
    min_train_examples = self._hparams['min_train_examples']
    if min_train_examples is None:
      return None
    return int(min_train_examples / self._hparams['batch_size'])

  @property
  def _max_train_batches(self):
    """Max amount of batches to train (or None if unlimited)."""
    max_train_examples = self._hparams['max_train_examples']
    if max_train_examples is None:
      return None
    return int(max_train_examples / self._hparams['batch_size'])

  def _create_brain(self):
    """Creates a Brain.

    Returns:
      A (brain, hparams) pair from the brain cache (see the unpacking at
      the call site in _process_step).

    Raises:
      ValueError: If no brain spec exists for the assignment's
        project_id/brain_id.
    """
    brain_spec = self._storage.get_brain_spec(
        self._read_assignment.project_id, self._read_assignment.brain_id)
    falken_logging.info('Creating brain.',
                        brain_spec=brain_spec,
                        project_id=self._write_assignment.project_id,
                        brain_id=self._write_assignment.brain_id,
                        session_id=self._write_assignment.session_id)
    if not brain_spec:
      raise ValueError(
          f'Brain spec not found for project_id: '
          f'{self._read_assignment.project_id} and '
          f'brain_id: {self._read_assignment.brain_id}.')
    checkpoint_path = self._file.create_checkpoints_path(self._write_assignment)
    summary_path = self._file.create_summary_path(self._write_assignment)
    return self._brain_cache.GetOrCreateBrain(
        _get_hparams(self._read_assignment.assignment_id),
        brain_spec, checkpoint_path, summary_path)

  def _add_episode_chunks(self, chunks):
    """Insert new EpisodeData into the brain's replay buffer.

    Args:
      chunks: A batch of EpisodeChunks

    Returns:
      The number of demo frames contained in the provided chunks.
    """
    falken_logging.info('Adding {} new chunks.'.format(len(chunks)),
                        project_id=self._write_assignment.project_id,
                        brain_id=self._write_assignment.brain_id,
                        session_id=self._write_assignment.session_id)
    demo_frames = 0
    for (episode_id, chunk_id, observation, reward, phase, action,
         timestamp) in _step_generator(chunks):
      self._episode_id = episode_id
      self._episode_chunk_id = chunk_id
      self.assignment_stats.frames_added += 1
      self._brain.record_step(observation, reward, phase, episode_id, action,
                              timestamp)
      # Only human demonstrations count as demo frames; track the newest one
      # so exported models can record demo freshness.
      if action.source == action_pb2.ActionData.HUMAN_DEMONSTRATION:
        demo_frames += 1
        if timestamp > self._most_recent_demo_micros:
          self._most_recent_demo_micros = timestamp
    falken_logging.info(
        f'Finished adding {len(chunks)} new chunks with {demo_frames} '
        f'demo frames',
        project_id=self._write_assignment.project_id,
        brain_id=self._write_assignment.brain_id,
        session_id=self._write_assignment.session_id)
    return demo_frames

  def _chunk_generator(self):
    """Generates lists of chunks by querying the database.

    Yields:
      List of new chunks if available, None otherwise.
    """
    earliest_timestamp_micros = 0
    # Fetch data associated with ancestors AND with the current session.
    session_ids = self._storage.get_ancestor_session_ids(
        self._read_assignment.project_id,
        self._read_assignment.brain_id,
        self._read_assignment.session_id)
    session_ids.add(self._read_assignment.session_id)

    def generate_chunk_key(chunk):
      """Returns a unique string identifier for an episode chunk proto.

      Args:
        chunk: data_store_pb2.EpisodeChunk proto.

      Returns:
        Unique identifier for the chunk proto.
      """
      return f'{chunk.session_id}_{chunk.episode_id}_{chunk.chunk_id}'

    previous_chunk_keys = set()
    while True:  # Yield new chunks, potentially forever.
      new_chunks = []
      new_chunk_keys = set()
      for chunk in self._storage.get_episode_chunks(
          self._read_assignment.project_id,
          self._read_assignment.brain_id,
          session_ids, earliest_timestamp_micros):
        chunk_key = generate_chunk_key(chunk)
        if chunk_key not in previous_chunk_keys:
          # Update earliest_timestamp_micros to avoid refetching data.
          earliest_timestamp_micros = max(earliest_timestamp_micros,
                                          chunk.created_micros)
          new_chunks.append(chunk)
          new_chunk_keys.add(chunk_key)
      self.assignment_stats.queries_completed += 1
      if new_chunks:
        previous_chunk_keys = new_chunk_keys
        yield new_chunks
      else:
        yield None

  def _session_complete(self):
    """Returns true if the session is stale or ended."""
    session_state = self._get_session_state()
    complete = session_state in (
        storage.SessionState.STALE, storage.SessionState.ENDED)
    if complete:
      falken_logging.info(
          'Session complete, with state: '
          f'{storage.SessionState.as_string(session_state)}',
          project_id=self._write_assignment.project_id,
          brain_id=self._write_assignment.brain_id,
          session_id=self._write_assignment.session_id)
    return complete

  def _training_complete(self):
    """Returns true if training on the assignment is complete."""
    if self._session_complete():
      falken_logging.info(
          'Stopping training, reason: session has completed.',
          project_id=self._write_assignment.project_id,
          brain_id=self._write_assignment.brain_id,
          session_id=self._write_assignment.session_id)
      return True
    if self._min_train_batches is not None and (
        self._brain.tf_agent.train_step_counter < self._min_train_batches):
      # Unless the session is closed, we train the brain for min steps.
      return False
    if self._model_manager and self._model_manager.should_stop():
      falken_logging.info(
          f'Stopping training, reason: {self._model_manager.should_stop()}',
          project_id=self._write_assignment.project_id,
          brain_id=self._write_assignment.brain_id,
          session_id=self._write_assignment.session_id)
      return True
    if self._max_train_batches is not None and (
        self._brain.global_step >= self._max_train_batches):
      falken_logging.info(
          'Stopping training, reason: Exceeded max_train_batches of '
          f'{self._max_train_batches}',
          project_id=self._write_assignment.project_id,
          brain_id=self._write_assignment.brain_id,
          session_id=self._write_assignment.session_id)
      return True
    return False

  def _save_and_evaluate_policy(self, exporter):
    """Saves the current policy and evaluates it vs current best.

    Args:
      exporter: A ModelExporter.

    Returns:
      The ID of the model that was written.
    """
    if self._session_complete():
      falken_logging.info(
          'Skipping model export on completed session.',
          project_id=self._write_assignment.project_id,
          brain_id=self._write_assignment.brain_id,
          session_id=self._write_assignment.session_id)
      # NOTE(review): this early exit returns None while the normal path
      # returns a model ID; callers treat None as "nothing saved".
      return
    falken_logging.info(
        'Writing tmp model.',
        project_id=self._write_assignment.project_id,
        brain_id=self._write_assignment.brain_id,
        session_id=self._write_assignment.session_id)
    self._stats.demonstration_frames = self._brain.num_train_frames
    self._stats.evaluation_frames = self._brain.num_eval_frames
    model_id = str(uuid.uuid4())
    with self._stats.record_event(
        stats_collector.FALKEN_EXPORT_CHECKPOINT_EVENT_NAME):
      tmp_checkpoint_path = self._file.create_tmp_checkpoint_path(
          self._write_assignment, model_id)
      self._brain.save_checkpoint(tmp_checkpoint_path)
    falken_logging.info(
        'Finished writing tmp model.',
        project_id=self._write_assignment.project_id,
        brain_id=self._write_assignment.brain_id,
        session_id=self._write_assignment.session_id)
    with self._stats.record_event(stats_collector.FALKEN_EVAL_EVENT_NAME):
      evals = list(self._brain.compute_full_evaluation())
    training_examples_completed = (
        self._brain.global_step * self._hparams['batch_size'])
    # The hparam can be set explicitly to None so we need to check for it.
    max_training_examples = (
        self._hparams['max_train_examples']
        if self._hparams.get('max_train_examples', None) else 0)
    exporter.export_model(tmp_checkpoint_path, evals, self._stats, model_id,
                          self._episode_id, self._episode_chunk_id,
                          training_examples_completed, max_training_examples,
                          self._most_recent_demo_micros)
    self.assignment_stats.models_recorded += 1
    # Compare new model to previous best and update accordingly.
    self._model_manager.record_new_model(model_id, evals)
    return model_id

  def _fetch_data(self, fetcher, initial_wait_for_data):
    """Fetches training / eval data from a fetcher and adds it to the brain.

    Args:
      fetcher: A data_fetcher.DataFetcher.
      initial_wait_for_data: Whether to wait for data on the first queue fetch.

    Returns:
      The number of demonstration frames that were added.

    Raises:
      NoDataError: If no DB query completed within the initial wait window.
    """
    falken_logging.info(
        'Checking for new training data.',
        project_id=self._write_assignment.project_id,
        brain_id=self._write_assignment.brain_id,
        session_id=self._write_assignment.session_id)
    first_fetch = True
    demo_frames = 0
    # Drain the fetcher until it reports empty; only the first fetch may
    # block for a long time.
    while True:
      try:
        block, timeout = False, None
        if initial_wait_for_data and first_fetch:
          # Wait longer on the first fetch.
          block, timeout = True, self._WAIT_FOR_DATA_BRAIN_SECS
          # TODO(lph): Change datafetcher to auto-block on first query.
        elif self._always_block_when_fetching:
          # Short block for other fetches.
          block, timeout = True, 5
        first_fetch = False
        chunks = fetcher.get(block=block, timeout=timeout)
        demo_frames += self._add_episode_chunks(chunks)
      except data_fetcher.Empty:
        # If the underlying SQL queries did not complete, then we're not
        # waiting long enough for data to arrive.
        if (initial_wait_for_data and
            not self.assignment_stats.queries_completed):
          # We are in the first loop iteration and have not completed any
          # queries after _WAIT_FOR_DATA_BRAIN_SECS.
          raise NoDataError('Could not query DB for chunks.')
        return demo_frames

  def _process_step(self, fetcher):
    """Create and train a model.

    Args:
      fetcher: A data_fetcher.DataFetcher object to pull fresh data from.

    Yields:
      Pairs (ProcessAssignmentStatus, status_metadata). This allows for
      functions like ProcessAssignmentUntil to pause and resume Process.

    Raises:
      ExceededMaxWorkTimeError: If assignment takes too long to process.
    """
    if not self._brain:
      # First pass: lazily create the brain, hparams and model manager.
      self._brain, self._hparams = self._create_brain()
      self._stats.training_steps = self._hparams['training_steps']
      self._stats.batch_size = self._hparams['batch_size']
      self._model_manager = model_manager.ModelManager()
    else:
      # Subsequent passes retrain from freshly initialized weights.
      self._brain.reinitialize_agent()
      self._model_manager.reset_counters()
    with model_exporter.ModelExporter(self._write_assignment, self._storage,
                                      self._file, self._model_manager,
                                      self._brain.hparams) as exporter:
      saved_model_id = None
      loop_counter = 0
      restart_requested = False  # Whether to restart.
      # Enter main training loop.
      while not self._training_complete():
        # Honor a pending restart only once the minimum batch count has
        # been trained.
        if restart_requested and (
            self._min_train_batches is None or
            self._brain.tf_agent.train_step_counter >=
            self._min_train_batches):
          if saved_model_id is None:
            # Save if we didn't auto-save last loop iteration.
            saved_model_id = self._save_and_evaluate_policy(exporter)
            yield ProcessAssignmentStatus.SAVED_MODEL, saved_model_id
          falken_logging.info(
              f'Restarting training after {loop_counter} iterations.',
              project_id=self._write_assignment.project_id,
              brain_id=self._write_assignment.brain_id,
              session_id=self._write_assignment.session_id)
          yield ProcessAssignmentStatus.PROCESSED_STEP_NEEDS_RESTART, None
          return
        time_elapsed = time.perf_counter() - self._start_timestamp
        falken_logging.info(
            f'{time_elapsed}s elapsed since start of assignment.',
            brain_id=self._write_assignment.brain_id,
            session_id=self._write_assignment.session_id,
            assignment_id=self._write_assignment.assignment_id)
        if time_elapsed > _MAX_ASSIGNMENT_WORK_TIME_SECS:
          raise ExceededMaxWorkTimeError(
              f'Assignment took too long. Started {time_elapsed} seconds ago.'
          )
        # Grab all data from the fetcher.
        with self._stats.record_event(
            stats_collector.FALKEN_FETCH_CHUNK_EVENT_NAME):
          yield ProcessAssignmentStatus.WILL_FETCH_DATA, None
          demo_frames = self._fetch_data(
              fetcher,
              initial_wait_for_data=(loop_counter == 0))
        continuous = self._hparams['continuous']
        falken_logging.info(
            f'Finished data fetch, training iteration {loop_counter}. '
            f'Got {demo_frames} new demo frames, continuous={continuous}',
            project_id=self._write_assignment.project_id,
            brain_id=self._write_assignment.brain_id,
            session_id=self._write_assignment.session_id)
        # Non-continuous sessions restart training from scratch whenever new
        # demonstrations arrive after the first iteration.
        if not continuous and loop_counter and demo_frames:
          restart_requested = True
          falken_logging.info('Received new data, requesting a restart.',
                              project_id=self._write_assignment.project_id,
                              brain_id=self._write_assignment.brain_id,
                              session_id=self._write_assignment.session_id)
        if not self._brain.num_train_frames:
          falken_logging.error(
              'No training frames available.',
              brain_id=self._write_assignment.brain_id,
              session_id=self._write_assignment.session_id,
              assignment_id=self._write_assignment.assignment_id)
          break
        # Perform training.
        with self._stats.record_event(
            stats_collector.FALKEN_TRAIN_BRAIN_EVENT_NAME):
          try:
            falken_logging.info('Training brain.',
                                project_id=self._write_assignment.project_id,
                                brain_id=self._write_assignment.brain_id,
                                session_id=self._write_assignment.session_id)
            self._brain.train()
            self.assignment_stats.brain_train_steps += 1
            self.assignment_stats.brain_global_step = self._brain.global_step
          except Exception as e:  # pylint: disable=broad-except
            falken_logging.error(
                f'Exception found when running _train_step: {e}.'
                f'Traceback:\n{traceback.format_exc()}',
                brain_id=self._write_assignment.brain_id,
                session_id=self._write_assignment.session_id,
                assignment_id=self._write_assignment.assignment_id)
            raise e
        # Periodically checkpoint and evaluate the policy.
        batch_count = (self.assignment_stats.brain_train_steps *
                       self._hparams['training_steps'])
        if (self._hparams['save_interval_batches'] is not None and
            batch_count % self._hparams['save_interval_batches'] == 0):
          saved_model_id = self._save_and_evaluate_policy(exporter)
          yield ProcessAssignmentStatus.SAVED_MODEL, saved_model_id
        else:
          saved_model_id = None
        loop_counter += 1
      # End of main training loop.
      if saved_model_id is None and self.assignment_stats.brain_train_steps:
        # If the last loop didn't save, save now.
        saved_model_id = self._save_and_evaluate_policy(exporter)
        yield ProcessAssignmentStatus.SAVED_MODEL, saved_model_id
    # End of exporter context
    # Learner completed normally, no restart indicated.
    yield ProcessAssignmentStatus.PROCESSED_STEP, None

  def process(self):
    """Train one or multiple brains.

    Yields:
      Pairs (ProcessAssignmentStatus, status_metadata). This allows for
      functions like ProcessAssignmentUntil to pause and resume Process.
    """
    with self._stats.record_event(stats_collector.FALKEN_PROCESS_EVENT_NAME):
      self._start_timestamp = time.perf_counter()
      if self._session_complete():
        falken_logging.info('Returning since assignment is '
                            'associated with closed or stale session.',
                            brain_id=self._read_assignment.brain_id,
                            session_id=self._read_assignment.session_id,
                            assignment_id=self._read_assignment.assignment_id)
        return
      falken_logging.info('Starting work on assignment.',
                          project_id=self._write_assignment.project_id,
                          brain_id=self._write_assignment.brain_id,
                          session_id=self._write_assignment.session_id,
                          assignment_id=self._write_assignment.assignment_id)
      with self._stats.record_event(
          stats_collector.FALKEN_MAIN_TRAINING_LOOP_EVENT_NAME):
        with data_fetcher.DataFetcher(self._chunk_generator(),
                                      self._DB_QUERY_INTERVAL_SECS) as fetcher:
          has_next_step = True
          # Re-run _process_step as long as it requests a restart.
          while has_next_step:
            # Actually do the work.
            has_next_step = False
            for status, metadata in self._process_step(fetcher):
              if status == ProcessAssignmentStatus.PROCESSED_STEP_NEEDS_RESTART:
                has_next_step = True
              yield status, metadata
            if has_next_step:
              falken_logging.info(
                  'Restarting work on assignment.',
                  project_id=self._write_assignment.project_id,
                  brain_id=self._write_assignment.brain_id,
                  session_id=self._write_assignment.session_id,
                  assignment_id=self._write_assignment.assignment_id)
              self.assignment_stats.num_restarts += 1
              # Delete checkpoints to ensure that restarts start from scratch.
              self._file.wipe_checkpoints(self._write_assignment)
      if self.assignment_stats.brain_train_steps:
        falken_logging.info(
            'Completed assignment. '
            f'Called brain.train {self.assignment_stats.brain_train_steps} '
            'times.',
            project_id=self._write_assignment.project_id,
            brain_id=self._write_assignment.brain_id,
            session_id=self._write_assignment.session_id,
            assignment_id=self._write_assignment.assignment_id)
      else:
        # This should only happen in rare cases: A learner failed to ACK
        # after training to completion, e.g., due to preemption of the
        # learner at the end of training.
        falken_logging.warn(
            'Completed assignment without training.',
            project_id=self._write_assignment.project_id,
            brain_id=self._write_assignment.brain_id,
            session_id=self._write_assignment.session_id,
            assignment_id=self._write_assignment.assignment_id)
      # Clean-up checkpoints dir.
      self._file.wipe_checkpoints(self._write_assignment)
|
Have you struggled to find quality MBA writers? We connect MBA students to the highly qualified, experienced, and screened writers to help them with their writing tasks. Writing an excellent essay for MBA is not a task someone would take for granted. You need writers who understand the requirements for university-level writings. You don’t have to settle for mediocre writers when you can get a highly trained expert from us. We have been helping many college students online to do their MBA papers, and they keep coming for more orders here. They have known the secret of relying on trusted experts on the market.
It is amazing to have your MBA paper written by the carefully vetted experts working with us. Many people get frustrated online after encountering fraud. If you have had such encounters in the past, we are here to help you avoid them at all costs. We value your education, just as you do. We aim to help you have a smooth college life free from the stress that emanates from academic writing. Allow us to work on your essay while you are handling other significant responsibilities. With the quality writers we have, there is no reason to worry. Stay calm when waiting for your exceptionally crafted essay for submission.
MBA essays need skillful writing to meet the required level of quality. Only experienced writers like ours can help you do that. You could feel tempted to do your essay on your own. Our advice is that you allow a professional writer to help you. The benefits it brings will be worth more than what you would pay. It is difficult to find a trusted company to write your essays. Since you are here, you can allow us to write and edit your paper accordingly. We offer multiple services for MBA students. If you need an MBA essay editing service, we have proficient editors to help you with that. Unfortunately, many students overlook the significance of editing their MBA papers. We take it seriously and give every paper a fresh review by expert editors. With such attention, you can be sure of submitting a flawless essay for your MBA.
Doing your MBA tasks could be the hardest thing you could imagine in this season. Sometimes pursuing a master’s education becomes challenging because of the intensive writing that you need to involve in. For most students, they could lose the balance between their academic work, social life, and other significant areas of life. Since that can be harmful to you, we avail our professional MBA essay help to you. Students can have great assistance with their MBA papers from the most competent experts in the same. While other students suffer writing their essays at the expense of the many responsibilities they have, we help our customers to overcome them quickly. Our service is trustworthy, and college students can benefit greatly from us.
MBA essays are not as short and straightforward as those done at the bachelor’s level of learning. Contrastingly, they involve in-depth research, extensive reading, and writing of long texts. All these can be a source of mild or severe stress for the students. Thanks to our trusted writing service that brings relevant help at the time you need it most. We offer excellent MBA essay writing services at an affordable price. All these aims at helping you to live a smooth life at school. We understand that you need to enjoy being in the university as well as get decent grades in academics. Since combining and balancing the two can be stressful, you could use our services to achieve that.
Professionals like us are the right people whom you can trust with important texts such as MBA papers. The number of students we have helped online is countless. We have a qualified team that works to ensure all customers are attended to. Our service ensures that the right professionals work excellently on the papers you bring to us. We always achieve high-quality writing because of the great competence of our expert writers.
The secret to achieving good essays is sharpening your research skills. If you want to know how to write an MBA essay excellently, then you need to learn how to research your topics well. MBA writings require professional demonstration of concepts. You can’t deliver an excellent essay with poorly written content. The strength and accuracy of essay content contribute to the quality of the information. This calls for adequate research and careful consideration of the information you add in your texts.
You also need to sharpen your writing skills to achieve exceptional essays for your MBA. The choice of topic, vocabulary, sentence structure and grammar should all merge to reflect a perfect author. Since you might find it difficult to achieve it, experts like us always come in handy to help. Don’t struggle with any professional work; experts are there for you. Our availability is to make you comfortable every time. If you want to save time to attend to your part-time job, then don’t let your academic work be the only hindrance. We can handle it for you.
Our essay writing service can help you achieve more than you could imagine. Unlike other writing companies, our service aims at not only fulfilling your need but also making you happy through excellent delivery of our services. Have you considered our platform for your MBA writings? Don’t get late; fill out the order form, and an expert will start working on your paper immediately. Our writing company is equipped to handle all your concerns: do not hesitate, rely on us!
|
# -*- coding: utf-8 -*-
from classytags.arguments import Argument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag
from cms.constants import PUBLISHER_STATE_PENDING
from cms.toolbar.utils import get_plugin_toolbar_js
from cms.utils.admin import render_admin_rows
from sekizai.helpers import get_varname
from django import template
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
# Template tag registry for this module.
register = template.Library()
# Base URL for the bundled admin icon images, e.g. "<STATIC_URL>admin/img/".
CMS_ADMIN_ICON_BASE = "%sadmin/img/" % settings.STATIC_URL
@register.simple_tag(takes_context=True)
def show_admin_menu_for_pages(context, pages):
    """Renders the admin page-tree rows for ``pages``.

    ``filtered`` reflects whether the surrounding changelist (if any) is
    currently filtered or searched.
    """
    request = context['request']
    filtered = False
    if 'cl' in context:
        changelist = context['cl']
        filtered = changelist.is_filtered or changelist.query
    site = context['cms_current_site']
    language = context['preview_language']
    return render_admin_rows(request, pages=pages, site=site,
                             filtered=filtered, language=language)
class TreePublishRow(Tag):
    """Renders a publish-state indicator dot for one page/language cell."""
    name = "tree_publish_row"
    options = Options(
        Argument('page'),
        Argument('language')
    )

    def _publish_state(self, page, language):
        """Returns a (css_classes, tooltip_text) pair for the page state."""
        has_live_public = (
            page.is_published(language) and page.publisher_public_id and
            page.publisher_public.is_published(language))
        if has_live_public:
            if page.is_dirty(language):
                return ("cms-pagetree-node-state cms-pagetree-node-state-dirty dirty",
                        _("unpublished changes"))
            return ("cms-pagetree-node-state cms-pagetree-node-state-published published",
                    _("published"))
        if language not in page.languages:
            return ("cms-pagetree-node-state cms-pagetree-node-state-empty empty",
                    _("no content"))
        public_pending = (
            page.publisher_public_id and
            page.publisher_public.get_publisher_state(
                language) == PUBLISHER_STATE_PENDING)
        if public_pending or page.get_publisher_state(
                language) == PUBLISHER_STATE_PENDING:
            return ("cms-pagetree-node-state cms-pagetree-node-state-unpublished-parent unpublishedparent",
                    _("unpublished parent"))
        return ("cms-pagetree-node-state cms-pagetree-node-state-unpublished unpublished",
                _("unpublished"))

    def render_tag(self, context, page, language):
        cls, text = self._publish_state(page, language)
        return mark_safe(
            '<span class="cms-hover-tooltip cms-hover-tooltip-left cms-hover-tooltip-delay %s" '
            'data-cms-tooltip="%s"></span>' % (cls, force_text(text)))
register.tag(TreePublishRow)
@register.filter
def is_published(page, language):
    """True when the page's public version is live (or pending) in ``language``."""
    live = (page.is_published(language) and page.publisher_public_id and
            page.publisher_public.is_published(language))
    if live:
        return True
    pending = (language in page.languages and page.publisher_public_id and
               page.publisher_public.get_publisher_state(
                   language) == PUBLISHER_STATE_PENDING)
    return bool(pending)
@register.filter
def is_dirty(page, language):
    # Proxies Page.is_dirty: True when the draft has unpublished changes
    # for the given language.
    return page.is_dirty(language)
@register.filter
def all_ancestors_are_published(page, language):
    """Walks up the parent chain of ``page``; returns True only when every
    ancestor is published in ``language``, otherwise False."""
    ancestor = page.parent
    while ancestor is not None:
        if not ancestor.is_published(language):
            return False
        ancestor = ancestor.parent
    return True
class CleanAdminListFilter(InclusionTag):
    """
    used in admin to display only these users that have actually edited a page
    and not everybody
    """
    name = 'clean_admin_list_filter'
    template = 'admin/cms/page/tree/filter.html'
    options = Options(
        Argument('cl'),
        Argument('spec'),
    )

    def get_context(self, context, cl, spec):
        # Sort by query string so duplicates become adjacent, then keep only
        # the first choice for each distinct query string.
        ordered = sorted(spec.choices(cl), key=lambda c: c['query_string'])
        seen = set()
        unique_choices = []
        for choice in ordered:
            query_string = choice['query_string']
            if query_string not in seen:
                seen.add(query_string)
                unique_choices.append(choice)
        return {'title': spec.title, 'choices': unique_choices}
register.tag(CleanAdminListFilter)
@register.filter
def boolean_icon(value):
    """Renders the admin yes/no/unknown icon for a boolean-ish value."""
    icon = {True: 'yes', False: 'no', None: 'unknown'}.get(value, 'unknown')
    return mark_safe(
        u'<img src="%sicon-%s.gif" alt="%s" />'
        % (CMS_ADMIN_ICON_BASE, icon, value))
@register.filter
def preview_link(page, language):
    """Returns the preview URL for ``page`` in ``language``, or '' if none.

    With i18n enabled, a missing localized path yields '' so the UI stays
    on the current page.
    """
    if settings.USE_I18N:
        # Which one of page.get_slug() and page.get_path() is the right
        # one to use in this block? They both seem to return the same thing.
        try:
            # attempt to retrieve the localized path/slug and return
            return page.get_absolute_url(language, fallback=False)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are no longer swallowed. No localized path/slug exists, so there
            # is nothing to preview; stay on the same page.
            # perhaps the user should be somehow notified for this.
            return ''
    return page.get_absolute_url(language)
class PageSubmitRow(InclusionTag):
    """Renders the submit-button row of the cms page change form."""
    name = 'page_submit_row'
    template = 'admin/cms/page/submit_row.html'

    def get_context(self, context):
        """Builds the template context controlling which buttons are shown.

        Buttons are displayed only when the current language has content and
        the user is on an editable tab with permission to change advanced
        settings.
        """
        opts = context['opts']
        change = context['change']
        is_popup = context['is_popup']
        save_as = context['save_as']
        basic_info = context.get('basic_info', False)
        advanced_settings = context.get('advanced_settings', False)
        change_advanced_settings = context.get('can_change_advanced_settings', False)
        language = context.get('language', '')
        filled_languages = context.get('filled_languages', [])
        show_buttons = language in filled_languages
        if show_buttons:
            show_buttons = (basic_info or advanced_settings) and change_advanced_settings
        context = {
            # TODO check this (old code: opts.get_ordered_objects() )
            # Replaced the legacy ``cond and X or ''`` hack with a conditional
            # expression (equivalent here because X is a non-empty string).
            'onclick_attrib': ('onclick="submitOrderForm();"'
                               if opts and change else ''),
            'show_delete_link': False,
            'show_save_as_new': not is_popup and change and save_as,
            'show_save_and_add_another': False,
            'show_save_and_continue': not is_popup and context['has_change_permission'],
            'is_popup': is_popup,
            'basic_info_active': basic_info,
            'advanced_settings_active': advanced_settings,
            'show_buttons': show_buttons,
            'show_save': True,
            'language': language,
            'language_is_filled': language in filled_languages,
            'object_id': context.get('object_id', None)
        }
        return context
register.tag(PageSubmitRow)
def in_filtered(seq1, seq2):
    """Returns the items of ``seq1`` that also appear in ``seq2``, in order."""
    result = []
    for item in seq1:
        if item in seq2:
            result.append(item)
    return result
# Register under the same name; register.filter returns the function unchanged.
in_filtered = register.filter('in_filtered', in_filtered)
@register.simple_tag
def admin_static_url():
    """
    If set, returns the string contained in the setting ADMIN_MEDIA_PREFIX, otherwise returns STATIC_URL + 'admin/'.
    """
    prefix = getattr(settings, 'ADMIN_MEDIA_PREFIX', None)
    if prefix:
        return prefix
    return settings.STATIC_URL + 'admin/'
class CMSAdminIconBase(Tag):
    # Template tag emitting the base URL for bundled admin icon images.
    name = 'cms_admin_icon_base'

    def render_tag(self, context):
        """Returns the module-level icon base path (STATIC_URL + 'admin/img/')."""
        return CMS_ADMIN_ICON_BASE
register.tag(CMSAdminIconBase)
@register.simple_tag(takes_context=True)
def render_plugin_toolbar_config(context, plugin):
    """Renders a plugin for frontend editing and queues its toolbar JS.

    Returns the rendered plugin markup; the toolbar JS is appended to the
    sekizai "js" block as a side effect.
    """
    content_renderer = context['cms_content_renderer']
    instance, plugin_class = plugin.get_plugin_instance()
    if not instance:
        # No bound instance to render; emit nothing.
        return ''
    with context.push():
        content = content_renderer.render_editable_plugin(
            instance,
            context,
            plugin_class,
        )
    # render_editable_plugin will populate the plugin
    # parents and children cache.
    placeholder_cache = content_renderer.get_rendered_plugins_cache(instance.placeholder)
    toolbar_js = get_plugin_toolbar_js(
        instance,
        request_language=content_renderer.request_language,
        children=placeholder_cache['plugin_children'][instance.plugin_type],
        parents=placeholder_cache['plugin_parents'][instance.plugin_type],
    )
    varname = get_varname()
    toolbar_js = '<script>{}</script>'.format(toolbar_js)
    # Add the toolbar javascript for this plugin to the
    # sekizai "js" namespace.
    context[varname]['js'].append(toolbar_js)
    return mark_safe(content)
@register.inclusion_tag('admin/cms/page/plugin/submit_line.html', takes_context=True)
def submit_row_plugin(context):
    """
    Displays the row of buttons for delete and save.
    """
    change = context['change']
    is_popup = context['is_popup']
    save_as = context['save_as']
    has_delete = context.get('has_delete_permission', False)
    show_delete = context.get('show_delete', True)
    ctx = {
        'opts': context['opts'],
        'show_delete_link': has_delete and change and show_delete,
        'show_save_as_new': not is_popup and change and save_as,
        'show_save_and_add_another': (
            context['has_add_permission'] and not is_popup
            and (not save_as or context['add'])
        ),
        'show_save_and_continue': not is_popup and context['has_change_permission'],
        'is_popup': is_popup,
        'show_save': True,
        'preserved_filters': context.get('preserved_filters'),
    }
    original = context.get('original')
    if original is not None:
        ctx['original'] = original
    return ctx
|
Your CCEC bill will soon take on a new and improved look. A simplified layout will make your bill easier to understand, while electricity usage charts will give you more insight into how weather and habits contribute to your bill.
Your statement print date, account number, and bill due date are provided with a summary of your existing charges.
This section will contain important information regarding your bill along with useful tips from CCEC.
This section expands upon your service summary, including account details and a breakdown of your energy and demand charges. Additional charges or credits will appear in this section.
A usage chart provides an easy way to compare how you’ve used electricity from month to month with usage, temperature, and cost averages.
Paying by mail? Be sure to include the bottom portion of your bill with the appropriate side facing out. This section includes an area to update your contact information.
|
# -*- coding: utf-8 -*-
# © 2016 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import _, api, models
from openerp.addons.event_registration_partner_unique import exceptions
class SaleOrderLine(models.Model):
    """Confirm sale order lines, merging duplicated event registrations."""
    _inherit = "sale.order.line"
    @api.multi
    def button_confirm(self):
        """Add registrations to the already existing record if possible."""
        for line in self:
            try:
                # Roll back only this line's confirmation if it fails.
                with self.env.cr.savepoint():
                    super(SaleOrderLine, line).button_confirm()
            except exceptions.DuplicatedPartnerError as error:
                # The partner is already registered: bump the existing
                # registrations instead of creating duplicates.
                registrations = error._kwargs["registrations"].with_context(
                    mail_create_nolog=True)
                quantity = int(line.product_uom_qty)
                for registration in registrations:
                    registration.nb_register += quantity
                registrations.message_post(
                    _("%d new registrations sold in %s.") %
                    (quantity, line.order_id.display_name))
        return True
|
Rob Cressy is joined by Hoopstergram’s Tom Phillips as they do a wrap-up of all things hoopsters and jerseys that went down at Lollapalooza. They dish about the best jerseys they saw at Lollapalooza, the jersey trends, and the jerseys they rocked and the dap they received (or didn’t receive). They also talk about the World’s Largest Random NBA Jerseys Database, a resource we launched that allows you to search for the NBA jerseys of 763 past and present players.
Don’t forget to check out the photo diary of Lollapalooza jerseys that I put together.
2. When you see any awesome jerseys on the street snap a pic and tag us on Instagram @BaconSports or Twitter @BaconSports and use #randomjersey.
3. Have an awesome collection of jerseys? Snap a pic and email it over to [email protected] and tell us about it. If it’s good enough we’ll feature it in an upcoming My Jersey Collection article.
1. Don’t forget to subscribe to our Bacon Sports podcast on iTunes and tell a friend. Let the Hoopster Nation podcast be your #1 source for all things random jerseys, hoopsters, and jersey culture.
2. Become part of Team Bacon, our bacon loving, jersey rocking, sports fan community, by signing up HERE!
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ADB protocol implementation.
Implements the ADB protocol as seen in android's adb/adbd binaries, but only the
host side.
"""
import struct
import time
import usb_exceptions
# Maximum amount of data in an ADB packet.
MAX_ADB_DATA = 4096
# ADB protocol version.
VERSION = 0x01000000
# AUTH constants for arg0.
AUTH_TOKEN = 1
AUTH_SIGNATURE = 2
AUTH_RSAPUBLICKEY = 3
class InvalidCommandError(Exception):
  """Got an invalid command over USB."""

  def __init__(self, message, response_header, response_data):
    # A FAIL header means the device itself rejected the command; make
    # that explicit in the exception message.
    if response_header == 'FAIL':
      message = 'Command failed, device said so. (%s)' % message
    super(InvalidCommandError, self).__init__(message, response_header,
                                              response_data)
class InvalidResponseError(Exception):
  """Got an invalid response to our command (e.g. an unexpected remote id)."""
class InvalidChecksumError(Exception):
  """Checksum of received data didn't match the expected checksum."""
class InterleavedDataError(Exception):
  """We only support commands sent serially; got interleaved streams."""
def MakeWireIDs(ids):
  """Map each 4-char command string to its little-endian wire integer.

  Args:
    ids: Iterable of command strings, e.g. 'CNXN'.

  Returns:
    A pair of dicts: (command -> wire id, wire id -> command).
  """
  id_to_wire = {}
  for cmd_id in ids:
    wire = 0
    # Byte i of the string becomes byte i of the integer (little-endian).
    for index, char in enumerate(cmd_id):
      wire |= ord(char) << (index * 8)
    id_to_wire[cmd_id] = wire
  wire_to_id = dict(zip(id_to_wire.values(), id_to_wire.keys()))
  return id_to_wire, wire_to_id
class AuthSigner(object):
  """Signer for use with authenticated ADB, introduced in 4.4.x/KitKat.

  Subclass and implement both methods to plug an RSA key into
  AdbMessage.Connect(); see its rsa_keys argument.
  """
  def Sign(self, data):
    """Signs given data using a private key."""
    raise NotImplementedError()
  def GetPublicKey(self):
    """Returns the public key in PEM format without headers or newlines."""
    raise NotImplementedError()
class _AdbConnection(object):
  """ADB Connection.

  One open stream to the device. local_id identifies our end of the
  stream, remote_id the device's end; only a single stream at a time is
  supported (see InterleavedDataError).
  """
  def __init__(self, usb, local_id, remote_id, timeout_ms):
    # usb: handle with BulkRead/BulkWrite methods.
    self.usb = usb
    self.local_id = local_id
    self.remote_id = remote_id
    self.timeout_ms = timeout_ms
  def _Send(self, command, arg0, arg1, data=''):
    # Build and transmit a single ADB message on this connection's handle.
    message = AdbMessage(command, arg0, arg1, data)
    message.Send(self.usb, self.timeout_ms)
  def Write(self, data):
    """Write a packet and expect an Ack."""
    self._Send('WRTE', arg0=self.local_id, arg1=self.remote_id, data=data)
    # Expect an ack in response.
    cmd, okay_data = self.ReadUntil('OKAY')
    if cmd != 'OKAY':
      if cmd == 'FAIL':
        raise usb_exceptions.AdbCommandFailureException(
            'Command failed.', okay_data)
      raise InvalidCommandError(
          'Expected an OKAY in response to a WRITE, got %s (%s)',
          cmd, okay_data)
    return len(data)
  def Okay(self):
    # Acknowledge a WRTE packet from the device.
    self._Send('OKAY', arg0=self.local_id, arg1=self.remote_id)
  def ReadUntil(self, *expected_cmds):
    """Read a packet, Ack any write packets."""
    cmd, remote_id, local_id, data = AdbMessage.Read(
        self.usb, expected_cmds, self.timeout_ms)
    # An id of 0 is used by connection-level packets; any other id must
    # match this stream, since multiple streams are unsupported.
    if local_id != 0 and self.local_id != local_id:
      raise InterleavedDataError("We don't support multiple streams...")
    if remote_id != 0 and self.remote_id != remote_id:
      raise InvalidResponseError(
          'Incorrect remote id, expected %s got %s' % (
              self.remote_id, remote_id))
    # Ack write packets.
    if cmd == 'WRTE':
      self.Okay()
    return cmd, data
  def ReadUntilClose(self):
    """Yield packets until a Close packet is received."""
    while True:
      cmd, data = self.ReadUntil('CLSE', 'WRTE')
      if cmd == 'CLSE':
        # Echo the close so the device knows we are done as well.
        self._Send('CLSE', arg0=self.local_id, arg1=self.remote_id)
        break
      if cmd != 'WRTE':
        if cmd == 'FAIL':
          raise usb_exceptions.AdbCommandFailureException(
              'Command failed.', data)
        raise InvalidCommandError('Expected a WRITE or a CLOSE, got %s (%s)',
                                  cmd, data)
      yield data
  def Close(self):
    # Close our end of the stream and wait for the device's matching CLSE.
    self._Send('CLSE', arg0=self.local_id, arg1=self.remote_id)
    cmd, data = self.ReadUntil('CLSE')
    if cmd != 'CLSE':
      if cmd == 'FAIL':
        raise usb_exceptions.AdbCommandFailureException('Command failed.', data)
      raise InvalidCommandError('Expected a CLSE response, got %s (%s)',
                                cmd, data)
class AdbMessage(object):
  """ADB Protocol and message class.

  Protocol Notes

  local_id/remote_id:
    Turns out the documentation is host/device ambidextrous, so local_id is the
    id for 'the sender' and remote_id is for 'the recipient'. So since we're
    only on the host, we'll re-document with host_id and device_id:

    OPEN(host_id, 0, 'shell:XXX')
    READY/OKAY(device_id, host_id, '')
    WRITE(0, host_id, 'data')
    CLOSE(device_id, host_id, '')
  """
  ids = ['SYNC', 'CNXN', 'AUTH', 'OPEN', 'OKAY', 'CLSE', 'WRTE']
  commands, constants = MakeWireIDs(ids)
  # An ADB message is 6 words in little-endian.
  format = '<6I'

  # NOTE(review): appears unused within this class — confirm before removing.
  connections = 0

  def __init__(self, command=None, arg0=None, arg1=None, data=''):
    self.command = self.commands[command]
    # The magic word is the bitwise complement of the command id; the
    # device uses it to validate the header.
    self.magic = self.command ^ 0xFFFFFFFF
    self.arg0 = arg0
    self.arg1 = arg1
    self.data = data

  @property
  def checksum(self):
    return self.CalculateChecksum(self.data)

  @staticmethod
  def CalculateChecksum(data):
    # The checksum is just a sum of all the bytes. I swear.
    return sum(map(ord, data)) & 0xFFFFFFFF

  def Pack(self):
    """Returns this message in an over-the-wire format."""
    return struct.pack(self.format, self.command, self.arg0, self.arg1,
                       len(self.data), self.checksum, self.magic)

  @classmethod
  def Unpack(cls, message):
    """Unpack a 24-byte wire header into its five meaningful fields.

    Raises:
      ValueError: The header doesn't match the expected struct format.
    """
    try:
      cmd, arg0, arg1, data_length, data_checksum, unused_magic = struct.unpack(
          cls.format, message)
    except struct.error as e:
      raise ValueError('Unable to unpack ADB command.', cls.format, message, e)
    return cmd, arg0, arg1, data_length, data_checksum

  def Send(self, usb, timeout_ms=None):
    """Send this message over USB."""
    # Header and payload go out as two separate bulk transfers.
    usb.BulkWrite(self.Pack(), timeout_ms)
    usb.BulkWrite(self.data, timeout_ms)

  @classmethod
  def Read(cls, usb, expected_cmds, timeout_ms=None, total_timeout_ms=None):
    """Receive a response from the device.

    Args:
      usb: USB device handle with BulkRead and BulkWrite methods.
      expected_cmds: Keep reading messages until one of these commands
        arrives.
      timeout_ms: Timeout for each individual USB transfer, in milliseconds.
      total_timeout_ms: Overall deadline for seeing an expected command,
        in milliseconds.

    Raises:
      InvalidCommandError: Unknown command received, or the deadline passed
        without one of expected_cmds arriving.
      InvalidChecksumError: Received payload doesn't match its checksum.

    Returns:
      A (command, arg0, arg1, data) tuple.
    """
    total_timeout_ms = usb.Timeout(total_timeout_ms)
    start = time.time()
    while True:
      msg = usb.BulkRead(24, timeout_ms)
      cmd, arg0, arg1, data_length, data_checksum = cls.Unpack(msg)
      command = cls.constants.get(cmd)
      if not command:
        raise InvalidCommandError(
            'Unknown command: %x' % cmd, cmd, (arg0, arg1))
      if command in expected_cmds:
        break
      # BUGFIX: time.time() differences are seconds while total_timeout_ms
      # is milliseconds; the previous direct comparison stretched the
      # deadline by a factor of ~1000. Convert before comparing.
      if time.time() - start > total_timeout_ms / 1000.0:
        raise InvalidCommandError(
            'Never got one of the expected responses (%s)' % expected_cmds,
            cmd, (timeout_ms, total_timeout_ms))
    if data_length > 0:
      data = ''
      while data_length > 0:
        # Payloads may be split across several bulk transfers.
        temp = usb.BulkRead(data_length, timeout_ms)
        data += temp
        data_length -= len(temp)
      actual_checksum = cls.CalculateChecksum(data)
      if actual_checksum != data_checksum:
        raise InvalidChecksumError(
            'Received checksum %s != %s', (actual_checksum, data_checksum))
    else:
      data = ''
    return command, arg0, arg1, data

  @classmethod
  def Connect(cls, usb, banner='notadb', rsa_keys=None, auth_timeout_ms=100):
    """Establish a new connection to the device.

    Args:
      usb: A USBHandle with BulkRead and BulkWrite methods.
      banner: A string to send as a host identifier.
      rsa_keys: List of AuthSigner subclass instances to be used for
          authentication. The device can either accept one of these via the Sign
          method, or we will send the result of GetPublicKey from the first one
          if the device doesn't accept any of them.
      auth_timeout_ms: Timeout to wait for when sending a new public key. This
          is only relevant when we send a new public key. The device shows a
          dialog and this timeout is how long to wait for that dialog. If used
          in automation, this should be low to catch such a case as a failure
          quickly; while in interactive settings it should be high to allow
          users to accept the dialog. We default to automation here, so it's low
          by default.

    Returns:
      The device's reported banner. Always starts with the state (device,
          recovery, or sideload), sometimes includes information after a : with
          various product information.

    Raises:
      usb_exceptions.DeviceAuthError: When the device expects authentication,
          but we weren't given any valid keys.
      InvalidResponseError: When the device does authentication in an
          unexpected way.
    """
    msg = cls(
        command='CNXN', arg0=VERSION, arg1=MAX_ADB_DATA,
        data='host::%s\0' % banner)
    msg.Send(usb)
    cmd, arg0, arg1, banner = cls.Read(usb, ['CNXN', 'AUTH'])
    if cmd == 'AUTH':
      if not rsa_keys:
        raise usb_exceptions.DeviceAuthError(
            'Device authentication required, no keys available.')
      # Loop through our keys, signing the last 'banner' or token.
      for rsa_key in rsa_keys:
        if arg0 != AUTH_TOKEN:
          raise InvalidResponseError(
              'Unknown AUTH response: %s %s %s' % (arg0, arg1, banner))
        signed_token = rsa_key.Sign(banner)
        msg = cls(
            command='AUTH', arg0=AUTH_SIGNATURE, arg1=0, data=signed_token)
        msg.Send(usb)
        cmd, arg0, unused_arg1, banner = cls.Read(usb, ['CNXN', 'AUTH'])
        if cmd == 'CNXN':
          return banner
      # None of the keys worked, so send a public key.
      msg = cls(
          command='AUTH', arg0=AUTH_RSAPUBLICKEY, arg1=0,
          data=rsa_keys[0].GetPublicKey() + '\0')
      msg.Send(usb)
      try:
        cmd, arg0, unused_arg1, banner = cls.Read(
            usb, ['CNXN'], timeout_ms=auth_timeout_ms)
      except usb_exceptions.BulkReadFailedError as e:
        if e.usb_error.value == -7:  # Timeout.
          raise usb_exceptions.DeviceAuthError(
              'Accept auth key on device, then retry.')
        raise
      # This didn't time-out, so we got a CNXN response.
      return banner
    return banner

  @classmethod
  def Open(cls, usb, destination, timeout_ms=None):
    """Opens a new connection to the device via an OPEN message.

    Not the same as the posix 'open' or any other google3 Open methods.

    Args:
      usb: USB device handle with BulkRead and BulkWrite methods.
      destination: The service:command string.
      timeout_ms: Timeout in milliseconds for USB packets.

    Raises:
      InvalidResponseError: Wrong local_id sent to us.
      InvalidCommandError: Didn't get a ready response.

    Returns:
      The local connection id.
    """
    local_id = 1
    msg = cls(
        command='OPEN', arg0=local_id, arg1=0,
        data=destination + '\0')
    msg.Send(usb, timeout_ms)
    cmd, remote_id, their_local_id, _ = cls.Read(usb, ['CLSE', 'OKAY'],
                                                 timeout_ms=timeout_ms)
    if local_id != their_local_id:
      raise InvalidResponseError(
          'Expected the local_id to be %s, got %s' % (local_id, their_local_id))
    if cmd == 'CLSE':
      # Device doesn't support this service.
      return None
    if cmd != 'OKAY':
      raise InvalidCommandError('Expected a ready response, got %s' % cmd,
                                cmd, (remote_id, their_local_id))
    return _AdbConnection(usb, local_id, remote_id, timeout_ms)

  @classmethod
  def Command(cls, usb, service, command='', timeout_ms=None):
    """One complete set of USB packets for a single command.

    Sends service:command in a new connection, reading the data for the
    response. All the data is held in memory, large responses will be slow and
    can fill up memory.

    Args:
      usb: USB device handle with BulkRead and BulkWrite methods.
      service: The service on the device to talk to.
      command: The command to send to the service.
      timeout_ms: Timeout for USB packets, in milliseconds.

    Raises:
      InterleavedDataError: Multiple streams running over usb.
      InvalidCommandError: Got an unexpected response command.

    Returns:
      The response from the service.
    """
    return ''.join(cls.StreamingCommand(usb, service, command, timeout_ms))

  @classmethod
  def StreamingCommand(cls, usb, service, command='', timeout_ms=None):
    """One complete set of USB packets for a single command.

    Sends service:command in a new connection, reading the data for the
    response. All the data is held in memory, large responses will be slow and
    can fill up memory.

    Args:
      usb: USB device handle with BulkRead and BulkWrite methods.
      service: The service on the device to talk to.
      command: The command to send to the service.
      timeout_ms: Timeout for USB packets, in milliseconds.

    Raises:
      InterleavedDataError: Multiple streams running over usb.
      InvalidCommandError: Got an unexpected response command.

    Yields:
      The responses from the service.
    """
    connection = cls.Open(usb, destination='%s:%s' % (service, command),
                          timeout_ms=timeout_ms)
    # NOTE(review): Open() returns None when the device closes the stream
    # (unsupported service); iterating then raises AttributeError. A
    # clearer exception here would change the raised type — confirm with
    # callers before tightening.
    for data in connection.ReadUntilClose():
      yield data
|
Australia's Great Barrier Reef may never recover from last year's warming-driven coral bleaching, said a study that called for urgent action in the face of ineffective conservation efforts.
Record-high temperatures in 2015 and 2016 drove an unprecedented bleaching episode, which occurs when stressed corals expel the algae that live in their tissue and provide them with food.
Bleached coral is more susceptible to disease, and without sufficient time to recover -- which can take one decade or several depending on the species -- it can die.
For the new study released late Wednesday, an Australian-led team examined the impact of three major bleaching events -- in 1998, 2002 and 2016 -- over the reef's entire 2,300-kilometre (1,400-mile) length.
In 2016, they found, the proportion of constituent reefs experiencing extreme bleaching was over four times higher than in the two previous episodes.
Only nine percent escaped bleaching altogether, compared with more than 40 percent in 2002 and 1998.
"The chances of the northern Great Barrier Reef returning to its pre-bleaching assemblage structure are slim given the scale of damage that occurred in 2016 and the likelihood of a fourth bleaching event occurring within the next decade or two as global temperatures continue to rise," the team wrote.
Earlier this month, researchers warned that the reef was already experiencing an unprecedented second straight year of bleaching.
Local reef protection "affords little or no resistance" to extreme heat, the researchers wrote in the journal Nature.
Current endeavours focus on better water quality and fisheries management, but "even the most highly protected reefs and near-pristine areas are highly susceptible to severe heat stress," they said.
The findings have important implications for coral reef conservation efforts.
"Bolstering resilience will become more challenging and less effective in coming decades because local interventions have had no discernible effect on resistance of corals to extreme heat stress," the study said.
- Scientists re-mobilise - Coinciding with the study's launch, lead author Terry Hughes, from Australia's Centre of Excellence for Coral Reef Studies, said researchers were re-mobilising to conduct aerial and underwater surveys tracking the second year of bleaching now underway.
"We’re hoping that the next 2-3 weeks will cool off quickly, and this year’s bleaching won’t be anything like last year," he said. "The severity of the 2016 bleaching was off the chart."
Co-author Janice Lough, from the Australian Institute of Marine Science, said the average sea-surface temperatures for the Australian summer in 2016 were the highest ever recorded on the Great Barrier Reef.
"In each of the three events since 1998, the pattern of bleaching matches exactly where the warmest water was each year. That allows us to predict when and where bleaching is likely to occur this year,” she added.
Hughes and Lough agree climate change is the number one threat to the World Heritage-listed natural wonder, arguing the only solution was "urgent and rapid action" to limit global warming that is expected to further increase water temperatures and coral die-offs.
"It broke my heart to see so many corals dying on northern reefs on the Great Barrier Reef in 2016,” said Hughes, who led the expansive aerial surveys that revealed the damage.
"With rising temperatures due to global warming, it’s only a matter of time before we see more of these events. A fourth event after only one year is a major blow to the reef."
The world's nations agreed in Paris in 2015 to limit average warming to two degrees Celsius (3.6 degrees Fahrenheit) over pre-industrial levels, by curbing fossil fuel burning.
|
from __future__ import absolute_import
from wallstreet.crawler import stockapi
from wallstreet.crawler.fetcher import CurlFetcher
from datetime import datetime
from wallstreet import config
class TestYahooStockHistoryAPI:
    """Exercise the Yahoo history-data API wrapper end to end."""
    def test_get_url_params(self):
        api = stockapi.YahooHistoryDataAPI()
        url, method, headers, data = api.get_url_params(
            "BIDU", start_date="20150217", end_date="20150914")
        expected_url = ("http://real-chart.finance.yahoo.com/table.csv"
                        "?s=BIDU&g=d&ignore=.csv&a=1&b=17&c=2015&d=8&e=14&f=2015")
        assert url == expected_url
        assert method == "GET"
        assert data == {}
    def test_parse_ret(self):
        api = stockapi.YahooHistoryDataAPI()
        url, method, headers, data = api.get_url_params(
            "BIDU", start_date="20150218", end_date="20150220")
        status_code, content = CurlFetcher().fetch(url, method, headers, data)
        assert status_code == 200
        days = api.parse_ret("BIDU", content)
        # Feb 18-20 2015 (Wed-Fri) are three trading days.
        assert len(days) == 3
        newest = days[0]
        assert newest.symbol == "BIDU"
        assert newest.date == datetime(2015, 2, 20)
class TestNasdaqStockInfoAPI:
    """Fetch the NASDAQ listing and check that it parses."""
    def test_all(self):
        api = stockapi.NasdaqStockInfoAPI()
        url, method, headers, data = api.get_url_params("NASDAQ")
        status_code, content = CurlFetcher().fetch(url, method, headers, data)
        stock_infos = api.parse_ret("NASDAQ", content)
        # The NASDAQ exchange lists far more than 100 symbols.
        assert len(stock_infos) > 100
class TestEdgarAPI:
    """Integration tests against the Edgar fiscal-report endpoints."""
    def test_year_fiscal_report(self):
        api = stockapi.EdgarYearReportAPI(config.get_test("edgar", "core_key"))
        url, method, headers, data = api.get_url_params(
            ["BIDU", "AAPL"], start_year=2011, end_year=2012)
        status_code, content = CurlFetcher(timeout=30).fetch(
            url, method, headers, data)
        # Two companies x two years -> four annual reports.
        assert len(api.parse_ret(content)) == 4
    def test_quarter_fiscal_report(self):
        api = stockapi.EdgarQuarterReportAPI(config.get_test("edgar", "core_key"))
        url, method, headers, data = api.get_url_params(
            "FB", start_year=2014, end_year=2015, start_quarter=3, end_quarter=1)
        status_code, content = CurlFetcher(timeout=30).fetch(
            url, method, headers, data)
        # 2014Q3 through 2015Q1 inclusive -> three quarterly reports.
        assert len(api.parse_ret(content)) == 3
    def test_company_report(self):
        api = stockapi.EdgarCompanyAPI(config.get_test("edgar", "core_key"))
        url, method, headers, data = api.get_url_params(["BIDU", "BABA"])
        status_code, content = CurlFetcher(timeout=30).fetch(
            url, method, headers, data)
        assert len(api.parse_ret(content)) == 2
|
The outermost of the five major moons of Uranus and the second largest is Oberon, which was discovered in 1787 by William Herschel. As with Titania, it was also named by William's son for a character in William Shakespeare's play A Midsummer Night's Dream.
Oberon orbits Uranus at a mean distance of about 583,500 km (362,000 miles), and its orbital period is 13.46 days. Like all of Uranus's large moons, Oberon rotates synchronously with its orbital period, keeping the same hemisphere toward the planet and the same hemisphere forward in its orbit. The moon has a diameter of 1,522 km (946 miles) and a density of 1.63 g/cm3 (0.94 oz/in3).
Photographic images transmitted by the U.S. Voyager 2 spacecraft when it flew past the Uranian system in 1986 revealed that Oberon's surface is old and that a few of the numerous bright craters appear to have been flooded by some kind of dark material that upwelled from the moon's interior.
|
# -*- coding: utf-8 -*-
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
*By: Adrian Price-Whelan*
*License: BSD*
"""
##############################################################################
# Set up numpy and matplotlib, and use a nicer set of
# plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <http://simbad.harvard.edu/simbad/>`_ database:
c1 = coord.SkyCoord(ra=89.014303*u.degree, dec=13.924912*u.degree,
                    distance=(37.59*u.mas).to(u.pc, u.parallax()),
                    pm_ra_cosdec=372.72*u.mas/u.yr,
                    pm_dec=-483.69*u.mas/u.yr,
                    radial_velocity=0.37*u.km/u.s,
                    frame='icrs')
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun can be specified as an
# `~astropy.units.Quantity` object with velocity units and is interpreted as a
# Cartesian velocity, as in the example below. Note that, as with the positions,
# the Galactocentric frame is a right-handed system (i.e., the Sun is at negative
# x values) so ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = [11.1, 244, 7.25] * (u.km / u.s)  # [vx, vy, vz]
gc_frame = coord.Galactocentric(
    galcen_distance=8*u.kpc,
    galcen_v_sun=v_sun,
    z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr  # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11.1 km/s:
vx = 11.1 * u.km/u.s
v_sun2 = u.Quantity([vx, vy, vz])  # List of Quantity -> a single Quantity
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
                                 galcen_v_sun=v_sun2,
                                 z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree  # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
    rho=ring_distances[:,np.newaxis],
    phi=phi_grid[np.newaxis],
    z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
                                                        u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
    d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
    d_phi=angular_velocity[:,np.newaxis],
    d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.SkyCoord(ring_rep, frame=coord.Galactocentric)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel(f"$v_x$ [{(u.km / u.s).to_string('latex_inline')}]")
axes[1].set_ylabel(f"$v_y$ [{(u.km / u.s).to_string('latex_inline')}]")
fig.tight_layout()
plt.show()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
for i in range(len(ring_distances)):
    ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
            label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(fr'$\mu_l \, \cos b$ [{(u.mas/u.yr).to_string("latex_inline")}]')
ax.legend()
plt.show()
|
WINTHROP -- Mrs. Kellie L. (Bunnell) Migliaccio, 47, who previously resided in Ogdensburg, passed away on Sunday April 15, 2018 in the comfort of her home with her family and friends at her side. Kellie had been courageously battling cancer for several years. Arrangements are entrusted to Hammill Funeral Home in Winthrop.
Kellie was born on August 17, 1970 in Westerly, Rhode Island to William J. Bunnell Sr. and Mona Spaulding. She graduated from Ponagansett High School in North Scituate, RI.
At one time Kellie worked as a nurse’s aide at the Waterman Heights Nursing Home in Gloster, RI.
Finding comfort in appreciating nature; Kellie enjoyed camping with her family, fishing, and relaxing with a few beers. In leisure time she would watch cooking shows and was an avid Red Sox and Cowboys fan.
While living in Ogdensburg Kellie created lasting wonderful friendships with some of the people she met; truly appreciating them and the bonds that were made. She also had a special place in her heart for her feline friend, Freddie.
Kellie is survived by 3 sons and a daughter-in-law: Jacob and Sarah Bunnell of VA, Zachery Bunnell of MA., and Anthony Costa of RI; her mother, Mona Bunnell of Winthrop; granddaughter: Elliana Bunnell of VA., and 2 granddaughters residing in MA.; brother and sister-in-law, William and Lisa Bunnell Jr. of Winthrop, 2 sisters and brothers-in-law: Rebecca and Eric Wiley of Brasher Falls, and Micah and Kimberly Bunnell of Winthrop; close friend, Cindy Pratt of Ogdensburg; and several nieces, nephews, and cousins.
She was predeceased by her father, William J. Bunnell Sr.; paternal grandparents: Lois and Richard Williams; a niece, Mya Munson; and a cousin, Jennifer Bunnell.
Calling hours will be Saturday April 21, 2018 from 2:00 to 4:00 pm at the funeral home. A closing prayer service will be offered at 4:00 pm by Certified Celebrant Julia O’Brien.
Memories and condolences may be shared with her family at www.hammillfh.com.
|
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
import csv
from lxml import etree
from json import dumps
from functools import wraps
from datetime import datetime
from flask import Response
from presence_analyzer.main import app
import logging
log = logging.getLogger(__name__) # pylint: disable=C0103
def jsonify(function):
    """
    Decorator that serializes the wrapped view's return value to JSON.

    The decorated function's result is passed through ``json.dumps`` and
    returned as a Flask ``Response`` with an ``application/json`` mimetype.
    """
    @wraps(function)
    def inner(*args, **kwargs):
        """Call the wrapped view and package its result as a JSON response."""
        payload = dumps(function(*args, **kwargs))
        return Response(payload, mimetype='application/json')
    return inner
def get_menu_data():
    """
    Read the menu CSV (path from ``app.config['MENU_CSV']``) and return a
    list of entries shaped like::

        [{'link': 'mainpage', 'title': 'This is mainpage'}]

    Column 0 of each row is the link, column 1 its title.
    """
    with open(app.config['MENU_CSV'], 'r') as csvfile:
        menu_reader = csv.reader(csvfile, delimiter=',')
        return [
            {'link': row[0], 'title': row[1]}
            for row in menu_reader
        ]
@app.template_global()
def get_menu(page_url):
    """
    Return the menu entries, marking the entry matching *page_url*.

    The matching entry (if any) gains a ``'selected': True`` key so
    templates can highlight the current page.
    """
    menu = get_menu_data()
    for entry in menu:
        if entry.get('link') == page_url:
            entry['selected'] = True
    return menu
@app.template_global()
def get_users():
    """
    Parse the users XML file (path from ``app.config['DATA_USERS']``) and
    return a dict mapping user id to ``{'name': ..., 'avatar': ...}``.

    Avatar URLs are prefixed with the host built from the file's
    ``<server>`` element (protocol://host:port).
    """
    root = etree.parse(app.config['DATA_USERS']).getroot()
    server = root.find('server')
    host = '{0}://{1}:{2}'.format(
        server.find('protocol').text,
        server.find('host').text,
        server.find('port').text,
    )
    users = {}
    for user in root.find('users'):
        users[user.get('id')] = {
            'name': unicode(user.find('name').text),
            'avatar': host + user.find('avatar').text,
        }
    return users
def get_data():
    """
    Extract presence data from the CSV file and group it by user_id.

    Produces a structure like::

        data = {
            'user_id': {
                datetime.date(2013, 10, 1): {
                    'start': datetime.time(9, 0, 0),
                    'end': datetime.time(17, 30, 0),
                },
            }
        }

    Rows that do not have exactly four columns (header/footer lines) are
    skipped; rows that fail to parse are logged and skipped.
    """
    data = {}
    with open(app.config['DATA_CSV'], 'r') as csvfile:
        presence_reader = csv.reader(csvfile, delimiter=',')
        for line_number, row in enumerate(presence_reader):
            if len(row) != 4:
                # ignore header and footer lines
                continue
            try:
                user_id = int(row[0])
                date = datetime.strptime(row[1], '%Y-%m-%d').date()
                start = datetime.strptime(row[2], '%H:%M:%S').time()
                end = datetime.strptime(row[3], '%H:%M:%S').time()
            except (ValueError, TypeError):
                log.debug('Problem with line %d: ', line_number, exc_info=True)
            else:
                data.setdefault(user_id, {})[date] = {
                    'start': start,
                    'end': end
                }
    return data
def group_by_weekday(items):
    """
    Group presence durations (seconds) by weekday number (0 = Monday).

    *items* maps dates to ``{'start': time, 'end': time}`` entries.
    """
    result = {weekday: [] for weekday in range(7)}
    for date, presence in items.items():
        result[date.weekday()].append(
            interval(presence['start'], presence['end'])
        )
    return result
def seconds_since_midnight(time):
    """
    Return the number of seconds elapsed between midnight and *time*.
    """
    hours_as_seconds = time.hour * 3600
    minutes_as_seconds = time.minute * 60
    return hours_as_seconds + minutes_as_seconds + time.second
def interval(start, end):
    """
    Return the interval in seconds between two datetime.time objects.

    Computed directly from the component differences, which is equivalent
    to ``seconds_since_midnight(end) - seconds_since_midnight(start)``.
    """
    return ((end.hour - start.hour) * 3600 +
            (end.minute - start.minute) * 60 +
            (end.second - start.second))
def mean(items):
    """
    Calculate the arithmetic mean of *items*.

    Returns 0 for an empty collection instead of raising
    ZeroDivisionError.  The ``float`` cast keeps the result exact under
    Python 2's integer division as well.
    """
    # Truthiness test is the idiomatic emptiness check (was len(items) > 0).
    return float(sum(items)) / len(items) if items else 0
def group_by_weekday_start_end(items):
    """
    Group start and end presence times (as seconds since midnight) by
    weekday number (0 = Monday).
    """
    weekdays = {day: {'start': [], 'end': []} for day in range(7)}
    for date, presence in items.items():
        bucket = weekdays[date.weekday()]
        bucket['start'].append(seconds_since_midnight(presence['start']))
        bucket['end'].append(seconds_since_midnight(presence['end']))
    return weekdays
def presence_start_end(items):
    """
    Return mean start and mean end presence times per weekday.

    Means are truncated to whole seconds since midnight.
    """
    grouped = group_by_weekday_start_end(items)
    result = {}
    for weekday, times in grouped.items():
        result[weekday] = {
            'start': int(mean(times['start'])),
            'end': int(mean(times['end'])),
        }
    return result
|
^ 1.0 1.1 Johnson, R. Skip. Emotional Blackmail: Fear, Obligation and Guilt (FOG). BPDFamily.com. 16 August 2014 [18 October 2014].
^ 家人之間的情緒勒索. 今周刊. [2017-03-04] (中文(台灣)).
^ 你被情緒勒索了嗎?. 天下雜誌. 2017-02-24 [2017-03-04] (中文(台灣)).
^ Storm.mg. 你我都有的「情緒陰影」!了解它,才能不被情緒牽著鼻子走,擺脫情緒化的心魔-風傳媒. [2018-02-11] (中文(台灣)).
^ Knee, C. Raymond; Canevello, Amy; Bush, Amber L.; Cook, Astrid. Relationship-contingent self-esteem and the ups and downs of romantic relationships.. Journal of Personality and Social Psychology: 608–627. doi:10.1037/0022-3514.95.3.608.
|
# -*- coding: utf-8 -*-
"""Encapsulates settings for a plot
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from qgis.PyQt.QtCore import (
QFile,
QIODevice
)
from qgis.PyQt.QtXml import QDomDocument, QDomElement
from qgis.core import (
QgsXmlUtils,
QgsPropertyCollection,
QgsPropertyDefinition
)
class PlotSettings:  # pylint: disable=too-many-instance-attributes
    """
    The PlotSettings class encapsulates all settings relating to a plot, and contains
    methods for serializing and deserializing these settings.
    """

    # Numeric keys identifying the data-defined (expression-drivable)
    # override properties.
    PROPERTY_FILTER = 1
    PROPERTY_MARKER_SIZE = 2
    PROPERTY_COLOR = 3
    PROPERTY_STROKE_COLOR = 4
    PROPERTY_STROKE_WIDTH = 5
    PROPERTY_X_MIN = 6
    PROPERTY_X_MAX = 7
    PROPERTY_Y_MIN = 8
    PROPERTY_Y_MAX = 9
    PROPERTY_TITLE = 10
    PROPERTY_LEGEND_TITLE = 11
    PROPERTY_X_TITLE = 12
    PROPERTY_Y_TITLE = 13
    PROPERTY_Z_TITLE = 14

    # Definition (name, description, expected value type) for each
    # overridable property; used by QgsPropertyCollection (de)serialization.
    DYNAMIC_PROPERTIES = {
        PROPERTY_FILTER: QgsPropertyDefinition('filter', 'Feature filter', QgsPropertyDefinition.Boolean),
        PROPERTY_MARKER_SIZE: QgsPropertyDefinition('marker_size', 'Marker size', QgsPropertyDefinition.DoublePositive),
        PROPERTY_COLOR: QgsPropertyDefinition('color', 'Color', QgsPropertyDefinition.ColorWithAlpha),
        PROPERTY_STROKE_COLOR: QgsPropertyDefinition('stroke_color', 'Stroke color',
                                                     QgsPropertyDefinition.ColorWithAlpha),
        PROPERTY_STROKE_WIDTH: QgsPropertyDefinition('stroke_width', 'Stroke width',
                                                     QgsPropertyDefinition.DoublePositive),
        PROPERTY_TITLE: QgsPropertyDefinition('title', 'Plot title', QgsPropertyDefinition.String),
        PROPERTY_LEGEND_TITLE: QgsPropertyDefinition('legend_title', 'Legend title', QgsPropertyDefinition.String),
        PROPERTY_X_TITLE: QgsPropertyDefinition('x_title', 'X title', QgsPropertyDefinition.String),
        PROPERTY_Y_TITLE: QgsPropertyDefinition('y_title', 'Y title', QgsPropertyDefinition.String),
        PROPERTY_Z_TITLE: QgsPropertyDefinition('z_title', 'Z title', QgsPropertyDefinition.String),
        PROPERTY_X_MIN: QgsPropertyDefinition('x_min', 'X axis minimum', QgsPropertyDefinition.Double),
        PROPERTY_X_MAX: QgsPropertyDefinition('x_max', 'X axis maximum', QgsPropertyDefinition.Double),
        PROPERTY_Y_MIN: QgsPropertyDefinition('y_min', 'Y axis minimum', QgsPropertyDefinition.Double),
        PROPERTY_Y_MAX: QgsPropertyDefinition('y_max', 'Y axis maximum', QgsPropertyDefinition.Double)
    }

    def __init__(self, plot_type: str = 'scatter', properties: dict = None, layout: dict = None,
                 source_layer_id=None):
        """
        :param plot_type: plotly trace type (e.g. 'scatter', 'bar')
        :param properties: optional dict overriding individual plot properties
        :param layout: optional dict overriding individual layout properties
        :param source_layer_id: id of the vector layer the plot reads from
        """
        # Define default plot dictionary used as a basis for plot initialization
        # prepare the default dictionary with None values
        # plot properties
        plot_base_properties = {
            'marker': 'markers',
            'custom': None,
            'hover_text': None,
            'additional_hover_text': None,
            'hover_label_text': None,
            'x_name': '',
            'y_name': '',
            'z_name': '',
            'in_color': '#8ebad9',
            'out_color': '#1f77b4',
            'marker_width': 1,
            'marker_size': 10,
            'marker_symbol': 0,
            'line_dash': 'solid',
            'box_orientation': 'v',
            'box_stat': None,
            'box_outliers': False,
            'name': '',
            'normalization': None,
            'cont_type': 'fill',
            'color_scale': None,
            'show_lines': False,
            'cumulative': False,
            'show_colorscale_legend': False,
            'invert_color_scale': False,
            'invert_hist': 'increasing',
            'bins': 0,
            'selected_features_only': False,
            'visible_features_only': False,
            'color_scale_data_defined_in_check': False,
            'color_scale_data_defined_in_invert_check': False,
            'marker_type_combo': 'Points',
            'point_combo': '',
            'line_combo': 'Solid Line',
            'contour_type_combo': 'Fill',
            'show_lines_check': False,
            'opacity': 1,
            'violin_side': None,
            'violin_box': False,
            'show_mean_line': False,
            'layout_filter_by_map': False,
            'layout_filter_by_atlas': False
        }
        # layout nested dictionary
        plot_base_layout = {
            'title': 'Plot Title',
            'legend': True,
            'legend_title': None,
            'legend_orientation': 'h',
            'x_title': '',
            'y_title': '',
            'z_title': '',
            'xaxis': None,
            'bar_mode': None,
            'x_type': None,
            'y_type': None,
            'x_inv': None,
            'y_inv': None,
            'x_min': None,
            'x_max': None,
            'y_min': None,
            'y_max': None,
            'range_slider': {'borderwidth': 1, 'visible': False},
            'bargaps': 0,
            'polar': {'angularaxis': {'direction': 'clockwise'}},
            'additional_info_expression': '',
            'bins_check': False,
            'gridcolor': '#bdbfc0'
        }
        self.plot_base_dic = {
            'plot_type': None,
            'layer': None,
            'plot_prop': plot_base_properties,
            'layout_prop': plot_base_layout
        }
        self.data_defined_properties = QgsPropertyCollection()
        # Set class properties - we use the base dictionaries, replacing base values with
        # those from the passed properties dicts
        if properties is None:
            self.properties = plot_base_properties
        else:
            self.properties = {**plot_base_properties, **properties}
        if layout is None:
            self.layout = plot_base_layout
        else:
            self.layout = {**plot_base_layout, **layout}
        self.plot_type = plot_type
        # Per-feature data series populated when the plot is built.
        self.x = []
        self.y = []
        self.z = []
        self.feature_ids = []
        self.additional_hover_text = []
        # Evaluated results of the data-defined overrides, one entry per feature.
        self.data_defined_marker_sizes = []
        self.data_defined_colors = []
        self.data_defined_stroke_colors = []
        self.data_defined_stroke_widths = []
        # layout properties
        self.data_defined_title = ""
        self.data_defined_legend_title = ""
        self.data_defined_x_title = ""
        self.data_defined_y_title = ""
        self.data_defined_z_title = ""
        self.data_defined_x_min = None
        self.data_defined_x_max = None
        self.data_defined_y_min = None
        self.data_defined_y_max = None
        self.source_layer_id = source_layer_id

    def write_xml(self, document: QDomDocument):
        """
        Writes the plot settings to an XML element and returns it.

        The element is created within (but not appended to) *document*.
        """
        element = QgsXmlUtils.writeVariant({
            'plot_type': self.plot_type,
            'plot_properties': self.properties,
            'plot_layout': self.layout,
            'source_layer_id': self.source_layer_id,
            'dynamic_properties': self.data_defined_properties.toVariant(PlotSettings.DYNAMIC_PROPERTIES)
        }, document)
        return element

    def read_xml(self, element: QDomElement) -> bool:
        """
        Reads the plot settings from an XML element.

        Returns False (leaving the object unchanged) when the element does
        not contain the expected keys.
        """
        res = QgsXmlUtils.readVariant(element)
        if not isinstance(res, dict) or \
                'plot_type' not in res or \
                'plot_properties' not in res or \
                'plot_layout' not in res:
            return False
        self.plot_type = res['plot_type']
        self.properties = res['plot_properties']
        self.layout = res['plot_layout']
        # Optional keys: tolerate settings written by older versions.
        self.source_layer_id = res.get('source_layer_id', None)
        self.data_defined_properties.loadVariant(res.get('dynamic_properties', None), PlotSettings.DYNAMIC_PROPERTIES)
        return True

    def write_to_project(self, document: QDomDocument):
        """
        Writes the settings to a project (represented by the given DOM document)
        under a 'DataPlotly' element appended to the 'qgis' root node.
        """
        elem = self.write_xml(document)
        parent_elem = document.createElement('DataPlotly')
        parent_elem.appendChild(elem)
        root_node = document.elementsByTagName("qgis").item(0)
        root_node.appendChild(parent_elem)

    def read_from_project(self, document: QDomDocument):
        """
        Reads the settings from a project (represented by the given DOM document).

        Returns False when the document has no 'qgis' root or no 'DataPlotly'
        element.
        """
        root_node = document.elementsByTagName("qgis").item(0)
        if root_node.isNull():
            return False
        node = root_node.toElement().firstChildElement('DataPlotly')
        if node.isNull():
            return False
        elem = node.toElement()
        return self.read_xml(elem.firstChildElement())

    def write_to_file(self, file_name: str) -> bool:
        """
        Writes the settings to an XML file.

        Returns True on success, False when the file could not be written.
        """
        document = QDomDocument("dataplotly")
        elem = self.write_xml(document)
        document.appendChild(elem)
        try:
            with open(file_name, "w") as f:
                f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
                f.write(document.toString())
            return True
        except OSError:
            # Broadened from FileNotFoundError: also report failure (rather
            # than crash) on PermissionError, IsADirectoryError, etc.
            return False

    def read_from_file(self, file_name: str) -> bool:
        """
        Reads the settings from an XML file.

        Returns True on success, False when the file could not be opened or
        parsed.
        """
        f = QFile(file_name)
        if f.open(QIODevice.ReadOnly):
            document = QDomDocument()
            parsed = document.setContent(f)
            # Close the handle explicitly -- the original left the QFile
            # open (resource leak).
            f.close()
            if parsed and self.read_xml(document.firstChildElement()):
                return True
        return False
|
As requested, this post is to dabble in the lotions and potions that claim to brighten, smooth, and color correct your skin tone over time. Sounds like a miracle in a jar, right? After trying out a few samples, I suggest grabbing one of these products to experience your own miracle!
I checked out Even Better Makeup by Clinique. I love how much it covers, but how it still looks natural. I don't have a lot of scars or dark spots around my face, but I do sometimes get splotchy. This did the trick! After a couple days my skin seemed more even-colored and I didn't need a ton of makeup. I've heard it doesn't really suit those with oily skin, so I suggest straying away from this foundation if you have that type of skin.
I had heard a lot about Brighter by Nature Moisturizer by Origins. It has SPF 35, which is perfect to protect my skin from the Arizona sun. It smells delicious, since I am a fan of cucumber and basil scents. It goes on really smoothly and it didn't feel like I was tugging at my skin as I rubbed it on. My skin definitely looked brighter and healthy, which I loved. If your skin is clear, you really don't even need makeup if you put this moisturizer on. It's fantastic!
As I mentioned before, my skin can get a little red sometimes. I tried the Correcting Moisturizer by Murad and was pretty pleased. The peppermint soothes any splotches, and other moisturizers work to correct the overall tint. It's really lightweight, so wearing makeup over it doesn't feel too heavy.
After browsing Sephora and Ulta, I decided to hit the drugstores. I found Definity Color Recapture by Olay. It's still expensive for Target, but a little more affordable than higher-end makeup stores. It has a tint to it and works as a moisturizer and a sunscreen. It's three-in-one! It's brightened my skin, evened out my skin tone, and I haven't really touched my makeup besides concealer. I'm just out the door after some mascara.
I would definitely suggest trying out some of these items if you're tired of the time-consuming makeup routine and want bright, fresh-looking skin.
|
from client import *
import base64
# Build an authenticated Pub/Sub API client (helper defined in client.py).
client = create_pubsub_client()

# You can fetch multiple messages with a single API call.
batch_size = 100

# Fully-qualified subscription resource name to pull from.
subscription = 'projects/single-object-747/subscriptions/mysubscription'

# Create a POST body for the Pub/Sub request
body = {
    # Setting ReturnImmediately to false instructs the API to wait
    # to collect the message up to the size of MaxEvents, or until
    # the timeout.
    'returnImmediately': False,
    'maxMessages': batch_size,
}

# Poll forever: pull a batch, print each payload, then acknowledge the batch.
while True:
    resp = client.projects().subscriptions().pull(
        subscription=subscription, body=body).execute()

    received_messages = resp.get('receivedMessages')
    if received_messages is not None:
        ack_ids = []
        for received_message in received_messages:
            pubsub_message = received_message.get('message')
            if pubsub_message:
                # Process messages
                # (payload is base64-encoded by the Pub/Sub API;
                # Python 2 print statement)
                print base64.b64decode(str(pubsub_message.get('data')))
                # Get the message's ack ID
                ack_ids.append(received_message.get('ackId'))

        # Create a POST body for the acknowledge request
        # NOTE(review): if received_messages is an empty list, this still
        # issues an acknowledge call with no ack IDs -- confirm the API
        # tolerates that.
        ack_body = {'ackIds': ack_ids}

        # Acknowledge the message.
        client.projects().subscriptions().acknowledge(
            subscription=subscription, body=ack_body).execute()
|
Positive Quotes Inspiration Positive Quotes Brainyquote picture is in category Quotes that can be used for individual and noncommercial purposes, because all trademarks referenced herein are the properties of their respective owners. You can browse other pictures of Positive Quotes Inspiration Positive Quotes Brainyquote in our galleries below. If you want to see other pictures, you can browse our other categories.
Positive Quotes Inspiration Positive Quotes Brainyquote was posted in August 4 2018 at 8:28 am and has been seen by 50 users. If you want to view image in full size just click image on gallery or click "View Image" at the bottom of the image.
|
# import implemented python files
import Config
from utils import DataLoader, GensimModels, DataPlotter
class IngredientAnalysis:
    """Run word-vector analogy queries over ingredient embeddings.

    *word_vectors* is expected to be a gensim-style keyed-vectors object
    exposing ``most_similar`` -- presumably loaded by GensimModels; confirm
    against the caller.
    """

    def __init__(self, word_vectors):
        # Python 2 print statement; announces construction for CLI feedback.
        print "\nIngredientAnalysis initialized"
        self.word_vectors = word_vectors

    def analogy(self):
        """Print ingredients analogous to 'orange' as 'apple_juice' is to 'apple'.

        Prints every (word, score) hit scoring above 0.5; prints a fallback
        line for each weaker hit.
        """
        # NOTE(review): despite the variable name, this calls most_similar(),
        # not most_similar_cosmul() -- confirm which was intended.
        list_most_similar_cosmul = self.word_vectors.most_similar(positive=['orange', 'apple_juice'], negative=['apple'])
        print "\nIngredient Analogy"
        for dic in list_most_similar_cosmul:
            word = dic[0]
            score = dic[1]
            if score > 0.5 :
                print word, score
            else:
                print "No similar words"
if __name__ == '__main__':
    # Load the pre-trained ingredient embeddings from disk.
    gensimLoader = GensimModels.GensimModels()
    model = gensimLoader.load_word2vec(path=Config.path_embeddings_ingredients)
    # NOTE(review): 'vocab' is computed but never used below -- confirm
    # whether it is needed.
    vocab = model.vocab

    """
    Analyze Intredient2Vec
    """
    # analgoy test
    ingredientAnalyzer = IngredientAnalysis(model)
    ingredientAnalyzer.analogy()

    """
    Plot Ingredient2Vec
    """
    # TSNE
    # Project the embeddings to 2D for visualization.
    model_TSNE = DataPlotter.load_TSNE(model)

    # plot data with category
    DataPlotter.plot_category(model=model, model_tsne=model_TSNE, path=Config.path_plottings_ingredients_category, withLegends=True)

    # plot data with clustering
    DataPlotter.plot_clustering(model=model, model_tsne=model_TSNE, path=Config.path_plottings_ingredients_clustering)
|
This is what respected independent financial journalist Nic Cicutti says about us: https://www.moneymarketing.co.uk/nic-cicutti-latest-collapsed-investment-firm-is-a-misselling-scandal-of-the-fcas-making/?unapproved=146907&moderation-hash=a5aed1c434cd1f410062d89df3605fc1#comment-146907 . More links to press comments are included below.
West Riding is a family business co-owned by Neil and Linda Liversidge. Toni Turton who joined us in 2011 manages our reception in addition to her main role as Neil's Personal Assistant. Our Operations Manager Charlotte Kelso joined the company in 2013 after 18 years with HSBC. Charlotte is also a fully qualified adviser additionally qualified to advise on equity release. Nikita Kyle who joined us in November 2016 is Charlotte's Personal Assistant and is training to qualify as an advisor. Our newest arrival Amy Hirst joined our admin' team on 1 August 2018, our first day in West Riding House following the move from our old office nearby in Sagar Street. Like all our staff Amy will have the opportunity and support to train and qualify for any role to which she aspires. We thoroughly invest in our people.
Linda Liversidge is West Riding's Company Secretary. Linda previously worked for many years in a major Leeds law firm and was the co-founder of the company in July 2004. Neil Liversidge, West Riding's Managing Director, has worked in financial services since 1980 and is an elected Council Member of PIMFA, the UK's Personal Investment Management & Financial Advice Association. Neil's previous experience includes five years in a major UK merchant banking and life assurance group, ten years in private client investment management at major regional and national firms, and seven years in charge of investment research at what was then the UK's largest national adviser network, DBS Management PLC. At DBS, as the ‘advisor to the advisors’, Neil provided research support and guidance to more than 1,800 IFA firms and 4,000 individual advisors throughout the UK.
Protecting Our Clients - Scambusting!
We understand that decisions affecting your financial future are amongst the most important you’ll ever make so we’ll give you the time and attention you need. We offer an exploratory first meeting here at our expense during which we will conduct a thorough fact-finding exercise. Once we understand your situation and what work is required we shall tell you in writing what the cost will be. All fees are agreed in advance - there are no nasty surprises! We are certainly not expensive but we do deliver value and our FairFees Promise makes West Riding one of the UK's most competitive firms of its type. We provide all recommendations in writing and are happy to work with your solicitor, accountant or other professional advisors to ensure our advice fits with theirs. Most important of all, we tell it like it is in plain English.
Neil Liversidge is a regular contributor to BBC programming and has appeared on BBC Radio 4's Money Box and BBC TV's Panorama. For an exceptionally long 8-year run on BBC Radio Leeds, Neil was the station's 'Money Guru', answering listeners' money questions and commenting on financial matters in a regular weekly slot with one of BBC Local Radio's most distinguished broadcasters, Andrew Edwards. In the print media, Neil wrote a monthly column for over five years in the trade magazine Money Marketing. His letters and articles have also been featured in many other publications.
West Riding Personal Financial Solutions Limited is directly authorised by the Financial Conduct Authority as FCA firm number 402246 and registered in England as Company Number 5142989. The Registered Office is at West Riding House, 6-8 Commercial Street, Castleford, West Yorkshire, England WF10 1DG.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup file for the chemcoord package.
"""
from __future__ import with_statement
from __future__ import absolute_import
from setuptools import setup, find_packages
from io import open # pylint:disable=redefined-builtin
import version
# ---- Distribution metadata consumed by setup_package() below ----
MAIN_PACKAGE = 'chemcoord'
DESCRIPTION = "Python module for dealing with chemical coordinates."
LICENSE = 'LGPLv3'
AUTHOR = 'Oskar Weser'
EMAIL = '[email protected]'
URL = 'https://github.com/mcocdawc/chemcoord'
# Runtime dependencies installed automatically by pip.
INSTALL_REQUIRES = ['numpy>=1.14', 'scipy', 'pandas>=1.0', 'numba>=0.35',
                    'sortedcontainers', 'sympy', 'six', 'pymatgen']
# Search keywords for the package index.
KEYWORDS = ['chemcoord', 'transformation', 'cartesian', 'internal',
            'chemistry', 'zmatrix', 'xyz', 'zmat', 'coordinates',
            'coordinate system']
# Trove classifiers describing maturity, license, and supported platforms.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Console',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
    'Operating System :: Unix',
    'Operating System :: POSIX',
    'Operating System :: Microsoft :: Windows',
    'Natural Language :: English',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Topic :: Scientific/Engineering :: Chemistry',
    'Topic :: Scientific/Engineering :: Physics']
def readme():
    '''Return the contents of the README.md file.'''
    # Read explicitly as UTF-8 so installation does not depend on the
    # platform default encoding (io.open is imported at module level for
    # Python 2 compatibility).
    with open('README.md', encoding='utf-8') as freadme:
        return freadme.read()
def setup_package():
    """Configure and invoke setuptools for the chemcoord distribution.

    All metadata comes from the module-level constants above; the version
    string is derived from version.py in PEP 440 form and sources live
    under the ``src/`` layout.
    """
    setup(
        name=MAIN_PACKAGE,
        version=version.get_version(pep440=True),
        description=DESCRIPTION,
        long_description=readme(),
        license=LICENSE,
        author=AUTHOR,
        author_email=EMAIL,
        url=URL,
        keywords=KEYWORDS,
        classifiers=CLASSIFIERS,
        packages=find_packages('src'),
        package_dir={'': 'src'},
        include_package_data=True,
        install_requires=INSTALL_REQUIRES,
    )


if __name__ == "__main__":
    setup_package()
|
Glowing shades of blue like a barracuda sliding toward the boat came a giant muskie. One of those follows you may tell people about without mentioning it was the biggest one you'd ever laid eyes on.
Happened just a few weeks ago, as I write this. One of those eye-popping, breath-stealing moments that can turn a normal, functioning human being into a babbling, obsessed "'ski head." Muskies can exert a powerful grip on their prey, but nothing like the hold they have on our minds. What causes a fisherman to abandon every other species — and every other form of outdoor recreation — to exclusively hunt muskies?
I guess you have to be there. And those who have been there will read this and wonder: What about Wabigoon? And Green Bay? Cave Run? The English and Winnipeg rivers? Somebody will certainly ask those questions. How could such places not make a list of the world's best musky waters? The answer is simple: Every list has to end someplace, and the criteria for this one includes world records, a history of producing giants, documented behemoths, verifiable angling reports, and real results from population and creel surveys taken by fisheries folks.
Things can change (and they always do), but that's how we determined which muskie waters might currently be the best in all the world for the most addictive fish on the planet.
|
import os
import sys
import getpass
import subprocess
class TerminalPlatformUnsupported(Exception):
    """Platform-specific functionality is not supported

    Raised by code that can not be used to interact with the terminal on this
    platform.
    """
    # The redundant `pass` after the docstring was removed -- a docstring is
    # a sufficient class body.
class Colors:
    """ANSI terminal color helpers.

    Each attribute is a static function mapping text to a colorized
    string, e.g. ``Colors.red("error")``.  Pass ``bold=True`` for the
    bold variant of the color.
    """

    def __wrap_with(raw_code):
        @staticmethod
        def inner(text, bold=False):
            # "1;" prefixes the SGR code to request the bold attribute.
            sgr_code = "1;{}".format(raw_code) if bold else raw_code
            return "\033[{}m{}\033[0m".format(sgr_code, text)
        return inner

    red = __wrap_with("31")
    green = __wrap_with("32")
    yellow = __wrap_with("33")
    blue = __wrap_with("34")
    magenta = __wrap_with("35")
    cyan = __wrap_with("36")
    white = __wrap_with("37")
class PosixEchoControl:
    """Posix Console Echo Control Driver

    Uses termios on POSIX compliant platforms to control console echo. Is not
    supported on Windows as termios is not available and will throw a
    TerminalPlatformUnsupported exception if constructed on Windows.
    """

    def __init__(self):
        # Import termios lazily so merely importing this module does not
        # fail on platforms (e.g. Windows) that lack it.
        try:
            import termios
            self.termios = termios
        except ImportError:
            raise TerminalPlatformUnsupported("POSIX not supported")

    def set_echo(self, enabled):
        """Enable or disable terminal echo on stdin.

        Silently does nothing when stdin is not attached to a TTY
        (e.g. when input is piped).
        """
        handle = sys.stdin.fileno()
        if not os.isatty(handle):
            return

        attrs = self.termios.tcgetattr(handle)
        # Index 3 of the tcgetattr list is the local-modes word (lflag),
        # which carries the ECHO bit.
        if enabled:
            attrs[3] |= self.termios.ECHO
        else:
            attrs[3] &= ~self.termios.ECHO

        # TCSANOW applies the change immediately.
        self.termios.tcsetattr(handle, self.termios.TCSANOW, attrs)
class Win32EchoControl:
    """Windows Console Echo Control Driver

    This uses the console API from WinCon.h and ctypes to control console echo
    on Windows clients. It is not possible to construct this class on
    non-Windows systems, on those systems it will throw a
    TerminalPlatformUnsupported exception.
    """

    # Win32 constants: standard input handle id and the console mode bit
    # that enables input echo (see WinCon.h).
    STD_INPUT_HANDLE = -10
    ENABLE_ECHO_INPUT = 0x4
    DISABLE_ECHO_INPUT = ~ENABLE_ECHO_INPUT

    def __init__(self):
        import ctypes

        # ctypes.windll only exists on Windows; use its absence to detect
        # unsupported platforms.
        if not hasattr(ctypes, "windll"):
            raise TerminalPlatformUnsupported("Windows not supported")

        from ctypes import wintypes

        self.ctypes = ctypes
        self.wintypes = wintypes
        self.kernel32 = ctypes.windll.kernel32

    def _GetStdHandle(self, handle):
        # Thin wrapper over kernel32.GetStdHandle.
        return self.kernel32.GetStdHandle(handle)

    def _GetConsoleMode(self, handle):
        # Read the current console mode flags via an output DWORD.
        mode = self.wintypes.DWORD()
        self.kernel32.GetConsoleMode(handle, self.ctypes.byref(mode))
        return mode.value

    def _SetConsoleMode(self, handle, value):
        self.kernel32.SetConsoleMode(handle, value)

    def set_echo(self, enabled):
        """Enable or disable console input echo on the standard input handle."""
        stdin = self._GetStdHandle(self.STD_INPUT_HANDLE)
        mode = self._GetConsoleMode(stdin)
        if enabled:
            self._SetConsoleMode(stdin, mode | self.ENABLE_ECHO_INPUT)
        else:
            self._SetConsoleMode(stdin, mode & self.DISABLE_ECHO_INPUT)
class Screen:
    """Terminal facade: echo control plus colored output and input helpers."""

    def __init__(self):
        # Initialize the attribute first: the original never assigned it
        # before the availability check, so when both platform drivers
        # failed the constructor raised AttributeError instead of the
        # intended TerminalPlatformUnsupported.
        self._echo_driver = None
        try:
            self._echo_driver = PosixEchoControl()
        except TerminalPlatformUnsupported:
            pass

        if self._echo_driver is None:
            try:
                self._echo_driver = Win32EchoControl()
            except TerminalPlatformUnsupported:
                pass

        if not self._echo_driver:
            raise TerminalPlatformUnsupported("No supported terminal driver")

    def set_echo(self, enabled):
        """Enable or disable console echo via the platform driver."""
        self._echo_driver.set_echo(enabled)

    @staticmethod
    def clear():
        """Clear the terminal and home the cursor using ANSI escape codes."""
        sys.stdout.write("\x1b[2J\x1b[H")
        sys.stdout.flush()

    @staticmethod
    def print_error(msg):
        """Print *msg* in red."""
        print(Colors.red(msg))

    @staticmethod
    def print_success(msg):
        """Print *msg* in green."""
        print(Colors.green(msg))

    @staticmethod
    def get_string(prompt):
        """Prompt repeatedly until a non-empty string is entered; return it."""
        while True:
            value = input(prompt).strip()
            if not value:
                print(Colors.red("Value Required!"))
            else:
                return value

    @staticmethod
    def get_password(prompt="Password: "):
        """Prompt (with echo suppressed) until a non-empty password is entered."""
        while True:
            value = getpass.getpass(prompt)
            if not value:
                print(Colors.red("Value Required!"))
            else:
                return value

    @staticmethod
    def get_integer(prompt):
        """Gather user input and convert it to an integer

        Will keep trying till the user enters an integer or until they ^C the
        program.
        """
        while True:
            try:
                return int(input(prompt).strip())
            except ValueError:
                print(Colors.red("Invalid Input!"))
def iterate_forever(func, *args, **kwargs):
    """Endlessly yield items from a restartable finite iterator.

    ``func(*args, **kwargs)`` must return an iterator.  Whenever that
    iterator is exhausted, *func* is called again for a fresh one.  Each
    item has ``prepare_playback()`` invoked on it before being yielded.
    """
    iterator = func(*args, **kwargs)
    while True:
        try:
            item = next(iterator)
        except StopIteration:
            # Exhausted: restart with a fresh iterator and try again.
            iterator = func(*args, **kwargs)
        else:
            item.prepare_playback()
            yield item
class SilentPopen(subprocess.Popen):
    """Popen variant that discards stderr and pipes stdin/stdout."""

    def __init__(self, *args, **kwargs):
        # Keep our own /dev/null handle so it can be closed in __del__.
        self._dev_null = open(os.devnull, "w")
        # Force pipe/devnull wiring regardless of what the caller passed.
        kwargs["stdin"] = subprocess.PIPE
        kwargs["stdout"] = subprocess.PIPE
        kwargs["stderr"] = self._dev_null
        super().__init__(*args, **kwargs)

    def __del__(self):
        # Close our /dev/null handle, then let Popen's own finalizer run.
        self._dev_null.close()
        super().__del__()
|
Gibraltar Carlson took on Wayne Memorial on Monday night in the Class A, Region 7 semifinals at Dearborn High. The Marauders ultimately fell by a score of 65-46. For more details check out the game story.
|
from django.core.management.base import BaseCommand
from given_paper_tickets import given_paper_tickets
from core.models import User
from ticketing.models import GivenPaperTickets, Performance
from datetime import datetime, date
from django.contrib.contenttypes.models import ContentType
import django.utils.timezone as django_tz
def fill_in_existing_data():
    """Backfill GivenPaperTickets rows for the May 2015 performances.

    Reads the hard-coded ``given_paper_tickets`` mapping and creates (or
    updates) one GivenPaperTickets row per (handout date, performance,
    recipient) tuple.  Each data tuple is ``(first_name, last_name, count)``.
    """
    do = Performance.objects.get(date__contains=date(2015, 5, 7))
    vr = Performance.objects.get(date__contains=date(2015, 5, 8))
    tz = django_tz.get_default_timezone()
    performance_type = ContentType.objects.get_for_model(Performance)

    # (data key, handout day-of-month in April 2015, performance).
    # Replaces four nearly identical copy/pasted loops that differed only
    # in these three values.
    batches = (
        ('23 for do', 23, do),
        ('23 for vr', 23, vr),
        ('30 for do', 30, do),
        ('30 for vr', 30, vr),
    )
    for key, day, performance in batches:
        given_on = datetime(2015, 4, day, 22, 45, tzinfo=tz)
        for first_name, last_name, count in given_paper_tickets[key]:
            GivenPaperTickets.objects.update_or_create(
                given_on=given_on,
                given_to=User.objects.get(first_name__iexact=first_name,
                                          last_name__iexact=last_name),
                for_what_type=performance_type,
                for_what_id=performance.id,
                count=count,
            )
class Command(BaseCommand):
    """Management command that backfills historical paper-ticket records."""
    args = 'none'
    # Replaced the placeholder help text ("yo do this") with a real
    # description shown by `manage.py help`.
    help = "Backfill existing given_paper_tickets data"

    def handle(self, *args, **options):
        """Run the backfill and report completion on stdout."""
        fill_in_existing_data()
        # Typo fixed in the status message: 'Succesfully' -> 'Successfully'.
        self.stdout.write('Successfully filled in existing given_paper_tickets data.')
|
Mare Getchy as she has complications from her pregnancy.
Colleen an old neighbor of the Brant's, who lost her job after 27 years.
Thompson family as they endure the loss of Joe.
Prayers for friend Martha and Mike Stolar.
Mike Tremski as he is having medical issues, he is a 92 year old veteran.
|
import pymel.core as pm
from RMPY.rig import rigBase
class ConstraintSwitchModel(rigBase.BaseModel):
    """Data container for a constraint-switch rig setup.

    Holds the two driver lists being blended between, the constrained
    output nodes, the constraint nodes themselves, and the attributes that
    expose the blend weight for each list.
    """
    def __init__(self):
        super(ConstraintSwitchModel, self).__init__()
        # Initialize each list-valued field to its own fresh list.
        for field in ('outputs', 'list_a', 'list_b', 'constraints'):
            setattr(self, field, [])
        self.attribute_output_a = None
        self.attribute_output_b = None
class ConstraintSwitch(rigBase.RigBase):
def __init__(self, *args, **kwargs):
super(ConstraintSwitch, self).__init__(*args, **kwargs)
self._model = ConstraintSwitchModel()
self.constraint_func = {'parent': pm.parentConstraint,
'point': pm.pointConstraint,
'orient': pm.orientConstraint}
    # --- Thin delegating properties -------------------------------------
    # Each pair below simply forwards reads/writes to the corresponding
    # field on the underlying ConstraintSwitchModel, so rig code can use
    # e.g. `self.outputs` instead of `self._model.outputs`.
    @property
    def attribute_output_b(self):
        return self._model.attribute_output_b

    @attribute_output_b.setter
    def attribute_output_b(self, value):
        self._model.attribute_output_b = value

    @property
    def attribute_output_a(self):
        return self._model.attribute_output_a

    @attribute_output_a.setter
    def attribute_output_a(self, value):
        self._model.attribute_output_a = value

    @property
    def controls(self):
        return self._model.controls

    @controls.setter
    def controls(self, value):
        self._model.controls = value

    @property
    def outputs(self):
        return self._model.outputs

    @outputs.setter
    def outputs(self, value):
        self._model.outputs = value

    @property
    def constraints(self):
        return self._model.constraints

    @constraints.setter
    def constraints(self, value):
        self._model.constraints = value

    @property
    def list_a(self):
        return self._model.list_a

    @list_a.setter
    def list_a(self, value):
        self._model.list_a = value

    @property
    def list_b(self):
        return self._model.list_b

    @list_b.setter
    def list_b(self, value):
        self._model.list_b = value
def build(self, list_a, list_b, **kwargs):
control = kwargs.pop('control', None)
self.create_list_base(list_a, list_b)
if control:
print 'control found {}, {}'.format(control, kwargs)
self.create_attribute_control(control, **kwargs)
self.link_attribute_to_constraints()
self.controls.append(control)
def create_list_base(self, list_a, list_b, **kwargs):
destination = kwargs.pop('destination', None)
constraint_type = kwargs.pop('constraint_type', 'parent')
output_type = kwargs.pop('output_type', 'joint')
root_group = pm.group(empty=True)
self.name_convention.rename_name_in_format(root_group, name='intermediate')
if output_type == 'group' or output_type == 'locator':
root_group.setParent(self.rig_system.kinematics)
else:
root_group.setParent(self.rig_system.joints)
if len(list_a) == len(list_b):
for index, (constraint_a, constraint_b) in enumerate(zip(list_a, list_b)):
if not destination:
if output_type == 'group':
output = self.create.group.point_base(constraint_a, name='intermediate')
output.setParent(root_group)
elif output_type == 'locator':
output = self.create.space_locator.point_base(constraint_a, name='intermediate')
output.setParent(root_group)
else:
reset, output = self.create.joint.point_base(constraint_a, name='intermediate')
reset.setParent(root_group)
else:
output = destination[index]
self.outputs.append(output)
constraint = self.constraint_func[constraint_type](constraint_a, output)
constraint.interpType.set(2)
self.constraint_func[constraint_type](constraint_b, output)
self.constraints.append(constraint)
else:
print 'list_a and list_b should be the same size'
def create_attribute_control(self, control, **kwargs):
self.controls.append(control)
attribute_name = kwargs.pop('attribute_name', 'space_switch')
if attribute_name not in pm.listAttr(self.controls[0]):
pm.addAttr(self.controls[0], ln=attribute_name, hnv=True, hxv=True, min=0, max=10, k=True)
reverse = pm.shadingNode('reverse', asUtility=True, name="reverse")
multiply = pm.createNode('unitConversion', name="multiplier")
self.name_convention.rename_name_in_format(reverse)
self.name_convention.rename_name_in_format(multiply)
pm.connectAttr('{}.{}'.format(self.controls[0], attribute_name), "{}.input".format(multiply))
pm.setAttr("{}.conversionFactor".format(multiply), 0.1)
pm.connectAttr("{}.output".format(multiply), "{}.inputX".format(reverse))
self.attribute_output_a = multiply.output
self.attribute_output_b = reverse.outputX
def link_attribute_to_constraints(self):
for each_constraint in self.constraints:
for attribute_control, weight_alias in zip([self.attribute_output_a, self.attribute_output_b],
each_constraint.getWeightAliasList()):
attribute_control >> weight_alias
if __name__ == '__main__':
pass
|
Freestone at Bayside: "Lopez" Home features vaulted entry, large gathering room w/gas log fireplace & bookshelves, chef style kitchen w/stainless appliances & island, ext. hardwoods, solid quartz or granite surfaces, white painted doors/trim, plus 3 large bedrooms and loft area upstairs including large master suite w/5pc spa area, Nexia smart technology, tankless water heater, all on fully landscaped yard w/rear yard fencing. Photos representative - not actual home. Home complete!
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Replace the custom *_ID primary-key fields on Card, Deck and Image with
    Django's standard auto-created ``id`` AutoFields, and mark several Card and
    Deck fields as optional (``blank=True``).
    """

    dependencies = [
        ('flashcards', '0001_initial'),
    ]

    operations = [
        # Drop the hand-rolled primary-key columns.
        migrations.RemoveField(
            model_name='card',
            name='Card_ID',
        ),
        migrations.RemoveField(
            model_name='deck',
            name='Deck_ID',
        ),
        migrations.RemoveField(
            model_name='image',
            name='Image_ID',
        ),
        # Add standard auto-increment ``id`` primary keys in their place.
        # ``default`` only applies while the migration runs, hence
        # ``preserve_default=False``.
        migrations.AddField(
            model_name='card',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, default=0, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='deck',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, default=None, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='image',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, default=None, serialize=False, verbose_name='ID'),
            preserve_default=False,
        ),
        # Relax existing fields to allow blank values; the two image foreign
        # keys get distinct related_names so reverse accessors don't clash.
        migrations.AlterField(
            model_name='card',
            name='Back_Img_ID',
            field=models.ForeignKey(related_name=b'Back_Image', blank=True, to='flashcards.Image'),
        ),
        migrations.AlterField(
            model_name='card',
            name='Back_Text',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='card',
            name='Difficulty',
            field=models.IntegerField(blank=True),
        ),
        migrations.AlterField(
            model_name='card',
            name='Front_Img_ID',
            field=models.ForeignKey(related_name=b'Front_Image', blank=True, to='flashcards.Image'),
        ),
        migrations.AlterField(
            model_name='card',
            name='Front_Text',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='card',
            name='Last_Attempted',
            field=models.DateTimeField(blank=True),
        ),
        migrations.AlterField(
            model_name='deck',
            name='Accessed_Date',
            field=models.DateTimeField(blank=True),
        ),
    ]
|
HOUSTON (KTRK) -- Houston police are asking for your help to find a missing 9-year-old boy who was last seen yesterday in northeast Houston.
Police say Myrio Edwards, 9, walked away from his house yesterday at around 5pm and hasn't been seen since.
Myrio is four feet tall and weighs around 80 pounds. He was last seen wearing a red T-shirt with a cartoon character on it. He had on gray basketball shorts with black stripes on the sides and black tennis shoes.
If you've seen him, you're asked to call Houston police at 832-394-1816.
|
from lexpy._base.node import FSANode
from lexpy._base.automata import FSA
__all__ = ['Trie']
class Trie(FSA):
    """A Trie (prefix tree) data structure built on the FSA base class.

    NOTE: the descriptive string used to sit *after* ``__slots__`` and was
    therefore a stray statement, not the class docstring; it also claimed the
    root id was 1 while the code creates it with id 0. Both fixed here.

    Attributes:
        root: (FSANode) the top-level node created for every Trie instance,
              with id 0 and the empty string '' as its label.
    """

    __slots__ = 'root'

    def __init__(self):
        """Initialise the Trie by creating the root node (id 0, label '')
        and handing it to the FSA base class."""
        root = FSANode(0, '')
        super(Trie, self).__init__(root)

    def __len__(self):
        """Return the number of nodes in the Trie (the highest node id issued)."""
        return self._id

    def add(self, word, count=1):
        """Add a word to the trie.

        Args:
            word (str): the word to insert.
            count (int): number of occurrences to record (default 1).

        Raises:
            AssertionError: if the word is None.
        """
        assert word is not None, "Input word cannot be None"
        node = self.root
        for letter in word:
            if letter not in node.children:
                self._id += 1
                node.add_child(letter, _id=self._id)
            node = node[letter]
        # Mark end-of-word on the node reached *after* the walk. The previous
        # version only did this on the final loop iteration, which silently
        # ignored the empty string; '' is now recorded on the root node.
        node.eow = True
        node.count += count
        self._num_of_words += count
|
Booko: Comparing prices for U.S. Half-tracks. The Development and Deployment of the U.S. Army's Half-tracked Vehicles (The Military Machine, Volume One).
U.S. Half-tracks. The Development and Deployment of the U.S. Army's Half-tracked Vehicles (The Military Machine, Volume One).
|
from functools import partial
from itertools import chain
from math import sin, cos, pi
from random import gauss, uniform
from pygame import Rect, Surface
from pygame.constants import *
import pygame.key
from core import color
from core import config
from core.particles import ParticleEmitter, ParticlePool, Particle
from game import gamedata
from game.combocounter import get_combo_counter
from game.gameobject import GameObject
from game.shipbullet import ShipBullet
from game.enemy import Enemy
### Functions ##################################################################
def _burst_appear(self):
    """Particle appear callback: fling the particle outward while gravity
    accelerates it downward each frame."""
    self.acceleration[1] = GRAVITY
    sideways = gauss(0, 25)        # random horizontal kick, centred on 0
    upward = uniform(-10, -20)     # always launched upward (negative y)
    self.velocity = [sideways, upward]
def _radius_appear(self):
    """Particle appear callback: place the particle on a fuzzy half-circle
    (radius ~300px) above START_POS and remember that starting point."""
    self.progress = 0
    radius = gauss(300, 50)
    angle = uniform(0, -pi)  # negative angles put the particle above centre
    self.position[0] = START_POS.centerx + radius * cos(angle)
    self.position[1] = START_POS.centery + radius * sin(angle)
    self.startpos = (self.position[0], self.position[1])
    self.rect.topleft = (self.position[0] + .5, self.position[1] + .5)
def _radius_move(self):
    """Particle move callback: glide from startpos toward START_POS over 30
    frames using smoothstep easing, then hand the particle back to the pool."""
    self.progress += 1
    fraction = self.progress / 30
    if fraction == 1:
        # Reached the target location.
        self.change_state(Particle.STATES.LEAVING)
        return
    eased = (fraction ** 2) * (3 - 2 * fraction)  # smoothstep easing curve
    remaining = 1 - eased
    pos = self.position
    pos[0] = self.startpos[0] * remaining + START_POS.centerx * eased
    pos[1] = self.startpos[1] * remaining + START_POS.centery * eased
    self.rect.topleft = (pos[0] + .5, pos[1] + .5)
################################################################################

### Constants ##################################################################
PART_IMAGE = Rect(4, 170, 4, 4)  #source rect of the 4x4 particle sprite
APPEAR = config.load_sound('appear.wav')  #played when the ship (re)spawns
APPEAR_POOL = ParticlePool(config.get_sprite(PART_IMAGE), _radius_move, _radius_appear)
DEATH = config.load_sound('death.wav')  #played when the ship dies
DEATH_POOL = ParticlePool(config.get_sprite(PART_IMAGE), appear_func=_burst_appear)
FRAMES = tuple(config.get_sprite(Rect(32 * i, 128, 32, 32)) for i in range(5))  #ship animation frames
GRAVITY = 0.5  #downward acceleration applied to burst particles
SHIP_STATES = ('IDLE', 'SPAWNING', 'ACTIVE', 'DYING', 'DEAD', 'RESPAWN')
SPEED = 4  #horizontal movement step for the ship
START_POS = Rect(config.SCREEN_WIDTH / 2, config.SCREEN_HEIGHT * .8, 32, 32)  #spawn location
################################################################################

### Preparation ################################################################
#Give every ship frame the shared transparency colour key.
for i in FRAMES: i.set_colorkey(color.COLOR_KEY, config.BLIT_FLAGS)
################################################################################
class FlameTrail(GameObject):
    '''
    FlameTrail is the jet left by the Ship's engines. This is purely a
    graphical effect.
    '''
    FRAMES = tuple(config.get_sprite(Rect(32*i, 0, 32, 32)) for i in range(6))  #six 32x32 flame frames
    GROUP = None

    def __init__(self):
        super().__init__()
        self.anim = 0.0  #animation clock, advanced by 1/3 per animate() call
        self.image = FlameTrail.FRAMES[0]
        self.position = [-300.0, -300.0]  #starts off-screen
        self.rect = Rect(self.position, self.image.get_size())
        self.state = 1
        del self.acceleration, self.velocity  #purely visual: no physics needed
        for i in self.__class__.FRAMES: i.set_colorkey(color.COLOR_KEY, config.BLIT_FLAGS)

    def animate(self):
        '''Flicker by picking frames along a sine wave of the animation clock.'''
        self.anim += 1/3
        self.image = FlameTrail.FRAMES[int(3 * sin(self.anim / 2)) + 3]

    #State 1 maps to the animate action (dispatched by the GameObject base class).
    actions = {1 : 'animate'}
################################################################################
class LightColumn(GameObject):
    '''
    This class exists to let the player know where exactly he's aiming.
    '''
    SIZE = Rect(0, 0, 32, config.SCREEN_HEIGHT - 32 * 3)  #screen height minus three 32px tiles

    def __init__(self):
        super().__init__()
        self.image = Surface(self.__class__.SIZE.size, config.BLIT_FLAGS)
        self.position = [-300.0, -300.0]  #starts off-screen
        self.rect = Rect(self.position, self.__class__.SIZE.size)
        self.state = 1
        self.image.fill(color.WHITE)
        self.image.set_alpha(128)  #half-transparent white beam
        del self.acceleration, self.velocity  #static visual: no physics needed

    #Single state with no per-frame action.
    actions = {1 : None}
################################################################################
class Ship(GameObject):
    '''
    The Ship is the player character. There's only going to be one instance of
    it, but it has to inherit from pygame.sprite.Sprite, so we can't make it a
    true Python singleton (i.e. a module).
    '''
    STATES = config.Enum(*SHIP_STATES)  #IDLE/SPAWNING/ACTIVE/DYING/DEAD/RESPAWN
    GROUP = None  #sprite group for the ship's bullets/counters; assigned outside this chunk

    def __init__(self):
        '''
        @ivar anim: A counter for ship animation
        @ivar image: The graphic
        @ivar invincible: How many frames of invincibility the player has if any
        @ivar my_bullet: The single bullet this ship may fire
        '''
        super().__init__()
        self.anim = 0.0
        self.appear_emitter = ParticleEmitter(APPEAR_POOL, START_POS.copy(), 2)
        self.emitter = ParticleEmitter(DEATH_POOL, START_POS.copy(), 2)
        self.flames = FlameTrail()
        self.image = FRAMES[0]
        self.invincible = 0
        self.light_column = LightColumn()
        self.my_bullet = ShipBullet()
        self.position = list(START_POS.topleft)
        self.rect = START_POS.copy()
        self.respawn_time = 3 * 60 # In frames
        self.change_state(Ship.STATES.RESPAWN)

    def on_fire_bullet(self):
        '''Fire the single bullet if it is idle and the ship is active.'''
        bul = self.my_bullet
        if bul.state == ShipBullet.STATES.IDLE and self.state == Ship.STATES.ACTIVE:
            #If our bullet is not already on-screen...
            bul.add(Ship.GROUP)
            self.anim = 1  #kick off the firing animation (see move())
            self.image = FRAMES[self.anim]
            bul.rect.center = self.rect.center
            bul.position = list(self.rect.topleft)
            bul.change_state(ShipBullet.STATES.FIRED)

    def respawn(self):
        '''Reset the ship at START_POS with a particle burst and 250 ticks of
        invincibility, signalled by half-transparent sprites.'''
        self.appear_emitter.burst(200)
        APPEAR.stop()
        APPEAR.play()
        #Half alpha marks the invincibility window.
        for i in chain(FRAMES, FlameTrail.FRAMES, {self.light_column.image}): i.set_alpha(128)
        self.invincible = 250
        self.light_column.rect.midbottom = self.rect.midtop
        self.position = list(START_POS.topleft)
        self.rect = START_POS.copy()
        self.respawn_time = 3 * 60
        self.change_state(Ship.STATES.ACTIVE)

    def move(self):
        '''Per-tick update while alive: keyboard movement, invincibility
        countdown, firing-animation advance, and combo-counter spawning.'''
        keys = pygame.key.get_pressed() #Shorthand for which keys are pressed
        rect = self.rect
        width = self.image.get_width()
        if self.state not in {Ship.STATES.DYING, Ship.STATES.DEAD, Ship.STATES.IDLE}:
            if (keys[K_LEFT] or keys[K_a]) and rect.left > 0:
                #If we're pressing left and not at the left edge of the screen...
                self.position[0] -= SPEED
            elif (keys[K_RIGHT] or keys[K_d]) and rect.right < config.SCREEN_RECT.right:
                #If we're pressing right and not at the right edge of the screen...
                self.position[0] += SPEED
            rect.left = self.position[0] + 0.5  #+0.5 so truncation rounds to nearest pixel
            self.flames.rect.midtop = (rect.midbottom[0], rect.midbottom[1] - 1)
            #Compensate for the gap in the flames ^^^
            self.light_column.position[0] = self.position[0]
            #Snap the aiming column onto the sprite-width grid.
            self.light_column.rect.left = round(self.light_column.position[0] / width) * width
        if self.invincible:
            #If we're invincible...
            self.invincible -= 1
        elif self.image.get_alpha() == 128:
            #Invincibility just ran out; restore full opacity.
            for i in chain(FRAMES, FlameTrail.FRAMES): i.set_alpha(255)
        #Advance the firing animation by 1/3 per tick while anim is between 0
        #and the last frame, wrapping back to 0.0 once it reaches 4.
        self.anim = self.anim + (0 < self.anim < len(FRAMES) - 1) / 3 if self.anim != 4 else 0.0
        self.image = FRAMES[int(self.anim)]
        if gamedata.combo_time == gamedata.MAX_COMBO_TIME and gamedata.combo > 1:
            #A combo just completed; display its counter above the ship.
            counter = get_combo_counter(gamedata.combo, self.rect.topleft)
            counter.rect.midbottom = self.rect.midtop
            counter.position = list(counter.rect.topleft)
            counter.change_state(counter.__class__.STATES.APPEARING)
            Ship.GROUP.add(counter)

    def die(self):
        '''Hide the ship with a particle burst and enter the DEAD state.'''
        DEATH.play()
        for i in chain(FRAMES, FlameTrail.FRAMES, (self.light_column.image,)): i.set_alpha(0)
        self.emitter.rect = self.rect
        self.emitter.burst(100)
        self.change_state(Ship.STATES.DEAD)

    def instadie(self, other):
        '''Collision handler: zero out the remaining lives and die at once.'''
        if gamedata.lives:
            #If we have any lives...
            gamedata.lives = 0
            self.die()

    def wait_to_respawn(self):
        '''Count down the respawn timer, then switch to the RESPAWN state.'''
        self.respawn_time -= 1
        if not self.respawn_time:
            #If we're done waiting to respawn...
            self.change_state(Ship.STATES.RESPAWN)

    #Maps each state to the method run every tick while in that state.
    actions = {
                STATES.IDLE      : None              ,
                STATES.SPAWNING  : 'respawn'         ,
                STATES.ACTIVE    : 'move'            ,
                STATES.DYING     : 'die'             ,
                STATES.DEAD      : 'wait_to_respawn',
                STATES.RESPAWN   : 'respawn'         ,
              }

    #Collision dispatch: touching an Enemy kills the player outright.
    collisions = {
                   Enemy: instadie,
                 }
|
Sinn Féin MLA Carál Ní Chuilín has called on the Department of Communities and Housing Executive to ensure that funding for the ‘Supporting People’ programme is protected.
“Supporting People helps some of the most vulnerable people in society to live independently, including those who are disabled, elderly, and victims of domestic violence.
“Helping people to live in their own home and to remain part of the community is better for the health and well-being of the person involved.
“Supporting People providers are already financially stretched in providing vital services.
|
# Accelerometer Grapher and Fall Dector - Hugh O'Brien March 2009
#
#This is a script for PyS60 that opens a bluetooth serial connection
#to a pre-programmed SHIMMER sensor, The SHIMMER provides accelerometer
#data in the form "1111 1111 1111" where '1111' will be in the range
#of 0 -> 4400. The three values represent the data gathered
#from monitoring the three axis of the accelerometer.
#
#The script reduces the accuracy of these values in order to be able
#to graph them on a screen that is only 320x240px in size
#
#The script also monitors the difference between two subsequent
#readings in order to determine if a large movement has occured.
#This can be interpreted as a fall. A call is then placed to a
#pre-defined telephone number and the details of the victim are
#read out to the receiver.
import e32, appuifw, audio, telephone
#btsocket is the 'old' BT system, new version introduced in
#PyS60 1.9.1 is harder to work with.
import btsocket as socket
#a predefined BT MAC address can be set here to skip discovery process
target = ''
#who to call, and the details to read out, when a fall is detected
contact_name = "John Watson"
contact_number = "5550137"
victim_name = "Mr. Sherlock Holmes"
victim_address = "221 B. Baker Street. London"
#minimum per-axis change between two consecutive scaled samples that counts
#as a fall (compared in the main loop below)
sensitivity = 28
def fall():
    """Place an automated emergency phone call announcing a detected fall.

    Dials the configured contact, speaks the victim's details three times,
    hangs up, resets the sample buffers so the same event does not retrigger,
    and finally signals the lock so the main loop resumes.
    """
    global app_lock, contact_name, contact_number, victim_name,\
        victim_address, data, prev
    audio.say("Dialling %s now" % contact_name)
    telephone.dial(contact_number)
    e32.ao_sleep(7) #7 sec delay for someone to answer
    for i in range(2, -1, -1):
        audio.say("This is an automated message. A fall has been detected.\
        Please assist %s at address %s. \
        This message will repeat %d more times" \
        % (victim_name, victim_address, i) )
    telephone.hang_up()
    data = ( 40, 40, 40 ) #reset values so as not to trigger again
    prev = data
    app_lock.signal() #unlock the main loop
def connect(): #this function sets up the BT socket connection
    """Open the Bluetooth socket to the SHIMMER sensor.

    Uses the module-level ``target`` if one was pre-configured; otherwise runs
    the OS discovery routine and remembers the chosen device.
    """
    global btsocket, target
    btsocket = None #so the error path knows whether a socket was created
    try:
        #socket params passed to the OS
        btsocket = socket.socket(socket.AF_BT, socket.SOCK_STREAM)
        if target == '': #if no target defined, begin OS discovery routine
            address, services = socket.bt_discover()
            target = (address, services.values()[0])
        btsocket.connect(target) #initiate connection and notify user
        #BUG FIX: the old message used `address`, which is unbound when a
        #pre-defined target skips discovery; report the target address instead.
        appuifw.note(u"Connected to " + str(target[0]), "info")
    except Exception: #fail cleanly (was a bare except, which hid too much)
        appuifw.note(u"Error connecting to device")
        if btsocket is not None:
            btsocket.close()
def getData():
    """Read characters from the Bluetooth stream one at a time and return the
    next complete line (terminators stripped)."""
    global btsocket #use the globally defined socket
    line = ""
    ch = btsocket.recv(1) #receive one character at a time
    #skip any leading line terminators left over from the previous read
    while ch in ('\n', '\r'):
        ch = btsocket.recv(1)
    #accumulate characters until the next terminator, then hand back the line
    while ch not in ('\n', '\r'):
        line += ch
        ch = btsocket.recv(1)
    return line
def graph_data(input):
    """Parse one "1111 1111 1111" accelerometer line, scale it to screen
    space and draw the next segment of each of the three axis graphs."""
    #this function produces the graphs on the screen. the screen is
    #landscape oriented with a resolution of 240x320. The constants seen
    #here are used to define where on the screen the graphs should be drawn
    global count, canvas, prev, data
    #take the input string formated like "1111 1111 1111" and parse it
    #to acquire 3 sets of chars and then interpret them as digits saving
    #them to a list in this format: ( '1111', '1111', '1111' )
    #the values are then divided by 60 as they will be in the range
    #0 -> x -> 4400 as the screen is only 240px high. furthermore as there
    #are three graphs being drawn each is confined to (240 / 3 )px of
    #height. The divisor of 60 accommodates this at the cost of accuracy.
    try:
        data = (\
            int(input[0:4]) / 60, \
            int(input[5:9]) / 60, \
            int(input[10:14]) / 60\
            )
    #sane defaults if we receive a malformed reading
    except ValueError:
        data = ( 36, 36, 36 )
    #redraw the screen if there are more than 280 samples displayed.
    if count > 280:
        reset()
    #draw a line, with the X1 starting 10 points from the left and
    #expanding right, Y1 being the previous value of Y2 (initially zero)
    #plus a vertical offset so the graphs don't overlap each other, X2
    #being one point right of X1 and Y2 one of the 3 XYZ readings plus
    #the vertical offset. other options are purely aesthetic.
    canvas.line(\
        (count + 10, prev[0], count + 11, data[0] ), \
        outline = 0xFF0000, width = 1)
    canvas.line(\
        (count + 10, prev[1] + 80, count + 11, data[1] + 80), \
        outline = 0x00DD00, width = 1)
    canvas.line(\
        (count + 10, prev[2] + 160, count + 11, data[2] + 160), \
        outline = 0x4444FF, width = 1)
    #increment counter - data should also be pushed into prev here
    #but this happens in the main loop for monitoring reasons
    count = count + 1
def reset():
    """Blank the canvas and zero the per-screen sample counter (called once
    the graphs have filled the width of the display)."""
    global count, canvas
    #paint the whole 320x240 landscape surface black, then start counting again
    canvas.rectangle((0, 0, 320, 240), fill = 0x000000)
    count = 0
#Main -- build the UI, connect to the sensor, then sample forever
data = ( 0, 0, 0 )
prev = (40, 40, 40) #initial values for the 'previous sample' buffer
canvas = appuifw.Canvas() #create a new Canvas object
appuifw.app.body = canvas
appuifw.app.screen = "full" #go 'fullscreen'
appuifw.app.orientation = "landscape" # draw in landscape orientation
appuifw.app.title = u"Activity Monitor" #name the program
app_lock = e32.Ao_lock() #locking system; fall() signals it when done
connect() #open the BT socket
e32.ao_sleep(1) # sleep for 1 second in case of graphical slowness
reset() # initially reset the screen to draw the canvas
while 1: #loop the following code infinitely
    e32.reset_inactivity() #keep the screensaver away
    graph_data( getData() ) # poll the BT data passing it to the grapher.
    #test the movement level between the last two samples
    if ( (abs(data[0] - prev[0]) > sensitivity ) \
        or (abs(data[1] - prev[1]) > sensitivity ) \
        or (abs(data[2] - prev[2]) > sensitivity ) ):
        fall() #if too much, take action
        app_lock.wait() #pause this loop until fall() finishes
        e32.ao_sleep(1)
        reset()
    prev = data #move current data into previous data buffer
|
There are approximately 5.3 million children and youth in the United States with at least one undocumented parent. Election day was a disaster for these children – over 4 million of whom are U.S. citizens. Many millions of young Americans were up until late that night, sobbing as they expressed to their parents their worries about their families being ripped apart. Executive orders are now amplifying this disaster, reaching into the daily lives, educational opportunities and life chances of millions of young United States citizens. Within recent days, a recipient of Deferred Action for Childhood Arrivals with no criminal record, a permanent resident and mother of four who voted, and other mothers of U.S.-born citizens have been arrested or deported. These are the first in a tidal wave of deportations of those without serious crimes who represent no threat to our country.
Under current executive orders in effect or under consideration, the offenses considered as appropriate to start removal proceedings extend to the misdemeanor of crossing the border for the first time, to stating on employment forms that they can legally work, or even enrolling one’s child in the federal school lunch program. These place the large majority of the 11 million undocumented at immediate risk of deportation. In the last years of the Obama Administration, prioritization for removal proceedings was restricted to those with repeat immigration violations, serious crimes like felonies or violent crimes. At a time when net migration across the southern border is in the southerly, not northerly, direction, deportations based on misdemeanors like traffic tickets will devastate the lives of hard working parents who labor and contribute to our economy.
Children and youth stand to be harmed by these draft policy directives. Close to a third of all children of immigrants in the United States have at least one undocumented parent. The deportation of parents of these millions of children and youth, both born in the US and those who have spent nearly their whole lives in the US, would irreparably harm their development. Dooming the chances of roughly one child in every classroom in our country not only harms the social fabric of our nation, but its economic future.
In a recent consensus statement of the Society for Research in Adolescence published in the Journal of Research on Adolescence, Carola Suarez-Orozco of UCLA, Roberto Gonzales of Harvard and I summarized all existing rigorous research on the effects of undocumented status on parents and youth. This research shows that the parent-child separations that would occur among families brought under scrutiny of these executive orders would harm the development of hundreds of thousands of children and youth – interrupting their education and greatly harming their psychological development.
Our research also shows that extending current undocumented status without a pathway to citizenship and threatening and enacting deportation would harm children’s early cognition, their academic achievement, their educational attainment, and their ability to contribute to U.S. society. Children of the undocumented attain nearly a year and a half less education by their mid twenties – a whopping effect that would only be worsened under these executive orders. As a country we need to be providing educational and economic opportunities, rather than blocking them for children and youth. Threats of deportation also drive parents to keep their children out of school, preschool, and after-school programs – all critical building blocks for a successful workforce.
These children also suffer from worries, anxiety and depression due to their parents’ undocumented status. The costs to society of the mental health problems borne by children of the undocumented will soar under the Trump administration’s proposed policies. We are a nation of immigrants. Our physical and human infrastructure – from the Continental Railroad to the interstate highway system to the IT revolution – has benefited from the labor of millions upon millions of people from all over the world coming to the United States to pursue educational and economic opportunity. Many initially came without papers but were eventually integrated into the country’s social, academic and economic development. To block these pathways to educational and economic success will harm not only our moral standing in the world, but our society’s future.
Hirokazu Yoshikawa is the Courtney Sale Ross Professor of Globalization and Education at New York University and a faculty affiliate of the Metropolitan Center for Equity and the Transformation of Schools at NYU. He also co-directs the Global TIES for Children Center at NYU. He is the author of Immigrants Raising Citizens: Undocumented Parents and Their Young Children.
This entry was posted in Abilities and Inclusion, Administration and Leadership, Community Engagement, Early Childhood Development and Learning, Equity and Social Justice, Language and Literacy, Parent Engagement and Support, School Climate, School Districts and State Education Systems, Student Support and Services, Teaching and Teachers and tagged DACA, deportation, executive order, immigrant, immigration, policy. Bookmark the permalink.
|
# ------------------------------------------------------------------------
# This block checks to see if the script is being run directly,
# i.e. through the command line. If it is, then it stops and exits the
# program, asking the user to use these files by running the main.py
# ------------------------------------------------------------------------
# Prefer the package-relative import; fall back to the flat layout when the
# module is used outside the package context.
try:
    from .utils import testForMain
except ImportError:  # was a bare except, which also swallowed unrelated errors
    from utils import testForMain
testForMain(__name__)
# ------------------------------------------------------------------------
# PINGS.PY
#
# AUTHOR(S): Peter Walker [email protected]
# Brandon Layton [email protected]
#
# PURPOSE- Holds a single measurement of data transfer speed in a single test
# (i.e. This object represent one line of text in a speed test)
#
# VARIABLES:
# secIntervalStart Float, represents the start time of this Ping
# secIntervalEnd Float, represents the end time of this Ping (should always be start + 1)
# size Float, represents this Ping's size in Kbits sent
# speed Float, represents this Ping's speed in KBytes/sec
# size_string String, converted from size, used in __str__
# size_units String, units to be appended to string
# speed_string String, converted from speed, used in __str__
# speed_units String, units to be appended to string
#
# FUNCTIONS:
# __init__ - Used to initialize an object of this class
# INPUTS- self: reference to the object calling this method (i.e. Java's THIS)
# OUTPUTS- none
#
# __str__ - Returns a string represenation of the object
# INPUTS- self: reference to the object calling this method (i.e. Java's THIS)
# OUTPUTS- String, representing the attributes of the object (THIS)
# ------------------------------------------------------------------------
from .utils import global_str_padding as pad; pad = pad*4
class Ping(object):
    """A single interval measurement from a speed test (one line of output).

    Parses a line such as ``[  3]  0.0- 1.0 sec  1024 KBits  950 KBytes/sec``
    into start/end times, transfer size and speed, and pre-computes padded
    string forms so that many Pings line up when printed in a column.
    """

    # Class-level defaults; every instance overwrites these in __init__.
    secIntervalStart = 0  # start time of this Ping, in seconds
    secIntervalEnd = 0    # end time of this Ping, in seconds
    size = 0              # data transferred in this interval
    speed = 0             # transfer speed over this interval
    size_string = ""      # size formatted with exactly two decimal places
    size_units = ""       # units label appended after size_string
    speed_string = ""     # speed formatted with exactly two decimal places
    speed_units = ""      # units label appended after speed_string

    @staticmethod
    def _two_decimals(number_text):
        """Return number_text padded so it always shows two decimal places."""
        if "." not in number_text:
            return number_text + ".00"
        if len(number_text.split(".")[1]) == 1:
            return number_text + "0"
        return number_text

    def __init__(self, data, size_u, speed_u):
        """Parse one raw measurement line using the given unit labels."""
        self.size_units = size_u
        self.speed_units = speed_u
        # Walk the line left to right, peeling off one field at a time.
        left, _, rest = data.partition("-")
        start_text = left.split("]")[1].strip()
        end_text, _, rest = rest.partition("sec")
        size_text, _, rest = rest.partition(self.size_units)
        speed_text = rest.partition(self.speed_units)[0].strip()
        end_text = end_text.strip()
        size_text = size_text.strip()
        self.secIntervalStart = float(start_text)
        self.secIntervalEnd = float(end_text)
        self.size = float(size_text)
        self.speed = float(speed_text)
        # Fixed-width text forms, since raw values vary between ##.# and ###.
        self.size_string = self._two_decimals(size_text)
        self.speed_string = self._two_decimals(speed_text)
        # Alignment padding. Times span 0-99: two one-digit times need two
        # spaces, a one-digit/two-digit pair needs one, otherwise none.
        if self.secIntervalEnd < 10.0:
            self.time_pad = "  "
        elif self.secIntervalStart < 10.0:
            self.time_pad = " "
        else:
            self.time_pad = ""
        from math import log10
        self.size_pad = " " * (4 - (int(log10(self.size)) if self.size else 0))
        self.speed_pad = " " * (4 - (int(log10(self.speed)) if self.speed else 0))
    #END DEF

    def __str__(self):
        """Return the aligned, human-readable form of this measurement."""
        pieces = [pad, str(self.secIntervalStart), "-",
                  str(self.secIntervalEnd), self.time_pad, " ",
                  self.size_pad, str(self.size_string), " ",
                  str(self.size_units), " ",
                  self.speed_pad, str(self.speed_string), " ",
                  str(self.speed_units)]
        return "".join(pieces)
    #END DEF
#END CLASS
|
Mini Outdoor Christmas Trees Awesome Beautiful Christmas Presents Drawings Small Prekhome is just one of the many collections of best interior design ideas that we have on this website. We have many other interior design ideas, wall decorating ideas, and related references on this website as well. We are not just providing information about this one image; you can find plenty of additional references to help you create your dream home. So don't forget to keep visiting eldiariodelanovia.com to get the latest updates on best interior design ideas, wall decorating ideas, sample home designs and more.
Mini Outdoor Christmas Trees Awesome Beautiful Christmas Presents Drawings Small Prekhome was posted in February 19, 2017 at 9:08 pm. Mini Outdoor Christmas Trees Awesome Beautiful Christmas Presents Drawings Small Prekhome has viewed by 45 users. Click it and download the Mini Outdoor Christmas Trees Awesome Beautiful Christmas Presents Drawings Small Prekhome.
Christmas, Artificial Tree Christmas was posted April 13, 2017 at 7:24 am by eldiariodelanovia.com . More over Artificial Tree Christmas has viewed by 3122 visitor.
Wall Decor, Gold Wall Decals was posted October 26, 2017 at 2:10 pm by eldiariodelanovia.com . More over Gold Wall Decals has viewed by 1700 visitor.
Wall Decor, Wall Decoration Shelf was posted October 16, 2017 at 10:03 am by eldiariodelanovia.com . More over Wall Decoration Shelf has viewed by 1517 visitor.
|
#!/Applications/anaconda/envs/Python3/bin
import sys
def main():
    '''Python 3 Quick Start Code Examples

    Walks through core language features in order -- data types,
    conditionals, loops, comprehensions, iterable checks and generator
    expressions -- printing a labelled demo for each section.
    Returns 0 on success.
    '''
    # Get input from user and display it
    # feels = input("On a scale of 1-10, how do you feel? ")
    # print("You selected: {}".format(feels))
    # Python Data Types (bindings kept only as illustrations; unused below)
    integer = 42
    floater = 3.14
    stringer = 'Hello, World!'
    noner = None # singleton value, check: if var is None
    tupler = (1, 2, 3)
    lister = [1, 2, 3]
    dicter = dict(
        one = 1,
        two = 2,
        three = 3
    )
    boolTrue = True
    boolFalse = False
    # Conditionals
    print("=========== Conditionals ==========")
    num1, num2 = 0, 1
    if (num1 > num2):
        # print("{} is greater than {}".format(num1, num2))
        pass
    elif (num1 < num2):
        # print("{} is less than {}".format(num1, num2))
        pass
    else:
        # print("{} is equal to {}".format(num1, num2))
        pass
    # Python version of ternary operator
    bigger = num1 if num1 >= num2 else num2
    smaller = num1 if num1 < num2 else num2
    # print("Conditional statment says {} is greater than or equal to {}".format(bigger, smaller))
    # Python version of a switch statement: dict lookup with a default
    choices = dict(
        a = 'First',
        b = 'Second',
        c = 'Third',
        d = 'Fourth',
        e = 'Fifth'
    )
    opt1 = 'c'
    opt2 = 'f'
    default = 'Option not found'
    # print("Python 'switch' statment using a dict: {}".format(choices))
    # print("Option 1 was {} and returned: {}".format(opt1, choices.get(opt1, default)))
    # print("Option 2 was {} and returned: {}".format(opt2, choices.get(opt2, default)))
    print("==============================")
    # Loops
    print("=========== Loops ==========")
    print("Fibonacci series up to 100:")
    # tuple assignment advances the pair without a temporary variable
    a, b = 0, 1
    while b < 100:
        print(b, end=" ")
        a, b = b, a + b
    print()
    # print("For loop printing parts of {}".format(stringer))
    for letter in stringer:
        # Don't print the vowels
        if letter in 'aeiouAEIOU':
            continue
        # Stop looping at punctuation
        if letter in '!@#$%^&*.,?;:-_+=|':
            break
        # print(letter, end=" ")
    # print()
    print("==============================")
    # Get an index using a for loop with enumerate()
    # for index, letter in enumerate(stringer):
    # print("Index: {} is letter: {}".format(index, letter))
    # List comprehensions
    print("=========== List Comprehensions ==========")
    # Create a new list - [expression for variable in list]
    listOne = [0, 1, 2, 3, 4, 5]
    listSquares = [x*x for x in listOne]
    print("List comprehension: {}".format(listSquares))
    # Filter a list - [expression for variable in list if condition]
    listOdd = [x for x in listSquares if x % 2 == 1]
    print("Filtered list comprehension: {}".format(listOdd))
    # Dictionary comprehensions
    print("=========== Dict Comprehensions ==========")
    # chr(64+x) maps 1..26 onto 'A'..'Z'
    dictComp = {chr(64+x): x for x in range(1, 27)}
    print("Dict comprehension: {}".format(dictComp))
    # Set comprehension
    print("=========== Set Comprehensions ==========")
    setComp = {x**5 for x in range(2,8)}
    print("Set comprehension: {}".format(setComp))
    print("==============================")
    # Check if a type is an iterable (presence of __iter__)
    print("=========== Is X Type Interable? ==========")
    print("Is a string an iterable? {}".format(hasattr(str, '__iter__')))
    print("Is a Boolean an iterable? {}".format(hasattr(bool, '__iter__')))
    print("Is a list an iterable? {}".format(hasattr(list, '__iter__')))
    print("Is a set an iterable? {}".format(hasattr(set, '__iter__')))
    print("Is an int an iterable? {}".format(hasattr(int, '__iter__')))
    print("==============================")
    # Generator Expressions
    # Similar to list comprehension, less space in memory
    print("=========== Generator Expressions ==========")
    genExp = (x**5 for x in range(2,8))
    listComp = [x**5 for x in range(2,8)]
    print("Type of a generator expression: {}".format(type(genExp)))
    print("Actual generator expression: {}".format(genExp))
    print("Size of generator expression: {}".format(sys.getsizeof(genExp)))
    print("Size of same list comprehension: {}".format(sys.getsizeof(listComp)))
    print("==============================")
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status so shell
    # callers can detect failure (previously the returned 0 was discarded).
    sys.exit(main())
|
He never saw her again, but he thought of her frequently. Mind you, I'm not saying is bad for guys to woo a girl with sweet words of love. To love yourself,it is not something you can make up for. Tags: how men fall in love, how men fall in love with women, how do men fall in love, stages men fall in love, how men fall in love stages. Obviously not all of these apply to me, but I do hate the hookup culture a lot of 20-something singles have adopted. You check off all of these boxes for him.
Maybe you want to live in the country while he wants to live in the city. The Need to Be Appreciated. If the game guy play with love is like this, how do you know men who are really in love. Before I read this, I thought I was just being crazy and ridiculous. The feelings these two men had were the same as those others who fell in love at first sight.
Yet, there will always be exceptions, yet generally this is how men think at this stage. Intimacy is another ambiguous concept. How Men Fall In Love — 7 Stages Of Love How men fall in love 1. Lust never see any good in others but things that are good to caress his ego. So, one of the most important things to have someone to love you is to fall in love with yourself and accept yourself completely no matter what.
If they come back to you, they are yours forever. One woman fell in love with a ball player by observing him from her seat in the stands. That makes a man truly enraged and fills him with unique emotions of violence and righteousness. Their life becomes about whether something is moving them towards their dream or away from it. That's a statement that seems to make perfect sense at face value but is probably also incredibly confusing when you start to examine it. Men look for what they perceive as beauty. Also, experiencing new things will make you feel alive—only too often do we stop experiencing life and go on autopilot with our routines.
How do guys fall in love?
It has to do with the person who is falling in love. That is why many men end up cheating on a lady they profess to be head over heels in love with. It's like asking a student who failed out of med school to perform brain surgery. In fact, men are very shallow creatures. Or, it grows more intense. At almost the same time, he had said that to a thousand and one ladies. Say it in front of others and he will feel like he can take on the world.
I met this guy when I was a freshman in high school and he was a 6th grader. I saw him once and immediately found a conniption with him, i immediately wanted to know more about it, luckily enough he works with me, so i have found out so much about him, we always talk and sing and laugh together. Instead, the good girl rarely asks for anything, because you often feel naturally inclined to help her anyways. A series of chemical reactions in the brain. You fulfill the needs of each other. Instead, he feels appreciated for who he is.
Innuendo: Dare to Think it When we fall for someone, we often get a bit… nervous. Men may want to act like the strong, proud fathers they grew up admiring, but they really just want to be the little boy who is taken care of again. When you try to impress your special person, put on a smile and a positive mood, like this your communication will be easier and smoother and you will look more attractive. However, it is not always successful, either they are not trying hard enough or because the women are not interested in them. Additionally, I may be an affiliate for products that I recommend. I noticed that most of the anecdotes were of men falling in love with women, mostly based on some aspect of her appearance. Lust wants to be served all times.
The video also features robots. That strong physical usually includes sexual feelings. Imagine, if you will, a perfect day. When you fail terribly, instead of letting you marinate in your misery, she appeals to your pride by telling you gently and calmly that you have to man up and face your situation. But the real question we have is: Why? A man might be able to connect with lots of different partners in a lot of different ways, but a truly fulfilling relationship is going to hit all these things in some way, shape, or form. Let him take care of you and show you how well he can handle things.
It was directed by Mark Andre Yapching and Archie Abong. You may have intense chemistry with someone—be attracted to them on all levels and vice versa—but the attraction fades after a while. This makes a man puzzled and forces him to face what he did and solve the problem instead of having endless arguments that lead to nowhere. He dreamed on occasion of this girl. As an experiment, I asked him to look at her with the sound turned off, looking for clues to her.
Also, she does not manipulate to get what she wants. I know this is not an advice column, and not interested in that, but would like to know if there is a deeper psychological definition of love, and what one can expect with regards to longevity of these emotions once they hit? You know your man much more than other people do, so you know what makes him happy. Attraction, then, starts with the eyes. A relationship is great when it feels great to be with the other person. As women fall in love, it might fill with bursts of miss-you texts and happiness. Movies have been written about this also. Guys,stop playing the flirt games.
|
'''
Created on 04-04-2013
@author: jurek
'''
from hra_core.special import ImportErrorMessage
try:
from PyQt4.QtCore import * # @UnusedWildImport
from PyQt4.QtGui import * # @UnusedWildImport
from hra_core.misc import Params
from hra_math.model.data_vector_listener import DataVectorListener
from hra_math.statistics.tachogram_statistics import calculate_tachogram_statistics # @IgnorePep8
from hra_gui.qt.utils.dnd import CopyDragger
from hra_gui.qt.widgets.dock_widget_widget import DockWidgetWidget
from hra_gui.qt.widgets.table_view_widget import TableViewWidget
from hra_gui.qt.plots.tachogram_plot_const import STATISTIC_MIME_ID
from hra_gui.qt.plots.tachogram_plot_const import STATISTIC_CLASS_NAME_ID
except ImportError as error:
ImportErrorMessage(error, __name__)
class TachogramPlotStatisticsDockWidget(DockWidgetWidget):
    """
    a dock widget for tachogram plot statistics; docks itself on the
    right side of *parent* and refreshes via fillStatisticsWidget()
    """
    def __init__(self, parent, **params):
        self.params = Params(**params)
        # Route the title through params instead of passing it twice.
        # Previously it was passed both as an explicit keyword AND inside
        # **params, which raised "got multiple values for keyword argument
        # 'title'" whenever the caller actually supplied a title.
        params.setdefault('title', 'Tachogram plot statistics')
        super(TachogramPlotStatisticsDockWidget, self).__init__(parent,
                                                                **params)
        self.data_accessor = self.params.data_accessor  # alias
        # subscribe so the table is rebuilt on every data change
        self.data_accessor.addListener(self,
                        __TachogramStatisticsDataVectorListener__(self))
        self.__createStatisticsWidget__(QVBoxLayout())
        parent.addDockWidget(Qt.RightDockWidgetArea, self)

    def __createStatisticsWidget__(self, _layout):
        """create the statistics table inside the dock and populate it"""
        self.statisticsWidget = TachogramStatisticsWidget(self.dockComposite,
                                                          layout=_layout)
        self.fillStatisticsWidget()

    def fillStatisticsWidget(self):
        """recompute statistics from the current signal/annotation data
        and push them into the table widget"""
        statistics = calculate_tachogram_statistics(
                            signal=self.data_accessor.signal,
                            annotation=self.data_accessor.annotation)
        self.statisticsWidget.setTachogramStatistics(statistics)
class TachogramStatisticsWidget(TableViewWidget):
    """
    table widget listing basic statistics of a tachogram; rows can be
    dragged out, and the hidden first column carries the statistic's
    class name used as the drag payload
    """
    def __init__(self, parent, **params):
        TableViewWidget.__init__(self, parent, **params)
        self.__dragger__ = CopyDragger(self, STATISTIC_MIME_ID, drag_only=True)
        # exactly one whole row can be selected at a time
        self.setSelectionMode(QAbstractItemView.SingleSelection)
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.__createModel__()

    def __createModel__(self):
        model = TachogramStatisticsModel(self)
        model.setHorizontalHeaderLabels(
                        QStringList(["class_name", "Statistic", "Value"]))
        self.setModel(model)

    def setTachogramStatistics(self, _statistics):
        """replace the table contents with freshly computed statistics"""
        values, descriptions = _statistics[0], _statistics[1]
        model = self.model()
        model.removeRows(0, model.rowCount())
        self.class_names = sorted(values)
        for class_name in self.class_names:
            row = [QStandardItem(str(class_name)),
                   QStandardItem(str(descriptions[class_name])),
                   QStandardItem(str(values[class_name]))]
            model.appendRow(row)
        self.setColumnHidden(0, True)  # "class_name" is a hidden column

    def startDrag(self, dropActions):
        # export the class name of the statistic under the cursor
        current_row = self.model().itemFromIndex(self.currentIndex()).row()
        self.__dragger__.clear()
        self.__dragger__.dragObject(STATISTIC_CLASS_NAME_ID,
                                    self.class_names[current_row])
        self.__dragger__.startDrag()
class TachogramStatisticsModel(QStandardItemModel):
    """item model that right-aligns the statistic value column"""
    def __init__(self, parent):
        QStandardItemModel.__init__(self, parent=parent)

    def data(self, _modelIndex, _role):
        # the third column (indexing starts from 0) holds the statistic's
        # value; render it right-aligned and delegate everything else
        in_value_column = _modelIndex.column() == 2
        if in_value_column and _role == Qt.TextAlignmentRole:
            return Qt.AlignRight
        return super(TachogramStatisticsModel, self).data(_modelIndex, _role)
class __TachogramStatisticsDataVectorListener__(DataVectorListener):
    """
    class used to recalculate tachogram statistics for tachogram
    statistics widget when signal or annotation data is changing
    """
    def __init__(self, _dock_widget):
        # dock widget whose statistics table is refreshed on every change
        self.__dock_widget__ = _dock_widget

    def changeSignal(self, _signal, **params):
        # the new signal itself is not needed here; the dock widget
        # re-reads it from its data accessor
        self.__dock_widget__.fillStatisticsWidget()

    def changeAnnotation(self, _annotation, **params):
        # same refresh path as for signal changes
        self.__dock_widget__.fillStatisticsWidget()
|
Everyone dreams of owning a luxurious home, but with a limited budget and limited land, it can be hard to realize that dream. However, don't worry, because there are now many Best Living Room Design Ideas that can turn a house with a simple layout into a home that looks luxurious. Best Living Room Design Ideas are also attracting a lot of attention at the moment because, thanks to their simplicity, the cost of building such a house does not have to be too large. Although the Best Living Room Design Ideas themselves are simple, they are far from unattractive: a simple house exterior can still be beautiful to look at.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: bluetoothiface.py
:platform: Unix, Windows
:synopsis: Ulyxes - an open source project to drive total stations and
publish observation results. GPL v2.0 license Copyright (C)
2010-2013 Zoltan Siki <[email protected]>.
sudo apt-get -y install bluetooth bluez bluez-tools rfkill
to turn on/off bluetooth interface from the command line:
rfkill unblock bluetooth or sudo /etc/init.d/bluetooth stop
rfkill block bluetooth or sudo /etc/init.d/bluetooth start
make connection from command line
hciconfig
https://computingforgeeks.com/connect-to-bluetooth-device-from-linux-terminal/
.. moduleauthor:: Zoltan Siki <[email protected]>,
Kecskeméti Máté <[email protected]>
"""
import logging
import bluetooth
import time
from iface import Iface
class BluetoothIface(Iface):
    """ Interface to communicate through a bluetooth interface as a client.
        This class depends on pybluez.

            :param name: name of bluetooth interface (str)
            :param mac: mac address of server to connect
            :param port: bluetooth port, default 3
            :param timeout: socket timeout in seconds for send/receive, default 5
            :param eomRead: end of message char from instrument (str), default '\\r\\n'
            :param eomWrite: end of message char from computer (str), default '\\r\\n'
    """
    def __init__(self, name, mac, port=3, timeout=5, eomRead='\r\n',
                 eomWrite='\r\n'):
        """ Constructor for bluetooth client
        """
        super(BluetoothIface, self).__init__(name)
        self.mac = mac
        self.port = port
        self.timeout = timeout
        self.eomRead = eomRead
        self.eomWrite = eomWrite
        self.socket = None
        # connect immediately; on failure self.socket stays None and
        # self.state records the error (see Open)
        self.Open()

    def __del__(self):
        """ Destructor for bluetooth client
        """
        self.Close()
        self.socket = None

    def Open(self):
        """ Open bluetooth communication

            On failure the socket is discarded and self.state is set to
            IF_SOURCE.  NOTE(review): no timeout is applied before
            connect(), so an unresponsive peer can block here -- confirm
            whether that is intended.
        """
        self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
        try:
            self.socket.connect((self.mac, self.port))
        except Exception:
            logging.error(" error opening bluetooth connection")
            self.state = self.IF_SOURCE
            self.socket = None

    def Close(self):
        """ Close bluetooth communication

            Safe to call repeatedly; any error while closing is ignored.
        """
        try:
            self.socket.close()
        except Exception:
            pass

    def GetLine(self):
        """ read a line from bluetooth

            :returns: received message with the end-of-message marker
                stripped (str); may be partial if an error/timeout broke
                the read loop, or None if the interface is unusable
        """
        if self.socket is None or self.state != self.IF_OK:
            logging.error(" bluetooth connection not opened or in error state")
            return None
        # read answer till end of message marker
        ans = ''
        # w is a negative index selecting the tail of ans that is compared
        # against the end-of-message marker
        w = -1 * len(self.eomRead)
        while ans[w:] != self.eomRead:
            ch = ''
            try:
                ch = (self.socket.recv(1)).decode('ascii')
            except Exception:
                self.state = self.IF_READ
                logging.error(" cannot read bluetooth connection")
                break
            if ch == '':
                # timeout exit loop
                # NOTE(review): an empty recv() result typically means the
                # peer closed the connection; an actual socket timeout
                # raises and is caught above -- confirm IF_TIMEOUT is the
                # intended state here
                self.state = self.IF_TIMEOUT
                logging.error(" timeout on bluetooth")
                break
            ans += ch
        # remove end of line
        logging.debug(" message got: %s", ans)
        # strip() removes any of the marker's characters from both ends
        ans = ans.strip(self.eomRead)
        return ans

    def PutLine(self, msg):
        """ Send message through bluetooth

            :param msg: message to send (str)
            :returns: 0 - on OK, -1 on error or interface is in error state
        """
        # do nothing if interface is in error state
        if self.socket is None or self.state != self.IF_OK:
            logging.error(" bluetooth connection not opened or in error state")
            return -1
        # add CR/LF to message end (only if not already present)
        w = -1 * len(self.eomWrite)
        if msg[w:] != self.eomWrite:
            msg += self.eomWrite
        # remove special characters (non-ASCII bytes are dropped)
        msg = msg.encode('ascii', 'ignore')
        # send message to bluetooth interface
        logging.debug(" message sent: %s", msg)
        try:
            self.socket.settimeout(self.timeout)
            self.socket.send(msg)
        except Exception:
            self.state = self.IF_WRITE
            logging.error(" cannot write to bluetooth connection")
            return -1
        return 0

    def Send(self, msg):
        """ send message to bluetooth and wait for answer

            :param msg: message to send, it can be multipart message separated by '|' (str)
            :returns: answer from instrument (str), parts joined by '|';
                parts whose write failed are skipped
        """
        msglist = msg.split("|")
        res = ''
        #sending
        for m in msglist:
            if self.PutLine(m) == 0:
                # fixed settle time before reading the reply
                # NOTE(review): 5 s per message part is hard coded -- confirm
                time.sleep(5)
                res += self.GetLine() + '|'
        if res.endswith('|'):
            res = res[:-1]
        return res
if __name__ == "__main__":
    # Ad-hoc smoke test: connect to a known device over RFCOMM channel 1.
    #a = BluetoothIface('test', '00:12:F3:04:ED:06', 1) # leica 1200
    a = BluetoothIface('test', '00:07:80:57:3B:6E', 1) # topcon hiper II rover
    if a.GetState() == a.IF_OK:
        # NOTE(review): '%R1Q,...' looks like a Leica GeoCom request --
        # confirm it is meaningful for the Topcon device selected above
        print(a.Send('%R1Q,2008:1,0'))
    print(a.GetState())
|
Creator / Producers: Helena Harris and Posie Graeme-Evans.
Executive Producer: Kris Noble. Director: Ian Munro.
Hi-5 - Wayne the Crane. Sharing Stories Book & Tape. ISBN 1 66503 400 2.
Book softcover. 24 pages. Cassette running time 10mins.
The Five Mile Press Pty. Ltd. 2000.
Music CD. Jump and Jive with Hi-5. No:496058.2. Sony Wonder.
Music CD. HI-5 It's a Party. No:497848.2. Sony Wonder. 2000.
Producer: Chris Harriott. Recorded at Trackdown Digital, Camperdown, Sydney, NSW.
08: North South East and West.
16: You Can't See Me.
32: North South East and West.
Music CD. HI-5 Boom Boom Beat. No:5021772000. Sony Wonder.
Music CD. It's a Hi-5 Christmas. No:5043022000. Sony Wonder. 2001.
Producer: Chris Harriott. Recorded at Sunstone Music, Enmore, NSW.
01: Santa Claus is Coming.
04: Rudolph the Red Nosed Reindeer.
05: Rockin' Around the Christmas tree.
07: Santa Wear Your Shorts!
08: Away in a Manger.
09: T'was the Night Before Christmas.
10: Dear Santa (A Christmas Wish).
12: Five Days to Christmas.
14: We Wish You a Merry Christmas.
Music CD. It's a Hi-5 Celebrate. No:5083902000. Sony Wonder.
Music x 2CD. Hi-5 Double The Fun - Jump And Jive With Hi-5 / It's A Party. No: 5051582000. Sony Wonder.
|
"""This is an example implementation of a "get_logs" handler
for cmp-connectors. For the purpose of this example, a python generator is
generating fake logs
"""
import datetime as dt
import itertools
def get_logs(event, context):
    """nflex entry point: fetch provider logs, convert them to the CMP
    format and ship them to CMP.

    :param event: dict carrying provider_id, resource_id, start_date,
        end_date (credentials are mocked below for this example)
    :param context: nflex execution context providing ``api`` and ``log``
    :returns: CMP's JSON response from post_logs
    :raises Exception: when a required key is missing from the event
    """
    # mock the credentials for now, they're usually inside the event
    # e.g. credentials = event['credentials']
    credentials = {
        "username": "usr",
        "password": "pass",
        "region": "eu"
    }

    try:
        username = credentials["username"]
        password = credentials["password"]
        region = credentials["region"]       # Provider region of the resource
        provider_id = event["provider_id"]   # Provider ID of the resource
        resource_id = event["resource_id"]   # CMP ID of the resource
        start_date = event["start_date"]
        end_date = event["end_date"]
    except KeyError as missing:
        raise Exception("Missing \"%s\" from the event" % missing)

    client = NTTTrainingClient(username=username,
                               password=password,
                               region=region)
    provider_logs = client.get_logs(provider_id=provider_id,
                                    start_date=start_date,
                                    end_date=end_date)
    context.log("Collected %d logs from Prototype provider" % len(provider_logs))

    cmp_logs = build_cmp_logs(context, resource_id, provider_logs)
    context.log("Built %d CMP logs" % len(cmp_logs))
    return post_logs(context=context, data=cmp_logs)
# Word pool used to fabricate varied-looking fake log messages.
ADJECTIVES = [
    "bad", "terrible", "awful", "sinister", "despicable",
    "good", "great", "groovy", "wonderful", "marvelous",
    "weird", "mysterious", "unexpected", "worrying",
]
def logs_generator(adjectives):
    """Return an infinite iterator of fake log messages, cycling through
    one "Something <adjective> happened" message per given adjective."""
    messages = ["Something %s happened" % word for word in adjectives]
    return itertools.cycle(messages)
logs = logs_generator(adjectives=ADJECTIVES)
class NTTTrainingClient(object):
    """Fake provider client that generates logs.

    It draws messages from the module-level ``logs`` generator above.
    """

    def __init__(self, *args, **kwargs):
        """The arguments depend on the format of the provider credentials"""
        self.provider = 'ntt-training'

    def get_logs(self, provider_id, start_date, end_date):
        """Query the provider for log data between the two ISO-8601
        timestamps (inclusive) and return it as a list of dicts."""
        fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
        window_start = dt.datetime.strptime(start_date, fmt)
        window_end = dt.datetime.strptime(end_date, fmt)
        return self._generate_logs(logs, window_start, window_end)

    def _generate_logs(self, iterator, start, end):
        # one fake INFO entry per minute, inclusive of both window edges
        entries = []
        stamp = start
        while stamp <= end:
            entries.append({
                "message": next(iterator),
                "level": "INFO",
                "time": stamp.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
            })
            stamp += dt.timedelta(minutes=1)
        return entries
def build_cmp_logs(context, resource_id, logs):
    """Convert the provider logs into a CMP-friendly format.

    :param context: execution context (unused here, kept for signature
        parity with the other handlers)
    :param resource_id: CMP ID of the resource the logs belong to
    :param logs: provider entries with "level"/"time"/"message" keys
    :returns: list of CMP log dicts
    """
    return [
        {
            "service": "nflex.cmp-adapter.ntt-training",
            "resource_id": resource_id,
            "severity": entry["level"],
            "timestamp": entry["time"],
            "message": entry["message"],
        }
        for entry in logs
    ]
def post_logs(context, data):
    """Send the logs to CMP

    :param context: nflex execution context providing ``api`` and ``log``
    :param data: list of CMP-formatted log dicts (see build_cmp_logs)
    :returns: parsed JSON response from CMP, or None (implicitly) when the
        POST, status check or JSON decoding failed
    """
    try:
        response = context.api.post(path="/logs", data=data)
        response.raise_for_status()
        return response.json()
    except Exception as err:
        # best-effort delivery: report and swallow so log shipping never
        # breaks the caller
        context.log("Error sending logs to CMP: %s" % err,
                    severity="ERROR")
|
KSP Warder Recruitment Notification 2019 is announced by the Karnataka Prisons Department of which the Contenders need to refer the Karnataka Prisons Department Recruitment Notification to know the details about 662 Job Openings of Jailor, Warder Posts and then to enroll for the Posts by sending the Karnataka Jailor Application Form in Online through ksp.gov.in on or before 09th March 2019.
Karnataka Prisons Recruitment 2019: The candidates who are searching for the latest news from the side of the Karnataka Police Department. The Officials announced the KSP Warder Notification 2019 for hiring a total of 662 Posts of Jailor, Warder Posts to fill the vacancies across Karnataka State. So, the postulants need to Read the full announcement notice at the main website @ ksp.gov.in about the KSP Jailor Recruitment and if they are eligible they can enroll to it. The major links are given in the bottom of the page. You can stay tuned to this page for having continuous information about ksp.gov.in Recruitment of Jailor & Warder Posts. Jobschat.in will provide the Complete info about the Karnataka Prisons Jailor, Warder Job Vacancy whenever the candidates requested.
Karnataka Jailor & Warder Recruitment: The candidates who want to work with the Karnataka Prisons Department have good news, because the KSP Board has released a huge vacancy notification in order to fill 662 Posts. The officials are inviting the Karnataka Jailor Application. You can only apply in online mode at the official website @ “jw19.kpdonline.co.in”; alternatively, simply check the Advertisement of KSP Warder, Jailor Recruitment 2019 and, after gaining a complete understanding, use the links to enroll without fail. This will be helpful for Police Job Hunters.
Warder Post: SSLC/ 10th Standard or Equivalent from a recognized board.
Jailor Post: Bachelor’s Degree in Sociology, Psychology/ Criminology/ Correctional Administration/ Equivalent Qualification.
Karnataka Prisons Organization is giving the pay scale to the selected candidates in the recruitment hiring, as per the given job roles in the notification.
Aspirants must pay the Application Fee along with Submission of KSP Warder Recruitment Application Form.
The officials of the Karnataka Prisons Department is willing to recruit the newcomers into the newly released KSP 662 Jailor & Warder Job Vacancies. The authorities are planning to organize the hiring process by using written test, PET, PST and viva voice process rounds one by one step. So, the Applicants who did registration have to workout hard in order to qualify in all those rounds to get selected by the KSP Officials. Respective Admit Cards also be informed by the Officials to the applicants, So, they may use them and stay tuned to this portal for having the updates about KSP Warder Recruitment 2019.
How to Apply Online Karnataka Prisons Recruitment 2019?
Go and visit the official website of Karnataka Prisons Department ksp.gov.in.
Search for announced Karnataka Prisons Recruitment Notification 2019 For 662 Jailor, Warder Posts.
Check the Details first and if you are interested to open the ksp.gov.in Warder Application Form.
Fill up the complete details as per the notification required.
After filling the complete details click on the “Submit” button and Pay the Fee.
Take a Print out of the KSP Warder, Jailor Application Form.
The post KSP Warder Recruitment 2019 || Apply For 662 Karnataka State Police Warder, Jailor Posts @ ksp.gov.in appeared first on JobsChat.in.
|
from django import forms
from django.forms import CharField, Form, PasswordInput
from .models import *
from django.contrib.auth.models import User, Group
from django.db.models.fields import BLANK_CHOICE_DASH
class volunteerUserSignupForm(forms.ModelForm):
    """Signup form for the built-in ``User`` behind a volunteer account.

    NOTE(review): the password is a plain CharField; presumably the view
    hashes it (e.g. ``set_password``) before saving -- confirm.
    """
    # Override so the password input renders masked instead of plain text.
    password = forms.CharField(widget=PasswordInput())

    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'email', 'username', 'password', ]
class volunteerSignupForm(forms.ModelForm):
    """Volunteer profile details collected at signup.

    Backed by ``Volunteer_User_Add_Ons``; the linked ``user`` field is
    excluded here and presumably attached in the view -- confirm.
    """
    # (value, label) choice tuples; the leading empty entry acts as the
    # unselected placeholder.
    hsGradChoices = (
        ("", 'Select range'),
        (1, '1-4'),
        (2, '5-10'),
        (3, '11 or more'),
        (4, 'Have not graduated'),)
    collegeLevelChoice = (
        ("", "Select"),
        (1, "associate"),
        (2, "bachelor's"),
        (3, "master's"),
        (4, "doctoral"),
        (5, "none"),)
    # Yes/No radio buttons coerced to a real bool ('True' -> True).
    canGetText = forms.TypedChoiceField(coerce=lambda x: x == 'True', choices=((True, 'Yes'), (False, 'No')),
                                        widget=forms.RadioSelect, label="Can we text you on this number?", required=True)
    isBusinessOwner = forms.BooleanField(label="I am a business owner", initial=True, required=False)
    yearsInIndustry = forms.CharField(label="Number of years in this industry", required=True,
                                      widget=forms.NumberInput(attrs={'size': '10', 'placeholder': ''}))
    workTitle = forms.CharField(label="Work title", required=False)
    workIndustry = forms.CharField(label="Work industry", required=False)
    linkedinProfile = forms.CharField(label="Your Linkedin profile", required=False)
    # NOTE(review): choices are passed positionally here; newer Django
    # versions make ChoiceField's arguments keyword-only -- confirm this
    # works on the project's Django version (else use choices=hsGradChoices).
    yearsSinceHSGraduation = forms.ChoiceField(hsGradChoices, label="Year since high school graduation", required=True)
    collegeLevel = forms.ChoiceField(choices=collegeLevelChoice, label="Highest college degree",
                                     required=True, initial="")
    collegeMajor = forms.CharField(label="College major(s)", required=False)
    skills = forms.CharField(label="Please enter skills related to your job, role and industry",
                             required=False)
    interests = forms.CharField(label="Please provide some interests that lead you to your career choice",
                                required=False)

    def __init__(self, *args, **kwargs):
        super(volunteerSignupForm, self).__init__(*args, **kwargs)
        # narrow the numeric years field via an inline style
        self.fields['yearsInIndustry'].widget.attrs['style'] = "width:20%"

    class Meta:
        model = Volunteer_User_Add_Ons
        # fields = '__all__'
        exclude = ['user']
class TeacherAddClass(forms.Form):
    """Form for a teacher to create a class from an uploaded student file."""
    class_name = forms.CharField(label="Class name")
    # Roster upload; NOTE(review): presumably a CSV of students parsed in
    # the view -- confirm the expected columns.
    students_csv = forms.FileField(required=True, label='Upload File')
class TeacherAddClassAssignment(forms.Form):
    """Form for a teacher to add an assignment to a class."""
    assignment_name = forms.CharField(label="Assignment name")
    description = forms.CharField(label="Description", required=False)
|
How can you find warm and eco-friendly winter clothing?
Beauty: how can natural products be used for face care in winter?
What will the fashion trends of the coming year look like?
How can you find out your clothing's origin?
Why should you choose eco-friendly clothing?
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: zoidberg
"""
import re
import time
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
from module.common.json_layer import json_loads
def convertDecimalPrefix(m):
    # decimal prefixes used in filesize and traffic:
    # expand "<number> <k|M|G>" to the plain digit string, e.g. a match on
    # "1.5 k" yields "1500" (format with 3/6/9 decimals, then drop the dot)
    decimals = {'k': 3, 'M': 6, 'G': 9}[m.group(2)]
    expanded = "%.*f" % (decimals, float(m.group(1)))
    return expanded.replace('.', '')
class UlozTo(SimpleHoster):
    __name__ = "UlozTo"
    __type__ = "hoster"
    # accepted download URLs (all known uloz.to domains and aliases)
    __pattern__ = r"http://(\w*\.)?(uloz\.to|ulozto\.(cz|sk|net)|bagruj.cz|zachowajto.pl)/(?:live/)?(?P<id>\w+/[^/?]*)"
    __version__ = "0.95"
    __description__ = """uloz.to"""
    __author_name__ = ("zoidberg")
    # regexes matched against the downloaded page HTML
    FILE_NAME_PATTERN = r'<a href="#download" class="jsShowDownload">(?P<N>[^<]+)</a>'
    FILE_SIZE_PATTERN = r'<span id="fileSize">.*?(?P<S>[0-9.]+\s[kMG]?B)</span>'
    # name pattern shown when the file is password protected
    FILE_INFO_PATTERN = r'<p>File <strong>(?P<N>[^<]+)</strong> is password protected</p>'
    FILE_OFFLINE_PATTERN = r'<title>404 - Page not found</title>|<h1 class="h1">File (has been deleted|was banned)</h1>'
    # normalize decimal-prefixed sizes (e.g. "1.5 kB" -> "1500") and the host
    FILE_SIZE_REPLACEMENTS = [('([0-9.]+)\s([kMG])B', convertDecimalPrefix)]
    FILE_URL_REPLACEMENTS = [(r"(?<=http://)([^/]+)", "www.ulozto.net")]
    # markers for the password form, the VIP disclaimer link and the two
    # (free/premium) download forms
    PASSWD_PATTERN = r'<div class="passwordProtectedFile">'
    VIPLINK_PATTERN = r'<a href="[^"]*\?disclaimer=1" class="linkVip">'
    FREE_URL_PATTERN = r'<div class="freeDownloadForm"><form action="([^"]+)"'
    PREMIUM_URL_PATTERN = r'<div class="downloadForm"><form action="([^"]+)"'
def setup(self):
self.multiDL = self.premium
self.resumeDownload = True
def process(self, pyfile):
pyfile.url = re.sub(r"(?<=http://)([^/]+)", "www.ulozto.net", pyfile.url)
self.html = self.load(pyfile.url, decode=True, cookies=True)
passwords = self.getPassword().splitlines()
while self.PASSWD_PATTERN in self.html:
if passwords:
password = passwords.pop(0)
self.logInfo("Password protected link, trying " + password)
self.html = self.load(pyfile.url, get={"do": "passwordProtectedForm-submit"},
post={"password": password, "password_send": 'Send'}, cookies=True)
else:
self.fail("No or incorrect password")
if re.search(self.VIPLINK_PATTERN, self.html):
self.html = self.load(pyfile.url, get={"disclaimer": "1"})
self.file_info = self.getFileInfo()
if self.premium and self.checkTrafficLeft():
self.handlePremium()
else:
self.handleFree()
self.doCheckDownload()
def handleFree(self):
action, inputs = self.parseHtmlForm('id="frm-downloadDialog-freeDownloadForm"')
if not action or not inputs:
self.parseError("free download form")
self.logDebug('inputs.keys() = ' + str(inputs.keys()))
# get and decrypt captcha
if inputs.has_key('captcha_value') and inputs.has_key('captcha_id') and inputs.has_key('captcha_key'):
# Old version - last seen 9.12.2013
self.logDebug('Using "old" version')
captcha_value = self.decryptCaptcha("http://img.uloz.to/captcha/%s.png" % inputs['captcha_id'])
self.logDebug('CAPTCHA ID: ' + inputs['captcha_id'] + ', CAPTCHA VALUE: ' + captcha_value)
inputs.update({'captcha_id': inputs['captcha_id'], 'captcha_key': inputs['captcha_key'], 'captcha_value': captcha_value})
elif inputs.has_key("captcha_value") and inputs.has_key("timestamp") and inputs.has_key("salt") and inputs.has_key("hash"):
# New version - better to get new parameters (like captcha reload) because of image url - since 6.12.2013
self.logDebug('Using "new" version')
xapca = self.load("http://www.ulozto.net/reloadXapca.php", get = { "rnd": str(int(time.time()))})
self.logDebug('xapca = ' + str(xapca))
data = json_loads(xapca)
captcha_value = self.decryptCaptcha(str(data['image']))
self.logDebug('CAPTCHA HASH: ' + data['hash'] + ', CAPTCHA SALT: ' + str(data['salt']) + ', CAPTCHA VALUE: ' + captcha_value)
inputs.update({'timestamp': data['timestamp'], 'salt': data['salt'], 'hash': data['hash'], 'captcha_value': captcha_value})
else:
self.parseError("CAPTCHA form changed")
self.multiDL = True
self.download("http://www.ulozto.net" + action, post=inputs, cookies=True, disposition=True)
def handlePremium(self):
self.download(self.pyfile.url + "?do=directDownload", disposition=True)
#parsed_url = self.findDownloadURL(premium=True)
#self.download(parsed_url, post={"download": "Download"})
def findDownloadURL(self, premium=False):
msg = "%s link" % ("Premium" if premium else "Free")
found = re.search(self.PREMIUM_URL_PATTERN if premium else self.FREE_URL_PATTERN, self.html)
if not found:
self.parseError(msg)
parsed_url = "http://www.ulozto.net" + found.group(1)
self.logDebug("%s: %s" % (msg, parsed_url))
return parsed_url
def doCheckDownload(self):
check = self.checkDownload({
"wrong_captcha": re.compile(r'<ul class="error">\s*<li>Error rewriting the text.</li>'),
"offline": re.compile(self.FILE_OFFLINE_PATTERN),
"passwd": self.PASSWD_PATTERN,
"server_error": 'src="http://img.ulozto.cz/error403/vykricnik.jpg"', # paralell dl, server overload etc.
"not_found": "<title>Ulož.to</title>"
})
if check == "wrong_captcha":
#self.delStorage("captcha_id")
#self.delStorage("captcha_text")
self.invalidCaptcha()
self.retry(reason="Wrong captcha code")
elif check == "offline":
self.offline()
elif check == "passwd":
self.fail("Wrong password")
elif check == "server_error":
self.logError("Server error, try downloading later")
self.multiDL = False
self.setWait(3600, True)
self.wait()
self.retry()
elif check == "not_found":
self.fail("Server error - file not downloadable")
getInfo = create_getInfo(UlozTo)
|
Save the date for the third Annual Staff Appreciation Luncheon on Wednesday, 5/24/17.
Good food, relaxing atmosphere, fun with your friends and door prizes, too! What more could you ask for?
|
from django.apps import AppConfig
from django.db.models import signals
from nodeconductor.structure import SupportedServices
class SaltStackConfig(AppConfig):
    """Django application config for the SaltStack core integration.

    On ``ready()`` it registers the SaltStack backend with NodeConductor,
    declares the storage/tenant quota fields on service settings, and
    connects event-log signal handlers for all SaltStack property models.
    """
    name = 'nodeconductor_saltstack.saltstack'
    verbose_name = 'SaltStack Core'
    service_name = 'SaltStack'

    def ready(self):
        """Wire up backend registration, quotas and signal handlers.

        Imports are deferred to this method so they only run once the
        Django app registry is fully populated (avoids circular imports).
        """
        from .backend import SaltStackBackend
        from .models import SaltStackProperty
        # Bug fix: "import handlers" is a Python-2-only implicit relative
        # import and raises ImportError inside a package on Python 3; the
        # explicit relative form works on both.
        from . import handlers

        SupportedServices.register_backend(SaltStackBackend)

        from nodeconductor.structure.models import ServiceSettings
        from nodeconductor.quotas.fields import QuotaField, CounterQuotaField
        from ..exchange.models import ExchangeTenant

        # Quotas are only created for settings belonging to SaltStack services.
        ServiceSettings.add_quota_field(
            name='sharepoint_storage',
            quota_field=QuotaField(
                creation_condition=lambda service_settings: service_settings.type == SaltStackConfig.service_name,
            ),
        )

        ServiceSettings.add_quota_field(
            name='exchange_storage',
            quota_field=QuotaField(
                creation_condition=lambda service_settings: service_settings.type == SaltStackConfig.service_name,
            ),
        )

        # Counter quota: number of Exchange tenants per service settings.
        ServiceSettings.add_quota_field(
            name='exchange_tenant_count',
            quota_field=CounterQuotaField(
                creation_condition=lambda service_settings: service_settings.type == SaltStackConfig.service_name,
                target_models=[ExchangeTenant],
                path_to_scope='service_project_link.service.settings',
            )
        )

        # Log creation/deletion of every SaltStack property model; a unique
        # dispatch_uid per model prevents duplicate signal connections.
        for index, model in enumerate(SaltStackProperty.get_all_models()):
            signals.post_save.connect(
                handlers.log_saltstack_property_created,
                sender=model,
                dispatch_uid='nodeconductor_saltstack.saltstack.handlers.log_saltstack_property_created{}_{}'.format(
                    model.__name__, index),
            )

            signals.post_delete.connect(
                handlers.log_saltstack_property_deleted,
                sender=model,
                dispatch_uid='nodeconductor_saltstack.saltstack.handlers.log_saltstack_property_deleted{}_{}'.format(
                    model.__name__, index),
            )
|
* Additional messages can be purchased for $0.025 each.
** Call Forwarding minutes are billed separately at $0.04 per minute.
When you activate a new account you will receive 1 phone number and 50 messages. This will give you a chance to try it before you buy it. When your messages have expired you can upgrade to one of our monthly plans.
Yes. If you will consistently exceed 5,000 messages per month contact us at [email protected] to discuss a customized plan.
Can I use my existing business phone number?
No, you will provision new text-enabled phone numbers. We do not port your existing business phone numbers. However, you can have incoming calls forwarded to your business line.
Do monthly messages roll over?
No, at the end of your monthly billing cycle, we’ll top you back up to your monthly allocation. Any messages you don’t use from the previous month do not roll over into the next month. You can always purchase additional Anytime messages to add to your account. Anytime messages do not expire and will stay on your account balance until they’re used.
Anytime messages are additional messages that you can purchase over and above your monthly allotment. Anytime messages are only consumed when your monthly message allotment has been exceeded. Anytime Messages do not expire.
Not right now. Texting is currently only enabled in the US & Canada.
|
import math
import random
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type
from bot import bot
from enums import Action, Clue, Rank, Suit, Variant
from game import Game
# Fixed seat names; list index == player position at the table.
names: List[str] = ['Alice', 'Bob', 'Cathy', 'Donald', 'Emily']
# Spelled-out counts used when printing clue messages (index == count).
numberWords: List[str] = ['zero', 'one', 'two', 'three', 'four', 'five']
class CardStatus(Enum):
    """Lifecycle state of a card: in deck, in a hand, played, or discarded."""
    Deck = 0
    Hand = 1
    Play = 2
    Discard = 3
class ServerGame:
    """Authoritative Hanabi game state and rules engine.

    Owns the deck, hands, play/discard piles and the clue/strike counters,
    and drives the bot players (Game instances) through ServerConnection
    shims that translate their emitted actions back into engine calls
    (clue_player / play_card / discard_card).
    """
    def __init__(self,
                 variant: Variant,
                 players: int,
                 botCls: Type[bot.Bot], *,
                 print_messages: Any=False,
                 print_verbose: Any=False,
                 null_clues: Any=False,
                 seed: Any=None, **kwargs) -> None:
        """Create a shuffled deck, empty piles and one bot per seat.

        Raises ValueError for an unknown variant or a player count
        outside 2..5.
        """
        if variant not in Variant:
            raise ValueError('variant')
        if players < 2 or players > 5:
            raise ValueError('players')
        self.seed: Any = seed
        # Dedicated RNG so games are reproducible for a given seed.
        self.rng: random.Random = random.Random(self.seed)
        self.variant: Variant = variant
        self.printVerbose: Optional[bool] = convert_print(print_verbose)
        self.printMessages: Optional[bool]= convert_print(print_messages)
        self.allowNullClues: bool = bool(null_clues)
        self.gameLog: List[Tuple[str, dict]] = []
        self.messages: List[str] = []
        self.verbose: List[str] = []
        self.deck: List[ServerCard]
        self.initialize_deck()
        # Hands/piles store deck indices, not ServerCard objects.
        self.hands: List[List[int]] = [[] for _ in range(players)]
        self.discards: Dict[Suit, List[int]]
        self.discards = {s: [] for s in self.variant.pile_suits}
        self.plays: Dict[Suit, List[int]]
        self.plays = {s: [] for s in self.variant.pile_suits}
        self.nextDraw: int = 0
        self.turnCount: int = 0
        # Turn number after which the game ends (set when deck runs out).
        self.endTurn: Optional[int] = None
        self.maxScore: int = 25
        self.score: int = 0
        self.clues: int = 8
        self.strikes: int = 0
        self.loss: bool = False
        self.connections: List[ServerConnection]
        self.connections = [ServerConnection(p, self)
                            for p in range(players)]
        self.players: List[Game]
        self.players = [Game(self.connections[p], self.variant,
                             names[:players], p, botCls, **kwargs)
                        for p in range(players)]
        self.currentPlayer: int = self.rng.randrange(players)
        self.lastAction: int = (self.currentPlayer - 1) % players
    def isGameComplete(self) -> bool:
        """True once the game is over: 3 strikes, max score reached, or
        the final round (after deck exhaustion) has been played."""
        if self.strikes == 3 or self.score >= self.maxScore:
            return True
        if self.turnCount > (self.endTurn or math.inf):
            return True
        return False
    def updateMaxScore(self) -> None:
        """Recompute the best still-achievable score from the discard piles."""
        maxScore: int = 0
        s: Suit
        for s in self.variant.pile_suits:
            possible: int = 5
            copies: Dict[Rank, int] = {r: 0 for r in Rank}
            d: int
            for d in self.discards[s]:
                card: ServerCard = self.deck[d]
                copies[card.rank] += 1
            r: Rank
            # Walk ranks high-to-low; the lowest fully-discarded rank caps
            # the pile at rank-1.
            for r in reversed(Rank): # type: ignore
                totalCopies: int = r.num_copies
                if self.variant == Variant.OneOfEach and s == Suit.Extra:
                    totalCopies += 1
                if copies[r] == totalCopies:
                    possible = r.value - 1
            maxScore += possible
        self.maxScore = maxScore
    def print(self,
              message: Optional[str]=None,
              verbose: Optional[str]=None,
              final: bool=False) -> None:
        """Record a terse message and/or verbose line, echoing per config.

        NOTE: intentionally shadows the builtin print inside this class;
        the builtin is still reachable via the bare calls below.
        """
        verbose = verbose if verbose is not None else message
        if verbose is not None:
            self.verbose.append(verbose)
            if self.printVerbose:
                print(verbose)
        if message is not None:
            self.messages.append(message)
            if not self.printVerbose and self.printMessages:
                print(message)
            # 'final' messages are printed even when message printing was
            # left unset (None).
            if final and self.printMessages is None:
                print(message)
    def run_game(self) -> None:
        """Play a full game: deal, loop turns until complete, report."""
        self.log('game_start', {'replay': False})
        p: Game
        for p in self.players:
            self.log('init',
                     {'seat': p.botPosition,
                      'names': names[:len(self.players)],
                      'variant': self.variant.value,
                      'replay': False,
                      'spectating': False})
        # Standard Hanabi hand size: 5 cards for 2-3 players, else 4.
        handSize: int = 4 if len(self.players) > 3 else 5
        for p in self.players:
            for _ in range(handSize):
                self.draw_card(p.botPosition)
        self.print('{} goes first'.format(names[self.currentPlayer]))
        while not self.isGameComplete():
            self.send('notify', {'type': 'status', 'clues': self.clues,
                                 'score': self.score})
            self.send('notify', {'type': 'turn', 'who': self.currentPlayer,
                                 'num': self.turnCount})
            # Asking the current player for an action triggers the bot,
            # which calls back into this engine via its ServerConnection.
            self.send('action',
                      {'can_clue': self.clues > 0,
                       'can_discard': self.clues < 8},
                      player=self.currentPlayer)
            self.turnCount += 1
            self.currentPlayer = (self.currentPlayer + 1) % len(self.players)
            self.updateMaxScore()
        self.send('notify', {'type': 'game_over'})
        self.loss = self.strikes == 3
        if not self.loss:
            self.print("Players score {} points".format(self.score),
                       final=True)
        else:
            self.print("Players lost", final=True)
        self.print(verbose='')
        self.print(verbose='Number of Players: {}'.format(len(self.players)))
        self.print(verbose='Variant: {}'.format(self.variant.full_name))
        self.print(verbose='Deck Size: {}'.format(len(self.deck)))
        self.recordGameState()
    def recordGameState(self) -> None:
        """Dump the full end-of-game state to the verbose log."""
        deckSize: int = len(self.deck) - self.nextDraw
        lastPlayer: int = (self.currentPlayer - 1) % len(self.players)
        self.print(verbose='Deck Count: {}'.format(deckSize))
        self.print(verbose='Clue Count: {}'.format(self.clues))
        self.print(verbose='Score: {}'.format(self.score))
        self.print(verbose='Strikes: {}'.format(self.strikes))
        self.print(verbose='Max Possible Score: {}'.format(self.maxScore))
        self.print(verbose='Turn Count: {}'.format(self.turnCount))
        self.print(verbose='End Turn: {}'.format(self.endTurn))
        self.print(verbose='Next Draw Index: {}'.format(self.nextDraw))
        self.print(verbose='Last Player: {}'.format(names[lastPlayer]))
        self.print(verbose='')
        self.print(verbose='Player Hands (Newest To Oldest)')
        p: int
        hand: List[int]
        for p, hand in enumerate(self.hands):
            cards = []
            for deckIdx in reversed(hand):
                card = self.deck[deckIdx]
                cards.append('{} {}'.format(card.suit.full_name(self.variant),
                                            card.rank.value))
            self.print(verbose='{}: {}'.format(names[p], ', '.join(cards)))
        self.print(verbose='')
        self.print(verbose='Played Cards')
        s: Suit
        for s in self.variant.pile_suits:
            self.print(verbose='{}: {}'.format(s.full_name(self.variant),
                                               len(self.plays[s])))
        self.print(verbose='')
        self.print(verbose='Discarded Cards')
        for s in self.variant.pile_suits:
            discards: List[int] = []
            for deckIdx in self.discards[s]:
                card = self.deck[deckIdx]
                discards.append(card.rank.value)
            discards.sort()
            self.print(verbose='{}: {}'.format(
                s.full_name(self.variant),
                ', '.join(str(d) for d in discards)))
        self.print(verbose='')
    def log(self, type: str, resp: dict) -> None:
        """Append a (type, payload) entry to the game log."""
        self.gameLog.append((type, resp))
    def send(self,
             type: str,
             resp: dict, *,
             player: Optional[int]=None) -> None:
        """Deliver a message to one player (or broadcast) and log it."""
        if player is not None:
            p = self.players[player]
            p.received(type, resp)
        else:
            for p in self.players:
                p.received(type, resp)
        self.log(type, resp)
    def initialize_deck(self) -> None:
        """Build, shuffle and status-tag the full deck for the variant."""
        self.deck = []
        index: int = 0
        s: Suit
        r: Rank
        i: int
        for s in self.variant.pile_suits:
            for r in Rank:
                # OneOfEach keeps a single copy of each Extra-suit card;
                # all other suits get the normal per-rank copy counts.
                if not (s == Suit.Extra and self.variant == Variant.OneOfEach):
                    for i in range(r.num_copies):
                        self.deck.append(ServerCard(index, s, r, self.variant))
                        index += 1
                else:
                    self.deck.append(ServerCard(index, s, r, self.variant))
                    index += 1
        self.rng.shuffle(self.deck)
        card: ServerCard
        for i, card in enumerate(self.deck):
            card.position = i
            card.status = CardStatus.Deck
    def draw_card(self, player: int) -> None:
        """Move the next deck card into player's hand and notify everyone."""
        if self.nextDraw >= len(self.deck):
            return
        card: ServerCard = self.deck[self.nextDraw]
        if card.status != CardStatus.Deck:
            raise GameException('Bad Card Status', card.status)
        card.player = player
        card.status = CardStatus.Hand
        p: Game
        info: dict
        for p in self.players:
            info = {'type': 'draw',
                    'who': player,
                    'order': self.nextDraw}
            # Hanabi rule: everyone except the drawer sees the card's identity.
            if p.botPosition != player:
                info['suit'] = card.suit.value
                info['rank'] = card.rank.value
            self.send('notify', info, player=p.botPosition)
        # The game log always records the full card identity.
        info = {'type': 'draw',
                'who': player,
                'order': self.nextDraw,
                'suit': card.suit.value,
                'rank': card.rank.value}
        self.log('notify', info)
        self.hands[player].append(self.nextDraw)
        self.nextDraw += 1
        # Last card drawn: every player gets exactly one more turn.
        if self.nextDraw >= len(self.deck):
            self.endTurn = self.turnCount + len(self.players)
        self.send('notify', {'type': 'draw_size',
                             'size': len(self.deck) - self.nextDraw})
        self.print(verbose="{} draws {} {}".format(
            names[player], card.suit.full_name(self.variant), card.rank.value))
    def clue_player(self,
                    player: int,
                    target: int,
                    type: int,
                    value: int) -> None:
        """Validate and apply a rank or suit clue from player to target.

        Raises GameException on any rule violation (wrong turn, no clues
        left, self-clue, invalid value, or an empty clue when null clues
        are disallowed).
        """
        if self.isGameComplete():
            raise GameException('Game is complete')
        if self.lastAction == player:
            raise GameException('Player already made a move', player)
        if self.currentPlayer != player:
            raise GameException('Wrong Player Turn', player)
        if player == target:
            raise GameException('Cannot clue self')
        if self.clues == 0:
            raise GameException('Cannot Clue')
        if target >= len(self.players):
            raise GameException('Target does not exist', target)
        rank: Rank
        suit: Suit
        positions: List[int]
        cards: List[int]
        card: ServerCard
        i: int
        h: int
        if type == Clue.Rank.value:
            rank = Rank(value)
            if not rank.valid():
                raise GameException('Invalid rank value', value)
            positions = []
            cards = []
            for i, h in enumerate(self.hands[target]):
                card = self.deck[h]
                if card.rank == rank:
                    # Slot numbers count from the newest card (slot 1).
                    positions.insert(0, len(self.hands[target]) - i)
                    cards.append(h)
            if not cards and not self.allowNullClues:
                raise GameException('No Cards Clued')
            self.send('notify',
                      {'type': 'clue',
                       'giver': player,
                       'target': target,
                       'clue': {'type': type, 'value': value},
                       'list': cards})
            self.clues -= 1
            self.lastAction = player
            self.print(
                "{} tells {} about {} {}'s".format(
                    names[player], names[target], numberWords[len(cards)],
                    rank.value),
                "{} tells {} about {} {}'s in slots {}".format(
                    names[player], names[target], numberWords[len(cards)],
                    rank.value, ', '.join(str(p) for p in positions)))
        elif type == Clue.Suit.value:
            suit = Suit(value)
            if not suit.valid(self.variant):
                raise GameException('Invalid suit value', value)
            positions = []
            cards = []
            for i, h in enumerate(self.hands[target]):
                card = self.deck[h]
                if card.suit == suit:
                    positions.insert(0, len(self.hands[target]) - i)
                    cards.append(h)
                # Rainbow (Extra) cards are touched by every colour clue.
                # NOTE(review): they are added to 'cards' but not to
                # 'positions' -- presumably intentional; verify.
                if self.variant == Variant.Rainbow and card.suit == Suit.Extra:
                    cards.append(h)
            if not cards and not self.allowNullClues:
                raise GameException('No Cards Clued')
            self.send('notify',
                      {'type': 'clue',
                       'giver': player,
                       'target': target,
                       'clue': {'type': type, 'value': value},
                       'list': cards})
            self.clues -= 1
            self.lastAction = player
            self.print(
                "{} tells {} about {} {}'s".format(
                    names[player], names[target], numberWords[len(cards)],
                    suit.full_name(self.variant)),
                "{} tells {} about {} {}'s in slots {}".format(
                    names[player], names[target], numberWords[len(cards)],
                    suit.full_name(self.variant),
                    ', '.join(str(p) for p in positions)))
        else:
            raise GameException('Invalid clue type', type)
    def play_card(self, player: int, deckIdx: int) -> None:
        """Attempt to play a card; a wrong card is discarded with a strike."""
        if self.isGameComplete():
            raise GameException('Game is complete')
        if self.lastAction == player:
            raise GameException('Player already made a move', player)
        if self.currentPlayer != player:
            raise GameException('Wrong Player Turn', player)
        card: ServerCard = self.deck[deckIdx]
        if card.status != CardStatus.Hand:
            raise GameException('Bad Card Status', card.status)
        if card.player != player:
            raise GameException('Card does not belong to player', card.player)
        nextRank: int = len(self.plays[card.suit]) + 1
        position: int
        position = len(self.hands[player]) - self.hands[player].index(deckIdx)
        if card.rank.value == nextRank:
            # Successful play: extend the pile and score a point.
            self.plays[card.suit].append(card.position)
            self.send('notify',
                      {'type': 'played',
                       'which': {'suit': card.suit, 'rank': card.rank,
                                 'index': card.index, 'order': card.position}})
            self.score += 1
            card.status = CardStatus.Play
            self.hands[player].remove(deckIdx)
            self.print(
                "{} plays {} {}".format(
                    names[player], card.suit.full_name(self.variant),
                    card.rank.value),
                "{} plays {} {} from slot {}".format(
                    names[player], card.suit.full_name(self.variant),
                    card.rank.value, position))
        else:
            # Misplay: card goes to the discard pile and costs a strike.
            self.discards[card.suit].append(card.position)
            self.strikes += 1
            self.send('notify',
                      {'type': 'discard',
                       'which': {'suit': card.suit, 'rank': card.rank,
                                 'index': card.index, 'order': card.position}})
            self.send('notify',
                      {'type': 'strike',
                       'num': self.strikes})
            card.status = CardStatus.Discard
            self.hands[player].remove(deckIdx)
            self.print(
                "{} fails to play {} {}".format(
                    names[player], card.suit.full_name(self.variant),
                    card.rank.value),
                "{} fails to play {} {} from slot {}".format(
                    names[player], card.suit.full_name(self.variant),
                    card.rank.value, position))
        self.draw_card(player)
        self.lastAction = player
    def discard_card(self, player: int, deckIdx: int) -> None:
        """Discard a card from hand, regaining one clue, then redraw."""
        if self.isGameComplete():
            raise GameException('Game is complete')
        if self.lastAction == player:
            raise GameException('Player already made a move', player)
        if self.currentPlayer != player:
            raise GameException('Wrong Player Turn', player)
        # Discarding at the clue cap is illegal.
        if self.clues == 8:
            raise GameException('Cannot Discard')
        card: ServerCard = self.deck[deckIdx]
        if card.status != CardStatus.Hand:
            raise GameException('Bad Card Status', card.status)
        if card.player != player:
            raise GameException('Card does not belong to player', card.player)
        self.discards[card.suit].append(card.position)
        self.send('notify',
                  {'type': 'discard',
                   'which': {'suit': card.suit, 'rank': card.rank,
                             'index': card.index, 'order': card.position}})
        card.status = CardStatus.Discard
        position: int
        position = len(self.hands[player]) - self.hands[player].index(deckIdx)
        self.hands[player].remove(deckIdx)
        self.clues += 1
        self.print(
            "{} discards {} {}".format(
                names[player], card.suit.full_name(self.variant),
                card.rank.value),
            "{} discards {} {} from slot {}".format(
                names[player], card.suit.full_name(self.variant),
                card.rank.value, position))
        self.draw_card(player)
        self.lastAction = player
class ServerCard:
    """A single physical card in the deck, tracked by the server."""
    def __init__(self,
                 index: int,
                 suit: Suit,
                 rank: Rank,
                 variant: Variant) -> None:
        self.variant: Variant = variant
        self.index: int = index
        # Position in the shuffled deck; assigned by initialize_deck().
        self.position: int
        self.suit: Suit = suit
        self.rank: Rank = rank
        # Lifecycle state; assigned by initialize_deck().
        self.status: CardStatus
        # Seat currently holding the card; None while still in the deck.
        self.player: Optional[int] = None
    def __str__(self) -> str:
        return "{color} {number}".format(
            color=self.suit.full_name(self.variant),
            number=self.rank.value)
class ServerConnection:
    """Adapter that lets a bot's Game instance talk back to ServerGame.

    Mimics the socket-style ``emit`` interface the client code expects and
    translates emitted action messages into direct rule-engine calls.
    """
    def __init__(self, position: int, game: ServerGame) -> None:
        self.position: int = position
        self.game: ServerGame = game
    def emit(self, *args) -> None:
        """Dispatch an emitted ('message', data) pair to the game engine.

        NOTE(review): calls with an argument count other than 2 are
        silently ignored -- presumably only 2-arg emits occur; verify.
        """
        if len(args) == 2:
            type: str
            data: dict
            type, data = args
            if type != 'message':
                raise GameException('emit type')
            if data['type'] != 'action':
                raise GameException('data type')
            if data['resp']['type'] == Action.Clue.value:
                self.game.clue_player(
                    self.position, data['resp']['target'],
                    data['resp']['clue']['type'],
                    data['resp']['clue']['value'])
            elif data['resp']['type'] == Action.Play.value:
                self.game.play_card(self.position, data['resp']['target'])
            elif data['resp']['type'] == Action.Discard.value:
                self.game.discard_card(self.position, data['resp']['target'])
            else:
                raise GameException('emit action type')
class GameException(Exception):
    """Raised when an action violates the game rules or message protocol."""
    pass
def convert_print(arg: Any) -> Optional[bool]:
    """Interpret a print-option value that may arrive as a string.

    String handling (case-insensitive): 'false', '0' and '' mean False,
    'none' means None; any other value falls through to plain bool().
    """
    if isinstance(arg, str):
        lowered = arg.lower()
        if lowered in ('false', '0', ''):
            return False
        if lowered == 'none':
            return None
    return bool(arg)
|
Basically CUTE! Our Mauve Long Sleeve Top is a perfect basic for the Fall! Such a super soft and comfortable piece. The fabulous fit and easy to wear style make this a great look for any woman. Not to mention it’s so easy to pair with anything already in your closet! Such a cute top!
Beautiful color. Loved the fit!
|
# vim: set fenc=utf8 ts=4 sw=4 et :
import sys
import xml.sax
import imp
from os import path
from signal import signal, SIGINT
from shutil import copytree, ignore_patterns
from pkg_resources import resource_filename
from configparser import ConfigParser
from .logging import *
from .conf import Conf
from .plugin import *
from .pdmlhandler import PdmlHandler
def _add_common_arguments(argparser):
    """Register the CLI flags shared by pdml2flow and pdml2frame."""
    argparser.add_argument(
        '-s',
        dest='EXTRACT_SHOW',
        action='store_true',
        help='Extract show names, every data leaf will now look like {{ raw : [] , show: [] }} [default: {}]'.format(
            Conf.EXTRACT_SHOW
        )
    )
    argparser.add_argument(
        '-d',
        dest='DEBUG',
        action='store_true',
        help='Debug mode [default: {}]'.format(
            Conf.DEBUG
        )
    )
def pdml2flow():
    """CLI entry point: aggregate wireshark pdml input into flows."""
    def add_arguments_cb(argparser):
        # Flow-specific flags, on top of the common ones added below.
        argparser.add_argument(
            '-f',
            dest='FLOW_DEF_STR',
            action='append',
            help='Fields which define the flow, nesting with: \'{}\' [default: {}]'.format(
                Conf.FLOW_DEF_NESTCHAR, Conf.FLOW_DEF_STR
            )
        )
        argparser.add_argument(
            '-t',
            type=int,
            dest='FLOW_BUFFER_TIME',
            help='Lenght (in seconds) to buffer a flow before writing the packets [default: {}]'.format(
                Conf.FLOW_BUFFER_TIME
            )
        )
        argparser.add_argument(
            '-l',
            type=int,
            dest='DATA_MAXLEN',
            help='Maximum lenght of data in tshark pdml-field [default: {}]'.format(
                Conf.DATA_MAXLEN
            )
        )
        argparser.add_argument(
            '-c',
            dest='COMPRESS_DATA',
            action='store_true',
            help='Removes duplicate data when merging objects, will not preserve order of leaves [default: {}]'.format(
                Conf.COMPRESS_DATA
            )
        )
        argparser.add_argument(
            '-a',
            dest='FRAMES_ARRAY',
            action='store_true',
            help='Instead of merging the frames will append them to an array [default: {}]'.format(
                Conf.FRAMES_ARRAY
            )
        )
        _add_common_arguments(argparser)
    def postprocess_conf_cb(conf):
        """Split each flowdef to a path."""
        if conf['FLOW_DEF_STR'] is not None:
            conf['FLOW_DEF'] = Conf.get_real_paths(
                conf['FLOW_DEF_STR'],
                Conf.FLOW_DEF_NESTCHAR
            )
    Conf.load(
        'Aggregates wireshark pdml to flows',
        add_arguments_cb,
        postprocess_conf_cb
    )
    start_parser()
def pdml2frame():
    """CLI entry point: emit one flow per frame (keyed by frame.number)."""
    def add_arguments_cb(argparser):
        _add_common_arguments(argparser)
    def postprocess_conf_cb(conf):
        # Force per-frame behaviour: no buffering, unlimited data length,
        # and the frame number as the only flow-defining field.
        conf['DATA_MAXLEN'] = sys.maxsize
        conf['FLOW_BUFFER_TIME'] = 0
        conf['FLOW_DEF_STR'] = [ 'frame.number' ]
        conf['FLOW_DEF'] = Conf.get_real_paths(
            conf['FLOW_DEF_STR'],
            Conf.FLOW_DEF_NESTCHAR
        )
    Conf.load(
        'Converts wireshark pdml to frames',
        add_arguments_cb,
        postprocess_conf_cb
    )
    start_parser()
def start_parser():
    """Run the SAX parser over Conf.IN, flushing state on SIGINT or errors."""
    # print config
    for name, value in Conf.get().items():
        debug('{} : {}'.format(name, value))
    handler = PdmlHandler()
    def sigint_handler(sig, frame):
        # Finalize the document on Ctrl-C so buffered flows are written out.
        handler.endDocument()
        sys.exit(0)
    signal(SIGINT, sigint_handler)
    try:
        xml.sax.parse(
            Conf.IN,
            handler
        )
    except xml.sax._exceptions.SAXParseException as e:
        # this might happen when a pdml file is malformed
        warning('Parser returned exception: {}'.format(e))
        handler.endDocument()
def pdml2flow_new_plugin():
    """CLI entry point: scaffold new plugin dirs from the bundled skeleton."""
    def add_arguments_cb(argparser):
        argparser.add_argument(
            'DST',
            type=str,
            nargs='+',
            help='Where to initialize the plugin, basename will become the plugin name'
        )
    Conf.load(
        'Initializes a new plugin',
        add_arguments_cb
    )
    for dst in Conf.DST:
        # The plugin is named after the destination directory.
        plugin_name = path.basename(dst)
        plugin_conf = ConfigParser({
            'plugin_name': plugin_name
        })
        copytree(
            resource_filename(__name__, 'plugin-skeleton'),
            dst,
            ignore=ignore_patterns('__pycache__')
        )
        # Write the plugin config next to the copied skeleton files.
        with open(path.join(dst, Conf.PLUGIN_CONF_NAME), mode='w') as fd:
            plugin_conf.write(fd)
|
Our range of NEW bins!!!
We have a range of new 6 yard crane-tested skips ready to hire. We also have 20 yard crane-certified bins and a range of NEW 12 yard enclosed skips.
|
import copy
from typing import List, Optional
from io import StringIO
import sys
import demistomock as demisto # noqa: E402 lgtm [py/polluting-import]
import urllib3
from CommonServerPython import * # noqa: E402 lgtm [py/polluting-import]
from pycti import OpenCTIApiClient, Identity
# Disable insecure-request warnings emitted by urllib3.
urllib3.disable_warnings()
# Disable info logging from the api.
# NOTE(review): this configures the ROOT logger, so INFO logs from the whole
# process are silenced, not just the API client's -- confirm intended.
logging.getLogger().setLevel(logging.ERROR)
# Maps XSOAR indicator type names (lowercased) to OpenCTI observable types.
XSOAR_TYPES_TO_OPENCTI = {
    'account': "User-Account",
    'domain': "Domain-Name",
    'email': "Email-Addr",
    'file-md5': "StixFile",
    'file-sha1': "StixFile",
    'file-sha256': "StixFile",
    'file': 'StixFile',
    'host': "X-OpenCTI-Hostname",
    'ip': "IPv4-Addr",
    'ipv6': "IPv6-Addr",
    'registry key': "Windows-Registry-Key",
    'url': "Url"
}
# Reverse direction: OpenCTI observable types back to XSOAR indicator types.
OPENCTI_TYPES_TO_XSOAR = {
    "User-Account": 'Account',
    "Domain-Name": 'Domain',
    "Email-Addr": 'Email',
    "StixFile": "File",
    "X-OpenCTI-Hostname": 'Host',
    "IPv4-Addr": 'IP',
    "IPv6-Addr": 'IPv6',
    "Windows-Registry-Key": 'Registry Key',
    "Url": 'URL'
}
# XSOAR field names mapped to their OpenCTI (x_opencti_*) counterparts.
KEY_TO_CTI_NAME = {
    'description': 'x_opencti_description',
    'score': 'x_opencti_score'
}
# File hash indicator types mapped to the OpenCTI simple-observable hash key.
FILE_TYPES = {
    'file-md5': "file.hashes.md5",
    'file-sha1': "file.hashes.sha-1",
    'file-sha256': "file.hashes.sha-256"
}
def label_create(client: OpenCTIApiClient, label_name: Optional[str]):
    """Create a label on the OpenCTI server.

    Args:
        client: OpenCTI Client object
        label_name(str): label name to create

    Returns:
        The label object returned by the API.
    """
    try:
        return client.label.create(value=label_name)
    except Exception as e:
        demisto.error(str(e))
        raise DemistoException("Can't create label.")
def build_indicator_list(indicator_list: List[str]) -> List[str]:
    """Translate XSOAR indicator type names into OpenCTI entity types.

    Args:
        indicator_list: XSOAR indicator types to query for; the special
            value 'ALL' expands to every supported type.

    Returns:
        List of OpenCTI observable type names.
    """
    if 'ALL' in indicator_list:
        # 'ALL' expands to every type supported on XSOAR.
        return ['User-Account', 'Domain-Name', 'Email-Addr', 'StixFile', 'X-OpenCTI-Hostname', 'IPv4-Addr',
                'IPv6-Addr', 'Windows-Registry-Key', 'Url']
    return [XSOAR_TYPES_TO_OPENCTI.get(entry.lower(), entry) for entry in indicator_list]
def reset_last_run():
    """
    Reset the last run from the integration context
    """
    # Clearing the integration context drops the stored pagination cursor,
    # so the next fetch starts from the beginning.
    demisto.setIntegrationContext({})
    return CommandResults(readable_output='Fetch history deleted successfully')
def get_indicators(client: OpenCTIApiClient, indicator_types: List[str], score: List[str] = None,
                   limit: Optional[int] = 500,
                   last_run_id: Optional[str] = None) -> dict:
    """Retrieve observables from the OpenCTI API.

    Args:
        client: OpenCTI Client object.
        indicator_types: List of indicators types to return.
        score: Range of scores to filter by (list of score strings).
        last_run_id: The last id from the previous call, used for pagination.
        limit: the max indicators to fetch.

    Returns:
        dict of indicators, including pagination information.
    """
    query_filters = [{
        'key': 'entity_type',
        'values': build_indicator_list(indicator_types)
    }]
    if score:
        query_filters.append({
            'key': 'x_opencti_score',
            'values': score
        })
    return client.stix_cyber_observable.list(after=last_run_id, first=limit,
                                             withPagination=True, filters=query_filters)
def get_indicators_command(client: OpenCTIApiClient, args: dict) -> CommandResults:
    """ Gets indicator from opencti to readable output
    Args:
        client: OpenCTI Client object
        args: demisto.args()
    Returns:
        readable_output, raw_response
    """
    indicator_types = argToList(args.get("indicator_types"))
    last_run_id = args.get("last_run_id")
    limit = arg_to_number(args.get('limit', 50))
    start = arg_to_number(args.get('score_start', 1))
    # +1 makes the user-supplied score_end inclusive in the range below.
    end = arg_to_number(args.get('score_end', 100)) + 1 # type:ignore
    score = None
    if start or end:
        # The API filter expects scores as a list of string values.
        score = [str(i) for i in range(start, end)] # type:ignore
    raw_response = get_indicators(
        client=client,
        indicator_types=indicator_types,
        limit=limit,
        last_run_id=last_run_id,
        score=score
    )
    # Cursor for the next page; surfaced to the caller via context output.
    last_run = raw_response.get('pagination', {}).get('endCursor') # type: ignore
    if indicators_list := copy.deepcopy(raw_response.get('entities')):
        # Reshape API entities into the XSOAR indicator structure.
        indicators = [{'type': OPENCTI_TYPES_TO_XSOAR.get(indicator['entity_type'], indicator['entity_type']),
                       'value': indicator.get('observable_value'),
                       'id': indicator.get('id'),
                       'createdBy': indicator.get('createdBy').get('id')
                       if indicator.get('createdBy') else None,
                       'score': indicator.get('x_opencti_score'),
                       'description': indicator.get('x_opencti_description'),
                       'labels': [label.get('value') for label in indicator.get('objectLabel')],
                       'marking': [mark.get('definition') for mark in indicator.get('objectMarking')],
                       'externalReferences': indicator.get('externalReferences')
                       }
                      for indicator in indicators_list]
        readable_output = tableToMarkdown('Indicators', indicators,
                                          headers=["type", "value", "id"],
                                          removeNull=True)
        outputs = {
            'OpenCTI.Indicators(val.lastRunID)': {'lastRunID': last_run},
            'OpenCTI.Indicators.IndicatorsList(val.id === obj.id)': indicators
        }
        return CommandResults(
            outputs=outputs,
            readable_output=readable_output,
            raw_response=indicators_list
        )
    else:
        return CommandResults(readable_output='No indicators')
def indicator_delete_command(client: OpenCTIApiClient, args: dict) -> CommandResults:
    """Delete an indicator (observable) from OpenCTI.

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        CommandResults with a confirmation message.
    """
    try:
        client.stix_cyber_observable.delete(id=args.get("id"))
    except Exception as e:
        demisto.error(str(e))
        raise DemistoException("Can't delete indicator.")
    return CommandResults(readable_output='Indicator deleted.')
def indicator_field_update_command(client: OpenCTIApiClient, args: dict) -> CommandResults:
    """ Update indicator field at opencti
    Args:
        client: OpenCTI Client object
        args: demisto.args()
    Returns:
        readable_output, raw_response
    """
    indicator_id = args.get("id")
    # works only with score and description
    # NOTE(review): an unsupported field name raises KeyError here instead of
    # a DemistoException -- confirm callers restrict the 'field' argument.
    key = KEY_TO_CTI_NAME[args.get("field")] # type: ignore
    value = args.get("value")
    try:
        result = client.stix_cyber_observable.update_field(id=indicator_id, key=key, value=value)
    except Exception as e:
        demisto.error(str(e))
        raise DemistoException(f"Can't update indicator with field: {key}.")
    return CommandResults(
        outputs_prefix='OpenCTI.Indicator',
        outputs_key_field='id',
        outputs={'id': result.get('id')},
        readable_output=f'Indicator {indicator_id} updated successfully.',
        raw_response=result
    )
def indicator_create_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """ Create indicator at opencti

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output, raw_response

    Raises:
        DemistoException: if a required observable field is missing or the
            indicator could not be created.
    """
    indicator_type = args.get("type")
    created_by = args.get("created_by")
    marking_id = args.get("marking_id")
    label_id = args.get("label_id")
    external_references_id = args.get("external_references_id")
    description = args.get("description")
    score = arg_to_number(args.get("score", '50'))
    value = args.get("value")
    data = {'type': XSOAR_TYPES_TO_OPENCTI.get(indicator_type.lower(), indicator_type),  # type:ignore
            'value': value}
    # Some observable types additionally store the value under a type-specific key.
    if indicator_type == 'Registry Key':
        data['key'] = value
    if indicator_type == 'Account':
        data['account_login'] = value
    simple_observable_key = None
    simple_observable_value = None
    if 'file' in indicator_type.lower():  # type: ignore
        simple_observable_key = FILE_TYPES.get(indicator_type.lower(), indicator_type)  # type: ignore
        simple_observable_value = value
    # The cti client prints to stdout, so temporarily redirect it. Restore in a
    # finally block: the original code only restored stdout on success, leaving
    # it pointing at the StringIO when create() raised.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        result = client.stix_cyber_observable.create(
            simple_observable_key=simple_observable_key,
            simple_observable_value=simple_observable_value,
            type=indicator_type,
            createdBy=created_by, objectMarking=marking_id,
            objectLabel=label_id, externalReferences=external_references_id,
            simple_observable_description=description,
            x_opencti_score=score, observableData=data
        )
    except KeyError as e:
        raise DemistoException(f'Missing argument at data {e}')
    finally:
        sys.stdout = old_stdout
    if id := result.get('id'):
        readable_output = f'Indicator created successfully. New Indicator id: {id}'
        outputs = {
            'id': result.get('id'),
            'value': value,
            'type': indicator_type
        }
    else:
        raise DemistoException("Can't create indicator.")
    return CommandResults(
        outputs_prefix='OpenCTI.Indicator',
        outputs_key_field='id',
        outputs=outputs,
        readable_output=readable_output,
        raw_response=result
    )
def indicator_add_marking(client: OpenCTIApiClient, id: Optional[str], value: Optional[str]):
    """Attach a marking definition to an indicator at OpenCTI.

    Args:
        client: OpenCTI Client object
        id(str): indicator id to update
        value(str): marking name to add

    Returns:
        true if added successfully, else false.

    Raises:
        DemistoException: if the OpenCTI API call fails.
    """
    try:
        return client.stix_cyber_observable.add_marking_definition(id=id, marking_definition_id=value)
    except Exception as ex:
        demisto.error(str(ex))
        raise DemistoException("Can't add marking to indicator.")
def indicator_add_label(client: OpenCTIApiClient, id: Optional[str], value: Optional[str]):
    """Attach a label to an indicator at OpenCTI.

    Args:
        client: OpenCTI Client object
        id(str): indicator id to update
        value(str): label name to add

    Returns:
        true if added successfully, else false.

    Raises:
        DemistoException: if the OpenCTI API call fails.
    """
    try:
        return client.stix_cyber_observable.add_label(id=id, label_id=value)
    except Exception as ex:
        demisto.error(str(ex))
        raise DemistoException("Can't add label to indicator.")
def indicator_field_add_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """ Add indicator marking or label to opencti

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output describing success or failure.
    """
    indicator_id = args.get("id")
    # Only 'marking' and 'label' fields are supported.
    key = args.get("field")
    value = args.get("value")
    result = {}
    if key == 'marking':
        result = indicator_add_marking(client=client, id=indicator_id, value=value)
    elif key == 'label':
        result = indicator_add_label(client=client, id=indicator_id, value=value)
    if result:
        return CommandResults(readable_output=f'Added {key} successfully.')
    else:
        # Fixed typo in the user-facing message: "Cant" -> "Can't"
        # (consistent with every other error message in this integration).
        return CommandResults(readable_output=f"Can't add {key} to indicator.")
def indicator_remove_label(client: OpenCTIApiClient, id: Optional[str], value: Optional[str]):
    """Detach a label from an indicator at OpenCTI.

    Args:
        client: OpenCTI Client object
        id(str): indicator id to update
        value(str): label name to remove

    Returns:
        true if removed successfully, else false.

    Raises:
        DemistoException: if the OpenCTI API call fails.
    """
    try:
        return client.stix_cyber_observable.remove_label(id=id, label_id=value)
    except Exception as ex:
        demisto.error(str(ex))
        raise DemistoException("Can't remove label from indicator.")
def indicator_remove_marking(client: OpenCTIApiClient, id: Optional[str], value: Optional[str]):
    """Detach a marking definition from an indicator at OpenCTI.

    Args:
        client: OpenCTI Client object
        id(str): indicator id to update
        value(str): marking name to remove

    Returns:
        true if removed successfully, else false.

    Raises:
        DemistoException: if the OpenCTI API call fails.
    """
    try:
        return client.stix_cyber_observable.remove_marking_definition(id=id, marking_definition_id=value)
    except Exception as ex:
        demisto.error(str(ex))
        raise DemistoException("Can't remove marking from indicator.")
def indicator_field_remove_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """Remove an indicator marking or label at OpenCTI.

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output

    Raises:
        DemistoException: if the field could not be removed.
    """
    indicator_id = args.get("id")
    # Only 'marking' and 'label' fields are supported.
    field = args.get("field")
    value = args.get("value")
    handlers = {
        'marking': indicator_remove_marking,
        'label': indicator_remove_label,
    }
    handler = handlers.get(field)
    result = handler(client=client, id=indicator_id, value=value) if handler else {}
    if not result:
        raise DemistoException(f"Can't remove {field}.")
    return CommandResults(
        readable_output=f'{field}: {value} was removed successfully from indicator: {indicator_id}.')
def organization_list_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """Get the organizations list from OpenCTI.

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output, raw_response
    """
    limit = arg_to_number(args.get('limit', '50'))
    cursor = args.get('last_run_id')
    response = client.identity.list(types='Organization', first=limit, after=cursor, withPagination=True)
    if not response:
        return CommandResults(readable_output='No organizations')
    # Remember the pagination cursor so a follow-up call can resume from here.
    new_last_run = response.get('pagination').get('endCursor')
    organizations = [{'name': entity.get('name'), 'id': entity.get('id')}
                     for entity in response.get('entities')]
    table = tableToMarkdown('Organizations', organizations, headers=['name', 'id'],
                            headerTransform=pascalToSpace)
    return CommandResults(
        outputs={
            'OpenCTI.Organizations(val.organizationsLastRun)': {'organizationsLastRun': new_last_run},
            'OpenCTI.Organizations.OrganizationsList(val.id === obj.id)': organizations
        },
        readable_output=table,
        raw_response=response
    )
def organization_create_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """ Create organization at opencti

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output, raw_response

    Raises:
        DemistoException: if the organization could not be created.
    """
    name = args.get("name")
    description = args.get("description")
    reliability = args.get('reliability')
    try:
        identity = Identity(client)
        result = identity.create(name=name, type='Organization', x_opencti_reliability=reliability,
                                 description=description)
    except Exception as e:
        demisto.error(str(e))
        # Fixed copy-pasted error text: this path previously raised
        # "Can't remove label from indicator.", which is misleading here.
        raise DemistoException("Can't create organization.")
    if organization_id := result.get('id'):
        readable_output = f'Organization {name} was created successfully with id: {organization_id}.'
        return CommandResults(outputs_prefix='OpenCTI.Organization',
                              outputs_key_field='id',
                              outputs={'id': result.get('id')},
                              readable_output=readable_output,
                              raw_response=result)
    else:
        raise DemistoException("Can't create organization.")
def label_list_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """Get the label list from OpenCTI.

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output, raw_response
    """
    limit = arg_to_number(args.get('limit', '50'))
    cursor = args.get('last_run_id')
    response = client.label.list(first=limit, after=cursor, withPagination=True)
    if not response:
        return CommandResults(readable_output='No labels')
    # Remember the pagination cursor so a follow-up call can resume from here.
    new_last_run = response.get('pagination').get('endCursor')
    labels = [{'value': entity.get('value'), 'id': entity.get('id')}
              for entity in response.get('entities')]
    table = tableToMarkdown('Labels', labels, headers=['value', 'id'],
                            headerTransform=pascalToSpace)
    return CommandResults(
        outputs={
            'OpenCTI.Labels(val.labelsLastRun)': {'labelsLastRun': new_last_run},
            'OpenCTI.Labels.LabelsList(val.id === obj.id)': labels
        },
        readable_output=table,
        raw_response=response
    )
def label_create_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """Create a label at OpenCTI.

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output, raw_response

    Raises:
        DemistoException: if the label could not be created.
    """
    name = args.get("name")
    result = label_create(client=client, label_name=name)
    label_id = result.get('id')
    if not label_id:
        raise DemistoException("Can't create label.")
    return CommandResults(outputs_prefix='OpenCTI.Label',
                          outputs_key_field='id',
                          outputs={'id': result.get('id')},
                          readable_output=f'Label {name} was created successfully with id: {label_id}.',
                          raw_response=result)
def external_reference_create_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """Create an external reference at OpenCTI.

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output, raw_response

    Raises:
        DemistoException: if the external reference could not be created.
    """
    source_name = args.get('source_name')
    url = args.get('url')
    result = client.external_reference.create(
        source_name=source_name,
        url=url
    )
    reference_id = result.get('id')
    if not reference_id:
        raise DemistoException("Can't create external reference.")
    readable_output = f'Reference {source_name} was created successfully with id: ' \
                      f'{reference_id}.'
    return CommandResults(outputs_prefix='OpenCTI.externalReference',
                          outputs_key_field='id',
                          outputs={'id': result.get('id')},
                          readable_output=readable_output,
                          raw_response=result)
def marking_list_command(client: OpenCTIApiClient, args: Dict[str, str]) -> CommandResults:
    """Get the marking-definition list from OpenCTI.

    Args:
        client: OpenCTI Client object
        args: demisto.args()

    Returns:
        readable_output, raw_response
    """
    limit = arg_to_number(args.get('limit', '50'))
    cursor = args.get('last_run_id')
    response = client.marking_definition.list(first=limit, after=cursor, withPagination=True)
    if not response:
        return CommandResults(readable_output='No markings')
    # Remember the pagination cursor so a follow-up call can resume from here.
    new_last_run = response.get('pagination').get('endCursor')
    markings = [{'value': entity.get('definition'), 'id': entity.get('id')}
                for entity in response.get('entities')]
    table = tableToMarkdown('Markings', markings, headers=['value', 'id'],
                            headerTransform=pascalToSpace)
    return CommandResults(
        outputs={
            'OpenCTI.MarkingDefinitions(val.markingsLastRun)': {'markingsLastRun': new_last_run},
            'OpenCTI.MarkingDefinitions.MarkingDefinitionsList(val.id === obj.id)': markings
        },
        readable_output=table,
        raw_response=response
    )
def main():
    """Entry point: read integration params and dispatch the invoked command."""
    params = demisto.params()
    args = demisto.args()
    credentials = params.get('credentials', {})
    api_key = credentials.get('password')
    base_url = params.get('base_url').strip('/')
    # All command handlers share the same (client, args) -> CommandResults
    # signature, so a dispatch table replaces the long if/elif chain.
    command_handlers = {
        "opencti-get-indicators": get_indicators_command,
        "opencti-indicator-delete": indicator_delete_command,
        "opencti-indicator-field-update": indicator_field_update_command,
        "opencti-indicator-create": indicator_create_command,
        "opencti-indicator-field-add": indicator_field_add_command,
        "opencti-indicator-field-remove": indicator_field_remove_command,
        "opencti-organization-list": organization_list_command,
        "opencti-organization-create": organization_create_command,
        "opencti-label-list": label_list_command,
        "opencti-label-create": label_create_command,
        "opencti-external-reference-create": external_reference_create_command,
        "opencti-marking-definition-list": marking_list_command,
    }
    try:
        client = OpenCTIApiClient(base_url, api_key, ssl_verify=params.get('insecure'), log_level='error')
        command = demisto.command()
        demisto.info(f"Command being called is {command}")
        if command == "test-module":
            # When setting up an OpenCTI Client it is checked that it is valid
            # and allows requests to be sent; if not it immediately errors.
            get_indicators_command(client, args)
            return_results('ok')
        elif command in command_handlers:
            return_results(command_handlers[command](client, args))
    except Exception as e:
        demisto.error(traceback.format_exc())  # print the traceback
        return_error(f"Error:\n [{e}]")
# Run main() both when executed directly and when loaded by the XSOAR engine,
# which imports integrations under the builtins module name.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
|
We spent a lovely January evening in Brighton recording a live set for Sofar Sounds, the purveyors of fine house concerts. And then I did one myself in London about a week or so later. Here are the results.
What else? Tom Ravenscroft played our song, Obituaries. Thanks Tom!
|
#!/usr/bin/env python
#
# read and plot audio file
#
import numpy as np
import matplotlib.pyplot as plt;
import struct
import sys, os
import argparse
import scipy.signal
import ipdb
from scipy.io import wavfile
def plot_wave(t, s):
    """Plot signal s over time axis t; 2-D input gets one subplot per channel."""
    if s.ndim > 1:
        n_channels = s.shape[1]
        for ch in range(n_channels):
            plt.subplot(n_channels, 1, ch + 1)
            plt.plot(t, s[:, ch])
    else:
        plt.plot(t, s)
    plt.show()
def fft_correlate(a, b):
    """Circular cross-correlation of a and b along axis 0, computed via the FFT.

    cc[k] = sum_i a[i] * b[(i + k) mod N], so the argmax of cc gives the
    shift of b relative to a. Only the real part is returned; inputs are
    expected to be real-valued signals.
    """
    af = np.fft.fft(a, axis=0)
    bf = np.fft.fft(b, axis=0)
    ft = np.multiply(np.conj(af), bf)
    # Invert along axis 0 explicitly: np.fft.ifft defaults to the LAST axis,
    # which silently mismatched the forward transforms for 2-D (multi-channel)
    # input. 1-D behavior is unchanged.
    cc = np.real(np.fft.ifft(ft, axis=0))
    return cc
def read_file(f):
ffmt = struct.unpack('i', f.read(struct.calcsize('i')))
depth = f.read(5)
sz = struct.unpack('2i', f.read(struct.calcsize('2i')))
# t_start: time at which the rosbag started
# t1: time at which first audio packet was received
# t0: time at which first sample must have been generated
# dt: delta time between samples, matched to recording
# t = t0 + idx * dt = best rostime estimate
# t - t_start = best estimate for time since start of bag recording
rate, t_start, t0, t1, dt = struct.unpack('5f', f.read(struct.calcsize('5f')))
print "bag start: %f, rate: %f, depth: %s, number of samples: %d, number of channels: %d" % (t_start, rate, depth, sz[0], sz[1])
ts = t0 + np.arange(0,sz[0]) * dt
dtype = np.int16 if depth == 'int16' else np.int32
smpl = np.fromfile(f,dtype=dtype)
samples = np.reshape(smpl, sz)
t = t0 + np.arange(0, sz[0]) * dt - t_start; # gives best estimate
return t, rate, samples
if __name__ == '__main__':
    # Locate a known audio pattern (the "song") inside a rosbag audio recording
    # by circular cross-correlation, then plot the aligned signals.
    parser = argparse.ArgumentParser(description='find audio onset.')
    parser.add_argument('--start', '-s', action='store', default=0.0, type=float,
                        help='start time')
    parser.add_argument('--end', '-e', action='store', default=1e30, type=float,
                        help='end time')
    parser.add_argument('--song', action='store', default='song.wav', help = 'wav file with pattern to search for')
    parser.add_argument('--file', action='store', required=True, help = 'audio.dat file generated from rosbag')
    parser.add_argument('--songstart', action='store', default=0, type=float, help = 'start time of call within song file')
    args = parser.parse_args()
    f = open(args.file)
    t, rate_recording, recording = read_file(f)
    rate_song, song = wavfile.read(args.song)
    #song = np.pad(song, (0,int(rate_recording*20)), 'constant', constant_values=(0,0)) # XXX
    # average channels of recording
    recording = np.sum(recording, axis=1) # add across channels to get average
    # resample song to recording frequency
    song = scipy.signal.resample(song, int(song.shape[0] * rate_recording / rate_song))
    # pad whichever is shorter with zeros so both signals have equal length,
    # as required by the FFT-based circular correlation
    num_pad = abs(recording.shape[0] - song.shape[0])
    if (song.shape[0] < recording.shape[0]):
        print "padding %d zero samples to song" % num_pad
        song = np.expand_dims(np.pad(song, (0,num_pad), 'constant', constant_values=(0,0)), -1)
    if (recording.shape[0] < song.shape[0]):
        print "padding %d zero samples to recording" % num_pad
        recording = np.pad(recording, (0,num_pad), 'constant', constant_values=(0,0))
    # expand dimensions so we can stack it
    #song = np.expand_dims(song, axis=-1)
    print 'recording rate: %d, song rate: %d' % (rate_recording, rate_song)
    recording_exp = np.expand_dims(recording, axis=-1)
    #cc = fft_correlate(song[:,0], samples[:,0]) # use this for individual channels
    # the index of the correlation peak is the sample offset of the song
    # within the recording
    cc = fft_correlate(song[:,0], recording)
    amax = np.argmax(cc, axis=0) # find maximum correlation
    song_rolled = np.roll(song, amax)  # song aligned to the recording (used in commented-out plots below)
    # restrict plotting to the user-requested [start, end] time window
    idx = np.where((t >= args.start) & (t <= args.end))
    t_ss = args.songstart + amax / rate_recording
    # wrap around if the estimated start time falls past the end of the
    # (circularly shifted) song
    if t_ss > song.shape[0] / rate_recording:
        t_ss = t_ss - song.shape[0] / rate_recording
    print 'max correlation roll: %d, song start: %f' % (amax, t_ss)
    # stacked = np.hstack((song_rolled[idx[0],:], song[idx[0],:], cc[idx[0],:]))
    # stacked = np.hstack((song_rolled[idx[0],:], recording_exp[idx[0],:]))
    stacked = np.hstack((song[idx[0],:], recording_exp[idx[0],:], np.expand_dims(cc[idx[0]],-1)))
    plot_wave(t[idx[0]], stacked)
|
A significant ridge of high pressure will settle over the eastern two-thirds of the United States to start the new week. High pressure indicates sinking air, which makes it difficult for clouds to form, thus limiting the development of precipitation. This translates to very beautiful weather. Pressures will max out at 1035 millibars over New England, which means that the bubble of high pressure is very strong.
High pressure spins clockwise, so on the eastern side of the high pressure there will be a northerly component to the wind. Up north over Canada is where the cold air is locked in, so some of that cooler air will be drawn into the eastern third of the country on Monday. As the high pressure center gradually shifts to the east, the northerly flow will become cut-off. Therefore Tuesday will be milder, with temperatures of 5 degrees above average for the Eastern seaboard. By Wednesday, the high pressure will move offshore. Winds will shift to the south, leading to a warmer flow. Temperatures on Wednesday will be nice and mild, ranging from 5 to 15 degrees above normal for the large majority of the nation.
If you’re a fan of the warmer temperatures, enjoy this week’s weather, because changes are coming fast. I discuss the changes for mid-December here.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.