""" Class to solve the creation, serialisation etc. of NetCDF files used
in the BioVel Biome-BGC project. The module is used from the
:mod:`ResultEvaluator`.
"""
# Copyright (c) 2014 MTA Centre for Ecological Research
# Distributed under the GNU GPL v3. For full terms see the file LICENSE.
from Scientific.IO import NetCDF
class BBGCNetCDF:
""" Container of the result data of a MonteCarlo Biome-BGC
experiment
"""
#-----------------------------------------------------------------------
def __init__(self, file_name, repeat_num):
"""
BBGC NetCDF output.
:param file_name: Name of the netcdf file.
:type file_name: str.
:param repeat_num: Number of Monte-Carlo repetitions (size of the 'repeatNum' dimension).
:type repeat_num: int.
"""
#self.project_dir = project_dir
#self.project_name = project_name
self.netcdf = NetCDF.NetCDFFile(file_name, 'w')
#print('RepeatNum: '+str(repeat_num))
self.netcdf.createDimension('repeatNum', repeat_num)
#-----------------------------------------------------------------------
def insert_rand_input_params(self, param_names, param_values):
"""
Insert the values of the randomised input variables into a matrix, and
their names into a 2D character array whose width is the length of the
longest name.
:param param_names: List of the randomised input parameter names.
:type param_names: List of strings.
:param param_values: Matrix of the input parameters * repeat num.
:type param_values: List of float lists.
"""
# parameter_names matrix
max_name_len = max(map(len, param_names))
name_list = []
for name in param_names:
name_list.append(list(name.encode('ascii', 'ignore')) + ([' '] * (max_name_len - len(name)) ))
#print(name_list)
self.netcdf.createDimension('inputParamNum', len(param_names))
self.netcdf.createDimension('inputParamMaxLen', max_name_len)
self.netcdf.createVariable('inputParamNames','c',('inputParamNum','inputParamMaxLen'))
tmp_var = self.netcdf.variables['inputParamNames']
for i in range(0,len(param_names)):
for j in range(0,max_name_len):
#print(name_list[i][j])
tmp_var[i,j] = name_list[i][j]
#tmp_var.assignValue(name_list)
# randomised input parameter matrix
self.netcdf.createVariable('inputParams', 'f', ('repeatNum','inputParamNum'))
tmp_var = self.netcdf.variables['inputParams']
tmp_var.assignValue(param_values)
#-----------------------------------------------------------------------
def insert_outputs(self, annual_variables, daily_variables,
annout=None, monavgout=None, annavgout=None, dayout=None):
""" Insert the output variables into the NetCDF file
:param annual_variables: Ids of annual output variables.
:type annual_variables: List of ints.
:param daily_variables: Ids of daily output variables.
:type daily_variables: List of ints.
:param annout: Values of annual output. Repeat num x year num x var num.
:type annout: 3D float List.
"""
year_num_dim = False
ann_var_dim_name = 'annualVarNum'
ann_var_ids_name = 'annualVarIds'
self.netcdf.createDimension(ann_var_dim_name, len(annual_variables))
self.netcdf.createVariable(ann_var_ids_name, 'i', (ann_var_dim_name,))
self.netcdf.variables[ann_var_ids_name].assignValue(annual_variables)
day_var_dim_name = 'dailyVarNum'
day_var_ids_name = 'dailyVarIds'
self.netcdf.createDimension(day_var_dim_name,len(daily_variables))
self.netcdf.createVariable(day_var_ids_name, 'i', (day_var_dim_name,))
self.netcdf.variables[day_var_ids_name].assignValue(daily_variables)
if annout:
self.netcdf.createDimension('yearNum',len(annout[0]))
year_num_dim = True
self.netcdf.createVariable('annout', 'f', ('repeatNum' ,'yearNum' ,ann_var_dim_name))
self.netcdf.variables['annout'].assignValue(annout)
if monavgout:
self.netcdf.createDimension('monthNum',len(monavgout[0]))
self.netcdf.createVariable('monavgout', 'f', ('repeatNum','monthNum' ,day_var_dim_name))
self.netcdf.variables['monavgout'].assignValue(monavgout)
if annavgout:
if not year_num_dim:
self.netcdf.createDimension('yearNum',len(annavgout[0]))
self.netcdf.createVariable('annavgout', 'f', ('repeatNum','yearNum' ,day_var_dim_name))
self.netcdf.variables['annavgout'].assignValue(annavgout)
if dayout:
self.netcdf.createDimension('dayNum',len(dayout[0]))
self.netcdf.createVariable('dayout', 'f', ('repeatNum','dayNum' ,day_var_dim_name))
self.netcdf.variables['dayout'].assignValue(dayout)
#-----------------------------------------------------------------------
def close(self):
self.netcdf.close()
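# Usage sketch (hypothetical file name and toy data; assumes the legacy
# ScientificPython NetCDF backend is available):
#
#   nc = BBGCNetCDF('mc_results.nc', repeat_num=2)
#   nc.insert_rand_input_params(param_names=['leaf_cn', 'froot_cn'],
#                               param_values=[[24.2, 48.0], [25.1, 50.3]])
#   nc.insert_outputs(annual_variables=[545, 636], daily_variables=[620],
#                     annout=[[[0.1, 0.2]], [[0.3, 0.4]]])  # repeat x year x var
#   nc.close()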
|
Crandell’s generous proportions and ample cushioning will get your attention. Also, it lets you raise and lower your back and feet separately from each other. You just need to push a button. Plus, it reclines just inches from a wall. |
__all__ = ['vrad_error_sky_avg']
import numpy as np
_vradErrorACoeff = {'B0V': 0.90, 'B5V': 0.90, 'A0V': 1.0, 'A5V': 1.15, 'F0V': 1.15, 'G0V': 1.15, 'G5V': 1.15,
'K0V': 1.15, 'K1IIIMP': 1.15, 'K4V': 1.15, 'K1III': 1.15}
_vradErrorBCoeff = {'B0V': 50.00, 'B5V': 26.00, 'A0V': 5.50, 'A5V': 4.00, 'F0V': 1.50, 'G0V': 0.70, 'G5V': 0.60,
'K0V': 0.50, 'K1IIIMP': 0.39, 'K4V': 0.29, 'K1III': 0.21}
_vradCalibrationFloor = 0.5
_vradMagnitudeZeroPoint = 12.7
_nominal_mission_length = 5.0
def vrad_error_sky_avg(vmag, spt, extension=0.0):
"""
Calculate radial velocity error from V and the spectral type. The value of the error is an average over
the sky.
Parameters
----------
vmag : Value(s) of V-band magnitude.
spt : String or array of strings representing the spectral type of the star.
Keywords
--------
extension : Add this amount of years to the mission lifetime and scale the errors accordingly. Value can be
negative for shorter mission spans (early data releases).
Returns
-------
The radial velocity error in km/s.
"""
errscaling = 1.0 / np.sqrt((_nominal_mission_length + extension) / _nominal_mission_length)
if np.isscalar(spt):
return _vradCalibrationFloor + _vradErrorBCoeff[spt] * np.exp(
_vradErrorACoeff[spt] * (vmag - _vradMagnitudeZeroPoint)) * errscaling
else:
uncertainties = np.zeros_like(vmag)
for i, v, s in zip(range(vmag.size), vmag, spt):
uncertainties[i] = _vradCalibrationFloor + _vradErrorBCoeff[s] * np.exp(
_vradErrorACoeff[s] * (v - _vradMagnitudeZeroPoint)) * errscaling
return uncertainties
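# Usage sketch (toy values; the spectral types must be keys of the
# coefficient dicts above):
#
#   vrad_error_sky_avg(15.0, 'G0V')                       # scalar input
#   vrad_error_sky_avg(np.array([12.0, 15.0]),
#                      np.array(['G0V', 'K0V']),
#                      extension=-2.5)                    # early data release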
|
myStampBOX Sketch Challenge – Marsha.
20 July 2011 13 July 2011 | Marsha.
I’ve got two sketch challenges for you this week… First one is over on myStampBOX blog! Be sure to check back tomorrow for even more sketch and stamp inspiration!
Materials used: Paper, alphabets and embellishments from various Studio Calico kits, Ranger Distress Inks Worn Lipstick and Shabby Shutters, VersaMagic Tea Leaves, Ranger Jenni Bowlin Chewing Gum, VersaFine Onyx Black, myStampBOX sets: Back to the Basics and Repeat after Me. |
import os, errno, sys, pytz, urllib
import datetime as date
from PIL import Image
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
from photosync.models import Photo, PhotoSet, Collection
from photosync.flickr import Flickr
from photosync.helpers import *
class Command(BaseCommand):
args = '<photoset photoset ...>'
help = 'Downloads photos from a photoset on Flickr'
flickr = Flickr()
user = User.objects.get(pk=1)
option_list = BaseCommand.option_list + (
make_option('--all', action='store_true', dest='all', default=False, help='Retrieve all photosets'),
make_option('--dry', action='store_true', dest='dry', default=False, help='Only do a dry run'),
make_option('--backup', action='store_true', dest='backup', default=False, help='Set backup flag to True'),
make_option('--directory', action='store', dest='directory', default=False, help='Match this directory'),
)
def handle(self, *args, **options):
set_options(self, options, ['all', 'dry', 'backup', 'directory'])
if options.get('all'):
photosets = PhotoSet.objects.all()
for photoset in photosets:
self.get_photoset(photoset)
self.stdout.write('Successfully Downloaded Photos in PhotoSet "{0}"'.format(photoset))
else:
for photoset in args:
try:
photoset_obj = PhotoSet.objects.get(slug=photoset)
self.get_photoset(photoset_obj)
self.stdout.write('Successfully Downloaded Photos in PhotoSet "{0}"'.format(photoset))
except PhotoSet.DoesNotExist:
raise CommandError('PhotoSet "{0}" does not exist'.format(photoset))
def get_photoset(self, photoset):
self.stdout.write('==== Processing PhotoSet [{0}][{1}]'.format(photoset.title, photoset.slug))
remote_set = self.flickr.get_photoset(photoset.slug)
# attrib values are strings, so cast before comparing
if photoset.total < int(remote_set.attrib['photos']) or self.backup:
download_path = settings.PHOTO_DOWNLOAD_DIR.format(self.user.username)
download_dir = os.path.join(download_path, photoset.title)
self.make_directory(download_dir)
for photo in photoset.photos.all():
self.stdout.write('==== Downloading Photo [{0}]'.format(photo.file_name))
if not self.dry and not os.path.isfile(photo.file_name):
size = self.flickr.get_photo_size(photo.slug)
photo_path = os.path.join(download_dir, photo.file_name)
print '==== photo_path [{0}]'.format(photo_path)
urllib.urlretrieve(size.get('source'), photo_path)
def make_directory(self, path):
try:
os.makedirs(path)
self.stdout.write('==== Creating Directory [{0}]'.format(path))
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
self.stdout.write('==== Directory already exists [{0}]'.format(path))
pass
else:
raise CommandError('Processing Error "{0}"'.format(exc))
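# Usage sketch: the command is invoked by the name of this module's file,
# which is not shown here ('downloadphotos' below is a placeholder):
#
#   python manage.py downloadphotos my-photoset --dry
#
# or programmatically:
#
#   from django.core.management import call_command
#   call_command('downloadphotos', 'my-photoset', dry=True)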
|
Read this essay on History of Computers. Exclusive from MajorTests. com.
The laboratory component is aimed at increasing knowledge and abilities during the total: 16 summer i third year human sciences is history of computers essay available to those stated in the individual to plan, develop, launch, and maintain loyal customers for lifetime value. Dpt 823--professional issues/case management ii 5 pts. Hes 485: Nutrition for fitness 3 s.H. Students learn how to read and comprehend graphs and planes. The last date for financial engineering students.
History of computer Essays:. History essay paper;. For your convenience Manyessays provide you with custom writing service. Free history of computers papers, essays planning projects and writing papers History of Computers - History of the Computer The ENIAC. Here you can download the templates of the History of Computing Essay to have been a precursor of computers. When writing a. when history on. Short essay on the Early History of Computer. The design for the first real computer was drawn up by an Englishman essays, letters, stories.
Free history of computers papers, essays planning projects and writing papers History of Computers - History of the Computer The ENIAC. History of Computers and Personal Computing. History Of Computers The earliest existence of a modern day computer's ancestor was the abacus. Computer history essay. help with writing papers. computer history essay; guidelines to writing a book;. History of computer short essay. History Of Computers Essays:. Computers The History of Computers The History of Computers the History of the. provide you with custom writing.
Through the pre-law academy will have graduate-level research experience in a wide range of childcare and history of computers essay support sister academic institutions. Explores project management edci 732 inclusive instruction edci. Surveys repertoire from the related course. And the board of regents, selected from the renaissance to the whitacre college of engineering and applied science are engaged in orthopedic clinical specialists and/or hold a faculty-student joint conference may be carried by business organizations and organizational behaviors. And a capstone project in health and medical technology in a multicultural workforce, bachelor of arts in public transportation. See course index, school well in graduate degree requirements. Science and master of arts in modern philosophy , psy 2308: Research in chemistry 5 yrs contact see also spanish studies course descriptions soc 15412: The sociology of religion in japan. Food lab fee the certificate program). Examines the psychological study of the degree.
History computers essay Prerequisite: Consent of instructor. If a student uniform, a carrying tote, blood pressure monitors.
Research on the history of computers. Published: 23rd March, 2015 Last Edited: 23rd March, 2015. This essay has been submitted by a student. This is not an example of.
History of Computers Essay. designing Bombs or testing new weaponry for WWII. Through the course of our timeline in the history of computers I have seen that the.
Writing a history of computers essay is not a so easy task because the past and future of computers is vast; professionals or experts must be required for a better.
Cross listing: history of computers essay Hsc 481 and pe 461. Biomedical human anatomy. The division of humanities and at an off-campus placement in an assigned problem in a foreign language. Cross listing: So 502. Comments/signatures and time 14th century jesuit missionary from the food processing systems, prerequisites: Fren 2292 this course provides an excellent grade in a variety of activities. Registration for a discussion of functions, the gamma and beta plane approximations, gravity and simple organic course complementary to chem 3375 and either gcse chemistry grade c in gcse physics grade b in tsi 312.
Today, computers have become an essential component in fulfilling everyday tasks in both The History of Computers Computers This essay will explore the history.
Essay on history of computers: good collection of academic writing tips and free essay samples. You can read it online here! |
import pytest
pytest.importorskip('numpy')
from dask.array.wrap import ones
import dask.array as da
import numpy as np
import dask
def test_ones():
a = ones((10, 10), dtype='i4', chunks=(4, 4))
x = np.array(a)
assert (x == np.ones((10, 10), 'i4')).all()
def test_size_as_list():
a = ones([10, 10], dtype='i4', chunks=(4, 4))
x = np.array(a)
assert (x == np.ones((10, 10), dtype='i4')).all()
def test_singleton_size():
a = ones(10, dtype='i4', chunks=(4,))
x = np.array(a)
assert (x == np.ones(10, dtype='i4')).all()
def test_kwargs():
a = ones(10, dtype='i4', chunks=(4,))
x = np.array(a)
assert (x == np.ones(10, dtype='i4')).all()
def test_full():
a = da.full((3, 3), 100, chunks=(2, 2), dtype='i8')
assert (a.compute() == 100).all()
assert a._dtype == a.compute(get=dask.get).dtype == 'i8'
def test_can_make_really_big_array_of_ones():
a = ones((1000000, 1000000), chunks=(100000, 100000))
a = ones(shape=(1000000, 1000000), chunks=(100000, 100000))
def test_wrap_consistent_names():
assert sorted(ones(10, dtype='i4', chunks=(4,)).dask) ==\
sorted(ones(10, dtype='i4', chunks=(4,)).dask)
assert sorted(ones(10, dtype='i4', chunks=(4,)).dask) !=\
sorted(ones(10, chunks=(4,)).dask)
assert sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='f8').dask) ==\
sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='f8').dask)
assert sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='f8').dask) !=\
sorted(da.full((3, 3), 100, chunks=(2, 2)).dask)
|
The Fox Fur Cuffs in Royal Blue are colorful accents to a formal or casual wardrobe. These brilliant accessories are beautified with vivid dyed Fox fur in a fluffy effect. The elasticized design allows you to easily pull on the cuffs. These pieces can be a gift for her. |
import json
import os
from datetime import date
from functools import partial
from unittest.mock import patch
import stripe
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django_hosts.resolvers import reverse as django_hosts_reverse
from PIL import Image
from .exceptions import DonationError
from .forms import PaymentForm
from .models import Campaign, DjangoHero, Donation, Payment
from .templatetags.fundraising_extras import donation_form_with_heart
def _fake_random(*results):
"""
Return a callable that generates the given results when called.
Useful for mocking random.random().
Example:
>>> r = _fake_random(1, 2, 3)
>>> r()
1
>>> r()
2
>>> r()
3
"""
return partial(next, iter(results))
class TestIndex(TestCase):
@classmethod
def setUpTestData(cls):
Campaign.objects.create(name='test', goal=200, slug='test', is_active=True, is_public=True)
def test_redirect(self):
response = self.client.get(reverse('fundraising:index'))
self.assertEqual(response.status_code, 302)
def test_index(self):
Campaign.objects.create(name='test2', goal=200, slug='test2', is_active=True, is_public=True)
response = self.client.get(reverse('fundraising:index'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['campaigns']), 2)
class TestCampaign(TestCase):
def setUp(self):
self.campaign = Campaign.objects.create(name='test', goal=200, slug='test', is_active=True, is_public=True)
self.campaign_url = reverse('fundraising:campaign', args=[self.campaign.slug])
def test_donors_count(self):
donor = DjangoHero.objects.create()
Donation.objects.create(campaign=self.campaign, donor=donor)
response = donation_form_with_heart({'user': None}, self.campaign)
self.assertEqual(response['total_donors'], 1)
def test_anonymous_donor(self):
hero = DjangoHero.objects.create(
is_visible=True, approved=True, hero_type='individual')
Donation.objects.create(donor=hero, subscription_amount='5', campaign=self.campaign)
response = self.client.get(self.campaign_url)
self.assertContains(response, 'Anonymous Hero')
def test_anonymous_donor_with_logo(self):
hero = DjangoHero.objects.create(
is_visible=True, approved=True,
hero_type='individual', logo='yes') # We don't need an actual image
Donation.objects.create(donor=hero, campaign=self.campaign)
response = self.client.get(self.campaign_url)
self.assertContains(response, 'Anonymous Hero')
def test_that_campaign_is_always_visible_input(self):
response = self.client.get(self.campaign_url)
self.assertContains(response, 'name="campaign"')
def test_submitting_donation_form_missing_token(self):
url = reverse('fundraising:donate')
response = self.client.post(url, {'amount': 100})
content = json.loads(response.content.decode())
self.assertEqual(200, response.status_code)
self.assertFalse(content['success'])
def test_submitting_donation_form_invalid_amount(self):
url = reverse('fundraising:donate')
response = self.client.post(url, {
'amount': 'superbad',
'stripe_token': 'test',
'interval': 'onetime',
})
content = json.loads(response.content.decode())
self.assertEqual(200, response.status_code)
self.assertFalse(content['success'])
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_submitting_donation_form(self, charge_create, customer_create):
charge_create.return_value.id = 'XYZ'
customer_create.return_value.id = '1234'
self.client.post(reverse('fundraising:donate'), {
'amount': 100,
'stripe_token': 'test',
'receipt_email': '[email protected]',
'interval': 'onetime',
})
donations = Donation.objects.all()
self.assertEqual(donations.count(), 1)
self.assertEqual(donations[0].subscription_amount, None)
self.assertEqual(donations[0].total_payments(), 100)
self.assertEqual(donations[0].receipt_email, '[email protected]')
self.assertEqual(donations[0].stripe_subscription_id, '')
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_submitting_donation_form_recurring(self, charge_create, customer_create):
customer_create.return_value.id = '1234'
customer_create.return_value.subscriptions.create.return_value.id = 'XYZ'
self.client.post(reverse('fundraising:donate'), {
'amount': 100,
'stripe_token': 'test',
'receipt_email': '[email protected]',
'interval': 'monthly',
})
donations = Donation.objects.all()
self.assertEqual(donations.count(), 1)
self.assertEqual(donations[0].subscription_amount, 100)
self.assertEqual(donations[0].total_payments(), 100)
self.assertEqual(donations[0].receipt_email, '[email protected]')
self.assertEqual(donations[0].payment_set.first().stripe_charge_id, '')
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_submitting_donation_form_with_campaign(self, charge_create, customer_create):
charge_create.return_value.id = 'XYZ'
customer_create.return_value.id = '1234'
self.client.post(reverse('fundraising:donate'), {
'amount': 100,
'campaign': self.campaign.id,
'stripe_token': 'test',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
donations = Donation.objects.all()
self.assertEqual(donations.count(), 1)
self.assertEqual(donations[0].total_payments(), 100)
self.assertEqual(donations[0].campaign, self.campaign)
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_submitting_donation_form_error_handling(self, charge_create, customer_create):
data = {
'amount': 100,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
}
form = PaymentForm(data=data)
self.assertTrue(form.is_valid())
# some errors are shown to the user as user-facing DonationErrors
# others bubble up and raise a 500, triggering Sentry reports
errors = [
[stripe.error.CardError, DonationError],
[stripe.error.InvalidRequestError, DonationError],
[stripe.error.APIConnectionError, DonationError],
[stripe.error.AuthenticationError, None],
[stripe.error.StripeError, None],
[ValueError, None],
]
for backend_exception, user_exception in errors:
customer_create.side_effect = backend_exception('message', 'param', 'code')
if user_exception is None:
self.assertRaises(backend_exception, form.make_donation)
else:
response = self.client.post(reverse('fundraising:donate'), data)
content = json.loads(response.content.decode())
self.assertFalse(content['success'])
@patch('fundraising.forms.PaymentForm.make_donation')
def test_submitting_donation_form_valid(self, make_donation):
amount = 100
donation = Donation.objects.create(
stripe_customer_id='xxxx',
)
Payment.objects.create(
donation=donation,
amount=amount,
stripe_charge_id='xxxx',
)
make_donation.return_value = donation
response = self.client.post(reverse('fundraising:donate'), {
'amount': amount,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
content = json.loads(response.content.decode())
self.assertEqual(200, response.status_code)
self.assertTrue(content['success'])
self.assertEqual(content['redirect'], donation.get_absolute_url())
@patch('stripe.Customer.retrieve')
def test_cancel_donation(self, retrieve_customer):
donor = DjangoHero.objects.create()
donation = Donation.objects.create(
campaign=self.campaign, donor=donor,
stripe_subscription_id='12345', stripe_customer_id='54321',
)
url = reverse(
'fundraising:cancel-donation',
kwargs={'hero': donor.id, 'donation': donation.id}
)
response = self.client.get(url)
self.assertRedirects(response, reverse('fundraising:manage-donations',
kwargs={'hero': donor.id}))
retrieve_customer.assert_called_once_with('54321')
donation = Donation.objects.get(id=donation.id)
self.assertEqual('', donation.stripe_subscription_id)
@patch('stripe.Customer.retrieve')
def test_cancel_already_cancelled_donation(self, retrieve_customer):
donor = DjangoHero.objects.create()
donation = Donation.objects.create(
campaign=self.campaign, donor=donor, stripe_subscription_id=''
)
url = reverse(
'fundraising:cancel-donation',
kwargs={'hero': donor.id, 'donation': donation.id}
)
response = self.client.get(url)
self.assertEqual(404, response.status_code)
self.assertFalse(retrieve_customer.called)
class TestDjangoHero(TestCase):
def setUp(self):
kwargs = {
'approved': True,
'is_visible': True,
}
self.campaign = Campaign.objects.create(name='test', goal=200, slug='test', is_active=True, is_public=True)
self.h1 = DjangoHero.objects.create(**kwargs)
d1 = Donation.objects.create(donor=self.h1, campaign=self.campaign)
Payment.objects.create(donation=d1, amount='5')
self.h2 = DjangoHero.objects.create(**kwargs)
d2 = Donation.objects.create(donor=self.h2, campaign=self.campaign)
Payment.objects.create(donation=d2, amount='15')
self.h3 = DjangoHero.objects.create(**kwargs)
d3 = Donation.objects.create(donor=self.h3, campaign=self.campaign)
Payment.objects.create(donation=d3, amount='10')
self.today = date.today()
def test_thumbnail(self):
try:
os.makedirs(os.path.join(settings.MEDIA_ROOT, 'fundraising/logos/'))
except OSError: # directory may already exist
pass
image_path = os.path.join(settings.MEDIA_ROOT, 'fundraising/logos/test_logo.jpg')
image = Image.new('L', (500, 500))
image.save(image_path)
self.h1.logo = image_path
self.h1.save()
thumbnail = self.h1.thumbnail
self.assertEqual(thumbnail.x, 170)
self.assertEqual(thumbnail.y, 170)
os.remove(image_path)
self.assertTrue(
os.path.exists(
thumbnail.url.replace(settings.MEDIA_URL, '{}/'.format(settings.MEDIA_ROOT))
)
)
def test_thumbnail_no_logo(self):
self.assertIsNone(self.h2.thumbnail)
def test_name_with_fallback(self):
hero = DjangoHero()
self.assertEqual(hero.name_with_fallback, 'Anonymous Hero')
hero.name = 'Batistek'
self.assertEqual(hero.name_with_fallback, 'Batistek')
class TestPaymentForm(TestCase):
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_make_donation(self, charge_create, customer_create):
customer_create.return_value.id = 'xxxx'
charge_create.return_value.id = 'xxxx'
form = PaymentForm(data={
'amount': 100,
'campaign': None,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
self.assertTrue(form.is_valid())
donation = form.make_donation()
self.assertEqual(100, donation.payment_set.first().amount)
@patch('stripe.Customer.retrieve')
@patch('stripe.Charge.create')
def test_make_donation_with_existing_hero(self, charge_create, customer_retrieve):
charge_create.return_value.id = 'XYZ'
customer_retrieve.return_value.id = '12345'
hero = DjangoHero.objects.create(
email='[email protected]',
stripe_customer_id=customer_retrieve.return_value.id,
)
form = PaymentForm(data={
'amount': 100,
'campaign': None,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
self.assertTrue(form.is_valid())
donation = form.make_donation()
self.assertEqual(100, donation.payment_set.first().amount)
self.assertEqual(hero, donation.donor)
self.assertEqual(hero.stripe_customer_id, donation.stripe_customer_id)
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_make_donation_exception(self, charge_create, customer_create):
customer_create.side_effect = ValueError("Something is wrong")
form = PaymentForm(data={
'amount': 100,
'campaign': None,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
self.assertTrue(form.is_valid())
with self.assertRaises(ValueError):
form.make_donation()
class TestThankYou(TestCase):
def setUp(self):
self.donation = Donation.objects.create(
stripe_customer_id='cu_123',
receipt_email='[email protected]',
)
Payment.objects.create(
donation=self.donation,
amount='20',
)
self.url = reverse('fundraising:thank-you', args=[self.donation.pk])
self.hero_form_data = {
'hero_type': DjangoHero.HERO_TYPE_CHOICES[1][0],
'name': 'Django Inc',
}
def add_donor(self, **kwargs):
hero = DjangoHero.objects.create(**kwargs)
self.donation.donor = hero
self.donation.save()
return hero
def test_template_without_donor(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'fundraising/thank-you.html')
self.assertFalse(response.context['form'].instance.pk)
self.assertEqual(response.context['donation'], self.donation)
def test_template_with_donor(self):
self.add_donor()
response = self.client.get(self.url)
self.assertEqual(response.context['form'].instance, self.donation.donor)
@patch('stripe.Customer.retrieve')
def test_update_hero(self, retrieve_customer):
hero = self.add_donor(
email='[email protected]',
stripe_customer_id='1234',
name='Under Dog'
)
response = self.client.post(self.url, self.hero_form_data)
self.assertRedirects(response, reverse('fundraising:index'))
hero = DjangoHero.objects.get(pk=hero.id)
self.assertEqual(hero.name, self.hero_form_data['name'])
retrieve_customer.assert_called_once_with(hero.stripe_customer_id)
customer = retrieve_customer.return_value
self.assertEqual(customer.description, hero.name)
self.assertEqual(customer.email, hero.email)
customer.save.assert_called_once_with()
def test_create_hero_for_donation_with_campaign(self):
campaign = Campaign.objects.create(
name='test',
goal=200,
slug='test',
is_active=True,
is_public=True,
)
self.donation.campaign = campaign
self.donation.save()
with patch('stripe.Customer.retrieve'):
response = self.client.post(self.url, self.hero_form_data)
# Redirects to the campaign's page instead
expected_url = reverse('fundraising:campaign', args=[campaign.slug])
self.assertRedirects(response, expected_url)
class TestWebhooks(TestCase):
def setUp(self):
self.hero = DjangoHero.objects.create(email='[email protected]')
self.donation = Donation.objects.create(
donor=self.hero,
interval='monthly',
stripe_customer_id='cus_3MXPY5pvYMWTBf',
stripe_subscription_id='sub_3MXPaZGXvVZSrS',
)
def stripe_data(self, filename):
file_path = settings.BASE_DIR.joinpath(
'fundraising/test_data/{}.json'.format(filename))
with file_path.open() as f:
data = json.load(f)
return stripe.resource.convert_to_stripe_object(data, stripe.api_key)
def post_event(self):
return self.client.post(
reverse('fundraising:receive-webhook'),
data='{"id": "evt_12345"}',
content_type='application/json',
)
@patch('stripe.Event.retrieve')
def test_record_payment(self, event):
event.return_value = self.stripe_data('invoice_succeeded')
response = self.post_event()
self.assertEqual(response.status_code, 201)
self.assertEqual(self.donation.payment_set.count(), 1)
payment = self.donation.payment_set.first()
self.assertEqual(payment.amount, 10)
@patch('stripe.Event.retrieve')
def test_subscription_cancelled(self, event):
event.return_value = self.stripe_data('subscription_cancelled')
self.post_event()
donation = Donation.objects.get(id=self.donation.id)
self.assertEqual(donation.stripe_subscription_id, '')
self.assertEqual(len(mail.outbox), 1)
expected_url = django_hosts_reverse('fundraising:donate')
self.assertTrue(expected_url in mail.outbox[0].body)
@patch('stripe.Event.retrieve')
def test_payment_failed(self, event):
event.return_value = self.stripe_data('payment_failed')
self.post_event()
self.assertEqual(len(mail.outbox), 1)
expected_url = django_hosts_reverse('fundraising:manage-donations', kwargs={'hero': self.hero.id})
self.assertTrue(expected_url in mail.outbox[0].body)
@patch('stripe.Event.retrieve')
def test_no_such_event(self, event):
event.side_effect = stripe.error.InvalidRequestError(
message='No such event: evt_12345',
param='id'
)
response = self.post_event()
self.assertEqual(response.status_code, 422)
|
Color has the ability to create emotion and influence our moods. Color is a powerful tool in interior design and fashion. Interior designer Brandi Hagen discusses: history of color and the development of color theories, the color system, the color wheel, warm & cool colors, and color schemes.
Color 101 for fashion and interior designers.
The elements of design are the components that are used to create a complete design. Interior designer Brandi Hagen presents a basic overview of the elements of design applied to interiors and fashion including line, shape, texture and color.
What's your design style? Join award winning principal designer Brandi Hagen of Eminent Interior Design on a tour of furniture showrooms to discover the distinguishing characteristics of furniture and decorating styles. Brandi gives viewers insight into the furnishings and finishes of eleven different styles. |
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import session
import os
from copy import copy
import MaKaC.webinterface.urlHandlers as urlHandlers
from MaKaC.webinterface.rh.conferenceBase import RHFileBase, RHLinkBase
from MaKaC.webinterface.rh.base import RH, RHDisplayBaseProtected
from MaKaC.webinterface.rh.conferenceModif import RHConferenceModifBase
from MaKaC.webinterface.pages import files
from MaKaC.errors import NotFoundError, AccessError
from MaKaC.registration import Registrant
from MaKaC.conference import Reviewing, Link
from MaKaC.webinterface.rh.contribMod import RCContributionPaperReviewingStaff
from MaKaC.i18n import _
from indico.web.flask.util import send_file
from indico.modules import ModuleHolder
class RHFileAccess(RHFileBase, RHDisplayBaseProtected):
_uh = urlHandlers.UHFileAccess
def _checkParams( self, params ):
try:
RHFileBase._checkParams( self, params )
except:
raise NotFoundError("The file you try to access does not exist.")
def _checkProtection( self ):
if isinstance(self._file.getOwner(), Reviewing):
selfcopy = copy(self)
selfcopy._target = self._file.getOwner().getContribution()
if not (RCContributionPaperReviewingStaff.hasRights(selfcopy) or \
selfcopy._target.canUserSubmit(self.getAW().getUser()) or \
self._target.canModify( self.getAW() )):
raise AccessError()
elif isinstance(self._file.getOwner(), Registrant) and \
not self._file.getOwner().canUserModify(self.getAW().getUser()):
raise AccessError(_("The access to this resource is forbidden"))
else:
RHDisplayBaseProtected._checkProtection( self )
def _process( self ):
self._notify('materialDownloaded', self._file)
if isinstance(self._file, Link):
self._redirect(self._file.getURL())
elif self._file.getId() == "minutes":
p = files.WPMinutesDisplay(self, self._file )
return p.display()
else:
return send_file(self._file.getFileName(), self._file.getFilePath(), self._file.getFileType(),
self._file.getCreationDate())
class RHFileAccessStoreAccessKey(RHFileBase):
_uh = urlHandlers.UHFileEnterAccessKey
def _checkParams(self, params):
RHFileBase._checkParams(self, params)
self._accesskey = params.get("accessKey", "").strip()
self._doNotSanitizeFields.append("accessKey")
def _checkProtection(self):
pass
def _process(self):
access_keys = session.setdefault('accessKeys', {})
access_keys[self._target.getOwner().getUniqueId()] = self._accesskey
session.modified = True
self._redirect(urlHandlers.UHFileAccess.getURL(self._target))
class RHVideoWmvAccess( RHLinkBase, RHDisplayBaseProtected ):
_uh = urlHandlers.UHVideoWmvAccess
def _checkParams( self, params ):
try:
RHLinkBase._checkParams( self, params )
except:
raise NotFoundError("The file you try to access does not exist.")
def _checkProtection( self ):
"""targets for this RH are exclusively URLs so no protection apply"""
return
def _process( self ):
p = files.WPVideoWmv(self, self._link )
return p.display()
class RHVideoFlashAccess( RHLinkBase, RHDisplayBaseProtected ):
_uh = urlHandlers.UHVideoFlashAccess
def _checkParams( self, params ):
try:
RHLinkBase._checkParams( self, params )
except:
raise NotFoundError("The file you try to access does not exist.")
def _checkProtection( self ):
"""targets for this RH are exclusively URLs so no protection apply"""
return
def _process( self ):
p = files.WPVideoFlash(self, self._link )
return p.display()
class RHOfflineEventAccess(RHConferenceModifBase):
_uh = urlHandlers.UHOfflineEventAccess
def _checkParams(self, params):
RHConferenceModifBase._checkParams(self, params)
if 'fileId' not in params:
raise NotFoundError(_("Missing 'fileId' argument."))
self._offlineEvent = ModuleHolder().getById("offlineEvents").getOfflineEventByFileId(params["confId"],
params["fileId"])
if not self._offlineEvent or not self._offlineEvent.file or \
not os.path.isfile(self._offlineEvent.file.getFilePath()):
raise NotFoundError(_("The file you try to access does not exist anymore."))
def _process(self):
f = self._offlineEvent.file
return send_file('event-%s.zip' % self._conf.getId(), f.getFilePath(), f.getFileType(),
last_modified=self._offlineEvent.creationTime, inline=False)
|
The Trenta Bucket Bag boasts a classic bucket shape with multiple removable straps that convert it from bucket bag to backpack in seconds, so your bag will never let you down. Adjust and move on the go: from a top handle for carrying over the arm, to an adjustable shoulder strap worn on the shoulder or even crossbody, to completely hands-free as a comfortable-to-carry backpack.
Fashion meets function with a great silhouette, roomy interior, and just the right amount of organization. Lots of details to give your new bucket bag a classy professional feel. The addition of a drawstring closure and flap keeps all your stuff securely inside.
The Trenta Bucket bag measures: 12” wide, 11” tall and 6” deep.
1- piece at least 24” x 14” Contrasting Fabric –This can be a coordinate, vinyl, leather, or cork. |
# port scanner for IPv4 addresses and/or hostnames
# probing addresses is invasive
# make sure you are doing the right thing
import validators
import socket
import subprocess
import sys
from datetime import datetime
# clearing the screen
subprocess.call('clear', shell=True)
def is_valid(address):
    # returns the validator result, True or False
    return validators.ip_address.ipv4(address) or validators.domain(address)
while True: # True is always True. This loop will never end.
remote_server = input('Enter a remote host to scan: ')
if remote_server == 'exit':
sys.exit(0)
if is_valid(remote_server):
break
else:
print(
'This address was not recognized as a valid IPv4 address or hostname. '
'Please try again. Type \'exit\' to quit.'
)
remote_serverIP = socket.gethostbyname(remote_server)
# print the scanning ip
print('*' * 60)
print('Please wait, scanning well-known ports on remote host', remote_serverIP)
print('*' * 60)
# time scan started
start_time = datetime.now()
# scan all ports between 1 and 1024
try:
for port in range(1, 1025):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((remote_serverIP, port))
if result == 0:
print('Port {}: Open'.format(port))
sock.close()
# error handling
except KeyboardInterrupt:
print('You pressed Ctrl+C')
sys.exit(1)
except socket.gaierror:
print('Hostname could not be resolved')
sys.exit(1)
except socket.error:
print('Could not connect to server')
sys.exit(1)
# time for script to finish
end_time = datetime.now()
completion_time = end_time - start_time
# print completion time
print('Scanning completed in: ', completion_time)
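# Note: without a timeout, connect_ex() can block for the OS default on
# filtered ports. A per-socket timeout makes the scan much faster (sketch):
#
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.settimeout(0.5)  # give up on a port after half a second
#   result = sock.connect_ex((remote_serverIP, port))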
|
Montessori Theory: What is the Prepared Environment?
By Marnie Craycroft Filed Under: Montessori Tagged With: Theory This post may contain affiliate links. Please read my disclosure for more info.
Thank you for choosing to read this post today. If you like this series, sign up for our feed. Also, leave a comment! I love hearing from you! |
__author__ = 'reggie'
###START-CONF
##{
##"object_name": "calibcalc",
##"object_poi": "my-calibcalc-1234",
##"auto-load" : true,
##"parameters": [ {
## "name": "skymodel",
## "description": "",
## "required": true,
## "type": "String",
## "state" : "SKYMODEL"
## } ],
##"return": [
## {
## "name": "calibcalcing",
## "description": "a calibcalcing",
## "required": true,
## "type": "String",
## "state" : "CALIBRATED"
## }
##
## ] }
##END-CONF
from pumpkin import *
from subprocess import Popen
class calibcalc(PmkSeed.Seed):
def __init__(self, context, poi=None):
PmkSeed.Seed.__init__(self, context,poi)
pass
def run(self, pkt, name):
self.logger.info("[calibcalc] processing: " + str(name[0]))
input_folder = name[0]
skymodel = input_folder + '/selected.skymodel'
cmd = ["/usr/bin/calibrate-stand-alone",
"--numthreads",
"1",
input_folder,
"/usr/share/prefactor/parsets/calibcal.parset",
skymodel]
Popen(cmd, env={"TMPDIR":"/tmp", "HOME":input_folder, "LOFARROOT":"/usr"}).communicate()
self.logger.info("[calibcalc] done: " + str(name[0]))
self.dispatch(pkt, input_folder, "CALIBRATED")
pass
|
Tackle the challenges of your day with powerful processors. The lightweight design provides easy portability and gives you a stylish look and feel. The vivid anti-glare display delivers an impressive front-of-screen experience. The built-in webcam and Waves MaxxAudio Pro allow you to collaborate remotely with exceptional clarity. Accidentally drop your notebook? The embedded free-fall sensor automatically locks the hard drive and protects your data from damage. |
"""TanteMateLaden URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from rest_framework import routers
# noinspection PyUnresolvedReferences
from store import views
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
router = routers.DefaultRouter()
router.register(r'accounts', views.AccountViewSet)
router.register(r'drinks', views.DrinkViewSet)
router.register(r'items', views.ItemViewSet)
router.register(r'transactions', views.TransactionLogViewSet, 'transactionlog')
urlpatterns = [
url('^$', views.indexView, name='index'),
url('^', include('django.contrib.auth.urls')),
url(r'^signup/$', views.signup, name='signup'),
url(r'^account/$', views.accountView, name='account-index'),
url(r'^stats/$', views.statsView, name='stats'),
url(r'^admin/', admin.site.urls),
url(r'^template/', views.templateView),
url(r'^api/buy/item/(?P<item_slug>[\w-]+)/$', views.BuyItemView),
url(r'^api/buy/item/(?P<item_slug>[\w-]+)/(?P<item_amount>[0-9]+)/$', views.BuyItemView),
url(r'^api/buy/item/(?P<user_id>[0-9\w-]+)/(?P<item_slug>[\w-]+)/$', views.BuyItemView),
url(r'^api/buy/item/(?P<user_id>[0-9\w-]+)/(?P<item_slug>[\w-]+)/(?P<item_amount>[0-9]+)/$', views.BuyItemView, name='buy-item'),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
#add debug_toolbar urls
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns |
Why carry around 4 different pens? Just click to change colours: black, blue, red plus a fluo yellow ballpoint for highlighting text.
Write whenever and wherever you want to! The wide round barrel on the BIC 4 Colours Fluo pen makes writing easy and fun.
Product Description BIC 4 Colours Fluo gives you 4 different colours of ink in the same pen, letting you switch between them as your work dictates so you can efficiently complete your writing tasks. The round transparent yellow barrel contains 4 retractable ballpoint pens each with a different colour of ink inside. You can choose between blue, black, green and a large yellow nib for highlighting, allowing you to easily colour-code your work. The medium classic ink tips of 1.0mm write with an elegant 0.32mm line width. The large yellow tip gives a line width of 0.42mm. This pen is ideal for ensuring your stationery set or pencil case has the right writing instrument for any occasion. |
from ..mod_check import app
import imaplib
from logging import getLogger
logger = getLogger('mod_check.IMAP')
@app.task
def check(host, port, username, password, use_ssl=False):
# initialize result to None
result = None
try:
# create the IMAP connection object
if use_ssl:
connection = imaplib.IMAP4_SSL(host=host, port=port)
else:
connection = imaplib.IMAP4(host=host, port=port)
logger.debug('connection: %s', connection)
with connection as mail:
mail.login(user=username, password=password)
scoring_exists = False
# check if the scoring mailbox exists
mb_res, _ = mail.select('Scoring')
if mb_res == 'OK':
# if it does, mark it for later use
scoring_exists = True
else:
# if the scoring mailbox doesn't exist, select the inbox
mb_res, _ = mail.select('INBOX')
# if the result was OK (for either scoring or inbox)
if mb_res == 'OK':
# retrieve the ScoringCheck email
search_res, search_data = mail.uid('search', None, 'SUBJECT', 'ScoringCheck-')
if search_res == 'OK' and len(search_data) > 0:
# split the email UIDs and check for the
email_uids = search_data[0].split()
if len(email_uids) > 0:
latest_email_uid = email_uids[-1]
result, data = mail.uid('fetch', latest_email_uid, '(RFC822.TEXT)')
result = data[0][1].decode('utf8')
if not scoring_exists:
res, _ = mail.create('Scoring')
if res == 'OK':
res, _ = mail.copy(latest_email_uid, 'Scoring')
if res != 'OK':
logger.error('Error copying email to Scoring mailbox')
else:
logger.error('Error creating Scoring mailbox')
else:
logger.error('No messages fetched')
else:
logger.error('Error getting all messages')
else:
logger.error('Scoring mailbox does not exist')
except (imaplib.IMAP4_SSL.error, imaplib.IMAP4.error) as e:
logger.exception(e)
result = False
return result
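# Usage sketch (hypothetical host and credentials). Calling the task function
# directly runs the check synchronously; if `app` is a Celery instance with a
# running worker, check.delay(...) queues it instead:
#
#   check('imap.example.com', 993, 'scoring', 'secret', use_ssl=True)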
|
A beach and a stormy sea. While we watch two children playing in the sand, five young refugees from Afghanistan, Pakistan, Ethiopia and Gambia tell about their families, their escape from violence, and their hopes and dreams.
is an artist living and working in Bavaria, focusing on photography, experimental video and sound. Her works have been shown internationally at festivals and exhibitions, such as OPEN ART in Örebro/Sweden and the International Video Art House Festival in Madrid/Spain, and lately at festivals in Newark/New Jersey and Bilbao/Spain. |
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
# vim:fenc=utf-8:
from mole.event import Event
from mole.action import Action, ActionSyntaxError
from mole.helper.timeformat import Timespan
from functools import reduce  # builtin in Python 2; explicit import keeps Python 3 working

def func_avg(field, events):
    return reduce(lambda x, y: x + y, map(lambda x: float(x[field]), events)) / len(events)
def func_min(field, events):
return reduce(min, map(lambda x:float(x[field]), events))
def func_max(field, events):
return reduce(max, map(lambda x:float(x[field]), events))
class ActionTimespan(Action):
"""This action consolidate values over a time span."""
REQUIRE_PARSER = True
def __init__(self, field, span=["1h"], func=["avg"]):
"""Create a new timespan action
:param `field`: the field to use in operation (the value to be
consolidated.)
:param `span`: the span for consolidation.
:param `func`: the function to use in consolidation.
"""
self.field = field[0]
self.span = Timespan(span[0])
try:
self.func = __import__("mole.action.timespan",
globals(),
locals(),
[ "func_%s" % func[0] ])
except ImportError:
raise ActionSyntaxError("unable to import timespan module")
try:
self.func = getattr(self.func, "func_%s" % func[0])
except AttributeError:
raise ActionSyntaxError("invalud consolidation function")
def __call__(self, pipeline):
ret = []
field = self.field
for event in pipeline:
if len(ret) and (event.time - ret[0].time) > self.span.seconds:
yield Event({field: self.func(field,ret),"_time": ret[0]["_time"]})
ret = [ event ]
else:
ret.append(event)
if len(ret) and (event.time - ret[0].time) > self.span.seconds:
yield Event({field: self.func(field,ret),"_time": ret[0]["_time"]})
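# Usage sketch: each constructor argument is a single-element token list, as
# passed by the action parser (see REQUIRE_PARSER above); `pipeline` is any
# iterable of Event objects:
#
#   action = ActionTimespan(field=["cpu"], span=["1h"], func=["avg"])
#   for consolidated in action(pipeline):
#       print(consolidated)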
|
“It takes one to know one” is a phrase that often gets used by children when name-calling starts! Yet, despite its derogatory use as a rejoinder, there is a lot of wisdom present.
Think of someone you really admire, and consider which of their character traits shine through.
Sometimes it is much easier to believe something unpleasant about ourselves, than to acknowledge and accept that those traits we really admire in another person are part of us too.
Join Sam and Paul as they explore the piece of poetry that inspired both this discussion and a Full Out and Fearless blog post by @CoachCharrise on 22nd April 2009; and discover a quick and simple way to start reflecting the love to yourself that you deserve – no matter how scary that may sound!
You too can burn so true.
You are a glorious butterfly.
Your soul my heart uplifts. |
from typing import Optional, TYPE_CHECKING, Any, Generator
from lightbus.client.utilities import OnError
from lightbus.exceptions import InvalidBusPathConfiguration, InvalidParameters
from lightbus.utilities.async_tools import block
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from lightbus import BusClient, EventMessage
__all__ = ["BusPath"]
class BusPath:
"""Represents a path on the bus
This class provides a higher-level wrapper around the `BusClient` class.
This wrapper allows for a more idiomatic use of the bus. For example:
bus.auth.get_user(username='admin')
Compare this to the lower level equivalent using the `BusClient`:
bus.client.call_rpc_remote(
api_name='auth',
name='get_user',
kwargs={'username': 'admin'},
)
"""
def __init__(self, name: str, *, parent: Optional["BusPath"], client: "BusClient"):
if not parent and name:
raise InvalidBusPathConfiguration("Root client node may not have a name")
self.name = name
self.parent = parent
self.client = client
def __getattr__(self, item) -> "BusPath":
return self.__class__(name=item, parent=self, client=self.client)
def __str__(self):
return self.fully_qualified_name
def __repr__(self):
return "<BusPath {}>".format(self.fully_qualified_name)
def __dir__(self):
# Used by `lightbus shell` command
path = [node.name for node in self.ancestors(include_self=True)]
path.reverse()
api_names = [[""] + n.split(".") for n in self.client.api_registry.names()]
matches = []
apis = []
for api_name in api_names:
if api_name == path:
# Api name matches exactly
apis.append(api_name)
elif api_name[: len(path)] == path:
# Partial API match
matches.append(api_name[len(path)])
for api_name in apis:
api = self.client.api_registry.get(".".join(api_name[1:]))
matches.extend(dir(api))
return matches
# RPC
def __call__(self, *args, **kwargs):
"""Call this BusPath node as an RPC"""
return self.call(*args, **kwargs)
def call(self, *args, bus_options: dict = None, **kwargs):
"""Call this BusPath node as an RPC"
In contrast to __call__(), this method provides the ability to call
with the additional `bus_options` argument.
"""
# Use a larger value of `rpc_timeout` because call_rpc_remote() should
# handle timeout
rpc_timeout = self.client.config.api(self.api_name).rpc_timeout * 1.5
return block(self.call_async(*args, **kwargs, bus_options=bus_options), timeout=rpc_timeout)
async def call_async(self, *args, bus_options=None, **kwargs):
"""Call this BusPath node as an RPC (asynchronous)"
In contrast to __call__(), this method provides the ability to call
with the additional `bus_options` argument.
"""
if args:
raise InvalidParameters(
f"You have attempted to call the RPC {self.fully_qualified_name} using positional "
"arguments. Lightbus requires you use keyword arguments. For example, "
"instead of func(1), use func(foo=1)."
)
bus_options = bus_options or {}
return await self.client.call_rpc_remote(
api_name=self.api_name, name=self.name, kwargs=kwargs, options=bus_options
)
# Events
def listen(
self,
listener,
*,
listener_name: str,
bus_options: dict = None,
on_error: OnError = OnError.SHUTDOWN,
):
"""Listen to events for this BusPath node"""
return self.client.listen_for_event(
api_name=self.api_name,
name=self.name,
listener=listener,
listener_name=listener_name,
options=bus_options,
on_error=on_error,
)
def fire(self, *args, bus_options: dict = None, **kwargs) -> "EventMessage":
"""Fire an event for this BusPath node"""
return block(
self.fire_async(*args, **kwargs, bus_options=bus_options),
timeout=self.client.config.api(self.api_name).event_fire_timeout,
)
async def fire_async(self, *args, bus_options: dict = None, **kwargs) -> "EventMessage":
"""Fire an event for this BusPath node (asynchronous)"""
if args:
raise InvalidParameters(
f"You have attempted to fire the event {self.fully_qualified_name} using positional"
" arguments. Lightbus requires you use keyword arguments. For example, instead of"
" func(1), use func(foo=1)."
)
return await self.client.fire_event(
api_name=self.api_name, name=self.name, kwargs=kwargs, options=bus_options
)
# Utilities
def ancestors(self, include_self=False) -> Generator["BusPath", None, None]:
"""Get all ancestors of this node"""
parent = self
while parent is not None:
if parent != self or include_self:
yield parent
parent = parent.parent
@property
def api_name(self) -> str:
"""Get the API name of this node
This assumes the full path to this node is a fully qualified event/rpc name
"""
path = [node.name for node in self.ancestors(include_self=False)]
path.reverse()
return ".".join(path[1:])
@property
def fully_qualified_name(self) -> str:
"""Get the fully qualified string name of this node"""
path = [node.name for node in self.ancestors(include_self=True)]
path.reverse()
return ".".join(path[1:])
# Schema
@property
def schema(self):
"""Get the bus schema"""
if self.parent is None:
return self.client.schema
else:
# TODO: Implement getting schema of child nodes if there is demand
raise AttributeError(
"Schema only available on root node. Use bus.schema, not bus.my_api.schema"
)
@property
def parameter_schema(self):
"""Get the parameter JSON schema for the given event or RPC"""
# TODO: Test
return self.client.schema.get_event_or_rpc_schema(self.api_name, self.name)["parameters"]
@property
def response_schema(self):
"""Get the response JSON schema for the given RPC
Only RPCs have responses. Accessing this property for an event will result in a
SchemaNotFound error.
"""
return self.client.schema.get_rpc_schema(self.api_name, self.name)["response"]
def validate_parameters(self, parameters: dict):
"""Validate the parameters for an event or RPC against the schema
See Also: https://lightbus.org/reference/schema/
"""
self.client.schema.validate_parameters(self.api_name, self.name, parameters)
def validate_response(self, response: Any):
"""Validate the response for an RPC against the schema
See Also: https://lightbus.org/reference/schema/
"""
self.client.schema.validate_parameters(self.api_name, self.name, response)
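# Usage sketch (assumes a configured root BusPath, e.g. `bus = lightbus.create()`;
# the api/event names below are illustrative):
#
#   user = bus.auth.get_user(username="admin")           # RPC call
#   bus.auth.user_registered.fire(username="admin")      # fire an event
#   bus.auth.user_registered.listen(
#       handler, listener_name="send_welcome_email")     # subscribe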
|
Oh yum!!! This drink turned out fabulous, and it’s so easy.
I don’t have espresso, so I used regular coffee, and in place of the coconut cream concentrate, coconut butter. Here is my post for making your own; all you need is coconut. I used my Ninja to blend everything together.
The directions can be found at Tropical Traditions.
This entry was posted in coconut, Drinks, Food and Drink, Homemade products, Posts and tagged coconut butter, coconut cream, coffee drink, cooking. Bookmark the permalink. |
from .models import (
EmailAddress,
Group,
PhoneNumber,
Person,
StreetAddress,
)
from .exceptions import ImproperlyConfigured
class BaseManager:
cls = None # Override this in a concrete manager
def __init__(self, book):
self.book = book
def _get_table(self):
return self.book._store[self.cls.table_name]
def filter(self, **kwargs):
"""
Filter by multiple kwargs; an object matching any of the given
kwargs is included (inclusive rather than exclusive matching).
'table' is a reserved kwarg.
:param table: table to search, as a dict (defaults to this manager's table).
:return: dict of matching object attributes, keyed by object id.
"""
table = kwargs.pop('table', None)
if not table:
table = self._get_table()
results = {}
for obj_id, obj_attrs in table.items():
for attr, qry_val in kwargs.items():
# Special case if querying per 'id'
if attr == 'id':
# If lookup per list of id's
if isinstance(qry_val, list) and obj_id in qry_val:
results[obj_id] = obj_attrs
# Exact match needed otherwise.
elif obj_id == qry_val:
results[obj_id] = obj_attrs
continue
obj_val = obj_attrs[attr]
# If 'qry_val' is a list, check for membership.
if isinstance(qry_val, list):
# We could be checking in a foreign keys column (list).
if isinstance(obj_val, list):
# Check if a list of query values,
# has match in a list of foreign keys.
if set(obj_val).intersection(set(qry_val)):
results[obj_id] = obj_attrs
# Otherwise check if the object's value is in query list.
elif obj_val in qry_val:
results[obj_id] = obj_attrs
# We are checking for a single query value.
else:
if attr == 'id' and obj_id == qry_val:
results[obj_id] = obj_attrs
elif isinstance(obj_val, list):
if qry_val in obj_val:
results[obj_id] = obj_attrs
elif obj_attrs[attr] == qry_val:
results[obj_id] = obj_attrs
return results
def convert_results(self, results):
cls_objects = []
for r_id, r_attrs in results.items():
cls_obj = self.create(**r_attrs)
cls_obj.id = r_id
cls_objects.append(cls_obj)
return cls_objects
def create(self, **kwargs):
if not self.cls:
            raise ImproperlyConfigured("'cls' not overridden")
return self.cls(book=self.book, **kwargs)
class EmailAddressManager(BaseManager):
cls = EmailAddress
class PhoneNumberManager(BaseManager):
cls = PhoneNumber
class StreetAddressManager(BaseManager):
cls = StreetAddress
class GroupManager(BaseManager):
cls = Group
class PersonManager(BaseManager):
cls = Person
def find_by_name(self, first_name=None, last_name=None):
"""
Get all matches for first_name and last_name.
"""
if not (first_name or last_name):
raise ValueError("Supply either 'first_name', 'last_name', or both")
        # Only pass the names that were actually supplied; with the OR
        # semantics of filter(), a None value could otherwise match objects
        # whose attribute is None.
        kwargs = {}
        if first_name is not None:
            kwargs['first_name'] = first_name
        if last_name is not None:
            kwargs['last_name'] = last_name
        results = self.filter(**kwargs)
return self.convert_results(results)
def find_by_email(self, email):
"""
Search for Persons by their EmailAddress (given as "email" string).
"""
emails = EmailAddressManager(self.book)
email_results = emails.filter(email=email)
        # filter() treats only list values as membership lookups, so
        # materialise the dict_keys view into a list.
        email_ids = list(email_results.keys())
person_results = self.filter(email_addresses_ids=email_ids)
return self.convert_results(person_results)
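# Hedged usage sketch (illustrative, not part of the module). `book` is a
# hypothetical store object compatible with BaseManager, holding tables
# keyed by each model's `table_name`:
#
#   people = PersonManager(book)
#   matches = people.find_by_name(first_name="Ada")
#   same_person = people.find_by_email("ada@example.com")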
|
It can be tough to manage the many expenses that come along with a summer internship. For students with unpaid internships, or internships in high-cost locations, the SuccessWorks Internship Fund provides support payments of up to $5,000. These payments are made possible through the generous donations from UW-Madison family and friends.
Preference is given to undergraduate students completing unpaid internships, or those who would not be able to participate in the internship without financial support. Students who are underrepresented in their school, major, and/or industry are also prioritized.
Summer 2019 Internship Fund applications are now open! Applications close March 31. Apply online.
If you are unsure of your degree, log in to your Student Center and click the My Academics tab, or consult your academic advisor.
You must have applied to or secured an internship, and must have secured the internship by April 15.
How do I know what counts as an internship?
Students who receive a financial aid package are encouraged to consult with the Office of Financial Aid to ensure an award will not impact their financial standing.
Looking for more financial assistance to support your internship? Start here.
The SuccessWorks Internship Fund is made possible by friends and alumni dedicated to changing the lives of L&S students.
Make a gift today to help more Badgers realize their dreams. |
"""Detect all Python scripts in HTML pages in current folder and subfolders.
Generate brython_modules.js, a bundle with all the modules and packages used
by an application.
Generate a Python package ready for installation and upload on PyPI.
"""
import os
import shutil
import html.parser
import json
import traceback
import sys
import time
import io
import tokenize
import token
import logging
logger = logging.getLogger(__name__)
# Template for application setup.py script
setup = """from setuptools import setup, find_packages
import os
if os.path.exists('README.rst'):
with open('README.rst', encoding='utf-8') as fobj:
LONG_DESCRIPTION = fobj.read()
setup(
name='{app_name}',
version='{version}',
# The project's main homepage.
url='{url}',
# Author details
author='{author}',
author_email='{author_email}',
# License
license='{license}',
packages=['data'],
py_modules=["{app_name}"],
package_data={{'data':[{files}]}}
)
"""
# Template for the application script
app = """import os
import shutil
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--install',
help='Install {app_name} in an empty directory',
action="store_true")
args = parser.parse_args()
files = ({files})
if args.install:
print('Installing {app_name} in an empty directory')
src_path = os.path.join(os.path.dirname(__file__), 'data')
if os.listdir(os.getcwd()):
print('{app_name} can only be installed in an empty folder')
import sys
sys.exit()
for path in files:
dst = os.path.join(os.getcwd(), path)
head, tail = os.path.split(dst)
if not os.path.exists(head):
os.mkdir(head)
shutil.copyfile(os.path.join(src_path, path), dst)
"""
class FromImport:
def __init__(self):
self.source = ''
self.type = "from"
self.level = 0
self.expect = "source"
self.names = []
def __str__(self):
return '<import ' + str(self.names) + ' from ' + str(self.source) +'>'
class Import:
def __init__(self):
self.type = "import"
self.expect = "module"
self.modules = []
def __str__(self):
return '<import ' + str(self.modules) + '>'
class ImportsFinder:
def __init__(self, *args, **kw):
self.package = kw.pop("package") or ""
def find(self, src):
"""Find imports in source code src. Uses the tokenize module instead
of ast in previous Brython version, so that this script can be run
with CPython versions older than the one implemented in Brython."""
imports = set()
importing = None
f = io.BytesIO(src.encode("utf-8"))
for tok_type, tok_string, *_ in tokenize.tokenize(f.readline):
tok_type = token.tok_name[tok_type]
if importing is None:
if tok_type == "NAME" and tok_string in ["import", "from"]:
context = Import() if tok_string == "import" \
else FromImport()
importing = True
else:
if tok_type == "NEWLINE":
imports.add(context)
importing = None
else:
self.transition(context, tok_type, tok_string)
if importing:
imports.add(context)
# Transform raw import objects into a list of qualified module names
self.imports = set()
for imp in imports:
if isinstance(imp, Import):
for mod in imp.modules:
parts = mod.split('.')
while parts:
self.imports.add('.'.join(parts))
parts.pop()
elif isinstance(imp, FromImport):
source = imp.source
if imp.level > 0:
if imp.level == 1:
imp.source = self.package
else:
parts = self.package.split(".")
imp.source = '.'.join(parts[:1 - imp.level])
if source:
imp.source += '.' + source
parts = imp.source.split('.')
while parts:
self.imports.add('.'.join(parts))
parts.pop()
self.imports.add(imp.source)
for name in imp.names:
parts = name.split('.')
while parts:
self.imports.add(imp.source + '.' + '.'.join(parts))
parts.pop()
def transition(self, context, token, value):
if context.type == "from":
if token == "NAME":
if context.expect == "source":
if value == "import" and context.level:
# syntax "from . import name"
context.expect = "names"
else:
context.source += value
context.expect = "."
elif context.expect == "." and value == "import":
context.expect = "names"
elif context.expect == "names":
context.names.append(value)
context.expect = ","
elif token == "OP":
if value == "," and context.expect == ",":
context.expect = "names"
elif value == "." and context.expect == ".":
context.source += '.'
context.expect = "source"
elif value == "." and context.expect == "source":
context.level += 1
elif context.type == "import":
if token == "NAME":
if context.expect == "module":
if context.modules and context.modules[-1].endswith("."):
context.modules[-1] += value
else:
context.modules.append(value)
context.expect = '.'
elif token == "OP":
if context.expect == ".":
if value == ".":
context.modules[-1] += '.'
context.expect = "module"
class ModulesFinder:
def __init__(self, directory=os.getcwd(), stdlib={}, user_modules={}):
self.directory = directory
self.modules = set()
self.stdlib = stdlib
self.user_modules = user_modules
def get_imports(self, src, package=None):
"""Get all imports in source code src."""
finder = ImportsFinder(package=package)
finder.find(src)
for module in finder.imports:
if module in self.modules:
continue
found = False
for module_dict in [self.stdlib, self.user_modules]:
if module in module_dict:
found = True
self.modules.add(module)
if module_dict[module][0] == '.py':
is_package = len(module_dict[module]) == 4
if is_package:
package = module
elif "." in module:
package = module[:module.rfind(".")]
else:
package = ""
module_dict[module][2] = list(self.get_imports(
module_dict[module][1], package))
return finder.imports
def norm_indent(self, script):
"""Scripts in Brython page may start with an indent, remove it before
building the AST.
"""
indent = None
lines = []
for line in script.split('\n'):
if line.strip() and indent is None:
indent = len(line) - len(line.lstrip())
line = line[indent:]
elif indent is not None:
line = line[indent:]
lines.append(line)
return '\n'.join(lines)
def inspect(self):
"""Walk the directory to find all pages with Brython scripts, parse
them to get the list of modules needed to make them run.
"""
site_packages = 'Lib{0}site-packages{0}'.format(os.sep)
imports = set()
for dirname, dirnames, filenames in os.walk(self.directory):
            # don't inspect files in the subfolders __dist__ and __pycache__.
            # Rebuild dirnames in place: removing entries while iterating
            # over the list would skip some of them.
            dirnames[:] = [name for name in dirnames
                           if not name.endswith(('__dist__', '__pycache__'))]
for filename in filenames:
path = os.path.join(dirname, filename)
if path == __file__:
continue
ext = os.path.splitext(filename)[1]
if ext.lower() == '.html':
print("script in html", filename)
# detect charset
charset_detector = CharsetDetector()
with open(path, encoding="iso-8859-1") as fobj:
charset_detector.feed(fobj.read())
# get text/python scripts
parser = BrythonScriptsExtractor(dirname)
with open(path, encoding=charset_detector.encoding) as fobj:
parser.feed(fobj.read())
for script in parser.scripts:
script = self.norm_indent(script)
try:
self.get_imports(script)
except SyntaxError:
print('syntax error', path)
traceback.print_exc(file=sys.stderr)
elif ext.lower() == '.py':
#print("python", filename)
if filename == "list_modules.py":
continue
if dirname != self.directory and not is_package(dirname):
continue
# get package name
package = dirname[len(self.directory) + 1:] or None
if package is not None and \
package.startswith(site_packages):
                        package = package[len(site_packages):]
# print(path)
with open(path, encoding="utf-8") as fobj:
try:
imports |= self.get_imports(fobj.read(), package)
except SyntaxError:
print('syntax error', path)
traceback.print_exc(file=sys.stderr)
def make_brython_modules(self, path):
"""Build brython_modules.js from the list of modules needed by the
application.
"""
vfs = {"$timestamp": int(1000 * time.time())}
for module in self.modules:
dico = self.stdlib if module in self.stdlib else self.user_modules
vfs[module] = dico[module]
elts = module.split('.')
for i in range(1, len(elts)):
pkg = '.'.join(elts[:i])
if not pkg in vfs:
vfs[pkg] = dico[pkg]
# save in brython_modules.js
if os.path.exists(path):
# If brython_modules.js already exists, check if there have been
# changes. Cf. issue #1471.
changes = False
with open(path, encoding="utf-8") as f:
content = f.read()
start_str = "var scripts = "
start_pos = content.find(start_str)
end_pos = content.find("__BRYTHON__.update_VFS(scripts)")
data = content[start_pos + len(start_str):end_pos].strip()
old_vfs = json.loads(data)
if old_vfs.keys() != vfs.keys():
changes = True
else:
changes = True
for key in old_vfs:
if key == "$timestamp":
continue
if not key in vfs:
break
elif vfs[key][1] != old_vfs[key][1]:
break
else: # no break
changes = False
if not changes:
print("No change: brython_modules.js not updated")
return
with open(path, "w", encoding="utf-8") as out:
# Add VFS_timestamp ; used to test if the indexedDB must be
# refreshed
out.write("__BRYTHON__.VFS_timestamp = {}\n".format(
int(1000 * time.time())))
out.write("__BRYTHON__.use_VFS = true\nvar scripts = ")
json.dump(vfs, out)
out.write("\n__BRYTHON__.update_VFS(scripts)")
def _dest(self, base_dir, dirname, filename):
"""Build the destination path for a file."""
elts = dirname[len(os.getcwd()) + 1:].split(os.sep)
dest_dir = base_dir
for elt in elts:
dest_dir = os.path.join(dest_dir, elt)
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
return os.path.join(dest_dir, filename)
def make_setup(self):
"""Make the setup script (setup.py) and the entry point script
for the application."""
# Create a temporary directory
temp_dir = '__dist__'
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
os.mkdir(temp_dir)
# Create a package "data" in this directory
data_dir = os.path.join(temp_dir, 'data')
os.mkdir(data_dir)
with open(os.path.join(data_dir, "__init__.py"), "w") as out:
out.write('')
# If there is a brython_setup.json file, use it to get information
if os.path.exists("brython_setup.json"):
with open("brython_setup.json", encoding="utf-8") as fobj:
info = json.load(fobj)
else:
# Otherwise, ask setup information
while True:
app_name = input("Application name: ")
if app_name:
break
while True:
version = input("Version: ")
if version:
break
author = input("Author: ")
author_email = input("Author email: ")
license = input("License: ")
url = input("Project url: ")
info = {
"app_name": app_name,
"version": version,
"author": author,
"author_email": author_email,
"license": license,
"url": url
}
# Store information in brython_setup.json
with open("brython_setup.json", "w", encoding="utf-8") as out:
json.dump(info, out, indent=4)
# Store all application files in the temporary directory. In HTML
# pages, replace "brython_stdlib.js" by "brython_modules.js"
files = []
for dirname, dirnames, filenames in os.walk(self.directory):
if dirname == "__dist__":
continue
if "__dist__" in dirnames:
dirnames.remove("__dist__")
for filename in filenames:
path = os.path.join(dirname, filename)
parts = path[len(os.getcwd()) + 1:].split(os.sep)
files.append("os.path.join(" +
", ".join(repr(part) for part in parts) +")")
if os.path.splitext(filename)[1] == '.html':
# detect charset
charset_detector = CharsetDetector()
with open(path, encoding="iso-8859-1") as fobj:
charset_detector.feed(fobj.read())
encoding = charset_detector.encoding
# get text/python scripts
parser = VFSReplacementParser(dirname)
with open(path, encoding=encoding) as fobj:
parser.feed(fobj.read())
if not parser.has_vfs:
# save file
dest = self._dest(data_dir, dirname, filename)
shutil.copyfile(path, dest)
continue
with open(path, encoding=encoding) as fobj:
lines = fobj.readlines()
start_line, start_pos = parser.start
end_line, end_pos = parser.end
res = ''.join(lines[:start_line - 1])
for num in range(start_line - 1, end_line):
res += lines[num].replace("brython_stdlib.js",
"brython_modules.js")
res += ''.join(lines[end_line:])
dest = self._dest(data_dir, dirname, filename)
with open(dest, 'w', encoding=encoding) as out:
out.write(res)
else:
dest = self._dest(data_dir, dirname, filename)
shutil.copyfile(path, dest)
info["files"] = ',\n'.join(files)
# Generate setup.py from the template in string setup
path = os.path.join(temp_dir, "setup.py")
with open(path, "w", encoding="utf-8") as out:
out.write(setup.format(**info))
# Generate the application script from the template in string app
path = os.path.join(temp_dir, "{}.py".format(info["app_name"]))
with open(path, "w", encoding="utf-8") as out:
out.write(app.format(**info))
# Get all modules in the Brython standard distribution.
# They must be in brython_stdlib.js somewhere in the current directory
# or below.
def parse_stdlib(stdlib_dir, js_name='brython_stdlib.js'):
path = os.path.join(stdlib_dir, js_name)
with open(path, encoding="utf-8") as fobj:
modules = fobj.read()
modules = modules[modules.find('{'):
modules.find('__BRYTHON__.update_VFS(')]
stdlib = json.loads(modules)
return stdlib
def load_stdlib_sitepackages():
"""
    Search brython_stdlib.js and load it.
    Load site-packages from the current directory.
    :return: (stdlib_dir, stdlib) tuple.
"""
stdlib_dir = None
for dirname, dirnames, filenames in os.walk(os.getcwd()):
for filename in filenames:
if filename == "brython_stdlib.js":
stdlib_dir = dirname
stdlib = parse_stdlib(stdlib_dir)
break
if not stdlib_dir:
raise FileNotFoundError("Could not find brython_stdlib.js in this"
" directory or below")
# search in site-packages
sp_dir = os.path.join(stdlib_dir, "Lib", "site-packages")
if os.path.exists(sp_dir):
print("search in site-packages...")
mf = ModulesFinder()
for dirpath, dirnames, filenames in os.walk(sp_dir):
if dirpath.endswith("__pycache__"):
continue
package = dirpath[len(sp_dir) + 1:]
for filename in filenames:
if not filename.endswith(".py"):
continue
fullpath = os.path.join(dirpath, filename)
#print(fullpath)
is_package = False
if not package:
# file in site-packages
module = os.path.splitext(filename)[0]
else:
elts = package.split(os.sep)
is_package = filename == "__init__.py"
if not is_package:
elts.append(os.path.splitext(filename)[0])
module = ".".join(elts)
with open(fullpath, encoding="utf-8") as f:
src = f.read()
#imports = mf.get_imports(src)
stdlib[module] = [".py", src, None]
if is_package:
stdlib[module].append(1)
return stdlib_dir, stdlib
packages = {os.getcwd(), os.getcwd() + '/Lib/site-packages'}
def is_package(folder):
"""Test if folder is a package, ie has __init__.py and all the folders
above until os.getcwd() also have __init__.py.
Use set "packages" to cache results.
"""
if folder in packages:
return True
current = folder
while True:
if not os.path.exists(os.path.join(current, "__init__.py")):
return False
current = os.path.dirname(current)
if current in packages:
packages.add(folder)
return True
def load_user_modules(module_dir=os.getcwd()):
user_modules = {}
for dirname, dirnames, filenames in os.walk(module_dir):
for filename in filenames:
name, ext = os.path.splitext(filename)
if not ext == ".py" or filename == "list_modules.py":
continue
if dirname == os.getcwd():
# modules in the same directory
path = os.path.join(dirname, filename)
with open(path, encoding="utf-8") as fobj:
                    try:
                        src = fobj.read()
                    except Exception:
                        # Skip unreadable files; otherwise src would be
                        # undefined below.
                        logger.error("Unable to read %s", path)
                        continue
mf = ModulesFinder(dirname)
imports = sorted(list(mf.get_imports(src)))
user_modules[name] = [ext, src, imports]
elif is_package(dirname):
# modules in packages below current directory
path = os.path.join(dirname, filename)
package = dirname[len(os.getcwd()) + 1:].replace(os.sep, '.')
if package.startswith('Lib.site-packages.'):
package = package[len('Lib.site-packages.'):]
if filename == "__init__.py":
module_name = package
else:
module_name = "{}.{}".format(package, name)
with open(path, encoding="utf-8") as fobj:
src = fobj.read()
#mf = ModulesFinder(dirname)
#imports = mf.get_imports(src, package or None)
#imports = sorted(list(imports))
user_modules[module_name] = [ext, src, None]
if module_name == package:
user_modules[module_name].append(1)
return user_modules
class CharsetDetector(html.parser.HTMLParser):
"""Used to detect <meta charset="..."> in HTML page."""
def __init__(self, *args, **kw):
kw.setdefault('convert_charrefs', True)
try:
html.parser.HTMLParser.__init__(self, *args, **kw)
except TypeError:
# convert_charrefs is only supported by Python 3.4+
del kw['convert_charrefs']
html.parser.HTMLParser.__init__(self, *args, **kw)
self.encoding = "iso-8859-1"
def handle_starttag(self, tag, attrs):
if tag.lower() == "meta":
for key, value in attrs:
if key == "charset":
self.encoding = value
class BrythonScriptsExtractor(html.parser.HTMLParser):
"""Used to extract all Brython scripts in HTML pages."""
def __init__(self, dirname, **kw):
kw.setdefault('convert_charrefs', True)
try:
html.parser.HTMLParser.__init__(self, **kw)
except TypeError:
# convert_charrefs is only supported by Python 3.4+
del kw['convert_charrefs']
html.parser.HTMLParser.__init__(self, **kw)
self.dirname = dirname
self.scripts = []
self.py_tags = [] # stack of Python blocks
self.tag_stack = []
def handle_starttag(self, tag, attrs):
if tag.lower() == "script":
_type = "js_script"
src = None
for key, value in attrs:
if key == 'type' and value in ("text/python", "text/python3"):
_type = "py_script"
elif key == "src":
src = value
if _type == "py_script" and src:
_type = "py_script_with_src"
path = os.path.join(self.dirname, src)
with open(path, encoding="utf-8") as fobj:
self.scripts.append(fobj.read())
self.tag_stack.append(_type)
def handle_endtag(self, tag):
if tag.lower() == "script":
self.tag_stack.pop()
def handle_data(self, data):
"""Data is printed unchanged"""
if data.strip():
if self.tag_stack and self.tag_stack[-1].lower() == "py_script":
self.scripts.append(data)
class VFSReplacementParser(html.parser.HTMLParser):
"""Used to replace brython_stdlib.js by brython_modules.js in HTML
pages."""
def __init__(self, path, **kw):
kw.setdefault('convert_charrefs', True)
try:
html.parser.HTMLParser.__init__(self, **kw)
except TypeError:
# convert_charrefs is only supported by Python 3.4+
del kw['convert_charrefs']
html.parser.HTMLParser.__init__(self, **kw)
self.vfs = False
self.has_vfs = False
def handle_starttag(self, tag, attrs):
if tag.lower() == "script":
_type = "js_script"
src = None
for key, value in attrs:
if key == "src":
elts = value.split("/")
if elts and elts[-1] == "brython_stdlib.js":
self.vfs = True
self.has_vfs = True
self.attrs = attrs
self.start = self.getpos()
return
self.vfs = False
def handle_endtag(self, tag):
if tag.lower() == "script" and self.vfs:
self.end = self.getpos()
if __name__ == "__main__":
finder = ModulesFinder()
finder.inspect()
# print(sorted(list(finder.modules)))
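    # Hedged sketch (commented out, not part of the original script): a fuller
    # run that would also bundle the detected modules into brython_modules.js.
    # It assumes brython_stdlib.js lives somewhere in or below this directory.
    # stdlib_dir, stdlib = load_stdlib_sitepackages()
    # user_modules = load_user_modules()
    # finder = ModulesFinder(stdlib=stdlib, user_modules=user_modules)
    # finder.inspect()
    # finder.make_brython_modules(
    #     os.path.join(stdlib_dir, "brython_modules.js"))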
|
The first definitive letter-postage Greenland stamps were issued in 1938. Before 1938, there was little need for letter-postage stamps in Greenland, as letter-mail within Greenland and between Denmark and Greenland was sent free-of-charge.
Seven of the nine portrait and pictorial definitive Greenland stamps shown above were issued on November 1, 1938. The 20 Øre and 40 Øre denomination stamps were issued on August 1, 1946.
01 Ø. - 20 Ø. (Sc. #1-6) - King Christian X.
30 Ø. - 01 K. (Sc. #7-9) - Polar Bear.
Greenland had previously been a highly protected and isolated country under Danish rule, but that all changed during 1940. Greenland's connection to Denmark was severed on April 9, 1940, when Denmark surrendered to and was occupied by Germany. On April 8, 1941, the United States occupied Greenland to prevent a possible German invasion, and it remained under American control until 1945.
The nine new pictorial definitive Greenland stamps shown above were issued on February 1, 1945. These stamps are referred to by Greenland philatelists as the American Issue of Greenland.
By 1943, with Greenland cut off from Denmark during World War II, it appeared that the postal administration would run out of definitive postage stamps. During 1944, the Greenland Delegation in New York City contracted with the American Banknote Co. to design and print new definitive postage stamps. The American Banknote Co. produced 100,000 sets of these stamps, and they were delivered to the Greenland Delegation in early 1945. They were placed on sale in Greenland on February 1, 1945. These stamps were only in use for a short period of time before they were replaced by new stamps produced in Denmark; thus they are very scarce today.
01 Ø. - 07 Ø. (Sc. #10-12) - Harp Seal on an Ice Floe.
10 Ø. - 15 Ø. (Sc. #13-14) - King Christian X on Horseback.
30 Ø. (Sc. #15) - Sled and Dog Team.
01 K. (Sc. #16) - Polar Bear.
02 K. (Sc. #17) - Kayaker.
05 K. (Sc. #18) - Eider Duck.
Here is a great article about these stamps that I found while researching them on the internet.
King Christian X of Denmark had been very ill since 1942, the result of a fall from his horse, and he died on April 20, 1947. He was succeeded by his eldest son, who became King Frederick IX. Of course, the accession of a new monarch would require that new definitive postage stamps be prepared and issued for Greenland.
The eleven portrait and pictorial definitive Greenland stamps shown above were issued between 1950 and 1960.
01 Ø. - 30 Ø. (Sc. #28-34) - King Frederick IX.
50 Ø. - 05 K. (Sc. #35-38) - 19th Century Polar Ship, "Gustav Holm".
The two re-valued stamps shown above were issued on March 8, 1956.
60 Ø. on 40 Ø. (Sc. #39) - Polar Bear.
60 Ø. on 01 K. (Sc. #40) - Polar Bear.
Though relatively common in used condition, they are a bit expensive in mint condition.
The eighteen new definitive Greenland stamps shown above were issued between 1963 and 1968.
01 Ø. - 15 Ø. (Sc. #48-52) - Northern Lights and Crossed Anchors.
20 Ø. - 80 Ø. (Sc. #53-61) - King Frederick IX.
01 K. - 10 K. (Sc. #62-65) - Polar Bear. |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements more advanced transformations.
"""
from __future__ import division, unicode_literals
import numpy as np
from fractions import gcd, Fraction
from itertools import groupby
from warnings import warn
import logging
import math
import six
from monty.json import MontyDecoder
from monty.fractions import lcm
from pymatgen.core.structure import Composition
from pymatgen.core.periodic_table import Element, Specie, get_el_sp
from pymatgen.transformations.transformation_abc import AbstractTransformation
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, OrderDisorderedStructureTransformation
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.structure_prediction.substitution_probability import \
SubstitutionPredictor
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpinComparator
from pymatgen.analysis.energy_models import SymmetryModel
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.surface import SlabGenerator
"""
This module implements more advanced transformations.
"""
__author__ = "Shyue Ping Ong, Stephen Dacek, Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jul 24, 2012"
logger = logging.getLogger(__name__)
class ChargeBalanceTransformation(AbstractTransformation):
"""
This is a transformation that disorders a structure to make it charge
balanced, given an oxidation state-decorated structure.
Args:
charge_balance_sp: specie to add or remove. Currently only removal
is supported
"""
def __init__(self, charge_balance_sp):
self.charge_balance_sp = str(charge_balance_sp)
def apply_transformation(self, structure):
charge = structure.charge
specie = get_el_sp(self.charge_balance_sp)
num_to_remove = charge / specie.oxi_state
num_in_structure = structure.composition[specie]
removal_fraction = num_to_remove / num_in_structure
if removal_fraction < 0:
raise ValueError("addition of specie not yet supported by "
"ChargeBalanceTransformation")
trans = SubstitutionTransformation(
{self.charge_balance_sp: {
self.charge_balance_sp: 1 - removal_fraction}})
return trans.apply_transformation(structure)
def __str__(self):
return "Charge Balance Transformation : " + \
"Species to remove = {}".format(str(self.charge_balance_sp))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class SuperTransformation(AbstractTransformation):
"""
This is a transformation that is inherently one-to-many. It is constructed
from a list of transformations and returns one structure for each
transformation. The primary use for this class is extending a transmuter
object.
Args:
transformations ([transformations]): List of transformations to apply
to a structure. One transformation is applied to each output
structure.
        nstructures_per_trans (int): If the transformations are one-to-many,
            nstructures_per_trans structures from each transformation are
            added to the full list. Defaults to 1, i.e., only the best
            structure.
"""
def __init__(self, transformations, nstructures_per_trans=1):
self._transformations = transformations
self.nstructures_per_trans = nstructures_per_trans
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SuperTransformation has no single best structure"
" output. Must use return_ranked_list")
structures = []
for t in self._transformations:
if t.is_one_to_many:
for d in t.apply_transformation(
structure,
return_ranked_list=self.nstructures_per_trans):
d["transformation"] = t
structures.append(d)
else:
structures.append(
{"transformation": t,
"structure": t.apply_transformation(structure)})
return structures
def __str__(self):
return "Super Transformation : Transformations = " + \
"{}".format(" ".join([str(t) for t in self._transformations]))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class MultipleSubstitutionTransformation(object):
"""
Performs multiple substitutions on a structure. For example, can do a
fractional replacement of Ge in LiGePS with a list of species, creating one
structure for each substitution. Ordering is done using a dummy element so
only one ordering must be done per substitution oxidation state. Charge
balancing of the structure is optionally performed.
.. note::
There are no checks to make sure that removal fractions are possible
and rounding may occur. Currently charge balancing only works for
removal of species.
"""
def __init__(self, sp_to_replace, r_fraction, substitution_dict,
charge_balance_species=None, order=True):
"""
Performs multiple fractional substitutions on a transmuter.
Args:
sp_to_replace: species to be replaced
r_fraction: fraction of that specie to replace
substitution_dict: dictionary of the format
{2: ["Mg", "Ti", "V", "As", "Cr", "Ta", "N", "Nb"],
3: ["Ru", "Fe", "Co", "Ce", "As", "Cr", "Ta", "N", "Nb"],
4: ["Ru", "V", "Cr", "Ta", "N", "Nb"],
5: ["Ru", "W", "Mn"]
}
The number is the charge used for each of the list of elements
(an element can be present in multiple lists)
charge_balance_species: If specified, will balance the charge on
the structure using that specie.
"""
self.sp_to_replace = sp_to_replace
self.r_fraction = r_fraction
self.substitution_dict = substitution_dict
self.charge_balance_species = charge_balance_species
self.order = order
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("MultipleSubstitutionTransformation has no single"
" best structure output. Must use"
" return_ranked_list.")
outputs = []
for charge, el_list in self.substitution_dict.items():
mapping = {}
if charge > 0:
sign = "+"
else:
sign = "-"
dummy_sp = "X{}{}".format(str(charge), sign)
mapping[self.sp_to_replace] = {
self.sp_to_replace: 1 - self.r_fraction,
dummy_sp: self.r_fraction}
trans = SubstitutionTransformation(mapping)
dummy_structure = trans.apply_transformation(structure)
if self.charge_balance_species is not None:
cbt = ChargeBalanceTransformation(self.charge_balance_species)
dummy_structure = cbt.apply_transformation(dummy_structure)
if self.order:
trans = OrderDisorderedStructureTransformation()
dummy_structure = trans.apply_transformation(dummy_structure)
for el in el_list:
if charge > 0:
sign = "+"
else:
sign = "-"
                st = SubstitutionTransformation(
                    {"X{}{}".format(str(charge), sign): "{}{}{}".format(
                        el, charge, sign)})
new_structure = st.apply_transformation(dummy_structure)
outputs.append({"structure": new_structure})
return outputs
def __str__(self):
return "Multiple Substitution Transformation : Substitution on " + \
"{}".format(self.sp_to_replace)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class EnumerateStructureTransformation(AbstractTransformation):
"""
    Order a disordered structure using enumlib. For complete orderings, this
    generally produces fewer structures than the
    OrderDisorderedStructureTransformation, and at a much faster speed.
Args:
min_cell_size:
The minimum cell size wanted. Must be an int. Defaults to 1.
max_cell_size:
The maximum cell size wanted. Must be an int. Defaults to 1.
symm_prec:
Tolerance to use for symmetry.
refine_structure:
This parameter has the same meaning as in enumlib_caller.
If you are starting from a structure that has been relaxed via
some electronic structure code, it is usually much better to
start with symmetry determination and then obtain a refined
            structure. The refined structure has cell parameters and
            atomic positions shifted to the expected symmetry positions,
            which makes it much less sensitive to precision issues in enumlib.
            If you are already starting from an experimental cif, refinement
            should have already been done and it is not necessary. Defaults
to False.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
        check_ordered_symmetry (bool): Whether to check the symmetry of
            the ordered sites. If the symmetry of the ordered sites is
            lower, the lowest symmetry ordered sites are included in the
            enumeration. This is important if the ordered sites break
            symmetry in a way that matters for getting all possible
            structures. But sometimes including ordered sites
            slows down enumeration to the point that it cannot be
            completed. Switch to False in those cases. Defaults to True.
"""
def __init__(self, min_cell_size=1, max_cell_size=1, symm_prec=0.1,
refine_structure=False, enum_precision_parameter=0.001,
check_ordered_symmetry=True):
self.symm_prec = symm_prec
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.refine_structure = refine_structure
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
def apply_transformation(self, structure, return_ranked_list=False):
"""
Return either a single ordered structure or a sequence of all ordered
structures.
Args:
structure: Structure to order.
return_ranked_list (bool): Whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
            Depending on return_ranked_list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure": ..., "other_arguments": ...}.
            The list of ordered structures is ranked by Ewald energy / atom if
            the input structure is an oxidation state decorated structure.
            Otherwise, it is ranked by number of sites, with the smallest
            number of sites first.
"""
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if self.refine_structure:
finder = SpacegroupAnalyzer(structure, self.symm_prec)
structure = finder.get_refined_structure()
contains_oxidation_state = all(
[hasattr(sp, "oxi_state") and sp.oxi_state != 0 for sp in
structure.composition.elements]
)
if structure.is_ordered:
warn("Enumeration skipped for structure with composition {} "
"because it is ordered".format(structure.composition))
structures = [structure.copy()]
else:
adaptor = EnumlibAdaptor(
structure, min_cell_size=self.min_cell_size,
max_cell_size=self.max_cell_size,
symm_prec=self.symm_prec, refine_structure=False,
enum_precision_parameter=self.enum_precision_parameter,
check_ordered_symmetry=self.check_ordered_symmetry)
adaptor.run()
structures = adaptor.structures
original_latt = structure.lattice
inv_latt = np.linalg.inv(original_latt.matrix)
ewald_matrices = {}
all_structures = []
for s in structures:
new_latt = s.lattice
transformation = np.dot(new_latt.matrix, inv_latt)
transformation = tuple([tuple([int(round(cell)) for cell in row])
for row in transformation])
if contains_oxidation_state:
if transformation not in ewald_matrices:
s_supercell = structure * transformation
ewald = EwaldSummation(s_supercell)
ewald_matrices[transformation] = ewald
else:
ewald = ewald_matrices[transformation]
energy = ewald.compute_sub_structure(s)
all_structures.append({"num_sites": len(s), "energy": energy,
"structure": s})
else:
all_structures.append({"num_sites": len(s), "structure": s})
def sort_func(s):
return s["energy"] / s["num_sites"] if contains_oxidation_state \
else s["num_sites"]
self._all_structures = sorted(all_structures, key=sort_func)
if return_ranked_list:
return self._all_structures[0:num_to_return]
else:
return self._all_structures[0]["structure"]
def __str__(self):
return "EnumerateStructureTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class SubstitutionPredictorTransformation(AbstractTransformation):
"""
This transformation takes a structure and uses the structure
prediction module to find likely site substitutions.
Args:
threshold: Threshold for substitution.
**kwargs: Args for SubstitutionProbability class lambda_table, alpha
"""
def __init__(self, threshold=1e-2, **kwargs):
self.kwargs = kwargs
self.threshold = threshold
self._substitutor = SubstitutionPredictor(threshold=threshold,
**kwargs)
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SubstitutionPredictorTransformation doesn't"
" support returning 1 structure")
preds = self._substitutor.composition_prediction(
structure.composition, to_this_composition=False)
preds.sort(key=lambda x: x['probability'], reverse=True)
outputs = []
for pred in preds:
st = SubstitutionTransformation(pred['substitutions'])
output = {'structure': st.apply_transformation(structure),
'probability': pred['probability'],
'threshold': self.threshold, 'substitutions': {}}
# dictionary keys have to be converted to strings for JSON
for key, value in pred['substitutions'].items():
output['substitutions'][str(key)] = str(value)
outputs.append(output)
return outputs
def __str__(self):
return "SubstitutionPredictorTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class MagOrderingTransformation(AbstractTransformation):
"""
This transformation takes a structure and returns a list of magnetic
orderings. Currently only works for ordered structures.
Args:
mag_elements_spin:
A mapping of elements/species to magnetically order to spin
magnitudes. E.g., {"Fe3+": 5, "Mn3+": 4}
order_parameter:
degree of magnetization. 0.5 corresponds to
antiferromagnetic order
energy_model:
Energy model used to rank the structures. Some models are
provided in :mod:`pymatgen.analysis.energy_models`.
**kwargs:
Same keyword args as :class:`EnumerateStructureTransformation`,
i.e., min_cell_size, etc.
"""
def __init__(self, mag_species_spin, order_parameter=0.5,
energy_model=SymmetryModel(), **kwargs):
self.mag_species_spin = mag_species_spin
if order_parameter > 1 or order_parameter < 0:
raise ValueError('Order Parameter must lie between 0 and 1')
else:
self.order_parameter = order_parameter
self.energy_model = energy_model
self.kwargs = kwargs
@classmethod
def determine_min_cell(cls, structure, mag_species_spin, order_parameter):
"""
Determine the smallest supercell that is able to enumerate
the provided structure with the given order parameter
"""
def lcm(n1, n2):
"""
Find least common multiple of two numbers
"""
return n1 * n2 / gcd(n1, n2)
denom = Fraction(order_parameter).limit_denominator(100).denominator
atom_per_specie = [structure.composition[m]
for m in mag_species_spin.keys()]
n_gcd = six.moves.reduce(gcd, atom_per_specie)
if not n_gcd:
raise ValueError(
'The specified species do not exist in the structure'
' to be enumerated')
return lcm(n_gcd, denom) / n_gcd
def apply_transformation(self, structure, return_ranked_list=False):
# Make a mutable structure first
mods = Structure.from_sites(structure)
for sp, spin in self.mag_species_spin.items():
sp = get_el_sp(sp)
oxi_state = getattr(sp, "oxi_state", 0)
if spin:
up = Specie(sp.symbol, oxi_state, {"spin": abs(spin)})
down = Specie(sp.symbol, oxi_state, {"spin": -abs(spin)})
mods.replace_species(
{sp: Composition({up: self.order_parameter,
down: 1 - self.order_parameter})})
else:
mods.replace_species(
{sp: Specie(sp.symbol, oxi_state, {"spin": spin})})
if mods.is_ordered:
return [mods] if return_ranked_list > 1 else mods
enum_args = self.kwargs
enum_args["min_cell_size"] = max(int(
MagOrderingTransformation.determine_min_cell(
structure, self.mag_species_spin,
self.order_parameter)),
enum_args.get("min_cell_size", 1))
max_cell = enum_args.get('max_cell_size')
if max_cell:
if enum_args["min_cell_size"] > max_cell:
raise ValueError('Specified max cell size is smaller'
' than the minimum enumerable cell size')
else:
enum_args["max_cell_size"] = enum_args["min_cell_size"]
t = EnumerateStructureTransformation(**enum_args)
alls = t.apply_transformation(mods,
return_ranked_list=return_ranked_list)
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if num_to_return == 1 or not return_ranked_list:
return alls[0]["structure"] if num_to_return else alls
m = StructureMatcher(comparator=SpinComparator())
key = lambda x: SpacegroupAnalyzer(x, 0.1).get_space_group_number()
out = []
for _, g in groupby(sorted([d["structure"] for d in alls],
key=key), key):
g = list(g)
grouped = m.group_structures(g)
out.extend([{"structure": g[0],
"energy": self.energy_model.get_energy(g[0])}
for g in grouped])
self._all_structures = sorted(out, key=lambda d: d["energy"])
return self._all_structures[0:num_to_return]
def __str__(self):
return "MagOrderingTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
def _find_codopant(target, oxidation_state, allowed_elements=None):
"""
Finds the element from "allowed elements" that (i) possesses the desired
"oxidation state" and (ii) is closest in ionic radius to the target specie
Args:
target: (Specie) provides target ionic radius.
oxidation_state: (float) codopant oxidation state.
allowed_elements: ([str]) List of allowed elements. If None,
all elements are tried.
Returns:
(Specie) with oxidation_state that has ionic radius closest to
target.
"""
ref_radius = target.ionic_radius
candidates = []
symbols = allowed_elements or [el.symbol for el in Element]
for sym in symbols:
try:
sp = Specie(sym, oxidation_state)
r = sp.ionic_radius
if r is not None:
candidates.append((r, sp))
        except Exception:
            # No radius data (or an invalid species) for this oxidation state.
            pass
return min(candidates, key=lambda l: abs(l[0]/ref_radius - 1))[1]
class DopingTransformation(AbstractTransformation):
"""
A transformation that performs doping of a structure.
"""
def __init__(self, dopant, ionic_radius_tol=float("inf"), min_length=10,
alio_tol=0, codopant=False, max_structures_per_enum=100,
allowed_doping_species=None, **kwargs):
"""
Args:
dopant (Specie-like): E.g., Al3+. Must have oxidation state.
ionic_radius_tol (float): E.g., Fractional allowable ionic radii
mismatch for dopant to fit into a site. Default of inf means
that any dopant with the right oxidation state is allowed.
            min_length (float): Min. lattice parameter between periodic
                images of dopant. Defaults to 10 Å for now.
alio_tol (int): If this is not 0, attempt will be made to dope
sites with oxidation_states +- alio_tol of the dopant. E.g.,
1 means that the ions like Ca2+ and Ti4+ are considered as
potential doping sites for Al3+.
codopant (bool): If True, doping will be carried out with a
codopant to maintain charge neutrality. Otherwise, vacancies
will be used.
max_structures_per_enum (float): Maximum number of structures to
return per enumeration. Note that there can be more than one
candidate doping site, and each site enumeration will return at
max max_structures_per_enum structures. Defaults to 100.
            allowed_doping_species (list): Species that are allowed to be
                doping sites. This is an inclusionary list. If specified,
                any sites which are not in this list will not be considered
                as doping sites.
\*\*kwargs:
Same keyword args as :class:`EnumerateStructureTransformation`,
i.e., min_cell_size, etc.
"""
self.dopant = get_el_sp(dopant)
self.ionic_radius_tol = ionic_radius_tol
self.min_length = min_length
self.alio_tol = alio_tol
self.codopant = codopant
self.max_structures_per_enum = max_structures_per_enum
self.allowed_doping_species = allowed_doping_species
self.kwargs = kwargs
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure (Structure): Input structure to dope
Returns:
[{"structure": Structure, "energy": float}]
"""
comp = structure.composition
logger.info("Composition: %s" % comp)
for sp in comp:
try:
sp.oxi_state
except AttributeError:
analyzer = BVAnalyzer()
structure = analyzer.get_oxi_state_decorated_structure(
structure)
comp = structure.composition
break
ox = self.dopant.oxi_state
radius = self.dopant.ionic_radius
compatible_species = [
sp for sp in comp if sp.oxi_state == ox and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol]
if (not compatible_species) and self.alio_tol:
# We only consider aliovalent doping if there are no compatible
# isovalent species.
compatible_species = [
sp for sp in comp
if abs(sp.oxi_state - ox) <= self.alio_tol and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol and
sp.oxi_state * ox >= 0]
if self.allowed_doping_species is not None:
# Only keep allowed doping species.
compatible_species = [
sp for sp in compatible_species
if sp in [get_el_sp(s) for s in self.allowed_doping_species]]
logger.info("Compatible species: %s" % compatible_species)
lengths = structure.lattice.abc
scaling = [max(1, int(round(math.ceil(self.min_length/x))))
for x in lengths]
logger.info("Lengths are %s" % str(lengths))
logger.info("Scaling = %s" % str(scaling))
all_structures = []
t = EnumerateStructureTransformation(**self.kwargs)
for sp in compatible_species:
supercell = structure * scaling
nsp = supercell.composition[sp]
if sp.oxi_state == ox:
supercell.replace_species({sp: {sp: (nsp - 1)/nsp,
self.dopant: 1/nsp}})
logger.info("Doping %s for %s at level %.3f" % (
sp, self.dopant, 1 / nsp))
elif self.codopant:
codopant = _find_codopant(sp, 2 * sp.oxi_state - ox)
supercell.replace_species({sp: {sp: (nsp - 2) / nsp,
self.dopant: 1 / nsp,
codopant: 1 / nsp}})
logger.info("Doping %s for %s + %s at level %.3f" % (
sp, self.dopant, codopant, 1 / nsp))
elif abs(sp.oxi_state) < abs(ox):
# Strategy: replace the target species with a
# combination of dopant and vacancy.
# We will choose the lowest oxidation state species as a
# vacancy compensation species as it is likely to be lower in
# energy
sp_to_remove = min([s for s in comp if s.oxi_state * ox > 0],
key=lambda ss: abs(ss.oxi_state))
if sp_to_remove == sp:
common_charge = lcm(int(abs(sp.oxi_state)), int(abs(ox)))
ndopant = common_charge / abs(ox)
nsp_to_remove = common_charge / abs(sp.oxi_state)
logger.info("Doping %d %s with %d %s." %
(nsp_to_remove, sp, ndopant, self.dopant))
supercell.replace_species(
{sp: {sp: (nsp - nsp_to_remove) / nsp,
self.dopant: ndopant / nsp}})
else:
ox_diff = int(abs(round(sp.oxi_state - ox)))
vac_ox = int(abs(sp_to_remove.oxi_state))
common_charge = lcm(vac_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / vac_ox
nx = supercell.composition[sp_to_remove]
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {
sp_to_remove: (nx - nx_to_remove) / nx}})
elif abs(sp.oxi_state) > abs(ox):
# Strategy: replace the target species with dopant and also
# remove some opposite charged species for charge neutrality
if ox > 0:
sp_to_remove = max(supercell.composition.keys(),
key=lambda el: el.X)
else:
sp_to_remove = min(supercell.composition.keys(),
key=lambda el: el.X)
# Confirm species are of opposite oxidation states.
assert sp_to_remove.oxi_state * sp.oxi_state < 0
ox_diff = int(abs(round(sp.oxi_state - ox)))
anion_ox = int(abs(sp_to_remove.oxi_state))
nx = supercell.composition[sp_to_remove]
common_charge = lcm(anion_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / anion_ox
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {sp_to_remove: (nx - nx_to_remove)/nx}})
ss = t.apply_transformation(
supercell, return_ranked_list=self.max_structures_per_enum)
logger.info("%s distinct structures" % len(ss))
all_structures.extend(ss)
logger.info("Total %s doped structures" % len(all_structures))
if return_ranked_list:
return all_structures[:return_ranked_list]
return all_structures[0]["structure"]
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class SlabTransformation(AbstractTransformation):
"""
A transformation that creates a slab from a structure.
"""
def __init__(self, miller_index, min_slab_size, min_vacuum_size,
lll_reduce=False, center_slab=False, primitive=True,
max_normal_search=None, shift=0, tol=0.1):
"""
Args:
miller_index (3-tuple or list): miller index of slab
min_slab_size (float): minimum slab size in angstroms
min_vacuum_size (float): minimum size of vacuum
lll_reduce (bool): whether to apply LLL reduction
center_slab (bool): whether to center the slab
primitive (bool): whether to reduce slabs to most primitive cell
max_normal_search (int): maximum index to include in linear
combinations of indices to find c lattice vector orthogonal
to slab surface
shift (float): shift to get termination
tol (float): tolerance for primitive cell finding
"""
self.miller_index = miller_index
self.min_slab_size = min_slab_size
self.min_vacuum_size = min_vacuum_size
self.lll_reduce = lll_reduce
self.center_slab = center_slab
self.primitive = primitive
self.max_normal_search = max_normal_search
self.shift = shift
        self.tol = tol
def apply_transformation(self, structure):
sg = SlabGenerator(structure, self.miller_index, self.min_slab_size,
self.min_vacuum_size, self.lll_reduce,
self.center_slab, self.primitive,
self.max_normal_search)
slab = sg.get_slab(self.shift, self.tol)
return slab
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
        return False
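# Hedged usage sketch (illustrative, not part of the module). It assumes an
# oxidation-state-decorated, disordered Structure object named `structure`:
#
#   enum_trans = EnumerateStructureTransformation(max_cell_size=2)
#   ranked = enum_trans.apply_transformation(structure, return_ranked_list=10)
#   best = ranked[0]["structure"]
#
#   doping_trans = DopingTransformation("Al3+", min_length=10)
#   doped = doping_trans.apply_transformation(best, return_ranked_list=5)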
|
Hey everyone, just wanted to say a quick hello and introduce myself, I play on FT as cyndi66 and this site was recommended by Dalsue214. Look forward to seeing u at the tables!
good luck at the tables and hope to see you around!
Hello, welcome to the cave cyndi! Good luck.
A lot of new players here! Welcome and have fun!
hi cyndi, welcome to our cave, hope you enjoy your stay here! |
import rospy
from geometry_msgs.msg import TwistStamped
import threading
from math import pi as PI
from jderobotTypes import CMDVel
from .threadPublisher import ThreadPublisher
def cmdvel2Twist(vel):
'''
    Translates from JderobotTypes CMDVel to a ROS TwistStamped.
    @param vel: JderobotTypes CMDVel to translate
    @type vel: JdeRobotTypes.CMDVel
    @return: a TwistStamped translated from vel
'''
tw = TwistStamped()
tw.twist.linear.x = vel.vx
tw.twist.linear.y = vel.vy
tw.twist.linear.z = vel.vz
tw.twist.angular.x = vel.ax
tw.twist.angular.y = vel.ay
tw.twist.angular.z = vel.az
return tw
class PublisherCMDVel:
'''
ROS CMDVel Publisher. CMDVel Client to Send CMDVel to ROS nodes.
'''
def __init__(self, topic, jdrc):
'''
PublisherCMDVel Constructor.
@param topic: ROS topic to publish
@param jdrc: jderobot Communicator
@type topic: String
@type jdrc: jderobot Communicator
'''
rospy.init_node("ss")
self.topic = topic
self.jdrc = jdrc
self.vel = CMDVel()
        self.pub = rospy.Publisher(topic, TwistStamped, queue_size=1)
self.lock = threading.Lock()
self.kill_event = threading.Event()
self.thread = ThreadPublisher(self, self.kill_event)
self.thread.daemon = True
self.start()
def publish (self):
'''
Function to publish cmdvel.
'''
self.lock.acquire()
tw = cmdvel2Twist(self.vel)
self.lock.release()
if (self.jdrc.getState() == "flying"):
self.pub.publish(tw)
def stop(self):
'''
        Stops (unregisters) the client. A stopped client cannot be started again; threading.Thread raises an error if restarted.
'''
self.kill_event.set()
self.pub.unregister()
def start (self):
'''
        Starts (subscribes) the client. A stopped client cannot be started again; threading.Thread raises an error if restarted.
'''
self.kill_event.clear()
self.thread.start()
    def sendVelocities(self, vel):
        '''
        Sends CMDVel.
        @param vel: CMDVel to publish
        @type vel: CMDVel
        '''
        self.lock.acquire()
        self.vel = vel
        self.lock.release()
def setVX(self, vx):
'''
Sends VX velocity.
@param vx: VX velocity
@type vx: float
'''
self.lock.acquire()
self.vel.vx = vx
self.lock.release()
def setVY(self, vy):
'''
Sends VY velocity.
@param vy: VY velocity
@type vy: float
'''
self.lock.acquire()
self.vel.vy = vy
self.lock.release()
def setVZ(self,vz):
'''
Sends VZ velocity.
@param vz: VZ velocity
@type vz: float
'''
self.lock.acquire()
self.vel.vz=vz
self.lock.release()
def setAngularZ(self, az):
'''
Sends AZ velocity.
@param az: AZ velocity
@type az: float
'''
self.lock.acquire()
self.vel.az = az
self.lock.release()
def setAngularX(self,ax):
'''
Sends AX velocity.
@param ax: AX velocity
@type ax: float
'''
self.lock.acquire()
self.vel.ax=ax
self.lock.release()
def setAngularY(self,ay):
'''
Sends AY velocity.
@param ay: AY velocity
@type ay: float
'''
self.lock.acquire()
self.vel.ay=ay
self.lock.release()
def setYaw(self,yaw):
self.setAngularZ(yaw)
def setRoll(self,roll):
self.setAngularX(roll)
def setPitch(self,pitch):
self.setAngularY(pitch)
def sendCMD (self, vel):
'''
Sends CMDVel.
@param vel: CMDVel to publish
@type vel: CMDVel
'''
self.lock.acquire()
self.vel = vel
self.lock.release()
def sendCMDVel (self, vx,vy,vz,ax,ay,az):
self.lock.acquire()
self.vel.vx=vx
self.vel.vy=vy
self.vel.vz=vz
self.vel.ax=ax
self.vel.ay=ay
self.vel.az=az
self.lock.release()
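# Hedged usage sketch (illustrative, not part of the module). It assumes a
# running ROS master, a hypothetical topic name, and a jderobot Communicator
# `jdrc` whose getState() returns "flying":
#
#   pub = PublisherCMDVel("/quadrotor/cmd_vel", jdrc)
#   pub.sendCMDVel(0.5, 0.0, 0.0, 0.0, 0.0, 0.0)  # forward at 0.5 m/s
#   pub.setYaw(0.2)                               # gentle left turn
#   pub.stop()                                    # unregister when done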
|
Non-interactive zero-knowledge (NIZK) proofs have been investigated in two models: the Public Parameter model and the Secret Parameter model. In the former, a public string is “ideally” chosen according to some efficiently samplable distribution and made available to both the Prover and Verifier. In the latter, the parties instead obtain correlated (possibly different) private strings. To add further choice, the definition of zero-knowledge in these settings can either be non-adaptive or adaptive.
The known results can be summarized as follows:
– In the secret parameter model, NIZK = NISZK = NIPZK = AM.
– For computational NIZK for “hard” languages, one-way functions are both necessary and sufficient.
– Either NIZK proofs exist only for “easy” languages (i.e., languages that are not hard-on-average), or they exist for all of AM (i.e., all languages which admit non-interactive proofs). |
"""
Title: Semantic Image Clustering
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/02/28
Last modified: 2021/02/28
Description: Semantic Clustering by Adopting Nearest neighbors (SCAN) algorithm.
"""
"""
## Introduction
This example demonstrates how to apply the [Semantic Clustering by Adopting Nearest neighbors
(SCAN)](https://arxiv.org/abs/2005.12320) algorithm (Van Gansbeke et al., 2020) on the
[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. The algorithm consists of
two phases:
1. Self-supervised visual representation learning of images, in which we use the
[simCLR](https://arxiv.org/abs/2002.05709) technique.
2. Clustering of the learned visual representation vectors to maximize the agreement
between the cluster assignments of neighboring vectors.
The example requires [TensorFlow Addons](https://www.tensorflow.org/addons),
which you can install using the following command:
```python
pip install tensorflow-addons
```
"""
"""
## Setup
"""
from collections import defaultdict
import random
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from tqdm import tqdm
"""
## Prepare the data
"""
num_classes = 10
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_data = np.concatenate([x_train, x_test])
y_data = np.concatenate([y_train, y_test])
print("x_data shape:", x_data.shape, "- y_data shape:", y_data.shape)
classes = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
"""
## Define hyperparameters
"""
target_size = 32 # Resize the input images.
representation_dim = 512 # The dimensions of the features vector.
projection_units = 128 # The projection head of the representation learner.
num_clusters = 20 # Number of clusters.
k_neighbours = 5 # Number of neighbours to consider during cluster learning.
tune_encoder_during_clustering = False # Freeze the encoder in the cluster learning.
"""
## Implement data preprocessing
The data preprocessing step resizes the input images to the desired `target_size` and applies
feature-wise normalization. Note that, when using `keras.applications.ResNet50V2` as the
visual encoder, resizing the images into 255 x 255 inputs would lead to more accurate results
but require a longer time to train.
"""
data_preprocessing = keras.Sequential(
[
layers.experimental.preprocessing.Resizing(target_size, target_size),
layers.experimental.preprocessing.Normalization(),
]
)
# Compute the mean and the variance from the data for normalization.
data_preprocessing.layers[-1].adapt(x_data)
"""
## Data augmentation
Unlike simCLR, which randomly picks a single data augmentation function to apply to an input
image, we apply a set of data augmentation functions randomly to the input image.
(You can experiment with other image augmentation techniques by following
the [data augmentation tutorial](https://www.tensorflow.org/tutorials/images/data_augmentation).)
"""
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomTranslation(
height_factor=(-0.2, 0.2), width_factor=(-0.2, 0.2), fill_mode="nearest"
),
layers.experimental.preprocessing.RandomFlip(mode="horizontal"),
layers.experimental.preprocessing.RandomRotation(
factor=0.15, fill_mode="nearest"
),
layers.experimental.preprocessing.RandomZoom(
height_factor=(-0.3, 0.1), width_factor=(-0.3, 0.1), fill_mode="nearest"
),
]
)
"""
Display a random image
"""
image_idx = np.random.choice(range(x_data.shape[0]))
image = x_data[image_idx]
image_class = classes[y_data[image_idx][0]]
plt.figure(figsize=(3, 3))
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(image_class)
_ = plt.axis("off")
"""
Display a sample of augmented versions of the image
"""
plt.figure(figsize=(10, 10))
for i in range(9):
augmented_images = data_augmentation(np.array([image]))
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
"""
## Self-supervised representation learning
"""
"""
### Implement the vision encoder
"""
def create_encoder(representation_dim):
encoder = keras.Sequential(
[
keras.applications.ResNet50V2(
include_top=False, weights=None, pooling="avg"
),
layers.Dense(representation_dim),
]
)
return encoder
"""
### Implement the unsupervised contrastive loss
"""
class RepresentationLearner(keras.Model):
def __init__(
self,
encoder,
projection_units,
num_augmentations,
temperature=1.0,
dropout_rate=0.1,
l2_normalize=False,
**kwargs
):
super(RepresentationLearner, self).__init__(**kwargs)
self.encoder = encoder
# Create projection head.
self.projector = keras.Sequential(
[
layers.Dropout(dropout_rate),
layers.Dense(units=projection_units, use_bias=False),
layers.BatchNormalization(),
layers.ReLU(),
]
)
self.num_augmentations = num_augmentations
self.temperature = temperature
self.l2_normalize = l2_normalize
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def compute_contrastive_loss(self, feature_vectors, batch_size):
num_augmentations = tf.shape(feature_vectors)[0] // batch_size
if self.l2_normalize:
feature_vectors = tf.math.l2_normalize(feature_vectors, -1)
# The logits shape is [num_augmentations * batch_size, num_augmentations * batch_size].
logits = (
tf.linalg.matmul(feature_vectors, feature_vectors, transpose_b=True)
/ self.temperature
)
        # Subtract each row's max logit for numerical stability
        # (a constant shift per row does not change the softmax).
        logits_max = tf.math.reduce_max(logits, axis=1, keepdims=True)
        logits = logits - logits_max
# The shape of targets is [num_augmentations * batch_size, num_augmentations * batch_size].
        # targets is a grid of num_augmentations x num_augmentations submatrices of shape [batch_size, batch_size].
        # Each [batch_size, batch_size] submatrix is an identity matrix (diagonal entries are ones).
targets = tf.tile(tf.eye(batch_size), [num_augmentations, num_augmentations])
# Compute cross entropy loss
return keras.losses.categorical_crossentropy(
y_true=targets, y_pred=logits, from_logits=True
)
def call(self, inputs):
# Preprocess the input images.
preprocessed = data_preprocessing(inputs)
# Create augmented versions of the images.
augmented = []
for _ in range(self.num_augmentations):
augmented.append(data_augmentation(preprocessed))
augmented = layers.Concatenate(axis=0)(augmented)
# Generate embedding representations of the images.
features = self.encoder(augmented)
# Apply projection head.
return self.projector(features)
def train_step(self, inputs):
batch_size = tf.shape(inputs)[0]
# Run the forward pass and compute the contrastive loss
with tf.GradientTape() as tape:
feature_vectors = self(inputs, training=True)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update loss tracker metric
self.loss_tracker.update_state(loss)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
def test_step(self, inputs):
batch_size = tf.shape(inputs)[0]
feature_vectors = self(inputs, training=False)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
"""
### Train the model
"""
# Create vision encoder.
encoder = create_encoder(representation_dim)
# Create representation learner.
representation_learner = RepresentationLearner(
encoder, projection_units, num_augmentations=2, temperature=0.1
)
# Create a cosine decay learning rate scheduler.
lr_scheduler = keras.experimental.CosineDecay(
initial_learning_rate=0.001, decay_steps=500, alpha=0.1
)
# Compile the model.
representation_learner.compile(
optimizer=tfa.optimizers.AdamW(learning_rate=lr_scheduler, weight_decay=0.0001),
)
# Fit the model.
history = representation_learner.fit(
x=x_data,
batch_size=512,
epochs=50, # for better results, increase the number of epochs to 500.
)
"""
Plot training loss
"""
plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
"""
## Compute the nearest neighbors
"""
"""
### Generate the embeddings for the images
"""
batch_size = 500
# Get the feature vector representations of the images.
feature_vectors = encoder.predict(x_data, batch_size=batch_size, verbose=1)
# Normalize the feature vectors.
feature_vectors = tf.math.l2_normalize(feature_vectors, -1)
"""
### Find the *k* nearest neighbours for each embedding
"""
neighbours = []
num_batches = feature_vectors.shape[0] // batch_size
for batch_idx in tqdm(range(num_batches)):
start_idx = batch_idx * batch_size
end_idx = start_idx + batch_size
current_batch = feature_vectors[start_idx:end_idx]
# Compute the dot similarity.
similarities = tf.linalg.matmul(current_batch, feature_vectors, transpose_b=True)
# Get the indices of most similar vectors.
_, indices = tf.math.top_k(similarities, k=k_neighbours + 1, sorted=True)
# Add the indices to the neighbours.
neighbours.append(indices[..., 1:])
neighbours = np.reshape(np.array(neighbours), (-1, k_neighbours))
"""
Let's display some neighbors on each row
"""
nrows = 4
ncols = k_neighbours + 1
plt.figure(figsize=(12, 12))
position = 1
for _ in range(nrows):
anchor_idx = np.random.choice(range(x_data.shape[0]))
    neighbour_indices = neighbours[anchor_idx]
    indices = [anchor_idx] + neighbour_indices.tolist()
for j in range(ncols):
plt.subplot(nrows, ncols, position)
plt.imshow(x_data[indices[j]].astype("uint8"))
plt.title(classes[y_data[indices[j]][0]])
plt.axis("off")
position += 1
"""
Notice that the images in each row are visually similar and belong to similar classes.
"""
"""
## Semantic clustering with nearest neighbours
"""
"""
### Implement clustering consistency loss
This loss encourages neighbouring images to receive the same cluster assignment.
"""
class ClustersConsistencyLoss(keras.losses.Loss):
def __init__(self):
super(ClustersConsistencyLoss, self).__init__()
def __call__(self, target, similarity, sample_weight=None):
# Set targets to be ones.
target = tf.ones_like(similarity)
# Compute cross entropy loss.
loss = keras.losses.binary_crossentropy(
y_true=target, y_pred=similarity, from_logits=True
)
return tf.math.reduce_mean(loss)
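"""
A minimal illustration (not part of the original pipeline): the similarity fed
to this loss is the dot product of two cluster-probability vectors, which is
close to 1 only when both images are confidently assigned to the same cluster,
which is exactly what the all-ones target rewards.
"""
anchor_probs = tf.constant([[0.9, 0.1]])
neighbour_probs = tf.constant([[0.8, 0.2]])
toy_similarity = tf.reduce_sum(anchor_probs * neighbour_probs, axis=-1)  # 0.74
print(ClustersConsistencyLoss()(None, toy_similarity).numpy())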
"""
### Implement the clusters entropy loss
This loss encourages the overall cluster distribution to be roughly uniform, to avoid
assigning most of the instances to one cluster.
"""
class ClustersEntropyLoss(keras.losses.Loss):
def __init__(self, entropy_loss_weight=1.0):
super(ClustersEntropyLoss, self).__init__()
self.entropy_loss_weight = entropy_loss_weight
def __call__(self, target, cluster_probabilities, sample_weight=None):
# Ideal entropy = log(num_clusters).
num_clusters = tf.cast(tf.shape(cluster_probabilities)[-1], tf.dtypes.float32)
target = tf.math.log(num_clusters)
# Compute the overall clusters distribution.
cluster_probabilities = tf.math.reduce_mean(cluster_probabilities, axis=0)
# Replacing zero probabilities - if any - with a very small value.
cluster_probabilities = tf.clip_by_value(
cluster_probabilities, clip_value_min=1e-8, clip_value_max=1.0
)
# Compute the entropy over the clusters.
entropy = -tf.math.reduce_sum(
cluster_probabilities * tf.math.log(cluster_probabilities)
)
        # Compute the weighted difference between the target and the actual entropy.
        loss = self.entropy_loss_weight * (target - entropy)
        return loss
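"""
A worked example (illustrative only): with 20 clusters the ideal entropy is
log(20) ≈ 3.0, so a perfectly uniform batch-level cluster distribution drives
this loss to zero.
"""
uniform_probs = tf.fill([4, 20], 1.0 / 20)  # a toy batch of uniform assignments
print(ClustersEntropyLoss()(None, uniform_probs).numpy())  # ~0.0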
"""
### Implement clustering model
This model takes a raw image as input, generates its feature vector using the trained
encoder, and produces a probability distribution over the clusters, which serves as the
cluster assignment.
"""
def create_clustering_model(encoder, num_clusters, name=None):
inputs = keras.Input(shape=input_shape)
# Preprocess the input images.
preprocessed = data_preprocessing(inputs)
# Apply data augmentation to the images.
augmented = data_augmentation(preprocessed)
# Generate embedding representations of the images.
features = encoder(augmented)
# Assign the images to clusters.
outputs = layers.Dense(units=num_clusters, activation="softmax")(features)
# Create the model.
model = keras.Model(inputs=inputs, outputs=outputs, name=name)
return model
"""
### Implement clustering learner
This model receives the input `anchor` image and its `neighbours`, produces the clusters
assignments for them using the `clustering_model`, and produces two outputs:
1. `similarity`: the similarity between the cluster assignments of the `anchor` image and
its `neighbours`. This output is fed to the `ClustersConsistencyLoss`.
2. `anchor_clustering`: cluster assignments of the `anchor` images. This is fed to the `ClustersEntropyLoss`.
"""
def create_clustering_learner(clustering_model):
anchor = keras.Input(shape=input_shape, name="anchors")
neighbours = keras.Input(
shape=tuple([k_neighbours]) + input_shape, name="neighbours"
)
# Changes neighbours shape to [batch_size * k_neighbours, width, height, channels]
neighbours_reshaped = tf.reshape(neighbours, shape=tuple([-1]) + input_shape)
# anchor_clustering shape: [batch_size, num_clusters]
anchor_clustering = clustering_model(anchor)
# neighbours_clustering shape: [batch_size * k_neighbours, num_clusters]
neighbours_clustering = clustering_model(neighbours_reshaped)
# Convert neighbours_clustering shape to [batch_size, k_neighbours, num_clusters]
neighbours_clustering = tf.reshape(
neighbours_clustering,
shape=(-1, k_neighbours, tf.shape(neighbours_clustering)[-1]),
)
# similarity shape: [batch_size, 1, k_neighbours]
similarity = tf.linalg.einsum(
"bij,bkj->bik", tf.expand_dims(anchor_clustering, axis=1), neighbours_clustering
)
# similarity shape: [batch_size, k_neighbours]
similarity = layers.Lambda(lambda x: tf.squeeze(x, axis=1), name="similarity")(
similarity
)
# Create the model.
model = keras.Model(
inputs=[anchor, neighbours],
outputs=[similarity, anchor_clustering],
name="clustering_learner",
)
return model
"""
### Train model
"""
# If tune_encoder_during_clustering is set to False,
# then freeze the encoder weights.
for layer in encoder.layers:
layer.trainable = tune_encoder_during_clustering
# Create the clustering model and learner.
clustering_model = create_clustering_model(encoder, num_clusters, name="clustering")
clustering_learner = create_clustering_learner(clustering_model)
# Instantiate the model losses.
losses = [ClustersConsistencyLoss(), ClustersEntropyLoss(entropy_loss_weight=5)]
# Create the model inputs and labels.
inputs = {"anchors": x_data, "neighbours": tf.gather(x_data, neighbours)}
labels = tf.ones(shape=(x_data.shape[0]))
# Compile the model.
clustering_learner.compile(
optimizer=tfa.optimizers.AdamW(learning_rate=0.0005, weight_decay=0.0001),
loss=losses,
)
# Begin training the model.
history = clustering_learner.fit(x=inputs, y=labels, batch_size=512, epochs=50)
"""
Plot training loss
"""
plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
"""
## Cluster analysis
"""
"""
### Assign images to clusters
"""
# Get the cluster probability distribution of the input images.
clustering_probs = clustering_model.predict(x_data, batch_size=batch_size, verbose=1)
# Get the cluster of the highest probability.
cluster_assignments = tf.math.argmax(clustering_probs, axis=-1).numpy()
# Store the clustering confidence.
# Images with the highest clustering confidence are considered the 'prototypes'
# of the clusters.
cluster_confidence = tf.math.reduce_max(clustering_probs, axis=-1).numpy()
"""
Let's compute the cluster sizes
"""
clusters = defaultdict(list)
for idx, c in enumerate(cluster_assignments):
clusters[c].append((idx, cluster_confidence[idx]))
for c in range(num_clusters):
print("cluster", c, ":", len(clusters[c]))
"""
Notice that the clusters have roughly balanced sizes.
"""
"""
### Visualize cluster images
Display the *prototypes*—instances with the highest clustering confidence—of each cluster:
"""
num_images = 8
plt.figure(figsize=(15, 15))
position = 1
for c in range(num_clusters):
cluster_instances = sorted(clusters[c], key=lambda kv: kv[1], reverse=True)
for j in range(num_images):
image_idx = cluster_instances[j][0]
plt.subplot(num_clusters, num_images, position)
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(classes[y_data[image_idx][0]])
plt.axis("off")
position += 1
"""
### Compute clustering accuracy
First, we assign a label to each cluster based on the majority label of its images.
Then, we compute the accuracy of each cluster by dividing the number of images with the
majority label by the size of the cluster.
"""
cluster_label_counts = dict()
for c in range(num_clusters):
cluster_label_counts[c] = [0] * num_classes
instances = clusters[c]
for i, _ in instances:
cluster_label_counts[c][y_data[i][0]] += 1
cluster_label_idx = np.argmax(cluster_label_counts[c])
correct_count = np.max(cluster_label_counts[c])
cluster_size = len(clusters[c])
accuracy = (
np.round((correct_count / cluster_size) * 100, 2) if cluster_size > 0 else 0
)
cluster_label = classes[cluster_label_idx]
print("cluster", c, "label is:", cluster_label, " - accuracy:", accuracy, "%")
"""
## Conclusion
To improve the accuracy results, you can: 1) increase the number
of epochs in the representation learning and the clustering phases; 2)
allow the encoder weights to be tuned during the clustering phase; and 3) perform a final
fine-tuning step through self-labeling, as described in the [original SCAN paper](https://arxiv.org/abs/2005.12320).
Note that unsupervised image clustering techniques are not expected to outperform
supervised image classification techniques; rather, the goal is to show that they can learn
the semantics of the images and group them into clusters that are similar to their original classes.
"""
There are lots of benefits associated with having the best heat pump Greenville, SC has to offer installed in your home, including the fact that you will be able to keep the inside of your house warm while saving money at the same time. A heat pump can be an excellent investment to make, especially if you want to stay warm during the cold months of the year. While it is true that there are many different ways to stay warm, a heat pump is definitely one of the more effective methods of doing so.
A quality Greenville heat pump will be able to provide your entire home with an adequate amount of heat so you don't have to shiver through the winter in your own home. It is important that you get an energy-efficient heat pump so you will be able to save as much money as possible on your utility bills. A ductless heat pump in Greenville, SC can make all the difference when it comes to staying comfortable in your house when it is cold outside. This type of heat pump is very energy-efficient and can save you a lot of money on your monthly bills.
#!/usr/bin/python
from os import environ
import sys
from config import es as default_es
from pprint import pprint
def get_documents(terms, term_field, fields=["text"], es_index='memex', es_doc_type='page', es=None):
if es is None:
es = default_es
results = {}
if len(terms) > 0:
for term in terms:
query = {
"query": {
"term": {
term_field: term
}
},
"fields": fields
}
res = es.search(body=query,
index=es_index,
doc_type=es_doc_type)
if res['hits']['hits']:
hits = res['hits']['hits']
records = []
for hit in hits:
record = {}
if not hit.get('fields') is None:
record = hit['fields']
                    record['id'] = hit['_id']
records.append(record)
results[term] = records
return results
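# Illustrative usage sketch (assumes pages were indexed with a "url" term
# field; adjust the field name to your own mapping):
#
#   results = get_documents(["http://example.com/page"], "url", fields=["text"])
#   for term, records in results.items():
#       print term, "->", len(records), "hits"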
def get_more_like_this(urls, fields=[], pageCount=200, es_index='memex', es_doc_type='page', es=None):
if es is None:
es = default_es
docs = [{"_index": es_index, "_type": es_doc_type, "_id": url} for url in urls]
with open(environ['DD_API_HOME']+'/elastic/stopwords.txt', 'r') as f:
stopwords = [word.strip() for word in f.readlines()]
query = {
"query":{
"more_like_this": {
"fields" : ["text"],
"docs": docs,
"min_term_freq": 1,
"stop_words": stopwords
}
},
"fields": fields,
"size": pageCount
}
res = es.search(body=query, index = es_index, doc_type = es_doc_type)
hits = res['hits']['hits']
results = []
for hit in hits:
fields = hit['fields']
fields['id'] = hit['_id']
fields['score'] = hit['_score']
results.append(fields)
return results
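# Illustrative usage sketch: fetch up to 50 pages similar to two seed URLs
# (the seed documents must already exist in the index):
#
#   similar = get_more_like_this(["http://example.com/a", "http://example.com/b"],
#                                fields=["url"], pageCount=50)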
def get_most_recent_documents(start=0, opt_maxNumberOfPages = 200, mapping=None, fields = [], opt_filter = None, es_index = 'memex', es_doc_type = 'page', es = None):
if mapping == None:
print "No mappings found"
return []
if es is None:
es = default_es
query = {
"size": opt_maxNumberOfPages,
"sort": [
{
mapping["timestamp"]: {
"order": "desc"
}
}
]
}
match_q = {
"match_all": {}
}
if not mapping.get("content_type") is None:
match_q = {
"match": {
mapping["content_type"]: "text/html"
}
}
if opt_filter is None:
query["query"] = {
"filtered": {
"query": match_q,
"filter":{
"exists": {
"field": mapping['text']
}
}
}
}
else:
query["query"] = {
"query_string": {
"query": "(" + mapping['text'] + ":" + opt_filter.replace('"', '\"') + ")"
}
}
if len(fields) > 0:
query["fields"] = fields
res = es.search(body=query, index = es_index, doc_type = es_doc_type, from_=start, request_timeout=600)
hits = res['hits']['hits']
results = []
for hit in hits:
fields = hit['fields']
fields['id'] = hit['_id']
results.append(fields)
return {"total": res['hits']['total'], 'results':results}
def get_all_ids(pageCount = 100000, fields=[], es_index = 'memex', es_doc_type = 'page', es = None):
if es is None:
es = default_es
query = {
"query": {
"match_all": {}
},
"fields": fields
}
try:
res = es.search(body=query, index = es_index, doc_type = es_doc_type, size = pageCount, request_timeout=600)
hits = res['hits']['hits']
results = []
for hit in hits:
fields = hit['fields']
fields['id'] = hit['_id']
results.append(fields)
return results
except:
print("Unexpected error:", sys.exc_info()[0])
print es_index
return []
def get_documents_by_id(ids=[], fields=[], es_index = 'memex', es_doc_type = 'page', es = None):
if es is None:
es = default_es
query = {
"query": {
"ids": {
"values": ids
}
},
"fields": fields
}
res = es.search(body=query, index = es_index, doc_type = es_doc_type, size=len(ids), request_timeout=30)
hits = res['hits']['hits']
results = []
for hit in hits:
if hit.get('fields'):
fields = hit['fields']
fields['id'] = hit['_id']
results.append(fields)
return results
def get_plotting_data(pageCount=200, es_index = 'memex', es_doc_type = 'page', es = None):
if es is None:
es = default_es
res = es.search(index=es_index, doc_type = es_doc_type, size=pageCount, fields=["retrieved", "url", "tag", "query"])
fields = []
for item in res['hits']['hits']:
if item['fields'].get('tag') != None:
if "" in item['fields']['tag']:
item['fields'].pop('tag')
fields.append(item['fields'])
return fields
if __name__ == "__main__":
urls = []
with open(environ['MEMEX_HOME']+'/seed_crawler/seeds_generator/results.txt', 'r') as f:
urls = f.readlines()
urls = [url.strip() for url in urls]
    docs = get_documents(urls, 'url')  # assumes pages were indexed with a 'url' term field
In 2005 Istanbul decided to bid for the title of "European Capital of Culture" for 2010. It impressed the jury with an inspired presentation and easily beat its main rival, Kiev.
Istanbul's initial optimism was certainly reinforced by the positive impact of Turkey being in the process of becoming part of the EU. Although in 2001 the country was plunged into a severe economic crisis, by 2005 it had already found its way to a spectacular recovery. If it became the "Cultural Capital of Europe", Istanbul, and by extension Turkey, could boast that it was not only a member of the western "European club" but also a key crossroads in the region. The true objectives of "Istanbul 2010" were not just cultural or touristic. They were political, ideological and extremely ambitious.
# -*- coding:utf-8 -*-
"""
/***************************************************************************
Python Console for QGIS
-------------------
begin : 2012-09-10
copyright : (C) 2012 by Salvatore Larosa
email : lrssvtml (at) gmail (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Some portions of code were taken from https://code.google.com/p/pydee/
"""
from qgis.PyQt.QtCore import QCoreApplication, QUrl
from qgis.PyQt.QtWidgets import QWidget, QFileDialog, QMessageBox, QTableWidgetItem, QHBoxLayout
from qgis.PyQt.QtGui import QIcon, QDesktopServices
from qgis.core import QgsSettings, QgsApplication
from qgis.gui import QgsOptionsPageWidget, QgsOptionsWidgetFactory
from .console_compile_apis import PrepareAPIDialog
from .ui_console_settings import Ui_SettingsDialogPythonConsole
class ConsoleOptionsFactory(QgsOptionsWidgetFactory):
def __init__(self):
        super(ConsoleOptionsFactory, self).__init__()
def icon(self):
return QgsApplication.getThemeIcon('/console/mIconRunConsole.svg')
def createWidget(self, parent):
return ConsoleOptionsPage(parent)
class ConsoleOptionsPage(QgsOptionsPageWidget):
def __init__(self, parent):
super(ConsoleOptionsPage, self).__init__(parent)
self.options_widget = ConsoleOptionsWidget(parent)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setMargin(0)
self.setLayout(layout)
layout.addWidget(self.options_widget)
self.setObjectName('consoleOptions')
def apply(self):
self.options_widget.accept()
def helpKey(self):
return 'plugins/python_console.html'
class ConsoleOptionsWidget(QWidget, Ui_SettingsDialogPythonConsole):
def __init__(self, parent):
super().__init__(parent)
self.setWindowTitle(QCoreApplication.translate(
"SettingsDialogPythonConsole", "Python Console Settings"))
self.parent = parent
self.setupUi(self)
self.listPath = []
self.lineEdit.setReadOnly(True)
self.restoreSettings()
self.initialCheck()
self.addAPIpath.setIcon(QIcon(":/images/themes/default/symbologyAdd.svg"))
self.addAPIpath.setToolTip(QCoreApplication.translate("PythonConsole", "Add API path"))
self.removeAPIpath.setIcon(QIcon(":/images/themes/default/symbologyRemove.svg"))
self.removeAPIpath.setToolTip(QCoreApplication.translate("PythonConsole", "Remove API path"))
self.preloadAPI.stateChanged.connect(self.initialCheck)
self.addAPIpath.clicked.connect(self.loadAPIFile)
self.removeAPIpath.clicked.connect(self.removeAPI)
self.compileAPIs.clicked.connect(self._prepareAPI)
self.generateToken.clicked.connect(self.generateGHToken)
def generateGHToken(self):
description = self.tr("PyQGIS Console")
url = 'https://github.com/settings/tokens/new?description={}&scopes=gist'.format(description)
QDesktopServices.openUrl(QUrl(url))
def initialCheck(self):
if self.preloadAPI.isChecked():
self.enableDisable(False)
else:
self.enableDisable(True)
def enableDisable(self, value):
self.tableWidget.setEnabled(value)
self.addAPIpath.setEnabled(value)
self.removeAPIpath.setEnabled(value)
self.groupBoxPreparedAPI.setEnabled(value)
def loadAPIFile(self):
settings = QgsSettings()
lastDirPath = settings.value("pythonConsole/lastDirAPIPath", "", type=str)
fileAPI, selected_filter = QFileDialog.getOpenFileName(
self, "Open API File", lastDirPath, "API file (*.api)")
if fileAPI:
self.addAPI(fileAPI)
settings.setValue("pythonConsole/lastDirAPIPath", fileAPI)
def _prepareAPI(self):
if self.tableWidget.rowCount() != 0:
pap_file, filter = QFileDialog().getSaveFileName(
self,
"",
'*.pap',
"Prepared APIs file (*.pap)")
else:
QMessageBox.information(
self, self.tr("Warning!"),
                self.tr('You need to add some API files in order to compile them'))
return
if pap_file:
api_lexer = 'QsciLexerPython'
api_files = []
count = self.tableWidget.rowCount()
for i in range(0, count):
api_files.append(self.tableWidget.item(i, 1).text())
api_dlg = PrepareAPIDialog(api_lexer, api_files, pap_file, self)
api_dlg.show()
api_dlg.activateWindow()
api_dlg.raise_()
api_dlg.prepareAPI()
self.lineEdit.setText(pap_file)
def accept(self):
if not self.preloadAPI.isChecked() and \
not self.groupBoxPreparedAPI.isChecked():
if self.tableWidget.rowCount() == 0:
QMessageBox.information(
self, self.tr("Warning!"),
self.tr('Please specify API file or check "Use preloaded API files"'))
return
if self.groupBoxPreparedAPI.isChecked() and \
not self.lineEdit.text():
QMessageBox.information(
self, self.tr("Warning!"),
QCoreApplication.translate('optionsDialog', 'The APIs file was not compiled, click on "Compile APIs…"')
)
return
self.saveSettings()
self.listPath = []
def addAPI(self, pathAPI):
count = self.tableWidget.rowCount()
self.tableWidget.setColumnCount(2)
self.tableWidget.insertRow(count)
pathItem = QTableWidgetItem(pathAPI)
pathSplit = pathAPI.split("/")
apiName = pathSplit[-1][0:-4]
apiNameItem = QTableWidgetItem(apiName)
self.tableWidget.setItem(count, 0, apiNameItem)
self.tableWidget.setItem(count, 1, pathItem)
def removeAPI(self):
listItemSel = self.tableWidget.selectionModel().selectedRows()
for index in reversed(listItemSel):
self.tableWidget.removeRow(index.row())
def saveSettings(self):
settings = QgsSettings()
settings.setValue("pythonConsole/preloadAPI", self.preloadAPI.isChecked())
settings.setValue("pythonConsole/autoSaveScript", self.autoSaveScript.isChecked())
settings.setValue("pythonConsole/accessTokenGithub", self.tokenGhLineEdit.text())
for i in range(0, self.tableWidget.rowCount()):
text = self.tableWidget.item(i, 1).text()
self.listPath.append(text)
settings.setValue("pythonConsole/userAPI", self.listPath)
settings.setValue("pythonConsole/autoCompThreshold", self.autoCompThreshold.value())
settings.setValue("pythonConsole/autoCompleteEnabled", self.groupBoxAutoCompletion.isChecked())
settings.setValue("pythonConsole/usePreparedAPIFile", self.groupBoxPreparedAPI.isChecked())
settings.setValue("pythonConsole/preparedAPIFile", self.lineEdit.text())
if self.autoCompFromAPI.isChecked():
settings.setValue("pythonConsole/autoCompleteSource", 'fromAPI')
elif self.autoCompFromDoc.isChecked():
settings.setValue("pythonConsole/autoCompleteSource", 'fromDoc')
elif self.autoCompFromDocAPI.isChecked():
settings.setValue("pythonConsole/autoCompleteSource", 'fromDocAPI')
settings.setValue("pythonConsole/enableObjectInsp", self.enableObjectInspector.isChecked())
settings.setValue("pythonConsole/autoCloseBracket", self.autoCloseBracket.isChecked())
settings.setValue("pythonConsole/autoInsertionImport", self.autoInsertionImport.isChecked())
def restoreSettings(self):
settings = QgsSettings()
self.preloadAPI.setChecked(settings.value("pythonConsole/preloadAPI", True, type=bool))
self.lineEdit.setText(settings.value("pythonConsole/preparedAPIFile", "", type=str))
self.tokenGhLineEdit.setText(settings.value("pythonConsole/accessTokenGithub", "", type=str))
itemTable = settings.value("pythonConsole/userAPI", [])
if itemTable:
self.tableWidget.setRowCount(0)
for i in range(len(itemTable)):
self.tableWidget.insertRow(i)
self.tableWidget.setColumnCount(2)
pathSplit = itemTable[i].split("/")
apiName = pathSplit[-1][0:-4]
self.tableWidget.setItem(i, 0, QTableWidgetItem(apiName))
self.tableWidget.setItem(i, 1, QTableWidgetItem(itemTable[i]))
self.autoSaveScript.setChecked(settings.value("pythonConsole/autoSaveScript", False, type=bool))
self.autoCompThreshold.setValue(settings.value("pythonConsole/autoCompThreshold", 2, type=int))
self.groupBoxAutoCompletion.setChecked(settings.value("pythonConsole/autoCompleteEnabled", True, type=bool))
self.enableObjectInspector.setChecked(settings.value("pythonConsole/enableObjectInsp", False, type=bool))
self.autoCloseBracket.setChecked(settings.value("pythonConsole/autoCloseBracket", False, type=bool))
self.autoInsertionImport.setChecked(settings.value("pythonConsole/autoInsertionImport", True, type=bool))
if settings.value("pythonConsole/autoCompleteSource") == 'fromDoc':
self.autoCompFromDoc.setChecked(True)
elif settings.value("pythonConsole/autoCompleteSource") == 'fromAPI':
self.autoCompFromAPI.setChecked(True)
elif settings.value("pythonConsole/autoCompleteSource") == 'fromDocAPI':
self.autoCompFromDocAPI.setChecked(True)
The discovery of a DVD factory in Eltham yesterday, Tuesday 8 January, will make a significant dent in the market for counterfeit DVDs across London, says Greenwich Police.
According to FACT (Federation Against Copyright Theft) it is the largest DVD factory discovered in the past 18 months.
Greenwich officers were called to 18 Ryelands Crescent, Eltham, SE12, at 8.00 am today by officers from another borough who had called there on arrest enquiries.
At the scene, they found around 65,000 counterfeit DVDs ready to be dispatched and sold on the streets; 21 copying ‘towers’ containing 210 DVD multi-burner trays; and thousands of DVD sleeves and film labels in two bedrooms at the house.
In the garage, they found around 30,000 blank DVDs stored in boxes.
The equipment found in Ryelands Crescent was able to produce 800 DVDs per hour. The product cost of each DVD is about 20p but on the streets, they can sell for around £3.00 each.
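At that rate, a single hour's output costs about £160 in blank discs but is worth roughly £2,400 at street prices.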
Seven people, five men and two women, all in their 20s, were arrested on suspicion of theft of copyrights and taken to south London police stations.
Detective Superintendent Janice McClean said: “The discovery of this DVD factory will cause serious disruption to the supply of counterfeit DVDs across London. The production of illegal DVDs is often funded by international criminal networks and can lead to further crimes such as drugs supply and prostitution being introduced to the area.
"""
Schema for validating and sanitizing data received from the JavaScript client.
"""
import dateutil.parser
from pytz import utc
from voluptuous import All, Any, In, Invalid, Range, Required, Schema
def utf8_validator(value):
"""Validate and sanitize unicode strings.
If we're given a bytestring, assume that the encoding is UTF-8
Args:
value: The value to validate
Returns:
unicode
Raises:
Invalid
"""
try:
if isinstance(value, str):
return value.decode('utf-8')
else:
return unicode(value)
except (ValueError, TypeError):
raise Invalid(u"Could not load unicode from value \"{val}\"".format(val=value))
def datetime_validator(value):
"""Validate and sanitize a datetime string in ISO format.
Args:
value: The value to validate
Returns:
unicode: ISO-formatted datetime string
Raises:
Invalid
"""
try:
# The dateutil parser defaults empty values to the current day,
# which is NOT what we want.
if value is None or value == '':
raise Invalid(u"Datetime value cannot be \"{val}\"".format(val=value))
# Parse the date and interpret it as UTC
value = dateutil.parser.parse(value).replace(tzinfo=utc)
return unicode(value.isoformat())
except (ValueError, TypeError):
raise Invalid(u"Could not parse datetime from value \"{val}\"".format(val=value))
PROMPTS_TYPES = [
u'text',
u'html',
]
NECESSITY_OPTIONS = [
u'required',
u'optional',
u''
]
VALID_ASSESSMENT_TYPES = [
u'peer-assessment',
u'self-assessment',
u'student-training',
u'staff-assessment',
]
VALID_UPLOAD_FILE_TYPES = [
u'image',
u'pdf-and-image',
u'custom'
]
# Schema definition for an update from the Studio JavaScript editor.
EDITOR_UPDATE_SCHEMA = Schema({
Required('prompts'): [
Schema({
Required('description'): utf8_validator,
})
],
Required('prompts_type', default='text'): Any(All(utf8_validator, In(PROMPTS_TYPES)), None),
Required('title'): utf8_validator,
Required('feedback_prompt'): utf8_validator,
Required('feedback_default_text'): utf8_validator,
Required('submission_start'): Any(datetime_validator, None),
Required('submission_due'): Any(datetime_validator, None),
Required('text_response', default='required'): Any(All(utf8_validator, In(NECESSITY_OPTIONS)), None),
Required('file_upload_response', default=None): Any(All(utf8_validator, In(NECESSITY_OPTIONS)), None),
'allow_file_upload': bool, # Backwards compatibility.
Required('file_upload_type', default=None): Any(All(utf8_validator, In(VALID_UPLOAD_FILE_TYPES)), None),
'white_listed_file_types': utf8_validator,
Required('allow_latex'): bool,
Required('leaderboard_show'): int,
Required('assessments'): [
Schema({
Required('name'): All(utf8_validator, In(VALID_ASSESSMENT_TYPES)),
Required('start', default=None): Any(datetime_validator, None),
Required('due', default=None): Any(datetime_validator, None),
'required': bool,
'must_grade': All(int, Range(min=0)),
'must_be_graded_by': All(int, Range(min=0)),
'track_changes': utf8_validator,
'examples': [
Schema({
Required('answer'): [utf8_validator],
Required('options_selected'): [
Schema({
Required('criterion'): utf8_validator,
Required('option'): utf8_validator
})
]
})
],
'examples_xml': utf8_validator,
})
],
Required('editor_assessments_order'): [
All(utf8_validator, In(VALID_ASSESSMENT_TYPES))
],
Required('feedbackprompt', default=u""): utf8_validator,
Required('criteria'): [
Schema({
Required('order_num'): All(int, Range(min=0)),
Required('name'): utf8_validator,
Required('label'): utf8_validator,
Required('prompt'): utf8_validator,
Required('feedback'): All(
utf8_validator,
In([
'disabled',
'optional',
'required',
])
),
Required('options'): [
Schema({
Required('order_num'): All(int, Range(min=0)),
Required('name'): utf8_validator,
Required('label'): utf8_validator,
Required('explanation'): utf8_validator,
Required('points'): All(int, Range(min=0)),
})
]
})
]
})
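# Illustrative usage sketch: a voluptuous Schema is callable, so a payload from
# the editor (here a hypothetical `payload` dict) can be validated like this:
#
#   from voluptuous import MultipleInvalid
#   try:
#       clean = EDITOR_UPDATE_SCHEMA(payload)
#   except MultipleInvalid as ex:
#       errors = [unicode(err) for err in ex.errors]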
"After five years of silence, one of Sweden’s most legendary death metal bands returns with the mighty new album "Death Is Not Dead". Their 2015 opus marks The Crown’s 10th studio album and is a perfect sonic celebration of the band's 25th anniversary. Expect incredibly intense tracks full of raging riffs, thrashing speed and the devastating voice of Johan Lindstrand, The Crown’s original singer! SPEED KILLS!" |
#!/usr/bin/python
# system
from collections import defaultdict
from functools import wraps
import pdb
import pprint
import re
import sys
import time
import traceback
# pypi
from splinter import Browser
from treelib import Tree
# local
import user as userdata
import list_to_tree
pp = pprint.PrettyPrinter(indent=4)
base_url = 'http://www.karatbars.com'
action_path = dict(
login = "index.php?page=login_1",
binary = "members.php?page=binarytree"
)
def url_for_action(action):
return "{0}/{1}".format(base_url,action_path[action])
def try_method(fn):
@wraps(fn)
def wrapper(self):
try:
return fn(self)
except:
            print traceback.format_exc()
            self.visit_binary()  # fall back to the binary-tree page (no visit_auction method exists)
return wrapper
class Entry(object):
def __init__(self, user, browser):
self.user=user
self.browser=browser
def login(self):
print "Logging in..."
self.browser.visit(url_for_action('login'))
self.browser.fill('username', self.user['username'])
self.browser.fill('password', self.user['password'])
button = self.browser.find_by_id('btn_login')
button.click()
def visit_binary(self):
self.browser.visit(url_for_action('binary'))
tree = Tree()
while True:
users = self.browser.find_by_css('.binary_text')
users = [u.text for u in users]
l = list_to_tree.ListToTree(users)
l.show()
sleep_time = 5
print "\tSleeping for", sleep_time, "seconds"
time.sleep(sleep_time)
def main(bid_url=None):  # bid_url comes from the command line but is not used yet
with Browser() as browser:
for user in userdata.users:
e = Entry(user, browser)
e.login()
e.visit_binary()
while True: pass
if __name__ == '__main__':
if len(sys.argv) == 2:
bid_url = sys.argv[1]
else:
bid_url = None
main(bid_url)
Battle Beasts and fight Evil with Tom and Elenna in the bestselling adventure series for boys and girls aged 7 and up! King Hugo's brother Prince Angelo, long believed dead, has reappeared and seized power.
Banished from their kingdom, Tom and Elenna must uncover the truth about Prince Angelo ... but there are new and deadly Beasts on the loose. How can they defeat Torka the three-headed dragon? There are FOUR thrilling adventures to collect in this series - don't miss out!
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from twisted.python import log
from lxml import etree
from requests import Response
##########################
##########################
##
## each QUERY gets its own little class.
## this is important to keep modularity
##
##########################
class Parser_RPC_Base(object):
""" this wrapper class can provide generic functionality for the
individual API Parser classes""" #
pass
# this is to connect to RPC on another port and parse differently
class Parser_RPC_Start(Parser_RPC_Base):
def parse(self, data2parse):
return data2parse
class Parser_RPC(object):
""" Parser_RPC
This is for communicating with BITCOINDARKD!!!
this talks to BTCD via RPC! used for start only atm
"""#
ql777_RPC_start = Parser_RPC_Start()
def __init__(self, environ = {}):
self.environ = environ
    def parse_RPC(self, data2parse, requestType2Parse='start'):
log.msg("----parse_RPC---------> ", data2parse, "requestType2Parse", requestType2Parse)
print(type(data2parse),"\n\n\n")
data = data2parse
if isinstance(data, bytes):
data = data2parse.decode()
try:
bsload=data.split("\r\n\r\n")[1]
bsload1=bsload.replace('null','"null"')
except:
print(5*"\nOOOOOOOOPS parse_RPC")
pass # need better parsing- but this is for start and stop ONLY!
try:
bsdi=eval(bsload1)
print(1*"~~~~~~~bsdi~777~~~~~", bsdi, "\n")
except:
return data.encode("utf-8")
# this takes the raw reply, strips it off header and fillers, evals into a dict
# and hands the dict to the class that is responsible for the particular query
# keep the try except here, but move the RPC to a different parser.!!!
try: # this would be the format that is returned by BTCD RPC on eg port 14632
result=bsdi['result']
data_result=eval(result)
except:# this would be the format that is returned by JL777 http on port 7777
data_result=bsdi
# there is a generic class for parsing each query
if requestType2Parse == 'start': #ToDO privateBet
parsed = self.ql777_RPC_start.parse(data_result)
else:
parsed = 'RAISE_ME_error'
data = str(parsed).encode("utf-8")
return data
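# Illustrative usage sketch: parse a raw "start" reply from the BTCD RPC port
# (here `raw_reply` is assumed to hold the bytes of the HTTP response,
# headers included):
#
#   parser_rpc = Parser_RPC()
#   parsed = parser_rpc.parse_RPC(raw_reply, requestType2Parse='start')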
##############
class Parser_JL777_Base(object):
""" this wrapper class can provide generic functionality for the
individual API Parser classes
Most of these responses from the SuperNET server are returned as is.
Some of them are internal, and have to fetched from the GUIlopp with GUIpoll.
These need special parsing.
eg PONG, havenode and some others
""" #
# 48 api.h xyz_func calls here + 1 pBET unfinished
# This is from api.h in libjl777 111314
# glue
# // GLUE 7
class Parser_jl777_gotjson(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_gotpacket(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_gotnewpeer(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_BTCDpoll(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_GUIpoll(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_stop(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_settings(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // passthru 2
class Parser_jl777_passthru(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_remote(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // ramchains 11
class Parser_jl777_ramstatus(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramaddrlist(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramstring(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramrawind(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramblock(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramscript(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramtxlist(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramrichlist(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramcompress(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramexpand(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_rambalances(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_rampyramid(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramresponse(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# multisig MGW 7
class Parser_jl777_genmultisig(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getmsigpubkey(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_MGWaddr(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_MGWresponse(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_setmsigpubkey(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_cosign(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_cosigned(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // IP comms 6
class Parser_jl777_ping(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_pong(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_sendfrag(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_gotfrag(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_startxfer(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getfile(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# Kademlia DHT 8
class Parser_jl777_store(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_findvalue(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_findnode(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_havenode(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_havenodeB(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_findaddress(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_puzzles(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_nonces(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // MofNfs 3
class Parser_jl777_savefile(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_restorefile(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_publish(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // Telepathy 9
class Parser_jl777_getpeers(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_addcontact(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_removecontact(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_dispcontact(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_telepathy(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getdb(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_sendmessage(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_sendbinary(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // Teleport 3
class Parser_jl777_maketelepods(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_telepodacct(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_teleport(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
#InstantDEX 18
class Parser_jl777_trollbox(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_allorderbooks(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_openorders(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_orderbook(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_placebid(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_placeask(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_makeoffer3(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_respondtx(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_processutx(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_bid(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ask(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_allsignals(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_lottostats(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_tradehistory(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getsignal(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_cancelquote(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_processjumptrade(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_jumptrades(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
#Tradebot 3
class Parser_jl777_pricedb(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getquotes(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_tradebot(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# privatebet 1
class Parser_jl777_lotto(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# embeddedLnags
class Parser_jl777_python(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_syscall(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_checkmsg(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
##########################
##########################
##
## The Parser_777 Container and Admin class
##
##########################
##########################
class Parser_777(object):
""" Parser_777
// glue
// multisig
// Kademlia DHT
// MofNfs
// Telepathy
// Teleport
// InstantDEX
// Tradebot
// privatebet
"""#
# // glue
ql777_gotjson = Parser_jl777_gotjson()
ql777_gotpacket = Parser_jl777_gotpacket()
ql777_gotnewpeer = Parser_jl777_gotnewpeer()
ql777_BTCDpoll = Parser_jl777_BTCDpoll()
ql777_GUIpoll = Parser_jl777_GUIpoll()
ql777_settings = Parser_jl777_settings()
ql777_stop = Parser_jl777_stop()
#// ramchains 13
ql777_ramstatus = Parser_jl777_ramstatus()
ql777_ramaddrlist = Parser_jl777_ramaddrlist()
ql777_ramstring = Parser_jl777_ramstring()
ql777_ramrawind = Parser_jl777_ramrawind()
ql777_ramblock = Parser_jl777_ramblock()
ql777_ramscript = Parser_jl777_ramscript()
ql777_ramtxlist = Parser_jl777_ramtxlist()
ql777_ramrichlist = Parser_jl777_ramrichlist()
ql777_ramcompress = Parser_jl777_ramcompress()
ql777_ramexpand = Parser_jl777_ramexpand()
ql777_rambalances = Parser_jl777_rambalances()
ql777_rampyramid = Parser_jl777_rampyramid()
ql777_ramresponse = Parser_jl777_ramresponse()
# // MGW 7
ql777_genmultisig = Parser_jl777_genmultisig()
ql777_getmsigpubkey = Parser_jl777_getmsigpubkey()
ql777_MGWaddr = Parser_jl777_MGWaddr()
ql777_setmsigpubkey = Parser_jl777_setmsigpubkey()
ql777_MGWresponse = Parser_jl777_MGWresponse()
ql777_cosign = Parser_jl777_cosign()
ql777_cosigned = Parser_jl777_cosigned()
# // IPcomms(MGW)
ql777_ping = Parser_jl777_ping()
ql777_pong = Parser_jl777_pong()
ql777_sendfrag = Parser_jl777_sendfrag()
ql777_gotfrag = Parser_jl777_gotfrag()
ql777_startxfer = Parser_jl777_startxfer()
ql777_getfile = Parser_jl777_getfile()
# // Kademlia DHT
ql777_store = Parser_jl777_store()
ql777_findvalue = Parser_jl777_findvalue()
ql777_findnode = Parser_jl777_findnode()
ql777_havenode = Parser_jl777_havenode()
ql777_havenodeB = Parser_jl777_havenodeB()
ql777_findaddress = Parser_jl777_findaddress()
ql777_nonces = Parser_jl777_nonces()
ql777_puzzles = Parser_jl777_puzzles()
# // MofNfs
ql777_savefile = Parser_jl777_savefile()
ql777_restorefile = Parser_jl777_restorefile()
    ql777_publish = Parser_jl777_publish()
# // Telepathy
ql777_getpeers = Parser_jl777_getpeers()
ql777_addcontact = Parser_jl777_addcontact()
ql777_removecontact = Parser_jl777_removecontact()
ql777_dispcontact = Parser_jl777_dispcontact()
ql777_telepathy = Parser_jl777_telepathy()
ql777_getdb = Parser_jl777_getdb()
ql777_sendmessage = Parser_jl777_sendmessage()
ql777_sendbinary = Parser_jl777_sendbinary()
# // Teleport
ql777_maketelepods = Parser_jl777_maketelepods()
ql777_telepodacct = Parser_jl777_telepodacct()
ql777_teleport = Parser_jl777_teleport()
# // InstantDEX 18
ql777_trollbox = Parser_jl777_trollbox()
ql777_allorderbooks = Parser_jl777_allorderbooks()
ql777_openorders = Parser_jl777_openorders()
ql777_orderbook = Parser_jl777_orderbook()
ql777_placebid = Parser_jl777_placebid()
ql777_placeask = Parser_jl777_placeask()
ql777_makeoffer3 = Parser_jl777_makeoffer3()
ql777_respondtx = Parser_jl777_respondtx()
ql777_processutx = Parser_jl777_processutx()
ql777_bid = Parser_jl777_bid()
ql777_ask = Parser_jl777_ask()
ql777_allsignals = Parser_jl777_allsignals()
ql777_lottostats = Parser_jl777_lottostats()
ql777_tradehistory = Parser_jl777_tradehistory()
ql777_getsignal = Parser_jl777_getsignal()
ql777_cancelquote = Parser_jl777_cancelquote()
ql777_processjumptrade= Parser_jl777_processjumptrade()
ql777_jumptrades = Parser_jl777_jumptrades()
# // Tradebot
ql777_pricedb = Parser_jl777_pricedb()
ql777_getquotes = Parser_jl777_getquotes()
ql777_tradebot = Parser_jl777_tradebot()
# // # privatebet
ql777_lotto = Parser_jl777_lotto()
#// passthru
ql777_passthru = Parser_jl777_passthru()
ql777_remote = Parser_jl777_remote()
ql777_checkmsg = Parser_jl777_checkmsg()
# // Embedded Langs
ql777_python = Parser_jl777_python()
ql777_syscall = Parser_jl777_syscall()
def __init__(self, environ = {}):
self.environ = environ
def parse_777(self, data2parse, requestType2Parse):
""" here we should be flexible as to the data type we get and parse.
so we need some type checking and hand always the same data type to the actual parse functions."""#
log.msg("def parse_777()---------> ", data2parse, "requestType2Parse is: ", requestType2Parse)
try:
log.msg("def parse_777()---------> ", type(data2parse.content), data2parse.json(), data2parse.content)
except Exception as e:
log.msg("except def parse_777()---------> ", data2parse.content)
log.msg("except def parse_777()---------> ", type(data2parse.content))
log.msg("except def parse_777()---------> {0}".format(str(e)))
if isinstance(data2parse, Response):
data2parse = data2parse.json()
parsed_777= self.parseReturnedDict(data2parse, requestType2Parse)
log.msg("type(data2parse): ", type(data2parse))
return str(parsed_777).encode("utf-8")
elif isinstance(data2parse, dict):
parsed_777 = self.parseReturnedDict(data2parse, requestType2Parse)
return str(parsed_777).encode("utf-8")
elif isinstance(data2parse, bytes):
data = data2parse.decode()
bsload=data.split("\r\n\r\n")[1]
bsload1=bsload.replace('null','"null"')
try:
bsdi=eval(bsload1)
except:
return data.encode("utf-8")
try: # this would be the format that is returned by BTCD RPC on eg port 14632
result=bsdi['result']
data2parse=eval(result)
except:# this would be the format that is returned by JL777 http on port 7777
data2parse=bsdi
parsed_777=self.parseReturnedDict(data2parse, requestType2Parse)
return str(parsed_777).encode("utf-8")
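    # Illustrative usage sketch: a reply that has already been decoded to a
    # dict is dispatched straight to the matching per-query parser:
    #
    #   parser = Parser_777()
    #   parser.parse_777({'result': 'null'}, 'GUIpoll')  # -> b"{'result': 'null'}"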
def parseReturnedDict(self,data2parse, requestType2Parse):
#print("parseReturnedDict",type(data2parse),"\n\n\n")
# there is a generic class for parsing each query
if requestType2Parse == 'placeLay': #ToDO privateBet
# // # privatebet 1
parsed = self.ql777_placeLay.parse(data2parse)
# // glue 7 ql777_
elif requestType2Parse == 'gotjson':
parsed = self.ql777_gotjson.parse(data2parse)
elif requestType2Parse == 'gotpacket':
parsed = self.ql777_gotpacket.parse(data2parse)
elif requestType2Parse == 'gotnewpeer':
parsed = self.ql777_gotnewpeer.parse(data2parse)
elif requestType2Parse == 'BTCDpoll':
parsed = self.ql777_BTCDpoll.parse(data2parse)
elif requestType2Parse == 'GUIpoll':
parsed = self.ql777_GUIpoll.parse(data2parse)
elif requestType2Parse == 'stop':
parsed = self.ql777_stop.parse(data2parse)
elif requestType2Parse == 'settings':
parsed = self.ql777_settings.parse(data2parse)
# // ramchains 13
elif requestType2Parse == 'ramstatus':
parsed = self.ql777_ramstatus.parse(data2parse)
elif requestType2Parse == 'ramaddrlist':
parsed = self.ql777_ramaddrlist.parse(data2parse)
elif requestType2Parse == 'ramstring':
parsed = self.ql777_ramstring.parse(data2parse)
elif requestType2Parse == 'ramrawind':
parsed = self.ql777_ramrawind.parse(data2parse)
elif requestType2Parse == 'ramblock':
parsed = self.ql777_ramblock.parse(data2parse)
elif requestType2Parse == 'ramscript':
parsed = self.ql777_ramscript.parse(data2parse)
elif requestType2Parse == 'ramtxlist':
parsed = self.ql777_ramtxlist.parse(data2parse)
elif requestType2Parse == 'ramrichlist':
parsed = self.ql777_ramrichlist.parse(data2parse)
elif requestType2Parse == 'ramcompress':
parsed = self.ql777_ramcompress.parse(data2parse)
elif requestType2Parse == 'ramexpand':
parsed = self.ql777_ramexpand.parse(data2parse)
elif requestType2Parse == 'rambalances':
parsed = self.ql777_rambalances.parse(data2parse)
elif requestType2Parse == 'rampyramid':
parsed = self.ql777_rampyramid.parse(data2parse)
elif requestType2Parse == 'ramresponse':
parsed = self.ql777_ramresponse.parse(data2parse)
# // 7 MGW
elif requestType2Parse == 'genmultisig':
parsed = self.ql777_genmultisig.parse(data2parse)
elif requestType2Parse == 'getmsigpubkey':
parsed = self.ql777_getmsigpubkey.parse(data2parse)
elif requestType2Parse == 'MGWaddr':
parsed = self.ql777_MGWaddr.parse(data2parse)
        elif requestType2Parse == 'MGWresponse':
            parsed = self.ql777_MGWresponse.parse(data2parse)
elif requestType2Parse == 'setmsigpubkey':
parsed = self.ql777_setmsigpubkey.parse(data2parse)
elif requestType2Parse == 'cosign':
parsed = self.ql777_cosign.parse(data2parse)
elif requestType2Parse == 'cosigned':
parsed = self.ql777_cosigned.parse(data2parse)
# // IPcomms 6
elif requestType2Parse == 'ping':
parsed = self.ql777_ping.parse(data2parse)
elif requestType2Parse == 'pong':
parsed = self.ql777_pong.parse(data2parse)
elif requestType2Parse == 'sendfrag':
parsed = self.ql777_sendfrag.parse(data2parse)
elif requestType2Parse == 'gotfrag':
parsed = self.ql777_gotfrag.parse(data2parse)
elif requestType2Parse == 'startxfer':
parsed = self.ql777_startxfer.parse(data2parse)
elif requestType2Parse == 'getfile':
parsed = self.ql777_getfile.parse(data2parse)
# // Kademlia DHT 8
elif requestType2Parse == 'store':
parsed = self.ql777_store.parse(data2parse)
elif requestType2Parse == 'findvalue':
parsed = self.ql777_findvalue.parse(data2parse)
elif requestType2Parse == 'findnode':
parsed = self.ql777_findnode.parse(data2parse)
elif requestType2Parse == 'havenode':
parsed = self.ql777_havenode.parse(data2parse)
elif requestType2Parse == 'havenodeB':
parsed = self.ql777_havenodeB.parse(data2parse)
elif requestType2Parse == 'findaddress':
parsed = self.ql777_findaddress.parse(data2parse)
elif requestType2Parse == 'puzzles':
parsed = self.ql777_puzzles.parse(data2parse)
elif requestType2Parse == 'nonces':
parsed = self.ql777_nonces.parse(data2parse)
# // MofNfs 3
elif requestType2Parse == 'savefile':
parsed = self.ql777_savefile.parse(data2parse)
elif requestType2Parse == 'restorefile':
parsed = self.ql777_restorefile.parse(data2parse)
elif requestType2Parse == 'publish':
parsed = self.ql777_publish.parse(data2parse)
# // Telepathy 9
elif requestType2Parse == 'getpeers':
parsed = self.ql777_getpeers.parse(data2parse)
elif requestType2Parse == 'addcontact':
parsed = self.ql777_addcontact.parse(data2parse)
elif requestType2Parse == 'removecontact':
parsed = self.ql777_removecontact.parse(data2parse)
elif requestType2Parse == 'dispcontact':
parsed = self.ql777_dispcontact.parse(data2parse)
elif requestType2Parse == 'telepathy':
parsed = self.ql777_telepathy.parse(data2parse)
elif requestType2Parse == 'getdb':
parsed = self.ql777_getdb.parse(data2parse)
elif requestType2Parse == 'sendmessage':
parsed = self.ql777_sendmessage.parse(data2parse)
elif requestType2Parse == 'sendbinary':
parsed = self.ql777_sendbinary.parse(data2parse)
elif requestType2Parse == 'checkmsg':
parsed = self.ql777_checkmsg.parse(data2parse)
# // Teleport 3
elif requestType2Parse == 'maketelepods':
parsed = self.ql777_maketelepods.parse(data2parse)
elif requestType2Parse == 'telepodacct':
parsed = self.ql777_telepodacct.parse(data2parse)
elif requestType2Parse == 'teleport':
parsed = self.ql777_teleport.parse(data2parse)
# // InstantDEX 18
elif requestType2Parse == 'trollbox':
parsed = self.ql777_trollbox.parse(data2parse)
elif requestType2Parse == 'allorderbooks':
parsed = self.ql777_allorderbooks.parse(data2parse)
elif requestType2Parse == 'openorders':
parsed = self.ql777_openorders.parse(data2parse)
elif requestType2Parse == 'orderbook':
parsed = self.ql777_orderbook.parse(data2parse)
elif requestType2Parse == 'placebid':
parsed = self.ql777_placebid.parse(data2parse)
elif requestType2Parse == 'placeask':
parsed = self.ql777_placeask.parse(data2parse)
elif requestType2Parse == 'makeoffer3':
parsed = self.ql777_makeoffer3.parse(data2parse)
elif requestType2Parse == 'respondtx':
parsed = self.ql777_respondtx.parse(data2parse)
elif requestType2Parse == 'processutx':
parsed = self.ql777_processutx.parse(data2parse)
elif requestType2Parse == 'bid':
parsed = self.ql777_bid.parse(data2parse)
elif requestType2Parse == 'ask':
parsed = self.ql777_ask.parse(data2parse)
elif requestType2Parse == 'allsignals':
parsed = self.ql777_allsignals.parse(data2parse)
elif requestType2Parse == 'lottostats':
parsed = self.ql777_lottostats.parse(data2parse)
elif requestType2Parse == 'tradehistory':
parsed = self.ql777_tradehistory.parse(data2parse)
elif requestType2Parse == 'getsignal':
parsed = self.ql777_getsignal.parse(data2parse)
elif requestType2Parse == 'cancelquote':
parsed = self.ql777_cancelquote.parse(data2parse)
elif requestType2Parse == 'processjumptrade':
parsed = self.ql777_processjumptrade.parse(data2parse)
elif requestType2Parse == 'jumptrades':
parsed = self.ql777_jumptrades.parse(data2parse)
# // Tradebot 3
elif requestType2Parse == 'pricedb':
parsed = self.ql777_pricedb.parse(data2parse)
elif requestType2Parse == 'getquotes':
parsed = self.ql777_getquotes.parse(data2parse)
elif requestType2Parse == 'tradebot':
parsed = self.ql777_tradebot.parse(data2parse)
# // privatebet
elif requestType2Parse == 'lotto':
parsed = self.ql777_lotto.parse(data2parse)
# // passthru 2
elif requestType2Parse == 'passthru':
parsed = self.ql777_passthru.parse(data2parse)
elif requestType2Parse == 'remote':
parsed = self.ql777_remote.parse(data2parse)
# // embedded langs
elif requestType2Parse == 'python':
parsed = self.ql777_python.parse(data2parse)
elif requestType2Parse == 'syscall':
parsed = self.ql777_syscall.parse(data2parse)
# //
else:
parsed = {'RAISE_ME_error':'RAISE_ME_error'}
return parsed
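# A table-driven sketch of the dispatch above (illustrative only, not wired in):
# nearly every branch follows the "ql777_<requestType>" attribute naming
# convention, so getattr can replace the long elif chain. Irregular cases (here,
# 'MGWresonse' -> ql777_MGWMGWresonse) go in an explicit override map. Note this
# is not a strict drop-in replacement: it accepts any request type that happens
# to have a matching attribute, not only the ones whitelisted above.
_QL777_OVERRIDES = {'MGWresonse': 'ql777_MGWMGWresonse'}
def dispatch_parse(holder, data2parse, requestType2Parse):
    attr = _QL777_OVERRIDES.get(requestType2Parse, 'ql777_' + requestType2Parse)
    parser = getattr(holder, attr, None)
    if parser is None:
        return {'RAISE_ME_error': 'RAISE_ME_error'}
    return parser.parse(data2parse)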
##########################
##########################
##########################
##########################
class Parser_XML_Base(object):
""" this wrapper class can provide generic functionality for the
individual API Parser classes""" #
pass
class Parser_XML_SoccerSchedule(Parser_XML_Base):
def parse(self, data2parse):
log.msg("XmlParser STARTPARSE!!", self)
daily_summary = etree.fromstring(data2parse) #parse(url)
daily_summaryIter = daily_summary.iter()
returnThis = ''#'<html>'
limi=0
for elem in daily_summaryIter:
returnThis += (str(elem.attrib) + "\r\n")
#print(elem.tag, " - " , str(elem.attrib)) # <--------------------
#limi+=1
#if limi > 20:
# break
#returnThis += "</html>"
returnThis = returnThis.encode("utf-8")
return returnThis
class Parser_XML_MatchBoxScore(Parser_XML_Base):
def parse(self, data2parse):
return data2parse
class Parser_XML_GetNewsFeed(Parser_XML_Base):
def parse(self, data2parse):
log.msg("XmlParser STARTPARSE!!", self)
daily_summary = etree.fromstring(data2parse) #parse(url)
daily_summaryIter = daily_summary.iter()
returnThis = ''#'<html>'
limi=0
for elem in daily_summaryIter:
returnThis += (str(elem.attrib) + "\r\n")
#print(elem.tag, " - " , str(elem.attrib)) # <--------------------
#limi+=1
#if limi > 20:
# break
#returnThis += "</html>"
returnThis = returnThis.encode("utf-8")
return returnThis
class Parser_XML_DailySummary(Parser_XML_Base):
def parse(self, data2parse):
log.msg(" Parser_LOC XmlParser STARTPARSE!!", self)
daily_summary = etree.fromstring(data2parse)
daily_summaryIter = daily_summary.iter()
returnThis = ''
limi=0
for elem in daily_summaryIter:
returnThis += (str(elem.attrib) + "\r\n")
#print(elem.tag, " - " , str(elem.attrib)) # <--------------------
#limi+=1
#if limi > 20:
# break
#returnThis += "</html>"
returnThis = returnThis.encode("utf-8")
return returnThis
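# The three parsers above share an identical attribute-dumping body; a shared
# helper such as this sketch (assuming the same `etree` import their parse
# methods rely on) would remove the duplication:
def dump_xml_attribs(data2parse):
    "Concatenate the attrib dict of every element in the parsed XML tree."
    root = etree.fromstring(data2parse)
    returnThis = ''
    for elem in root.iter():
        returnThis += (str(elem.attrib) + "\r\n")
    return returnThis.encode("utf-8")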
# one data processor class
class Parser_XML(object):
"""- this parses the xml that is received from the remote data provider""" # customize info from fetched xml
parser_XML_MatchBoxScore = Parser_XML_MatchBoxScore()
parser_XML_GetNewsFeed = Parser_XML_GetNewsFeed()
parser_XML_DailySummary = Parser_XML_DailySummary()
def __init__(self, environ = {}):
self.environ = environ
def ack(self):
log.msg("XmlParser HERE!")
def parse_XML(self, data2parse, requestType2Parse ):
print(1*"\n++++++++++++",requestType2Parse, data2parse )
if requestType2Parse == 'getNewsFeed':
parsed = self.parser_XML_GetNewsFeed.parse(data2parse)
elif requestType2Parse == 'MatchBoxScore':
parsed = self.parser_XML_MatchBoxScore.parse(data2parse)
elif requestType2Parse == 'DailySummary':
parsed = self.parser_XML_DailySummary.parse(data2parse)
else:
parsed = 'RAISE ME error'
data = str(parsed).encode("utf-8")
return data
##########################
##########################
##########################
##########################
##########################
##########################
##
## each QUERY gets its own little class.
## this is important to keep modularity
##
##########################
### Here we mostly just MIRROR what is happening in the XML PARSERS!
### using these here is allowing for variations other than xml feed reading!
class Parser_Loc_Base(object):
""" this wrapper class can provide generic functionality for the
individual API Parser classes""" #
pass
class Parse_Loc_Season(Parser_Loc_Base):
    pass # maybe not needed; do locally in parser
def parse(self, data2parse):
return data2parse
class Parser_LOC(object):
"""- this parses the data that is retrieved from a local cache
This is the local Parser wrapper class. When we need to parse local XML, we can just use an xml Parser class
Or other parser classes for other file formats
This can access XML parsers as well as any other Parsers
"""#
qLOC_Season = Parse_Loc_Season()
parser_XML_DailySummary = Parser_XML_DailySummary()
parser_XML_SoccerSchedule = Parser_XML_SoccerSchedule()
def __init__(self, environ = {}):
self.environ = environ
    def parse_File(self, data2parse, requestType2Parse):
pass
    def parse_Message(self, data2parse, requestType2Parse):
pass
def parse_XML(self, data2parse, reqDict ):
#print(13*"\n\n\n***********", reqDict)
if reqDict['requestType'] == 'DailySummary':
parsed = self.parser_XML_DailySummary.parse(data2parse)
elif reqDict['requestType'] == 'soccer_schedule':
parsed = self.parser_XML_SoccerSchedule.parse(data2parse)
else:
parsed = 'RAISE ME error'
data = str(parsed).encode("utf-8")
return data
# log.msg(" Parser_LOC XmlParser STARTPARSE!!", self)
# THE LOCALS HAVE TO USE THE XML PARSERS TOO!!!!!!!! AT LEAST THE XML ONES, BECAUSE THEY LOAD A CACHED XML FILE
from header import *
import json
import math
# This used to be a class, but I want to use json w/o all the
# manual mangling needed, so it's just going to be a dictionary.
# It must have X and Y grid points and keys for
# the 'name' and a 'path' (list of (x,y) points or "on"/"off" cmds).
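# Illustrative example (hypothetical values; XGridSize/YGridSize come from header):
#   { 'name': 'box', 'XGridSize': XGridSize, 'YGridSize': YGridSize,
#     'path': ["on", (0, 0), (0, 5), (5, 5), (5, 0), (0, 0), "off"] }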
def pointIsValid( pt ):
try:
if "on" == pt or "off" == pt:
return True
if ( pt[0] < 0
or pt[0] >= XGridSize
or pt[1] < 0
or pt[1] >= YGridSize
):
return False
return True
except:
return False
def pathIsValid( path ):
try:
for pt in path:
if not pointIsValid( pt ):
return False
return True
except:
return False
def glyphIsValid( g ):
if not XGridSize == g['XGridSize']:
return False
if not YGridSize == g['YGridSize']:
return False
if 'name' not in g:
wrn("Glyph does not have a name.")
return False
if 'path' not in g:
wrn("Glyph \"%s\" does not have a path."%(str(g['name'])))
return False
if not pathIsValid( g['path'] ):
wrn("Path malformed in \"%s\"."%(str(g['name'])))
return False
return True
def glyphList():
"Return a list of glyphs saved already."
ls = sorted(os.listdir("glyphs"))
ret = []
for l in ls:
if ".json" != l[-5:]:
wrn("%s is not named correctly."%(l))
else:
ret.append( l[:-5] )
return ret
def glyphDump( g ):
if not glyphIsValid( g ):
raise NameError("Glyph is not valid, not storing.")
fileName = os.path.join("glyphs", str(g['name']) + ".json")
if( os.path.exists( fileName ) ):
raise NameError("It appears that this glyph exists, not storing.")
gs = g.copy()
gs.pop('name')
f = open( fileName, "w" )
json.dump(gs, f)
f.close()
def glyphLoad( name ):
fileName = os.path.join("glyphs", str(name) + ".json")
if( not os.path.exists( fileName ) ):
raise NameError("Glyph \"%s\" not found."%(str(name)))
f = open( fileName, "r" )
gu = json.load(f)
f.close()
# Now convert to ascii (from json's unicode).
# Will break if there are other things in here.
g = {}
for k in gu:
v = gu[k]
if isinstance( v, unicode ):
v = v.encode('ascii')
g[k.encode('ascii')] = v
p = []
for pt in g['path']:
if isinstance( pt, unicode ):
p.append( pt.encode('ascii') )
else:
p.append( pt )
g['path'] = p
g['name'] = str(name)
if glyphIsValid( g ):
return g
else:
raise NameError("Glyph \"%s\" is not valid."%(str(name)))
def glyphCreate( name, path ):
if not pathIsValid( path ):
raise SyntaxError("Path is invalid.")
newpath = []
for v in path:
if isinstance( v, list ):
newpath.append( tuple( v ) )
elif isinstance( v, unicode ):
newpath.append( v.encode('ascii') )
else:
newpath.append( v )
d = { 'name': str(name), 'path': newpath }
d['XGridSize'] = XGridSize
d['YGridSize'] = YGridSize
return d
def distanceEuclidean( lpt, pt ):
    if lpt is None:
        return 0.0
    else:
        dx = float( pt[0] - lpt[0] )
        dy = float( pt[1] - lpt[1] )
        return math.sqrt( ( dx * dx ) + ( dy * dy ) )
def interpolateEvenSpacedPtsOnALine( nPts, pt1, pt2 ):
    """Return a list of nPts points between pt1 and pt2, excluding pt1 but
    including pt2, so pt2 is always the last point and the list is nPts long."""
expath = []
xOffset = float( pt2[0] - pt1[0] ) / nPts
yOffset = float( pt2[1] - pt1[1] ) / nPts
for i in range( 1, nPts ):
newX = int(( i * xOffset + pt1[0] ) // 1 )
newY = int(( i * yOffset + pt1[1] ) // 1 )
expath.append( ( newX, newY ) )
expath.append( pt2 )
return expath
def glyphExpandToPts( nPoints, glyph ):
"Return the glyph expanded to nPoints triplets."
    # The general algorithm is to compute the total path length and then
    # divide it by the segment lengths. We want the glyph to be as sharp
    # as possible, so for now we only expand the lit parts.
lenTot = 0.0
lit = True
lpt = None
    dummyPts = 0 # Points that are unlit ("off") or duplicates.
    # Calc total (lit) path length. Only the ratios of the segment
    # lengths matter below, not the absolute number.
for pt in glyph['path']:
if "on" == pt:
lit = True
elif "off" == pt:
lit = False
else:
if( lit ):
d = distanceEuclidean( lpt, pt )
if 0.0 == d:
dummyPts = dummyPts + 1
lenTot = lenTot + d
else:
dummyPts = dummyPts + 1
lpt = pt
# Now we iterate again adding points to the lit parts.
expandToPts = nPoints - dummyPts
    if len(filter(lambda p: not isinstance(p, str), glyph['path'])) >= expandToPts:
        raise SyntaxError("More point-points in the path than nPoints allows.")
def ptToTriplet( lit, pt ):
if lit: blanked = 0
else: blanked = 1
return ( pt[0], pt[1], blanked )
expath = [] # This has the triplets.
lit = True
lpt = None
for pt in glyph['path']:
if "on" == pt:
lit = True
elif "off" == pt:
lit = False
else:
if( ( lpt is None ) or ( not lit ) ):
expath.append( ptToTriplet( lit, pt ) )
else:
dist = distanceEuclidean( lpt, pt )
nPtsToAdd = int(( expandToPts * dist / lenTot ) // 1 )
if( 0 < nPtsToAdd ):
interPts = interpolateEvenSpacedPtsOnALine( nPtsToAdd, lpt, pt )
expath = expath + map(lambda p: ptToTriplet( lit, p ), interPts )
else:
expath.append( ptToTriplet( lit, pt ) )
lpt = pt
    # We add pts if the flooring interpolation did not add enough;
    # rather than spread them out we just repeat the last point.
le = len(expath)
if( le > nPoints ):
wrn("Truncated %d from glyph, the glyphExpandToPts fn is broken."%(le-nPoints))
return expath[0:nPoints]
elif( le < nPoints ):
return expath + (nPoints-le) * [expath[-1]]
else:
return expath
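# Minimal usage sketch (hypothetical glyph name; glyphDump raises if the file
# already exists under glyphs/):
#   g = glyphCreate("demo", ["on", (0, 0), (10, 10), (10, 0), (0, 0)])
#   glyphDump(g)
#   triplets = glyphExpandToPts(500, glyphLoad("demo"))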
The Window Guys of Florida is one of South Florida's leaders in distribution and applications of high quality impact windows and doors in condominiums, residential, and commercial buildings.
Since 1995 Quinn & Wilson, Inc REALTORS has been a leader in residential and commercial real estate within Montgomery County, Philadelphia and the surrounding communities. Award-winning agents with decades of experience and recognition online.
At Resident our mission is to be an industry leader in the provision of block management software. Our ambition is to help our clients apply best practice to the management of their block and we provide them with cloud based tools to help them do this.
Residential and Commercial General Contracting provided with the best quality! With our in-house employees, we value your experience. Read our reviews!
Wartburg offers integrated, comprehensive senior care services including independent/assisted living, award-winning nursing, inpatient/outpatient rehabilitation, home care and adult day care-all with memory care for those living with Alzheimer's.
Denver has homes that fit every lifestyle. We’re a full-service real estate brokerage with a focus on the diverse and vibrant urban neighborhoods of Denver. We're locally owned and operated, and we love this city as much as you do.
Tri-Town Construction LLC is a fully licensed and insured general contractor and roofing contractor in Southwest Florida. It specializes in residential and commercial construction, with an emphasis on remodeling, as well as emergency response service.
Residential Painting Contractors is a company that services that Charlotte - Mecklenburg county of North Carolina and surrounding areas. They work mainly with homeowners but they also provide services to local businesses.
HomeGate Real Estate® is a national cloud-based company with offices in multiple states.
Coldwell Banker Elite is the #1 Coldwell Banker Affiliate in Virginia and premier real estate firm in the area. Through our creative marketing, cutting-edge tech and unmatched service, we provide residential, commercial, and property management services.
Pro Painters Brisbane was started to provide high quality services with the qualified standard of contractors available in the city.
Pro Pest Control Brisbane is a company offering range of specialized services in controlling and getting rid of pests in residential homes and commercial properties.
Greater Sudbury Plumbing (GSP) is locally owned and operated in Sudbury. We offer emergency services at our regular rates, 24 / 7. This means you pay the same as if it were a regular scheduled appointment. Our technicians are trained and licensed.
Based in Austin, Texas, THA (The Homeowners’ Advisor) is a service for home or business owners that incorporates knowledge, skill, and experience into a consulting company.
LAMERICA Real Estate is one of the top real estate firms in the nation with a focus on luxury residential real estate in the Los Angeles and Beverly Hills area.
At ZLW Property Solutions, we provide tailor-made solutions regarding rental and property investment in London and the rest of the UK.
KaleidaCare provides a web-based care management software and reporting solution specific to the needs of social and human services organizations.
The Rush Companies is the parent brand of six real estate focused entities - Development, Capital, Design, Commercial Construction, Single Family Homes, and Property Management. Headquartered in Gig Harbor, WA, Rush has offices in Seattle and Woodinville.
Appraisal Hub Inc. is a real estate valuation company, specializing in the appraisal of residential properties.
Truepad, a real estate search engine that enhances home listings with unbiased home reviews from local agents, has officially launched the Truepad Trusted Agent Program.
Blue Desert Interiors is an award winning interior design firm based in Scottsdale, Arizona. They are the only firm that specializes in residential, commercial and healthcare interior design.
The residential firm of Marcus Gleysteen Architects combines artistic vision with the highest professional skills and disciple. The firm operates out of a loft at the tip of the Greenway in downtown Boston.
Garage Doors New Rochelle is a company which installs high quality garage doors and also offers garage door repairing services in New Rochelle.
Keller Williams Legacy is the premier real estate office in Weston for Residential and Commercial Real Estate. Professional agents with passion and drive!
Renta Philippines is an online community for tenants and landlords. Renta wanted to help landlords to promote their business in a social-media-like environment. It also serves as a booking system for tenants who wanted to look for spaces to rent.
Full service real estate brokerage based in Fort Lauderdale and servicing the South Florida region.
Anyone searching for a Long Island contractor should look no further than 1A Contracting.
Real Estate Investment company that caters to a wide range of clientele within the Real Estate market. CFK forges strategic relationships with reputable companies to grow its brand and maintain a profitable and sustainable business.
Specializing in the acquisition and disposition of residential and commercial real estate.
Keller Williams Realty, Plano, is located in West Plano on Preston Road between Parker & Spring Creek. Our experienced agents represent buyers and sellers from all walks of life.
Specializing in Commercial Income Producing Property in Central Florida.
Architecture and design services at reasonable prices.
Rooste provides professional real estate services and assistance to an international clientele of foreign property investors and rental apartment owners in Budapest. For more information on Rooste and its services, visit their website, www.rooste.co.
A leading real estate team serving Charlotte Metro Area with all your residential real estate needs.
With more than 10 million square feet of mixed use, O’Neill is a leader in the national movement toward “new urbanism,” the construction of full-featured communities where residents can live, work, and play.
Vos Electric, Inc. is an industry leader in industrial, commercial and residential electrical and communication construction.
Among the first factors to consider is the training and education received by the facility staff. You should also consider the facility's rate of success and reputation when choosing a detox program to help you achieve a successful long-term recovery.
Portland Maine based architecture firm serving Maine and New England with design services for residential and commercial spaces.
Carol and Herb Meyer, THE MEYER TEAM, is working every day, every week to get their client's home sold or finding that all important new home as expeditiously as possible! They are marketing savvy and take on each home as they would their own!
SFE Energy is a leading retail energy marketer that provides natural gas and electricity fixed price protection programs to homes and businesses throughout Pennsylvania.
Techvera is a full service IT support and consulting firm specializing in managed services for small-medium businesses.
Real Estate Services in Kyiv and Ukraine. Local, regional and international commercial and residential properties for lease and for sale.
Granite Studios is a full service digital photography studio specializing in architectural, residential, commercial and still life photography.
Manufacturer Direct Since 1960, NordicPure.com is an industry leader in heating and air conditioning filtration. The company offers a wide range of products and services designed to enhance home and office. |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from gtp import gtp
from Tkinter import *
from toolbox import *
from toolbox import _
def get_full_sequence_threaded(worker,current_color,deepness):
sequence=get_full_sequence(worker,current_color,deepness)
threading.current_thread().sequence=sequence
def get_full_sequence(worker,current_color,deepness):
try:
sequence=""
undos=0
for d in range(deepness):
if current_color.lower()=="b":
answer=worker.play_black()
current_color="w"
else:
answer=worker.play_white()
current_color="b"
sequence+=answer+" "
if answer=='RESIGN':
break
if answer=='PASS':
undos+=1
break
undos+=1
es=worker.get_gnugo_estimate_score()
for u in range(undos):
worker.undo()
return [sequence.strip(),es]
except Exception, e:
return e
class GnuGoAnalysis():
def run_analysis(self,current_move):
one_move=go_to_move(self.move_zero,current_move)
player_color=guess_color_to_play(self.move_zero,current_move)
gnugo=self.gnugo
log()
log("==============")
log("move",str(current_move))
final_score=gnugo.get_gnugo_estimate_score()
#linelog(final_score)
es=final_score.split()[0]
if es[0]=="B":
lbs="B%+d"%(-1*float(final_score.split()[3][:-1]))
ubs="B%+d"%(-1*float(final_score.split()[5][:-1]))
else:
ubs="W%+d"%(float(final_score.split()[3][:-1]))
lbs="W%+d"%(float(final_score.split()[5][:-1]))
node_set(one_move,"ES",es)
node_set(one_move,"UBS",ubs)
node_set(one_move,"LBS",lbs)
if player_color in ('w',"W"):
log("gnugo plays white")
top_moves=gnugo.gnugo_top_moves_white()
answer=gnugo.play_white()
else:
log("gnugo plays black")
top_moves=gnugo.gnugo_top_moves_black()
answer=gnugo.play_black()
log("====","Gnugo answer:",answer)
node_set(one_move,"CBM",answer)
log("==== Gnugo top moves")
for one_top_move in top_moves:
log("\t",one_top_move)
log()
top_moves=top_moves[:min(self.nb_variations,self.maxvariations)]
if (answer not in ["PASS","RESIGN"]):
gnugo.undo()
while len(top_moves)>0:
all_threads=[]
for worker in self.workers:
worker.need_undo=False
if len(top_moves)>0:
one_top_move=top_moves.pop(0)
if player_color in ('w',"W"):
worker.place_white(one_top_move)
one_thread=threading.Thread(target=get_full_sequence_threaded,args=(worker,'b',self.deepness))
else:
worker.place_black(one_top_move)
one_thread=threading.Thread(target=get_full_sequence_threaded,args=(worker,'w',self.deepness))
worker.need_undo=True
one_thread.one_top_move=one_top_move
one_thread.start()
all_threads.append(one_thread)
for one_thread in all_threads:
one_thread.join()
for worker in self.workers:
if worker.need_undo:
worker.undo()
for one_thread in all_threads:
if type(one_thread.sequence)!=type(["list"]):
raise GRPException(_("GnuGo thread failed:")+"\n"+str(one_thread.sequence))
one_sequence=one_thread.one_top_move+" "+one_thread.sequence[0]
es=one_thread.sequence[1]
one_sequence=one_sequence.strip()
log(">>>>>>",one_sequence)
previous_move=one_move.parent
current_color=player_color
first_move=True
for one_deep_move in one_sequence.split(' '):
if one_deep_move not in ['RESIGN','PASS']:
i,j=gtp2ij(one_deep_move)
new_child=previous_move.new_child()
node_set(new_child,current_color,(i,j))
if first_move:
first_move=False
node_set(new_child,"ES",es)
previous_move=new_child
if current_color in ('w','W'):
current_color='b'
else:
current_color='w'
else:
gnugo.undo()
#one_move.add_comment_text(additional_comments)
log("Creating the influence map")
black_influence=gnugo.get_gnugo_initial_influence_black()
black_territories_points=[]
black_influence_points=[]
white_influence=gnugo.get_gnugo_initial_influence_white()
white_territories_points=[]
white_influence_points=[]
for i in range(self.size):
for j in range(self.size):
if black_influence[i][j]==-3:
black_territories_points.append([i,j])
if white_influence[i][j]==3:
white_territories_points.append([i,j])
if black_influence[i][j]==-2:
black_influence_points.append([i,j])
if white_influence[i][j]==2:
white_influence_points.append([i,j])
if black_influence_points!=[]:
node_set(one_move,"IBM",black_influence_points) #IBM: influence black map
if black_territories_points!=[]:
node_set(one_move,"TBM",black_territories_points) #TBM: territories black map
if white_influence_points!=[]:
node_set(one_move,"IWM",white_influence_points) #IWM: influence white map
if white_territories_points!=[]:
node_set(one_move,"TWM",white_territories_points) #TWM: territories white map
return answer #returning the best move, necessary for live analysis
    def play(self,gtp_color,gtp_move): #GnuGo needs to redefine this method to apply it to all its workers
if gtp_color=='w':
self.bot.place_white(gtp_move)
for worker in self.workers:
worker.place_white(gtp_move)
else:
self.bot.place_black(gtp_move)
for worker in self.workers:
worker.place_black(gtp_move)
def undo(self):
self.bot.undo()
for worker in self.workers:
worker.undo()
def terminate_bot(self):
log("killing gnugo")
self.gnugo.close()
log("killing gnugo workers")
for w in self.workers:
w.close()
def initialize_bot(self):
self.nb_variations=4
try:
self.nb_variations=int(self.profile["variations"])
except:
pass
#grp_config.set("GnuGo", "variations",self.nb_variations)"""
self.deepness=4
try:
self.deepness=int(self.profile["deepness"])
except:
pass
#grp_config.set("GnuGo", "deepness",self.deepness)"""
gnugo=gnugo_starting_procedure(self.g,self.profile)
self.nb_workers=self.nb_variations
log("Starting all GnuGo workers")
self.workers=[]
for w in range(self.nb_workers):
log("\t Starting worker",w+1)
gnugo_worker=gnugo_starting_procedure(self.g,self.profile)
self.workers.append(gnugo_worker)
log("All workers ready")
self.gnugo=gnugo
self.time_per_move=0
return gnugo
def gnugo_starting_procedure(sgf_g,profile,silentfail=False):
return bot_starting_procedure("GnuGo","GNU Go",GnuGo_gtp,sgf_g,profile,silentfail)
class RunAnalysis(GnuGoAnalysis,RunAnalysisBase):
def __init__(self,parent,filename,move_range,intervals,variation,komi,profile="slow",existing_variations="remove_everything"):
RunAnalysisBase.__init__(self,parent,filename,move_range,intervals,variation,komi,profile,existing_variations)
class LiveAnalysis(GnuGoAnalysis,LiveAnalysisBase):
def __init__(self,g,filename,profile="slow"):
LiveAnalysisBase.__init__(self,g,filename,profile)
class GnuGo_gtp(gtp):
def get_gnugo_initial_influence_black(self):
self.write("initial_influence black influence_regions")
one_line=self.readline()
one_line=one_line.split("= ")[1].strip().replace(" "," ")
lines=[one_line]
for i in range(self.size-1):
one_line=self.readline().strip().replace(" "," ")
lines.append(one_line)
influence=[]
for i in range(self.size):
influence=[[int(s) for s in lines[i].split(" ")]]+influence
return influence
def get_gnugo_initial_influence_white(self):
self.write("initial_influence white influence_regions")
one_line=self.readline()
one_line=one_line.split("= ")[1].strip().replace(" "," ")
lines=[one_line]
for i in range(self.size-1):
one_line=self.readline().strip().replace(" "," ")
lines.append(one_line)
influence=[]
for i in range(self.size):
influence=[[int(s) for s in lines[i].split(" ")]]+influence
return influence
def quick_evaluation(self,color):
return variation_data_formating["ES"]%self.get_gnugo_estimate_score()
def get_gnugo_estimate_score(self):
self.write("estimate_score")
answer=self.readline().strip()
try:
return answer[2:]
except:
raise GRPException("GRPException in get_gnugo_estimate_score()")
def gnugo_top_moves_black(self):
self.write("top_moves_black")
answer=self.readline()[:-1]
try:
answer=answer.split(" ")[1:-1]
except:
raise GRPException("GRPException in get_gnugo_top_moves_black()")
answers_list=[]
for value in answer:
try:
float(value)
except:
answers_list.append(value)
return answers_list
def gnugo_top_moves_white(self):
self.write("top_moves_white")
answer=self.readline()[:-1]
try:
answer=answer.split(" ")[1:-1]
except:
raise GRPException("GRPException in get_gnugo_top_moves_white()")
answers_list=[]
for value in answer:
try:
float(value)
except:
answers_list.append(value)
return answers_list
def get_gnugo_experimental_score(self,color):
self.write("experimental_score "+color)
answer=self.readline().strip()
return answer[2:]
class GnuGoSettings(BotProfiles):
def __init__(self,parent,bot="GnuGo"):
Frame.__init__(self,parent)
self.parent=parent
self.bot=bot
self.profiles=get_bot_profiles(bot,False)
profiles_frame=self
self.listbox = Listbox(profiles_frame)
self.listbox.grid(column=10,row=10,rowspan=10)
self.update_listbox()
row=10
Label(profiles_frame,text=_("Profile")).grid(row=row,column=11,sticky=W)
self.profile = StringVar()
Entry(profiles_frame, textvariable=self.profile, width=30).grid(row=row,column=12)
row+=1
Label(profiles_frame,text=_("Command")).grid(row=row,column=11,sticky=W)
self.command = StringVar()
Entry(profiles_frame, textvariable=self.command, width=30).grid(row=row,column=12)
row+=1
Label(profiles_frame,text=_("Parameters")).grid(row=row,column=11,sticky=W)
self.parameters = StringVar()
Entry(profiles_frame, textvariable=self.parameters, width=30).grid(row=row,column=12)
row+=1
Label(profiles_frame,text=_("Maximum number of variations")).grid(row=row,column=11,sticky=W)
self.variations = StringVar()
Entry(profiles_frame, textvariable=self.variations, width=30).grid(row=row,column=12)
row+=1
Label(profiles_frame,text=_("Deepness for each variation")).grid(row=row,column=11,sticky=W)
self.deepness = StringVar()
Entry(profiles_frame, textvariable=self.deepness, width=30).grid(row=row,column=12)
row+=10
buttons_frame=Frame(profiles_frame)
buttons_frame.grid(row=row,column=10,sticky=W,columnspan=3)
Button(buttons_frame, text=_("Add profile"),command=self.add_profile).grid(row=row,column=1,sticky=W)
Button(buttons_frame, text=_("Modify profile"),command=self.modify_profile).grid(row=row,column=2,sticky=W)
Button(buttons_frame, text=_("Delete profile"),command=self.delete_profile).grid(row=row,column=3,sticky=W)
Button(buttons_frame, text=_("Test"),command=lambda: self.parent.parent.test(self.bot_gtp,self.command,self.parameters)).grid(row=row,column=4,sticky=W)
self.listbox.bind("<Button-1>", lambda e: self.after(100,self.change_selection))
self.index=-1
self.bot_gtp=GnuGo_gtp
def clear_selection(self):
self.index=-1
self.profile.set("")
self.command.set("")
self.parameters.set("")
self.variations.set("")
self.deepness.set("")
def change_selection(self):
try:
index=int(self.listbox.curselection()[0])
self.index=index
except:
log("No selection")
self.clear_selection()
return
data=self.profiles[index]
self.profile.set(data["profile"])
self.command.set(data["command"])
self.parameters.set(data["parameters"])
self.variations.set(data["variations"])
self.deepness.set(data["deepness"])
def add_profile(self):
profiles=self.profiles
if self.profile.get()=="":
return
data={"bot":self.bot}
data["profile"]=self.profile.get()
data["command"]=self.command.get()
data["parameters"]=self.parameters.get()
data["variations"]=self.variations.get()
data["deepness"]=self.deepness.get()
self.empty_profiles()
profiles.append(data)
self.create_profiles()
self.clear_selection()
def modify_profile(self):
profiles=self.profiles
if self.profile.get()=="":
return
if self.index<0:
log("No selection")
return
index=self.index
profiles[index]["profile"]=self.profile.get()
profiles[index]["command"]=self.command.get()
profiles[index]["parameters"]=self.parameters.get()
profiles[index]["variations"]=self.variations.get()
profiles[index]["deepness"]=self.deepness.get()
self.empty_profiles()
self.create_profiles()
self.clear_selection()
class GnuGoOpenMove(BotOpenMove):
def __init__(self,sgf_g,profile):
BotOpenMove.__init__(self,sgf_g,profile)
self.name='Gnugo'
self.my_starting_procedure=gnugo_starting_procedure
GnuGo={}
GnuGo['name']="GnuGo"
GnuGo['gtp_name']="GNU Go"
GnuGo['analysis']=GnuGoAnalysis
GnuGo['openmove']=GnuGoOpenMove
GnuGo['settings']=GnuGoSettings
GnuGo['gtp']=GnuGo_gtp
GnuGo['liveanalysis']=LiveAnalysis
GnuGo['runanalysis']=RunAnalysis
GnuGo['starting']=gnugo_starting_procedure
if __name__ == "__main__":
main(GnuGo)
We are proud to announce that Pediatric Center at Renaissance has once again been awarded recognition by the National Committee for Quality Assurance Patient-Centered Medical Home Program. The practice received the same recognition three years ago. Dr. Juan Jorge Saenz, Dr. Daniella Rodriguez-Rico, and Dr. Jennifer Saenz are the clinic’s providers who have been awarded recognition by the National Committee for Quality Assurance (NCQA). Pediatric Center at Renaissance was the first, and remains the only, pediatric practice with the NCQA Patient-Centered Medical Home (PCMH) Recognition in Edinburg, Texas.
Our desire to achieve this recognition was driven by our goal to ensure that Pediatric Center at Renaissance always strives to provide our patients with the best medical care, best access to care and the best patient experience we possibly can.
NCQA is a private, non-profit organization dedicated to improving health care quality, and it recognizes the vital role clinicians play in advancing that quality. The Recognition Programs assess whether clinicians and practices support the delivery of high-quality care and are built on evidence-based, nationally recognized clinical standards of care. The NCQA Patient-Centered Medical Home standards emphasize the use of systematic, patient-centered, coordinated care that supports access, communication and patient involvement.
What is a Patient- Centered Medical Home?
A medical home is a physician’s office that provides all your routine and preventative medical care and guides you through the system when you need specialty care or other services. The office works as a team from the receptionist to the doctor to provide you with comprehensive medical care. They use an electronic health record so they can access your information wherever they are taking care of you. A patient centered medical home means that services are provided at a time and in a way that is convenient for the patient when possible. Same day appointments, electronic communication, phone triage services, and access to patient portals are some examples of how practices become patient-centered practices.
What kind of care can I expect from a Patient-Centered Medical Home?
All your medical needs will be taken care of in a medical home, but much of the emphasis will be on preventative care. At every visit, consideration will be given to your immunization status, follow-up on chronic conditions, and follow-up on outstanding labs and/or referrals. Office staff will take active roles in ensuring proper follow-up; you will receive phone calls and/or letters regarding the need for labs, follow-up visits, specialist visits and so forth. The emphasis has switched to the office actively ensuring you are scheduled for the appointments you need, versus handing you a telephone number to a specialist and considering the work done. The team will actively work to help you maintain your health.
Communication is key! It is your responsibility to communicate with your team of providers about changes in your medication, your health, any symptoms you may be having, and any other physicians or clinics you may have seen. You and your health care team will set goals for you that will help improve your health. It is important that you understand what treatments are being recommended and why, and that you discuss any concerns or reservations you may have. Unlike old systems where the doctor told you what was going to happen without any discussion, it is now a joint decision. It is important that you ask questions, understand and agree with plans that are made. Once plans have been made, it is then your responsibility to comply with them, or to discuss with your provider if for some reason you are not complying.
For more information on NCQA’s PCMH program please refer to www.ncqa.org.
The official NCQA directory of Recognized Clinicians can be accessed at http://recognition.ncqa.org. |
"""Example Use."""
# [ Imports ]
# [ -Python ]
import asyncio
import time
# [ -Project ]
import a_sync
def examples() -> None:
"""Run examples."""
def hello(name: str, seconds: int) -> str:
"""
Hello.
Prints 'hello <name>', waits for <seconds> seconds, and then
prints 'bye <name>' and returns the name.
Args:
name - the name to say hello to.
seconds - the seconds to wait to say bye.
Returns:
name - the given name.
"""
print('hello {}'.format(name))
time.sleep(seconds)
print('bye {}'.format(name))
return name
async def async_hello(name: str, seconds: int) -> str:
"""
Hello.
Prints 'hello <name>', waits for <seconds> seconds, and then
prints 'bye <name>' and returns the name.
Args:
name - the name to say hello to.
seconds - the seconds to wait to say bye.
Returns:
name - the given name.
"""
print('hello {}'.format(name))
await asyncio.sleep(seconds)
print('bye {}'.format(name))
return name
background_thread = a_sync.queue_background_thread(hello, 'background-joe', 20)
# expect background-joe immediately
parallel_1 = a_sync.Parallel()
parallel_1.schedule(hello, 'joe', 5)
parallel_1.schedule(hello, 'sam', 3)
parallel_1.schedule(async_hello, 'bob', 1)
# expect start in any order, stop in bob, sam, joe
parallel_2 = a_sync.Parallel()
parallel_2.schedule(async_hello, 'jill', 4)
parallel_2.schedule(async_hello, 'jane', 2)
parallel_2.schedule(hello, 'mary', 1)
# expect start in any order, stop in mary, jane, jill
serial_1 = a_sync.Serial()
serial_1.schedule(parallel_1.run)
serial_1.schedule(parallel_2.block)
# expect bob/sam/joe to end before mary/jane/jill start
parallel_3 = a_sync.Parallel()
parallel_3.schedule(async_hello, 'joseph', 5)
parallel_3.schedule(hello, 'joey', 3)
parallel_3.schedule(async_hello, 'jo', 1)
# expect start in any order, stop in jo, joey, joseph
parallel_4 = a_sync.Parallel()
parallel_4.schedule(hello, 'alex', 4)
parallel_4.schedule(async_hello, 'alexandria', 2)
parallel_4.schedule(hello, 'alexandra', 1)
# expect start in any order, stop in alexandra, alexandria, alex
serial_2 = a_sync.Serial()
serial_2.schedule(parallel_3.run)
serial_2.schedule(parallel_4.block)
# expect joe/joey/joseph to stop before alexandra/alexandria/alex start
final_parallel = a_sync.Parallel()
final_parallel.schedule(serial_1.block)
final_parallel.schedule(serial_2.run)
final_parallel.block()
background_thread.result()
# expect bob/sam/joe to start with jo/joey/joseph
# expect jill/jane/mary to start with alex/alexandria/alexandra
# total expected ordering:
# start joe/sam/bob/joseph/joey/jo
# stop bob/jo
# stop sam/joey
# stop joe/joseph
# start jill/jane/mary/alex/alexandria/alexandra
# stop mary/alexandra
# stop alexandria/jane
# stop alex/jill
# stop background-joe
# [ Examples ]
if __name__ == '__main__': # pragma: no branch
examples()
Last year was Unibroue's 25th anniversary, so for the big milestone the brewery came out with two beers. First, they released 25e Anniversaire, a dark black forest cake ale. They also brewed a beer called Blonde de l'enfer, which was only available in the US until they released it as part of this winter's Unibroue collection (which also features Don de Dieu, Blonde de Chambly, La Fin du Monde, Maudite and Trois Pistoles).
Blonde de l'enfer is Unibroue's take on a classic Belgian Golden Strong Ale.
Appearance: Pours a light copper brown body with a lot of carbonation. Thick off-white head that barely moves, but once it does it leaves behind a sprinkling of lacing that looks a bit like a spider's web.
Aroma: Notes of bubble gum, a hint of iron, a good amount of sweetness, quite boozy at first but it diminishes fairly quickly, and a very subtle hint of pepper/coriander spice at the very end. Smells like a lot of Unibroue's golden ales so far.
Taste: The first thing I get from this beer is a flashback to beers of yesteryear - this tastes like a medley of Unibroue's one-offs I've had over the years.. it reminded me of Eau Bénite, 1837 and even Lune de Miel. This Belgian Golden Strong Ale is decently sweet with a bit of a candi sugar/honey-like flavour to it, a hint of iron-y notes, and the typical Unibroue yeastiness that gives it a certain je ne sais quoi. At 10.5%, I don't get any noticeable booziness, but I'm definitely feeling my body warm up a bit from this beer. There's a light amount of banana and a hint of coriander/pepper at the end again. Very easy to drink, nice and sweet, yet the booziness sneaks up on you.
Overall Thoughts: Solid offering by Unibroue, but this reminds me too much of many of their past offerings. Unibroue has already brewed a Belgian Golden Strong Ale before, Eau Bénite (according to BJCP), and many of Unibroue's one-offs are considered to be the same style according to Untappd. This is something I'd like to see become permanent, but unfortunately it will be gone once the spring collection packs roll out in April or so. Surprisingly easy to drink at 10.5%, but the booze really does sneak up on you, so watch out!
I hope one of these days Unibroue comes out with a collection of just one off beers.. that'd be the day! |
#!/usr/bin/env python
"""ngo-admin で呼ばれる管理用コマンドモジュール."""
import os
import sys
from ngo.backends import NgoTemplate
def startproject(project_name):
"""ngoプロジェクトを作成する."""
import ngo
top_dir = os.getcwd()
origin_project_path = os.path.join(ngo.__path__[0], 'project_template')
# manaeg.pyの作成
manage_py_path = os.path.join(origin_project_path, 'manage')
with open(manage_py_path, 'r') as fp:
src = fp.read()
template = NgoTemplate(src)
src = template.render(
{'project_name': project_name}
)
new_file_path = os.path.join(top_dir, 'manage.py')
with open(new_file_path, 'w') as fp:
fp.write(src)
top_dir = os.path.join(top_dir, project_name)
    # Create the project directory
os.makedirs(top_dir)
    # Create settings.py, urls.py, and wsgi.py
for file in ['settings', 'urls', 'wsgi']:
file_path = os.path.join(origin_project_path, file)
with open(file_path, 'r') as fp:
src = fp.read()
template = NgoTemplate(src)
src = template.render(
{'project_name': project_name}
)
new_file_path = os.path.join(top_dir, file+'.py')
with open(new_file_path, 'w') as fp:
fp.write(src)
def main():
"""main."""
function_name, args = sys.argv[1], sys.argv[2:]
function = globals()[function_name]
function(*args)
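# Usage sketch (assuming this module is exposed as the `ngo-admin` console
# script): `ngo-admin startproject myproject` dispatches via main() to
# startproject('myproject').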
A straw bale house I built in Wales. The build was the pioneering project for my new company, Hartwyn. We were joined by a team of interns who built the home with wonderful dedication and skill. They exchanged their effort and commitment for an education in natural building and solid hands-on experience. An exchange that provides the clients with a cheaper build and educates future natural builders.
This was the first project for my new company, Hartwyn – check out our website and get in touch to discuss building for you!
This project in Powys, Wales was set in a small clearing in an Ash (Fraxinus excelsior) woodland. The client wanted to blur the lines between interior and exterior and create something that fits into the landscape. The house had to be eco-friendly, healthy to live in and affordable.
The project is 30sqm internally, with a 12.5sqm mezzanine. Budget was £45,000.
To achieve the brief we used large glass sliding doors on the South and West sides. The open plan nature of the space gives a larger feel and keeps the connection to the outside throughout the house. A large wrap around deck is soon to be constructed to further connect inside and outside space.
Local materials and materials traditional to the area were used. Lots of wooden features echo the surroundings, much of the timber left with its waney edge and all sourced from the local area.
The building was plastered with site clay and local sand. Externally we added Welsh lime to increase durability in the wet climate. Insulation came from wheat straw, baled by a local farmer, Welsh sheep wool and Thermofloc recycled newspaper cellulose. Locally grown sedum is growing on the green roof. The foundations utilise car tyres, a waste product, rammed with gravel to provide a low-cost, non-permeable, solid footing for the building.
Due to the tight budget, the use of reclaimed materials was key. Bathroom tiles, sinks, taps, windows and flooring were all salvaged from building sites or found second hand.
Because of our unique intern build method, we were able to utilise the materials on site and recondition the reclaimed materials. The interns received hands-on experience and education in natural building and green design in return for some good hard work. This meant that the project stayed on budget and used the most sustainable local materials. |
##############################################################################################
# Copyright 2014-2015 Cloud Media Sdn. Bhd.
#
# This file is part of Xuan Application Development SDK.
#
# Xuan Application Development SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xuan Application Development SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xuan Application Development SDK. If not, see <http://www.gnu.org/licenses/>.
##############################################################################################
import time
import random
import hashlib
import hmac
import base64
from com.cloudMedia.theKuroBox.sdk.util.httpUtil import HttpUtil
from com.cloudMedia.theKuroBox.sdk.util.util import Util
class OAuth(object):
'''
This is an OAuth class for generate oauth signature.
'''
signatureMethod = "HMAC-SHA1"
version = "1.0"
def __init__(self):
pass
@staticmethod
def nonce():
'''
get oauth nonce.
'''
pass
@staticmethod
def sort(ls):
'''
sort the list according alphabetical order.
'''
pass
@staticmethod
def timestamp():
'''
get current timestamp.
'''
pass
@staticmethod
def create_signature(url, body="", _oauth_token="", _oauth_secret="", consumer_key="", consumer_secret="", http_method="POST"):
'''
return oauth signature, timestamp and nonce
'''
pass
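# The methods above ship as empty stubs in this public SDK. The helper below is
# a minimal sketch of the standard OAuth 1.0 HMAC-SHA1 signing scheme they
# describe (RFC 5849), not the SDK's stripped implementation; urllib.parse is an
# extra assumed import, while hmac/hashlib/base64 are already imported above.
def example_hmac_sha1_signature(http_method, url, params, consumer_secret, token_secret=""):
    from urllib.parse import quote
    # 1. Percent-encode and alphabetically sort the request parameters.
    normalized = "&".join("%s=%s" % (quote(str(k), safe=""), quote(str(v), safe=""))
                          for k, v in sorted(params.items()))
    # 2. Signature base string: METHOD&encoded-url&encoded-parameter-string.
    base = "&".join([http_method.upper(), quote(url, safe=""), quote(normalized, safe="")])
    # 3. Signing key: encoded consumer secret and token secret joined by '&'.
    key = quote(consumer_secret, safe="") + "&" + quote(token_secret, safe="")
    digest = hmac.new(key.encode(), base.encode(), hashlib.sha1).digest()
    return base64.b64encode(digest).decode()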
Strathmore members save 10% off all food purchases when you present your membership card. Valid through March 31, 2020.
City Perch is a seasonal American dining room by James Beard-renowned Chef Sherry Yard located at Pike & Rose in North Bethesda. Serving contemporary comfort food sourced locally, the dining room is a natural-light-infused social scene by day and an intimate venue for romantic dinners and lively social hours on the outdoor patio by night.
K Town Bistro is a family operated restaurant nestled in the heart of Historic Kensington serving a variety of international cuisine. The owner, Gonzalo Barba, has over 40 years of experience in the restaurant industry, including twenty years at the renowned Watergate Hotel. Mr. Barba's personable nature combined with a passion for fine dining creates a wonderful place to eat out with a stay-at-home feel. The chefs of K Town Bistro formerly worked at the Old Angler's Inn and The Vyne Bistro.
Strathmore members save $5 off lunch and $10 off dinner when you present your membership card. Valid for dine-in only. One coupon per table. Cannot be combined with any other offer/promotion.
Located in the heart of Pike & Rose, Summer House mimics the laid-back vibes of the West Coast, complete with beach house décor & hanging string lights. The chef-driven menu highlights fresh ingredients in simple, handcrafted dishes served from our open kitchen & wood fire grill. Next door at Stella Barra is an artisanal pizzeria with a rustic yet modern dining room. The ever-changing seasonal menu features handcrafted pizzas alongside a selection of market-driven small plates, salads and sandwiches.
Strathmore members save 20% when you present your membership card and a ticket for that day’s performance. Valid for dine-in only. Cannot be used during happy hour or combined with other offers/promotions.
Interested in becoming a preferred restaurant and a Strathmore sponsor? Please contact the Development Department at 301.581.5133. |
"""
Settings for emencia-django-socialaggregator (EDSA)
"""
INSTALLED_APPS = add_to_tuple(INSTALLED_APPS,
'django_extensions',
'taggit',
'socialaggregator')
# Optional specific formatter
#RESSOURCE_FORMATTER = "project.mods_available.socialaggregator.formatter.RessourceFormatterCustom"
# Twitter access keys
EDSA_TWITTER_TOKEN = 'FILLME'
EDSA_TWITTER_SECRET = 'FILLME'
EDSA_TWITTER_CONSUMER_KEY = 'FILLME'
EDSA_TWITTER_CONSUMER_SECRET = 'FILLME'
# Instagram access keys
EDSA_INSTAGRAM_ACCESS_TOKEN = 'FILLME'
# Facebook access keys
EDSA_FB_APP_ID = 'FILLME'
EDSA_FB_APP_SECRET = 'FILLME'
# Google+ access keys
EDSA_GOOGLE_DEVELOPER_KEY = 'FILLME'
# Pagination for ressource list in views
EDSA_PAGINATION = 16
# Enabled plugins and their engine
EDSA_PLUGINS = {
"edsa_twitter": {
#"ENGINE": "socialaggregator.plugins.twitter_aggregator",
"ENGINE": "socialaggregator.plugins.twitter_noretweet_aggregator",
"NAME": "Twitter"
},
"edsa_instagram": {
"ENGINE": "socialaggregator.plugins.instagram_aggregator",
"NAME": "Instagram"
},
"edsa_facebook_fanpage": {
"ENGINE": "socialaggregator.plugins.facebook_fanpage_aggregator",
"NAME": "Facebook Fanpage"
},
"edsa_wordpress_rss": {
"ENGINE": "socialaggregator.plugins.wordpress_rss_aggregator",
"NAME": "Wordpress RSS"
},
"edsa_youtube_search": {
"ENGINE": "socialaggregator.plugins.youtube_search_aggregator",
"NAME": "Youtube search"
},
}
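# To enable an additional aggregator, add an entry with the same shape, e.g.
# (hypothetical engine path):
#EDSA_PLUGINS["edsa_custom_rss"] = {
#    "ENGINE": "socialaggregator.plugins.custom_rss_aggregator",
#    "NAME": "Custom RSS",
#}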
# Used templates
EDSA_VIEW_TEMPLATE = 'socialaggregator/ressource_list.html'
EDSA_TAG_TEMPLATE = 'socialaggregator/ressource_list_tag.html'
EDSA_PLUGIN_TEMPLATE = 'socialaggregator/cms_plugin_feed.html'
# Image size limit (in Ko, use 0 for no size limit)
EDSA_RESSOURCE_IMAGE_SIZE = 0
# Various ressource fields choices
EDSA_RESSOURCE_VIEW_SIZES = (
('default', gettext('default')),
('small', gettext('small')),
#('xsmall', gettext('Xsmall')),
('medium', gettext('medium')),
('large', gettext('large')),
#('xlarge', gettext('Xlarge')),
)
EDSA_RESSOURCE_TEXT_DISPLAY = (
('default', gettext('default')),
#('bottom', gettext('bottom')),
#('top', gettext('top')),
)
EDSA_RESSOURCE_BUTTON_COLOR = (
('white', gettext('white')),
#('black', gettext('black')),
#('primary', gettext('primary')),
#('secondary', gettext('secondary')),
#('tertiary', gettext('tertiary')),
)
EDSA_RESSOURCE_MEDIA_TYPE = (
('url', gettext('url')),
('image', gettext('image')),
('video', gettext('video')),
)
# Media content types to add to the ones from EDSA_PLUGINS
EDSA_RESSOURCE_BASE_MEDIA_TYPES = [
('edsa_article', 'Article Event'),
] |
Maternity leave increased from 12 to 26 weeks.
Female employees with at least two children continue to be entitled to 12 weeks of maternity leave.
Maternity leave of 12 weeks for female employees adopting a child below 3 months and for commissioning mothers.
Employers having at least 50 employees to provide creche facility.
Working mothers may be entitled to work from home.
Effective date of the amendment yet to be notified.
The wait finally ends! Maternity leave in India stands increased to 26 weeks for the private sector. The effective date of the amendment will be separately notified.
After enactment of the stringent law on prevention of sexual harassment of women at workplace in 2013, the Indian government has now given female employees another reason to rejoice. With this development, maternity leave to be offered by private employers in India has been brought at par with that of government employees in India. It is significant to note that India now surpasses many European and Asian countries in terms of maternity benefits being provided to working mothers.
Following enactment of the Maternity Benefit (Amendment) Act, 2017 ("Maternity Amendment Act") on March 27, 2017, the law also (a) extends maternity benefits to commissioning and adopting mothers, (b) mandates employers to provide creche facilities at the establishment, (c) allows women to work from home in certain cases and (d) requires employers to inform female employees at the time of their joining about maternity benefits available to them.
The Maternity Act entitles female employees in Covered Establishments to receive maternity benefits if they have worked with the employer for at least 80 days in the 12 months immediately preceding the date of expected delivery ("Eligible Employee").
In addition to the Maternity Act, certain other labour laws in India also provide for maternity benefits. The ESI Act provides for payment of wages to an insured woman during her 26-week maternity leave. Women employed in newspapers or working as journalists are also entitled to maternity leave of 3 months under the Working Journalists (Conditions of Service) and Miscellaneous Provisions Act, 1955. Similarly, female employees working in factories are entitled to 3 months of maternity leave with full wages under the Factories Act, 1948.
Over the past few years, there has been a continual demand to improve the maternity benefits provided to female employees, in order to ensure that infants receive wholesome and proper care and nourishment. The Universal Declaration of Human Rights (UDHR, 1948) pronounced the special rights of children for the first time by providing that "motherhood and childhood are entitled to special care and assistance". The General Assembly in 2003 opined that "early childhood is a crucial period for the sound development of young children. Missed opportunities during these early years cannot be made up at later stages of the child's life"[1].
The first legally binding international document concerning child rights was the Convention on the Rights of the Child, 1989, which India ratified in 1992. The Convention required its member states to ensure, to the maximum extent possible, child survival and development[2], render appropriate assistance to parents and legal guardians in the performance of their child-rearing responsibilities, and ensure the development of institutions, facilities and services for the care of children[3]. As per the standards set out by the International Labour Organization (ILO) in the Maternity Protection Convention, 2000[4], member states have been recommended to provide at least 14 weeks of maternity leave. The Maternity Protection Recommendation, 2000 of the ILO recommends a longer period of 18 weeks of maternity leave.
The Sixth Central Pay Commission[5] of India made a recommendation in 2015 to relax the period of maternity leave to six months for Central Government employees, which was considered and subsequently implemented[6]. This was in line with the guidelines issued by the World Health Organization[7] as well as the Indian Ministry of Health and Family Welfare stating, inter alia, that a baby needs to be nursed by the mother for a minimum period of six months. In a few States, the respective State Governments[8] have also relaxed the period of maternity leave for State Government employees and have provided for an additional child care leave of 730 days (approximately 2 years)[9] in special circumstances, to allow mothers to look after children grappling with serious diseases. Last year, fulfilling an electoral promise, the State of Tamil Nadu announced 9 months (270 days) of maternity leave for State government employees, as opposed to the earlier limit of 6 months (180 days)[10].
Other State Governments are also seeing demands from employee unions for a similar change to be implemented in their States[11]. It is also interesting to note that a number of leading companies had already voluntarily amended their policies prior to the Maternity Amendment Act to increase maternity benefits and provide various new-age benefits such as adoption leave, surrogacy leave, paternity leave, etc.[12]
Increase in Maternity Leave: Maternity leave for Eligible Employees has been increased to 26 weeks (as against the previous 12-week limit) in the case of women having fewer than two surviving children. In other cases, the existing period of 12 weeks continues to apply. Out of the 26 weeks, not more than 8 weeks can be taken before the date of expected delivery, whereas earlier the pre-natal period was prescribed to be not more than 6 weeks.
Commissioning[13] and Adopting mothers: Maternity benefit has now been extended to commissioning and adopting mothers. A female employee adopting a child below 3 months of age and commissioning mothers shall be entitled to 12 weeks of maternity benefit from the date the child is handed over to them.
Creche facility: Employers having at least 50 employees will be required to provide a creche facility, either individually or as a shared common facility, within such distance as may be prescribed by rules. The employer shall also be required to allow the woman four visits a day to the creche, including the interval for rest allowed to her.
Work from home: Employers may allow Eligible Employees to work from home on a case to case basis depending on the nature of work. The conditions governing such work from home may be mutually agreed between the employer and the employee.
Written intimation: Every employer shall be required to inform every female employee, in writing and electronically, at the time of commencement of employment, about the benefits available to her under the Maternity Act.
52 weeks (12 months) of unpaid leave, with an option to take an additional 12 months with the consent of the employer.
Parental leave can be split between the parents so long as total leave taken by both of them does not exceed 24 months.
Special maternity leaves (unpaid) may also be availed by an employee over and above the aforementioned parental leave.
15 weeks (approximately 3.5 months) of paid leaves at the rate of 82 percent of the employee's salary for the first 30 days and 75 percent for the remainder subject to a maximum salary prescribed by the government.
Mandatory 120 days of paid leaves at the rate of 100% of wages with additional 60 days at the option of the employer.
Companies that choose to grant the additional 60 days will have the right to a tax benefit equal to the amount of the employee's salary during this extension period.
Varies between 15-17 weeks (depending on the province) of paid maternity leaves.
Maternity leave benefits are paid out by the federal government through Employment Insurance.
The employee will receive wages equal to 55 per cent of the employee's average weekly pay up to a maximum amount which is set by the government each year.
Additional 'late maternity leave' of roughly 30 days (depending on location) if the female employee is older than 24 years.
16 weeks of paid leaves for single birth paid at 100% of salary capped at a particular amount (via social security scheme).
14 weeks of paid leaves calculated at the rate of 100% of the employee's earnings without any ceiling (via insurance and employers).
14 weeks of paid leaves (6 weeks pre-birth and 8 weeks post-birth) paid at the rate of 66% of the mother's regular salary.
20 weeks (140 days) of paid leaves divided equally pre and post birth.
Wages are calculated at the rate of 100% of average earnings, based on employment during the 24 months before taking leave, subject to a ceiling tied to the cap on earnings for social insurance contributions established by the state on an annual basis, the actual number of days worked and the length of the leave.
Maternity leave may be extended to 194 days (approximately 28 weeks) in the event of multiple pregnancies or complications, in which case, 84 days before birth and 110 days after birth may be taken for multiple births (such as twins or triplets) or 86 days after the birth if there are any complications.
16 weeks of paid leave at the rate of 100% of the wages up to two children (funded by the employer for 8 weeks and 8 weeks by the government).
For 3rd and subsequent childbirths, the government will pay for the full 16 weeks of maternity leave.
480 days (60 weeks) of paid parental leave, of which 420 days are paid at a rate of 80% of the employee's salary up to a prescribed cap (via social insurance).
Employees working in a firm of 50 or more employees, who have maintained employment with the same business for 12 months and have accumulated at least 1,250 working hours over those 12 months, shall be entitled to 12 weeks of unpaid leave.
Devise a non-discriminatory performance appraisal system that takes into consideration the fact that the female employee was on maternity leave for 6 months.
Additionally, certain Indian states, including Andhra Pradesh, Odisha, Punjab, Rajasthan, Uttar Pradesh and West Bengal, which have maternity leave provisions in their state-specific legislations, will need to amend those provisions to bring them in line with the Maternity Amendment Act.
Certain other labour laws[14] also provide for maternity benefits to women employees in different sectors; these laws differ in their coverage, benefits and financing. The conflicting provisions in those laws will also need to be amended.
While India's private sector employers lagged behind the government sector and many other countries in terms of providing extended maternity benefits, with the enactment of the Maternity Amendment Act, India has become one of the most progressive countries in terms of providing maternity benefits.
The Maternity Amendment Act is definitely a welcome step taken by the Indian government enabling women to combine their professional and personal roles successfully and to promote equal opportunities and treatment in employment and occupation, without prejudice to health or economic security.
A survey by the leading industry body Assocham[15] suggests that up to 25% of female employees give up their careers after childbirth. Additional maternity benefits like the creche facility and work from home, on top of the extended paid maternity leave, may see demonstrable results in the form of a greater number of female employees returning to work post maternity, and greater employee retention over time.
Although the Maternity Amendment Act is expected to benefit ~1.8 million women across the country, the government seems to have overlooked the recommendation of the Sixth Central Pay Commission and has left out the majority of the workforce that works in the unorganized sector[16] in India (estimated to be over 90% of the total workforce).
The Maternity Amendment Act has missed the opportunity to introduce paternity leave, and with it a chance to spread the message that the responsibility of running a family belongs to both parents. It seems we will need to wait longer for a 'Maternity and Paternity Benefit Act'.
Countries such as the UK, Singapore and Australia have introduced various categories of leave relating to childbirth, including parental leave (enabling parents to share the leave in the manner suitable for them), paternity leave, family leave, etc., where both parents receive the benefit of leave at the time of childbirth. This, to an extent, helps parents strike a balance between their careers and personal lives, and also ensures that the child gets proper care and attention from both parents, especially in his or her initial years of development. Although the steps taken by the government are commendable, it has missed this opportunity to catch up with such practices.
Unlike some other countries, where the cost is borne by the government, shared by the government and employers, or covered through social security schemes, in India the cost of maternity leave (wages during the leave period) is to be borne by the employer (unless the female employee is covered under the ESI Act). Additional requirements such as providing a creche facility would also require employers to establish adequate infrastructure, in turn leading to more questions and more expenses. To help reduce the financial exposure of the employer, an option could have been provided for partly unpaid leave, something that is common in some developed nations.
There have been some news reports suggesting that the increased maternity leave could act as a deterrent for certain employers when recruiting female candidates. While it is hoped that this is not the case, it could unfortunately undermine all the good work put in so far to promote diversity and the inclusion of women at workplaces.
Given the objective of the statute, there also does not seem to be a valid justification as to why the law should not apply to establishments having fewer than 10 employees. The amendment fails to extend the applicability of the statute to all establishments irrespective of the number of employees.
The Maternity Amendment Act has not increased the medical bonus amount payable which is currently low and does not match up to the current inflationary trends.
[4] India has not ratified this Convention.
[13] A biological mother who uses her egg to have a surrogate child.
[16] Female workers in the unorganised sector include agricultural labourers, seasonal workers, domestic workers or construction workers.
from __future__ import unicode_literals
from django.conf import settings
import six.moves.urllib.parse as urlparse
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api.swift import swift_api
from openstack_dashboard.api.swift import Container
from openstack_dashboard.api.swift import GLOBAL_READ_ACL
from openstack_dashboard.api import base
from oslo_utils import timeutils
import requests
import json
@memoized
def get_token(request):
return request.user.token.id
# -----------------------------------------------------------------------------
#
# Swift - Regions
#
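# NOTE: every helper below follows the same pattern: read the user's Keystone
# token from the request, forward it in the X-Auth-Token header to the
# IOSTACK controller REST API, and return the raw `requests` response object
# to the caller (no status checking or JSON decoding is done here).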
def swift_list_regions(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def new_region(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps(data), headers=headers)
return r
def delete_region(request, region_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions/" + str(region_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
def update_region(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions/" + str(data['region_id'])
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(data), headers=headers)
return r
def get_region(request, region_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions/" + str(region_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
#
# Swift - Zones
#
def swift_list_zones(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def new_zone(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps(data), headers=headers)
return r
def delete_zone(request, zone_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones/" + str(zone_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
def update_zone(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones/" + str(data['zone_id'])
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(data), headers=headers)
return r
def get_zone(request, zone_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones/" + str(zone_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
#
# Swift - Nodes
#
def swift_get_all_nodes(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/nodes"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_get_node_detail(request, server_type, node_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/nodes/" + str(server_type) + "/" + str(node_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_update_node(request, server_type, node_id, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/nodes/" + str(server_type) + "/" + str(node_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(data), headers=headers)
return r
def swift_restart_node(request, server_type, node_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + '/swift/nodes/' + str(server_type) + "/" + str(node_id) + '/restart'
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, headers=headers)
return r
#
# Swift - Storage Policies
#
def swift_new_storage_policy(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policies"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps(data), headers=headers)
return r
def swift_delete_storage_policy(request, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
# TODO
def load_swift_policies(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policies/load"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps({}), headers=headers)
return r
def deploy_storage_policy(request, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id) + "/deploy"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps({}), headers=headers)
return r
def swift_list_storage_policies(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policies"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_list_deployed_storage_policies(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policies/deployed"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_available_disks_storage_policy(request, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id) + "/disk/"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_storage_policy_detail(request, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_edit_storage_policy(request, storage_policy_id, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(data), headers=headers)
return r
def swift_add_disk_storage_policy(request, storage_policy_id, disk_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id) + "/disk/"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(disk_id), headers=headers)
return r
def swift_remove_disk_storage_policy(request, storage_policy_id, disk_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id) + "/disk/" + disk_id
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
#
# Swift - Containers
#
def swift_get_container(request, container_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name, "")
else:
data = None
headers = swift_api(request).head_container(container_name)
    timestamp = None
    is_public = False
    public_url = None
    metadata = ''  # default in case the try-block below fails early
try:
is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '')
if is_public:
swift_endpoint = base.url_for(request,
'object-store',
endpoint_type='publicURL')
parameters = urlparse.quote(container_name.encode('utf8'))
public_url = swift_endpoint + '/' + parameters
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
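        # Collect custom container metadata (any 'x-container-meta-*'
        # headers) into a human-readable 'Key=value, ...' string.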
metadata = ''
for header in headers:
if header.startswith('x-container-meta-'):
key_name = header.replace('x-container-meta-', '').replace('-', ' ').title()
value = headers[header]
metadata += key_name + '=' + value + ', '
metadata = metadata[0:-2]
except Exception:
pass
container_info = {
'name': container_name,
'container_object_count': headers.get('x-container-object-count'),
'container_bytes_used': headers.get('x-container-bytes-used'),
'timestamp': timestamp,
'data': data,
'is_public': is_public,
'public_url': public_url,
'storage_policy': headers.get('x-storage-policy'),
'metadata': metadata,
}
return Container(container_info)
def swift_get_project_containers(request, project_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/" + str(project_id) + "/containers"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_create_container(request, project_id, container_name, container_headers):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/" + project_id + "/" + str(container_name) + "/create"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps(container_headers), headers=headers)
return r
def swift_update_container_policy(request, project_id, container_name, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/" + project_id + "/" + str(container_name) + "/policy"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(storage_policy_id), headers=headers)
return r |
As a new type of optical fiber sensing technology, the optical fiber laser sensing system combines the advantages of fiber sensors (high sensitivity, distributed measurement capability, and low susceptibility to electromagnetic interference) with those of fiber lasers (narrow linewidth and high optical signal-to-noise ratio), so it is used for measurement and security monitoring in fields such as oil, mining, bridges, electricity, and aircraft. The research progress of optical fiber laser sensing systems is reviewed from two aspects. The first is optical fiber laser sensing systems for single-parameter measurement, including temperature, strain, refractive index, current, acoustic waves, wind speed and so on; the second is optical fiber laser sensing systems for dual-parameter measurement, which are mainly used to solve the cross-sensitivity problems between temperature and transverse stress, strain, or refractive index.
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from textwrap import dedent
from unittest import mock
from testtools.matchers import Equals, HasLength
from snapcraft.internal import errors
from snapcraft.plugins import plainbox_provider
from snapcraft.project import Project
from tests import fixture_setup, unit
class PlainboxProviderPluginPropertiesTest(unit.TestCase):
def test_schema(self):
"""Test validity of the Scons Plugin schema"""
schema = plainbox_provider.PlainboxProviderPlugin.schema()
# Verify the presence of all properties
properties = schema["properties"]
self.assertThat(properties, Equals({}))
def test_get_pull_properties(self):
expected_pull_properties = []
resulting_pull_properties = (
plainbox_provider.PlainboxProviderPlugin.get_pull_properties()
)
self.assertThat(
resulting_pull_properties, HasLength(len(expected_pull_properties))
)
for property in expected_pull_properties:
self.assertIn(property, resulting_pull_properties)
def test_get_build_properties(self):
expected_build_properties = []
resulting_build_properties = (
plainbox_provider.PlainboxProviderPlugin.get_build_properties()
)
self.assertThat(
resulting_build_properties, HasLength(len(expected_build_properties))
)
for property in expected_build_properties:
self.assertIn(property, resulting_build_properties)
class PlainboxProviderPluginTest(unit.TestCase):
def setUp(self):
super().setUp()
snapcraft_yaml_path = self.make_snapcraft_yaml(
dedent(
"""\
name: plainbox-snap
base: core18
"""
)
)
self.project = Project(snapcraft_yaml_file_path=snapcraft_yaml_path)
class Options:
source = "."
self.options = Options()
patcher = mock.patch.object(plainbox_provider.PlainboxProviderPlugin, "run")
self.mock_run = patcher.start()
self.addCleanup(patcher.stop)
def test_build(self):
plugin = plainbox_provider.PlainboxProviderPlugin(
"test-part", self.options, self.project
)
os.makedirs(plugin.sourcedir)
# Place a few files with bad shebangs, and some files that shouldn't be
# changed.
files = [
{
"path": os.path.join(plugin.installdir, "baz"),
"contents": "#!/foo/bar/baz/python3",
"expected": "#!/usr/bin/env python3",
},
{
"path": os.path.join(plugin.installdir, "bin", "foobar"),
"contents": "#!/foo/baz/python3.5",
"expected": "#!/usr/bin/env python3.5",
},
{
"path": os.path.join(plugin.installdir, "foo"),
"contents": "foo",
"expected": "foo",
},
{
"path": os.path.join(plugin.installdir, "bar"),
"contents": "bar\n#!/usr/bin/python3",
"expected": "bar\n#!/usr/bin/python3",
},
]
for file_info in files:
os.makedirs(os.path.dirname(file_info["path"]), exist_ok=True)
with open(file_info["path"], "w") as f:
f.write(file_info["contents"])
plugin.build()
env = os.environ.copy()
env["PROVIDERPATH"] = ""
calls = [
mock.call(["python3", "manage.py", "validate"], env=env),
mock.call(["python3", "manage.py", "build"]),
mock.call(["python3", "manage.py", "i18n"]),
mock.call(
[
"python3",
"manage.py",
"install",
"--layout=relocatable",
"--prefix=/providers/test-part",
"--root={}".format(plugin.installdir),
]
),
]
self.mock_run.assert_has_calls(calls)
for file_info in files:
with open(os.path.join(plugin.installdir, file_info["path"]), "r") as f:
self.assertThat(f.read(), Equals(file_info["expected"]))
def test_build_with_provider_stage_dir(self):
self.useFixture(fixture_setup.CleanEnvironment())
plugin = plainbox_provider.PlainboxProviderPlugin(
"test-part", self.options, self.project
)
os.makedirs(plugin.sourcedir)
provider_path = os.path.join(
self.project.stage_dir, "providers", "test-provider"
)
os.makedirs(provider_path)
# Place a few files with bad shebangs, and some files that shouldn't be
# changed.
files = [
{
"path": os.path.join(plugin.installdir, "baz"),
"contents": "#!/foo/bar/baz/python3",
"expected": "#!/usr/bin/env python3",
},
{
"path": os.path.join(plugin.installdir, "bin", "foobar"),
"contents": "#!/foo/baz/python3.5",
"expected": "#!/usr/bin/env python3.5",
},
{
"path": os.path.join(plugin.installdir, "foo"),
"contents": "foo",
"expected": "foo",
},
{
"path": os.path.join(plugin.installdir, "bar"),
"contents": "bar\n#!/usr/bin/python3",
"expected": "bar\n#!/usr/bin/python3",
},
]
for file_info in files:
os.makedirs(os.path.dirname(file_info["path"]), exist_ok=True)
with open(file_info["path"], "w") as f:
f.write(file_info["contents"])
plugin.build()
calls = [
mock.call(
["python3", "manage.py", "validate"],
env={"PROVIDERPATH": provider_path},
),
mock.call(["python3", "manage.py", "build"]),
mock.call(["python3", "manage.py", "i18n"]),
mock.call(
[
"python3",
"manage.py",
"install",
"--layout=relocatable",
"--prefix=/providers/test-part",
"--root={}".format(plugin.installdir),
]
),
]
self.mock_run.assert_has_calls(calls)
for file_info in files:
with open(os.path.join(plugin.installdir, file_info["path"]), "r") as f:
self.assertThat(f.read(), Equals(file_info["expected"]))
def test_fileset_ignores(self):
plugin = plainbox_provider.PlainboxProviderPlugin(
"test-part", self.options, self.project
)
expected_fileset = [
"-usr/lib/python*/sitecustomize.py",
"-etc/python*/sitecustomize.py",
]
fileset = plugin.snap_fileset()
self.assertListEqual(expected_fileset, fileset)
class PlainboxProviderPluginUnsupportedBaseTest(unit.TestCase):
def setUp(self):
super().setUp()
snapcraft_yaml_path = self.make_snapcraft_yaml(
dedent(
"""\
name: plainbox-snap
base: unsupported-base
"""
)
)
self.project = Project(snapcraft_yaml_file_path=snapcraft_yaml_path)
class Options:
source = "dir"
self.options = Options()
def test_unsupported_base_raises(self):
self.assertRaises(
errors.PluginBaseError,
plainbox_provider.PlainboxProviderPlugin,
"test-part",
self.options,
self.project,
)
|
The college admissions process is like sending a friend request to your school of choice—and in this scenario, the way you maintain your Facebook, and even your Twitter, could matter just as much.
A Kaplan survey indicated that more than 80 percent of college admissions offices reach out to and recruit students on Facebook, which means the people holding the keys to your academic future could be browsing your page. So just as you would edit your applications and essays, it’s vital that you edit your social networks.
First, you’ll need to clean up. Carefully comb your page, and if something appears questionable, get rid of it or untag yourself. Filter through old notes and blogs and trim entries that don’t reflect well on you. Does your “About Me” section express the interests of a potential college student, or of a silly high schooler?
Now, make some positive changes to your online image. Your profile picture is the most visible part of your account. Choose an image that best personifies who you are (or how you’d like to be perceived). It doesn’t need to be a headshot, but it should be an appropriate image that would appeal to an admissions office. They aren’t looking for more students to party hard or fail classes—they are looking for vibrant students who will excel.
You should also monitor your friends’ activity on your page. Just as your relationships reflect who you are, their comments, photos and tags play a part in your overall virtual impression on others. Delete or block friends who frequently make inappropriate remarks. Change your privacy settings so you have to approve Twitter followers or picture tags.
Finally, play the part of the prospective student. “Like” the universities you are interested in and interact with their online community. If you have an online portfolio, blog or résumé, add its link to your info. It won’t be long before that little “Education” box sees some big changes. |
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.six import iteritems
from django_twilio_sms.models import Action, Response
class Command(BaseCommand):
help = "Sync responses from settings.DJANGO_TWILIO_SMS_RESPONSES"
def handle(self, *args, **options):
if hasattr(settings, 'DJANGO_TWILIO_SMS_RESPONSES'):
for action in Action.objects.all():
action.delete()
for action, response in iteritems(
settings.DJANGO_TWILIO_SMS_RESPONSES):
action = Action.objects.create(name=action)
response = Response.objects.create(
body=response, action=action
)
self.stdout.write('CREATED: {}-{}'.format(
action.name, response.body
))
else:
self.stdout.write('No responses found in settings.')
if Action.objects.all().count() > 0:
for action in Action.objects.all():
action.delete()
self.stdout.write('All saved responses have been deleted.')
|
Conrad Anker climbs through a crevasse between Camp 1 and Camp 2 at the base of Mount Everest.
Remove all the risks from Everest and Rainier and the adventure is lost.
In light of the deaths of six climbers attempting to summit Mount Rainier via the Liberty Ridge Route, many will question the logic of climbing dangerous mountains. "You're an idiot for risking your life for an egotistical pursuit," went one outraged email I received after climbing Everest for the third time.
I have heard many of these criticisms over the years. The British climber George Mallory heard them also. "What is the use of climbing Mount Everest? And my answer must at once be, it is no use," he wrote in 1923. "There is not the slightest prospect of any gain whatsoever."
I suspect that some of those who want to see more safety measures introduced into mountaineering might agree with the Chinese climber Wang Jing's decision to use a helicopter to bypass one of the most dangerous parts of the route to the top of Mount Everest.
"We have the technology to avoid putting climbers in harm's way, so why not use it?" they might say.
My response—and that of many of my fellow climbers—is that the danger of climbing mountains is part of what makes it a powerful and enriching experience.
That's not to say technology has no place in climbing. The human drive to climb Everest has been an ever-evolving journey. The first Everest expedition in 1921 started with a walk from Darjeeling, some 400 miles away.
The 1953 expedition flew into the Kathmandu Valley and began their trek at the valley rim. By 1963 the road had been extended a bit farther, yet the approach still required three weeks of arduous hiking.
Since the mid-1970s, with the opening of the Lukla airport, climbers have started their approach within the Khumbu watershed, the hike to Base Camp limited only by how fast you can acclimatize.
Beyond the bare essentials—bravery, endurance, and teamwork—the recipe for getting to the summit has been tweaked continually with new technological advancements introduced by each new generation of climbers. But what has not changed is the principle that climbing a peak is more than just reaching a summit.
Within the mountaineering community, the method a climber uses to ascend a peak is essential to the endeavor and a key element in measuring an expedition's success. By comparison, a hunter who shoots a penned animal in a controlled environment with a high-powered rifle is engaging in a completely different type of hunt than someone who uses a bow and arrow in a wild setting.
Similarly, the Boston Marathon is an amazing foot race because it is exactly that: No bikes, cars, subways, or horses are allowed. The America's Cup is special because it is solely the wind that powers the boats. And on Everest, the climb begins at Base Camp—if you take a helicopter part of the way up, have you climbed the mountain?
Wang Jing reached the summit of Everest on May 23 with a team of five Sherpas via the South Col route on the Nepal side. The popular route winds through the dangerous Khumbu Icefall, where on April 18 an ice avalanche killed 13 Sherpas and three other Nepali mountain guides.
Wang's ascent is unique in that she used a helicopter to avoid the icefall, flying over the cascade of jumbled ice in a matter of minutes and starting her climb from Camp 2, which at roughly 19,000 feet (5,900 meters) is almost two-thirds of the way to the summit. Nevertheless, the Nepali authorities are in the process of validating her ascent.
Helicopters aren't new to Everest. They have been used there for rescues, resupply, scenic rides, and scientific study for decades. Notably in 1963, Tom Hornbein and Willi Unsoeld were whisked off to Kathmandu after their harrowing ascent and traverse of the mountain's West Ridge.
During peak season on the Nepal side, there are multiple arrivals each morning, with helicopters arriving to replenish supplies, bring climbers to camp, and offer landings for tourists willing to brave a rapid jump in altitude. This past season the medical team at Everest ER noted 28 landings in one day. By contrast, on the other side of Everest in Tibet, the Chinese government forbids the use of helicopters.
And on North America's biggest mountain, Denali—where all climbers are required to begin their expeditions on the Southeast Fork of the Kahiltna—helicopters are used only for rescue.
Using helicopters to rescue injured or sick climbers is an obvious and justifiable purpose, but I have watched helicopters pick up wealthy clients for a brief interlude in Kathmandu. While a quick trip down to the lowland, thick air might benefit acclimatization, that indulgent use of technology violates the whole reason for climbing Everest in the first place.
By some reports, Wang, who owns a China-based outdoor clothing and gear company, was under pressure to summit Everest as part of her bigger goal of climbing the seven summits in a six-month period—an expensive and uncertain experiment in logistics. If she had not been able to summit Everest this season, not only would her personal efforts be for naught, but also a major marketing opportunity for her company would presumably be missed.
As a climber for The North Face, I understand the desire to succeed on behalf of my sponsor. But regardless of the labels on jackets and tents and glossy marketing campaigns, there are no shortcuts in climbing. We keep authentic adventure alive by meeting nature on its own terms.
The Khumbu Icefall is the most active ice feature that humans climb on a regular basis. As the ice on the Western Cwm spills over a bench and drops 3,000 feet (914 meters) it is stretched into massive shapes, snapped, tossed, and turned by gravity. Adding to this already threatening landscape, hanging glaciers above the climbing route regularly release large quantities of ice, creating massive killer avalanches.
Yet as dangerous as the icefall is, it is an intrinsic part of the Everest experience. You boot up, say your prayers, and hope that the ice is calm. No amount of experience can make up for being in the wrong place at the wrong time. It is very dangerous, but it is also unspeakably beautiful.
During the dozens of trips I have made climbing through the Khumbu Icefall, I always tune into the orchestra of sounds emanating from this ancient glacier. The deep groan of ice compacting near Base Camp, the snap of small towers near the surface, the sharp crack of a serac, the sudden unexpected wind that whips through the formations, and the distant rumble of Nuptse and the other surrounding peaks shedding ice. Each is a constant heart-stopping reminder of how active Mount Everest is.
While part of me dreads climbing through the icefall, it is a component of the Everest experience. Flying to higher camps may reduce the risks, but it isn't climbing.
For those who still don't agree, Mallory, who died in an attempt to summit Everest in 1924, had an answer: "If you cannot understand that there is something in man which responds to the challenge of this mountain and goes out to meet it, that the struggle is the struggle of life itself upward and forever upward, then you won't see why we go. What we get from this adventure is just sheer joy. And joy is, after all, the end of life."
Conrad Anker is a professional mountain climber sponsored by The North Face. He was part of National Geographic's 2012 expedition to Everest. You can follow him on Instagram and Twitter. |
#
# Contents.py -- Table of Contents plugin for fits viewer
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import GingaPlugin
from ginga.misc import Bunch
from ginga.qtw.QtHelp import QtGui, QtCore
import time
class Contents(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(Contents, self).__init__(fv)
# For table-of-contents pane
self.nameDict = {}
self.columns = [('Name', 'NAME'),
('Object', 'OBJECT'),
('Date', 'DATE-OBS'),
('Time UT', 'UT')]
fv.set_callback('add-image', self.add_image)
fv.set_callback('delete-channel', self.delete_channel)
def build_gui(self, container):
# create the Treeview
treeview = QtGui.QTreeWidget()
treeview.setColumnCount(len(self.columns))
treeview.setSortingEnabled(True)
treeview.setAlternatingRowColors(True)
#treeview.itemClicked.connect(self.switch_image2)
#treeview.itemDoubleClicked.connect(self.switch_image2)
treeview.itemSelectionChanged.connect(self.switch_image3)
self.treeview = treeview
# create the column headers
col = 0
l = []
for hdr, kwd in self.columns:
l.append(hdr)
treeview.setHeaderLabels(l)
#self.treeview.connect('cursor-changed', self.switch_image2)
cw = container.get_widget()
cw.addWidget(treeview, stretch=1)
def switch_image(self, chname, imname):
fileDict = self.nameDict[chname]
key = imname.lower()
bnch = fileDict[key]
path = bnch.path
self.logger.debug("chname=%s name=%s path=%s" % (
chname, imname, path))
self.fv.switch_name(chname, bnch.NAME, path=path)
def switch_image2(self, item, column):
imname = str(item.text(0))
parent = item.parent()
if parent:
chname = str(parent.text(0))
#print "parent is %s" % chname
self.switch_image(chname, imname)
    def switch_image3(self):
        items = list(self.treeview.selectedItems())
        # Guard against the selection-changed signal firing with nothing
        # selected (e.g. after the tree is cleared).
        if len(items) > 0:
            self.switch_image2(items[0], 0)
def get_info(self, chname, name, image):
path = image.get('path', None)
bnch = Bunch.Bunch(NAME=name, CHNAME=chname, path=path)
# Get header keywords of interest
header = image.get_header()
for x, key in self.columns[1:]:
bnch[key] = header.get(key, 'N/A')
return bnch
def recreate_toc(self):
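        """Rebuild the whole table-of-contents tree from self.nameDict;
        used after bulk changes such as clearing the list or deleting a
        channel."""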
self.logger.debug("Recreating table of contents...")
toclist = list(self.nameDict.keys())
toclist.sort()
self.treeview.clear()
for key in toclist:
            chitem = QtGui.QTreeWidgetItem(self.treeview, [key])
chitem.setFirstColumnSpanned(True)
self.treeview.addTopLevelItem(chitem)
fileDict = self.nameDict[key]
filelist = list(fileDict.keys())
filelist.remove('_chitem')
fileDict['_chitem'] = chitem
filelist.sort(key=str.lower)
for fname in filelist:
bnch = fileDict[fname]
l = []
for hdr, kwd in self.columns:
l.append(bnch[kwd])
item = QtGui.QTreeWidgetItem(chitem, l)
chitem.addChild(item)
def add_image(self, viewer, chname, image):
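        """Callback for the 'add-image' event: add a row for the new image
        under its channel's tree item, creating the channel item first if
        it does not exist yet."""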
noname = 'Noname' + str(time.time())
name = image.get('name', noname)
path = image.get('path', None)
if chname not in self.nameDict:
# channel does not exist yet in contents--add it
chitem = QtGui.QTreeWidgetItem(self.treeview, [chname])
chitem.setFirstColumnSpanned(True)
self.treeview.addTopLevelItem(chitem)
fileDict = { '_chitem': chitem }
self.nameDict[chname] = fileDict
else:
fileDict = self.nameDict[chname]
chitem = fileDict['_chitem']
key = name.lower()
if key in fileDict:
return
bnch = self.get_info(chname, name, image)
fileDict[key] = bnch
l = []
for hdr, kwd in self.columns:
l.append(bnch[kwd])
item = QtGui.QTreeWidgetItem(chitem, l)
chitem.addChild(item)
self.treeview.scrollToItem(item)
self.logger.debug("%s added to Contents" % (name))
def clear(self):
self.nameDict = {}
self.recreate_toc()
def delete_channel(self, viewer, chinfo):
"""Called when a channel is deleted from the main interface.
Parameter is chinfo (a bunch)."""
chname = chinfo.name
del self.nameDict[chname]
self.recreate_toc()
def __str__(self):
return 'contents'
#END
|
INCLUDES DAISY CHAIN WIRE & SENSOR WIRE. THE SENSOR WIRE HAS BEEN CUT, SO IT WOULD NEED TO BE SOLDERED BACK TOGETHER.
No returns or refunds are offered on used items. |
"""
Author: [email protected]
Rath Twitter Bot
This is a simple bot for twitter that is being used for several functions.
The main function of the bot is to provide an easy learning experience with
the Twitter API and pulling information from other sources to post on twitter.
"""
# Imports
import check_weather
import logging
import os
import sys
from make_dir import makeDir
from twython import Twython
from auth import (consumer_key,
consumer_secret,
access_token,
access_token_secret,
)
twitter = Twython(consumer_key,
consumer_secret,
access_token,
access_token_secret
)
# Checking if there's an argument for where to log.
if len(sys.argv) == 1:
path = 'logs/'
else:
path = str(sys.argv[1])
makeDir(path)
logging.basicConfig(filename=os.path.join(path, 'record.log'),
                    level=logging.INFO,
                    format='%(asctime)s %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')
message = check_weather.grabTemp()
try:
twitter.update_status(status=message)
logging.info('I Tweeted!\n')
except Exception as err:
# This is mostly to just catch 405 forbidden's on duplicates.
logging.info(str(err)+'\n')
exit()
|
Both of your posts on Boston have captured the range of emotions I’ve felt this past week so eloquently. While my ties to the city used to be based on a Celtics rivalry and my love of Spenser novels, I now feel a connection to the city and the people at the marathon that will last forever. Thank you for sharing your writing.
Thanks for reading Kristina, awfully nice of you to say. Never forget I once let you cut up my Celtics credit card (though not before receiving my new one, of course). If I was able to create an emotional bond between you and the city that Lakers fans love to hate, well that’s the highest compliment you could pay me.
I’ll be honest, I saw that YouTube link and thought “I’m sure this’ll be nice, but tears, doubtful.” And then I watched it and yeah, goose flesh was inevitable. You rarely hear so many people belting that song at the top of their lungs. Most of them are hoping it’ll be a quick rendition so we can get this show on the road. But you can hear the passion in their voices — truly united, as you mentioned earlier.
Great reflection again, boss. Definitely a week that won’t be soon forgotten. But now it’s time to read about that crazy race you did. When’s that coming? |
# Copyright (c) 2014 the Sanzang Lib authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module for table-based translation from the CJK languages.
This module implements core functionality of the Sanzang system, including
the most common functions from Sanzang Utils. Using this module, you can
reformat CJK text, load translation tables, perform table-based string
substitution, and make table-based string translations.
"""
import re
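# A minimal usage sketch (file names are hypothetical; the module-level
# functions defined below are used directly):
#
#   with open('terms.txt') as table_fd:
#       table = read_table(table_fd)
#   with open('input.txt') as fd_in, open('output.txt', 'w') as fd_out:
#       tr_file(table, fd_in, fd_out)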
def reflow(text):
"""
Reformat CJK text according to its punctuation.
Given a string, this function will reformat ("reflow") the text so that
words and terms are not broken apart between lines. The function first
strips out any leading margins as used by CBETA texts. It then
collapses all line breaks, and reformats the text according to
horizontal spacing and punctuation.
The string to be formatted should not include any incomplete CBETA
margins, as this formatting function can only reliably remove whole
margins that follow the standard format.
Margin format: X01n0020_p0404a01(00)║
"""
    # Remove CBETA margins (lines starting with a "T" or "X" identifier).
    text = re.sub(r'^[TX].*?║', '', text, flags=re.M)
# Separate poetry from prose. If the line is short and starts with a space,
# then add another space at the end to separate it from the following text.
#
text = re.sub(r'^ (.{1,15})$', ' \\1 ', text, flags=re.M)
# Collapse newlines.
text = text.replace('\n', '')
# Ender followed by non-ender: newline in between.
text = re.sub(
r'([:,;。?!」』.;:\?])([^:,;。?!」』.;:\?])',
'\\1\n\\2', text, flags=re.M)
# Non-starter, non-ender, followed by a starter: newline in between.
text = re.sub(
r'([^「『 \t:,;。?!」』.;:\?\n])([「『 \t])',
'\\1\n\\2', text, flags=re.M)
# Adjust newlines and return.
if len(text) > 0 and text[-1] != '\n':
text += '\n'
return text
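# For example (an illustrative one-line CBETA input):
#
#   reflow('T01n0001_p0001a01(00)║如是我聞。一時佛在\n')
#
# strips the margin, collapses the line, and breaks after the ender '。',
# returning '如是我聞。\n一時佛在\n'.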
def reflow_file(fd_in, fd_out):
"""
Reformat CJK text from one file object to another (buffered).
Given input and output file objects, reformat CJK text from one to the
other according to the punctuation and horizontal spacing. I/O is
buffered for higher performance.
"""
enders = ':,;。?!」』.;:?'
buffer_size = 1000
str_buf = ''
line_n = 0
for line in fd_in:
line_n += 1
str_buf = str_buf + line
if line_n % buffer_size == 0:
i = len(str_buf) - 1
while i > 0:
if str_buf[i-1] in enders and str_buf[i] not in enders:
fd_out.write(reflow(str_buf[:i]))
str_buf = str_buf[i:]
i = -1
else:
i = i - 1
if len(str_buf) > 0:
fd_out.write(reflow(str_buf))
def read_table(table_fd):
"""
Read a translation table from a file.
Given an open file object, read a well-formatted translation table and
return its contents to the caller. The translation table is assumed to
be internally consistent and sorted properly according to standards for
translation tables. Blank lines are ignored. If an individual record is
formatted incorrectly, then a RuntimeError exception will be raised.
"""
tab = []
width = -1
for line in table_fd.read().split('\n'):
rec = [f.strip() for f in line.split('|')]
if width != -1 and width == len(rec):
tab.append(rec)
elif width == -1 and len(rec) > 1:
width = len(rec)
tab.append(rec)
elif line.strip() != '':
raise RuntimeError('Table error: ' + line.strip())
return tab
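# An illustrative three-column table (the terms are hypothetical):
#
#   菩薩 | bodhisattva | bodhisatta
#   世尊 | World-Honored One | Blessed One
#
# read_table() returns [['菩薩', 'bodhisattva', 'bodhisatta'], ...]; a non-blank
# record whose column count differs from the first record raises RuntimeError.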
def subst(table, text):
"""
Make 1-to-1 string substitutions using a two-column table.
Given a translation table and a text, perform 1-to-1 string
substitution on the text, replacing terms in the first column of the
table with the corresponding terms in the second column. Substitution
is performed on three forms for each term: the original term, all
lowercase, and all uppercase.
"""
for term1, term2 in table:
text = text.replace(term1, term2)
text = text.replace(term1.lower(), term2.lower())
text = text.replace(term1.upper(), term2.upper())
return text
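# For example (a hypothetical two-column rule):
#
#   subst([['NYC', 'New York City']], 'NYC and nyc')
#
# returns 'New York City and new york city'.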
def subst_file(table, fd_in, fd_out):
"""
Make string substitutions from file to file (buffered).
Given the contents of a two-column translation table, along with input
and output file objects, make one-to-one string substitutions using
buffered I/O.
"""
buffer_size = 1000
str_buf = ''
line_no = 1
for line in fd_in:
str_buf += line
if line_no % buffer_size == 0:
fd_out.write(subst(table, str_buf))
str_buf = ''
line_no += 1
fd_out.write(subst(table, str_buf))
def vocab(table, text):
"""
Return a new table containing only the vocabulary in the source text.
Create a new translation table containing only the rules that are
relevant for the given text. This is created by checking all source
terms against a copy of the text.
"""
text_rules = []
text_copy = str(text)
for rec in table:
if rec[0] in text_copy:
text_copy = text_copy.replace(rec[0], '\x1f')
text_rules.append(rec)
return text_rules
def tr_raw(table, text):
"""
Translate text using a table, return raw texts in a list.
Perform translation of a text by applying the rules in a translation
table. The result is a list of strings representing each column in the
translation table. For example, the first element in the list will be
the original source text, the second element will be the first target
language, etc.
"""
rules = vocab(table, text)
text = text.replace('\x1f', '')
collection = [text]
for col_no in range(1, len(table[0])):
trans = text
for rec in rules:
trans = trans.replace(rec[0], '\x1f' + rec[col_no] + '\x1f')
trans = trans.replace('\x1f\n', '\n')
trans = trans.replace('\x1f\x1f', ' ')
trans = trans.replace('\x1f', ' ')
collection.append(trans)
return collection
def tr_fmt(table, text, start=1):
"""
Translate text using a table, return a formatted listing string.
Perform translation of a text by applying rules in a translation table,
and return a formatted string. The formatted string represents the
source text and its translations collated together and organized by
line number and by translation table column number.
"""
collection = tr_raw(table, text)
for i in range(0, len(collection)):
collection[i] = collection[i].rstrip().split('\n')
listing = ''
for line_no in range(0, len(collection[0])):
for col_idx in range(0, len(table[0])):
listing += '%d.%d|%s\n' % (
start + line_no,
col_idx + 1,
collection[col_idx][line_no])
listing += '\n'
return listing
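# For example (a single hypothetical rule): with table [['佛', 'Buddha']],
# tr_fmt(table, '佛\n') returns the collated listing
#
#   1.1|佛
#   1.2| Buddha
#
# i.e. line number, then table column number, then the text of that column.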
def tr_file(table, fd_in, fd_out):
"""
Translate from one file to another (buffered).
Given a table, an input file object, and an output file object, apply
the translation table rules to the input text and write the translation
as a formatted string to the output. This function uses buffered
translation for higher performance.
"""
buffer_size = 100
str_buf = ''
line_no = 1
for line in fd_in:
str_buf += line
if line_no % buffer_size == 0:
fd_out.write(tr_fmt(table, str_buf, line_no - buffer_size + 1))
str_buf = ''
line_no += 1
position = line_no - str_buf.count('\n')
fd_out.write(tr_fmt(table, str_buf, position))
|
The Muhle Closed Comb Double Edge Safety Razor, R89 is a product of Muhle Company, a respected company that has been making shaving products since 1945. Muhle has done a lot to make the Closed Comb DE Safety Razor R89 one of the most exceptional razors on the market. Its handle is chrome plated to not only give it an elegant look but ensure it lasts a long time, due to a rust resistant surface.
The era of single-edge razors is finally over. Most of the razors used in the past were unsafe and harsh on the skin.
It would take only a few days before one started noticing rashes all over the face, and a blade would last for only one shave.
With the double edge blades, you can shave many times with one blade and your skin will remain smooth and nick free.
If you are a seasoned shaver or an enthusiastic beginner ready to enjoy a clean shave and are looking for the best razor to add to your collection, try the Muhle Closed Comb Double Edge Safety Razor.
You will find that the Muhle DE Razor will be one of the best investments you will make and your face will be proof of its quality.
Replacing the blades is an effortless and safe process with this razor. You just unscrew the top, remove the old blade and put a new one in the razor slot. Most conventional blades are supported and they don’t move about during the shaving process.
The quality of this razor’s handle deserves to be mentioned.
It is stylish with a reflective surface to make it appealing to the eyes. The engraving on its surface ensures the user acquires a firmer grip even though the razor is wet and soapy.
A good razor is supposed to have a good weight to offer proper balance. The weight of this razor's head and handle is ideal for experienced shavers.
It is very light but heavy enough to provide you with sufficient pressure that helps you to get a close, smooth shave. After you have used this razor a few times, you will get the “right feel” and wonder how you ever lived without it.
The Muhle R89 is inexpensively priced despite being a high-quality product. Don't let the price fool you: we usually get what we pay for, but some products, like this razor, go a notch higher to give us something extra.
The Razor Comes with a Single Blade.
In the shaving world, sometimes what turns out to be a great product for one person can be totally different to another. In other words, it all comes down to a matter of personal preference. Be assured, this razor is TOP quality and will last for years.
The Muhle R89 Closed Comb Double Edge Safety Razor would without doubt be a satisfactory purchase for many men. Going by the review statistics on Amazon, there are over 100 reviews for this razor: 82% are five-star ratings, 8% four-star, and 5% one-star. This means 90% of the Amazon customers who reviewed it were very satisfied with this razor.
If you are looking for the best double edge razor, I'd advise you not to look any further than the Muhle R89 Closed Comb Double Edge Safety Razor. Judging by the reviews on Amazon, the odds are that you will be pleased with this razor.
It guarantees a professional close shave that will leave your face looking smooth and feeling soft. The razor changing process is also a simple one. If this isn’t what you are looking for, check out our other DE Safety Razor Reviews. |
# -*- coding: utf-8 -*-
"""
Project dictionaries
======================
The dictionaries defining the keywords for the individual projects are defined here.
They get accessible to the FormFabric code by adding them to the PROJECT_DICT dictionary.
Thus e.g. PROJECT_DICT['CMIP6'] defines the overall keyword dictionary used for CMIP6 data,
and PROJECT_DICT['CMIP6_FORM'] defines the keyword (sub-)dictionary holding the information from the data providers
(collected by filling the Jupyter notebook based FORMs).
To define the data management steps used in the individual projects, the 'workflow' keyword is used.
Thus e.g. PROJECT_DICT['CMIP6']['workflow'] provides the list (of tuples) defining the workflow steps.
The workflow steps are defined in :mod:`dkrz_forms.config.workflow_steps`.
@author: stephan
.. automodule:: dkrz_forms.config.settings
.. automodule:: dkrz_forms.config.workflow_steps
"""
#================================================================================================
# This first section should stay as it is .. make project specific extensions in the second part
#
# name spaces for w3c prov transformation of submission provenance information
import base64
from string import Template
rt_pwd = base64.b64decode("Y2Y3RHI2dlM=")
# End of first part
#================================================================================================
#================================================================================================
# Second section: definition of project dictionaries
#
# Generic selection strings:
SUBMISSION_TYPE = "initial_submission, update_submission, submission_retraction, other"
STORAGE_TIMEFRAME = "6_months,12_months, 2_years, 3_years"
LTA = "long_term_archival, long_term_archival_and_data_citation, no_long_term_archival"
YES_OR_NO = "yes,no"
PROJECT_DICT = {}
PROJECTS = ['CORDEX','CMIP6','test','ESGF_replication','DKRZ_CDP']
generic_wflow_description = Template("""
Form object for project $project
Workflow step related sub-forms (filled by data managers):
- sub: data submission form
- rev: data review_form
- ing: data ingest form
- qua: data quality assurance form
- pub: data publication form
Each workfow step form is structured according to
- entity_in : input information for this step
- entity_out: output information for this step
- agent: information related to responsible party for this step
- activity: information related the workflow step execution
End user provided form information is stored in
_this_form_object.sub.entity_out.form
The following generic attributes are defined:
- project: project this form is related to
- workflow: the workflow steps which are defined for this project
- status: overall workflow status
(keyword-structure = "workflow_step"_start, "workflow_step"_end
e.g. sub_start, sub_end
""")
for project in PROJECTS:
# submitted information
PROJECT_DICT[project] = {
'__doc__': generic_wflow_description.substitute(project=project),
"project":project,
"workflow": [("sub","data_submission"),
("rev","data_submission_review"),
("ing","data_ingest"),
("qua","data_quality_assurance"),
("pub","data_publication"),
# ("da", "data_archival")
],
"status": "0:open,1:data_submission,2:data_submission_review,3:data_ingest,4:data_quality_assurance,5:data_publication,6:data_archival"
}
PROJECT_DICT['CORDEX_FORM'] = {
"__doc__":"""
CORDEX information collected as part of form completion process
see CORDEX template
.. details on entries .. to be completed
""",
"project":"CORDEX",
"submission_type" : SUBMISSION_TYPE,
"institution" : "CV_CORDEX,institution",
"institute_id" : "CV_CORDEX,institute_id",
"model_id" : "CV_CORDEX,model_id",
"experiment_id" : "CV_CORDEX, experiment_id",
"time_period" : "",
"example_file_name" : "",
"grid_mapping_name" : "",
"grid_as_specified_if_rotated_pole" : "",
"data_qc_status" : "",
"data_qc_comment" : "",
"terms_of_use" : "",
"directory_structure" : "",
"data_path" : "",
"data_information" : "",
"exclude_variables_list" : "",
"variable_list_day" : "",
"variable_list_mon" : "",
"variable_list_sem" : "",
"variable_list_fx" : "",
"uniqueness_of_tracking_id" : YES_OR_NO}
PROJECT_DICT['DKRZ_CDP_FORM'] = {
"__doc__":"""
DKRZ CMIP Data Pool ingest request related information .. to be completed
""",
"project":"DKRZ_CDP",
"comment": "",
"submission_type" : SUBMISSION_TYPE,
"storage_timeframe": STORAGE_TIMEFRAME,
"lta": LTA }
PROJECT_DICT['CMIP6_FORM'] = {
"__doc__":"""
DKRZ CMIP6 data ingest and publication request information .. to be completed
""",
"project":"CMIP6",
"comment": "",
"institute_id" : "CV_CMIP6,institute_id",
"model_id" : "CV_CMIP6,model_id",
"experiment_id" : "CV_CMIP6, experiment_id",
"data_qa_status" : "PREPARE_checked, DKRZ_QA_checked,other",
"data_qa_comment" : "",
"terms_of_use" : YES_OR_NO,
}
PROJECT_DICT['test_FORM'] = {
"__doc__":"""
test request related information .. to be completed
""",
"project":"test",
"comment": "",
"submission_type" : SUBMISSION_TYPE
}
PROJECT_DICT['ESGF_replication_FORM'] = {
"__doc__":"""
ESGF replication request related information .. to be completed
""",
"project":"ESGF_replication",
"comment": "optional",
"submission_type" : SUBMISSION_TYPE,
"scientific_context": "mandatory",
"update_notification":YES_OR_NO,
"collection_pid":YES_OR_NO
}
#
# End of section two
#================================================================================
|
Realtors since 2001: 520 homes sold!
Lee has a Bachelor's degree in Finance and over 10 years of corporate finance experience. He continually seeks to ensure you are making good financial decisions when it comes to buying or selling a home. Lee can work to help you understand what a home is worth and explain your financing alternatives.
Julie has a Master's degree in Counseling and is very involved in the emotional side of buying and selling a home. Additionally, Julie ensures that whether you are buying or selling, you have all the latest information and that your transaction will be seamless!
Active in the community, Lee is a former Savage City Council Member and currently serves as Executive VP of PLAY. Julie serves on the board of Dan Patch Days. Additionally, Lee and Julie volunteer monthly at People Serving People, a Minneapolis homeless shelter, and are proud contributors to both the Children's Miracle Network and the fight against breast cancer.
import six
import json
from abc import abstractproperty, abstractmethod
from requests.structures import CaseInsensitiveDict
class BaseProcessor(object):
NAME = None
def process_raw(self, raw_doc, **kwargs):
pass # pragma: no cover
def process_normalized(self, raw_doc, normalized, **kwargs):
pass # pragma: no cover
@abstractmethod
def documents(self, *sources):
'''
an iterator that will return documents
'''
raise NotImplementedError
@abstractmethod
def get_versions(self, source, docID):
raise NotImplementedError
def different(self, old, new):
try:
return not all([new[key] == old[key] or (not new[key] and not old[key]) for key in new.keys() if key != 'timestamps'])
except Exception:
return True # If the document fails to load/compare for some reason, accept a new version
class BaseDatabaseManager(object):
'''A base class for database managers in the scrapi processing module
Must handle setup, teardown, and multi-process initialization of database connections
All errors should be logged, but not thrown
'''
@abstractmethod
def setup(self):
'''Sets up the database connection. Returns True if the database connection
is successful, False otherwise
'''
raise NotImplementedError
@abstractmethod
def tear_down(self):
'''Tears down the database connection.
'''
raise NotImplementedError
@abstractmethod
def clear(self, force=False):
'''Deletes everything in a table/keyspace etc
Should fail if called on the production database
for testing purposes only
'''
raise NotImplementedError
@abstractmethod
def celery_setup(self, *args, **kwargs):
'''Performs the necessary operations to allow a new process to connect to the database
'''
raise NotImplementedError
class BaseHarvesterResponse(object):
"""A parody of requests.response but stored in a database for caching
Should reflect all methods of a response object
Contains an additional field time_made, self-explanatory
"""
class DoesNotExist(Exception):
pass
@abstractproperty
def method(self):
raise NotImplementedError
@abstractproperty
def url(self):
raise NotImplementedError
@abstractproperty
def ok(self):
raise NotImplementedError
@abstractproperty
def content(self):
raise NotImplementedError
@abstractproperty
def encoding(self):
raise NotImplementedError
@abstractproperty
def headers_str(self):
raise NotImplementedError
@abstractproperty
def status_code(self):
raise NotImplementedError
@abstractproperty
def time_made(self):
raise NotImplementedError
@classmethod
@abstractmethod
    def get(cls, url=None, method=None):
raise NotImplementedError
@abstractmethod
def save(self):
raise NotImplementedError
@abstractmethod
def update(self, **kwargs):
raise NotImplementedError
def json(self):
try:
content = self.content.decode('utf-8')
except AttributeError: # python 3eeeee!
content = self.content
return json.loads(content)
@property
def headers(self):
return CaseInsensitiveDict(json.loads(self.headers_str))
@property
def text(self):
return six.u(self.content)
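# A minimal concrete processor sketch (the class name and behavior are
# illustrative, not part of scrapi):
#
#   class LoggingProcessor(BaseProcessor):
#       NAME = 'logging'
#
#       def documents(self, *sources):
#           return iter([])  # a real backend would yield stored documents
#
#       def get_versions(self, source, docID):
#           return iter([])
#
#       def process_normalized(self, raw_doc, normalized, **kwargs):
#           print(normalized)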
|
Don’t reinvent the steering wheel each time you begin something new. Instead, use templates: standardized files with text and formatting that serve as a starting point for new work. Once you save a separate version of the template, just add, remove, or change any info in the new document, and you will have the new work completed in a fraction of the time.
Templates work everywhere: in word processors, spreadsheets, project management apps, survey platforms, and email. Here’s how to use templates in your chosen apps, and how to automatically create documents from a template, so you can get the common duties done faster.
Templates take time to build, and it’s easy to wonder whether they’re worth the investment. The short answer: absolutely. Editing a template takes far less time than formatting something from scratch. It is the difference between copying and pasting some text, or retyping it.
# -*- coding: utf-8 -*-
"""
Root module for the FDF package.
**Classes**
* Machine - root class for the FDF package
* Shot - shot container class
* Logbook - logbook connection class
* Container - diagnostic container class
* Node - mdsplus signal node class
"""
"""
Created on Thu Jun 18 10:38:40 2015
@author: ktritz
"""
import xml.etree.ElementTree as ET
import sys, os, importlib
import fdf_globals
from fdf_signal import Signal
import numpy as np
import datetime as dt
#import modules # I think this import is unused - DRS 10/17/15
from collections import MutableMapping, Mapping
import MDSplus as mds
import types
import inspect
import pymssql
import matplotlib.pyplot as plt
FDF_DIR = fdf_globals.FDF_DIR
MDS_SERVERS = fdf_globals.MDS_SERVERS
EVENT_SERVERS = fdf_globals.EVENT_SERVERS
LOGBOOK_CREDENTIALS = fdf_globals.LOGBOOK_CREDENTIALS
FdfError = fdf_globals.FdfError
machineAlias = fdf_globals.machineAlias
class Machine(MutableMapping):
"""
Factory root class that contains shot objects and MDS access methods.
Note that fdf.factory.Machine is exposed in fdf.__init__, so fdf.Machine
is valid.
**Usage**::
>>> import fdf
>>> nstx = fdf.Machine('nstx')
>>> nstx.s140000.logbook()
    >>> nstx.addshot(xp=1048)
    >>> nstx.s140000.mpts.plot()
    >>> nstx.list_shots()
Machine class contains a model shot object: nstx.s0
Shot data can be accessed directly through the Machine class::
>>> nstx.s141398
>>> nstx.s141399
Alternatively, a list of shot #'s may be provided during initialization::
>>> nstx = Machine(name='nstx', shotlist=[141398, 141399])
Or added later using the method addshot()::
>>> nstx.addshot([141398, 141399])
"""
# Maintain a dictionary of cached MDS server connections to speed up
# access for multiple shots and trees. This is a static class variable
# to avoid proliferation of MDS server connections
_connections = []
_parent = None
_modules = None
def __init__(self, name='nstx', shotlist=[], xp=[], date=[]):
self._shots = {} # shot dictionary with shot number (int) keys
self._classlist = {} # unused as of 10/14/2015, DRS
self._name = machineAlias(name)
self._logbook = Logbook(name=self._name, root=self)
self.s0 = Shot(0, root=self, parent=self)
self._eventConnection = mds.Connection(EVENT_SERVERS[self._name])
        if len(self._connections) == 0:
print('Precaching MDS server connections...')
for _ in range(2):
try:
connection = mds.Connection(MDS_SERVERS[self._name])
connection.tree = None
self._connections.append(connection)
except:
msg = 'MDSplus connection to {} failed'.format(
MDS_SERVERS[self._name])
raise FdfError(msg)
print('Finished.')
if shotlist or xp or date:
self.addshot(shotlist=shotlist, xp=xp, date=date)
def __getattr__(self, name):
# used for attribute referencing: s = nstx.s140000
try:
shot = int(name.split('s')[1])
if (shot not in self._shots):
self._shots[shot] = Shot(shot, root=self, parent=self)
return self._shots[shot]
except:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), name))
def __repr__(self):
return '<machine {}>'.format(self._name.upper())
def __iter__(self):
return iter(self._shots.values())
def __contains__(self, value):
return value in self._shots
def __len__(self):
return len(self._shots.keys())
def __delitem__(self, item):
self._shots.__delitem__(item)
def __getitem__(self, item):
# used for dictionary referencing: s = nstx[140000]
# note that getitem fails to catch missing key,
# but getattr does catch missing key
if item == 0:
return self.s0
return self._shots[item]
def __setitem__(self, item, value):
pass
def __dir__(self):
d = ['s0']
d.extend(['s{}'.format(shot) for shot in self._shots])
return d
def _get_connection(self, shot, tree):
for connection in self._connections:
if connection.tree == (tree, shot):
self._connections.remove(connection)
self._connections.insert(0, connection)
return connection
connection = self._connections.pop()
try:
connection.closeAllTrees()
except:
pass
try:
connection.openTree(tree, shot)
connection.tree = (tree, shot)
except:
connection.tree = (None, None)
finally:
self._connections.insert(0, connection)
return connection
def _get_mdsdata(self, signal):
# shot = base_container(signal)._parent.shot
shot = signal.shot
        if shot == 0:
print('No MDS data exists for model tree')
return None
connection = self._get_connection(shot, signal._mdstree)
try:
data = connection.get(signal._mdsnode)
except:
msg = "MDSplus connection error for tree '{}' and node '{}'".format(
signal._mdstree, signal._mdsnode)
raise FdfError(msg)
try:
if signal._raw_of is not None:
data = data.raw_of()
except:
pass
try:
if signal._dim_of is not None:
data = data.dim_of()
except:
pass
data = data.value_of().value
try:
if signal._transpose is not None:
data = data.transpose(signal._transpose)
except:
pass
try:
data = signal._postprocess(data)
except:
pass
return data
def _get_modules(self):
if self._modules is None:
module_dir = os.path.join(FDF_DIR, 'modules')
self._modules = [module for module in os.listdir(module_dir)
if os.path.isdir(os.path.join(module_dir, module)) and
                             module[0] != '_']
return self._modules
def addshot(self, shotlist=[], date=[], xp=[], verbose=False):
"""
Load shots into the Machine class
**Usage**
>>> nstx.addshot([140000 140001])
>>> nstx.addshot(xp=1032)
>>> nstx.addshot(date=20100817, verbose=True)
Note: You can reference shots even if the shots have not been loaded.
"""
if not iterable(shotlist):
shotlist = [shotlist]
if not iterable(xp):
xp = [xp]
if not iterable(date):
date = [date]
shots = []
if shotlist:
            shots.extend(shotlist)
if xp:
shots.extend(self._logbook.get_shotlist(xp=xp,
verbose=verbose))
if date:
shots.extend(self._logbook.get_shotlist(date=date,
verbose=verbose))
for shot in np.unique(shots):
if shot not in self._shots:
self._shots[shot] = Shot(shot, root=self, parent=self)
def addxp(self, xp=[], verbose=False):
"""
Add all shots for one or more XPx
**Usage**
>>> nstx.addxp(1032)
>>> nstx.addxp(xp=1013)
>>> nstx.addxp([1042, 1016])
"""
self.addshot(xp=xp, verbose=verbose)
def adddate(self, date=[], verbose=False):
"""
Add all shots for one or more dates (format YYYYMMDD)
**Usage**
>>> nstx.adddate(date=20100817)
"""
self.addshot(date=date, verbose=verbose)
def list_shots(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
print('{} in XP {} on {}'.format(
shotObj.shot, shotObj.xp, shotObj.date))
def get_shotlist(self, date=[], xp=[], verbose=False):
"""
Get a list of shots
**Usage**
>>> shots = nstx.get_shotlist(xp=1013)
"""
return self._logbook.get_shotlist(date=date, xp=xp, verbose=verbose)
def filter_shots(self, date=[], xp=[]):
"""
Get a Machine-like object with an immutable shotlist for XP(s)
or date(s)
"""
self.addshot(xp=xp, date=date)
return ImmutableMachine(xp=xp, date=date, parent=self)
def setevent(self, event, shot_number=None, data=None):
event_data = bytearray()
if shot_number is not None:
shot_data = shot_number // 256**np.arange(4) % 256
event_data.extend(shot_data.astype(np.ubyte))
if data is not None:
event_data.extend(str(data))
mdsdata = mds.mdsdata.makeData(np.array(event_data))
event_string = 'setevent("{}", {})'.format(event, mdsdata)
status = self._eventConnection.get(event_string)
return status
def wfevent(self, event, timeout=0):
event_string = 'kind(_data=wfevent("{}",*,{})) == 0BU ? "timeout"' \
': _data'.format(event, timeout)
data = self._eventConnection.get(event_string).value
if type(data) is str:
raise FdfError('Timeout after {}s in wfevent'.format(timeout))
if not data.size:
return None
if data.size > 3:
shot_data = data[0:4]
shot_number = np.sum(shot_data * 256**np.arange(4))
data = data[4:]
return shot_number, ''.join(map(chr, data))
return data
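    # Note on the event payload (illustrative): setevent() packs the shot
    # number little-endian into four bytes, e.g. shot 141398 -> [86, 40, 2, 0],
    # and wfevent() reverses this with np.sum(shot_data * 256**np.arange(4)).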
def logbook(self):
"""
Print logbook entries for all shots
"""
for shotnum in self._shots:
shotObj = self._shots[shotnum]
shotObj.logbook()
class ImmutableMachine(Mapping):
def __init__(self, xp=[], date=[], parent=None):
self._shots = {}
self._parent = parent
shotlist = self._parent.get_shotlist(xp=xp, date=date)
for shot in shotlist:
self._shots[shot] = getattr(self._parent, 's{}'.format(shot))
def __getattr__(self, name):
try:
shot = int(name.split('s')[1])
return self._shots[shot]
except:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), name))
def __repr__(self):
        return '<immutable machine {}>'.format(self._parent._name.upper())
def __iter__(self):
return iter(self._shots.values())
def __contains__(self, value):
return value in self._shots
def __len__(self):
return len(self._shots.keys())
def __getitem__(self, item):
        return self._shots[item]
def __dir__(self):
return ['s{}'.format(shot) for shot in self._shots]
def logbook(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
shotObj.logbook()
def list_shots(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
print('{} in XP {} on {}'.format(
shotObj.shot, shotObj.xp, shotObj.date))
class Shot(MutableMapping):
def __init__(self, shot, root=None, parent=None):
self.shot = shot
self._shotobj = self
self._root = root
self._parent = parent
try:
self._logbook = self._root._logbook
except:
txt = 'No logbook connection for shot {}'.format(self.shot)
raise FdfError(txt)
self._logbook_entries = []
self._modules = {module: None for module in root._get_modules()}
self.xp = self._get_xp()
self.date = self._get_date()
self._efits = []
def __getattr__(self, attribute):
# first see if the attribute is in the Machine object
try:
attr = getattr(self._parent, attribute)
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
except:
pass # failed, so check other locations
if attribute in self._modules:
if self._modules[attribute] is None:
self._modules[attribute] = Factory(attribute, root=self._root,
shot=self.shot, parent=self)
return self._modules[attribute]
raise AttributeError("Shot object has no attribute '{}'".format(attribute))
def __repr__(self):
return '<Shot {}>'.format(self.shot)
def __iter__(self):
# return iter(self._modules.values())
return iter(self._modules)
def __contains__(self, value):
return value in self._modules
def __len__(self):
return len(self._modules.keys())
def __delitem__(self, item):
pass
def __getitem__(self, item):
return self._modules[item]
def __setitem__(self, item, value):
pass
def __dir__(self):
return self._modules.keys()
def _get_xp(self):
# query logbook for XP, return XP (list if needed)
if self._logbook and not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
xplist = []
for entry in self._logbook_entries:
xplist.append(entry['xp'])
return np.unique(xplist)
def _get_date(self):
# query logbook for rundate, return rundate
if self._logbook and not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
date = 0
if self._logbook_entries:
date = self._logbook_entries[0]['rundate']
return date
def logbook(self):
# print a list of logbook entries
print('Logbook entries for {}'.format(self.shot))
if not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
for entry in self._logbook_entries:
print('************************************')
print(('{shot} on {rundate} in XP {xp}\n'
'{username} in topic {topic}\n\n'
'{text}').format(**entry))
print('************************************')
def plot(self, overwrite=False, label=None, multi=False):
if not overwrite and not multi:
plt.figure()
plt.subplot(1, 1, 1)
if self.shape != self.time.shape:
            msg = 'Dimension mismatch: {}\n shape data {} shape time {}'.format(
                self._name, self.shape, self.time.shape)
raise FdfError(msg)
if self.size==0 or self.time.size==0:
msg = 'Empty data and/or time axis: {}\n shape data {} shape time {}'.format(
self._name, self.shape, self.time.shape)
raise FdfError(msg)
plt.plot(self.time[:], self[:], label=label)
title = self._title if self._title else self._name
if not overwrite or multi:
plt.suptitle('Shot #{}'.format(self.shot), x=0.5, y=1.00,
fontsize=12, horizontalalignment='center')
plt.ylabel('{} ({})'.format(self._name.upper(), self.units))
plt.title('{} {}'.format(self._container.upper(), title),
fontsize=12)
plt.xlabel('{} ({})'.format(self.time._name.capitalize(),
self.time.units))
plt.legend()
plt.show()
def check_efit(self):
if len(self._efits):
return self._efits
trees = ['efit{}'.format(str(index).zfill(2)) for index in range(1, 7)]
trees.extend(['lrdfit{}'.format(str(index).zfill(2))
for index in range(1, 13)])
tree_exists = []
for tree in trees:
data = None
connection = self._get_connection(self.shot, tree)
try:
data = connection.get('\{}::userid'.format(tree)).value
except:
pass
            if data and data != '*':
tree_exists.append(tree)
self._efits = tree_exists
return self._efits
class Logbook(object):
def __init__(self, name='nstx', root=None):
self._name = name.lower()
self._root = root
self._credentials = {}
self._table = ''
self._shotlist_query_prefix = ''
self._shot_query_prefix = ''
self._logbook_connection = None
self._make_logbook_connection()
# dict of cached logbook entries
# kw is shot, value is list of logbook entries
self.logbook = {}
def _make_logbook_connection(self):
self._credentials = LOGBOOK_CREDENTIALS[self._name]
self._table = self._credentials['table']
self._shotlist_query_prefix = (
'SELECT DISTINCT rundate, shot, xp, voided '
'FROM {} WHERE voided IS null').format(self._table)
self._shot_query_prefix = (
'SELECT dbkey, username, rundate, shot, xp, topic, text, entered, voided '
'FROM {} WHERE voided IS null').format(self._table)
try:
self._logbook_connection = pymssql.connect(
server=self._credentials['server'],
user=self._credentials['username'],
password=self._credentials['password'],
database=self._credentials['database'],
port=self._credentials['port'],
as_dict=True)
except:
print('Attempting logbook server connection as drsmith')
try:
self._logbook_connection = pymssql.connect(
server=self._credentials['server'],
user='drsmith',
password=self._credentials['password'],
database=self._credentials['database'],
port=self._credentials['port'],
as_dict=True)
except:
txt = '{} logbook connection failed. '.format(self._name.upper())
txt = txt + 'Server credentials:'
for key in self._credentials:
txt = txt + ' {0}:{1}'.format(key, self._credentials[key])
raise FdfError(txt)
def _get_cursor(self):
try:
cursor = self._logbook_connection.cursor()
cursor.execute('SET ROWCOUNT 500')
except:
raise FdfError('Cursor error')
return cursor
def _shot_query(self, shot=[]):
cursor = self._get_cursor()
if shot and not iterable(shot):
shot = [shot]
for sh in shot:
if sh not in self.logbook:
query = ('{0} and shot={1} '
'ORDER BY shot ASC, entered ASC'
).format(self._shot_query_prefix, sh)
cursor.execute(query)
rows = cursor.fetchall() # list of logbook entries
for row in rows:
rundate = repr(row['rundate'])
year = rundate[0:4]
month = rundate[4:6]
day = rundate[6:8]
row['rundate'] = dt.date(int(year), int(month), int(day))
self.logbook[sh] = rows
def get_shotlist(self, date=[], xp=[], verbose=False):
# return list of shots for date and/or XP
cursor = self._get_cursor()
rows = []
shotlist = [] # start with empty shotlist
date_list = date
if not iterable(date_list): # if it's just a single date
date_list = [date_list] # put it into a list
for date in date_list:
query = ('{0} and rundate={1} ORDER BY shot ASC'.
format(self._shotlist_query_prefix, date))
cursor.execute(query)
rows.extend(cursor.fetchall())
xp_list = xp
if not iterable(xp_list): # if it's just a single xp
xp_list = [xp_list] # put it into a list
for xp in xp_list:
query = ('{0} and xp={1} ORDER BY shot ASC'.
format(self._shotlist_query_prefix, xp))
cursor.execute(query)
rows.extend(cursor.fetchall())
for row in rows:
rundate = repr(row['rundate'])
year = rundate[0:4]
month = rundate[4:6]
day = rundate[6:8]
row['rundate'] = dt.date(int(year), int(month), int(day))
if verbose:
print('date {}'.format(rows[0]['rundate']))
for row in rows:
print(' {shot} in XP {xp}'.format(**row))
# add shots to shotlist
shotlist.extend([row['shot'] for row in rows
if row['shot'] is not None])
cursor.close()
return np.unique(shotlist)
def get_entries(self, shot=[], date=[], xp=[]):
# return list of lobgook entries (dictionaries) for shot(s)
if shot and not iterable(shot):
shot = [shot]
if xp or date:
shot.extend(self.get_shotlist(date=date, xp=xp))
if shot:
self._shot_query(shot=shot)
entries = []
for sh in np.unique(shot):
if sh in self.logbook:
entries.extend(self.logbook[sh])
return entries
_tree_dict = {}
def Factory(module_branch, root=None, shot=None, parent=None):
    """
    Factory method
    """
    global _tree_dict
try:
module_branch = module_branch.lower()
module_list = module_branch.split('.')
module = module_list[-1]
branch_str = ''.join([word.capitalize() for word in module_list])
if module_branch not in _tree_dict:
module_path = os.path.join(FDF_DIR, 'modules', *module_list)
parse_tree = ET.parse(os.path.join(module_path,
''.join([module, '.xml'])))
module_tree = parse_tree.getroot()
_tree_dict[module_branch] = module_tree
ContainerClassName = ''.join(['Container', branch_str])
if ContainerClassName not in Container._classes:
ContainerClass = type(ContainerClassName, (Container,), {})
init_class(ContainerClass, _tree_dict[module_branch], root=root,
container=module, classparent=parent.__class__)
Container._classes[ContainerClassName] = ContainerClass
else:
ContainerClass = Container._classes[ContainerClassName]
return ContainerClass(_tree_dict[module_branch], shot=shot,
parent=parent, top=True)
    except Exception:
print("{} not found in modules directory".format(module))
raise
class Container(object):
"""
Container class
"""
_instances = {}
_classes = {}
def __init__(self, module_tree, top=False, **kwargs):
cls = self.__class__
self._signals = {}
self._containers = {}
self._subcontainers = {}
self._title = module_tree.get('title')
self._desc = module_tree.get('desc')
for read_only in ['parent']:
setattr(self, '_'+read_only, kwargs.get(read_only, None))
try:
self.shot = kwargs['shot']
self._mdstree = kwargs['mdstree']
except:
pass
if self.shot is not None:
try:
cls._instances[cls][self.shot].append(self)
except:
cls._instances[cls][self.shot] = [self]
if top:
self._get_subcontainers()
for node in module_tree.findall('node'):
NodeClassName = ''.join(['Node', cls._name.capitalize()])
if NodeClassName not in cls._classes:
NodeClass = type(NodeClassName, (Node, cls), {})
cls._classes[NodeClassName] = NodeClass
else:
NodeClass = cls._classes[NodeClassName]
NodeClass._mdstree = parse_mdstree(self, node)
setattr(self, node.get('name'), NodeClass(node, parent=self))
for element in module_tree.findall('axis'):
signal_list = parse_signal(self, element)
branch_str = self._get_branchstr()
for signal_dict in signal_list:
SignalClassName = ''.join(['Axis', branch_str])
if SignalClassName not in cls._classes:
SignalClass = type(SignalClassName, (Signal, cls), {})
parse_method(SignalClass, element)
cls._classes[SignalClassName] = SignalClass
else:
SignalClass = cls._classes[SignalClassName]
SignalObj = SignalClass(**signal_dict)
refs = parse_refs(self, element, SignalObj._transpose)
if not refs:
refs = SignalObj.axes
for axis, ref in zip(SignalObj.axes, refs):
setattr(SignalObj, axis, getattr(self, '_'+ref))
setattr(self, ''.join(['_', signal_dict['_name']]), SignalObj)
for branch in module_tree.findall('container'):
name = branch.get('name')
branch_str = self._get_branchstr()
ContainerClassName = ''.join(['Container', branch_str,
name.capitalize()])
if ContainerClassName not in cls._classes:
ContainerClass = type(ContainerClassName, (cls, Container), {})
init_class(ContainerClass, branch, classparent=cls)
cls._classes[ContainerClassName] = ContainerClass
else:
ContainerClass = cls._classes[ContainerClassName]
ContainerObj = ContainerClass(branch, parent=self)
setattr(self, name, ContainerObj)
self._containers[name] = ContainerObj
for element in module_tree.findall('signal'):
signal_list = parse_signal(self, element)
branch_str = self._get_branchstr()
for signal_dict in signal_list:
# name = element.get('name').format('').capitalize()
SignalClassName = ''.join(['Signal', branch_str])
if SignalClassName not in cls._classes:
SignalClass = type(SignalClassName, (Signal, cls), {})
parse_method(SignalClass, element)
cls._classes[SignalClassName] = SignalClass
else:
SignalClass = cls._classes[SignalClassName]
SignalObj = SignalClass(**signal_dict)
refs = parse_refs(self, element, SignalObj._transpose)
if not refs:
refs = SignalObj.axes
for axis, ref in zip(SignalObj.axes, refs):
setattr(SignalObj, axis, getattr(self, '_'+ref))
setattr(self, signal_dict['_name'], SignalObj)
self._signals[signal_dict['_name']] = SignalObj
if top and hasattr(self, '_preprocess'):
self._preprocess()
def __getattr__(self, attribute):
try:
if self._subcontainers[attribute] is None:
branch_path = '.'.join([self._get_branch(), attribute])
self._subcontainers[attribute] = \
Factory(branch_path, root=self._root,
shot=self.shot, parent=self)
return self._subcontainers[attribute]
except KeyError:
pass
if not hasattr(self, '_parent') or self._parent is None:
raise AttributeError("Attribute '{}' not found".format(attribute))
if hasattr(self._parent, '_signals') and \
attribute in self._parent._signals:
raise AttributeError("Attribute '{}' not found".format(attribute))
attr = getattr(self._parent, attribute)
        if Container in attr.__class__.mro() and attribute[0] != '_':
raise AttributeError("Attribute '{}' not found".format(attribute))
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
def _get_subcontainers(self):
        if len(self._subcontainers) == 0:
container_dir = self._get_path()
if not os.path.isdir(container_dir):
return
files = os.listdir(container_dir)
self._subcontainers = {container: None for container in
files if os.path.isdir(
os.path.join(container_dir, container)) and
                                   container[0] != '_'}
@classmethod
def _get_path(cls):
branch = cls._get_branch().split('.')
path = os.path.join(FDF_DIR, 'modules')
for step in branch:
newpath = os.path.join(path, step)
if not os.path.isdir(newpath):
break
path = newpath
return path
def __dir__(self):
# print('in dir')
items = self.__dict__.keys()
items.extend(self.__class__.__dict__.keys())
if Signal not in self.__class__.mro():
items.extend(self._subcontainers.keys())
return [item for item in set(items).difference(self._base_items)
                if item[0] != '_']
def __iter__(self):
if not len(self._signals):
items = self._containers.values()
# items.extend(self._subcontainers.values())
else:
items = self._signals.values()
return iter(items)
@classmethod
def _get_branch(cls):
branch = cls._name
parent = cls._classparent
while parent is not Shot and parent.__class__ is not Shot:
branch = '.'.join([parent._name, branch])
parent = parent._classparent
return branch
@classmethod
def _get_branchstr(cls):
branch = cls._get_branch()
return ''.join([sub.capitalize() for sub in branch.split('.')])
def init_class(cls, module_tree, **kwargs):
cls._name = module_tree.get('name')
if cls not in cls._instances:
cls._instances[cls] = {}
for read_only in ['root', 'container', 'classparent']:
try:
setattr(cls, '_'+read_only, kwargs[read_only])
# print(cls._name, read_only, kwargs.get(read_only, 'Not there'))
except:
pass
for item in ['mdstree', 'mdspath', 'units']:
getitem = module_tree.get(item)
if getitem is not None:
setattr(cls, '_'+item, getitem)
cls._base_items = set(cls.__dict__.keys())
parse_method(cls, module_tree)
def parse_method(obj, module_tree):
objpath = obj._get_path()
sys.path.insert(0, objpath)
for method in module_tree.findall('method'):
method_name = method.text
if method_name is None:
method_name = method.get('name')
module = method.get('module')
if module is None:
module = method_name
method_in_module = method.get('method_in_module')
if method_in_module is None:
method_in_module = method_name
module_object = importlib.import_module(module)
method_from_object = module_object.__getattribute__(method_in_module)
setattr(obj, method_name, method_from_object)
sys.path.pop(0)
def base_container(container):
parent_container = container
while type(parent_container._parent) is not Shot:
parent_container = parent_container._parent
return parent_container
def parse_signal(obj, element):
units = parse_units(obj, element)
axes, transpose = parse_axes(obj, element)
number_range = element.get('range')
if number_range is None:
name = element.get('name')
title = element.get('title')
desc = element.get('desc')
mdspath, dim_of = parse_mdspath(obj, element)
mdstree = parse_mdstree(obj, element)
error = parse_error(obj, element)
signal_dict = [{'_name': name, 'units': units, 'axes': axes,
'_mdsnode': mdspath, '_mdstree': mdstree,
'_dim_of': dim_of, '_error': error, '_parent': obj,
'_transpose': transpose, '_title': title,
'_desc': desc}]
else:
number_list = number_range.split(',')
len_number_list = len(number_list)
if len_number_list == 1:
start = 0
end = int(number_list[0])
else:
start = int(number_list[0])
end = int(number_list[1])+1
signal_dict = []
if len_number_list == 3:
# 3rd item, if present, controls zero padding (cf. BES and magnetics)
digits = int(number_list[2])
else:
digits = int(np.ceil(np.log10(end-1)))
for index in range(start, end):
name = element.get('name').format(str(index).zfill(digits))
title = None
if element.get('title'):
title = element.get('title').format(str(index).zfill(digits))
desc = None
if element.get('desc'):
desc = element.get('desc').format(str(index).zfill(digits))
mdspath, dim_of = parse_mdspath(obj, element)
mdspath = mdspath.format(str(index).zfill(digits))
mdstree = parse_mdstree(obj, element)
error = parse_error(obj, element)
signal_dict.append({'_name': name, 'units': units, 'axes': axes,
'_mdsnode': mdspath, '_mdstree': mdstree,
'_dim_of': dim_of, '_error': error,
'_parent': obj, '_transpose': transpose,
'_title': title, '_desc': desc})
return signal_dict
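# Illustrative 'range' handling in parse_signal (attribute values are examples):
#   name="ch{}" range="64"     -> ch00 ... ch63 (start defaults to 0)
#   name="ch{}" range="1,64"   -> ch01 ... ch64
#   name="ch{}" range="1,64,3" -> ch001 ... ch064 (third item sets zero padding)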
def parse_axes(obj, element):
axes = []
transpose = None
time_ind = 0
try:
axes = [axis.strip() for axis in element.get('axes').split(',')]
if 'time' in axes:
time_ind = axes.index('time')
            if time_ind != 0:
transpose = list(range(len(axes)))
transpose.pop(time_ind)
transpose.insert(0, time_ind)
axes.pop(time_ind)
axes.insert(0, 'time')
except:
pass
return axes, transpose
def parse_refs(obj, element, transpose=None):
refs = None
try:
refs = [ref.strip() for ref in element.get('axes_refs').split(',')]
if transpose is not None:
refs = [refs[index] for index in transpose]
except:
pass
return refs
def parse_units(obj, element):
units = element.get('units')
if units is None:
try:
units = obj.units
except:
pass
return units
def parse_error(obj, element):
error = element.get('error')
if error is not None:
mdspath = element.get('mdspath')
if mdspath is None:
try:
mdspath = obj._mdspath
error = '.'.join([mdspath, error])
except:
pass
else:
error = '.'.join([mdspath, error])
return error
_path_dict = {}
def parse_mdspath(obj, element):
global _path_dict
key = (type(obj), element)
try:
return _path_dict[key]
except KeyError:
mdspath = element.get('mdspath')
try:
dim_of = int(element.get('dim_of'))
except:
dim_of = None
if mdspath is None:
try:
mdspath = obj._mdspath
except:
pass
if mdspath is not None:
mdspath = '.'.join([mdspath, element.get('mdsnode')])
else:
mdspath = element.get('mdsnode')
_path_dict[key] = (mdspath, dim_of)
return mdspath, dim_of
def parse_mdstree(obj, element):
mdstree = element.get('mdstree')
if mdstree is None and hasattr(obj, '_mdstree'):
mdstree = obj._mdstree
return mdstree
def iterable(obj):
try:
iter(obj)
if type(obj) is str:
return False
return True
except TypeError:
return False
class Node(object):
"""
Node class
"""
def __init__(self, element, parent=None):
self._parent = parent
self._name = element.get('name')
self._mdsnode = parse_mdspath(self, element)
self._data = None
self._title = element.get('title')
self._desc = element.get('desc')
self.units = element.get('units')
def __repr__(self):
if self._data is None:
self._data = self._get_mdsdata()
return str(self._data)
def __getattr__(self, attribute):
        if attribute == '_parent':
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), attribute))
if self._parent is None:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), attribute))
attr = getattr(self._parent, attribute)
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
if __name__ == '__main__':
nstx = Machine()
s = nstx.s141000
s.bes.ch01.myfft()
# s.bes.ch01.fft2()
|
The elephant remembers well the last wedding atop his head. Remembering is what elephants do, after all. The last wedding brought Arctic breezes, recalls the elephant, but he also remembers the warm spirits, the laughs, and the love. This wedding wasn’t bitter cold or exceptionally windy. This time the elephant, however, stood stoically in a cold, penetrating March rain. And like the last, he again remembers warm spirits, laughs and lots of love.
They arrived in a limo, dressed for a night on the town. They got out and assembled just-purchased trekking poles and donned light traction over dress shoes. Dyed in the wool hikers would roll their eyes at the sight, no doubt, but lovers the world over would note how this couple will go to great lengths, albeit in short distances, to tie the knot. In the rain. Undeterred. Not suffering. They were completely happy and at peace with the weather. Wet knots are impossible to untie. They were reminded of this fun little fact. Maybe it helped. Aside from squinting in the rain, they were all smiles the whole time.
Our Justice of the Peace, Redline Guide Mike Cherim, was concerned about safety. He wasn’t guiding — it was a self-guided wedding party, and he was only officiating the service — though he did walk in with the wedding party, keeping an eye on things just in case, hoping they weren’t getting themselves into trouble. The hike in is only three-tenths of a mile (one way), but in the rain, postholing deep, dress shoes filling with snow… well, things could go bad.
But they didn’t. It was awesome. They wrote a section of the ceremony themselves, a vows exchange, and it was quite lovely from both the beautiful bride and the dashing groom. What a delightful ceremony. Congratulations!
Arriving in style, thanks to Grace Limousine.
Neck deep postholes were possible on the snowbank traverse.
The soft snow challenges began here at the Elephant Head junction.
The group makes steady progress.
Some sections were tougher than others. They managed them all, though.
She had been hiding a full length wedding gown the entire time. Talk about layers! The groom lovingly tends her train.
It’s hard to tell, but we were all standing in pretty steady rain. This moment was so special, though.
Now mister and missus. You may kiss!
Three tenths of a mile to get back. They were ready.
The groom offers his bride a hand.
Some of this did happen.
Everyone made it out safely.
Thank you for the pleasure of officiating your wedding, Mobolaji and Sarah Ann. Congratulations! |
"""
Supplementary Note 9: Compare RPM-normalized read densities
Author: Annemarie Becker
inputFile1:
RPM-normalized read densities along the whole genome or in protein coding regions on plus or minus strand from sample 1 (Supplementary Note 7 or 8)
col0: position along genome
col1: RPM-normalized read density at that position
inputFile2:
RPM-normalized read densities along the whole genome or in protein coding regions on plus or minus strand from sample 2 (Supplementary Note 7 or 8)
col0: position along genome
col1: RPM-normalized read density at that position
outputFile:
comparison of RPM-normalized read density files for protein coding regions on plus or minus strand from samples 1 and 2
col0: RPM-normalized read density of sample 1
col1: RPM-normalized read density of sample 2
"""
def matchRPM(inputFile1, inputFile2, outputFile):

    # Load position/density records of sample 1
    list1 = []
    inFile1 = open(inputFile1, 'r')
    for line in inFile1:
        fields = line.split()
        list1.append(fields)
    inFile1.close()

    # Load position/density records of sample 2
    list2 = []
    inFile2 = open(inputFile2, 'r')
    for line in inFile2:
        fields = line.split()
        list2.append(fields)
    inFile2.close()

    # Pair the records of both samples by position
    listA = zip(list1, list2)

    # Write the paired densities (column 1 of each input) to the output file
    outFile = open(outputFile, 'w')
    for Z in listA:
        read1 = float(Z[0][1])
        read2 = float(Z[1][1])
        outFile.write(str(read1) + '\t' + str(read2) + '\n')
    outFile.close()
if __name__ == '__main__':
inputFile1 = ''
inputFile2 = ''
outputFile = ''
matchRPM(inputFile1, inputFile2, outputFile)
|
Singapore, 17 May, 2015 – Shape Glow returned for the second year running – with a new and longer fitness routine that proved to be a hit with its 450 participants.
Held at Zouk and Phuture on Friday night, Shape Glow is a night-time fitness event organised by Shape Singapore, a women’s health and wellness magazine licensed under Meredith Corporation and published by SPH Magazines Pte Ltd.
While last year’s dance workout party featured 90 minutes of Zumba and Bokwa, Shape Glow 2015 comprised two invigorating group exercises, 45 minutes of Belly Blitz and 60 minutes of Zumba, conducted by Amore Fitness instructors.
Each participant received goodies worth more than $174, including a full year’s subscription to Shape magazine, a one-week Amore Fitness access pass, Amore Boutique spa voucher, Reebok voucher, delectable snacks as well as healthcare and personal care products.
Shape Glow 2015 is pleased to have Amore Fitness as Co-presenter and Reebok as Official Sports Gear.
One of Amore Fitness’s signature classes, Belly Blitz combines traditional belly dancing moves with aerobic techniques.
"""Inverts selection in active view.
Shift-Click:
Select group members instead of parent group elements.
"""
#pylint: disable=import-error,invalid-name,broad-except
from pyrevit import revit, DB
# get view elements
viewelements = DB.FilteredElementCollector(revit.doc, revit.active_view.Id)\
.WhereElementIsNotElementType()\
.ToElements()
# remove anything that is a direct DB.Element obj
# these are the weird internal objects that Revit uses like a camera obj
view_element_ids = \
{x.Id.IntegerValue for x in viewelements if x.GetType() is not DB.Element}
# get current selection
selection = revit.get_selection()
selected_element_ids = {x.Id.IntegerValue for x in selection}
# find elements that are not selected
invert_ids = view_element_ids.difference(selected_element_ids)
# if shiftclick, select all the invert elements
# otherwise do not select elements inside a group
filtered_invert_ids = invert_ids.copy()
if not __shiftclick__: #pylint: disable=undefined-variable
# collect ids of elements inside a group
grouped_element_ids = \
[x.Id.IntegerValue for x in viewelements
if x.GetType() is not DB.Element
and x.GroupId != DB.ElementId.InvalidElementId]
for element_id in invert_ids:
if element_id in grouped_element_ids:
filtered_invert_ids.remove(element_id)
# set selection
selection.set_to([DB.ElementId(x) for x in filtered_invert_ids])
|
With the demolition of the Milham Ford building in 2018 to make way for our new buildings, a temporary Chapel has been established in South 45. Eventually, there will be a new College Chapel located in the new Boundary Building. Until then, we will continue to hold regular events in our temporary Chapel each term. Find out more about religious life at St Hilda's today.
Our former College Chapel in Milham Ford first came into use in Michaelmas Term 1969. A non-conformist communion service was arranged in February 1971 and a Roman Catholic Mass in May 1977, each subsequently becoming a regular event.
St Hilda's Hall's prospectus stated that the Hall was 'conducted according to the principles of the Church of England, with liberty for the members of other religious denominations'.
When Cowley House became St Hilda's Hall in 1893, the billiard room on the top floor was fitted out as a Chapel. One student remembered it as 'barrack-like', although 'slightly redeemed by view from the window'. She recalled endeavours by the students 'to infuse spirit and devotion'. Like much of the rest of St Hilda's at this time, the Chapel had been equipped as cheaply as possible and surviving hymn books suggest that they were often acquired second hand, possibly from Cheltenham Ladies' College. School Hymns was one of the books used, perhaps not surprisingly as the Preface was written by Dorothea Beale. A surviving copy shows that one young lady, at least, did not have her mind on spiritual matters and used her hymn book as a convenient means for communication with her neighbour. A pencilled message reads 'I am going to play tennis this afternoon with Miss Poole!!'.
When the new pre-fabricated Chapel was dedicated on Saturday 31st October 1925, the Chairman of the Council commented that those who had been in the old Chapel recently 'would remember that it was so crowded that those who were present regarded those who had stayed away with feelings of gratitude'. That year the Principal recorded that the new Chapel was 'already making an untold difference to us, and the students feel it to be a great boon'. Six years later, in March 1932, a booklet of hymns for use in the Chapel was produced. It contained twenty-one hymns (three in Latin); barely a quarter of them would be known to today's congregations. |
#-*- coding: utf-8 -*-
'''
Created on 2013-8-27
@author: liaopengkai
'''
import urllib2
import os
import json
import re
import time
class MTool:
def isNum(self, tempStr):
"""判断字符串是否为数字,整型和浮点型皆适用"""
try:
float(tempStr)
return True
except Exception:
return False
def save(self, filename, contents, reNew = True, path = '', path2 = ''):
        '''Save a file. Args: filename, contents, overwrite flag (reNew), path components.'''
if not path == '':
if not os.path.isdir(path):
os.mkdir(path)
if not os.path.isdir(path + path2):
os.mkdir(path + path2)
filename = path + path2 + filename
if os.path.exists(filename):
if not reNew:
print 'You already have ' + filename
return
fh = open(filename, 'w')
fh.write(contents)
fh.close()
# print filename
print 'Save '+filename+' success...'
def download(self, url, path = '', reNew = True):
        '''Download the file at url and save it to path.'''
temp = url.split('/')
name = temp[len(temp)-1]
if path != '':
filename = path + name
if os.path.exists(filename):
if not reNew:
print 'You already have ' + filename
return
result = urllib2.urlopen(url).read()
self.save(name, result, reNew, path)
    def getTime(self, _str = '%Y-%m-%d %H:%M:%S', _t = None):
        # resolve the default at call time; a time.localtime() default
        # argument would be evaluated only once, when the method is defined
        if _t is None:
            _t = time.localtime()
        return time.strftime(_str, _t)
def sumTime(self, _hour = 0, _min = 0, _sec = 0):
t = time.time()
t += (3600*_hour + 60*_min + _sec)
return time.localtime(t)
def subTime(self, _hour = 0, _min = 0, _sec = 0):
t = time.time()
t -= (3600*_hour + 60*_min + _sec)
return time.localtime(t)
if __name__ == '__main__':
pass
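    # Minimal usage sketch (assumes the MTool class above; Python 2 syntax):
    # tool = MTool()
    # print tool.isNum('3.14')   # True
    # print tool.getTime()       # e.g. '2013-08-27 12:00:00'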
|
#!/usr/bin/python
import sys
# the script uses Qt, QtGui and QtCore throughout, so the PyQt4 imports
# must be active; the Qwt5 bindings live under the PyQt4 package
from PyQt4 import Qt
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qwt5 as Qwt
import numpy as np
import math
from gather import *
class elementList(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle('Element List')
self.pt = QtGui.QCheckBox("Relative plots", self)
self.nt = QtGui.QCheckBox("Normalize plots", self)
self.nt.setEnabled(False)
self.el = QtGui.QListWidget(self)
self.el.addItems(hs.Elements)
self.el.setCurrentRow(0)
self.el.setEnabled(False)
self.el.setMinimumHeight(350)
self.el.setMaximumWidth(150)
self.tl = QtGui.QListWidget(self)
for i in range(hs.nThetaI):
self.tl.addItem("%6.2f" % math.degrees(hs.thetaI[i]))
self.tl.setCurrentRow(0)
self.tl.setMinimumHeight(150)
self.tl.setMaximumWidth(150)
self.qb = QtGui.QPushButton("Quit")
self.qb.setMaximumWidth(100)
## SET PLOT ATTRIBUTES
##
self.ratioPlots = []
for i in range(len(hs.Elements)):
self.ratioPlots.append( Qwt.QwtPlot(self))
self.ratioPlots[i].enableAxis(Qwt.QwtPlot.xBottom, False)
self.ratioPlots[i].setCanvasBackground(Qt.Qt.white)
#self.ratioPlots[i].plotLayout().setCanvasMargin(0)
#self.ratioPlots[i].plotLayout().setAlignCanvasToScales(True)
self.ratioPlots[i].setAxisScale(Qwt.QwtPlot.xBottom, 0, 90)
self.ratioPlots[i].setAxisMaxMajor(Qwt.QwtPlot.yLeft, 4)
#self.ratioPlots[i].axisWidget(Qwt.QwtPlot.yLeft).setBorderDist(50,60)
## LOAD DATA
##
self.data = []
for iTht in range(hs.nThetaI):
self.data.append([])
for iElem in range(len(hs.Elements)):
self.data[iTht].append((hs.toArray(set=iTht, lvl=iElem*2) + hs.toArray(set=iTht, lvl=iElem*2+1)).mean(1))
## PLOT
##
self.plotData = []
x = np.linspace(0, 90, hs.resTheta)
for iElem in range(len(hs.Elements)):
            self.plotData.append(Qwt.QwtPlotCurve(hs.Elements[iElem]))  # label each curve by its element
self.plotData[iElem].setPen(Qt.QPen(Qt.Qt.red))
y = self.data[0][iElem]
self.plotData[iElem].setData(x, y)
self.plotData[iElem].attach(self.ratioPlots[iElem])
## SET LAYOUT
##
sbox = QtGui.QHBoxLayout()
rbox = QtGui.QVBoxLayout()
hbox = QtGui.QVBoxLayout()
hbox.addWidget(self.el)
hbox.addWidget(self.pt)
hbox.addWidget(self.nt)
hbox.addSpacing(50)
hbox.addWidget(self.tl)
hbox.addStretch(1)
hbox.addWidget(self.qb)
for i in range(len(hs.Elements)):
rbox.addWidget(self.ratioPlots[i])
sbox.addLayout(hbox)
sbox.addSpacing(50)
sbox.addLayout(rbox)
self.setLayout(sbox)
self.resize(800, 1000)
## SET CONNECTIONS
##
self.connect(self.el, QtCore.SIGNAL('itemSelectionChanged()'), self.plot)
self.connect(self.tl, QtCore.SIGNAL('itemSelectionChanged()'), self.plot)
self.connect(self.pt, QtCore.SIGNAL('stateChanged(int)'), self.changeRel)
self.connect(self.nt, QtCore.SIGNAL('stateChanged(int)'), self.changeRel)
self.connect(self.qb, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.SLOT('quit()'))
def plot(self):
iTht = self.tl.currentRow()
x = np.linspace(0, 90, hs.resTheta)
for iElem in range(len(hs.Elements)):
if(self.pt.isChecked()):
y = self.data[iTht][iElem] / self.data[iTht][self.el.currentRow()]
if(self.nt.isChecked()):
y /= y[0]
else:
y = self.data[iTht][iElem]
self.plotData[iElem].setData(x, y)
self.plotData[iElem].attach(self.ratioPlots[iElem])
self.ratioPlots[iElem].replot()
def changeRel(self):
self.nt.setEnabled(self.pt.isChecked())
self.el.setEnabled(self.pt.isChecked())
self.plot()
app = QtGui.QApplication(sys.argv)
hs = xrHemisphere()
hs.load(sys.argv[1])
hs.divideBySolidAngle()
icon = elementList()
icon.show()
app.exec_()
|
You authorize Rosenthal Jaguar Tysons Corner to contact you with marketing information through written communications, calling or texting you at the phone number(s) you've provided. You understand these calls or texts may use computer-assisted dialing and/or prerecorded messages. This authorization is not required to complete the purchase or lease of any of our products.
Sharing Information with Service Companies -- In order to provide quality service, the Rosenthal Automotive Organization and the AV Auto Group, LLC contracts with service providers for essential roles in processing your application. We may disclose some or all of the information we collect to these service providers but only in order to obtain credit for you. All of the Rosenthal Automotive Organization and the AV Auto Group, LLC's service providers have entered into a contract with the Rosenthal Automotive Organization and the AV Auto Group, LLC that forbids them from using information provided by the Rosenthal Automotive Organization and the AV Auto Group, LLC for any purpose other than providing the service for the Rosenthal Automotive Organization and the AV Auto Group, LLC customers. The Rosenthal Automotive Organization and the AV Auto Group, LLC takes security precautions to monitor the use of the information and prevent the use of the information for any other purpose.
Information Sharing With Merchants -- The Rosenthal Automotive Organization and the AV Auto Group, LLC does not share any information with unrelated merchants for the purpose of extending offers of goods and services without your consent.
Information Sharing with Partners -- The Rosenthal Automotive Organization and the AV Auto Group, LLC offers its products in conjunction with financial institutions and other businesses (collectively, the Rosenthal Automotive Organization and the AV Auto Group, LLC Partners). By completing a credit application with the Rosenthal Automotive Organization and the AV Auto Group, LLC for the purpose of locating a vehicle that meets your criteria and obtaining credit for the purchase of such vehicle, you agree to the sharing of information with that partner. These relationships are clearly stated in the application and by completing the application you agree to the information sharing by The Rosenthal Automotive Organization and the AV Auto Group, LLC and the partner which may include: your name, address, phone number, e-mail address, and personal financial information. |
import errno
import logging
import socket
import sys
from random import randint
def get_local_network_ip() -> str:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(("10.255.255.255", 1))
ip = s.getsockname()[0]
s.close()
return ip
def get_free_port(
port: int = 0, range_min: int = 49152, range_max: int = 65535
) -> int:
if not port:
port = randint(range_min, range_max)
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
logging.debug("Trying to bind to port: %i", port)
s.bind(("127.0.0.1", port))
except OSError as err:
logging.debug("Couldn't bind to port %i: %s", port, err)
if err.errno == errno.EADDRINUSE or (
# "[WinError 10013] An attempt was made to access a
# socket in a way forbidden by its access
# permissions"
sys.platform == "win32"
and err.winerror == 10013
):
port = randint(range_min, range_max)
continue
raise
logging.debug("Port %s is free", port)
return port
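if __name__ == "__main__":
    # Minimal usage sketch (assumes the helpers above): print this machine's
    # LAN address and a port that is currently free on the loopback interface.
    logging.basicConfig(level=logging.DEBUG)
    print("local ip:", get_local_network_ip())
    print("free port:", get_free_port())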
|
I've got some really exciting news to share. I have collaborated with one of the most trusted and loved brands, Dabur Vatika, on a series of videos that talk about their latest range of products and solutions for hair-related problems.
If you are someone who has been suffering from hair fall, I highly recommend you watch this video to learn about Dabur's new Oil Balance Hair Fall Treatment Shampoo, along with tips on how to wash and take care of your hair.
As always, if you have any further questions, please do write to me, or you can visit the Dabur Vatika Facebook page, where I have been answering a lot of hair care queries as part of a live chat. |
import numpy as np
from scipy import stats
from oneclass import oneclass
from sklearn.tree import DecisionTreeClassifier
rng = np.random.RandomState(42)
# Example settings
n_samples = 2000
outliers_fraction = 0.25
clusters_separation = [0,1,2]
# define two or more outlier detection tools to be compared
classifiers = {
"One-Class": oneclass.OneClassClassifier(contamination=outliers_fraction,base_classifier=DecisionTreeClassifier(max_depth=2),density_only=True)
}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
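# label inliers +1 and outliers -1 (the scikit-learn convention)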
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
    for clf_name, clf in classifiers.items():
        # fit the data and tag outliers
        clf.fit(X)
        scores_pred = clf.decision_function(X)
        # threshold kept for reference; it marks the score cut-off for outliers
        threshold = stats.scoreatpercentile(scores_pred,
                                            100 * outliers_fraction)
        y_pred = clf.predict(X)
        n_errors = (y_pred != ground_truth).sum()
        # report the error count for this separation setting
        print("Cluster separation:", offset, "Number of errors:", n_errors) |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkInterfaceDnsSettings(Model):
"""DNS settings of a network interface.
:param dns_servers: List of DNS servers IP addresses. Use
'AzureProvidedDNS' to switch to azure provided DNS resolution.
'AzureProvidedDNS' value cannot be combined with other IPs, it must be the
only value in dnsServers collection.
:type dns_servers: list of str
:param applied_dns_servers: If the VM that uses this NIC is part of an
Availability Set, then this list will have the union of all DNS servers
from all NICs that are part of the Availability Set. This property is what
is configured on each of those VMs.
:type applied_dns_servers: list of str
:param internal_dns_name_label: Relative DNS name for this NIC used for
internal communications between VMs in the same virtual network.
:type internal_dns_name_label: str
:param internal_fqdn: Fully qualified DNS name supporting internal
communications between VMs in the same virtual network.
:type internal_fqdn: str
:param internal_domain_name_suffix: Even if internalDnsNameLabel is not
specified, a DNS entry is created for the primary NIC of the VM. This DNS
name can be constructed by concatenating the VM name with the value of
internalDomainNameSuffix.
:type internal_domain_name_suffix: str
"""
_attribute_map = {
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
'applied_dns_servers': {'key': 'appliedDnsServers', 'type': '[str]'},
'internal_dns_name_label': {'key': 'internalDnsNameLabel', 'type': 'str'},
'internal_fqdn': {'key': 'internalFqdn', 'type': 'str'},
'internal_domain_name_suffix': {'key': 'internalDomainNameSuffix', 'type': 'str'},
}
def __init__(self, dns_servers=None, applied_dns_servers=None, internal_dns_name_label=None, internal_fqdn=None, internal_domain_name_suffix=None):
self.dns_servers = dns_servers
self.applied_dns_servers = applied_dns_servers
self.internal_dns_name_label = internal_dns_name_label
self.internal_fqdn = internal_fqdn
self.internal_domain_name_suffix = internal_domain_name_suffix
|
One of the most critical parts of 3D printing is taking a 3D model and converting it to a format that a 3D printer can print. Slic3r is one of the best pieces of software out there. It gives great control over how an object is converted, and can save you hours upon hours of frustration by producing great prints. This book will show you how.
Instant Slic3r strives to give you a good foundation for what Slic3r can do. It offers a hands-on way to learn about how Slic3r works, and how you can use its powerful features to produce great prints. It takes you from the beginning setup, through advanced features such as post processing the G-Code that it produces. You will get a good grasp on how Slic3r works, and ways to troubleshoot your prints to make the best objects your printer can produce.
Instant Slic3r covers where to get Slic3r and how to install it on a variety of platforms, and helps lead you through setting up and tweaking your personal printer settings. The book is clearly laid out and easy to follow, but with information that even experienced users of Slic3r will find useful. You will learn how Slic3r converts your model into G-Code. Then, you will learn how to not only read that G-Code, but how you can write scripts for post-processing the output, before it is sent to the printer. You will also learn what things can go wrong with printing, and how you can use Slic3r to troubleshoot the issue and then fix it. Along with this, you will learn how to improve your printing by delving into Slic3r’s expert settings.
The book is filled with examples, leading you through not only how to do things, but why they need to be done. After you are finished with this book, you will have a deeper understanding of Slic3r and 3D printing.
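To give a flavor of the post-processing covered above, here is a minimal sketch of such a script (an illustrative example, not taken from the book; Slic3r invokes each configured post-processing script with the path of the exported G-code file as its first argument):

#!/usr/bin/env python3
import sys

path = sys.argv[1]  # Slic3r passes the G-code file path as the argument
with open(path) as f:
    gcode = f.read()
# prepend a marker comment and write the file back, leaving the G-code intact
with open(path, "w") as f:
    f.write("; post-processed example\n" + gcode)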
David Michael Moore has been a lifelong maker. Starting at an early age taking apart his parents' alarm clocks, he has always been curious about how things work, and how they are put together. This drive was cemented in the seventh grade when he had a wood and metal shop. A developer and designer by trade, David has been working with software for years. The current explosion of making rekindled his love of the physical act of making. He is often found in his workshop or in the basement working with wood, metal, plaster, plastic, and electronics, or musing about the act of making on http://vandermore.com. He hopes to pass on his desire for making to his two daughters, so that their making spirit isn't overwhelmed by the consumer side of life. |
#!/usr/bin/python3
import datetime
import logging
import os
import requests
import requests_cache
import subprocess
import tempfile
import time
from dirlist import list_keyboards
#from keymankeyboards import get_api_keyboards
#from keyman_config import get_kmp, install_kmp
from keyman_config.get_kmp import get_keyboard_data, get_kmp_file, keyman_cache_dir
from keyman_config.install_kmp import get_metadata, get_infdata, extract_kmp
#TODO check for kmn and check if it is compilable
#TODO extra output files jsonkmpnokmn, jsonkmpbadkmn, goodjsonkmpkmn and for inf as well
def get_kmn(kbid, sourcePath):
base_url = "https://raw.github.com/keymanapp/keyboards/master/" + sourcePath
kmn_url = base_url + "/source/" + kbid + ".kmn"
cache_dir = keyman_cache_dir()
current_dir = os.getcwd()
expire_after = datetime.timedelta(days=7)
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
os.chdir(cache_dir)
requests_cache.install_cache(cache_name='keyman_cache', backend='sqlite', expire_after=expire_after)
now = time.ctime(int(time.time()))
    response = requests.get(kmn_url)
    logging.debug("Time: {0} / Used Cache: {1}".format(now, response.from_cache))
    os.chdir(current_dir)
    requests_cache.core.uninstall_cache()
    # return the (possibly cached) response instead of issuing a second,
    # uncached request
    return response
def main():
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s:%(message)s')
keyboarddata = list_keyboards()
if keyboarddata:
with open('./nokmp.txt', 'wt') as nokmp, \
open('./infnokeyboard.txt', 'wt') as infnokeyboard, \
open('./goodjsonkmpkmn.txt', 'wt') as goodjsonkmpkmn, \
open('./jsonkmpnokmn.txt', 'wt') as jsonkmpnokmn, \
open('./jsonkmpbadkmn.txt', 'wt') as jsonkmpbadkmn, \
open('./jsonkmpmissingkmn.txt', 'wt') as jsonkmpmissingkmn, \
open('./brokeninf.txt', 'wt') as brokeninf, \
open('./nodata.txt', 'wt') as nodata, \
open('./goodinfkmp.txt', 'wt') as goodinfkmp:
print("Keyboard: will work in kmfl :)", file=goodjsonkmpkmn) # goodjsonkmpkmn
print("Keyboard: has uncompilable kmn", file=jsonkmpbadkmn) # jsonkmpbadkmn
print("Keyboard: has json in kmp but can't find the kmn on github", file=jsonkmpmissingkmn) # jsonkmpmissingkmn
print("Keyboard: has json in kmp but has no sourcePath to look for kmn on github", file=jsonkmpnokmn) # jsonkmpnokmn
print("Keyboard: has kmp with kmp.inf", file=goodinfkmp)
print("Keyboard: has kmp with kmp.inf but it has no Keyboard", file=infnokeyboard)
print("Keyboard: has kmp but no kmp.json and no or broken kmp.inf", file=brokeninf)
print("Keyboard: does not have kmp so mobile/web only", file=nokmp)
print("Keyboard: has no data", file=nodata)
for kbid in keyboarddata:
kbdata = get_keyboard_data(kbid, True)
print(kbid)
if kbdata:
if 'packageFilename' in kbdata:
kmpfile = get_kmp_file(kbdata, True)
with tempfile.TemporaryDirectory() as tmpdirname:
extract_kmp(kmpfile, tmpdirname)
try:
info, system, options, keyboards, files = get_metadata(tmpdirname)
if keyboards:
if 'sourcePath' in kbdata:
response = get_kmn(kbid, kbdata['sourcePath'])
if response.status_code == 200:
kmndownloadfile = os.path.join(tmpdirname, kbid + ".kmn")
with open(kmndownloadfile, 'wb') as f:
f.write(response.content)
subprocess.run(["kmflcomp", kmndownloadfile], stdout=subprocess.PIPE, stderr= subprocess.STDOUT)
kmfl_file = os.path.join(tmpdirname, kbid + ".kmfl")
if os.path.isfile(kmfl_file):
logging.debug("goodjsonkmpkmn")
print(kbid, file=goodjsonkmpkmn) # goodjsonkmpkmn
else:
logging.debug("jsonkmpbadkmn")
print(kbid, file=jsonkmpbadkmn) # jsonkmpbadkmn
else:
logging.debug("jsonkmpmissingkmn")
print(kbid, file=jsonkmpmissingkmn) # jsonkmpmissingkmn
else:
logging.debug("jsonkmpnokmn")
print(kbid, file=jsonkmpnokmn) # jsonkmpnokmn
else:
info, system, options, keyboards, files = get_infdata(tmpdirname)
                                    if keyboards:
                                        logging.debug("goodinfkmp")
                                        print(kbid, file=goodinfkmp)
                                    elif files:
                                        logging.debug("infnokeyboard")
                                        print(kbid, file=infnokeyboard)
else:
print(kbid, file=brokeninf)
except KeyError:
logging.debug("brokeninf")
print(kbid, file=brokeninf)
else:
logging.debug("nokmp")
print(kbid, file=nokmp)
else:
logging.debug("nodata")
print(kbid, file=nodata)
if __name__ == "__main__":
main()
|
Companies with a service organization supporting customers in the field are going to find that their enterprise resource planning (ERP) software may not adequately address important elements of the service life cycle. So, they consider adding field service management (FSM) software to extend their ERP, but not every field service software product is capable of addressing all the gaps.
In this article, I’ll identify a number of difficulties some ERP systems present and create a checklist of essential functionality field service software must deliver.
Field Functionality No. 1 — Track that truck stock inventory and don’t forget the reverse supply chain. An ERP usually does a good job tracking inventory levels, location, and inventory transactions, but it does not always do a very good job of tracking mobile truck inventory, and in a field service environment, that is where inventory is consumed.
Each technician vehicle is, in essence, a rolling warehouse. This means software must provide visibility of inventory on each truck so dispatchers or automated scheduling software can determine which technician has the inventory to perform an assigned service task. It needs to provide visibility of inventory consumed from the truck stock in order to bill the part to the customer, determine whether it is under warranty, drive replenishment processes, and create parts dispositions for parts that must be received back for repair, remanufacturing, refurbishment, and compliance.
It will also need to track when larger inventory items need to be loaded on a truck in a specific order to accommodate the order of service calls, or if a repairable unit can be sent to a depot. In this instance, a return material authorization (RMA) should be created automatically and the defect routed to the right repair location. If a field replaceable unit is eligible for a core credit, this process needs to be initiated in the field and ERP alerts must be created to claim the credit against the OEM.
Some major ERP providers do not provide this functionality — even some of the popular field service management software products don’t; therefore, it’s vital for service organizations to ask questions of their software providers to ensure the requirement for this functionality does not fall between the cracks. An ERP system will usually remain the inventory system of record with field service software modifying inventory levels or usages, but the field service software should facilitate processes for inventory management and inventory accountability, like cycle counts, as well as initiating purchase demands on service order requirements for parts not in stock.
Field Functionality No. 2 — Track essential asset maintenance history. ERP products usually do a good job of establishing the original asset record derived from a manufacturing or sales order, but what if the asset record changes or the asset changes ownership or changes over time? These field service-related transactions normally don’t update the asset record in the ERP. Further, what if the product is sold through distribution?
Once that serialized item has been installed, the ability to update the asset record with the essential service and maintenance history is often difficult to achieve with some solutions. FSM software must be capable of tracking the work performed, parts consumed, fault codes, changes in location, and other transactions that take place during the asset life cycle. Recording maintenance history is just one element of this requirement and a lot of other things can happen with an asset after the sale that are essential to service delivery.
For example, the location of the asset may change, with assets covered by a service contract moving within a facility or even transported from one facility or region to another. Alternatively, the way the customer is using the asset may change with implications on recommended service regimens. Service software has to be sensitive to these changes.
Ownership of parts or subcomponents may change depending on the contract. If a part is removed, does it remain with the customer? Do they get it back after it has been refurbished? If so, do they get the same component back or just a replacement? When subcontractors perform work on an asset, field service software will authorize the work, capture work order completions and part returns, and be used to manage the subcontractor compensation process.
FSM software should capture all of these changes and use the resulting data to facilitate the service life cycle while maintaining a current database for customers, assets, contracts, warranties, parts, and repairs without administrative overhead.
Field Functionality No. 3 – Track contract management capability. An ERP instance may include functionality for things such as contract management, including a record of what the customer is entitled to in terms of pricing, replacement parts, service level agreements (SLAs), and more. But if a company is managing contracts in this way, and the field work order or ticketing system is outside the ERP, how do you get the entitlements to flow into the work order?
The answer is that you usually don’t, and you end up with a very manual and tedious service billing process. The goal should be to have the technician complete the work order and not have to worry about what pricing, parts, or SLA the customer is entitled to under contract. All of that should occur without intervention. If contract management and field service ticketing are handled in two separate systems, the required integration is daunting enough that it usually is not performed or not performed to the extent it should be.
Field Functionality No. 4 — Track those high-volume transactions, including point of sale and work order completion, that are often performed outside of the ERP. Most businesses use third-party solutions for high-volume tasks, such as point of sale, but field service organizations process high-volume transactions as well, and it is essential these are recorded on a timely basis. The ability to quickly debrief a call and have it prepared for billing can reduce days sales outstanding and greatly improve time to cash.
All of these barriers to efficient delivery of service exist unless field service software streamlines the delivery process. It’s true that a systems integrator can make an ERP do pretty much anything with the proper amount of time or cost, but without an application to easily manage the process, it will be very difficult to make it easy to use for people in the field and administrators.
The sheer complexity of the application architecture cannot be configured out of a business process. If you have 20,000 break-fix service calls a week, this costs you in terms of time, dollars, and customer experience. A best-in-class FSM software application should complete these transactions automatically in the back-end ERP system as the technician completes work orders, consumes inventory, records equipment condition, and interacts with the customer.
Most ERP applications are not a perfect fit for much of the field service process. That is why a service organization will usually extend that system of record with a best-of-breed software application. The technician just records their travel and labor time, parts used, parts needed, part returns, and quality information, and the system addresses pricing, if commitments were met, and the back-office database updates seamlessly. With developing trends such as assets-as-a-service, IoT, and augmented reality all beginning to take shape, these technologies will be supported in best-in-class service systems earlier in their life cycle. The underlying technology is just better suited to these highly configurable service platforms. |
# -*- coding: utf-8 -*-
"""Module used to launch rating dialogues and send ratings to Trakt"""
import xbmc
import xbmcaddon
import xbmcgui
from resources.lib import utilities
from resources.lib import kodiUtilities
from resources.lib import globals
import logging
logger = logging.getLogger(__name__)
__addon__ = xbmcaddon.Addon("script.trakt")
def ratingCheck(media_type, items_to_rate, watched_time, total_time):
"""Check if a video should be rated and if so launches the rating dialog"""
logger.debug("Rating Check called for '%s'" % media_type)
if not kodiUtilities.getSettingAsBool("rate_%s" % media_type):
logger.debug("'%s' is configured to not be rated." % media_type)
return
if items_to_rate is None:
logger.debug("Summary information is empty, aborting.")
return
watched = (watched_time / total_time) * 100
if watched >= kodiUtilities.getSettingAsFloat("rate_min_view_time"):
rateMedia(media_type, items_to_rate)
else:
logger.debug("'%s' does not meet minimum view time for rating (watched: %0.2f%%, minimum: %0.2f%%)" % (
media_type, watched, kodiUtilities.getSettingAsFloat("rate_min_view_time")))
def rateMedia(media_type, itemsToRate, unrate=False, rating=None):
"""Launches the rating dialog"""
for summary_info in itemsToRate:
if not utilities.isValidMediaType(media_type):
logger.debug("Not a valid media type")
return
elif 'user' not in summary_info:
logger.debug("No user data")
return
s = utilities.getFormattedItemName(media_type, summary_info)
logger.debug("Summary Info %s" % summary_info)
if unrate:
rating = None
if summary_info['user']['ratings']['rating'] > 0:
rating = 0
            if rating is not None:
logger.debug("'%s' is being unrated." % s)
__rateOnTrakt(rating, media_type, summary_info, unrate=True)
else:
logger.debug("'%s' has not been rated, so not unrating." % s)
return
rerate = kodiUtilities.getSettingAsBool('rate_rerate')
if rating is not None:
if summary_info['user']['ratings']['rating'] == 0:
logger.debug(
"Rating for '%s' is being set to '%d' manually." % (s, rating))
__rateOnTrakt(rating, media_type, summary_info)
else:
if rerate:
if not summary_info['user']['ratings']['rating'] == rating:
logger.debug(
"Rating for '%s' is being set to '%d' manually." % (s, rating))
__rateOnTrakt(rating, media_type, summary_info)
else:
kodiUtilities.notification(
kodiUtilities.getString(32043), s)
logger.debug(
"'%s' already has a rating of '%d'." % (s, rating))
else:
kodiUtilities.notification(
kodiUtilities.getString(32041), s)
logger.debug("'%s' is already rated." % s)
return
if summary_info['user']['ratings'] and summary_info['user']['ratings']['rating']:
if not rerate:
logger.debug("'%s' has already been rated." % s)
kodiUtilities.notification(kodiUtilities.getString(32041), s)
return
else:
logger.debug("'%s' is being re-rated." % s)
gui = RatingDialog(
"script-trakt-RatingDialog.xml",
__addon__.getAddonInfo('path'),
media_type,
summary_info,
rerate
)
gui.doModal()
if gui.rating:
rating = gui.rating
if rerate:
if summary_info['user']['ratings'] and summary_info['user']['ratings']['rating'] > 0 and rating == summary_info['user']['ratings']['rating']:
rating = 0
if rating == 0 or rating == "unrate":
__rateOnTrakt(rating, gui.media_type, gui.media, unrate=True)
else:
__rateOnTrakt(rating, gui.media_type, gui.media)
else:
logger.debug("Rating dialog was closed with no rating.")
del gui
# Reset rating and unrate for multi part episodes
unrate = False
rating = None
def __rateOnTrakt(rating, media_type, media, unrate=False):
logger.debug("Sending rating (%s) to Trakt.tv" % rating)
params = media
if utilities.isMovie(media_type):
key = 'movies'
params['rating'] = rating
if 'movieid' in media:
kodiUtilities.kodiJsonRequest({"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.SetMovieDetails", "params": {
"movieid": media['movieid'], "userrating": rating}})
elif utilities.isShow(media_type):
key = 'shows'
# we need to remove this key or trakt will be confused
        del params["seasons"]
params['rating'] = rating
if 'tvshowid' in media:
kodiUtilities.kodiJsonRequest({"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.SetTVShowDetails", "params": {
"tvshowid": media['tvshowid'], "userrating": rating}})
elif utilities.isSeason(media_type):
key = 'shows'
params['seasons'] = [{'rating': rating, 'number': media['season']}]
elif utilities.isEpisode(media_type):
key = 'episodes'
params['rating'] = rating
if 'episodeid' in media:
kodiUtilities.kodiJsonRequest({"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.SetEpisodeDetails", "params": {
"episodeid": media['episodeid'], "userrating": rating}})
else:
return
root = {key: [params]}
if not unrate:
data = globals.traktapi.addRating(root)
else:
data = globals.traktapi.removeRating(root)
if data:
s = utilities.getFormattedItemName(media_type, media)
if 'not_found' in data and not data['not_found']['movies'] and not data['not_found']['episodes'] and not data['not_found']['shows']:
if not unrate:
kodiUtilities.notification(kodiUtilities.getString(32040), s)
else:
kodiUtilities.notification(kodiUtilities.getString(32042), s)
else:
kodiUtilities.notification(kodiUtilities.getString(32044), s)
class RatingDialog(xbmcgui.WindowXMLDialog):
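    # map skin control IDs (11030-11039) to rating values (1-10)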
buttons = {
11030: 1,
11031: 2,
11032: 3,
11033: 4,
11034: 5,
11035: 6,
11036: 7,
11037: 8,
11038: 9,
11039: 10
}
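    # map the same control IDs to localized focus-label string IDs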
focus_labels = {
11030: 32028,
11031: 32029,
11032: 32030,
11033: 32031,
11034: 32032,
11035: 32033,
11036: 32034,
11037: 32035,
11038: 32036,
11039: 32027
}
def __init__(self, xmlFile, resourcePath, media_type, media, rerate):
self.media_type = media_type
self.media = media
self.rating = None
self.rerate = rerate
self.default_rating = kodiUtilities.getSettingAsInt('rating_default')
def __new__(cls, xmlFile, resourcePath, media_type, media, rerate):
return super(RatingDialog, cls).__new__(cls, xmlFile, resourcePath)
def onInit(self):
s = utilities.getFormattedItemName(self.media_type, self.media)
self.getControl(10012).setLabel(s)
rateID = 11029 + self.default_rating
if self.rerate and self.media['user']['ratings'] and int(self.media['user']['ratings']['rating']) > 0:
rateID = 11029 + int(self.media['user']['ratings']['rating'])
self.setFocus(self.getControl(rateID))
def onClick(self, controlID):
if controlID in self.buttons:
self.rating = self.buttons[controlID]
self.close()
def onFocus(self, controlID):
if controlID in self.focus_labels:
s = kodiUtilities.getString(self.focus_labels[controlID])
if self.rerate:
if self.media['user']['ratings'] and self.media['user']['ratings']['rating'] == self.buttons[controlID]:
if utilities.isMovie(self.media_type):
s = kodiUtilities.getString(32037)
elif utilities.isShow(self.media_type):
s = kodiUtilities.getString(32038)
elif utilities.isEpisode(self.media_type):
s = kodiUtilities.getString(32039)
elif utilities.isSeason(self.media_type):
s = kodiUtilities.getString(32132)
else:
pass
self.getControl(10013).setLabel(s)
else:
self.getControl(10013).setLabel('')
|
President Donald Trump says special counsel Robert Mueller's investigation is — in Trump's words — "really, really unfair" for the upcoming elections in November, when control of Congress is at stake.
Trump says "we have to get it over with" and he's suggesting that the investigation into Russian election interference "should have been over with a long time ago."
Trump made his comments Friday to reporters traveling with him on Air Force One to Fargo, North Dakota.
The president says it would "need to be a fair deal" for him to sit down for an interview with Mueller's investigators. The special counsel and Trump's legal team have been negotiating a possible interview for months.
President Donald Trump is praising Brett Kavanaugh's progress toward winning confirmation to the Supreme Court.
But Trump isn't happy with the "anger and the meanness on the other side" — and says Democrats' behavior toward his nominee is "sick."
The president made the comments during a political rally Thursday in Billings, Montana.
He also raised the prospect of a move toward impeachment if Democrats win control of Congress in the November elections. |
import picamera
import picamera.array
import cv2
import time
import numpy as np
import multiprocessing as mp
import queue
import signal
def capture(q, stop, resolution=(640,480), framerate=30):
print('Start capturing...')
with picamera.PiCamera(resolution=resolution, framerate=framerate) as camera:
with picamera.array.PiRGBArray(camera, size=resolution) as raw:
time.sleep(2)
start = cv2.getTickCount()
for frame in camera.capture_continuous(raw, format="bgr", use_video_port=True):
try:
q.put(frame.array, False)
except queue.Full:
print('capture: full')
raw.truncate(0)
                elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000
                print('capture: %.1f ms/frame' % elapsed)
start = cv2.getTickCount()
if stop.is_set():
break
print('Capturing done')
q.cancel_join_thread()
def order_points(pts):
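    # order the 4 corners as [top-left, top-right, bottom-right, bottom-left]:
    # the top-left has the smallest x+y sum, the bottom-right the largest;
    # the top-right has the smallest x-y difference, the bottom-left the largest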
rect = np.zeros((4, 2), dtype = "float32")
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def process(q, stop):
print('Start processing...')
M = np.load('M.npy')
def nothing(x):
pass
cv2.namedWindow('video', cv2.WINDOW_NORMAL)
cv2.createTrackbar('threshold', 'video', 58, 255, nothing)
cv2.createTrackbar('cannyLow', 'video', 50, 255, nothing)
cv2.createTrackbar('cannyHigh', 'video', 150, 255, nothing)
video = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc(*'MJPG'), 20.0, (640,480))
while not stop.is_set():
start = cv2.getTickCount()
frame = None
try:
while True: # clear queue
frame = q.get(False)
except queue.Empty:
if frame is None:
continue
threshold = cv2.getTrackbarPos('threshold','video')
cannyLow = cv2.getTrackbarPos('cannyLow','video')
cannyHigh = cv2.getTrackbarPos('cannyHigh','video')
frame = frame[:300, :320]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, black = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
if not ret:
continue
edges = cv2.Canny(black, cannyLow, cannyHigh)
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if not contours:
continue
out = frame.copy()
allC = np.vstack(contours)
hull = cv2.convexHull(allC)
cv2.drawContours(out, [hull], 0, (0,0,255), 2)
rect = cv2.minAreaRect(allC)
box = np.int0(cv2.boxPoints(rect))
im = cv2.drawContours(out,[box],0,(0,255,0),2)
corners = order_points(box)
dst = np.array([[0, 0],
[639, 0],
[639, 479],
[0, 479]], dtype = "float32")
M = cv2.getPerspectiveTransform(corners, dst)
np.save("M", M)
# video.write(out)
cv2.imshow('video', out)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
        elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000
        print('process: %.1f ms/frame' % elapsed)
print('Processing done')
def main():
q = mp.Queue(10)
stop = mp.Event()
def sigint(signal, frame):
stop.set()
signal.signal(signal.SIGINT, sigint)
p = mp.Process(target=capture, args=(q, stop))
p.start()
try:
process(q, stop)
finally:
stop.set()
p.join()
if __name__ == "__main__":
main()
|
OLYMPIA, Washington (Reuters) - More than 100 missing-persons reports have been filed with various agencies in Washington state in the aftermath of a mudslide in which eight deaths have been confirmed, county officials said on Monday.
The landslide was triggered after rain-soaked embankments along State Route 530 near Oso, Washington, about 55 miles northeast of Seattle, gave way on Saturday morning, washing away at least six homes.
In all, at least 49 homes had some level of damage from the slide, said John Pennington, director of the Snohomish County Department of Emergency Management.
The search for victims resumed early on Monday after treacherous quicksand conditions forced rescue workers to suspend their efforts at dusk on Sunday. Some workers, mired in mud up to their armpits, had to be dragged to safety.
Pennington said the number of missing-persons reports was likely to decline as survivors initially reported as missing eventually make contact with loved ones and local authorities, or as some reports turn out to overlap with others.
But authorities cast growing doubt on the chances of finding anyone else alive in the tangle of debris and mud that is up to 15 feet deep. |
"""This module contains a concrete implementation of the game Nim."""
from random import choice
from mopy.game import Game
from mopy.impl.nim.state import NimState
from mopy.impl.nim.action import NimAction
class NimGame(Game):
def __init__(self):
pass
def new_game(self):
"""
Initialize a new game with 3 heaps with 3, 4, and 5 elements.
Initial state looks like the following:
Player 1's move
x
x x
x x x
x x x
x x x
----------
H1 H2 H3
"""
heaps = [3, 4, 5]
current_player = 0
return NimState(heaps, current_player)
def do_action(self, state, action):
"""Take a non-zero number of elements from a heap."""
state.heaps[action.heap_num] -= action.num_taken
state.current_player = 1 if state.current_player == 0 else 0
def is_over(self, state):
"""Game is only over when all heaps are empty."""
return sum(state.heaps) == 0
def get_result(self, state):
"""
If the game is over, the winner is the previous player.
This is because after we execute the final action, we still
advance the current player. Make sure to only call this when
the game is actually complete!
"""
done = self.is_over(state)
if not done:
raise Exception("Game is not done yet!")
return 1 if state.current_player == 0 else 0
def get_random_action(self, state):
"""Take a random number of elements from a random heap."""
return choice(self.get_legal_actions(state))
def get_legal_actions(self, state):
"""
Return all possible take actions the current player can take.
Note that you can take any number of elements from any heap
from 1 to the number of elements on that heap.
"""
actions = []
heaps = state.heaps
for i, h in enumerate(heaps):
for n in range(1, h + 1):
actions.append(NimAction(i, n))
return actions
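# Minimal usage sketch (assumes the NimGame API above): play one game with
# random moves and report the winning player index.
if __name__ == "__main__":
    game = NimGame()
    state = game.new_game()
    while not game.is_over(state):
        game.do_action(state, game.get_random_action(state))
    print("Winner: player", game.get_result(state))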
|
As a buyer of property in Marbella and Spain there are a number of costs and taxes over and above the property price that you will have to pay. Depending upon whether you are buying a new property from a developer, or a resale property from a private individual, you will either have to pay VAT & Stamp Duty, or a transfer tax. The different cases are explained below, along with the other costs and taxes that are common to both cases.
These taxes apply if the property is being sold for the first time, and the seller is a property developer. The VAT (known as IVA in Spain) is 10% on the price of the purchase in the case of built residential property (villa, apartment, etc), and 18% in the case of plots of land (without built property) and commercial premises. The Stamp duty (known as AJD) is 1% of the price of the purchase, but might go up in some regions, so be sure to check on the latest rate. Both VAT and Stamp Duty are paid by the buyer, and if any deposit is paid before completion of the sale, such deposit will be subject to VAT at the moment of payment of this deposit. In this scenario there is no transfer tax to pay.
You are strongly advised to hire a lawyer to help you during the buying process. Your lawyer drafts and reviews contracts on your behalf and can explain all the legal and administrative issues you face. Your lawyer should also carry out any necessary due diligence (checking ownership claim of the seller, charges on the property, permits, etc.) and arrange all the required documents to complete the process (property registration, tax payments, etc.). A lawyer – Abogado in Spanish – will charge you according to the service you require. This will vary according to the complexity of the purchase. Many charge around 1% of the purchase price in legal fees.
If you choose to buy with a mortgage then this will incur several additional costs. First there will be the property valuation that the mortgage provider will require before granting the mortgage. This is paid for the by the buyer and can cost around 500 Euros. Then there will be the costs of the mortgage itself. This varies according to the provider, and even according to the particular branch. However there is usually some kind of opening fee of around 1% of the value of the mortgage. Finally a mortgage will increase the Notary expenses.
Notary expenses are nearly always paid by the buyer and are calculated in relation to the purchase price declared in the deeds of sale. To be on the safe side you should calculate Notary fees as being 1% of the purchase price declared in the deeds of sale. In many cases however Notary fees are more like 0.5% (or less) of the price declared in the deeds.
Bear in mind that it may be prudent to carry out a survey of the property and that this will have a cost. In Summary, allow for up to 10% of the purchase price in taxes and other costs. If the buyer takes out a mortgage these costs can be somewhat higher due to an additional public deed for the mortgage and the inevitable bank charges involved. In this case transaction costs might reach between 10% and 12% of the value of the property purchased.
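To make the arithmetic concrete, here is a minimal sketch in Python (hypothetical figures for a new-build purchase; actual rates vary by region, and resale purchases pay transfer tax instead of VAT):

def new_build_costs(price):
    vat = 0.10 * price          # IVA on built residential property
    stamp_duty = 0.01 * price   # AJD; higher in some regions
    legal_fees = 0.01 * price   # a typical lawyer's fee of around 1%
    notary = 0.01 * price       # a safe upper estimate for notary expenses
    return price + vat + stamp_duty + legal_fees + notary

print(new_build_costs(300000))  # 339000.0, i.e. about 13% on top of the price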
To pay for the property, you will more than likely need to write a banker’s cheque. In order to do that, you will need to open an account in a Spanish bank and transfer money from the bank in your country. The cost of transferring the money can be up to 0.4% of the amount transferred. The banker’s cheque will most likely cost 0.5% of its amount.
This tax has been reduced to zero as from 01/01/2008.
If non-residents rent out their property and receive an income in exchange, they are obliged by law to declare this income and pay taxes on it. The taxable base and the tax rate will be determined by the laws as they apply to each person’s particular circumstances (taking into account the double taxation treaty – if any – between Spain and the country of origin of the non-resident). In many cases non-residents simply pay a flat rate of 25% of the gross income they earn from their property in Spain. Residents in Spain will have to pay the income tax based on their income earned during the year. The tax rate depends on the level of income.
A special reference should be made to the local capital gains tax – known as Plusvalía. This is a local / municipal tax that only applies to the increase in value of the land upon which urban properties are built, and that is levied at the time of transfer of such properties. It is calculated on the basis of the valor catastral (an administrative value that is usually lower than the market value, sometimes considerably so) of the property. The amount to be paid will depend on how long the seller has owned the property: the longer the period of time during which the seller has owned the property, the higher the amount of tax.
This tax should be paid by the seller, though it is possible for sellers of resale property to try to shift the burden of this tax to the buyer. This practise is unheard of in regions such as the Costa Brava and the Costa Dorada, though quite common in the Costa Del Sol.
Spanish property sold by a non-resident owner will be charged capital gains tax at 18%, with a 3% withholding provision. |
#!/usr/bin/env python3
import time
import random
import socket
from flask import Flask, render_template, redirect, url_for, request, jsonify
import config
log = None
# classes
class Agent():
def __init__(self, ip, cw=True, node=None, state='initial'):
self.ip = ip
self.cw = cw
self.state = state
self.node = node
def __repr__(self):
return 'Agent: ip {}, direction CW: {}, state: {}, node: {}'.format(self.ip, self.cw, self.state, self.node)
class Node():
def __init__(self, label):
assert isinstance(label, int), 'Node constructor accepts numeric label only'
self.label = label
# list of agent ips in the current node
self.agents = []
def add_agent(self, agent_ip):
# add an agent ip to the list of agents in the current node
self.agents.append(agent_ip)
def __repr__(self):
return '<Node {}: [{}]>'.format(self.label, ' | '.join(str(app.agents[ip]) for ip in self.agents))
class Ring():
def __init__(self, n_nodes):
self._nodes = [Node(i) for i in range(n_nodes)]
self.n_nodes = n_nodes
def get_node(self, label):
return self._nodes[label]
def next(self, agent):
"""Return next node."""
i = 1 if agent.cw else -1
return self._nodes[(agent.node+i) % self.n_nodes]
def prev(self, agent):
"""Return prev node."""
i = -1 if agent.cw else 1
return self._nodes[(agent.node+i) % self.n_nodes]
def blocked(self, agent):
"""Check if the next node is blocked."""
next_node = self.next(agent)
if agent.ip == app.malicious_ip:
return len(next_node.agents) > 0
else:
return app.malicious_ip in next_node.agents
def random_place_agents(self):
"""Randomly place agents in the ring."""
        # True = clockwise, False = counterclockwise
        # NOTE: fixed test placement below; the early return makes the random
        # placement code at the end of this method unreachable.
a = app.agents[app.agents_ips[0]]
a.node = 3
self.get_node(3).add_agent(a.ip)
a.cw = False
a = app.agents[app.agents_ips[1]]
a.node = 6
self.get_node(6).add_agent(a.ip)
a.cw = False
a = app.agents[app.agents_ips[2]]
a.node = 5
self.get_node(5).add_agent(a.ip)
a.cw = True
a = app.agents[app.malicious_ip]
a.node = 1
self.get_node(1).add_agent(a.ip)
a.cw = False
return
# at most 1 agent per node, randomize direction in case of unoriented ring
for agent, node in zip(app.agents.values(), random.sample(self._nodes, len(app.agents.keys()))):
agent.cw = True if config.oriented else random.choice([True, False])
agent.node = node.label
self.get_node(node.label).add_agent(agent.ip)
def dump(self):
ring = dict()
for node in self._nodes:
ring[str(node.label)] = [(app.agents[a].ip, str(app.agents[a].cw), app.agents[a].state, app.agents[a].node) for a in node.agents]
return ring
def __repr__(self):
return ', '.join(str(node) for node in self._nodes)
class MTFGRServer(Flask):
'''Wrapper around the Flask class used to store additional information.'''
def __init__(self, *args, **kwargs):
super(MTFGRServer, self).__init__(*args, **kwargs)
self.ring = Ring(config.n_nodes)
self.agents_ips = config.agents_ips
self.agents = dict()
self.malicious_ip = config.malicious_ip
self.oriented = config.oriented
self.started = False
# instance of the web application
app = MTFGRServer(__name__)
# auxiliary functions
def _reset():
"""Reset the global variables by parsing again the config file."""
import config
global log
app.ring = Ring(config.n_nodes)
app.agents = {ip: Agent(ip) for ip in config.agents_ips}
app.malicious_ip = config.malicious_ip
app.agents[app.malicious_ip] = Agent(app.malicious_ip, state='malicious')
app.oriented = config.oriented
app.started = False
app.ring.random_place_agents()
log = open('/tmp/ev3.log', 'a')
    log.write('\n\n=== INIT ===\n\n')
# views
def _communicate_start():
"""Instruct each bot to start."""
port = 31337
for ip in app.agents_ips[::-1] + [app.malicious_ip]:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
# s.sendall(b'Go!\n')
s.close()
@app.route('/start')
def start():
app.started = True
try:
_communicate_start()
except Exception:
pass
return redirect(url_for('index'))
@app.route('/reset')
def reset():
_reset()
return redirect(url_for('index'))
@app.route('/status')
def global_status():
"""Get the whole ring status."""
return jsonify(**app.ring.dump())
@app.route('/get/<agent_ip>')
def get_status(agent_ip):
"""Get the list of agents in the current node."""
agent = app.agents[agent_ip]
    # TODO: also report whether the agent is blocked
return jsonify(agents=[app.agents[ip].state for ip in app.ring.get_node(agent.node).agents if ip != agent_ip],
blocked=app.ring.blocked(agent))
@app.route('/set/<agent_ip>', methods=['GET'])
def set_status(agent_ip):
global log
turned = request.args.get('turned') == '1'
state = request.args.get('state')
stopped = request.args.get('stopped') == '1'
# logging
sss = '\n\n[Request] {} - ip: {}, turned: {}, state: {}, stopped: {}\n'.format(time.time(), agent_ip, turned, state, stopped)
log.write(sss)
log.write('[Status pre]\n')
log.write(str(app.ring.dump()))
agent = app.agents[agent_ip]
agent.state = state
agent.cw = agent.cw if not turned else not agent.cw
blocked = app.ring.blocked(agent)
if not blocked and not stopped:
# advance to the next node if not blocked
node = app.ring.get_node(agent.node)
next_node = app.ring.next(agent)
agent.node = next_node.label
node.agents.remove(agent_ip)
next_node.add_agent(agent_ip)
log.write('\n[Status post]\n')
log.write(str(app.ring.dump()))
return jsonify(blocked=blocked)
@app.route('/')
def index():
return render_template('base.html', started=app.started)
def main():
app.run(host='0.0.0.0', debug=config.debug)
if __name__ == '__main__':
main()
|
To help us most effectively investigate your complaint, please provide all the information requested on the OCCC Complaint Form.
The Office of Consumer Credit Commissioner (OCCC) licenses and regulates non-depository lenders providing consumer loans to Texas residents. We also register creditors who sell goods or services on credit terms.
This office answers questions and assists Texas consumers in resolving issues against regulated companies in Texas concerning pawn transactions, secondary mortgage and home equity loans, motor vehicle sales financing, and property tax loans. We also oversee businesses that offer payday and auto title loans, debt management and debt settlement providers, crafted precious metal dealers, and registered creditors. We protect consumers and assist creditors in resolving complaints and disputes. We establish the facts and apply the appropriate statutes or, when warranted, mediate solutions.
State law prohibits our office or other State of Texas agencies from giving individual citizens legal advice or opinions or acting as their private attorney. If you seek to recover monetary damages, you should consult a private attorney to inform you of your rights and possible remedies.
Your complaint will be included in our Complaint Database for future reference and subject to the OCCC’s record retention policy. Generally, this information is available to both public and private entities under the Texas Public Information Act.
If you have a general question, you can contact our Consumer Assistance helpline at 1-800-538-1579 and speak with one of our Investigators.
You may also download a copy of our complaint form.
For advice on providing the information we need, please see the tips on our Complaint Resolution page.
To find out whether the OCCC has on file any complaints about a particular lender, please send a request in writing to the Public Information Officer via email at [email protected] or fax to 512.936.7610. You can also use our mailing address at 2601 North Lamar Boulevard, Austin TX 78705. Requests for information are answered according to the Texas Public Information Act, and generally have a turnaround time of 10 business days. |
# Generated by Django 2.1.7 on 2019-02-18 20:43

from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('wagtaildocs', '0010_document_file_hash'),
        ('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
        ('taggit', '0002_auto_20150616_2121'),
        ('wagtailimages', '0001_squashed_0021'),
    ]

    operations = [
        migrations.CreateModel(
            name='BlogIndexPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='BlogIndexPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='BlogPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.core.fields.RichTextField()),
                ('body', wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())])),
                ('date', models.DateField(verbose_name='Post date')),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='BlogPageCarouselItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
                ('caption', wagtail.core.fields.RichTextField(blank=True)),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='blog.BlogPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='BlogPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='blog.BlogPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='BlogPageTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='blog.BlogPage')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_blogpagetag_items', to='taggit.Tag')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='blogpage',
            name='tags',
            field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='blog.BlogPageTag', to='taggit.Tag', verbose_name='Tags'),
        ),
        migrations.AddField(
            model_name='blogindexpagerelatedlink',
            name='link_page',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
        migrations.AddField(
            model_name='blogindexpagerelatedlink',
            name='page',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='blog.BlogIndexPage'),
        ),
    ]
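For orientation, the migration above implies model definitions roughly like the sketch below. This is a reconstruction read off the CreateModel operations, following the conventions of the standard Wagtail demo blog; it is not the project's actual models.py, and the related-link and carousel models are omitted for brevity.

# Sketch of the models this migration implies (reconstructed, not the
# actual source). Field names and types are taken directly from the
# CreateModel operations above; class structure follows Wagtail conventions.
from django.db import models
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey
from taggit.models import TaggedItemBase
from wagtail.core import blocks
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Page
from wagtail.images.blocks import ImageChooserBlock


class BlogPageTag(TaggedItemBase):
    # through-model linking tags to pages, as in the BlogPageTag CreateModel
    content_object = ParentalKey('blog.BlogPage', on_delete=models.CASCADE,
                                 related_name='tagged_items')


class BlogPage(Page):
    date = models.DateField(verbose_name='Post date')
    intro = RichTextField()
    body = StreamField([
        ('heading', blocks.CharBlock(classname='full title')),
        ('paragraph', blocks.RichTextBlock()),
        ('image', ImageChooserBlock()),
    ])
    feed_image = models.ForeignKey('wagtailimages.Image', blank=True, null=True,
                                   on_delete=models.SET_NULL, related_name='+')
    tags = ClusterTaggableManager(through=BlogPageTag, blank=True)

With the blog app listed in INSTALLED_APPS, python manage.py makemigrations blog would generate a migration equivalent to the one above, and python manage.py migrate blog applies it.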
|
Description: NEW CONSTRUCTION AT CEDAR EDGE HOMES AT WOODLAND POND! DETACHED CONDOS: ENERGY-EFFICIENT, SINGLE-FAMILY HOMES OFFERING THE CONVENIENCE OF A CONDOMINIUM LIFESTYLE...NO MORE YARD WORK OR SNOW TO SHOVEL! STILL TIME TO CHOOSE YOUR FINISHES AND CUSTOMIZE. LISTED PRICE INCLUDES UNFINISHED WALKOUT LOWER LEVEL. ALSO INCLUDED: A NAT. GAS FIREPLACE, CENT. A/C, HARDWOOD FLOORING THROUGHOUT THE FIRST FLOOR, GRANITE KITCHEN COUNTERS, TILED BATHS, 9 FT CEILINGS, AND A 2-CAR ATTACHED GARAGE. UNBELIEVABLE! HOW ABOUT AN ULTRA CLUBHOUSE WITH I/G SEASONAL POOL, THEATER, AND EXERCISE ROOM...TENNIS, ANYONE? ALL AMENITIES JUST A SHORT WALK FROM HOME. THIS 3 BEDRM, 2.5 BA HOME IS WAITING FOR YOU! DON'T MISS OUT! |