Dataset columns:
repo_name: string, length 6 to 61
path: string, length 4 to 230
copies: string, length 1 to 3
size: string, length 4 to 6
text: string, length 1.01k to 850k
license: string, 15 classes
hash: int64, -9,220,477,234,079,998,000 to 9,219,060,020B
line_mean: float64, 11.6 to 96.6
line_max: int64, 32 to 939
alpha_frac: float64, 0.26 to 0.9
autogenerated: bool, 1 class
ratio: float64, 1.62 to 6.1
config_test: bool, 2 classes
has_no_keywords: bool, 2 classes
few_assignments: bool, 1 class
icce-p/bbgc-ws | BBGCNetCDF.py | 1 | 5082 |
""" Class to solve the creation, serialisation etc. of NetCDF files used
in the BioVel Biome-BGC project. The module is used from the
:mod:`ResultEvaluator`.
"""
# Copyright (c) 2014 MTA Centre for Ecological Research
# Distributed under the GNU GPL v3. For full terms see the file LICENSE.
from Scientific.IO import NetCDF
class BBGCNetCDF:
""" Container of the result data of a MonteCarlo Biome-BGC
experiment
"""
#-----------------------------------------------------------------------
def __init__(self, file_name, repeat_num):
"""
BBGC NetCDF output.
:param file_name: Name of the netcdf file.
:type file_name: str.
"""
#self.project_dir = project_dir
#self.project_name = project_name
self.netcdf = NetCDF.NetCDFFile(file_name, 'w')
#print('RepeatNum: '+str(repeat_num))
self.netcdf.createDimension('repeatNum', repeat_num)
#-----------------------------------------------------------------------
def insert_rand_input_params(self, param_names, param_values):
"""
Insert the values into a matrix and the names of randomised
input variables into a 2D character array, where width
of the array is the length of the longest name.
:param param_names: List of the randomised input parameter names.
:type param_names: List of strings.
:param param_values: Matrix of the input parameters * repeat num.
:type param_values: List of float lists.
"""
# parameter_names matrix
max_name_len = max(map(len, param_names))
name_list = []
for name in param_names:
name_list.append(list(name.encode('ascii', 'ignore')) + ([' '] * (max_name_len - len(name)) ))
#print(name_list)
self.netcdf.createDimension('inputParamNum', len(param_names))
self.netcdf.createDimension('inputParamMaxLen', max_name_len)
self.netcdf.createVariable('inputParamNames','c',('inputParamNum','inputParamMaxLen'))
tmp_var = self.netcdf.variables['inputParamNames']
for i in range(0,len(param_names)):
for j in range(0,max_name_len):
#print(name_list[i][j])
tmp_var[i,j] = name_list[i][j]
#tmp_var.assignValue(name_list)
# randomised input parameter matrix
self.netcdf.createVariable('inputParams', 'f', ('repeatNum','inputParamNum'))
tmp_var = self.netcdf.variables['inputParams']
tmp_var.assignValue(param_values)
#-----------------------------------------------------------------------
def insert_outputs(self, annual_variables, daily_variables,
annout=None, monavgout=None, annavgout=None, dayout=None):
""" Insert the output variables into the NetCDF file
:param annual_variables: Ids of annual output variables.
:type daily_variables: List of ints.
:param daily_variables: Ids of daily output variables.
:type daily_variables: List of ints.
:param annout: Values of annual output. Repeat num x var num. x years.
:type annout: 3D float List.
"""
year_num_dim = False
ann_var_dim_name = 'annualVarNum'
ann_var_ids_name = 'annualVarIds'
self.netcdf.createDimension('annualVarNum',len(annual_variables))
self.netcdf.createVariable('annualVarIds', 'i', ('annualVarNum',))
self.netcdf.variables['annualVarIds'].assignValue(annual_variables)
day_var_dim_name = 'dailyVarNum'
day_var_ids_name = 'dailyVarIds'
self.netcdf.createDimension(day_var_dim_name,len(daily_variables))
self.netcdf.createVariable(day_var_ids_name, 'i', (day_var_dim_name,))
self.netcdf.variables[day_var_ids_name].assignValue(daily_variables)
if annout:
self.netcdf.createDimension('yearNum',len(annout[0]))
year_num_dim = True
self.netcdf.createVariable('annout', 'f', ('repeatNum' ,'yearNum' ,ann_var_dim_name))
self.netcdf.variables['annout'].assignValue(annout)
if monavgout:
self.netcdf.createDimension('monthNum',len(monavgout[0]))
self.netcdf.createVariable('monavgout', 'f', ('repeatNum','monthNum' ,day_var_dim_name))
self.netcdf.variables['monavgout'].assignValue(monavgout)
if annavgout:
if not year_num_dim:
self.netcdf.createDimension('yearNum',len(annavgout[0]))
self.netcdf.createVariable('annavgout', 'f', ('repeatNum','yearNum' ,day_var_dim_name))
self.netcdf.variables['annavgout'].assignValue(annavgout)
if dayout:
self.netcdf.createDimension('dayNum',len(dayout[0]))
self.netcdf.createVariable('dayout', 'f', ('repeatNum','dayNum' ,day_var_dim_name))
self.netcdf.variables['dayout'].assignValue(dayout)
#-----------------------------------------------------------------------
def close(self):
self.netcdf.close()
| gpl-3.0 | 2,878,319,306,538,595,000 | 42.067797 | 106 | 0.591696 | false | 3.79537 | false | false | false |
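A minimal usage sketch for the class above, assuming the legacy Scientific.IO.NetCDF package is installed; the file name, parameter names, variable ids and values are illustrative only:

from BBGCNetCDF import BBGCNetCDF

# three Monte-Carlo repetitions, two randomised input parameters
out = BBGCNetCDF('mc_results.nc', repeat_num=3)
out.insert_rand_input_params(['leaf_cn', 'froot_cn'],
                             [[35.0, 48.0], [36.1, 50.2], [34.2, 47.5]])
# one annual variable, one daily variable; annout is repeat_num x years x variables
out.insert_outputs([2500], [620],
                   annout=[[[1.0], [1.1]], [[0.9], [1.2]], [[1.3], [0.8]]])
out.close()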
agabrown/PyGaia | pygaia/errors/spectroscopic.py | 1 | 1717 |
__all__ = ['vrad_error_sky_avg']
import numpy as np
_vradErrorACoeff = {'B0V': 0.90, 'B5V': 0.90, 'A0V': 1.0, 'A5V': 1.15, 'F0V': 1.15, 'G0V': 1.15, 'G5V': 1.15,
'K0V': 1.15, 'K1IIIMP': 1.15, 'K4V': 1.15, 'K1III': 1.15}
_vradErrorBCoeff = {'B0V': 50.00, 'B5V': 26.00, 'A0V': 5.50, 'A5V': 4.00, 'F0V': 1.50, 'G0V': 0.70, 'G5V': 0.60,
'K0V': 0.50, 'K1IIIMP': 0.39, 'K4V': 0.29, 'K1III': 0.21}
_vradCalibrationFloor = 0.5
_vradMagnitudeZeroPoint = 12.7
_nominal_mission_length = 5.0
def vrad_error_sky_avg(vmag, spt, extension=0.0):
"""
Calculate radial velocity error from V and the spectral type. The value of the error is an average over
the sky.
Parameters
----------
vmag : Value(s) of V-band magnitude.
spt : String or array of strings representing the spectral type of the star.
Keywords
--------
extension : Add this amount of years to the mission lifetime and scale the errors accordingly. Value can be
negative for shorter mission spans (early data releases).
Returns
-------
The radial velocity error in km/s.
"""
errscaling = 1.0 / np.sqrt((_nominal_mission_length + extension) / _nominal_mission_length)
if np.isscalar(spt):
return _vradCalibrationFloor + _vradErrorBCoeff[spt] * np.exp(
_vradErrorACoeff[spt] * (vmag - _vradMagnitudeZeroPoint)) * errscaling
else:
uncertainties = np.zeros_like(vmag)
for i, v, s in zip(range(vmag.size), vmag, spt):
uncertainties[i] = _vradCalibrationFloor + _vradErrorBCoeff[s] * np.exp(
_vradErrorACoeff[s] * (v - _vradMagnitudeZeroPoint)) * errscaling
return uncertainties
| lgpl-3.0 | 1,875,366,522,975,445,200 | 37.155556 | 112 | 0.609785 | false | 2.764895 | false | false | false |
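A short sketch of calling the function above; the magnitudes and spectral types are arbitrary examples:

import numpy as np
from pygaia.errors.spectroscopic import vrad_error_sky_avg

# single star: V = 15, spectral type G0V
sigma_single = vrad_error_sky_avg(15.0, 'G0V')

# several stars at once (arrays of magnitudes and spectral types)
vmags = np.array([12.0, 14.5, 16.0])
spts = np.array(['B0V', 'G5V', 'K4V'])
sigmas = vrad_error_sky_avg(vmags, spts)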
useEvil/flickr-photo-sync | flickrphotosync/photosync/management/commands/download.py | 1 | 3263 |
import os, errno, sys, pytz, urllib
import datetime as date
from PIL import Image
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
from photosync.models import Photo, PhotoSet, Collection
from photosync.flickr import Flickr
from photosync.helpers import *
class Command(BaseCommand):
args = '<photoset photoset ...>'
help = 'Downloads photos from a photoset on Flickr'
flickr = Flickr()
user = User.objects.get(pk=1)
option_list = BaseCommand.option_list + (
make_option('--all', action='store_true', dest='all', default=False, help='Retrieve all photosets'),
make_option('--dry', action='store_true', dest='dry', default=False, help='Only do a dry run'),
make_option('--backup', action='store_true', dest='backup', default=False, help='Set backup flag to True'),
make_option('--directory', action='store', dest='directory', default=False, help='Match this directory'),
)
def handle(self, *args, **options):
set_options(self, options, ['all', 'dry', 'backup', 'directory'])
if options.get('all'):
photosets = PhotoSet.objects.all()
for photoset in photosets:
self.get_photoset(photoset)
self.stdout.write('Successfully Downloaded Photos in PhotoSet "{0}"'.format(photoset))
else:
for photoset in args:
try:
set = PhotoSet.objects.get(slug=photoset)
self.get_photoset(set)
self.stdout.write('Successfully Downloaded Photos in PhotoSet "{0}"'.format(photoset))
except PhotoSet.DoesNotExist:
raise CommandError('PhotoSet "{0}" does not exist'.format(photoset))
def get_photoset(self, photoset):
self.stdout.write('==== Processing PhotoSet [{0}][{1}]'.format(photoset.title, photoset.slug))
set = self.flickr.get_photoset(photoset.slug)
if photoset.total < set.attrib['photos'] or self.backup:
download_path = settings.PHOTO_DOWNLOAD_DIR.format(self.user.username)
download_dir = os.path.join(download_path, photoset.title)
self.make_directory(download_dir)
for photo in photoset.photos.all():
self.stdout.write('==== Downloading Photo [{0}]'.format(photo.file_name))
if not self.dry and not os.path.isfile(photo.file_name):
size = self.flickr.get_photo_size(photo.slug)
photo_path = os.path.join(download_dir, photo.file_name)
print '==== photo_path [{0}]'.format(photo_path)
urllib.urlretrieve(size.get('source'), photo_path)
def make_directory(self, path):
try:
os.makedirs(path)
self.stdout.write('==== Creating Directory [{0}]'.format(path))
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
self.stdout.write('==== Directory already exists [{0}]'.format(path))
pass
else:
raise CommandError('Processing Error "{0}"'.format(exc))
| mit | -2,673,812,735,853,304,000 | 45.614286 | 115 | 0.614465 | false | 3.969586 | false | false | false |
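The class above is a Django management command, so it is normally invoked through manage.py; the sketch below shows the programmatic equivalent, assuming a configured Django project and a hypothetical photoset slug:

from django.core.management import call_command

# equivalent to: python manage.py download holiday-2014 --backup
call_command('download', 'holiday-2014', backup=True)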
vikhyat/dask | dask/array/tests/test_wrap.py | 7 | 1577 |
import pytest
pytest.importorskip('numpy')
from dask.array.wrap import ones
import dask.array as da
import numpy as np
import dask
def test_ones():
a = ones((10, 10), dtype='i4', chunks=(4, 4))
x = np.array(a)
assert (x == np.ones((10, 10), 'i4')).all()
def test_size_as_list():
a = ones([10, 10], dtype='i4', chunks=(4, 4))
x = np.array(a)
assert (x == np.ones((10, 10), dtype='i4')).all()
def test_singleton_size():
a = ones(10, dtype='i4', chunks=(4,))
x = np.array(a)
assert (x == np.ones(10, dtype='i4')).all()
def test_kwargs():
a = ones(10, dtype='i4', chunks=(4,))
x = np.array(a)
assert (x == np.ones(10, dtype='i4')).all()
def test_full():
a = da.full((3, 3), 100, chunks=(2, 2), dtype='i8')
assert (a.compute() == 100).all()
assert a._dtype == a.compute(get=dask.get).dtype == 'i8'
def test_can_make_really_big_array_of_ones():
a = ones((1000000, 1000000), chunks=(100000, 100000))
a = ones(shape=(1000000, 1000000), chunks=(100000, 100000))
def test_wrap_consistent_names():
assert sorted(ones(10, dtype='i4', chunks=(4,)).dask) ==\
sorted(ones(10, dtype='i4', chunks=(4,)).dask)
assert sorted(ones(10, dtype='i4', chunks=(4,)).dask) !=\
sorted(ones(10, chunks=(4,)).dask)
assert sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='f8').dask) ==\
sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='f8').dask)
assert sorted(da.full((3, 3), 100, chunks=(2, 2), dtype='f8').dask) !=\
sorted(da.full((3, 3), 100, chunks=(2, 2)).dask)
| bsd-3-clause | 5,999,699,896,511,672,000 | 31.854167 | 75 | 0.570704 | false | 2.742609 | true | false | false |
vxvinh1511/djangoproject.com | fundraising/tests.py | 1 | 18822 |
import json
import os
from datetime import date
from functools import partial
from unittest.mock import patch
import stripe
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from django_hosts.resolvers import reverse as django_hosts_reverse
from PIL import Image
from .exceptions import DonationError
from .forms import PaymentForm
from .models import Campaign, DjangoHero, Donation, Payment
from .templatetags.fundraising_extras import donation_form_with_heart
def _fake_random(*results):
"""
Return a callable that generates the given results when called.
Useful for mocking random.random().
Example:
>>> r = _fake_random(1, 2, 3)
>>> r()
1
>>> r()
2
>>> r()
3
"""
return partial(next, iter(results))
class TestIndex(TestCase):
@classmethod
def setUpTestData(cls):
Campaign.objects.create(name='test', goal=200, slug='test', is_active=True, is_public=True)
def test_redirect(self):
response = self.client.get(reverse('fundraising:index'))
self.assertEqual(response.status_code, 302)
def test_index(self):
Campaign.objects.create(name='test2', goal=200, slug='test2', is_active=True, is_public=True)
response = self.client.get(reverse('fundraising:index'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['campaigns']), 2)
class TestCampaign(TestCase):
def setUp(self):
self.campaign = Campaign.objects.create(name='test', goal=200, slug='test', is_active=True, is_public=True)
self.campaign_url = reverse('fundraising:campaign', args=[self.campaign.slug])
def test_donors_count(self):
donor = DjangoHero.objects.create()
Donation.objects.create(campaign=self.campaign, donor=donor)
response = donation_form_with_heart({'user': None}, self.campaign)
self.assertEqual(response['total_donors'], 1)
def test_anonymous_donor(self):
hero = DjangoHero.objects.create(
is_visible=True, approved=True, hero_type='individual')
Donation.objects.create(donor=hero, subscription_amount='5', campaign=self.campaign)
response = self.client.get(self.campaign_url)
self.assertContains(response, 'Anonymous Hero')
def test_anonymous_donor_with_logo(self):
hero = DjangoHero.objects.create(
is_visible=True, approved=True,
hero_type='individual', logo='yes') # We don't need an actual image
Donation.objects.create(donor=hero, campaign=self.campaign)
response = self.client.get(self.campaign_url)
self.assertContains(response, 'Anonymous Hero')
def test_that_campaign_is_always_visible_input(self):
response = self.client.get(self.campaign_url)
self.assertContains(response, 'name="campaign"')
def test_submitting_donation_form_missing_token(self):
url = reverse('fundraising:donate')
response = self.client.post(url, {'amount': 100})
content = json.loads(response.content.decode())
self.assertEqual(200, response.status_code)
self.assertFalse(content['success'])
def test_submitting_donation_form_invalid_amount(self):
url = reverse('fundraising:donate')
response = self.client.post(url, {
'amount': 'superbad',
'stripe_token': 'test',
'interval': 'onetime',
})
content = json.loads(response.content.decode())
self.assertEqual(200, response.status_code)
self.assertFalse(content['success'])
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_submitting_donation_form(self, charge_create, customer_create):
charge_create.return_value.id = 'XYZ'
customer_create.return_value.id = '1234'
self.client.post(reverse('fundraising:donate'), {
'amount': 100,
'stripe_token': 'test',
'receipt_email': '[email protected]',
'interval': 'onetime',
})
donations = Donation.objects.all()
self.assertEqual(donations.count(), 1)
self.assertEqual(donations[0].subscription_amount, None)
self.assertEqual(donations[0].total_payments(), 100)
self.assertEqual(donations[0].receipt_email, '[email protected]')
self.assertEqual(donations[0].stripe_subscription_id, '')
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_submitting_donation_form_recurring(self, charge_create, customer_create):
customer_create.return_value.id = '1234'
customer_create.return_value.subscriptions.create.return_value.id = 'XYZ'
self.client.post(reverse('fundraising:donate'), {
'amount': 100,
'stripe_token': 'test',
'receipt_email': '[email protected]',
'interval': 'monthly',
})
donations = Donation.objects.all()
self.assertEqual(donations.count(), 1)
self.assertEqual(donations[0].subscription_amount, 100)
self.assertEqual(donations[0].total_payments(), 100)
self.assertEqual(donations[0].receipt_email, '[email protected]')
self.assertEqual(donations[0].payment_set.first().stripe_charge_id, '')
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_submitting_donation_form_with_campaign(self, charge_create, customer_create):
charge_create.return_value.id = 'XYZ'
customer_create.return_value.id = '1234'
self.client.post(reverse('fundraising:donate'), {
'amount': 100,
'campaign': self.campaign.id,
'stripe_token': 'test',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
donations = Donation.objects.all()
self.assertEqual(donations.count(), 1)
self.assertEqual(donations[0].total_payments(), 100)
self.assertEqual(donations[0].campaign, self.campaign)
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_submitting_donation_form_error_handling(self, charge_create, customer_create):
data = {
'amount': 100,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
}
form = PaymentForm(data=data)
self.assertTrue(form.is_valid())
# some errors are shown to the user as user-facing DonationErrors
# some are bubbling up to raise a 500 to trigger Sentry reports
errors = [
[stripe.error.CardError, DonationError],
[stripe.error.InvalidRequestError, DonationError],
[stripe.error.APIConnectionError, DonationError],
[stripe.error.AuthenticationError, None],
[stripe.error.StripeError, None],
[ValueError, None],
]
for backend_exception, user_exception in errors:
customer_create.side_effect = backend_exception('message', 'param', 'code')
if user_exception is None:
self.assertRaises(backend_exception, form.make_donation)
else:
response = self.client.post(reverse('fundraising:donate'), data)
content = json.loads(response.content.decode())
self.assertFalse(content['success'])
@patch('fundraising.forms.PaymentForm.make_donation')
def test_submitting_donation_form_valid(self, make_donation):
amount = 100
donation = Donation.objects.create(
stripe_customer_id='xxxx',
)
Payment.objects.create(
donation=donation,
amount=amount,
stripe_charge_id='xxxx',
)
make_donation.return_value = donation
response = self.client.post(reverse('fundraising:donate'), {
'amount': amount,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
content = json.loads(response.content.decode())
self.assertEquals(200, response.status_code)
self.assertTrue(content['success'])
self.assertEqual(content['redirect'], donation.get_absolute_url())
@patch('stripe.Customer.retrieve')
def test_cancel_donation(self, retrieve_customer):
donor = DjangoHero.objects.create()
donation = Donation.objects.create(
campaign=self.campaign, donor=donor,
stripe_subscription_id='12345', stripe_customer_id='54321',
)
url = reverse(
'fundraising:cancel-donation',
kwargs={'hero': donor.id, 'donation': donation.id}
)
response = self.client.get(url)
self.assertRedirects(response, reverse('fundraising:manage-donations',
kwargs={'hero': donor.id}))
retrieve_customer.assert_called_once_with('54321')
donation = Donation.objects.get(id=donation.id)
self.assertEqual('', donation.stripe_subscription_id)
@patch('stripe.Customer.retrieve')
def test_cancel_already_cancelled_donation(self, retrieve_customer):
donor = DjangoHero.objects.create()
donation = Donation.objects.create(
campaign=self.campaign, donor=donor, stripe_subscription_id=''
)
url = reverse(
'fundraising:cancel-donation',
kwargs={'hero': donor.id, 'donation': donation.id}
)
response = self.client.get(url)
self.assertEquals(404, response.status_code)
self.assertFalse(retrieve_customer.called)
class TestDjangoHero(TestCase):
def setUp(self):
kwargs = {
'approved': True,
'is_visible': True,
}
self.campaign = Campaign.objects.create(name='test', goal=200, slug='test', is_active=True, is_public=True)
self.h1 = DjangoHero.objects.create(**kwargs)
d1 = Donation.objects.create(donor=self.h1, campaign=self.campaign)
Payment.objects.create(donation=d1, amount='5')
self.h2 = DjangoHero.objects.create(**kwargs)
d2 = Donation.objects.create(donor=self.h2, campaign=self.campaign)
Payment.objects.create(donation=d2, amount='15')
self.h3 = DjangoHero.objects.create(**kwargs)
d3 = Donation.objects.create(donor=self.h3, campaign=self.campaign)
Payment.objects.create(donation=d3, amount='10')
self.today = date.today()
def test_thumbnail(self):
try:
os.makedirs(os.path.join(settings.MEDIA_ROOT, 'fundraising/logos/'))
except OSError: # directory may already exist
pass
image_path = os.path.join(settings.MEDIA_ROOT, 'fundraising/logos/test_logo.jpg')
image = Image.new('L', (500, 500))
image.save(image_path)
self.h1.logo = image_path
self.h1.save()
thumbnail = self.h1.thumbnail
self.assertEqual(thumbnail.x, 170)
self.assertEqual(thumbnail.y, 170)
os.remove(image_path)
self.assertTrue(
os.path.exists(
thumbnail.url.replace(settings.MEDIA_URL, '{}/'.format(settings.MEDIA_ROOT))
)
)
def test_thumbnail_no_logo(self):
self.assertIsNone(self.h2.thumbnail)
def test_name_with_fallback(self):
hero = DjangoHero()
self.assertEqual(hero.name_with_fallback, 'Anonymous Hero')
hero.name = 'Batistek'
self.assertEqual(hero.name_with_fallback, 'Batistek')
class TestPaymentForm(TestCase):
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_make_donation(self, charge_create, customer_create):
customer_create.return_value.id = 'xxxx'
charge_create.return_value.id = 'xxxx'
form = PaymentForm(data={
'amount': 100,
'campaign': None,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
self.assertTrue(form.is_valid())
donation = form.make_donation()
self.assertEqual(100, donation.payment_set.first().amount)
@patch('stripe.Customer.retrieve')
@patch('stripe.Charge.create')
def test_make_donation_with_existing_hero(self, charge_create, customer_retrieve):
charge_create.return_value.id = 'XYZ'
customer_retrieve.return_value.id = '12345'
hero = DjangoHero.objects.create(
email='[email protected]',
stripe_customer_id=customer_retrieve.return_value.id,
)
form = PaymentForm(data={
'amount': 100,
'campaign': None,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
self.assertTrue(form.is_valid())
donation = form.make_donation()
self.assertEqual(100, donation.payment_set.first().amount)
self.assertEqual(hero, donation.donor)
self.assertEqual(hero.stripe_customer_id, donation.stripe_customer_id)
@patch('stripe.Customer.create')
@patch('stripe.Charge.create')
def test_make_donation_exception(self, charge_create, customer_create):
customer_create.side_effect = ValueError("Something is wrong")
form = PaymentForm(data={
'amount': 100,
'campaign': None,
'stripe_token': 'xxxx',
'interval': 'onetime',
'receipt_email': '[email protected]',
})
self.assertTrue(form.is_valid())
with self.assertRaises(ValueError):
donation = form.make_donation()
self.assertIsNone(donation)
class TestThankYou(TestCase):
def setUp(self):
self.donation = Donation.objects.create(
stripe_customer_id='cu_123',
receipt_email='[email protected]',
)
Payment.objects.create(
donation=self.donation,
amount='20',
)
self.url = reverse('fundraising:thank-you', args=[self.donation.pk])
self.hero_form_data = {
'hero_type': DjangoHero.HERO_TYPE_CHOICES[1][0],
'name': 'Django Inc',
}
def add_donor(self, **kwargs):
hero = DjangoHero.objects.create(**kwargs)
self.donation.donor = hero
self.donation.save()
return hero
def test_template_without_donor(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'fundraising/thank-you.html')
self.assertFalse(response.context['form'].instance.pk)
self.assertEqual(response.context['donation'], self.donation)
def test_template_with_donor(self):
self.add_donor()
response = self.client.get(self.url)
self.assertEqual(response.context['form'].instance, self.donation.donor)
@patch('stripe.Customer.retrieve')
def test_update_hero(self, retrieve_customer):
hero = self.add_donor(
email='[email protected]',
stripe_customer_id='1234',
name='Under Dog'
)
response = self.client.post(self.url, self.hero_form_data)
self.assertRedirects(response, reverse('fundraising:index'))
hero = DjangoHero.objects.get(pk=hero.id)
self.assertEqual(hero.name, self.hero_form_data['name'])
retrieve_customer.assert_called_once_with(hero.stripe_customer_id)
customer = retrieve_customer.return_value
self.assertEqual(customer.description, hero.name)
self.assertEqual(customer.email, hero.email)
customer.save.assert_called_once_with()
def test_create_hero_for_donation_with_campaign(self):
campaign = Campaign.objects.create(
name='test',
goal=200,
slug='test',
is_active=True,
is_public=True,
)
self.donation.campaign = campaign
self.donation.save()
with patch('stripe.Customer.retrieve'):
response = self.client.post(self.url, self.hero_form_data)
# Redirects to the campaign's page instead
expected_url = reverse('fundraising:campaign', args=[campaign.slug])
self.assertRedirects(response, expected_url)
class TestWebhooks(TestCase):
def setUp(self):
self.hero = DjangoHero.objects.create(email='[email protected]')
self.donation = Donation.objects.create(
donor=self.hero,
interval='monthly',
stripe_customer_id='cus_3MXPY5pvYMWTBf',
stripe_subscription_id='sub_3MXPaZGXvVZSrS',
)
def stripe_data(self, filename):
file_path = settings.BASE_DIR.joinpath(
'fundraising/test_data/{}.json'.format(filename))
with file_path.open() as f:
data = json.load(f)
return stripe.resource.convert_to_stripe_object(data, stripe.api_key)
def post_event(self):
return self.client.post(
reverse('fundraising:receive-webhook'),
data='{"id": "evt_12345"}',
content_type='application/json',
)
@patch('stripe.Event.retrieve')
def test_record_payment(self, event):
event.return_value = self.stripe_data('invoice_succeeded')
response = self.post_event()
self.assertEqual(response.status_code, 201)
self.assertEqual(self.donation.payment_set.count(), 1)
payment = self.donation.payment_set.first()
self.assertEqual(payment.amount, 10)
@patch('stripe.Event.retrieve')
def test_subscription_cancelled(self, event):
event.return_value = self.stripe_data('subscription_cancelled')
self.post_event()
donation = Donation.objects.get(id=self.donation.id)
self.assertEqual(donation.stripe_subscription_id, '')
self.assertEqual(len(mail.outbox), 1)
expected_url = django_hosts_reverse('fundraising:donate')
self.assertTrue(expected_url in mail.outbox[0].body)
@patch('stripe.Event.retrieve')
def test_payment_failed(self, event):
event.return_value = self.stripe_data('payment_failed')
self.post_event()
self.assertEqual(len(mail.outbox), 1)
expected_url = django_hosts_reverse('fundraising:manage-donations', kwargs={'hero': self.hero.id})
self.assertTrue(expected_url in mail.outbox[0].body)
@patch('stripe.Event.retrieve')
def test_no_such_event(self, event):
event.side_effect = stripe.error.InvalidRequestError(
message='No such event: evt_12345',
param='id'
)
response = self.post_event()
self.assertTrue(response.status_code, 422)
| bsd-3-clause | 2,764,254,014,664,502,000 | 38.294363 | 115 | 0.626873 | false | 3.802424 | true | false | false |
pferreir/indico-backup | indico/MaKaC/webinterface/rh/fileAccess.py | 1 | 5634 |
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import session
import os
from copy import copy
import MaKaC.webinterface.urlHandlers as urlHandlers
from MaKaC.webinterface.rh.conferenceBase import RHFileBase, RHLinkBase
from MaKaC.webinterface.rh.base import RH, RHDisplayBaseProtected
from MaKaC.webinterface.rh.conferenceModif import RHConferenceModifBase
from MaKaC.webinterface.pages import files
from MaKaC.errors import NotFoundError, AccessError
from MaKaC.registration import Registrant
from MaKaC.conference import Reviewing, Link
from MaKaC.webinterface.rh.contribMod import RCContributionPaperReviewingStaff
from MaKaC.i18n import _
from indico.web.flask.util import send_file
from indico.modules import ModuleHolder
class RHFileAccess(RHFileBase, RHDisplayBaseProtected):
_uh = urlHandlers.UHFileAccess
def _checkParams( self, params ):
try:
RHFileBase._checkParams( self, params )
except:
raise NotFoundError("The file you try to access does not exist.")
def _checkProtection( self ):
if isinstance(self._file.getOwner(), Reviewing):
selfcopy = copy(self)
selfcopy._target = self._file.getOwner().getContribution()
if not (RCContributionPaperReviewingStaff.hasRights(selfcopy) or \
selfcopy._target.canUserSubmit(self.getAW().getUser()) or \
self._target.canModify( self.getAW() )):
raise AccessError()
elif isinstance(self._file.getOwner(), Registrant) and \
not self._file.getOwner().canUserModify(self.getAW().getUser()):
raise AccessError(_("The access to this resource is forbidden"))
else:
RHDisplayBaseProtected._checkProtection( self )
def _process( self ):
self._notify('materialDownloaded', self._file)
if isinstance(self._file, Link):
self._redirect(self._file.getURL())
elif self._file.getId() == "minutes":
p = files.WPMinutesDisplay(self, self._file )
return p.display()
else:
return send_file(self._file.getFileName(), self._file.getFilePath(), self._file.getFileType(),
self._file.getCreationDate())
class RHFileAccessStoreAccessKey(RHFileBase):
_uh = urlHandlers.UHFileEnterAccessKey
def _checkParams(self, params):
RHFileBase._checkParams(self, params)
self._accesskey = params.get("accessKey", "").strip()
self._doNotSanitizeFields.append("accessKey")
def _checkProtection(self):
pass
def _process(self):
access_keys = session.setdefault('accessKeys', {})
access_keys[self._target.getOwner().getUniqueId()] = self._accesskey
session.modified = True
self._redirect(urlHandlers.UHFileAccess.getURL(self._target))
class RHVideoWmvAccess( RHLinkBase, RHDisplayBaseProtected ):
_uh = urlHandlers.UHVideoWmvAccess
def _checkParams( self, params ):
try:
RHLinkBase._checkParams( self, params )
except:
raise NotFoundError("The file you try to access does not exist.")
def _checkProtection( self ):
"""targets for this RH are exclusively URLs so no protection apply"""
return
def _process( self ):
p = files.WPVideoWmv(self, self._link )
return p.display()
class RHVideoFlashAccess( RHLinkBase, RHDisplayBaseProtected ):
_uh = urlHandlers.UHVideoFlashAccess
def _checkParams( self, params ):
try:
RHLinkBase._checkParams( self, params )
except:
raise NotFoundError("The file you try to access does not exist.")
def _checkProtection( self ):
"""targets for this RH are exclusively URLs so no protection apply"""
return
def _process( self ):
p = files.WPVideoFlash(self, self._link )
return p.display()
class RHOfflineEventAccess(RHConferenceModifBase):
_uh = urlHandlers.UHOfflineEventAccess
def _checkParams(self, params):
RHConferenceModifBase._checkParams(self, params)
if 'fileId' not in params:
raise NotFoundError(_("Missing 'fileId' argument."))
self._offlineEvent = ModuleHolder().getById("offlineEvents").getOfflineEventByFileId(params["confId"],
params["fileId"])
if not self._offlineEvent or not self._offlineEvent.file or \
not os.path.isfile(self._offlineEvent.file.getFilePath()):
raise NotFoundError(_("The file you try to access does not exist anymore."))
def _process(self):
f = self._offlineEvent.file
return send_file('event-%s.zip' % self._conf.getId(), f.getFilePath(), f.getFileType(),
last_modified=self._offlineEvent.creationTime, inline=False)
| gpl-3.0 | -4,598,389,109,649,951,000 | 38.398601 | 110 | 0.664714 | false | 3.987261 | false | false | false |
inflatus/Python | Networking/portscanner.py | 1 | 1792 |
# portscanner for IPv4 addresses and/or hostnames
# probing addresses is invasive
# make sure you are doing the right thing
import validators
import socket
import subprocess
import sys
from datetime import datetime
# clearing the screen
subprocess.call('clear', shell=True)
def is_valid(address):
# returns the validator result, True or False.
return validators.ip_address.ipv4(address) or validators.domain(address)
while True: # True is always True. This loop will never end.
remote_server = input('Enter a remote host to scan: ')
if remote_server == 'exit':
sys.exit(0)
if is_valid(remote_server):
break
else:
print(
'This address was not recognized as a valid IPv4 address or hostname.'
'Please try again. Type \'exit\' to quit.'
)
remote_serverIP = socket.gethostbyname(remote_server)
# print the scanning ip
print('*' * 60)
print('Please wait, scanning well-known ports on remote host', remote_serverIP)
print('*' * 60)
# time scan started
start_time = datetime.now()
# scan all ports between 1 and 1024
try:
for port in range(1, 1025):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((remote_serverIP, port))
if result == 0:
print('Port {}: Open'.format(port))
sock.close()
# error handling
except KeyboardInterrupt:
print('You pressed Ctrl+C')
sys.exit(1)
except socket.gaierror:
print('Hostname could not be resolved')
sys.exit(1)
except socket.error:
print('Could not connect to server')
sys.exit(1)
# time for script to finish
end_time = datetime.now()
completion_time = end_time - start_time
# print completion time
print('Scanning completed in: ', completion_time)
| mit | -9,015,522,186,552,988,000 | 24.971014 | 88 | 0.68192 | false | 3.687243 | false | false | false |
recap/pumpkin | examples/prefactor/tasks/calibcalc.py | 1 | 1395 |
__author__ = 'reggie'
###START-CONF
##{
##"object_name": "calibcalc",
##"object_poi": "my-calibcalc-1234",
##"auto-load" : true,
##"parameters": [ {
## "name": "skymodel",
## "description": "",
## "required": true,
## "type": "String",
## "state" : "SKYMODEL"
## } ],
##"return": [
## {
## "name": "calibcalcing",
## "description": "a calibcalcing",
## "required": true,
## "type": "String",
## "state" : "CALIBRATED"
## }
##
## ] }
##END-CONF
from pumpkin import *
from subprocess import Popen
class calibcalc(PmkSeed.Seed):
def __init__(self, context, poi=None):
PmkSeed.Seed.__init__(self, context,poi)
pass
def run(self, pkt, name):
self.logger.info("[calibcalc] processing: " + str(name[0]))
input_folder = name[0]
skymodel = input_folder + '/selected.skymodel'
cmd = ["/usr/bin/calibrate-stand-alone",
"--numthreads",
"1",
input_folder,
"/usr/share/prefactor/parsets/calibcal.parset",
skymodel]
Popen(cmd, env={"TMPDIR":"/tmp", "HOME":input_folder, "LOFARROOT":"/usr"}).communicate()
self.logger.info("[calibcalc] done: " + str(name[0]))
self.dispatch(pkt, input_folder, "CALIBRATED")
pass
| mit | 4,136,251,798,795,798,500 | 23.910714 | 89 | 0.499642 | false | 3.184932 | false | false | false |
c3e/TanteMateLaden | TanteMateLaden/TanteMateLaden/urls.py | 1 | 2415 |
"""TanteMateLaden URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from rest_framework import routers
# noinspection PyUnresolvedReferences
from store import views
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
router = routers.DefaultRouter()
router.register(r'accounts', views.AccountViewSet)
router.register(r'drinks', views.DrinkViewSet)
router.register(r'items', views.ItemViewSet)
router.register(r'transactions', views.TransactionLogViewSet, 'transactionlog')
urlpatterns = [
url('^$', views.indexView, name='index'),
url('^', include('django.contrib.auth.urls')),
url(r'^signup/$', views.signup, name='signup'),
url(r'^account/$', views.accountView, name='account-index'),
url(r'^stats/$', views.statsView, name='stats'),
url(r'^admin/', admin.site.urls),
url(r'^template/', views.templateView),
url(r'^api/buy/item/(?P<item_slug>[\w-]+)/$', views.BuyItemView),
url(r'^api/buy/item/(?P<item_slug>[\w-]+)/(?P<item_amount>[0-9]+)/$', views.BuyItemView),
url(r'^api/buy/item/(?P<user_id>[0-9\w-]+)/(?P<item_slug>[\w-]+)/$', views.BuyItemView),
url(r'^api/buy/item/(?P<user_id>[0-9\w-]+)/(?P<item_slug>[\w-]+)/(?P<item_amount>[0-9]+)/$', views.BuyItemView, name='buy-item'),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
#add debug_toolbar urls
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| mit | -705,898,773,161,818,200 | 42.142857 | 133 | 0.689027 | false | 3.331034 | false | false | false
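A sketch of resolving one of the named routes above from application code; the user id, item slug and amount are placeholders:

from django.core.urlresolvers import reverse

url = reverse('buy-item', kwargs={
    'user_id': '1', 'item_slug': 'club-mate', 'item_amount': '2',
})
# -> '/api/buy/item/1/club-mate/2/'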
nmpiazza/Hermes | app/mod_check/IMAP.py | 1 | 2529 |
from ..mod_check import app
import imaplib
from logging import getLogger
logger = getLogger('mod_check.IMAP')
@app.task
def check(host, port, username, password, use_ssl=False):
# initialize result to None
result = None
try:
# create the IMAP connection object
if use_ssl:
connection = imaplib.IMAP4_SSL(host=host, port=port)
else:
connection = imaplib.IMAP4(host=host, port=port)
logger.debug('connection: %s', connection)
with connection as mail:
mail.login(user=username, password=password)
scoring_exists = False
# check if the scoring mailbox exists
mb_res, _ = mail.select('Scoring')
if mb_res == 'OK':
# if it does, mark it for later use
scoring_exists = True
else:
# if the scoring mailbox doesn't exist, select the inbox
mb_res, _ = mail.select('INBOX')
# if the result was OK (for either scoring or inbox)
if mb_res == 'OK':
# retrieve the ScoringCheck email
search_res, search_data = mail.uid('search', None, 'SUBJECT', 'ScoringCheck-')
if search_res == 'OK' and len(search_data) > 0:
# split the email UIDs and check for the
email_uids = search_data[0].split()
if len(email_uids) > 0:
latest_email_uid = email_uids[-1]
result, data = mail.uid('fetch', latest_email_uid, '(RFC822.TEXT)')
result = data[0][1].decode('utf8')
if not scoring_exists:
res, _ = mail.create('Scoring')
if res == 'OK':
res, _ = mail.copy(latest_email_uid, 'Scoring')
if res != 'OK':
logger.error('Error copying email to Scoring mailbox')
else:
logger.error('Error creating Scoring mailbox')
else:
logger.error('No messages fetched')
else:
logger.error('Error getting all messages')
else:
logger.error('Scoring mailbox does not exist')
except (imaplib.IMAP4_SSL.error, imaplib.IMAP4.error) as e:
logger.exception(e)
result = False
return result
| mit | -3,881,200,669,202,220,500 | 37.318182 | 94 | 0.497825 | false | 4.556757 | false | false | false |
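Since the decorated function remains directly callable (as Celery-style tasks are), a quick manual test might look like the sketch below; the host and credentials are placeholders:

result = check('imap.example.org', 993, 'scoring-user', 'secret', use_ssl=True)
if result is None:
    print('No scoring email retrieved (or mailbox error, see the log)')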
ajdiaz/mole | mole/action/timespan.py | 1 | 2018 |
#! /usr/bin/env python
# -*- encoding: utf-8 -*-
# vim:fenc=utf-8:
from mole.event import Event
from mole.action import Action, ActionSyntaxError
from mole.helper.timeformat import Timespan
def func_avg(field, events):
return reduce(lambda x,y:x+y, map(lambda x:float(x[field]), events)) / len(events)
def func_min(field, events):
return reduce(min, map(lambda x:float(x[field]), events))
def func_max(field, events):
return reduce(max, map(lambda x:float(x[field]), events))
class ActionTimespan(Action):
"""This action consolidate values over a time span."""
REQUIRE_PARSER = True
def __init__(self, field, span=["1h"], func=["avg"]):
"""Create a new timespan action
:param `field`: the field to use in operation (the value to be
consolidated.)
:param `span`: the span for consolidation.
:param `func`: the function to use in consolidation.
"""
self.field = field[0]
self.span = Timespan(span[0])
try:
self.func = __import__("mole.action.timespan",
globals(),
locals(),
[ "func_%s" % func[0] ])
except ImportError:
raise ActionSyntaxError("unable to import timespan module")
try:
self.func = getattr(self.func, "func_%s" % func[0])
except AttributeError:
raise ActionSyntaxError("invalud consolidation function")
def __call__(self, pipeline):
ret = []
field = self.field
for event in pipeline:
if len(ret) and (event.time - ret[0].time) > self.span.seconds:
yield Event({field: self.func(field,ret),"_time": ret[0]["_time"]})
ret = [ event ]
else:
ret.append(event)
if len(ret) and (event.time - ret[0].time) > self.span.seconds:
yield Event({field: self.func(field,ret),"_time": ret[0]["_time"]})
| gpl-3.0 | 1,233,472,038,927,200,000 | 32.633333 | 86 | 0.560951 | false | 3.800377 | false | false | false |
adamcharnock/lightbus | lightbus/path.py | 1 | 7391 |
from typing import Optional, TYPE_CHECKING, Any, Generator
from lightbus.client.utilities import OnError
from lightbus.exceptions import InvalidBusPathConfiguration, InvalidParameters
from lightbus.utilities.async_tools import block
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from lightbus import BusClient, EventMessage
__all__ = ["BusPath"]
class BusPath:
"""Represents a path on the bus
This class provides a higher-level wrapper around the `BusClient` class.
This wrapper allows for a more idiomatic use of the bus. For example:
bus.auth.get_user(username='admin')
Compare this to the lower level equivalent using the `BusClient`:
bus.client.call_rpc_remote(
api_name='auth',
name='get_user',
kwargs={'username': 'admin'},
)
"""
def __init__(self, name: str, *, parent: Optional["BusPath"], client: "BusClient"):
if not parent and name:
raise InvalidBusPathConfiguration("Root client node may not have a name")
self.name = name
self.parent = parent
self.client = client
def __getattr__(self, item) -> "BusPath":
return self.__class__(name=item, parent=self, client=self.client)
def __str__(self):
return self.fully_qualified_name
def __repr__(self):
return "<BusPath {}>".format(self.fully_qualified_name)
def __dir__(self):
# Used by `lightbus shell` command
path = [node.name for node in self.ancestors(include_self=True)]
path.reverse()
api_names = [[""] + n.split(".") for n in self.client.api_registry.names()]
matches = []
apis = []
for api_name in api_names:
if api_name == path:
# Api name matches exactly
apis.append(api_name)
elif api_name[: len(path)] == path:
# Partial API match
matches.append(api_name[len(path)])
for api_name in apis:
api = self.client.api_registry.get(".".join(api_name[1:]))
matches.extend(dir(api))
return matches
# RPC
def __call__(self, *args, **kwargs):
"""Call this BusPath node as an RPC"""
return self.call(*args, **kwargs)
def call(self, *args, bus_options: dict = None, **kwargs):
"""Call this BusPath node as an RPC"
In contrast to __call__(), this method provides the ability to call
with the additional `bus_options` argument.
"""
# Use a larger value of `rpc_timeout` because call_rpc_remote() should
# handle timeout
rpc_timeout = self.client.config.api(self.api_name).rpc_timeout * 1.5
return block(self.call_async(*args, **kwargs, bus_options=bus_options), timeout=rpc_timeout)
async def call_async(self, *args, bus_options=None, **kwargs):
"""Call this BusPath node as an RPC (asynchronous)"
In contrast to __call__(), this method provides the ability to call
with the additional `bus_options` argument.
"""
if args:
raise InvalidParameters(
f"You have attempted to call the RPC {self.fully_qualified_name} using positional "
"arguments. Lightbus requires you use keyword arguments. For example, "
"instead of func(1), use func(foo=1)."
)
bus_options = bus_options or {}
return await self.client.call_rpc_remote(
api_name=self.api_name, name=self.name, kwargs=kwargs, options=bus_options
)
# Events
def listen(
self,
listener,
*,
listener_name: str,
bus_options: dict = None,
on_error: OnError = OnError.SHUTDOWN,
):
"""Listen to events for this BusPath node"""
return self.client.listen_for_event(
api_name=self.api_name,
name=self.name,
listener=listener,
listener_name=listener_name,
options=bus_options,
on_error=on_error,
)
def fire(self, *args, bus_options: dict = None, **kwargs) -> "EventMessage":
"""Fire an event for this BusPath node"""
return block(
self.fire_async(*args, **kwargs, bus_options=bus_options),
timeout=self.client.config.api(self.api_name).event_fire_timeout,
)
async def fire_async(self, *args, bus_options: dict = None, **kwargs) -> "EventMessage":
"""Fire an event for this BusPath node (asynchronous)"""
if args:
raise InvalidParameters(
f"You have attempted to fire the event {self.fully_qualified_name} using positional"
" arguments. Lightbus requires you use keyword arguments. For example, instead of"
" func(1), use func(foo=1)."
)
return await self.client.fire_event(
api_name=self.api_name, name=self.name, kwargs=kwargs, options=bus_options
)
# Utilities
def ancestors(self, include_self=False) -> Generator["BusPath", None, None]:
"""Get all ancestors of this node"""
parent = self
while parent is not None:
if parent != self or include_self:
yield parent
parent = parent.parent
@property
def api_name(self) -> str:
"""Get the API name of this node
This assumes the full path to this node is a fully qualified event/rpc name
"""
path = [node.name for node in self.ancestors(include_self=False)]
path.reverse()
return ".".join(path[1:])
@property
def fully_qualified_name(self) -> str:
"""Get the fully qualified string name of this node"""
path = [node.name for node in self.ancestors(include_self=True)]
path.reverse()
return ".".join(path[1:])
# Schema
@property
def schema(self):
"""Get the bus schema"""
if self.parent is None:
return self.client.schema
else:
# TODO: Implement getting schema of child nodes if there is demand
raise AttributeError(
"Schema only available on root node. Use bus.schema, not bus.my_api.schema"
)
@property
def parameter_schema(self):
"""Get the parameter JSON schema for the given event or RPC"""
# TODO: Test
return self.client.schema.get_event_or_rpc_schema(self.api_name, self.name)["parameters"]
@property
def response_schema(self):
"""Get the response JSON schema for the given RPC
Only RPCs have responses. Accessing this property for an event will result in a
SchemaNotFound error.
"""
return self.client.schema.get_rpc_schema(self.api_name, self.name)["response"]
def validate_parameters(self, parameters: dict):
"""Validate the parameters for an event or RPC against the schema
See Also: https://lightbus.org/reference/schema/
"""
self.client.schema.validate_parameters(self.api_name, self.name, parameters)
def validate_response(self, response: Any):
"""Validate the response for an RPC against the schema
See Also: https://lightbus.org/reference/schema/
"""
self.client.schema.validate_parameters(self.api_name, self.name, response)
| apache-2.0 | -5,512,542,352,097,787,000 | 33.863208 | 100 | 0.602084 | false | 4.156918 | false | false | false |
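A brief sketch of the event side of the API above, assuming a configured bus (the root BusPath) and an 'auth' API with a 'user_registered' event registered elsewhere:

def send_welcome_email(event, username):
    print('Sending welcome email to', username)

bus.auth.user_registered.listen(send_welcome_email,
                                listener_name='send_welcome_email')
bus.auth.user_registered.fire(username='admin')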
kibernick/pycontacts | pycontacts/managers.py | 1 | 4040 |
from .models import (
EmailAddress,
Group,
PhoneNumber,
Person,
StreetAddress,
)
from .exceptions import ImproperlyConfigured
class BaseManager:
cls = None # Override this in a concrete manager
def __init__(self, book):
self.book = book
def _get_table(self):
return self.book._store[self.cls.table_name]
def filter(self, **kwargs):
"""
Filter the table by multiple keyword arguments. Matching is inclusive:
a record is returned if any of the given attributes match its value.
'table' is a reserved kwarg.
:param table: table as dict (defaults to this manager's table).
:param kwargs: attribute name / query value pairs; values may be lists.
:return: dict of matching records keyed by object id.
"""
table = kwargs.pop('table', None)
if not table:
table = self._get_table()
results = {}
for obj_id, obj_attrs in table.items():
for attr, qry_val in kwargs.items():
# Special case if querying per 'id'
if attr == 'id':
# If lookup per list of id's
if isinstance(qry_val, list) and obj_id in qry_val:
results[obj_id] = obj_attrs
# Exact match needed otherwise.
elif obj_id == qry_val:
results[obj_id] = obj_attrs
continue
obj_val = obj_attrs[attr]
# If 'qry_val' is a list, check for membership.
if isinstance(qry_val, list):
# We could be checking in a foreign keys column (list).
if isinstance(obj_val, list):
# Check if a list of query values,
# has match in a list of foreign keys.
if set(obj_val).intersection(set(qry_val)):
results[obj_id] = obj_attrs
# Otherwise check if the object's value is in query list.
elif obj_val in qry_val:
results[obj_id] = obj_attrs
# We are checking for a single query value.
else:
if attr == 'id' and obj_id == qry_val:
results[obj_id] = obj_attrs
elif isinstance(obj_val, list):
if qry_val in obj_val:
results[obj_id] = obj_attrs
elif obj_attrs[attr] == qry_val:
results[obj_id] = obj_attrs
return results
def convert_results(self, results):
cls_objects = []
for r_id, r_attrs in results.items():
cls_obj = self.create(**r_attrs)
cls_obj.id = r_id
cls_objects.append(cls_obj)
return cls_objects
def create(self, **kwargs):
if not self.cls:
raise ImproperlyConfigured("'cls' not overriden")
return self.cls(book=self.book, **kwargs)
class EmailAddressManager(BaseManager):
cls = EmailAddress
class PhoneNumberManager(BaseManager):
cls = PhoneNumber
class StreetAddressManager(BaseManager):
cls = StreetAddress
class GroupManager(BaseManager):
cls = Group
class PersonManager(BaseManager):
cls = Person
def find_by_name(self, first_name=None, last_name=None):
"""
Get all matches for first_name and last_name.
"""
if not (first_name or last_name):
raise ValueError("Supply either 'first_name', 'last_name', or both")
results = self.filter(first_name=first_name, last_name=last_name)
return self.convert_results(results)
def find_by_email(self, email):
"""
Search for Persons by their EmailAddress (given as "email" string).
"""
emails = EmailAddressManager(self.book)
email_results = emails.filter(email=email)
email_ids = email_results.keys()
person_results = self.filter(email_addresses_ids=email_ids)
return self.convert_results(person_results)
| mit | -8,262,843,655,173,583,000 | 32.114754 | 80 | 0.541089 | false | 4.284199 | false | false | false |
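A rough sketch of using the managers above, assuming a pycontacts book object that exposes the in-memory _store the managers read from; the names and email address are made up:

people = PersonManager(book)
alices = people.find_by_name(first_name='Alice')  # list of Person objects
same_person = people.find_by_email('alice@example.com')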
brython-dev/brython | setup/brython/list_modules.py | 1 | 26213 |
"""Detect all Python scripts in HTML pages in current folder and subfolders.
Generate brython_modules.js, a bundle with all the modules and packages used
by an application.
Generate a Python package ready for installation and upload on PyPI.
"""
import os
import shutil
import html.parser
import json
import traceback
import sys
import time
import io
import tokenize
import token
import logging
logger = logging.getLogger(__name__)
# Template for application setup.py script
setup = """from setuptools import setup, find_packages
import os
if os.path.exists('README.rst'):
with open('README.rst', encoding='utf-8') as fobj:
LONG_DESCRIPTION = fobj.read()
setup(
name='{app_name}',
version='{version}',
# The project's main homepage.
url='{url}',
# Author details
author='{author}',
author_email='{author_email}',
# License
license='{license}',
packages=['data'],
py_modules=["{app_name}"],
package_data={{'data':[{files}]}}
)
"""
# Template for the application script
app = """import os
import shutil
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--install',
help='Install {app_name} in an empty directory',
action="store_true")
args = parser.parse_args()
files = ({files})
if args.install:
print('Installing {app_name} in an empty directory')
src_path = os.path.join(os.path.dirname(__file__), 'data')
if os.listdir(os.getcwd()):
print('{app_name} can only be installed in an empty folder')
import sys
sys.exit()
for path in files:
dst = os.path.join(os.getcwd(), path)
head, tail = os.path.split(dst)
if not os.path.exists(head):
os.mkdir(head)
shutil.copyfile(os.path.join(src_path, path), dst)
"""
class FromImport:
def __init__(self):
self.source = ''
self.type = "from"
self.level = 0
self.expect = "source"
self.names = []
def __str__(self):
return '<import ' + str(self.names) + ' from ' + str(self.source) +'>'
class Import:
def __init__(self):
self.type = "import"
self.expect = "module"
self.modules = []
def __str__(self):
return '<import ' + str(self.modules) + '>'
class ImportsFinder:
def __init__(self, *args, **kw):
self.package = kw.pop("package") or ""
def find(self, src):
"""Find imports in source code src. Uses the tokenize module instead
of ast in previous Brython version, so that this script can be run
with CPython versions older than the one implemented in Brython."""
imports = set()
importing = None
f = io.BytesIO(src.encode("utf-8"))
for tok_type, tok_string, *_ in tokenize.tokenize(f.readline):
tok_type = token.tok_name[tok_type]
if importing is None:
if tok_type == "NAME" and tok_string in ["import", "from"]:
context = Import() if tok_string == "import" \
else FromImport()
importing = True
else:
if tok_type == "NEWLINE":
imports.add(context)
importing = None
else:
self.transition(context, tok_type, tok_string)
if importing:
imports.add(context)
# Transform raw import objects into a list of qualified module names
self.imports = set()
for imp in imports:
if isinstance(imp, Import):
for mod in imp.modules:
parts = mod.split('.')
while parts:
self.imports.add('.'.join(parts))
parts.pop()
elif isinstance(imp, FromImport):
source = imp.source
if imp.level > 0:
if imp.level == 1:
imp.source = self.package
else:
parts = self.package.split(".")
imp.source = '.'.join(parts[:1 - imp.level])
if source:
imp.source += '.' + source
parts = imp.source.split('.')
while parts:
self.imports.add('.'.join(parts))
parts.pop()
self.imports.add(imp.source)
for name in imp.names:
parts = name.split('.')
while parts:
self.imports.add(imp.source + '.' + '.'.join(parts))
parts.pop()
def transition(self, context, token, value):
if context.type == "from":
if token == "NAME":
if context.expect == "source":
if value == "import" and context.level:
# syntax "from . import name"
context.expect = "names"
else:
context.source += value
context.expect = "."
elif context.expect == "." and value == "import":
context.expect = "names"
elif context.expect == "names":
context.names.append(value)
context.expect = ","
elif token == "OP":
if value == "," and context.expect == ",":
context.expect = "names"
elif value == "." and context.expect == ".":
context.source += '.'
context.expect = "source"
elif value == "." and context.expect == "source":
context.level += 1
elif context.type == "import":
if token == "NAME":
if context.expect == "module":
if context.modules and context.modules[-1].endswith("."):
context.modules[-1] += value
else:
context.modules.append(value)
context.expect = '.'
elif token == "OP":
if context.expect == ".":
if value == ".":
context.modules[-1] += '.'
context.expect = "module"
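# Example (sketch): resolving imports from a source string with ImportsFinder.
# Pass package="" for top-level code; after find(), the dotted module names
# are available on finder.imports:
#
#     finder = ImportsFinder(package="")
#     finder.find("import os\nfrom json import loads\n")
#     sorted(finder.imports)   # ['json', 'json.loads', 'os']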
class ModulesFinder:
def __init__(self, directory=os.getcwd(), stdlib={}, user_modules={}):
self.directory = directory
self.modules = set()
self.stdlib = stdlib
self.user_modules = user_modules
def get_imports(self, src, package=None):
"""Get all imports in source code src."""
finder = ImportsFinder(package=package)
finder.find(src)
for module in finder.imports:
if module in self.modules:
continue
found = False
for module_dict in [self.stdlib, self.user_modules]:
if module in module_dict:
found = True
self.modules.add(module)
if module_dict[module][0] == '.py':
is_package = len(module_dict[module]) == 4
if is_package:
package = module
elif "." in module:
package = module[:module.rfind(".")]
else:
package = ""
module_dict[module][2] = list(self.get_imports(
module_dict[module][1], package))
return finder.imports
def norm_indent(self, script):
"""Scripts in Brython page may start with an indent, remove it before
building the AST.
"""
indent = None
lines = []
for line in script.split('\n'):
if line.strip() and indent is None:
indent = len(line) - len(line.lstrip())
line = line[indent:]
elif indent is not None:
line = line[indent:]
lines.append(line)
return '\n'.join(lines)
def inspect(self):
"""Walk the directory to find all pages with Brython scripts, parse
them to get the list of modules needed to make them run.
"""
site_packages = 'Lib{0}site-packages{0}'.format(os.sep)
imports = set()
for dirname, dirnames, filenames in os.walk(self.directory):
for name in dirnames:
if name.endswith('__dist__') or name.endswith("__pycache__"):
# don't inspect files in the subfolder __dist__
dirnames.remove(name)
break
for filename in filenames:
path = os.path.join(dirname, filename)
if path == __file__:
continue
ext = os.path.splitext(filename)[1]
if ext.lower() == '.html':
print("script in html", filename)
# detect charset
charset_detector = CharsetDetector()
with open(path, encoding="iso-8859-1") as fobj:
charset_detector.feed(fobj.read())
# get text/python scripts
parser = BrythonScriptsExtractor(dirname)
with open(path, encoding=charset_detector.encoding) as fobj:
parser.feed(fobj.read())
for script in parser.scripts:
script = self.norm_indent(script)
try:
self.get_imports(script)
except SyntaxError:
print('syntax error', path)
traceback.print_exc(file=sys.stderr)
elif ext.lower() == '.py':
#print("python", filename)
if filename == "list_modules.py":
continue
if dirname != self.directory and not is_package(dirname):
continue
# get package name
package = dirname[len(self.directory) + 1:] or None
if package is not None and \
package.startswith(site_packages):
package = package[len('Lib/site-packages/'):]
# print(path)
with open(path, encoding="utf-8") as fobj:
try:
imports |= self.get_imports(fobj.read(), package)
except SyntaxError:
print('syntax error', path)
traceback.print_exc(file=sys.stderr)
def make_brython_modules(self, path):
"""Build brython_modules.js from the list of modules needed by the
application.
"""
vfs = {"$timestamp": int(1000 * time.time())}
for module in self.modules:
dico = self.stdlib if module in self.stdlib else self.user_modules
vfs[module] = dico[module]
elts = module.split('.')
for i in range(1, len(elts)):
pkg = '.'.join(elts[:i])
if not pkg in vfs:
vfs[pkg] = dico[pkg]
# save in brython_modules.js
if os.path.exists(path):
# If brython_modules.js already exists, check if there have been
# changes. Cf. issue #1471.
changes = False
with open(path, encoding="utf-8") as f:
content = f.read()
start_str = "var scripts = "
start_pos = content.find(start_str)
end_pos = content.find("__BRYTHON__.update_VFS(scripts)")
data = content[start_pos + len(start_str):end_pos].strip()
old_vfs = json.loads(data)
if old_vfs.keys() != vfs.keys():
changes = True
else:
changes = True
for key in old_vfs:
if key == "$timestamp":
continue
if not key in vfs:
break
elif vfs[key][1] != old_vfs[key][1]:
break
else: # no break
changes = False
if not changes:
print("No change: brython_modules.js not updated")
return
with open(path, "w", encoding="utf-8") as out:
            # Add VFS_timestamp; used to test if the indexedDB must be
            # refreshed
out.write("__BRYTHON__.VFS_timestamp = {}\n".format(
int(1000 * time.time())))
out.write("__BRYTHON__.use_VFS = true\nvar scripts = ")
json.dump(vfs, out)
out.write("\n__BRYTHON__.update_VFS(scripts)")
def _dest(self, base_dir, dirname, filename):
"""Build the destination path for a file."""
elts = dirname[len(os.getcwd()) + 1:].split(os.sep)
dest_dir = base_dir
for elt in elts:
dest_dir = os.path.join(dest_dir, elt)
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
return os.path.join(dest_dir, filename)
def make_setup(self):
"""Make the setup script (setup.py) and the entry point script
for the application."""
# Create a temporary directory
temp_dir = '__dist__'
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
os.mkdir(temp_dir)
# Create a package "data" in this directory
data_dir = os.path.join(temp_dir, 'data')
os.mkdir(data_dir)
with open(os.path.join(data_dir, "__init__.py"), "w") as out:
out.write('')
# If there is a brython_setup.json file, use it to get information
if os.path.exists("brython_setup.json"):
with open("brython_setup.json", encoding="utf-8") as fobj:
info = json.load(fobj)
else:
            # Otherwise, prompt the user for setup information
while True:
app_name = input("Application name: ")
if app_name:
break
while True:
version = input("Version: ")
if version:
break
author = input("Author: ")
author_email = input("Author email: ")
license = input("License: ")
url = input("Project url: ")
info = {
"app_name": app_name,
"version": version,
"author": author,
"author_email": author_email,
"license": license,
"url": url
}
# Store information in brython_setup.json
with open("brython_setup.json", "w", encoding="utf-8") as out:
json.dump(info, out, indent=4)
# Store all application files in the temporary directory. In HTML
# pages, replace "brython_stdlib.js" by "brython_modules.js"
files = []
for dirname, dirnames, filenames in os.walk(self.directory):
if dirname == "__dist__":
continue
if "__dist__" in dirnames:
dirnames.remove("__dist__")
for filename in filenames:
path = os.path.join(dirname, filename)
parts = path[len(os.getcwd()) + 1:].split(os.sep)
files.append("os.path.join(" +
", ".join(repr(part) for part in parts) +")")
if os.path.splitext(filename)[1] == '.html':
# detect charset
charset_detector = CharsetDetector()
with open(path, encoding="iso-8859-1") as fobj:
charset_detector.feed(fobj.read())
encoding = charset_detector.encoding
# get text/python scripts
parser = VFSReplacementParser(dirname)
with open(path, encoding=encoding) as fobj:
parser.feed(fobj.read())
if not parser.has_vfs:
# save file
dest = self._dest(data_dir, dirname, filename)
shutil.copyfile(path, dest)
continue
with open(path, encoding=encoding) as fobj:
lines = fobj.readlines()
start_line, start_pos = parser.start
end_line, end_pos = parser.end
res = ''.join(lines[:start_line - 1])
for num in range(start_line - 1, end_line):
res += lines[num].replace("brython_stdlib.js",
"brython_modules.js")
res += ''.join(lines[end_line:])
dest = self._dest(data_dir, dirname, filename)
with open(dest, 'w', encoding=encoding) as out:
out.write(res)
else:
dest = self._dest(data_dir, dirname, filename)
shutil.copyfile(path, dest)
info["files"] = ',\n'.join(files)
# Generate setup.py from the template in string setup
path = os.path.join(temp_dir, "setup.py")
with open(path, "w", encoding="utf-8") as out:
out.write(setup.format(**info))
# Generate the application script from the template in string app
path = os.path.join(temp_dir, "{}.py".format(info["app_name"]))
with open(path, "w", encoding="utf-8") as out:
out.write(app.format(**info))
# Get all modules in the Brython standard distribution.
# They must be in brython_stdlib.js somewhere in the current directory
# or below.
def parse_stdlib(stdlib_dir, js_name='brython_stdlib.js'):
path = os.path.join(stdlib_dir, js_name)
with open(path, encoding="utf-8") as fobj:
modules = fobj.read()
modules = modules[modules.find('{'):
modules.find('__BRYTHON__.update_VFS(')]
stdlib = json.loads(modules)
return stdlib
def load_stdlib_sitepackages():
"""
    Search for brython_stdlib.js below the current directory and parse it.
    Also load the modules found in Lib/site-packages.
    :return: (stdlib_dir, stdlib) where stdlib_dir is the directory holding
        brython_stdlib.js and stdlib maps module names to their VFS records.
"""
stdlib_dir = None
for dirname, dirnames, filenames in os.walk(os.getcwd()):
for filename in filenames:
if filename == "brython_stdlib.js":
stdlib_dir = dirname
stdlib = parse_stdlib(stdlib_dir)
break
if not stdlib_dir:
raise FileNotFoundError("Could not find brython_stdlib.js in this"
" directory or below")
# search in site-packages
sp_dir = os.path.join(stdlib_dir, "Lib", "site-packages")
if os.path.exists(sp_dir):
print("search in site-packages...")
mf = ModulesFinder()
for dirpath, dirnames, filenames in os.walk(sp_dir):
if dirpath.endswith("__pycache__"):
continue
package = dirpath[len(sp_dir) + 1:]
for filename in filenames:
if not filename.endswith(".py"):
continue
fullpath = os.path.join(dirpath, filename)
#print(fullpath)
is_package = False
if not package:
# file in site-packages
module = os.path.splitext(filename)[0]
else:
elts = package.split(os.sep)
is_package = filename == "__init__.py"
if not is_package:
elts.append(os.path.splitext(filename)[0])
module = ".".join(elts)
with open(fullpath, encoding="utf-8") as f:
src = f.read()
#imports = mf.get_imports(src)
stdlib[module] = [".py", src, None]
if is_package:
stdlib[module].append(1)
return stdlib_dir, stdlib
packages = {os.getcwd(), os.getcwd() + '/Lib/site-packages'}
def is_package(folder):
"""Test if folder is a package, ie has __init__.py and all the folders
above until os.getcwd() also have __init__.py.
Use set "packages" to cache results.
"""
if folder in packages:
return True
current = folder
while True:
if not os.path.exists(os.path.join(current, "__init__.py")):
return False
current = os.path.dirname(current)
if current in packages:
packages.add(folder)
return True
def load_user_modules(module_dir=os.getcwd()):
user_modules = {}
for dirname, dirnames, filenames in os.walk(module_dir):
for filename in filenames:
name, ext = os.path.splitext(filename)
if not ext == ".py" or filename == "list_modules.py":
continue
if dirname == os.getcwd():
# modules in the same directory
path = os.path.join(dirname, filename)
with open(path, encoding="utf-8") as fobj:
try:
src = fobj.read()
                    except:
                        logger.error("Unable to read %s", path)
                        continue
mf = ModulesFinder(dirname)
imports = sorted(list(mf.get_imports(src)))
user_modules[name] = [ext, src, imports]
elif is_package(dirname):
# modules in packages below current directory
path = os.path.join(dirname, filename)
package = dirname[len(os.getcwd()) + 1:].replace(os.sep, '.')
if package.startswith('Lib.site-packages.'):
package = package[len('Lib.site-packages.'):]
if filename == "__init__.py":
module_name = package
else:
module_name = "{}.{}".format(package, name)
with open(path, encoding="utf-8") as fobj:
src = fobj.read()
#mf = ModulesFinder(dirname)
#imports = mf.get_imports(src, package or None)
#imports = sorted(list(imports))
user_modules[module_name] = [ext, src, None]
if module_name == package:
user_modules[module_name].append(1)
return user_modules
class CharsetDetector(html.parser.HTMLParser):
"""Used to detect <meta charset="..."> in HTML page."""
def __init__(self, *args, **kw):
kw.setdefault('convert_charrefs', True)
try:
html.parser.HTMLParser.__init__(self, *args, **kw)
except TypeError:
# convert_charrefs is only supported by Python 3.4+
del kw['convert_charrefs']
html.parser.HTMLParser.__init__(self, *args, **kw)
self.encoding = "iso-8859-1"
def handle_starttag(self, tag, attrs):
if tag.lower() == "meta":
for key, value in attrs:
if key == "charset":
self.encoding = value
class BrythonScriptsExtractor(html.parser.HTMLParser):
"""Used to extract all Brython scripts in HTML pages."""
def __init__(self, dirname, **kw):
kw.setdefault('convert_charrefs', True)
try:
html.parser.HTMLParser.__init__(self, **kw)
except TypeError:
# convert_charrefs is only supported by Python 3.4+
del kw['convert_charrefs']
html.parser.HTMLParser.__init__(self, **kw)
self.dirname = dirname
self.scripts = []
self.py_tags = [] # stack of Python blocks
self.tag_stack = []
def handle_starttag(self, tag, attrs):
if tag.lower() == "script":
_type = "js_script"
src = None
for key, value in attrs:
if key == 'type' and value in ("text/python", "text/python3"):
_type = "py_script"
elif key == "src":
src = value
if _type == "py_script" and src:
_type = "py_script_with_src"
path = os.path.join(self.dirname, src)
with open(path, encoding="utf-8") as fobj:
self.scripts.append(fobj.read())
self.tag_stack.append(_type)
def handle_endtag(self, tag):
if tag.lower() == "script":
self.tag_stack.pop()
def handle_data(self, data):
"""Data is printed unchanged"""
if data.strip():
if self.tag_stack and self.tag_stack[-1].lower() == "py_script":
self.scripts.append(data)
class VFSReplacementParser(html.parser.HTMLParser):
"""Used to replace brython_stdlib.js by brython_modules.js in HTML
pages."""
def __init__(self, path, **kw):
kw.setdefault('convert_charrefs', True)
try:
html.parser.HTMLParser.__init__(self, **kw)
except TypeError:
# convert_charrefs is only supported by Python 3.4+
del kw['convert_charrefs']
html.parser.HTMLParser.__init__(self, **kw)
self.vfs = False
self.has_vfs = False
def handle_starttag(self, tag, attrs):
if tag.lower() == "script":
_type = "js_script"
src = None
for key, value in attrs:
if key == "src":
elts = value.split("/")
if elts and elts[-1] == "brython_stdlib.js":
self.vfs = True
self.has_vfs = True
self.attrs = attrs
self.start = self.getpos()
return
self.vfs = False
def handle_endtag(self, tag):
if tag.lower() == "script" and self.vfs:
self.end = self.getpos()
if __name__ == "__main__":
finder = ModulesFinder()
finder.inspect()
# print(sorted(list(finder.modules)))
| bsd-3-clause | 3,624,906,521,840,438,300 | 36.340456 | 80 | 0.495441 | false | 4.501631 | false | false | false |
aykol/pymatgen | pymatgen/transformations/advanced_transformations.py | 2 | 33514 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
from fractions import gcd, Fraction
from itertools import groupby
from warnings import warn
import logging
import math
import six
from monty.json import MontyDecoder
from monty.fractions import lcm
from pymatgen.core.structure import Composition
from pymatgen.core.periodic_table import Element, Specie, get_el_sp
from pymatgen.transformations.transformation_abc import AbstractTransformation
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, OrderDisorderedStructureTransformation
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.structure_prediction.substitution_probability import \
SubstitutionPredictor
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpinComparator
from pymatgen.analysis.energy_models import SymmetryModel
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.surface import SlabGenerator
"""
This module implements more advanced transformations.
"""
__author__ = "Shyue Ping Ong, Stephen Dacek, Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jul 24, 2012"
logger = logging.getLogger(__name__)
class ChargeBalanceTransformation(AbstractTransformation):
"""
This is a transformation that disorders a structure to make it charge
balanced, given an oxidation state-decorated structure.
Args:
charge_balance_sp: specie to add or remove. Currently only removal
is supported
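    Example (an illustrative sketch; assumes ``structure`` is an oxidation
    state-decorated Structure carrying a non-zero net charge):
        trans = ChargeBalanceTransformation("Li+")
        neutral = trans.apply_transformation(structure)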
"""
def __init__(self, charge_balance_sp):
self.charge_balance_sp = str(charge_balance_sp)
def apply_transformation(self, structure):
charge = structure.charge
specie = get_el_sp(self.charge_balance_sp)
num_to_remove = charge / specie.oxi_state
num_in_structure = structure.composition[specie]
removal_fraction = num_to_remove / num_in_structure
if removal_fraction < 0:
raise ValueError("addition of specie not yet supported by "
"ChargeBalanceTransformation")
trans = SubstitutionTransformation(
{self.charge_balance_sp: {
self.charge_balance_sp: 1 - removal_fraction}})
return trans.apply_transformation(structure)
def __str__(self):
return "Charge Balance Transformation : " + \
"Species to remove = {}".format(str(self.charge_balance_sp))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return False
class SuperTransformation(AbstractTransformation):
"""
This is a transformation that is inherently one-to-many. It is constructed
from a list of transformations and returns one structure for each
transformation. The primary use for this class is extending a transmuter
object.
Args:
transformations ([transformations]): List of transformations to apply
to a structure. One transformation is applied to each output
structure.
        nstructures_per_trans (int): If the transformations are one-to-many,
nstructures_per_trans structures from each transformation are
added to the full list. Defaults to 1, i.e., only best structure.
"""
def __init__(self, transformations, nstructures_per_trans=1):
self._transformations = transformations
self.nstructures_per_trans = nstructures_per_trans
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SuperTransformation has no single best structure"
" output. Must use return_ranked_list")
structures = []
for t in self._transformations:
if t.is_one_to_many:
for d in t.apply_transformation(
structure,
return_ranked_list=self.nstructures_per_trans):
d["transformation"] = t
structures.append(d)
else:
structures.append(
{"transformation": t,
"structure": t.apply_transformation(structure)})
return structures
def __str__(self):
return "Super Transformation : Transformations = " + \
"{}".format(" ".join([str(t) for t in self._transformations]))
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class MultipleSubstitutionTransformation(object):
"""
Performs multiple substitutions on a structure. For example, can do a
fractional replacement of Ge in LiGePS with a list of species, creating one
structure for each substitution. Ordering is done using a dummy element so
only one ordering must be done per substitution oxidation state. Charge
balancing of the structure is optionally performed.
.. note::
There are no checks to make sure that removal fractions are possible
and rounding may occur. Currently charge balancing only works for
removal of species.
"""
def __init__(self, sp_to_replace, r_fraction, substitution_dict,
charge_balance_species=None, order=True):
"""
Performs multiple fractional substitutions on a transmuter.
Args:
sp_to_replace: species to be replaced
r_fraction: fraction of that specie to replace
substitution_dict: dictionary of the format
{2: ["Mg", "Ti", "V", "As", "Cr", "Ta", "N", "Nb"],
3: ["Ru", "Fe", "Co", "Ce", "As", "Cr", "Ta", "N", "Nb"],
4: ["Ru", "V", "Cr", "Ta", "N", "Nb"],
5: ["Ru", "W", "Mn"]
}
The number is the charge used for each of the list of elements
(an element can be present in multiple lists)
charge_balance_species: If specified, will balance the charge on
the structure using that specie.
"""
self.sp_to_replace = sp_to_replace
self.r_fraction = r_fraction
self.substitution_dict = substitution_dict
self.charge_balance_species = charge_balance_species
self.order = order
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("MultipleSubstitutionTransformation has no single"
" best structure output. Must use"
" return_ranked_list.")
outputs = []
for charge, el_list in self.substitution_dict.items():
mapping = {}
if charge > 0:
sign = "+"
else:
sign = "-"
dummy_sp = "X{}{}".format(str(charge), sign)
mapping[self.sp_to_replace] = {
self.sp_to_replace: 1 - self.r_fraction,
dummy_sp: self.r_fraction}
trans = SubstitutionTransformation(mapping)
dummy_structure = trans.apply_transformation(structure)
if self.charge_balance_species is not None:
cbt = ChargeBalanceTransformation(self.charge_balance_species)
dummy_structure = cbt.apply_transformation(dummy_structure)
if self.order:
trans = OrderDisorderedStructureTransformation()
dummy_structure = trans.apply_transformation(dummy_structure)
for el in el_list:
if charge > 0:
sign = "+"
else:
sign = "-"
st = SubstitutionTransformation(
{"X{}+".format(str(charge)): "{}{}{}".format(el, charge,
sign)})
new_structure = st.apply_transformation(dummy_structure)
outputs.append({"structure": new_structure})
return outputs
def __str__(self):
return "Multiple Substitution Transformation : Substitution on " + \
"{}".format(self.sp_to_replace)
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class EnumerateStructureTransformation(AbstractTransformation):
"""
Order a disordered structure using enumlib. For complete orderings, this
    generally produces fewer structures than the OrderDisorderedStructure
transformation, and at a much faster speed.
Args:
min_cell_size:
The minimum cell size wanted. Must be an int. Defaults to 1.
max_cell_size:
The maximum cell size wanted. Must be an int. Defaults to 1.
symm_prec:
Tolerance to use for symmetry.
refine_structure:
This parameter has the same meaning as in enumlib_caller.
If you are starting from a structure that has been relaxed via
some electronic structure code, it is usually much better to
start with symmetry determination and then obtain a refined
            structure. The refined structure has cell parameters and
            atomic positions shifted to the expected symmetry positions,
            which makes it much less sensitive to precision issues in enumlib.
            If you are already starting from an experimental cif, refinement
            should have already been done and it is not necessary. Defaults
to False.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
check_ordered_symmetry (bool): Whether to check the symmetry of
the ordered sites. If the symmetry of the ordered sites is
            lower, the lowest symmetry ordered sites are included in the
            enumeration. This is important if the ordered sites break
            symmetry in a way that is important for getting possible
structures. But sometimes including ordered sites
slows down enumeration to the point that it cannot be
completed. Switch to False in those cases. Defaults to True.
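    Example (an illustrative sketch; assumes ``disordered`` is a disordered,
    oxidation state-decorated Structure and that the enumlib executables are
    installed):
        trans = EnumerateStructureTransformation(max_cell_size=2)
        ranked = trans.apply_transformation(disordered, return_ranked_list=10)
        best_structure = ranked[0]["structure"]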
"""
def __init__(self, min_cell_size=1, max_cell_size=1, symm_prec=0.1,
refine_structure=False, enum_precision_parameter=0.001,
check_ordered_symmetry=True):
self.symm_prec = symm_prec
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.refine_structure = refine_structure
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
def apply_transformation(self, structure, return_ranked_list=False):
"""
Return either a single ordered structure or a sequence of all ordered
structures.
Args:
structure: Structure to order.
return_ranked_list (bool): Whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
The list of ordered structures is ranked by ewald energy / atom, if
the input structure is an oxidation state decorated structure.
Otherwise, it is ranked by number of sites, with smallest number of
sites first.
"""
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if self.refine_structure:
finder = SpacegroupAnalyzer(structure, self.symm_prec)
structure = finder.get_refined_structure()
contains_oxidation_state = all(
[hasattr(sp, "oxi_state") and sp.oxi_state != 0 for sp in
structure.composition.elements]
)
if structure.is_ordered:
warn("Enumeration skipped for structure with composition {} "
"because it is ordered".format(structure.composition))
structures = [structure.copy()]
else:
adaptor = EnumlibAdaptor(
structure, min_cell_size=self.min_cell_size,
max_cell_size=self.max_cell_size,
symm_prec=self.symm_prec, refine_structure=False,
enum_precision_parameter=self.enum_precision_parameter,
check_ordered_symmetry=self.check_ordered_symmetry)
adaptor.run()
structures = adaptor.structures
original_latt = structure.lattice
inv_latt = np.linalg.inv(original_latt.matrix)
ewald_matrices = {}
all_structures = []
for s in structures:
new_latt = s.lattice
transformation = np.dot(new_latt.matrix, inv_latt)
transformation = tuple([tuple([int(round(cell)) for cell in row])
for row in transformation])
if contains_oxidation_state:
if transformation not in ewald_matrices:
s_supercell = structure * transformation
ewald = EwaldSummation(s_supercell)
ewald_matrices[transformation] = ewald
else:
ewald = ewald_matrices[transformation]
energy = ewald.compute_sub_structure(s)
all_structures.append({"num_sites": len(s), "energy": energy,
"structure": s})
else:
all_structures.append({"num_sites": len(s), "structure": s})
def sort_func(s):
return s["energy"] / s["num_sites"] if contains_oxidation_state \
else s["num_sites"]
self._all_structures = sorted(all_structures, key=sort_func)
if return_ranked_list:
return self._all_structures[0:num_to_return]
else:
return self._all_structures[0]["structure"]
def __str__(self):
return "EnumerateStructureTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class SubstitutionPredictorTransformation(AbstractTransformation):
"""
This transformation takes a structure and uses the structure
prediction module to find likely site substitutions.
Args:
threshold: Threshold for substitution.
**kwargs: Args for SubstitutionProbability class lambda_table, alpha
"""
def __init__(self, threshold=1e-2, **kwargs):
self.kwargs = kwargs
self.threshold = threshold
self._substitutor = SubstitutionPredictor(threshold=threshold,
**kwargs)
def apply_transformation(self, structure, return_ranked_list=False):
if not return_ranked_list:
raise ValueError("SubstitutionPredictorTransformation doesn't"
" support returning 1 structure")
preds = self._substitutor.composition_prediction(
structure.composition, to_this_composition=False)
preds.sort(key=lambda x: x['probability'], reverse=True)
outputs = []
for pred in preds:
st = SubstitutionTransformation(pred['substitutions'])
output = {'structure': st.apply_transformation(structure),
'probability': pred['probability'],
'threshold': self.threshold, 'substitutions': {}}
# dictionary keys have to be converted to strings for JSON
for key, value in pred['substitutions'].items():
output['substitutions'][str(key)] = str(value)
outputs.append(output)
return outputs
def __str__(self):
return "SubstitutionPredictorTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class MagOrderingTransformation(AbstractTransformation):
"""
This transformation takes a structure and returns a list of magnetic
orderings. Currently only works for ordered structures.
Args:
mag_elements_spin:
A mapping of elements/species to magnetically order to spin
magnitudes. E.g., {"Fe3+": 5, "Mn3+": 4}
order_parameter:
degree of magnetization. 0.5 corresponds to
antiferromagnetic order
energy_model:
Energy model used to rank the structures. Some models are
provided in :mod:`pymatgen.analysis.energy_models`.
**kwargs:
Same keyword args as :class:`EnumerateStructureTransformation`,
i.e., min_cell_size, etc.
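    Example (an illustrative sketch; assumes ``structure`` contains Fe3+ sites):
        trans = MagOrderingTransformation({"Fe3+": 5}, order_parameter=0.5)
        orderings = trans.apply_transformation(structure, return_ranked_list=5)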
"""
def __init__(self, mag_species_spin, order_parameter=0.5,
energy_model=SymmetryModel(), **kwargs):
self.mag_species_spin = mag_species_spin
if order_parameter > 1 or order_parameter < 0:
raise ValueError('Order Parameter must lie between 0 and 1')
else:
self.order_parameter = order_parameter
self.energy_model = energy_model
self.kwargs = kwargs
@classmethod
def determine_min_cell(cls, structure, mag_species_spin, order_parameter):
"""
Determine the smallest supercell that is able to enumerate
the provided structure with the given order parameter
"""
def lcm(n1, n2):
"""
Find least common multiple of two numbers
"""
return n1 * n2 / gcd(n1, n2)
denom = Fraction(order_parameter).limit_denominator(100).denominator
atom_per_specie = [structure.composition[m]
for m in mag_species_spin.keys()]
n_gcd = six.moves.reduce(gcd, atom_per_specie)
if not n_gcd:
raise ValueError(
'The specified species do not exist in the structure'
' to be enumerated')
return lcm(n_gcd, denom) / n_gcd
def apply_transformation(self, structure, return_ranked_list=False):
# Make a mutable structure first
mods = Structure.from_sites(structure)
for sp, spin in self.mag_species_spin.items():
sp = get_el_sp(sp)
oxi_state = getattr(sp, "oxi_state", 0)
if spin:
up = Specie(sp.symbol, oxi_state, {"spin": abs(spin)})
down = Specie(sp.symbol, oxi_state, {"spin": -abs(spin)})
mods.replace_species(
{sp: Composition({up: self.order_parameter,
down: 1 - self.order_parameter})})
else:
mods.replace_species(
{sp: Specie(sp.symbol, oxi_state, {"spin": spin})})
if mods.is_ordered:
return [mods] if return_ranked_list > 1 else mods
enum_args = self.kwargs
enum_args["min_cell_size"] = max(int(
MagOrderingTransformation.determine_min_cell(
structure, self.mag_species_spin,
self.order_parameter)),
enum_args.get("min_cell_size", 1))
max_cell = enum_args.get('max_cell_size')
if max_cell:
if enum_args["min_cell_size"] > max_cell:
raise ValueError('Specified max cell size is smaller'
' than the minimum enumerable cell size')
else:
enum_args["max_cell_size"] = enum_args["min_cell_size"]
t = EnumerateStructureTransformation(**enum_args)
alls = t.apply_transformation(mods,
return_ranked_list=return_ranked_list)
try:
num_to_return = int(return_ranked_list)
except ValueError:
num_to_return = 1
if num_to_return == 1 or not return_ranked_list:
return alls[0]["structure"] if num_to_return else alls
m = StructureMatcher(comparator=SpinComparator())
key = lambda x: SpacegroupAnalyzer(x, 0.1).get_space_group_number()
out = []
for _, g in groupby(sorted([d["structure"] for d in alls],
key=key), key):
g = list(g)
grouped = m.group_structures(g)
out.extend([{"structure": g[0],
"energy": self.energy_model.get_energy(g[0])}
for g in grouped])
self._all_structures = sorted(out, key=lambda d: d["energy"])
return self._all_structures[0:num_to_return]
def __str__(self):
return "MagOrderingTransformation"
def __repr__(self):
return self.__str__()
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
def _find_codopant(target, oxidation_state, allowed_elements=None):
"""
Finds the element from "allowed elements" that (i) possesses the desired
"oxidation state" and (ii) is closest in ionic radius to the target specie
Args:
target: (Specie) provides target ionic radius.
oxidation_state: (float) codopant oxidation state.
allowed_elements: ([str]) List of allowed elements. If None,
all elements are tried.
Returns:
(Specie) with oxidation_state that has ionic radius closest to
target.
"""
ref_radius = target.ionic_radius
candidates = []
symbols = allowed_elements or [el.symbol for el in Element]
for sym in symbols:
try:
sp = Specie(sym, oxidation_state)
r = sp.ionic_radius
if r is not None:
candidates.append((r, sp))
except:
pass
return min(candidates, key=lambda l: abs(l[0]/ref_radius - 1))[1]
class DopingTransformation(AbstractTransformation):
"""
A transformation that performs doping of a structure.
"""
def __init__(self, dopant, ionic_radius_tol=float("inf"), min_length=10,
alio_tol=0, codopant=False, max_structures_per_enum=100,
allowed_doping_species=None, **kwargs):
"""
Args:
dopant (Specie-like): E.g., Al3+. Must have oxidation state.
            ionic_radius_tol (float): Fractional allowable ionic radii
mismatch for dopant to fit into a site. Default of inf means
that any dopant with the right oxidation state is allowed.
            min_length (float): Minimum lattice parameter between periodic
images of dopant. Defaults to 10A for now.
alio_tol (int): If this is not 0, attempt will be made to dope
sites with oxidation_states +- alio_tol of the dopant. E.g.,
1 means that the ions like Ca2+ and Ti4+ are considered as
potential doping sites for Al3+.
codopant (bool): If True, doping will be carried out with a
codopant to maintain charge neutrality. Otherwise, vacancies
will be used.
max_structures_per_enum (float): Maximum number of structures to
return per enumeration. Note that there can be more than one
candidate doping site, and each site enumeration will return at
max max_structures_per_enum structures. Defaults to 100.
allowed_doping_species (list): Species that are allowed to be
doping sites. This is an inclusionary list. If specified,
                any sites which are not in this list will not be
                considered as doping sites.
\*\*kwargs:
Same keyword args as :class:`EnumerateStructureTransformation`,
i.e., min_cell_size, etc.
"""
self.dopant = get_el_sp(dopant)
self.ionic_radius_tol = ionic_radius_tol
self.min_length = min_length
self.alio_tol = alio_tol
self.codopant = codopant
self.max_structures_per_enum = max_structures_per_enum
self.allowed_doping_species = allowed_doping_species
self.kwargs = kwargs
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure (Structure): Input structure to dope
Returns:
[{"structure": Structure, "energy": float}]
"""
comp = structure.composition
logger.info("Composition: %s" % comp)
for sp in comp:
try:
sp.oxi_state
except AttributeError:
analyzer = BVAnalyzer()
structure = analyzer.get_oxi_state_decorated_structure(
structure)
comp = structure.composition
break
ox = self.dopant.oxi_state
radius = self.dopant.ionic_radius
compatible_species = [
sp for sp in comp if sp.oxi_state == ox and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol]
if (not compatible_species) and self.alio_tol:
# We only consider aliovalent doping if there are no compatible
# isovalent species.
compatible_species = [
sp for sp in comp
if abs(sp.oxi_state - ox) <= self.alio_tol and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol and
sp.oxi_state * ox >= 0]
if self.allowed_doping_species is not None:
# Only keep allowed doping species.
compatible_species = [
sp for sp in compatible_species
if sp in [get_el_sp(s) for s in self.allowed_doping_species]]
logger.info("Compatible species: %s" % compatible_species)
lengths = structure.lattice.abc
scaling = [max(1, int(round(math.ceil(self.min_length/x))))
for x in lengths]
logger.info("Lengths are %s" % str(lengths))
logger.info("Scaling = %s" % str(scaling))
all_structures = []
t = EnumerateStructureTransformation(**self.kwargs)
for sp in compatible_species:
supercell = structure * scaling
nsp = supercell.composition[sp]
if sp.oxi_state == ox:
supercell.replace_species({sp: {sp: (nsp - 1)/nsp,
self.dopant: 1/nsp}})
logger.info("Doping %s for %s at level %.3f" % (
sp, self.dopant, 1 / nsp))
elif self.codopant:
codopant = _find_codopant(sp, 2 * sp.oxi_state - ox)
supercell.replace_species({sp: {sp: (nsp - 2) / nsp,
self.dopant: 1 / nsp,
codopant: 1 / nsp}})
logger.info("Doping %s for %s + %s at level %.3f" % (
sp, self.dopant, codopant, 1 / nsp))
elif abs(sp.oxi_state) < abs(ox):
# Strategy: replace the target species with a
# combination of dopant and vacancy.
# We will choose the lowest oxidation state species as a
# vacancy compensation species as it is likely to be lower in
# energy
sp_to_remove = min([s for s in comp if s.oxi_state * ox > 0],
key=lambda ss: abs(ss.oxi_state))
if sp_to_remove == sp:
common_charge = lcm(int(abs(sp.oxi_state)), int(abs(ox)))
ndopant = common_charge / abs(ox)
nsp_to_remove = common_charge / abs(sp.oxi_state)
logger.info("Doping %d %s with %d %s." %
(nsp_to_remove, sp, ndopant, self.dopant))
supercell.replace_species(
{sp: {sp: (nsp - nsp_to_remove) / nsp,
self.dopant: ndopant / nsp}})
else:
ox_diff = int(abs(round(sp.oxi_state - ox)))
vac_ox = int(abs(sp_to_remove.oxi_state))
common_charge = lcm(vac_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / vac_ox
nx = supercell.composition[sp_to_remove]
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {
sp_to_remove: (nx - nx_to_remove) / nx}})
elif abs(sp.oxi_state) > abs(ox):
# Strategy: replace the target species with dopant and also
# remove some opposite charged species for charge neutrality
if ox > 0:
sp_to_remove = max(supercell.composition.keys(),
key=lambda el: el.X)
else:
sp_to_remove = min(supercell.composition.keys(),
key=lambda el: el.X)
# Confirm species are of opposite oxidation states.
assert sp_to_remove.oxi_state * sp.oxi_state < 0
ox_diff = int(abs(round(sp.oxi_state - ox)))
anion_ox = int(abs(sp_to_remove.oxi_state))
nx = supercell.composition[sp_to_remove]
common_charge = lcm(anion_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / anion_ox
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {sp_to_remove: (nx - nx_to_remove)/nx}})
ss = t.apply_transformation(
supercell, return_ranked_list=self.max_structures_per_enum)
logger.info("%s distinct structures" % len(ss))
all_structures.extend(ss)
logger.info("Total %s doped structures" % len(all_structures))
if return_ranked_list:
return all_structures[:return_ranked_list]
return all_structures[0]["structure"]
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return True
class SlabTransformation(AbstractTransformation):
"""
A transformation that creates a slab from a structure.
"""
def __init__(self, miller_index, min_slab_size, min_vacuum_size,
lll_reduce=False, center_slab=False, primitive=True,
max_normal_search=None, shift=0, tol=0.1):
"""
Args:
miller_index (3-tuple or list): miller index of slab
min_slab_size (float): minimum slab size in angstroms
min_vacuum_size (float): minimum size of vacuum
lll_reduce (bool): whether to apply LLL reduction
center_slab (bool): whether to center the slab
primitive (bool): whether to reduce slabs to most primitive cell
max_normal_search (int): maximum index to include in linear
combinations of indices to find c lattice vector orthogonal
to slab surface
shift (float): shift to get termination
tol (float): tolerance for primitive cell finding
"""
self.miller_index = miller_index
self.min_slab_size = min_slab_size
self.min_vacuum_size = min_vacuum_size
self.lll_reduce = lll_reduce
self.center_slab = center_slab
self.primitive = primitive
self.max_normal_search = max_normal_search
self.shift = shift
        self.tol = tol
def apply_transformation(self, structure):
sg = SlabGenerator(structure, self.miller_index, self.min_slab_size,
self.min_vacuum_size, self.lll_reduce,
self.center_slab, self.primitive,
self.max_normal_search)
slab = sg.get_slab(self.shift, self.tol)
return slab
@property
def inverse(self):
return None
@property
def is_one_to_many(self):
return None
| mit | -7,162,670,660,788,724,000 | 39.232893 | 80 | 0.579788 | false | 4.259532 | false | false | false |
aitormf/JdeRobot | src/libs/comm_py/comm/ros/publisherCmdVel.py | 5 | 4391 | import rospy
from geometry_msgs.msg import TwistStamped
import threading
from math import pi as PI
from jderobotTypes import CMDVel
from .threadPublisher import ThreadPublisher
def cmdvel2Twist(vel):
'''
    Translates a JderobotTypes CMDVel into a ROS TwistStamped message.
    @param vel: JderobotTypes CMDVel to translate
    @type vel: JdeRobotTypes.CMDVel
    @return a TwistStamped translated from vel
'''
tw = TwistStamped()
tw.twist.linear.x = vel.vx
tw.twist.linear.y = vel.vy
tw.twist.linear.z = vel.vz
tw.twist.angular.x = vel.ax
tw.twist.angular.y = vel.ay
tw.twist.angular.z = vel.az
return tw
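# Illustrative usage (a sketch; assumes the caller has populated a jderobotTypes CMDVel):
#
#     vel = CMDVel()
#     vel.vx = 0.5   # forward velocity
#     vel.az = 0.1   # yaw rate
#     tw = cmdvel2Twist(vel)
#     # tw.twist.linear.x == 0.5 and tw.twist.angular.z == 0.1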
class PublisherCMDVel:
'''
    ROS CMDVel Publisher. Publishes CMDVel velocity commands to a ROS topic.
'''
def __init__(self, topic, jdrc):
'''
PublisherCMDVel Constructor.
@param topic: ROS topic to publish
@param jdrc: jderobot Communicator
@type topic: String
@type jdrc: jderobot Communicator
'''
rospy.init_node("ss")
self.topic = topic
self.jdrc = jdrc
self.vel = CMDVel()
        self.pub = rospy.Publisher(topic, TwistStamped, queue_size=1)
self.lock = threading.Lock()
self.kill_event = threading.Event()
self.thread = ThreadPublisher(self, self.kill_event)
self.thread.daemon = True
self.start()
def publish (self):
'''
Function to publish cmdvel.
'''
self.lock.acquire()
tw = cmdvel2Twist(self.vel)
self.lock.release()
if (self.jdrc.getState() == "flying"):
self.pub.publish(tw)
def stop(self):
'''
        Stops (unregisters) the publisher. Once stopped, the client cannot be started again, because threading.Thread raises an error if restarted.
'''
self.kill_event.set()
self.pub.unregister()
def start (self):
'''
        Starts the publisher thread. Once the client has been stopped, it cannot be started again, because threading.Thread raises an error if restarted.
'''
self.kill_event.clear()
self.thread.start()
def sendVelocities(self):
'''
Sends CMDVel.
'''
self.lock.acquire()
#self.vel = vel
self.lock.release()
def setVX(self, vx):
'''
Sends VX velocity.
@param vx: VX velocity
@type vx: float
'''
self.lock.acquire()
self.vel.vx = vx
self.lock.release()
def setVY(self, vy):
'''
Sends VY velocity.
@param vy: VY velocity
@type vy: float
'''
self.lock.acquire()
self.vel.vy = vy
self.lock.release()
def setVZ(self,vz):
'''
Sends VZ velocity.
@param vz: VZ velocity
@type vz: float
'''
self.lock.acquire()
self.vel.vz=vz
self.lock.release()
def setAngularZ(self, az):
'''
Sends AZ velocity.
@param az: AZ velocity
@type az: float
'''
self.lock.acquire()
self.vel.az = az
self.lock.release()
def setAngularX(self,ax):
'''
Sends AX velocity.
@param ax: AX velocity
@type ax: float
'''
self.lock.acquire()
self.vel.ax=ax
self.lock.release()
def setAngularY(self,ay):
'''
Sends AY velocity.
@param ay: AY velocity
@type ay: float
'''
self.lock.acquire()
self.vel.ay=ay
self.lock.release()
def setYaw(self,yaw):
self.setAngularZ(yaw)
def setRoll(self,roll):
self.setAngularX(roll)
def setPitch(self,pitch):
self.setAngularY(pitch)
def sendCMD (self, vel):
'''
Sends CMDVel.
@param vel: CMDVel to publish
@type vel: CMDVel
'''
self.lock.acquire()
self.vel = vel
self.lock.release()
def sendCMDVel (self, vx,vy,vz,ax,ay,az):
self.lock.acquire()
self.vel.vx=vx
self.vel.vy=vy
self.vel.vz=vz
self.vel.ax=ax
self.vel.ay=ay
self.vel.az=az
self.lock.release()
| gpl-3.0 | -1,091,195,975,461,559,400 | 19.712264 | 115 | 0.53177 | false | 3.59918 | false | false | false |
keras-team/keras-io | examples/vision/semantic_image_clustering.py | 1 | 19556 | """
Title: Semantic Image Clustering
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/02/28
Last modified: 2021/02/28
Description: Semantic Clustering by Adopting Nearest neighbors (SCAN) algorithm.
"""
"""
## Introduction
This example demonstrates how to apply the [Semantic Clustering by Adopting Nearest neighbors
(SCAN)](https://arxiv.org/abs/2005.12320) algorithm (Van Gansbeke et al., 2020) on the
[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. The algorithm consists of
two phases:
1. Self-supervised visual representation learning of images, in which we use the
[simCLR](https://arxiv.org/abs/2002.05709) technique.
2. Clustering of the learned visual representation vectors to maximize the agreement
between the cluster assignments of neighboring vectors.
The example requires [TensorFlow Addons](https://www.tensorflow.org/addons),
which you can install using the following command:
```python
pip install tensorflow-addons
```
"""
"""
## Setup
"""
from collections import defaultdict
import random
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from tqdm import tqdm
"""
## Prepare the data
"""
num_classes = 10
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_data = np.concatenate([x_train, x_test])
y_data = np.concatenate([y_train, y_test])
print("x_data shape:", x_data.shape, "- y_data shape:", y_data.shape)
classes = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
"""
## Define hyperparameters
"""
target_size = 32 # Resize the input images.
representation_dim = 512 # The dimensions of the features vector.
projection_units = 128 # The projection head of the representation learner.
num_clusters = 20 # Number of clusters.
k_neighbours = 5 # Number of neighbours to consider during cluster learning.
tune_encoder_during_clustering = False # Freeze the encoder in the cluster learning.
"""
## Implement data preprocessing
The data preprocessing step resizes the input images to the desired `target_size` and applies
feature-wise normalization. Note that, when using `keras.applications.ResNet50V2` as the
visual encoder, resizing the images into 255 x 255 inputs would lead to more accurate results
but require a longer time to train.
"""
data_preprocessing = keras.Sequential(
[
layers.experimental.preprocessing.Resizing(target_size, target_size),
layers.experimental.preprocessing.Normalization(),
]
)
# Compute the mean and the variance from the data for normalization.
data_preprocessing.layers[-1].adapt(x_data)
"""
## Data augmentation
Unlike simCLR, which randomly picks a single data augmentation function to apply to an input
image, we apply a set of data augmentation functions randomly to the input image.
(You can experiment with other image augmentation techniques by following
the [data augmentation tutorial](https://www.tensorflow.org/tutorials/images/data_augmentation).)
"""
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomTranslation(
height_factor=(-0.2, 0.2), width_factor=(-0.2, 0.2), fill_mode="nearest"
),
layers.experimental.preprocessing.RandomFlip(mode="horizontal"),
layers.experimental.preprocessing.RandomRotation(
factor=0.15, fill_mode="nearest"
),
layers.experimental.preprocessing.RandomZoom(
height_factor=(-0.3, 0.1), width_factor=(-0.3, 0.1), fill_mode="nearest"
),
]
)
"""
Display a random image
"""
image_idx = np.random.choice(range(x_data.shape[0]))
image = x_data[image_idx]
image_class = classes[y_data[image_idx][0]]
plt.figure(figsize=(3, 3))
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(image_class)
_ = plt.axis("off")
"""
Display a sample of augmented versions of the image
"""
plt.figure(figsize=(10, 10))
for i in range(9):
augmented_images = data_augmentation(np.array([image]))
ax = plt.subplot(3, 3, i + 1)
plt.imshow(augmented_images[0].numpy().astype("uint8"))
plt.axis("off")
"""
## Self-supervised representation learning
"""
"""
### Implement the vision encoder
"""
def create_encoder(representation_dim):
encoder = keras.Sequential(
[
keras.applications.ResNet50V2(
include_top=False, weights=None, pooling="avg"
),
layers.Dense(representation_dim),
]
)
return encoder
"""
### Implement the unsupervised contrastive loss
"""
class RepresentationLearner(keras.Model):
def __init__(
self,
encoder,
projection_units,
num_augmentations,
temperature=1.0,
dropout_rate=0.1,
l2_normalize=False,
**kwargs
):
super(RepresentationLearner, self).__init__(**kwargs)
self.encoder = encoder
# Create projection head.
self.projector = keras.Sequential(
[
layers.Dropout(dropout_rate),
layers.Dense(units=projection_units, use_bias=False),
layers.BatchNormalization(),
layers.ReLU(),
]
)
self.num_augmentations = num_augmentations
self.temperature = temperature
self.l2_normalize = l2_normalize
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def compute_contrastive_loss(self, feature_vectors, batch_size):
num_augmentations = tf.shape(feature_vectors)[0] // batch_size
if self.l2_normalize:
feature_vectors = tf.math.l2_normalize(feature_vectors, -1)
# The logits shape is [num_augmentations * batch_size, num_augmentations * batch_size].
logits = (
tf.linalg.matmul(feature_vectors, feature_vectors, transpose_b=True)
/ self.temperature
)
# Apply log-max trick for numerical stability.
logits_max = tf.math.reduce_max(logits, axis=1)
logits = logits - logits_max
# The shape of targets is [num_augmentations * batch_size, num_augmentations * batch_size].
        # targets is a matrix consisting of num_augmentations submatrices of shape [batch_size * batch_size].
# Each [batch_size * batch_size] submatrix is an identity matrix (diagonal entries are ones).
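        # For example, with batch_size = 2 and num_augmentations = 2, targets is
        #     [[1, 0, 1, 0],
        #      [0, 1, 0, 1],
        #      [1, 0, 1, 0],
        #      [0, 1, 0, 1]]
        # so each feature vector is matched with itself and with the other augmented
        # view of the same image (rows i and i + batch_size come from the same image).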
targets = tf.tile(tf.eye(batch_size), [num_augmentations, num_augmentations])
# Compute cross entropy loss
return keras.losses.categorical_crossentropy(
y_true=targets, y_pred=logits, from_logits=True
)
def call(self, inputs):
# Preprocess the input images.
preprocessed = data_preprocessing(inputs)
# Create augmented versions of the images.
augmented = []
for _ in range(self.num_augmentations):
augmented.append(data_augmentation(preprocessed))
augmented = layers.Concatenate(axis=0)(augmented)
# Generate embedding representations of the images.
features = self.encoder(augmented)
# Apply projection head.
return self.projector(features)
def train_step(self, inputs):
batch_size = tf.shape(inputs)[0]
# Run the forward pass and compute the contrastive loss
with tf.GradientTape() as tape:
feature_vectors = self(inputs, training=True)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update loss tracker metric
self.loss_tracker.update_state(loss)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
def test_step(self, inputs):
batch_size = tf.shape(inputs)[0]
feature_vectors = self(inputs, training=False)
loss = self.compute_contrastive_loss(feature_vectors, batch_size)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
"""
### Train the model
"""
# Create vision encoder.
encoder = create_encoder(representation_dim)
# Create representation learner.
representation_learner = RepresentationLearner(
encoder, projection_units, num_augmentations=2, temperature=0.1
)
# Create a cosine decay learning rate scheduler.
lr_scheduler = keras.experimental.CosineDecay(
initial_learning_rate=0.001, decay_steps=500, alpha=0.1
)
# Compile the model.
representation_learner.compile(
optimizer=tfa.optimizers.AdamW(learning_rate=lr_scheduler, weight_decay=0.0001),
)
# Fit the model.
history = representation_learner.fit(
x=x_data,
batch_size=512,
epochs=50, # for better results, increase the number of epochs to 500.
)
"""
Plot training loss
"""
plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
"""
## Compute the nearest neighbors
"""
"""
### Generate the embeddings for the images
"""
batch_size = 500
# Get the feature vector representations of the images.
feature_vectors = encoder.predict(x_data, batch_size=batch_size, verbose=1)
# Normalize the feature vectors.
feature_vectors = tf.math.l2_normalize(feature_vectors, -1)
"""
### Find the *k* nearest neighbours for each embedding
"""
neighbours = []
num_batches = feature_vectors.shape[0] // batch_size
for batch_idx in tqdm(range(num_batches)):
start_idx = batch_idx * batch_size
end_idx = start_idx + batch_size
current_batch = feature_vectors[start_idx:end_idx]
# Compute the dot similarity.
similarities = tf.linalg.matmul(current_batch, feature_vectors, transpose_b=True)
# Get the indices of most similar vectors.
_, indices = tf.math.top_k(similarities, k=k_neighbours + 1, sorted=True)
# Add the indices to the neighbours.
neighbours.append(indices[..., 1:])
neighbours = np.reshape(np.array(neighbours), (-1, k_neighbours))
"""
Let's display some neighbors on each row
"""
nrows = 4
ncols = k_neighbours + 1
plt.figure(figsize=(12, 12))
position = 1
for _ in range(nrows):
anchor_idx = np.random.choice(range(x_data.shape[0]))
neighbour_indicies = neighbours[anchor_idx]
indices = [anchor_idx] + neighbour_indicies.tolist()
for j in range(ncols):
plt.subplot(nrows, ncols, position)
plt.imshow(x_data[indices[j]].astype("uint8"))
plt.title(classes[y_data[indices[j]][0]])
plt.axis("off")
position += 1
"""
Notice that the images on each row are visually similar and belong to similar classes.
"""
"""
## Semantic clustering with nearest neighbours
"""
"""
### Implement clustering consistency loss
This loss tries to make sure that neighbours have the same clustering assignments.
"""
class ClustersConsistencyLoss(keras.losses.Loss):
def __init__(self):
super(ClustersConsistencyLoss, self).__init__()
def __call__(self, target, similarity, sample_weight=None):
# Set targets to be ones.
target = tf.ones_like(similarity)
# Compute cross entropy loss.
loss = keras.losses.binary_crossentropy(
y_true=target, y_pred=similarity, from_logits=True
)
return tf.math.reduce_mean(loss)
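"""
A quick illustrative check (a sketch, outside the training pipeline): the loss treats
each anchor-to-neighbour similarity as a logit with a target of 1, so agreeing cluster
assignments (dot products close to 1) are penalized less than disagreeing ones (dot
products close to 0). The first argument of the loss is ignored, since the target is
rebuilt internally.
"""
_agree = tf.constant([[1.0, 1.0]])  # anchor agrees with both of its neighbours
_disagree = tf.constant([[0.0, 0.0]])  # anchor disagrees with both of its neighbours
print(ClustersConsistencyLoss()(None, _agree).numpy())  # smaller loss
print(ClustersConsistencyLoss()(None, _disagree).numpy())  # larger loss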
"""
### Implement the clusters entropy loss
This loss tries to make sure that the cluster distribution is roughly uniform, to avoid
assigning most of the instances to one cluster.
"""
class ClustersEntropyLoss(keras.losses.Loss):
def __init__(self, entropy_loss_weight=1.0):
super(ClustersEntropyLoss, self).__init__()
self.entropy_loss_weight = entropy_loss_weight
def __call__(self, target, cluster_probabilities, sample_weight=None):
# Ideal entropy = log(num_clusters).
num_clusters = tf.cast(tf.shape(cluster_probabilities)[-1], tf.dtypes.float32)
target = tf.math.log(num_clusters)
# Compute the overall clusters distribution.
cluster_probabilities = tf.math.reduce_mean(cluster_probabilities, axis=0)
# Replacing zero probabilities - if any - with a very small value.
cluster_probabilities = tf.clip_by_value(
cluster_probabilities, clip_value_min=1e-8, clip_value_max=1.0
)
# Compute the entropy over the clusters.
entropy = -tf.math.reduce_sum(
cluster_probabilities * tf.math.log(cluster_probabilities)
)
# Compute the difference between the target and the actual.
loss = target - entropy
return loss
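"""
Another illustrative check (a sketch): a perfectly uniform cluster distribution reaches
the ideal entropy log(num_clusters), giving a loss close to 0, while collapsing every
instance into a single cluster is penalized by roughly log(num_clusters).
"""
_uniform = tf.ones((8, num_clusters)) / num_clusters
_collapsed = tf.one_hot(tf.zeros(8, dtype=tf.int32), num_clusters)
print(ClustersEntropyLoss()(None, _uniform).numpy())  # ~0.0
print(ClustersEntropyLoss()(None, _collapsed).numpy())  # ~log(num_clusters)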
"""
### Implement clustering model
This model takes a raw image as an input, generates its feature vector using the trained
encoder, and produces a probability distribution over the clusters given the feature vector
as the cluster assignments.
"""
def create_clustering_model(encoder, num_clusters, name=None):
inputs = keras.Input(shape=input_shape)
# Preprocess the input images.
preprocessed = data_preprocessing(inputs)
# Apply data augmentation to the images.
augmented = data_augmentation(preprocessed)
# Generate embedding representations of the images.
features = encoder(augmented)
# Assign the images to clusters.
outputs = layers.Dense(units=num_clusters, activation="softmax")(features)
# Create the model.
model = keras.Model(inputs=inputs, outputs=outputs, name=name)
return model
"""
### Implement clustering learner
This model receives the input `anchor` image and its `neighbours`, produces the cluster
assignments for them using the `clustering_model`, and produces two outputs:
1. `similarity`: the similarity between the cluster assignments of the `anchor` image and
its `neighbours`. This output is fed to the `ClustersConsistencyLoss`.
2. `anchor_clustering`: cluster assignments of the `anchor` images. This is fed to the `ClustersEntropyLoss`.
"""
def create_clustering_learner(clustering_model):
anchor = keras.Input(shape=input_shape, name="anchors")
neighbours = keras.Input(
shape=tuple([k_neighbours]) + input_shape, name="neighbours"
)
# Changes neighbours shape to [batch_size * k_neighbours, width, height, channels]
neighbours_reshaped = tf.reshape(neighbours, shape=tuple([-1]) + input_shape)
# anchor_clustering shape: [batch_size, num_clusters]
anchor_clustering = clustering_model(anchor)
# neighbours_clustering shape: [batch_size * k_neighbours, num_clusters]
neighbours_clustering = clustering_model(neighbours_reshaped)
# Convert neighbours_clustering shape to [batch_size, k_neighbours, num_clusters]
neighbours_clustering = tf.reshape(
neighbours_clustering,
shape=(-1, k_neighbours, tf.shape(neighbours_clustering)[-1]),
)
# similarity shape: [batch_size, 1, k_neighbours]
similarity = tf.linalg.einsum(
"bij,bkj->bik", tf.expand_dims(anchor_clustering, axis=1), neighbours_clustering
)
# similarity shape: [batch_size, k_neighbours]
similarity = layers.Lambda(lambda x: tf.squeeze(x, axis=1), name="similarity")(
similarity
)
# Create the model.
model = keras.Model(
inputs=[anchor, neighbours],
outputs=[similarity, anchor_clustering],
name="clustering_learner",
)
return model
"""
### Train model
"""
# If tune_encoder_during_clustering is set to False,
# then freeze the encoder weights.
for layer in encoder.layers:
layer.trainable = tune_encoder_during_clustering
# Create the clustering model and learner.
clustering_model = create_clustering_model(encoder, num_clusters, name="clustering")
clustering_learner = create_clustering_learner(clustering_model)
# Instantiate the model losses.
losses = [ClustersConsistencyLoss(), ClustersEntropyLoss(entropy_loss_weight=5)]
# Create the model inputs and labels.
inputs = {"anchors": x_data, "neighbours": tf.gather(x_data, neighbours)}
labels = tf.ones(shape=(x_data.shape[0]))
# Compile the model.
clustering_learner.compile(
optimizer=tfa.optimizers.AdamW(learning_rate=0.0005, weight_decay=0.0001),
loss=losses,
)
# Begin training the model.
history = clustering_learner.fit(x=inputs, y=labels, batch_size=512, epochs=50)
"""
Plot training loss
"""
plt.plot(history.history["loss"])
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
"""
## Cluster analysis
"""
"""
### Assign images to clusters
"""
# Get the cluster probability distribution of the input images.
clustering_probs = clustering_model.predict(x_data, batch_size=batch_size, verbose=1)
# Get the cluster of the highest probability.
cluster_assignments = tf.math.argmax(clustering_probs, axis=-1).numpy()
# Store the clustering confidence.
# Images with the highest clustering confidence are considered the 'prototypes'
# of the clusters.
cluster_confidence = tf.math.reduce_max(clustering_probs, axis=-1).numpy()
"""
Let's compute the cluster sizes
"""
clusters = defaultdict(list)
for idx, c in enumerate(cluster_assignments):
clusters[c].append((idx, cluster_confidence[idx]))
for c in range(num_clusters):
print("cluster", c, ":", len(clusters[c]))
"""
Notice that the clusters have roughly balanced sizes.
"""
"""
### Visualize cluster images
Display the *prototypes*—instances with the highest clustering confidence—of each cluster:
"""
num_images = 8
plt.figure(figsize=(15, 15))
position = 1
for c in range(num_clusters):
cluster_instances = sorted(clusters[c], key=lambda kv: kv[1], reverse=True)
for j in range(num_images):
image_idx = cluster_instances[j][0]
plt.subplot(num_clusters, num_images, position)
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(classes[y_data[image_idx][0]])
plt.axis("off")
position += 1
"""
### Compute clustering accuracy
First, we assign a label for each cluster based on the majority label of its images.
Then, we compute the accuracy of each cluster by dividing the number of images with the
majority label by the size of the cluster.
"""
cluster_label_counts = dict()
for c in range(num_clusters):
cluster_label_counts[c] = [0] * num_classes
instances = clusters[c]
for i, _ in instances:
cluster_label_counts[c][y_data[i][0]] += 1
cluster_label_idx = np.argmax(cluster_label_counts[c])
correct_count = np.max(cluster_label_counts[c])
cluster_size = len(clusters[c])
accuracy = (
np.round((correct_count / cluster_size) * 100, 2) if cluster_size > 0 else 0
)
cluster_label = classes[cluster_label_idx]
print("cluster", c, "label is:", cluster_label, " - accuracy:", accuracy, "%")
"""
## Conclusion
To improve the accuracy results, you can: 1) increase the number
of epochs in the representation learning and the clustering phases; 2)
allow the encoder weights to be tuned during the clustering phase; and 3) perform a final
fine-tuning step through self-labeling, as described in the [original SCAN paper](https://arxiv.org/abs/2005.12320).
Note that unsupervised image clustering techniques are not expected to outperform the accuracy
of supervised image classification techniques; rather, the goal is to show that they can learn the semantics
of the images and group them into clusters that are similar to their original classes.
"""
| apache-2.0 | 6,607,691,726,261,113,000 | 31.750419 | 116 | 0.691387 | false | 3.688361 | false | false | false |
ViDA-NYU/domain_discovery_API | elastic/get_documents.py | 1 | 5691 | #!/usr/bin/python
from os import environ
import sys
from config import es as default_es
from pprint import pprint
def get_documents(terms, term_field, fields=["text"], es_index='memex', es_doc_type='page', es=None):
if es is None:
es = default_es
results = {}
if len(terms) > 0:
for term in terms:
query = {
"query": {
"term": {
term_field: term
}
},
"fields": fields
}
res = es.search(body=query,
index=es_index,
doc_type=es_doc_type)
if res['hits']['hits']:
hits = res['hits']['hits']
records = []
for hit in hits:
record = {}
if not hit.get('fields') is None:
record = hit['fields']
record['id'] =hit['_id']
records.append(record)
results[term] = records
return results
def get_more_like_this(urls, fields=[], pageCount=200, es_index='memex', es_doc_type='page', es=None):
if es is None:
es = default_es
docs = [{"_index": es_index, "_type": es_doc_type, "_id": url} for url in urls]
with open(environ['DD_API_HOME']+'/elastic/stopwords.txt', 'r') as f:
stopwords = [word.strip() for word in f.readlines()]
query = {
"query":{
"more_like_this": {
"fields" : ["text"],
"docs": docs,
"min_term_freq": 1,
"stop_words": stopwords
}
},
"fields": fields,
"size": pageCount
}
res = es.search(body=query, index = es_index, doc_type = es_doc_type)
hits = res['hits']['hits']
results = []
for hit in hits:
fields = hit['fields']
fields['id'] = hit['_id']
fields['score'] = hit['_score']
results.append(fields)
return results
def get_most_recent_documents(start=0, opt_maxNumberOfPages = 200, mapping=None, fields = [], opt_filter = None, es_index = 'memex', es_doc_type = 'page', es = None):
if mapping == None:
print "No mappings found"
return []
if es is None:
es = default_es
query = {
"size": opt_maxNumberOfPages,
"sort": [
{
mapping["timestamp"]: {
"order": "desc"
}
}
]
}
match_q = {
"match_all": {}
}
if not mapping.get("content_type") is None:
match_q = {
"match": {
mapping["content_type"]: "text/html"
}
}
if opt_filter is None:
query["query"] = {
"filtered": {
"query": match_q,
"filter":{
"exists": {
"field": mapping['text']
}
}
}
}
else:
query["query"] = {
"query_string": {
"query": "(" + mapping['text'] + ":" + opt_filter.replace('"', '\"') + ")"
}
}
if len(fields) > 0:
query["fields"] = fields
res = es.search(body=query, index = es_index, doc_type = es_doc_type, from_=start, request_timeout=600)
hits = res['hits']['hits']
results = []
for hit in hits:
fields = hit['fields']
fields['id'] = hit['_id']
results.append(fields)
return {"total": res['hits']['total'], 'results':results}
def get_all_ids(pageCount = 100000, fields=[], es_index = 'memex', es_doc_type = 'page', es = None):
if es is None:
es = default_es
query = {
"query": {
"match_all": {}
},
"fields": fields
}
try:
res = es.search(body=query, index = es_index, doc_type = es_doc_type, size = pageCount, request_timeout=600)
hits = res['hits']['hits']
results = []
for hit in hits:
fields = hit['fields']
fields['id'] = hit['_id']
results.append(fields)
return results
except:
print("Unexpected error:", sys.exc_info()[0])
print es_index
return []
def get_documents_by_id(ids=[], fields=[], es_index = 'memex', es_doc_type = 'page', es = None):
if es is None:
es = default_es
query = {
"query": {
"ids": {
"values": ids
}
},
"fields": fields
}
res = es.search(body=query, index = es_index, doc_type = es_doc_type, size=len(ids), request_timeout=30)
hits = res['hits']['hits']
results = []
for hit in hits:
if hit.get('fields'):
fields = hit['fields']
fields['id'] = hit['_id']
results.append(fields)
return results
def get_plotting_data(pageCount=200, es_index = 'memex', es_doc_type = 'page', es = None):
if es is None:
es = default_es
res = es.search(index=es_index, doc_type = es_doc_type, size=pageCount, fields=["retrieved", "url", "tag", "query"])
fields = []
for item in res['hits']['hits']:
if item['fields'].get('tag') != None:
if "" in item['fields']['tag']:
item['fields'].pop('tag')
fields.append(item['fields'])
return fields
if __name__ == "__main__":
urls = []
with open(environ['MEMEX_HOME']+'/seed_crawler/seeds_generator/results.txt', 'r') as f:
urls = f.readlines()
urls = [url.strip() for url in urls]
docs = get_documents(urls)
| gpl-3.0 | 3,535,834,563,850,043,000 | 25.469767 | 166 | 0.465648 | false | 3.832323 | false | false | false |
tomtor/QGIS | python/console/console_settings.py | 25 | 10378 | # -*- coding:utf-8 -*-
"""
/***************************************************************************
Python Console for QGIS
-------------------
begin : 2012-09-10
copyright : (C) 2012 by Salvatore Larosa
email : lrssvtml (at) gmail (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Some portions of code were taken from https://code.google.com/p/pydee/
"""
from qgis.PyQt.QtCore import QCoreApplication, QUrl
from qgis.PyQt.QtWidgets import QWidget, QFileDialog, QMessageBox, QTableWidgetItem, QHBoxLayout
from qgis.PyQt.QtGui import QIcon, QDesktopServices
from qgis.core import QgsSettings, QgsApplication
from qgis.gui import QgsOptionsPageWidget, QgsOptionsWidgetFactory
from .console_compile_apis import PrepareAPIDialog
from .ui_console_settings import Ui_SettingsDialogPythonConsole
class ConsoleOptionsFactory(QgsOptionsWidgetFactory):
def __init__(self):
super(QgsOptionsWidgetFactory, self).__init__()
def icon(self):
return QgsApplication.getThemeIcon('/console/mIconRunConsole.svg')
def createWidget(self, parent):
return ConsoleOptionsPage(parent)
class ConsoleOptionsPage(QgsOptionsPageWidget):
def __init__(self, parent):
super(ConsoleOptionsPage, self).__init__(parent)
self.options_widget = ConsoleOptionsWidget(parent)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setMargin(0)
self.setLayout(layout)
layout.addWidget(self.options_widget)
self.setObjectName('consoleOptions')
def apply(self):
self.options_widget.accept()
def helpKey(self):
return 'plugins/python_console.html'
class ConsoleOptionsWidget(QWidget, Ui_SettingsDialogPythonConsole):
def __init__(self, parent):
super().__init__(parent)
self.setWindowTitle(QCoreApplication.translate(
"SettingsDialogPythonConsole", "Python Console Settings"))
self.parent = parent
self.setupUi(self)
self.listPath = []
self.lineEdit.setReadOnly(True)
self.restoreSettings()
self.initialCheck()
self.addAPIpath.setIcon(QIcon(":/images/themes/default/symbologyAdd.svg"))
self.addAPIpath.setToolTip(QCoreApplication.translate("PythonConsole", "Add API path"))
self.removeAPIpath.setIcon(QIcon(":/images/themes/default/symbologyRemove.svg"))
self.removeAPIpath.setToolTip(QCoreApplication.translate("PythonConsole", "Remove API path"))
self.preloadAPI.stateChanged.connect(self.initialCheck)
self.addAPIpath.clicked.connect(self.loadAPIFile)
self.removeAPIpath.clicked.connect(self.removeAPI)
self.compileAPIs.clicked.connect(self._prepareAPI)
self.generateToken.clicked.connect(self.generateGHToken)
def generateGHToken(self):
description = self.tr("PyQGIS Console")
url = 'https://github.com/settings/tokens/new?description={}&scopes=gist'.format(description)
QDesktopServices.openUrl(QUrl(url))
def initialCheck(self):
if self.preloadAPI.isChecked():
self.enableDisable(False)
else:
self.enableDisable(True)
def enableDisable(self, value):
self.tableWidget.setEnabled(value)
self.addAPIpath.setEnabled(value)
self.removeAPIpath.setEnabled(value)
self.groupBoxPreparedAPI.setEnabled(value)
def loadAPIFile(self):
settings = QgsSettings()
lastDirPath = settings.value("pythonConsole/lastDirAPIPath", "", type=str)
fileAPI, selected_filter = QFileDialog.getOpenFileName(
self, "Open API File", lastDirPath, "API file (*.api)")
if fileAPI:
self.addAPI(fileAPI)
settings.setValue("pythonConsole/lastDirAPIPath", fileAPI)
def _prepareAPI(self):
if self.tableWidget.rowCount() != 0:
pap_file, filter = QFileDialog().getSaveFileName(
self,
"",
'*.pap',
"Prepared APIs file (*.pap)")
else:
QMessageBox.information(
self, self.tr("Warning!"),
self.tr('You need to add some APIs file in order to compile'))
return
if pap_file:
api_lexer = 'QsciLexerPython'
api_files = []
count = self.tableWidget.rowCount()
for i in range(0, count):
api_files.append(self.tableWidget.item(i, 1).text())
api_dlg = PrepareAPIDialog(api_lexer, api_files, pap_file, self)
api_dlg.show()
api_dlg.activateWindow()
api_dlg.raise_()
api_dlg.prepareAPI()
self.lineEdit.setText(pap_file)
def accept(self):
if not self.preloadAPI.isChecked() and \
not self.groupBoxPreparedAPI.isChecked():
if self.tableWidget.rowCount() == 0:
QMessageBox.information(
self, self.tr("Warning!"),
self.tr('Please specify API file or check "Use preloaded API files"'))
return
if self.groupBoxPreparedAPI.isChecked() and \
not self.lineEdit.text():
QMessageBox.information(
self, self.tr("Warning!"),
QCoreApplication.translate('optionsDialog', 'The APIs file was not compiled, click on "Compile APIs…"')
)
return
self.saveSettings()
self.listPath = []
def addAPI(self, pathAPI):
count = self.tableWidget.rowCount()
self.tableWidget.setColumnCount(2)
self.tableWidget.insertRow(count)
pathItem = QTableWidgetItem(pathAPI)
pathSplit = pathAPI.split("/")
apiName = pathSplit[-1][0:-4]
apiNameItem = QTableWidgetItem(apiName)
self.tableWidget.setItem(count, 0, apiNameItem)
self.tableWidget.setItem(count, 1, pathItem)
def removeAPI(self):
listItemSel = self.tableWidget.selectionModel().selectedRows()
for index in reversed(listItemSel):
self.tableWidget.removeRow(index.row())
def saveSettings(self):
settings = QgsSettings()
settings.setValue("pythonConsole/preloadAPI", self.preloadAPI.isChecked())
settings.setValue("pythonConsole/autoSaveScript", self.autoSaveScript.isChecked())
settings.setValue("pythonConsole/accessTokenGithub", self.tokenGhLineEdit.text())
for i in range(0, self.tableWidget.rowCount()):
text = self.tableWidget.item(i, 1).text()
self.listPath.append(text)
settings.setValue("pythonConsole/userAPI", self.listPath)
settings.setValue("pythonConsole/autoCompThreshold", self.autoCompThreshold.value())
settings.setValue("pythonConsole/autoCompleteEnabled", self.groupBoxAutoCompletion.isChecked())
settings.setValue("pythonConsole/usePreparedAPIFile", self.groupBoxPreparedAPI.isChecked())
settings.setValue("pythonConsole/preparedAPIFile", self.lineEdit.text())
if self.autoCompFromAPI.isChecked():
settings.setValue("pythonConsole/autoCompleteSource", 'fromAPI')
elif self.autoCompFromDoc.isChecked():
settings.setValue("pythonConsole/autoCompleteSource", 'fromDoc')
elif self.autoCompFromDocAPI.isChecked():
settings.setValue("pythonConsole/autoCompleteSource", 'fromDocAPI')
settings.setValue("pythonConsole/enableObjectInsp", self.enableObjectInspector.isChecked())
settings.setValue("pythonConsole/autoCloseBracket", self.autoCloseBracket.isChecked())
settings.setValue("pythonConsole/autoInsertionImport", self.autoInsertionImport.isChecked())
def restoreSettings(self):
settings = QgsSettings()
self.preloadAPI.setChecked(settings.value("pythonConsole/preloadAPI", True, type=bool))
self.lineEdit.setText(settings.value("pythonConsole/preparedAPIFile", "", type=str))
self.tokenGhLineEdit.setText(settings.value("pythonConsole/accessTokenGithub", "", type=str))
itemTable = settings.value("pythonConsole/userAPI", [])
if itemTable:
self.tableWidget.setRowCount(0)
for i in range(len(itemTable)):
self.tableWidget.insertRow(i)
self.tableWidget.setColumnCount(2)
pathSplit = itemTable[i].split("/")
apiName = pathSplit[-1][0:-4]
self.tableWidget.setItem(i, 0, QTableWidgetItem(apiName))
self.tableWidget.setItem(i, 1, QTableWidgetItem(itemTable[i]))
self.autoSaveScript.setChecked(settings.value("pythonConsole/autoSaveScript", False, type=bool))
self.autoCompThreshold.setValue(settings.value("pythonConsole/autoCompThreshold", 2, type=int))
self.groupBoxAutoCompletion.setChecked(settings.value("pythonConsole/autoCompleteEnabled", True, type=bool))
self.enableObjectInspector.setChecked(settings.value("pythonConsole/enableObjectInsp", False, type=bool))
self.autoCloseBracket.setChecked(settings.value("pythonConsole/autoCloseBracket", False, type=bool))
self.autoInsertionImport.setChecked(settings.value("pythonConsole/autoInsertionImport", True, type=bool))
if settings.value("pythonConsole/autoCompleteSource") == 'fromDoc':
self.autoCompFromDoc.setChecked(True)
elif settings.value("pythonConsole/autoCompleteSource") == 'fromAPI':
self.autoCompFromAPI.setChecked(True)
elif settings.value("pythonConsole/autoCompleteSource") == 'fromDocAPI':
self.autoCompFromDocAPI.setChecked(True)
| gpl-2.0 | -6,853,548,758,034,385,000 | 43.34188 | 119 | 0.630301 | false | 4.352349 | false | false | false |
Stanford-Online/edx-ora2 | openassessment/xblock/schema.py | 1 | 4867 | """
Schema for validating and sanitizing data received from the JavaScript client.
"""
import dateutil
from pytz import utc
from voluptuous import All, Any, In, Invalid, Range, Required, Schema
def utf8_validator(value):
"""Validate and sanitize unicode strings.
If we're given a bytestring, assume that the encoding is UTF-8
Args:
value: The value to validate
Returns:
unicode
Raises:
Invalid
"""
try:
if isinstance(value, str):
return value.decode('utf-8')
else:
return unicode(value)
except (ValueError, TypeError):
raise Invalid(u"Could not load unicode from value \"{val}\"".format(val=value))
def datetime_validator(value):
"""Validate and sanitize a datetime string in ISO format.
Args:
value: The value to validate
Returns:
unicode: ISO-formatted datetime string
Raises:
Invalid
"""
try:
# The dateutil parser defaults empty values to the current day,
# which is NOT what we want.
if value is None or value == '':
raise Invalid(u"Datetime value cannot be \"{val}\"".format(val=value))
# Parse the date and interpret it as UTC
value = dateutil.parser.parse(value).replace(tzinfo=utc)
return unicode(value.isoformat())
except (ValueError, TypeError):
raise Invalid(u"Could not parse datetime from value \"{val}\"".format(val=value))
PROMPTS_TYPES = [
u'text',
u'html',
]
NECESSITY_OPTIONS = [
u'required',
u'optional',
u''
]
VALID_ASSESSMENT_TYPES = [
u'peer-assessment',
u'self-assessment',
u'student-training',
u'staff-assessment',
]
VALID_UPLOAD_FILE_TYPES = [
u'image',
u'pdf-and-image',
u'custom'
]
# Schema definition for an update from the Studio JavaScript editor.
EDITOR_UPDATE_SCHEMA = Schema({
Required('prompts'): [
Schema({
Required('description'): utf8_validator,
})
],
Required('prompts_type', default='text'): Any(All(utf8_validator, In(PROMPTS_TYPES)), None),
Required('title'): utf8_validator,
Required('feedback_prompt'): utf8_validator,
Required('feedback_default_text'): utf8_validator,
Required('submission_start'): Any(datetime_validator, None),
Required('submission_due'): Any(datetime_validator, None),
Required('text_response', default='required'): Any(All(utf8_validator, In(NECESSITY_OPTIONS)), None),
Required('file_upload_response', default=None): Any(All(utf8_validator, In(NECESSITY_OPTIONS)), None),
'allow_file_upload': bool, # Backwards compatibility.
Required('file_upload_type', default=None): Any(All(utf8_validator, In(VALID_UPLOAD_FILE_TYPES)), None),
'white_listed_file_types': utf8_validator,
Required('allow_latex'): bool,
Required('leaderboard_show'): int,
Required('assessments'): [
Schema({
Required('name'): All(utf8_validator, In(VALID_ASSESSMENT_TYPES)),
Required('start', default=None): Any(datetime_validator, None),
Required('due', default=None): Any(datetime_validator, None),
'required': bool,
'must_grade': All(int, Range(min=0)),
'must_be_graded_by': All(int, Range(min=0)),
'track_changes': utf8_validator,
'examples': [
Schema({
Required('answer'): [utf8_validator],
Required('options_selected'): [
Schema({
Required('criterion'): utf8_validator,
Required('option'): utf8_validator
})
]
})
],
'examples_xml': utf8_validator,
})
],
Required('editor_assessments_order'): [
All(utf8_validator, In(VALID_ASSESSMENT_TYPES))
],
Required('feedbackprompt', default=u""): utf8_validator,
Required('criteria'): [
Schema({
Required('order_num'): All(int, Range(min=0)),
Required('name'): utf8_validator,
Required('label'): utf8_validator,
Required('prompt'): utf8_validator,
Required('feedback'): All(
utf8_validator,
In([
'disabled',
'optional',
'required',
])
),
Required('options'): [
Schema({
Required('order_num'): All(int, Range(min=0)),
Required('name'): utf8_validator,
Required('label'): utf8_validator,
Required('explanation'): utf8_validator,
Required('points'): All(int, Range(min=0)),
})
]
})
]
})
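# A minimal usage sketch (not part of the original module; `payload` stands for an
# illustrative dict received from the Studio JavaScript editor):
#
#     cleaned = EDITOR_UPDATE_SCHEMA(payload)
#
# Calling the schema validates and sanitizes the payload, raising a voluptuous
# Invalid/MultipleInvalid error if any field fails validation.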
| agpl-3.0 | -1,597,207,349,564,080,000 | 30 | 108 | 0.564208 | false | 4.213853 | false | false | false |
metaperl/karatbars-utils | k0de/upgraded/login.py | 1 | 1933 | #!/usr/bin/python
# system
from collections import defaultdict
from functools import wraps
import pdb
import pprint
import re
import sys
import time
import traceback
# pypi
from splinter import Browser
from treelib import Tree
# local
import user as userdata
import list_to_tree
pp = pprint.PrettyPrinter(indent=4)
base_url = 'http://www.karatbars.com'
action_path = dict(
login = "index.php?page=login_1",
binary = "members.php?page=binarytree"
)
def url_for_action(action):
return "{0}/{1}".format(base_url,action_path[action])
def try_method(fn):
@wraps(fn)
def wrapper(self):
try:
return fn(self)
except:
print traceback.format_exc()
self.visit_auction()
return wrapper
class Entry(object):
def __init__(self, user, browser):
self.user=user
self.browser=browser
def login(self):
print "Logging in..."
self.browser.visit(url_for_action('login'))
self.browser.fill('username', self.user['username'])
self.browser.fill('password', self.user['password'])
button = self.browser.find_by_id('btn_login')
button.click()
def visit_binary(self):
self.browser.visit(url_for_action('binary'))
tree = Tree()
while True:
users = self.browser.find_by_css('.binary_text')
users = [u.text for u in users]
l = list_to_tree.ListToTree(users)
l.show()
sleep_time = 5
print "\tSleeping for", sleep_time, "seconds"
time.sleep(sleep_time)
def main(bid_url=None):
with Browser() as browser:
for user in userdata.users:
e = Entry(user, browser)
e.login()
e.visit_binary()
while True: pass
if __name__ == '__main__':
if len(sys.argv) == 2:
bid_url = sys.argv[1]
else:
bid_url = None
main(bid_url)
| mit | -5,121,185,275,655,001,000 | 20.477778 | 60 | 0.58924 | false | 3.592937 | false | false | false |
ychaim/snappy | snAppyModules/snParsers.py | 2 | 33783 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from twisted.python import log
from lxml import etree
from requests import Response
##########################
##########################
##
## each QUERY gets its own little class.
## this is important to keep modularity
##
##########################
class Parser_RPC_Base(object):
""" this wrapper class can provide generic functionality for the
individual API Parser classes""" #
pass
# this is to connect to RPC on another port and parse differently
class Parser_RPC_Start(Parser_RPC_Base):
def parse(self, data2parse):
return data2parse
class Parser_RPC(object):
""" Parser_RPC
This is for communicating with BITCOINDARKD!!!
this talks to BTCD via RPC! used for start only atm
"""#
ql777_RPC_start = Parser_RPC_Start()
def __init__(self, environ = {}):
self.environ = environ
def parse_RPC(self, data2parse, requestType2Parse={'requestType':'start'}):
#In [7]: isinstance(ss,bytes)#isinstance(ss,str)
log.msg("----parse_RPC---------> ", data2parse, "requestType2Parse", requestType2Parse)
print(type(data2parse),"\n\n\n")
data = data2parse
if isinstance(data, bytes):
data = data2parse.decode()
try:
bsload=data.split("\r\n\r\n")[1]
bsload1=bsload.replace('null','"null"')
except:
print(5*"\nOOOOOOOOPS parse_RPC")
pass # need better parsing- but this is for start and stop ONLY!
try:
bsdi=eval(bsload1)
print(1*"~~~~~~~bsdi~777~~~~~", bsdi, "\n")
except:
return data.encode("utf-8")
# this takes the raw reply, strips it off header and fillers, evals into a dict
# and hands the dict to the class that is responsible for the particular query
# keep the try except here, but move the RPC to a different parser.!!!
try: # this would be the format that is returned by BTCD RPC on eg port 14632
result=bsdi['result']
data_result=eval(result)
except:# this would be the format that is returned by JL777 http on port 7777
data_result=bsdi
# there is a generic class for parsing each query
if requestType2Parse == 'start': #ToDO privateBet
parsed = self.ql777_RPC_start.parse(data_result)
else:
parsed = 'RAISE_ME_error'
data = str(parsed).encode("utf-8")
return data
##############
class Parser_JL777_Base(object):
""" this wrapper class can provide generic functionality for the
individual API Parser classes
Most of these responses from the SuperNET server are returned as is.
Some of them are internal, and have to fetched from the GUIlopp with GUIpoll.
These need special parsing.
eg PONG, havenode and some others
""" #
# 48 api.h xyz_func calls here + 1 pBET unfinished
# This is from api.h in libjl777 111314
# glue
# // GLUE 7
class Parser_jl777_gotjson(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_gotpacket(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_gotnewpeer(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_BTCDpoll(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_GUIpoll(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_stop(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_settings(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // passthru 2
class Parser_jl777_passthru(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_remote(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // ramchains 11
class Parser_jl777_ramstatus(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramaddrlist(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramstring(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramrawind(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramblock(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramscript(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramtxlist(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramrichlist(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramcompress(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramexpand(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_rambalances(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_rampyramid(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ramresponse(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# multisig MGW 7
class Parser_jl777_genmultisig(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getmsigpubkey(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_MGWaddr(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_MGWresponse(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_setmsigpubkey(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_cosign(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_cosigned(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // IP comms 6
class Parser_jl777_ping(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_pong(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_sendfrag(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_gotfrag(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_startxfer(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getfile(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# Kademlia DHT 8
class Parser_jl777_store(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_findvalue(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_findnode(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_havenode(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_havenodeB(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_findaddress(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_puzzles(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_nonces(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // MofNfs 3
class Parser_jl777_savefile(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_restorefile(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_publish(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // Telepathy 9
class Parser_jl777_getpeers(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_addcontact(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_removecontact(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_dispcontact(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_telepathy(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getdb(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_sendmessage(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_sendbinary(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# // Teleport 3
class Parser_jl777_maketelepods(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_telepodacct(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_teleport(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
#InstantDEX 18
class Parser_jl777_trollbox(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_allorderbooks(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_openorders(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_orderbook(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_placebid(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_placeask(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_makeoffer3(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_respondtx(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_processutx(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_bid(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_ask(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_allsignals(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_lottostats(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_tradehistory(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getsignal(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_cancelquote(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_processjumptrade(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_jumptrades(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
#Tradebot 3
class Parser_jl777_pricedb(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_getquotes(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_tradebot(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# privatebet 1
class Parser_jl777_lotto(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
# embeddedLnags
class Parser_jl777_python(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_syscall(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
class Parser_jl777_checkmsg(Parser_JL777_Base):
def parse(self, data2parse):
return data2parse
##########################
##########################
##
## The Parser_777 Container and Admin class
##
##########################
##########################
class Parser_777(object):
""" Parser_777
// glue
// multisig
// Kademlia DHT
// MofNfs
// Telepathy
// Teleport
// InstantDEX
// Tradebot
// privatebet
"""#
# // glue
ql777_gotjson = Parser_jl777_gotjson()
ql777_gotpacket = Parser_jl777_gotpacket()
ql777_gotnewpeer = Parser_jl777_gotnewpeer()
ql777_BTCDpoll = Parser_jl777_BTCDpoll()
ql777_GUIpoll = Parser_jl777_GUIpoll()
ql777_settings = Parser_jl777_settings()
ql777_stop = Parser_jl777_stop()
ql777_settings = Parser_jl777_settings()
#// ramchains 13
ql777_ramstatus = Parser_jl777_ramstatus()
ql777_ramaddrlist = Parser_jl777_ramaddrlist()
ql777_ramstring = Parser_jl777_ramstring()
ql777_ramrawind = Parser_jl777_ramrawind()
ql777_ramblock = Parser_jl777_ramblock()
ql777_ramscript = Parser_jl777_ramscript()
ql777_ramtxlist = Parser_jl777_ramtxlist()
ql777_ramrichlist = Parser_jl777_ramrichlist()
ql777_ramcompress = Parser_jl777_ramcompress()
ql777_ramexpand = Parser_jl777_ramexpand()
ql777_rambalances = Parser_jl777_rambalances()
ql777_rampyramid = Parser_jl777_rampyramid()
ql777_ramresponse = Parser_jl777_ramresponse()
# // MGW 7
ql777_genmultisig = Parser_jl777_genmultisig()
ql777_getmsigpubkey = Parser_jl777_getmsigpubkey()
ql777_MGWaddr = Parser_jl777_MGWaddr()
ql777_setmsigpubkey = Parser_jl777_setmsigpubkey()
ql777_MGWresponse = Parser_jl777_MGWresponse()
ql777_cosign = Parser_jl777_cosign()
ql777_cosigned = Parser_jl777_cosigned()
# // IPcomms(MGW)
ql777_ping = Parser_jl777_ping()
ql777_pong = Parser_jl777_pong()
ql777_sendfrag = Parser_jl777_sendfrag()
ql777_gotfrag = Parser_jl777_gotfrag()
ql777_startxfer = Parser_jl777_startxfer()
ql777_getfile = Parser_jl777_getfile()
# // Kademlia DHT
ql777_store = Parser_jl777_store()
ql777_findvalue = Parser_jl777_findvalue()
ql777_findnode = Parser_jl777_findnode()
ql777_havenode = Parser_jl777_havenode()
ql777_havenodeB = Parser_jl777_havenodeB()
ql777_findaddress = Parser_jl777_findaddress()
ql777_nonces = Parser_jl777_nonces()
ql777_puzzles = Parser_jl777_puzzles()
# // MofNfs
ql777_savefile = Parser_jl777_savefile()
ql777_restorefile = Parser_jl777_restorefile()
ql777_sendfile = Parser_jl777_publish()
# // Telepathy
ql777_getpeers = Parser_jl777_getpeers()
ql777_addcontact = Parser_jl777_addcontact()
ql777_removecontact = Parser_jl777_removecontact()
ql777_dispcontact = Parser_jl777_dispcontact()
ql777_telepathy = Parser_jl777_telepathy()
ql777_getdb = Parser_jl777_getdb()
ql777_sendmessage = Parser_jl777_sendmessage()
ql777_sendbinary = Parser_jl777_sendbinary()
# // Teleport
ql777_maketelepods = Parser_jl777_maketelepods()
ql777_telepodacct = Parser_jl777_telepodacct()
ql777_teleport = Parser_jl777_teleport()
# // InstantDEX 18
ql777_trollbox = Parser_jl777_trollbox()
ql777_allorderbooks = Parser_jl777_allorderbooks()
ql777_openorders = Parser_jl777_openorders()
ql777_orderbook = Parser_jl777_orderbook()
ql777_placebid = Parser_jl777_placebid()
ql777_placeask = Parser_jl777_placeask()
ql777_makeoffer3 = Parser_jl777_makeoffer3()
ql777_respondtx = Parser_jl777_respondtx()
ql777_processutx = Parser_jl777_processutx()
ql777_bid = Parser_jl777_bid()
ql777_ask = Parser_jl777_ask()
ql777_allsignals = Parser_jl777_allsignals()
ql777_lottostats = Parser_jl777_lottostats()
ql777_tradehistory = Parser_jl777_tradehistory()
ql777_getsignal = Parser_jl777_getsignal()
ql777_cancelquote = Parser_jl777_cancelquote()
ql777_processjumptrade= Parser_jl777_processjumptrade()
ql777_jumptrades = Parser_jl777_jumptrades()
# // Tradebot
ql777_pricedb = Parser_jl777_pricedb()
ql777_getquotes = Parser_jl777_getquotes()
ql777_tradebot = Parser_jl777_tradebot()
# // # privatebet
ql777_lotto = Parser_jl777_lotto()
#// passthru
ql777_passthru = Parser_jl777_passthru()
ql777_remote = Parser_jl777_remote()
ql777_checkmsg = Parser_jl777_checkmsg()
# // Embedded Langs
ql777_python = Parser_jl777_python()
ql777_syscall = Parser_jl777_syscall()
def __init__(self, environ = {}):
self.environ = environ
def parse_777(self, data2parse, requestType2Parse):
""" here we should be flexible as to the data type we get and parse.
so we need some type checking and hand always the same data type to the actual parse functions."""#
log.msg("def parse_777()---------> ", data2parse, "requestType2Parse is: ", requestType2Parse)
try:
log.msg("def parse_777()---------> ", type(data2parse.content), data2parse.json(), data2parse.content)
except Exception as e:
log.msg("except def parse_777()---------> ", data2parse.content)
log.msg("except def parse_777()---------> ", type(data2parse.content))
log.msg("except def parse_777()---------> {0}".format(str(e)))
if isinstance(data2parse, Response):
data2parse = data2parse.json()
parsed_777= self.parseReturnedDict(data2parse, requestType2Parse)
log.msg("type(data2parse): ", type(data2parse))
return str(parsed_777).encode("utf-8")
elif isinstance(data2parse, dict):
parsed_777 = self.parseReturnedDict(data2parse, requestType2Parse)
return str(parsed_777).encode("utf-8")
elif isinstance(data2parse, bytes):
data = data2parse.decode()
bsload=data.split("\r\n\r\n")[1]
bsload1=bsload.replace('null','"null"')
try:
bsdi=eval(bsload1)
except:
return data.encode("utf-8")
try: # this would be the format that is returned by BTCD RPC on eg port 14632
result=bsdi['result']
data2parse=eval(result)
except:# this would be the format that is returned by JL777 http on port 7777
data2parse=bsdi
parsed_777=self.parseReturnedDict(data2parse, requestType2Parse)
return str(parsed_777).encode("utf-8")
def parseReturnedDict(self,data2parse, requestType2Parse):
#print("parseReturnedDict",type(data2parse),"\n\n\n")
# there is a generic class for parsing each query
if requestType2Parse == 'placeLay': #ToDO privateBet
# // # privatebet 1
parsed = self.ql777_placeLay.parse(data2parse)
# // glue 7 ql777_
elif requestType2Parse == 'gotjson':
parsed = self.ql777_gotjson.parse(data2parse)
elif requestType2Parse == 'gotpacket':
parsed = self.ql777_gotpacket.parse(data2parse)
elif requestType2Parse == 'gotnewpeer':
parsed = self.ql777_gotnewpeer.parse(data2parse)
elif requestType2Parse == 'BTCDpoll':
parsed = self.ql777_BTCDpoll.parse(data2parse)
elif requestType2Parse == 'GUIpoll':
parsed = self.ql777_GUIpoll.parse(data2parse)
elif requestType2Parse == 'stop':
parsed = self.ql777_stop.parse(data2parse)
elif requestType2Parse == 'settings':
parsed = self.ql777_settings.parse(data2parse)
# // ramchains 13
elif requestType2Parse == 'ramstatus':
parsed = self.ql777_ramstatus.parse(data2parse)
elif requestType2Parse == 'ramaddrlist':
parsed = self.ql777_ramaddrlist.parse(data2parse)
elif requestType2Parse == 'ramstring':
parsed = self.ql777_ramstring.parse(data2parse)
elif requestType2Parse == 'ramrawind':
parsed = self.ql777_ramrawind.parse(data2parse)
elif requestType2Parse == 'ramblock':
parsed = self.ql777_ramblock.parse(data2parse)
elif requestType2Parse == 'ramscript':
parsed = self.ql777_ramscript.parse(data2parse)
elif requestType2Parse == 'ramtxlist':
parsed = self.ql777_ramtxlist.parse(data2parse)
elif requestType2Parse == 'ramrichlist':
parsed = self.ql777_ramrichlist.parse(data2parse)
elif requestType2Parse == 'ramcompress':
parsed = self.ql777_ramcompress.parse(data2parse)
elif requestType2Parse == 'ramexpand':
parsed = self.ql777_ramexpand.parse(data2parse)
elif requestType2Parse == 'rambalances':
parsed = self.ql777_rambalances.parse(data2parse)
elif requestType2Parse == 'rampyramid':
parsed = self.ql777_rampyramid.parse(data2parse)
elif requestType2Parse == 'ramresponse':
parsed = self.ql777_ramresponse.parse(data2parse)
# // 7 MGW
elif requestType2Parse == 'genmultisig':
parsed = self.ql777_genmultisig.parse(data2parse)
elif requestType2Parse == 'getmsigpubkey':
parsed = self.ql777_getmsigpubkey.parse(data2parse)
elif requestType2Parse == 'MGWaddr':
parsed = self.ql777_MGWaddr.parse(data2parse)
        elif requestType2Parse == 'MGWresponse':
            parsed = self.ql777_MGWresponse.parse(data2parse)
elif requestType2Parse == 'setmsigpubkey':
parsed = self.ql777_setmsigpubkey.parse(data2parse)
elif requestType2Parse == 'cosign':
parsed = self.ql777_cosign.parse(data2parse)
elif requestType2Parse == 'cosigned':
parsed = self.ql777_cosigned.parse(data2parse)
# // IPcomms 6
elif requestType2Parse == 'ping':
parsed = self.ql777_ping.parse(data2parse)
elif requestType2Parse == 'pong':
parsed = self.ql777_pong.parse(data2parse)
elif requestType2Parse == 'sendfrag':
parsed = self.ql777_sendfrag.parse(data2parse)
elif requestType2Parse == 'gotfrag':
parsed = self.ql777_gotfrag.parse(data2parse)
elif requestType2Parse == 'startxfer':
parsed = self.ql777_startxfer.parse(data2parse)
elif requestType2Parse == 'getfile':
parsed = self.ql777_getfile.parse(data2parse)
# // Kademlia DHT 8
elif requestType2Parse == 'store':
parsed = self.ql777_store.parse(data2parse)
elif requestType2Parse == 'findvalue':
parsed = self.ql777_findvalue.parse(data2parse)
elif requestType2Parse == 'findnode':
parsed = self.ql777_findnode.parse(data2parse)
elif requestType2Parse == 'havenode':
parsed = self.ql777_havenode.parse(data2parse)
elif requestType2Parse == 'havenodeB':
parsed = self.ql777_havenodeB.parse(data2parse)
elif requestType2Parse == 'findaddress':
parsed = self.ql777_findaddress.parse(data2parse)
elif requestType2Parse == 'puzzles':
parsed = self.ql777_puzzles.parse(data2parse)
elif requestType2Parse == 'nonces':
parsed = self.ql777_nonces.parse(data2parse)
# // MofNfs 3
elif requestType2Parse == 'savefile':
parsed = self.ql777_savefile.parse(data2parse)
elif requestType2Parse == 'restorefile':
parsed = self.ql777_restorefile.parse(data2parse)
elif requestType2Parse == 'publish':
            parsed = self.ql777_sendfile.parse(data2parse)
# // Telepathy 9
elif requestType2Parse == 'getpeers':
parsed = self.ql777_getpeers.parse(data2parse)
elif requestType2Parse == 'addcontact':
parsed = self.ql777_addcontact.parse(data2parse)
elif requestType2Parse == 'removecontact':
parsed = self.ql777_removecontact.parse(data2parse)
elif requestType2Parse == 'dispcontact':
parsed = self.ql777_dispcontact.parse(data2parse)
elif requestType2Parse == 'telepathy':
parsed = self.ql777_telepathy.parse(data2parse)
elif requestType2Parse == 'getdb':
parsed = self.ql777_getdb.parse(data2parse)
elif requestType2Parse == 'sendmessage':
parsed = self.ql777_sendmessage.parse(data2parse)
elif requestType2Parse == 'sendbinary':
parsed = self.ql777_sendbinary.parse(data2parse)
elif requestType2Parse == 'checkmsg':
parsed = self.ql777_checkmsg.parse(data2parse)
# // Teleport 3
elif requestType2Parse == 'maketelepods':
parsed = self.ql777_maketelepods.parse(data2parse)
elif requestType2Parse == 'telepodacct':
parsed = self.ql777_telepodacct.parse(data2parse)
elif requestType2Parse == 'teleport':
parsed = self.ql777_teleport.parse(data2parse)
# // InstantDEX 18
elif requestType2Parse == 'trollbox':
parsed = self.ql777_trollbox.parse(data2parse)
elif requestType2Parse == 'allorderbooks':
parsed = self.ql777_allorderbooks.parse(data2parse)
elif requestType2Parse == 'openorders':
parsed = self.ql777_openorders.parse(data2parse)
elif requestType2Parse == 'orderbook':
parsed = self.ql777_orderbook.parse(data2parse)
elif requestType2Parse == 'placebid':
parsed = self.ql777_placebid.parse(data2parse)
elif requestType2Parse == 'placeask':
parsed = self.ql777_placeask.parse(data2parse)
elif requestType2Parse == 'makeoffer3':
parsed = self.ql777_makeoffer3.parse(data2parse)
elif requestType2Parse == 'respondtx':
parsed = self.ql777_respondtx.parse(data2parse)
elif requestType2Parse == 'processutx':
parsed = self.ql777_processutx.parse(data2parse)
elif requestType2Parse == 'bid':
parsed = self.ql777_bid.parse(data2parse)
elif requestType2Parse == 'ask':
parsed = self.ql777_ask.parse(data2parse)
elif requestType2Parse == 'allsignals':
parsed = self.ql777_allsignals.parse(data2parse)
elif requestType2Parse == 'lottostats':
parsed = self.ql777_lottostats.parse(data2parse)
elif requestType2Parse == 'tradehistory':
parsed = self.ql777_tradehistory.parse(data2parse)
elif requestType2Parse == 'getsignal':
parsed = self.ql777_getsignal.parse(data2parse)
elif requestType2Parse == 'cancelquote':
parsed = self.ql777_cancelquote.parse(data2parse)
elif requestType2Parse == 'processjumptrade':
parsed = self.ql777_processjumptrade.parse(data2parse)
elif requestType2Parse == 'jumptrades':
parsed = self.ql777_jumptrades.parse(data2parse)
# // Tradebot 3
elif requestType2Parse == 'pricedb':
parsed = self.ql777_pricedb.parse(data2parse)
elif requestType2Parse == 'getquotes':
parsed = self.ql777_getquotes.parse(data2parse)
elif requestType2Parse == 'tradebot':
parsed = self.ql777_tradebot.parse(data2parse)
# // privatebet
elif requestType2Parse == 'lotto':
parsed = self.ql777_lotto.parse(data2parse)
# // passthru 2
elif requestType2Parse == 'passthru':
parsed = self.ql777_passthru.parse(data2parse)
elif requestType2Parse == 'remote':
parsed = self.ql777_remote.parse(data2parse)
# // embedded langs
elif requestType2Parse == 'python':
parsed = self.ql777_python.parse(data2parse)
elif requestType2Parse == 'syscall':
parsed = self.ql777_syscall.parse(data2parse)
# //
else:
parsed = {'RAISE_ME_error':'RAISE_ME_error'}
return parsed
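# A minimal usage sketch (not part of the original module; the response dict and
# request type are illustrative only):
#
#     parser = Parser_777()
#     reply = parser.parse_777({'result': 'null'}, 'ping')  # -> UTF-8 encoded bytes
#
# parse_777() accepts a requests.Response, a dict or raw bytes and dispatches to the
# per-request parser classes through parseReturnedDict().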
##########################
##########################
##########################
##########################
class Parser_XML_Base(object):
""" this wrapper class can provide generic functionality for the
individual API Parser classes""" #
pass
class Parser_XML_SoccerSchedule(Parser_XML_Base):
def parse(self, data2parse):
log.msg("XmlParser STARTPARSE!!", self)
daily_summary = etree.fromstring(data2parse) #parse(url)
daily_summaryIter = daily_summary.iter()
returnThis = ''#'<html>'
limi=0
for elem in daily_summaryIter:
returnThis += (str(elem.attrib) + "\r\n")
#print(elem.tag, " - " , str(elem.attrib)) # <--------------------
#limi+=1
#if limi > 20:
# break
#returnThis += "</html>"
returnThis = returnThis.encode("utf-8")
return returnThis
class Parser_XML_MatchBoxScore(Parser_XML_Base):
def parse(self, data2parse):
return data2parse
class Parser_XML_GetNewsFeed(Parser_XML_Base):
def parse(self, data2parse):
log.msg("XmlParser STARTPARSE!!", self)
daily_summary = etree.fromstring(data2parse) #parse(url)
daily_summaryIter = daily_summary.iter()
returnThis = ''#'<html>'
limi=0
for elem in daily_summaryIter:
returnThis += (str(elem.attrib) + "\r\n")
#print(elem.tag, " - " , str(elem.attrib)) # <--------------------
#limi+=1
#if limi > 20:
# break
#returnThis += "</html>"
returnThis = returnThis.encode("utf-8")
return returnThis
class Parser_XML_DailySummary(Parser_XML_Base):
def parse(self, data2parse):
log.msg(" Parser_LOC XmlParser STARTPARSE!!", self)
daily_summary = etree.fromstring(data2parse)
daily_summaryIter = daily_summary.iter()
returnThis = ''
limi=0
for elem in daily_summaryIter:
returnThis += (str(elem.attrib) + "\r\n")
#print(elem.tag, " - " , str(elem.attrib)) # <--------------------
#limi+=1
#if limi > 20:
# break
#returnThis += "</html>"
returnThis = returnThis.encode("utf-8")
return returnThis
# one data processor class
class Parser_XML(object):
"""- this parses the xml that is received from the remote data provider""" # customize info from fetched xml
parser_XML_MatchBoxScore = Parser_XML_MatchBoxScore()
parser_XML_GetNewsFeed = Parser_XML_GetNewsFeed()
parser_XML_DailySummary = Parser_XML_DailySummary()
def __init__(self, environ = {}):
self.environ = environ
def ack(self):
log.msg("XmlParser HERE!")
def parse_XML(self, data2parse, requestType2Parse ):
print(1*"\n++++++++++++",requestType2Parse, data2parse )
if requestType2Parse == 'getNewsFeed':
parsed = self.parser_XML_GetNewsFeed.parse(data2parse)
elif requestType2Parse == 'MatchBoxScore':
parsed = self.parser_XML_MatchBoxScore.parse(data2parse)
elif requestType2Parse == 'DailySummary':
parsed = self.parser_XML_DailySummary.parse(data2parse)
else:
parsed = 'RAISE ME error'
data = str(parsed).encode("utf-8")
return data
##########################
##########################
##########################
##########################
##########################
##########################
##
## each QUERY gets its own little class.
## this is important to keep modularity
##
##########################
### Here we mostly just MIRROR what is happening in the XML PARSERS!
### using these here is allowing for variations other than xml feed reading!
class Parser_Loc_Base(object):
""" this wrapper class can provide generic functionality for the
individual API Parser classes""" #
pass
class Parse_Loc_Season(Parser_Loc_Base):
pass # mabye not needed do locally in parser
def parse(self, data2parse):
return data2parse
class Parser_LOC(object):
"""- this parses the data that is retrieved from a local cache
This is the local Parser wrapper class. When we need to parse local XML, we can just use an xml Parser class
Or other parser classes for other file formats
This can access XML parsers as well as any other Parsers
"""#
qLOC_Season = Parse_Loc_Season()
parser_XML_DailySummary = Parser_XML_DailySummary()
parser_XML_SoccerSchedule = Parser_XML_SoccerSchedule()
def __init__(self, environ = {}):
self.environ = environ
def parse_File(selfdata2parse, requestType2Parse ):
pass
def parse_Message(selfdata2parse, requestType2Parse ):
pass
def parse_XML(self, data2parse, reqDict ):
#print(13*"\n\n\n***********", reqDict)
if reqDict['requestType'] == 'DailySummary':
parsed = self.parser_XML_DailySummary.parse(data2parse)
elif reqDict['requestType'] == 'soccer_schedule':
parsed = self.parser_XML_SoccerSchedule.parse(data2parse)
else:
parsed = 'RAISE ME error'
data = str(parsed).encode("utf-8")
return data
# log.msg(" Parser_LOC XmlParser STARTPARSE!!", self)
# THE LOCALS HAVE TO USE THE XML PARSERS TOO!!!!!!!! AT LEAST THE XML ONES, BECAUSE THEY LOAD A CACHED XML FILE
| mit | 2,204,642,186,154,606,000 | 25.168087 | 115 | 0.64124 | false | 3.421063 | false | false | false |
sangh/LaserShow | glyph.py | 1 | 6471 |
from header import *
import json
import math
# This used to be a class, but I want to use json w/o all the
# manual mangling needed, so it's just going to be a dictionary.
# It must have X and Y grid points and keys for
# the 'name' and a 'path' (list of (x,y) points or "on"/"off" cmds).
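# A hypothetical example of such a dictionary (values are illustrative only,
# assuming an 8x8 grid defined in header.py):
#
#     {'name': 'square', 'XGridSize': 8, 'YGridSize': 8,
#      'path': ['on', (1, 1), (1, 6), (6, 6), (6, 1), (1, 1), 'off']}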
def pointIsValid( pt ):
try:
if "on" == pt or "off" == pt:
return True
if ( pt[0] < 0
or pt[0] >= XGridSize
or pt[1] < 0
or pt[1] >= YGridSize
):
return False
return True
except:
return False
def pathIsValid( path ):
try:
for pt in path:
if not pointIsValid( pt ):
return False
return True
except:
return False
def glyphIsValid( g ):
if not XGridSize == g['XGridSize']:
return False
if not YGridSize == g['YGridSize']:
return False
if 'name' not in g:
wrn("Glyph does not have a name.")
return False
if 'path' not in g:
wrn("Glyph \"%s\" does not have a path."%(str(g['name'])))
return False
if not pathIsValid( g['path'] ):
wrn("Path malformed in \"%s\"."%(str(g['name'])))
return False
return True
def glyphList():
"Return a list of glyphs saved already."
ls = sorted(os.listdir("glyphs"))
ret = []
for l in ls:
if ".json" != l[-5:]:
wrn("%s is not named correctly."%(l))
else:
ret.append( l[:-5] )
return ret
def glyphDump( g ):
if not glyphIsValid( g ):
raise NameError("Glyph is not valid, not storing.")
fileName = os.path.join("glyphs", str(g['name']) + ".json")
if( os.path.exists( fileName ) ):
raise NameError("It appears that this glyph exists, not storing.")
gs = g.copy()
gs.pop('name')
f = open( fileName, "w" )
json.dump(gs, f)
f.close()
def glyphLoad( name ):
fileName = os.path.join("glyphs", str(name) + ".json")
if( not os.path.exists( fileName ) ):
raise NameError("Glyph \"%s\" not found."%(str(name)))
f = open( fileName, "r" )
gu = json.load(f)
f.close()
# Now convert to ascii (from json's unicode).
# Will break if there are other things in here.
g = {}
for k in gu:
v = gu[k]
if isinstance( v, unicode ):
v = v.encode('ascii')
g[k.encode('ascii')] = v
p = []
for pt in g['path']:
if isinstance( pt, unicode ):
p.append( pt.encode('ascii') )
else:
p.append( pt )
g['path'] = p
g['name'] = str(name)
if glyphIsValid( g ):
return g
else:
raise NameError("Glyph \"%s\" is not valid."%(str(name)))
def glyphCreate( name, path ):
if not pathIsValid( path ):
raise SyntaxError("Path is invalid.")
newpath = []
for v in path:
if isinstance( v, list ):
newpath.append( tuple( v ) )
elif isinstance( v, unicode ):
newpath.append( v.encode('ascii') )
else:
newpath.append( v )
d = { 'name': str(name), 'path': newpath }
d['XGridSize'] = XGridSize
d['YGridSize'] = YGridSize
return d
def distanceEuclidean( lpt, pt ):
if lpt is None:
return 0.0
else:
y = float( pt[0] - lpt[0] )
x = float( pt[1] - lpt[1] )
return math.sqrt( ( x * x ) + ( y * y ) )
def interpolateEvenSpacedPtsOnALine( nPts, pt1, pt2 ):
"Return a list of nPts between pt1 and pt2 not inc. pt1 but inc. pt2."
"So pt2 is always the last pt in the list, and the list is nPts long."
expath = []
xOffset = float( pt2[0] - pt1[0] ) / nPts
yOffset = float( pt2[1] - pt1[1] ) / nPts
for i in range( 1, nPts ):
newX = int(( i * xOffset + pt1[0] ) // 1 )
newY = int(( i * yOffset + pt1[1] ) // 1 )
expath.append( ( newX, newY ) )
expath.append( pt2 )
return expath
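# For illustration (not in the original source):
#     interpolateEvenSpacedPtsOnALine(2, (0, 0), (4, 4)) returns [(2, 2), (4, 4)]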
def glyphExpandToPts( nPoints, glyph ):
"Return the glyph expanded to nPoints triplets."
# The general alg is to count the total path lenght, and then divede
# by the segment lengths. We want the glyph to be as sharp as possible
# so for now we only expand the lit parts.
lenTot = 0.0
lit = True
lpt = None
dummyPts = 0 # Pts that're off or duped.
# Calc total (lit) path lenght.
# We don't use the sqrt b/c it's computationally expensive and
# we don't cars about the number, only the ratios of the paths.
for pt in glyph['path']:
if "on" == pt:
lit = True
elif "off" == pt:
lit = False
else:
if( lit ):
d = distanceEuclidean( lpt, pt )
if 0.0 == d:
dummyPts = dummyPts + 1
lenTot = lenTot + d
else:
dummyPts = dummyPts + 1
lpt = pt
# Now we iterate again adding points to the lit parts.
expandToPts = nPoints - dummyPts
if len(filter(lambda p:not isinstance(p,str),glyph['path']))>=expandToPts:
raise SyntaxError("nPoints bigger than point-points in path?!?")
def ptToTriplet( lit, pt ):
if lit: blanked = 0
else: blanked = 1
return ( pt[0], pt[1], blanked )
expath = [] # This has the triplets.
lit = True
lpt = None
for pt in glyph['path']:
if "on" == pt:
lit = True
elif "off" == pt:
lit = False
else:
if( ( lpt is None ) or ( not lit ) ):
expath.append( ptToTriplet( lit, pt ) )
else:
dist = distanceEuclidean( lpt, pt )
nPtsToAdd = int(( expandToPts * dist / lenTot ) // 1 )
if( 0 < nPtsToAdd ):
interPts = interpolateEvenSpacedPtsOnALine( nPtsToAdd, lpt, pt )
expath = expath + map(lambda p: ptToTriplet( lit, p ), interPts )
else:
expath.append( ptToTriplet( lit, pt ) )
lpt = pt
# We add pts if the flooring interpalate did not add enough
# rather than spread them out we just repeat the last point.
le = len(expath)
if( le > nPoints ):
wrn("Truncated %d from glyph, the glyphExpandToPts fn is broken."%(le-nPoints))
return expath[0:nPoints]
elif( le < nPoints ):
return expath + (nPoints-le) * [expath[-1]]
else:
return expath
| bsd-3-clause | 2,095,060,305,932,436,500 | 30.876847 | 87 | 0.533302 | false | 3.440191 | false | false | false |
pnprog/goreviewpartner | gnugo_analysis.py | 1 | 13115 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from gtp import gtp
from Tkinter import *
from toolbox import *
from toolbox import _
def get_full_sequence_threaded(worker,current_color,deepness):
sequence=get_full_sequence(worker,current_color,deepness)
threading.current_thread().sequence=sequence
def get_full_sequence(worker,current_color,deepness):
try:
sequence=""
undos=0
for d in range(deepness):
if current_color.lower()=="b":
answer=worker.play_black()
current_color="w"
else:
answer=worker.play_white()
current_color="b"
sequence+=answer+" "
if answer=='RESIGN':
break
if answer=='PASS':
undos+=1
break
undos+=1
es=worker.get_gnugo_estimate_score()
for u in range(undos):
worker.undo()
return [sequence.strip(),es]
except Exception, e:
return e
class GnuGoAnalysis():
def run_analysis(self,current_move):
one_move=go_to_move(self.move_zero,current_move)
player_color=guess_color_to_play(self.move_zero,current_move)
gnugo=self.gnugo
log()
log("==============")
log("move",str(current_move))
final_score=gnugo.get_gnugo_estimate_score()
#linelog(final_score)
es=final_score.split()[0]
if es[0]=="B":
lbs="B%+d"%(-1*float(final_score.split()[3][:-1]))
ubs="B%+d"%(-1*float(final_score.split()[5][:-1]))
else:
ubs="W%+d"%(float(final_score.split()[3][:-1]))
lbs="W%+d"%(float(final_score.split()[5][:-1]))
node_set(one_move,"ES",es)
node_set(one_move,"UBS",ubs)
node_set(one_move,"LBS",lbs)
if player_color in ('w',"W"):
log("gnugo plays white")
top_moves=gnugo.gnugo_top_moves_white()
answer=gnugo.play_white()
else:
log("gnugo plays black")
top_moves=gnugo.gnugo_top_moves_black()
answer=gnugo.play_black()
log("====","Gnugo answer:",answer)
node_set(one_move,"CBM",answer)
log("==== Gnugo top moves")
for one_top_move in top_moves:
log("\t",one_top_move)
log()
top_moves=top_moves[:min(self.nb_variations,self.maxvariations)]
if (answer not in ["PASS","RESIGN"]):
gnugo.undo()
while len(top_moves)>0:
all_threads=[]
for worker in self.workers:
worker.need_undo=False
if len(top_moves)>0:
one_top_move=top_moves.pop(0)
if player_color in ('w',"W"):
worker.place_white(one_top_move)
one_thread=threading.Thread(target=get_full_sequence_threaded,args=(worker,'b',self.deepness))
else:
worker.place_black(one_top_move)
one_thread=threading.Thread(target=get_full_sequence_threaded,args=(worker,'w',self.deepness))
worker.need_undo=True
one_thread.one_top_move=one_top_move
one_thread.start()
all_threads.append(one_thread)
for one_thread in all_threads:
one_thread.join()
for worker in self.workers:
if worker.need_undo:
worker.undo()
for one_thread in all_threads:
if type(one_thread.sequence)!=type(["list"]):
raise GRPException(_("GnuGo thread failed:")+"\n"+str(one_thread.sequence))
one_sequence=one_thread.one_top_move+" "+one_thread.sequence[0]
es=one_thread.sequence[1]
one_sequence=one_sequence.strip()
log(">>>>>>",one_sequence)
previous_move=one_move.parent
current_color=player_color
first_move=True
for one_deep_move in one_sequence.split(' '):
if one_deep_move not in ['RESIGN','PASS']:
i,j=gtp2ij(one_deep_move)
new_child=previous_move.new_child()
node_set(new_child,current_color,(i,j))
if first_move:
first_move=False
node_set(new_child,"ES",es)
previous_move=new_child
if current_color in ('w','W'):
current_color='b'
else:
current_color='w'
else:
gnugo.undo()
#one_move.add_comment_text(additional_comments)
log("Creating the influence map")
black_influence=gnugo.get_gnugo_initial_influence_black()
black_territories_points=[]
black_influence_points=[]
white_influence=gnugo.get_gnugo_initial_influence_white()
white_territories_points=[]
white_influence_points=[]
for i in range(self.size):
for j in range(self.size):
if black_influence[i][j]==-3:
black_territories_points.append([i,j])
if white_influence[i][j]==3:
white_territories_points.append([i,j])
if black_influence[i][j]==-2:
black_influence_points.append([i,j])
if white_influence[i][j]==2:
white_influence_points.append([i,j])
if black_influence_points!=[]:
node_set(one_move,"IBM",black_influence_points) #IBM: influence black map
if black_territories_points!=[]:
node_set(one_move,"TBM",black_territories_points) #TBM: territories black map
if white_influence_points!=[]:
node_set(one_move,"IWM",white_influence_points) #IWM: influence white map
if white_territories_points!=[]:
node_set(one_move,"TWM",white_territories_points) #TWM: territories white map
return answer #returning the best move, necessary for live analysis
	def play(self,gtp_color,gtp_move):#GnuGo needs to redefine this method to apply it to all its workers
if gtp_color=='w':
self.bot.place_white(gtp_move)
for worker in self.workers:
worker.place_white(gtp_move)
else:
self.bot.place_black(gtp_move)
for worker in self.workers:
worker.place_black(gtp_move)
def undo(self):
self.bot.undo()
for worker in self.workers:
worker.undo()
def terminate_bot(self):
log("killing gnugo")
self.gnugo.close()
log("killing gnugo workers")
for w in self.workers:
w.close()
def initialize_bot(self):
self.nb_variations=4
try:
self.nb_variations=int(self.profile["variations"])
except:
pass
#grp_config.set("GnuGo", "variations",self.nb_variations)"""
self.deepness=4
try:
self.deepness=int(self.profile["deepness"])
except:
pass
#grp_config.set("GnuGo", "deepness",self.deepness)"""
gnugo=gnugo_starting_procedure(self.g,self.profile)
self.nb_workers=self.nb_variations
log("Starting all GnuGo workers")
self.workers=[]
for w in range(self.nb_workers):
log("\t Starting worker",w+1)
gnugo_worker=gnugo_starting_procedure(self.g,self.profile)
self.workers.append(gnugo_worker)
log("All workers ready")
self.gnugo=gnugo
self.time_per_move=0
return gnugo
def gnugo_starting_procedure(sgf_g,profile,silentfail=False):
return bot_starting_procedure("GnuGo","GNU Go",GnuGo_gtp,sgf_g,profile,silentfail)
class RunAnalysis(GnuGoAnalysis,RunAnalysisBase):
def __init__(self,parent,filename,move_range,intervals,variation,komi,profile="slow",existing_variations="remove_everything"):
RunAnalysisBase.__init__(self,parent,filename,move_range,intervals,variation,komi,profile,existing_variations)
class LiveAnalysis(GnuGoAnalysis,LiveAnalysisBase):
def __init__(self,g,filename,profile="slow"):
LiveAnalysisBase.__init__(self,g,filename,profile)
class GnuGo_gtp(gtp):
def get_gnugo_initial_influence_black(self):
self.write("initial_influence black influence_regions")
one_line=self.readline()
one_line=one_line.split("= ")[1].strip().replace(" "," ")
lines=[one_line]
for i in range(self.size-1):
one_line=self.readline().strip().replace(" "," ")
lines.append(one_line)
influence=[]
for i in range(self.size):
influence=[[int(s) for s in lines[i].split(" ")]]+influence
return influence
def get_gnugo_initial_influence_white(self):
self.write("initial_influence white influence_regions")
one_line=self.readline()
one_line=one_line.split("= ")[1].strip().replace(" "," ")
lines=[one_line]
for i in range(self.size-1):
one_line=self.readline().strip().replace(" "," ")
lines.append(one_line)
influence=[]
for i in range(self.size):
influence=[[int(s) for s in lines[i].split(" ")]]+influence
return influence
def quick_evaluation(self,color):
return variation_data_formating["ES"]%self.get_gnugo_estimate_score()
def get_gnugo_estimate_score(self):
self.write("estimate_score")
answer=self.readline().strip()
try:
return answer[2:]
except:
raise GRPException("GRPException in get_gnugo_estimate_score()")
def gnugo_top_moves_black(self):
self.write("top_moves_black")
answer=self.readline()[:-1]
try:
answer=answer.split(" ")[1:-1]
except:
raise GRPException("GRPException in get_gnugo_top_moves_black()")
answers_list=[]
for value in answer:
try:
float(value)
except:
answers_list.append(value)
return answers_list
def gnugo_top_moves_white(self):
self.write("top_moves_white")
answer=self.readline()[:-1]
try:
answer=answer.split(" ")[1:-1]
except:
raise GRPException("GRPException in get_gnugo_top_moves_white()")
answers_list=[]
for value in answer:
try:
float(value)
except:
answers_list.append(value)
return answers_list
def get_gnugo_experimental_score(self,color):
self.write("experimental_score "+color)
answer=self.readline().strip()
return answer[2:]
class GnuGoSettings(BotProfiles):
def __init__(self,parent,bot="GnuGo"):
Frame.__init__(self,parent)
self.parent=parent
self.bot=bot
self.profiles=get_bot_profiles(bot,False)
profiles_frame=self
self.listbox = Listbox(profiles_frame)
self.listbox.grid(column=10,row=10,rowspan=10)
self.update_listbox()
row=10
Label(profiles_frame,text=_("Profile")).grid(row=row,column=11,sticky=W)
self.profile = StringVar()
Entry(profiles_frame, textvariable=self.profile, width=30).grid(row=row,column=12)
row+=1
Label(profiles_frame,text=_("Command")).grid(row=row,column=11,sticky=W)
self.command = StringVar()
Entry(profiles_frame, textvariable=self.command, width=30).grid(row=row,column=12)
row+=1
Label(profiles_frame,text=_("Parameters")).grid(row=row,column=11,sticky=W)
self.parameters = StringVar()
Entry(profiles_frame, textvariable=self.parameters, width=30).grid(row=row,column=12)
row+=1
Label(profiles_frame,text=_("Maximum number of variations")).grid(row=row,column=11,sticky=W)
self.variations = StringVar()
Entry(profiles_frame, textvariable=self.variations, width=30).grid(row=row,column=12)
row+=1
Label(profiles_frame,text=_("Deepness for each variation")).grid(row=row,column=11,sticky=W)
self.deepness = StringVar()
Entry(profiles_frame, textvariable=self.deepness, width=30).grid(row=row,column=12)
row+=10
buttons_frame=Frame(profiles_frame)
buttons_frame.grid(row=row,column=10,sticky=W,columnspan=3)
Button(buttons_frame, text=_("Add profile"),command=self.add_profile).grid(row=row,column=1,sticky=W)
Button(buttons_frame, text=_("Modify profile"),command=self.modify_profile).grid(row=row,column=2,sticky=W)
Button(buttons_frame, text=_("Delete profile"),command=self.delete_profile).grid(row=row,column=3,sticky=W)
Button(buttons_frame, text=_("Test"),command=lambda: self.parent.parent.test(self.bot_gtp,self.command,self.parameters)).grid(row=row,column=4,sticky=W)
self.listbox.bind("<Button-1>", lambda e: self.after(100,self.change_selection))
self.index=-1
self.bot_gtp=GnuGo_gtp
def clear_selection(self):
self.index=-1
self.profile.set("")
self.command.set("")
self.parameters.set("")
self.variations.set("")
self.deepness.set("")
def change_selection(self):
try:
index=int(self.listbox.curselection()[0])
self.index=index
except:
log("No selection")
self.clear_selection()
return
data=self.profiles[index]
self.profile.set(data["profile"])
self.command.set(data["command"])
self.parameters.set(data["parameters"])
self.variations.set(data["variations"])
self.deepness.set(data["deepness"])
def add_profile(self):
profiles=self.profiles
if self.profile.get()=="":
return
data={"bot":self.bot}
data["profile"]=self.profile.get()
data["command"]=self.command.get()
data["parameters"]=self.parameters.get()
data["variations"]=self.variations.get()
data["deepness"]=self.deepness.get()
self.empty_profiles()
profiles.append(data)
self.create_profiles()
self.clear_selection()
def modify_profile(self):
profiles=self.profiles
if self.profile.get()=="":
return
if self.index<0:
log("No selection")
return
index=self.index
profiles[index]["profile"]=self.profile.get()
profiles[index]["command"]=self.command.get()
profiles[index]["parameters"]=self.parameters.get()
profiles[index]["variations"]=self.variations.get()
profiles[index]["deepness"]=self.deepness.get()
self.empty_profiles()
self.create_profiles()
self.clear_selection()
class GnuGoOpenMove(BotOpenMove):
def __init__(self,sgf_g,profile):
BotOpenMove.__init__(self,sgf_g,profile)
self.name='Gnugo'
self.my_starting_procedure=gnugo_starting_procedure
GnuGo={}
GnuGo['name']="GnuGo"
GnuGo['gtp_name']="GNU Go"
GnuGo['analysis']=GnuGoAnalysis
GnuGo['openmove']=GnuGoOpenMove
GnuGo['settings']=GnuGoSettings
GnuGo['gtp']=GnuGo_gtp
GnuGo['liveanalysis']=LiveAnalysis
GnuGo['runanalysis']=RunAnalysis
GnuGo['starting']=gnugo_starting_procedure
if __name__ == "__main__":
main(GnuGo)
| gpl-3.0 | 9,138,587,210,094,939,000 | 28.015487 | 154 | 0.682425 | false | 2.833225 | false | false | false |
notion/a_sync | a_sync/examples.py | 1 | 3254 | """Example Use."""
# [ Imports ]
# [ -Python ]
import asyncio
import time
# [ -Project ]
import a_sync
def examples() -> None:
"""Run examples."""
def hello(name: str, seconds: int) -> str:
"""
Hello.
Prints 'hello <name>', waits for <seconds> seconds, and then
prints 'bye <name>' and returns the name.
Args:
name - the name to say hello to.
seconds - the seconds to wait to say bye.
Returns:
name - the given name.
"""
print('hello {}'.format(name))
time.sleep(seconds)
print('bye {}'.format(name))
return name
async def async_hello(name: str, seconds: int) -> str:
"""
Hello.
Prints 'hello <name>', waits for <seconds> seconds, and then
prints 'bye <name>' and returns the name.
Args:
name - the name to say hello to.
seconds - the seconds to wait to say bye.
Returns:
name - the given name.
"""
print('hello {}'.format(name))
await asyncio.sleep(seconds)
print('bye {}'.format(name))
return name
background_thread = a_sync.queue_background_thread(hello, 'background-joe', 20)
# expect background-joe immediately
parallel_1 = a_sync.Parallel()
parallel_1.schedule(hello, 'joe', 5)
parallel_1.schedule(hello, 'sam', 3)
parallel_1.schedule(async_hello, 'bob', 1)
# expect start in any order, stop in bob, sam, joe
parallel_2 = a_sync.Parallel()
parallel_2.schedule(async_hello, 'jill', 4)
parallel_2.schedule(async_hello, 'jane', 2)
parallel_2.schedule(hello, 'mary', 1)
# expect start in any order, stop in mary, jane, jill
serial_1 = a_sync.Serial()
serial_1.schedule(parallel_1.run)
serial_1.schedule(parallel_2.block)
# expect bob/sam/joe to end before mary/jane/jill start
parallel_3 = a_sync.Parallel()
parallel_3.schedule(async_hello, 'joseph', 5)
parallel_3.schedule(hello, 'joey', 3)
parallel_3.schedule(async_hello, 'jo', 1)
# expect start in any order, stop in jo, joey, joseph
parallel_4 = a_sync.Parallel()
parallel_4.schedule(hello, 'alex', 4)
parallel_4.schedule(async_hello, 'alexandria', 2)
parallel_4.schedule(hello, 'alexandra', 1)
# expect start in any order, stop in alexandra, alexandria, alex
serial_2 = a_sync.Serial()
serial_2.schedule(parallel_3.run)
serial_2.schedule(parallel_4.block)
# expect joe/joey/joseph to stop before alexandra/alexandria/alex start
final_parallel = a_sync.Parallel()
final_parallel.schedule(serial_1.block)
final_parallel.schedule(serial_2.run)
final_parallel.block()
background_thread.result()
# expect bob/sam/joe to start with jo/joey/joseph
# expect jill/jane/mary to start with alex/alexandria/alexandra
# total expected ordering:
# start joe/sam/bob/joseph/joey/jo
# stop bob/jo
# stop sam/joey
# stop joe/joseph
# start jill/jane/mary/alex/alexandria/alexandra
# stop mary/alexandra
# stop alexandria/jane
# stop alex/jill
# stop background-joe
# [ Examples ]
if __name__ == '__main__': # pragma: no branch
examples()
| apache-2.0 | 1,119,202,184,153,480,200 | 28.315315 | 83 | 0.618316 | false | 3.428872 | false | false | false |
naritotakizawa/ngo | ngo/admin.py | 1 | 1524 | #!/usr/bin/env python
"""ngo-admin で呼ばれる管理用コマンドモジュール."""
import os
import sys
from ngo.backends import NgoTemplate
def startproject(project_name):
"""ngoプロジェクトを作成する."""
import ngo
top_dir = os.getcwd()
origin_project_path = os.path.join(ngo.__path__[0], 'project_template')
    # Create manage.py
manage_py_path = os.path.join(origin_project_path, 'manage')
with open(manage_py_path, 'r') as fp:
src = fp.read()
template = NgoTemplate(src)
src = template.render(
{'project_name': project_name}
)
new_file_path = os.path.join(top_dir, 'manage.py')
with open(new_file_path, 'w') as fp:
fp.write(src)
top_dir = os.path.join(top_dir, project_name)
    # Create the project directory
os.makedirs(top_dir)
    # Create settings.py, urls.py, and wsgi.py
for file in ['settings', 'urls', 'wsgi']:
file_path = os.path.join(origin_project_path, file)
with open(file_path, 'r') as fp:
src = fp.read()
template = NgoTemplate(src)
src = template.render(
{'project_name': project_name}
)
new_file_path = os.path.join(top_dir, file+'.py')
with open(new_file_path, 'w') as fp:
fp.write(src)
def main():
"""main."""
function_name, args = sys.argv[1], sys.argv[2:]
function = globals()[function_name]
function(*args)
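# Illustrative note (not part of the original module): main() dispatches on
# sys.argv, so an assumed invocation such as ``ngo-admin startproject myproject``
# resolves sys.argv[1] to the startproject() function above and calls
# startproject('myproject').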
| mit | -69,341,242,456,434,130 | 27.583333 | 75 | 0.576056 | false | 2.828685 | false | false | false |
TheStackBox/xuansdk | SDKLibrary/com/cloudMedia/theKuroBox/sdk/util/oauth.py | 1 | 2010 | ##############################################################################################
# Copyright 2014-2015 Cloud Media Sdn. Bhd.
#
# This file is part of Xuan Application Development SDK.
#
# Xuan Application Development SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xuan Application Development SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xuan Application Development SDK. If not, see <http://www.gnu.org/licenses/>.
##############################################################################################
import time
import random
import hashlib
import hmac
import base64
from com.cloudMedia.theKuroBox.sdk.util.httpUtil import HttpUtil
from com.cloudMedia.theKuroBox.sdk.util.util import Util
class OAuth(object):
'''
    This is an OAuth helper class for generating OAuth signatures.
'''
signatureMethod = "HMAC-SHA1"
version = "1.0"
def __init__(self):
pass
@staticmethod
def nonce():
'''
get oauth nonce.
'''
pass
@staticmethod
def sort(ls):
'''
        Sort the list in alphabetical order.
'''
pass
@staticmethod
def timestamp():
'''
get current timestamp.
'''
pass
@staticmethod
def create_signature(url, body="", _oauth_token="", _oauth_secret="", consumer_key="", consumer_secret="", http_method="POST"):
'''
return oauth signature, timestamp and nonce
'''
pass
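    # Illustrative sketch only -- not the SDK's implementation. A typical OAuth 1.0
    # HMAC-SHA1 signature (the method named in signatureMethod above) is built
    # roughly like this, using the parameters of create_signature():
    #
    #   signing_key = consumer_secret + "&" + _oauth_secret
    #   digest = hmac.new(signing_key.encode(), base_string.encode(), hashlib.sha1).digest()
    #   signature = base64.b64encode(digest)
    #
    # where base_string is assumed to be "METHOD&percent-encoded-url&percent-encoded-sorted-params".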
| gpl-3.0 | 5,437,459,701,332,692,000 | 28.923077 | 131 | 0.581592 | false | 4.599542 | false | false | false |
emencia/emencia_paste_djangocms_2 | emencia_paste_djangocms_2/django_buildout/project/mods_available/socialaggregator/settings.py | 1 | 2719 | """
Settings for emencia-django-socialaggregator (EDSA)
"""
INSTALLED_APPS = add_to_tuple(INSTALLED_APPS,
'django_extensions',
'taggit',
'socialaggregator')
# Optional specific formatter
#RESSOURCE_FORMATTER = "project.mods_available.socialaggregator.formatter.RessourceFormatterCustom"
# Twitter access keys
EDSA_TWITTER_TOKEN = 'FILLME'
EDSA_TWITTER_SECRET = 'FILLME'
EDSA_TWITTER_CONSUMER_KEY = 'FILLME'
EDSA_TWITTER_CONSUMER_SECRET = 'FILLME'
# Instagram access keys
EDSA_INSTAGRAM_ACCESS_TOKEN = 'FILLME'
# Facebook access keys
EDSA_FB_APP_ID = 'FILLME'
EDSA_FB_APP_SECRET = 'FILLME'
# Google+ access keys
EDSA_GOOGLE_DEVELOPER_KEY = 'FILLME'
# Pagination for ressource list in views
EDSA_PAGINATION = 16
# Enabled plugins and their engine
EDSA_PLUGINS = {
"edsa_twitter": {
#"ENGINE": "socialaggregator.plugins.twitter_aggregator",
"ENGINE": "socialaggregator.plugins.twitter_noretweet_aggregator",
"NAME": "Twitter"
},
"edsa_instagram": {
"ENGINE": "socialaggregator.plugins.instagram_aggregator",
"NAME": "Instagram"
},
"edsa_facebook_fanpage": {
"ENGINE": "socialaggregator.plugins.facebook_fanpage_aggregator",
"NAME": "Facebook Fanpage"
},
"edsa_wordpress_rss": {
"ENGINE": "socialaggregator.plugins.wordpress_rss_aggregator",
"NAME": "Wordpress RSS"
},
"edsa_youtube_search": {
"ENGINE": "socialaggregator.plugins.youtube_search_aggregator",
"NAME": "Youtube search"
},
}
# Used templates
EDSA_VIEW_TEMPLATE = 'socialaggregator/ressource_list.html'
EDSA_TAG_TEMPLATE = 'socialaggregator/ressource_list_tag.html'
EDSA_PLUGIN_TEMPLATE = 'socialaggregator/cms_plugin_feed.html'
# Image size limit (in Ko, use 0 for no size limit)
EDSA_RESSOURCE_IMAGE_SIZE = 0
# Various ressource fields choices
EDSA_RESSOURCE_VIEW_SIZES = (
('default', gettext('default')),
('small', gettext('small')),
#('xsmall', gettext('Xsmall')),
('medium', gettext('medium')),
('large', gettext('large')),
#('xlarge', gettext('Xlarge')),
)
EDSA_RESSOURCE_TEXT_DISPLAY = (
('default', gettext('default')),
#('bottom', gettext('bottom')),
#('top', gettext('top')),
)
EDSA_RESSOURCE_BUTTON_COLOR = (
('white', gettext('white')),
#('black', gettext('black')),
#('primary', gettext('primary')),
#('secondary', gettext('secondary')),
#('tertiary', gettext('tertiary')),
)
EDSA_RESSOURCE_MEDIA_TYPE = (
('url', gettext('url')),
('image', gettext('image')),
('video', gettext('video')),
)
# Media content types to add to the ones from EDSA_PLUGINS
EDSA_RESSOURCE_BASE_MEDIA_TYPES = [
('edsa_article', 'Article Event'),
] | mit | -5,816,126,432,303,117,000 | 27.333333 | 99 | 0.662376 | false | 3.210153 | false | false | false |
Crystal-SDS/dashboard | crystal_dashboard/api/swift.py | 1 | 12025 | from __future__ import unicode_literals
from django.conf import settings
import six.moves.urllib.parse as urlparse
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api.swift import swift_api
from openstack_dashboard.api.swift import Container
from openstack_dashboard.api.swift import GLOBAL_READ_ACL
from openstack_dashboard.api import base
from oslo_utils import timeutils
import requests
import json
@memoized
def get_token(request):
return request.user.token.id
# -----------------------------------------------------------------------------
#
# Swift - Regions
#
def swift_list_regions(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def new_region(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps(data), headers=headers)
return r
def delete_region(request, region_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions/" + str(region_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
def update_region(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions/" + str(data['region_id'])
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(data), headers=headers)
return r
def get_region(request, region_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/regions/" + str(region_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
#
# Swift - Zones
#
def swift_list_zones(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def new_zone(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps(data), headers=headers)
return r
def delete_zone(request, zone_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones/" + str(zone_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
def update_zone(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones/" + str(data['zone_id'])
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(data), headers=headers)
return r
def get_zone(request, zone_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/zones/" + str(zone_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
#
# Swift - Nodes
#
def swift_get_all_nodes(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/nodes"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_get_node_detail(request, server_type, node_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/nodes/" + str(server_type) + "/" + str(node_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_update_node(request, server_type, node_id, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/nodes/" + str(server_type) + "/" + str(node_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(data), headers=headers)
return r
def swift_restart_node(request, server_type, node_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + '/swift/nodes/' + str(server_type) + "/" + str(node_id) + '/restart'
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, headers=headers)
return r
#
# Swift - Storage Policies
#
def swift_new_storage_policy(request, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policies"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps(data), headers=headers)
return r
def swift_delete_storage_policy(request, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
# TODO
def load_swift_policies(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policies/load"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps({}), headers=headers)
return r
def deploy_storage_policy(request, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id) + "/deploy"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps({}), headers=headers)
return r
def swift_list_storage_policies(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policies"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_list_deployed_storage_policies(request):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policies/deployed"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_available_disks_storage_policy(request, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id) + "/disk/"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_storage_policy_detail(request, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_edit_storage_policy(request, storage_policy_id, data):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id)
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(data), headers=headers)
return r
def swift_add_disk_storage_policy(request, storage_policy_id, disk_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id) + "/disk/"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(disk_id), headers=headers)
return r
def swift_remove_disk_storage_policy(request, storage_policy_id, disk_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/storage_policy/" + str(storage_policy_id) + "/disk/" + disk_id
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.delete(url, headers=headers)
return r
#
# Swift - Containers
#
def swift_get_container(request, container_name, with_data=True):
if with_data:
headers, data = swift_api(request).get_object(container_name, "")
else:
data = None
headers = swift_api(request).head_container(container_name)
timestamp = None
is_public = False
public_url = None
try:
is_public = GLOBAL_READ_ACL in headers.get('x-container-read', '')
if is_public:
swift_endpoint = base.url_for(request,
'object-store',
endpoint_type='publicURL')
parameters = urlparse.quote(container_name.encode('utf8'))
public_url = swift_endpoint + '/' + parameters
ts_float = float(headers.get('x-timestamp'))
timestamp = timeutils.iso8601_from_timestamp(ts_float)
metadata = ''
for header in headers:
if header.startswith('x-container-meta-'):
key_name = header.replace('x-container-meta-', '').replace('-', ' ').title()
value = headers[header]
metadata += key_name + '=' + value + ', '
metadata = metadata[0:-2]
except Exception:
pass
container_info = {
'name': container_name,
'container_object_count': headers.get('x-container-object-count'),
'container_bytes_used': headers.get('x-container-bytes-used'),
'timestamp': timestamp,
'data': data,
'is_public': is_public,
'public_url': public_url,
'storage_policy': headers.get('x-storage-policy'),
'metadata': metadata,
}
return Container(container_info)
def swift_get_project_containers(request, project_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/" + str(project_id) + "/containers"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.get(url, headers=headers)
return r
def swift_create_container(request, project_id, container_name, container_headers):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/" + project_id + "/" + str(container_name) + "/create"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.post(url, json.dumps(container_headers), headers=headers)
return r
def swift_update_container_policy(request, project_id, container_name, storage_policy_id):
token = get_token(request)
headers = {}
url = settings.IOSTACK_CONTROLLER_URL + "/swift/" + project_id + "/" + str(container_name) + "/policy"
headers["X-Auth-Token"] = str(token)
headers['Content-Type'] = "application/json"
r = requests.put(url, json.dumps(storage_policy_id), headers=headers)
return r | gpl-3.0 | -5,493,002,323,249,835,000 | 25.372807 | 114 | 0.641663 | false | 3.462424 | false | false | false |
mvo5/snapcraft | tests/unit/plugins/test_plainbox_provider.py | 1 | 8465 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from textwrap import dedent
from unittest import mock
from testtools.matchers import Equals, HasLength
from snapcraft.internal import errors
from snapcraft.plugins import plainbox_provider
from snapcraft.project import Project
from tests import fixture_setup, unit
class PlainboxProviderPluginPropertiesTest(unit.TestCase):
def test_schema(self):
"""Test validity of the Scons Plugin schema"""
schema = plainbox_provider.PlainboxProviderPlugin.schema()
# Verify the presence of all properties
properties = schema["properties"]
self.assertThat(properties, Equals({}))
def test_get_pull_properties(self):
expected_pull_properties = []
resulting_pull_properties = (
plainbox_provider.PlainboxProviderPlugin.get_pull_properties()
)
self.assertThat(
resulting_pull_properties, HasLength(len(expected_pull_properties))
)
for property in expected_pull_properties:
self.assertIn(property, resulting_pull_properties)
def test_get_build_properties(self):
expected_build_properties = []
resulting_build_properties = (
plainbox_provider.PlainboxProviderPlugin.get_build_properties()
)
self.assertThat(
resulting_build_properties, HasLength(len(expected_build_properties))
)
for property in expected_build_properties:
self.assertIn(property, resulting_build_properties)
class PlainboxProviderPluginTest(unit.TestCase):
def setUp(self):
super().setUp()
snapcraft_yaml_path = self.make_snapcraft_yaml(
dedent(
"""\
name: plainbox-snap
base: core18
"""
)
)
self.project = Project(snapcraft_yaml_file_path=snapcraft_yaml_path)
class Options:
source = "."
self.options = Options()
patcher = mock.patch.object(plainbox_provider.PlainboxProviderPlugin, "run")
self.mock_run = patcher.start()
self.addCleanup(patcher.stop)
def test_build(self):
plugin = plainbox_provider.PlainboxProviderPlugin(
"test-part", self.options, self.project
)
os.makedirs(plugin.sourcedir)
# Place a few files with bad shebangs, and some files that shouldn't be
# changed.
files = [
{
"path": os.path.join(plugin.installdir, "baz"),
"contents": "#!/foo/bar/baz/python3",
"expected": "#!/usr/bin/env python3",
},
{
"path": os.path.join(plugin.installdir, "bin", "foobar"),
"contents": "#!/foo/baz/python3.5",
"expected": "#!/usr/bin/env python3.5",
},
{
"path": os.path.join(plugin.installdir, "foo"),
"contents": "foo",
"expected": "foo",
},
{
"path": os.path.join(plugin.installdir, "bar"),
"contents": "bar\n#!/usr/bin/python3",
"expected": "bar\n#!/usr/bin/python3",
},
]
for file_info in files:
os.makedirs(os.path.dirname(file_info["path"]), exist_ok=True)
with open(file_info["path"], "w") as f:
f.write(file_info["contents"])
plugin.build()
env = os.environ.copy()
env["PROVIDERPATH"] = ""
calls = [
mock.call(["python3", "manage.py", "validate"], env=env),
mock.call(["python3", "manage.py", "build"]),
mock.call(["python3", "manage.py", "i18n"]),
mock.call(
[
"python3",
"manage.py",
"install",
"--layout=relocatable",
"--prefix=/providers/test-part",
"--root={}".format(plugin.installdir),
]
),
]
self.mock_run.assert_has_calls(calls)
for file_info in files:
with open(os.path.join(plugin.installdir, file_info["path"]), "r") as f:
self.assertThat(f.read(), Equals(file_info["expected"]))
def test_build_with_provider_stage_dir(self):
self.useFixture(fixture_setup.CleanEnvironment())
plugin = plainbox_provider.PlainboxProviderPlugin(
"test-part", self.options, self.project
)
os.makedirs(plugin.sourcedir)
provider_path = os.path.join(
self.project.stage_dir, "providers", "test-provider"
)
os.makedirs(provider_path)
# Place a few files with bad shebangs, and some files that shouldn't be
# changed.
files = [
{
"path": os.path.join(plugin.installdir, "baz"),
"contents": "#!/foo/bar/baz/python3",
"expected": "#!/usr/bin/env python3",
},
{
"path": os.path.join(plugin.installdir, "bin", "foobar"),
"contents": "#!/foo/baz/python3.5",
"expected": "#!/usr/bin/env python3.5",
},
{
"path": os.path.join(plugin.installdir, "foo"),
"contents": "foo",
"expected": "foo",
},
{
"path": os.path.join(plugin.installdir, "bar"),
"contents": "bar\n#!/usr/bin/python3",
"expected": "bar\n#!/usr/bin/python3",
},
]
for file_info in files:
os.makedirs(os.path.dirname(file_info["path"]), exist_ok=True)
with open(file_info["path"], "w") as f:
f.write(file_info["contents"])
plugin.build()
calls = [
mock.call(
["python3", "manage.py", "validate"],
env={"PROVIDERPATH": provider_path},
),
mock.call(["python3", "manage.py", "build"]),
mock.call(["python3", "manage.py", "i18n"]),
mock.call(
[
"python3",
"manage.py",
"install",
"--layout=relocatable",
"--prefix=/providers/test-part",
"--root={}".format(plugin.installdir),
]
),
]
self.mock_run.assert_has_calls(calls)
for file_info in files:
with open(os.path.join(plugin.installdir, file_info["path"]), "r") as f:
self.assertThat(f.read(), Equals(file_info["expected"]))
def test_fileset_ignores(self):
plugin = plainbox_provider.PlainboxProviderPlugin(
"test-part", self.options, self.project
)
expected_fileset = [
"-usr/lib/python*/sitecustomize.py",
"-etc/python*/sitecustomize.py",
]
fileset = plugin.snap_fileset()
self.assertListEqual(expected_fileset, fileset)
class PlainboxProviderPluginUnsupportedBaseTest(unit.TestCase):
def setUp(self):
super().setUp()
snapcraft_yaml_path = self.make_snapcraft_yaml(
dedent(
"""\
name: plainbox-snap
base: unsupported-base
"""
)
)
self.project = Project(snapcraft_yaml_file_path=snapcraft_yaml_path)
class Options:
source = "dir"
self.options = Options()
def test_unsupported_base_raises(self):
self.assertRaises(
errors.PluginBaseError,
plainbox_provider.PlainboxProviderPlugin,
"test-part",
self.options,
self.project,
)
| gpl-3.0 | 3,288,589,442,993,010,000 | 31.557692 | 84 | 0.54117 | false | 4.182312 | true | false | false |
cfc603/django-twilio-sms-models | django_twilio_sms/management/commands/sync_responses.py | 1 | 1166 | from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.six import iteritems
from django_twilio_sms.models import Action, Response
class Command(BaseCommand):
help = "Sync responses from settings.DJANGO_TWILIO_SMS_RESPONSES"
def handle(self, *args, **options):
if hasattr(settings, 'DJANGO_TWILIO_SMS_RESPONSES'):
for action in Action.objects.all():
action.delete()
for action, response in iteritems(
settings.DJANGO_TWILIO_SMS_RESPONSES):
action = Action.objects.create(name=action)
response = Response.objects.create(
body=response, action=action
)
self.stdout.write('CREATED: {}-{}'.format(
action.name, response.body
))
else:
self.stdout.write('No responses found in settings.')
if Action.objects.all().count() > 0:
for action in Action.objects.all():
action.delete()
self.stdout.write('All saved responses have been deleted.')
| bsd-3-clause | 2,446,544,624,477,179,000 | 35.4375 | 75 | 0.586621 | false | 4.554688 | false | false | false |
bsipocz/ginga | ginga/qtw/plugins/Contents.py | 1 | 5110 | #
# Contents.py -- Table of Contents plugin for fits viewer
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import GingaPlugin
from ginga.misc import Bunch
from ginga.qtw.QtHelp import QtGui, QtCore
import time
class Contents(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(Contents, self).__init__(fv)
# For table-of-contents pane
self.nameDict = {}
self.columns = [('Name', 'NAME'),
('Object', 'OBJECT'),
('Date', 'DATE-OBS'),
('Time UT', 'UT')]
fv.set_callback('add-image', self.add_image)
fv.set_callback('delete-channel', self.delete_channel)
def build_gui(self, container):
# create the Treeview
treeview = QtGui.QTreeWidget()
treeview.setColumnCount(len(self.columns))
treeview.setSortingEnabled(True)
treeview.setAlternatingRowColors(True)
#treeview.itemClicked.connect(self.switch_image2)
#treeview.itemDoubleClicked.connect(self.switch_image2)
treeview.itemSelectionChanged.connect(self.switch_image3)
self.treeview = treeview
# create the column headers
col = 0
l = []
for hdr, kwd in self.columns:
l.append(hdr)
treeview.setHeaderLabels(l)
#self.treeview.connect('cursor-changed', self.switch_image2)
cw = container.get_widget()
cw.addWidget(treeview, stretch=1)
def switch_image(self, chname, imname):
fileDict = self.nameDict[chname]
key = imname.lower()
bnch = fileDict[key]
path = bnch.path
self.logger.debug("chname=%s name=%s path=%s" % (
chname, imname, path))
self.fv.switch_name(chname, bnch.NAME, path=path)
def switch_image2(self, item, column):
imname = str(item.text(0))
parent = item.parent()
if parent:
chname = str(parent.text(0))
#print "parent is %s" % chname
self.switch_image(chname, imname)
def switch_image3(self):
items = list(self.treeview.selectedItems())
self.switch_image2(items[0], 0)
def get_info(self, chname, name, image):
path = image.get('path', None)
bnch = Bunch.Bunch(NAME=name, CHNAME=chname, path=path)
# Get header keywords of interest
header = image.get_header()
for x, key in self.columns[1:]:
bnch[key] = header.get(key, 'N/A')
return bnch
def recreate_toc(self):
self.logger.debug("Recreating table of contents...")
toclist = list(self.nameDict.keys())
toclist.sort()
self.treeview.clear()
        for chname in toclist:
            chitem = QtGui.QTreeWidgetItem(self.treeview, [chname])
            chitem.setFirstColumnSpanned(True)
            self.treeview.addTopLevelItem(chitem)
            fileDict = self.nameDict[chname]
filelist = list(fileDict.keys())
filelist.remove('_chitem')
fileDict['_chitem'] = chitem
filelist.sort(key=str.lower)
for fname in filelist:
bnch = fileDict[fname]
l = []
for hdr, kwd in self.columns:
l.append(bnch[kwd])
item = QtGui.QTreeWidgetItem(chitem, l)
chitem.addChild(item)
def add_image(self, viewer, chname, image):
noname = 'Noname' + str(time.time())
name = image.get('name', noname)
path = image.get('path', None)
if chname not in self.nameDict:
# channel does not exist yet in contents--add it
chitem = QtGui.QTreeWidgetItem(self.treeview, [chname])
chitem.setFirstColumnSpanned(True)
self.treeview.addTopLevelItem(chitem)
fileDict = { '_chitem': chitem }
self.nameDict[chname] = fileDict
else:
fileDict = self.nameDict[chname]
chitem = fileDict['_chitem']
key = name.lower()
if key in fileDict:
return
bnch = self.get_info(chname, name, image)
fileDict[key] = bnch
l = []
for hdr, kwd in self.columns:
l.append(bnch[kwd])
item = QtGui.QTreeWidgetItem(chitem, l)
chitem.addChild(item)
self.treeview.scrollToItem(item)
self.logger.debug("%s added to Contents" % (name))
def clear(self):
self.nameDict = {}
self.recreate_toc()
def delete_channel(self, viewer, chinfo):
"""Called when a channel is deleted from the main interface.
Parameter is chinfo (a bunch)."""
chname = chinfo.name
del self.nameDict[chname]
self.recreate_toc()
def __str__(self):
return 'contents'
#END
| bsd-3-clause | -7,208,640,380,648,436,000 | 30.349693 | 68 | 0.57319 | false | 3.732652 | false | false | false |
ieatnerds/rath_bot | rath_bot.py | 1 | 1285 | """
Author: [email protected]
Rath Twitter Bot
This is a simple bot for twitter that is being used for several functions.
The main function of the bot is to provide an easy learning experience with
the Twitter API and pulling information from other sources to post on twitter.
"""
# Imports
import check_weather
import logging
import sys
from make_dir import makeDir
from twython import Twython
from auth import (consumer_key,
consumer_secret,
access_token,
access_token_secret,
)
twitter = Twython(consumer_key,
consumer_secret,
access_token,
access_token_secret
)
# Checking if there's an argument for where to log.
if len(sys.argv) == 1:
path = 'logs/'
else:
path = str(sys.argv[1])
makeDir(path)
logging.basicConfig(filename=(path+'record.log'), level=logging.INFO,
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
message = check_weather.grabTemp()
try:
twitter.update_status(status=message)
logging.info('I Tweeted!\n')
except Exception as err:
# This is mostly to just catch 405 forbidden's on duplicates.
logging.info(str(err)+'\n')
exit()
| gpl-3.0 | -710,473,560,557,177,200 | 25.770833 | 78 | 0.628794 | false | 3.893939 | false | false | false |
yaoguai/sanzang-lib | sanzang.py | 1 | 8885 | # Copyright (c) 2014 the Sanzang Lib authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module for table-based translation from the CJK languages.
This module implements core functionality of the Sanzang system, including
the most common functions from Sanzang Utils. Using this module, you can
reformat CJK text, load translation tables, perform table-based string
substitution, and make table-based string translations.
"""
import re
def reflow(text):
"""
Reformat CJK text according to its punctuation.
Given a string, this function will reformat ("reflow") the text so that
words and terms are not broken apart between lines. The function first
strips out any leading margins as used by CBETA texts. It then
collapses all line breaks, and reformats the text according to
horizontal spacing and punctuation.
The string to be formatted should not include any incomplete CBETA
margins, as this formatting function can only reliably remove whole
margins that follow the standard format.
Margin format: X01n0020_p0404a01(00)║
"""
# Remove CBETA margins.
text = re.sub(r'^[T|X].*?║', '', text, flags=re.M)
# Separate poetry from prose. If the line is short and starts with a space,
# then add another space at the end to separate it from the following text.
#
text = re.sub(r'^ (.{1,15})$', ' \\1 ', text, flags=re.M)
# Collapse newlines.
text = text.replace('\n', '')
# Ender followed by non-ender: newline in between.
text = re.sub(
r'([:,;。?!」』.;:\?])([^:,;。?!」』.;:\?])',
'\\1\n\\2', text, flags=re.M)
# Non-starter, non-ender, followed by a starter: newline in between.
text = re.sub(
r'([^「『 \t:,;。?!」』.;:\?\n])([「『 \t])',
'\\1\n\\2', text, flags=re.M)
# Adjust newlines and return.
if len(text) > 0 and text[-1] != '\n':
text += '\n'
return text
def reflow_file(fd_in, fd_out):
"""
Reformat CJK text from one file object to another (buffered).
Given input and output file objects, reformat CJK text from one to the
other according to the punctuation and horizontal spacing. I/O is
buffered for higher performance.
"""
enders = ':,;。?!」』.;:?'
buffer_size = 1000
str_buf = ''
line_n = 0
for line in fd_in:
line_n += 1
str_buf = str_buf + line
if line_n % buffer_size == 0:
i = len(str_buf) - 1
while i > 0:
if str_buf[i-1] in enders and str_buf[i] not in enders:
fd_out.write(reflow(str_buf[:i]))
str_buf = str_buf[i:]
i = -1
else:
i = i - 1
if len(str_buf) > 0:
fd_out.write(reflow(str_buf))
def read_table(table_fd):
"""
Read a translation table from a file.
Given an open file object, read a well-formatted translation table and
return its contents to the caller. The translation table is assumed to
be internally consistent and sorted properly according to standards for
translation tables. Blank lines are ignored. If an individual record is
formatted incorrectly, then a RuntimeError exception will be raised.
"""
tab = []
width = -1
for line in table_fd.read().split('\n'):
rec = [f.strip() for f in line.split('|')]
if width != -1 and width == len(rec):
tab.append(rec)
elif width == -1 and len(rec) > 1:
width = len(rec)
tab.append(rec)
elif line.strip() != '':
raise RuntimeError('Table error: ' + line.strip())
return tab
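# Illustrative example (assumed file contents): a two-column table file such as
#
#     佛|buddha
#     法|dharma
#
# yields [['佛', 'buddha'], ['法', 'dharma']] when passed to read_table().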
def subst(table, text):
"""
Make 1-to-1 string substitutions using a two-column table.
Given a translation table and a text, perform 1-to-1 string
substitution on the text, replacing terms in the first column of the
table with the corresponding terms in the second column. Substitution
is performed on three forms for each term: the original term, all
lowercase, and all uppercase.
"""
for term1, term2 in table:
text = text.replace(term1, term2)
text = text.replace(term1.lower(), term2.lower())
text = text.replace(term1.upper(), term2.upper())
return text
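# Illustrative example (assumed table): with table = [['佛', 'buddha'], ['法', 'dharma']],
# subst(table, '佛法') returns 'buddhadharma'; rules are applied in table order.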
def subst_file(table, fd_in, fd_out):
"""
Make string substitutions from file to file (buffered).
Given the contents of a two-column translation table, along with input
and output file objects, make one-to-one string substitutions using
buffered I/O.
"""
buffer_size = 1000
str_buf = ''
line_no = 1
for line in fd_in:
str_buf += line
if line_no % buffer_size == 0:
fd_out.write(subst(table, str_buf))
str_buf = ''
line_no += 1
fd_out.write(subst(table, str_buf))
def vocab(table, text):
"""
Return a new table containing only the vocabulary in the source text.
Create a new translation table containing only the rules that are
relevant for the given text. This is created by checking all source
terms against a copy of the text.
"""
text_rules = []
text_copy = str(text)
for rec in table:
if rec[0] in text_copy:
text_copy = text_copy.replace(rec[0], '\x1f')
text_rules.append(rec)
return text_rules
def tr_raw(table, text):
"""
Translate text using a table, return raw texts in a list.
Perform translation of a text by applying the rules in a translation
table. The result is a list of strings representing each column in the
translation table. For example, the first element in the list will be
the original source text, the second element will be the first target
language, etc.
"""
rules = vocab(table, text)
text = text.replace('\x1f', '')
collection = [text]
for col_no in range(1, len(table[0])):
trans = text
for rec in rules:
trans = trans.replace(rec[0], '\x1f' + rec[col_no] + '\x1f')
trans = trans.replace('\x1f\n', '\n')
trans = trans.replace('\x1f\x1f', ' ')
trans = trans.replace('\x1f', ' ')
collection.append(trans)
return collection
def tr_fmt(table, text, start=1):
"""
Translate text using a table, return a formatted listing string.
Perform translation of a text by applying rules in a translation table,
and return a formatted string. The formatted string represents the
source text and its translations collated together and organized by
line number and by translation table column number.
"""
collection = tr_raw(table, text)
for i in range(0, len(collection)):
collection[i] = collection[i].rstrip().split('\n')
listing = ''
for line_no in range(0, len(collection[0])):
for col_idx in range(0, len(table[0])):
listing += '%d.%d|%s\n' % (
start + line_no,
col_idx + 1,
collection[col_idx][line_no])
listing += '\n'
return listing
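# Format note (illustrative): for a two-column table the listing produced above
# looks like "1.1|<source line>" followed by "1.2|<target line>", with a blank
# line between source-line groups.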
def tr_file(table, fd_in, fd_out):
"""
Translate from one file to another (buffered).
Given a table, an input file object, and an output file object, apply
the translation table rules to the input text and write the translation
as a formatted string to the output. This function uses buffered
translation for higher performance.
"""
buffer_size = 100
str_buf = ''
line_no = 1
for line in fd_in:
str_buf += line
if line_no % buffer_size == 0:
fd_out.write(tr_fmt(table, str_buf, line_no - buffer_size + 1))
str_buf = ''
line_no += 1
position = line_no - str_buf.count('\n')
fd_out.write(tr_fmt(table, str_buf, position))
| mit | 6,251,118,874,616,566,000 | 32.842308 | 79 | 0.634049 | false | 3.792672 | false | false | false |
IS-ENES-Data/submission_forms | dkrz_forms/config/project_config.py | 1 | 6967 | # -*- coding: utf-8 -*-
"""
Project dictionaries
======================
The dictionaries defining the keywords for the individual projects are defined here.
They get accessible to the FormFabric code by adding them to the PROJECT_DICT dictionary.
Thus e.g. PROJECT_DICT['CMIP6'] defines to the overall keyword dictionary used for CMIP6 data
e.g. PROJECT_DICT['CMIP6_FORM'] defines the keyword (sub-)dictionary with the information from the data providers
(by filling the jupyter notebook based FORMs)
To define the data management steps used in the individual projects, the 'workflow' keyword is used.
Thus e.g. PROJECT_DICT['CMIP6']['workflow'] provides the list (of lists) defining the workflow steps.
The workflow steps are defined in .ref workflow_steps.py
@author: stephan
.. automodule:: dkrz_forms.config.settings
.. automodule:: dkrz_forms.config.workflow_steps
"""
#================================================================================================
# This first section should stay as it is .. make project specific extensions in the second part
#
# name spaces for w3c prov transformation of submission provenance information
import base64
from string import Template
rt_pwd = base64.b64decode("Y2Y3RHI2dlM=")
# End of first part
#================================================================================================
#================================================================================================
# Second section: definition of project dictionaries
#
# Generic selection strings:
SUBMISSION_TYPE = "initial_submission, update_submission, submission_retraction, other"
STORAGE_TIMEFRAME = "6_months,12_months, 2_years, 3_years"
LTA = "long_term_archival, long_term_archival_and_and_data citation, no_long_term_archival"
YES_OR_NO = "yes,no"
PROJECT_DICT = {}
PROJECTS = ['CORDEX','CMIP6','test','ESGF_replication','DKRZ_CDP']
generic_wflow_description = Template("""
Form object for project $project
Workflow step related sub-forms (filled by data managers):
- sub: data submission form
- rev: data review_form
- ing: data ingest form
- qua: data quality assurance form
- pub: data publication form
    Each workflow step form is structured according to
- entity_in : input information for this step
- entity_out: output information for this step
- agent: information related to responsible party for this step
- activity: information related the workflow step execution
End user provided form information is stored in
_this_form_object.sub.entity_out.form
The following generic attributes are defined:
- project: project this form is related to
- workflow: the workflow steps which are defined for this project
- status: overall workflow status
(keyword-structure = "workflow_step"_start, "workflow_step"_end
e.g. sub_start, sub_end
""")
for project in PROJECTS:
# submitted information
PROJECT_DICT[project] = {
'__doc__': generic_wflow_description.substitute(project=project),
"project":project,
"workflow": [("sub","data_submission"),
("rev","data_submission_review"),
("ing","data_ingest"),
("qua","data_quality_assurance"),
("pub","data_publication"),
# ("da", "data_archival")
],
"status": "0:open,1:data_submission,2:data_submission_review,3:data_ingest,4:data_quality_assurance,5:data_publication,6:data_archival"
}
PROJECT_DICT['CORDEX_FORM'] = {
"__doc__":"""
CORDEX information collected as part of form completion process
see CORDEX template
.. details on entries .. to be completed
""",
"project":"CORDEX",
"submission_type" : SUBMISSION_TYPE,
"institution" : "CV_CORDEX,institution",
"institute_id" : "CV_CORDEX,institute_id",
"model_id" : "CV_CORDEX,model_id",
"experiment_id" : "CV_CORDEX, experiment_id",
"time_period" : "",
"example_file_name" : "",
"grid_mapping_name" : "",
"grid_as_specified_if_rotated_pole" : "",
"data_qc_status" : "",
"data_qc_comment" : "",
"terms_of_use" : "",
"directory_structure" : "",
"data_path" : "",
"data_information" : "",
"exclude_variables_list" : "",
"variable_list_day" : "",
"variable_list_mon" : "",
"variable_list_sem" : "",
"variable_list_fx" : "",
"uniqueness_of_tracking_id" : YES_OR_NO}
PROJECT_DICT['DKRZ_CDP_FORM'] = {
"__doc__":"""
    DKRZ CMIP Data pool ingest request related information .. to be completed
""",
"project":"DKRZ_CDP",
"comment": "",
"submission_type" : SUBMISSION_TYPE,
"storage_timeframe": STORAGE_TIMEFRAME,
"lta": LTA }
PROJECT_DICT['CMIP6_FORM'] = {
"__doc__":"""
DKRZ CMIP6 data ingest and publication request information .. to be completed
""",
"project":"CMIP6",
"comment": "",
"institute_id" : "CV_CMIP6,institute_id",
"model_id" : "CV_CMIP6,model_id",
"experiment_id" : "CV_CMIP6, experiment_id",
"data_qa_status" : "PREPARE_checked, DKRZ_QA_checked,other",
"data_qa_comment" : "",
"terms_of_use" : YES_OR_NO,
}
PROJECT_DICT['test_FORM'] = {
"__doc__":"""
    test request related information .. to be completed
""",
"project":"test",
"comment": "",
"submission_type" : SUBMISSION_TYPE
}
PROJECT_DICT['ESGF_replication_FORM'] = {
"__doc__":"""
    ESGF replication request related information .. to be completed
""",
"project":"ESGF_replication",
"comment": "optional",
"submission_type" : SUBMISSION_TYPE,
"scientific_context": "mandatory",
"update_notification":YES_OR_NO,
"collection_pid":YES_OR_NO
}
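# Illustrative usage (not part of the original module): PROJECT_DICT['CMIP6']['workflow']
# returns the list of (short_name, step_name) tuples defined above, e.g.
# ('sub', 'data_submission'), while PROJECT_DICT['CMIP6_FORM'] holds the keywords
# filled in by the data providers.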
#
# End of section two
#================================================================================
| apache-2.0 | 6,383,556,357,326,457,000 | 36.86413 | 157 | 0.51299 | false | 4.34081 | false | false | false |
erinspace/scrapi | scrapi/processing/base.py | 3 | 3577 | import six
import json
from abc import abstractproperty, abstractmethod
from requests.structures import CaseInsensitiveDict
class BaseProcessor(object):
NAME = None
def process_raw(self, raw_doc, **kwargs):
pass # pragma: no cover
def process_normalized(self, raw_doc, normalized, **kwargs):
pass # pragma: no cover
@abstractmethod
def documents(self, *sources):
'''
an iterator that will return documents
'''
raise NotImplementedError
@abstractmethod
def get_versions(self, source, docID):
raise NotImplementedError
def different(self, old, new):
try:
return not all([new[key] == old[key] or (not new[key] and not old[key]) for key in new.keys() if key != 'timestamps'])
except Exception:
return True # If the document fails to load/compare for some reason, accept a new version
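    # Illustrative behaviour of different() (comment added for clarity, assumed inputs):
    # different({'a': 1, 'timestamps': 1}, {'a': 1, 'timestamps': 2}) is False because
    # 'timestamps' is ignored, while different({'a': 1}, {'a': 2}) is True.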
class BaseDatabaseManager(object):
'''A base class for database managers in the scrapi processing module
Must handle setup, teardown, and multi-process initialization of database connections
All errors should be logged, but not thrown
'''
@abstractmethod
def setup(self):
'''Sets up the database connection. Returns True if the database connection
is successful, False otherwise
'''
raise NotImplementedError
@abstractmethod
def tear_down(self):
'''Tears down the database connection.
'''
raise NotImplementedError
@abstractmethod
def clear(self, force=False):
'''Deletes everything in a table/keyspace etc
Should fail if called on the production database
for testing purposes only
'''
raise NotImplementedError
@abstractmethod
def celery_setup(self, *args, **kwargs):
'''Performs the necessary operations to allow a new process to connect to the database
'''
raise NotImplementedError
class BaseHarvesterResponse(object):
"""A parody of requests.response but stored in a database for caching
Should reflect all methods of a response object
Contains an additional field time_made, self-explanatory
"""
class DoesNotExist(Exception):
pass
@abstractproperty
def method(self):
raise NotImplementedError
@abstractproperty
def url(self):
raise NotImplementedError
@abstractproperty
def ok(self):
raise NotImplementedError
@abstractproperty
def content(self):
raise NotImplementedError
@abstractproperty
def encoding(self):
raise NotImplementedError
@abstractproperty
def headers_str(self):
raise NotImplementedError
@abstractproperty
def status_code(self):
raise NotImplementedError
@abstractproperty
def time_made(self):
raise NotImplementedError
@classmethod
@abstractmethod
def get(self, url=None, method=None):
raise NotImplementedError
@abstractmethod
def save(self):
raise NotImplementedError
@abstractmethod
def update(self, **kwargs):
raise NotImplementedError
def json(self):
try:
content = self.content.decode('utf-8')
except AttributeError: # python 3eeeee!
content = self.content
return json.loads(content)
@property
def headers(self):
return CaseInsensitiveDict(json.loads(self.headers_str))
@property
def text(self):
return six.u(self.content)
| apache-2.0 | -6,993,924,686,997,156,000 | 25.109489 | 130 | 0.656696 | false | 5.045134 | false | false | false |
Fusion-Data-Platform/fdf | fdf/factory.py | 2 | 39038 | # -*- coding: utf-8 -*-
"""
Root module for the FDF package.
**Classes**
* Machine - root class for the FDF package
* Shot - shot container class
* Logbook - logbook connection class
* Container - diagnostic container class
* Node - mdsplus signal node class
"""
"""
Created on Thu Jun 18 10:38:40 2015
@author: ktritz
"""
import xml.etree.ElementTree as ET
import sys, os, importlib
import fdf_globals
from fdf_signal import Signal
import numpy as np
import datetime as dt
#import modules # I think this import is unused - DRS 10/17/15
from collections import MutableMapping, Mapping
import MDSplus as mds
import types
import inspect
import pymssql
import matplotlib.pyplot as plt
FDF_DIR = fdf_globals.FDF_DIR
MDS_SERVERS = fdf_globals.MDS_SERVERS
EVENT_SERVERS = fdf_globals.EVENT_SERVERS
LOGBOOK_CREDENTIALS = fdf_globals.LOGBOOK_CREDENTIALS
FdfError = fdf_globals.FdfError
machineAlias = fdf_globals.machineAlias
class Machine(MutableMapping):
"""
Factory root class that contains shot objects and MDS access methods.
Note that fdf.factory.Machine is exposed in fdf.__init__, so fdf.Machine
is valid.
**Usage**::
>>> import fdf
>>> nstx = fdf.Machine('nstx')
>>> nstx.s140000.logbook()
>>> nstx.addshots(xp=1048)
>>> nstx.s140000.mpts.plot()
>>> nstx.listshot()
Machine class contains a model shot object: nstx.s0
Shot data can be accessed directly through the Machine class::
>>> nstx.s141398
>>> nstx.s141399
Alternatively, a list of shot #'s may be provided during initialization::
>>> nstx = Machine(name='nstx', shotlist=[141398, 141399])
Or added later using the method addshot()::
>>> nstx.addshot([141398, 141399])
"""
# Maintain a dictionary of cached MDS server connections to speed up
# access for multiple shots and trees. This is a static class variable
# to avoid proliferation of MDS server connections
_connections = []
_parent = None
_modules = None
def __init__(self, name='nstx', shotlist=[], xp=[], date=[]):
self._shots = {} # shot dictionary with shot number (int) keys
self._classlist = {} # unused as of 10/14/2015, DRS
self._name = machineAlias(name)
self._logbook = Logbook(name=self._name, root=self)
self.s0 = Shot(0, root=self, parent=self)
self._eventConnection = mds.Connection(EVENT_SERVERS[self._name])
        if len(self._connections) == 0:
print('Precaching MDS server connections...')
for _ in range(2):
try:
connection = mds.Connection(MDS_SERVERS[self._name])
connection.tree = None
self._connections.append(connection)
except:
msg = 'MDSplus connection to {} failed'.format(
MDS_SERVERS[self._name])
raise FdfError(msg)
print('Finished.')
if shotlist or xp or date:
self.addshot(shotlist=shotlist, xp=xp, date=date)
def __getattr__(self, name):
# used for attribute referencing: s = nstx.s140000
try:
shot = int(name.split('s')[1])
if (shot not in self._shots):
self._shots[shot] = Shot(shot, root=self, parent=self)
return self._shots[shot]
except:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), name))
def __repr__(self):
return '<machine {}>'.format(self._name.upper())
def __iter__(self):
return iter(self._shots.values())
def __contains__(self, value):
return value in self._shots
def __len__(self):
return len(self._shots.keys())
def __delitem__(self, item):
self._shots.__delitem__(item)
def __getitem__(self, item):
# used for dictionary referencing: s = nstx[140000]
# note that getitem fails to catch missing key,
# but getattr does catch missing key
if item == 0:
return self.s0
return self._shots[item]
def __setitem__(self, item, value):
pass
def __dir__(self):
d = ['s0']
d.extend(['s{}'.format(shot) for shot in self._shots])
return d
def _get_connection(self, shot, tree):
for connection in self._connections:
if connection.tree == (tree, shot):
self._connections.remove(connection)
self._connections.insert(0, connection)
return connection
connection = self._connections.pop()
try:
connection.closeAllTrees()
except:
pass
try:
connection.openTree(tree, shot)
connection.tree = (tree, shot)
except:
connection.tree = (None, None)
finally:
self._connections.insert(0, connection)
return connection
def _get_mdsdata(self, signal):
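        # Pull the node over a cached connection, then apply the signal's optional raw_of/dim_of extraction, transpose and post-processing hooks.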
# shot = base_container(signal)._parent.shot
shot = signal.shot
        if shot == 0:
print('No MDS data exists for model tree')
return None
connection = self._get_connection(shot, signal._mdstree)
try:
data = connection.get(signal._mdsnode)
except:
msg = "MDSplus connection error for tree '{}' and node '{}'".format(
signal._mdstree, signal._mdsnode)
raise FdfError(msg)
try:
if signal._raw_of is not None:
data = data.raw_of()
except:
pass
try:
if signal._dim_of is not None:
data = data.dim_of()
except:
pass
data = data.value_of().value
try:
if signal._transpose is not None:
data = data.transpose(signal._transpose)
except:
pass
try:
data = signal._postprocess(data)
except:
pass
return data
def _get_modules(self):
if self._modules is None:
module_dir = os.path.join(FDF_DIR, 'modules')
self._modules = [module for module in os.listdir(module_dir)
if os.path.isdir(os.path.join(module_dir, module)) and
                             module[0] != '_']
return self._modules
def addshot(self, shotlist=[], date=[], xp=[], verbose=False):
"""
Load shots into the Machine class
**Usage**
>>> nstx.addshot([140000 140001])
>>> nstx.addshot(xp=1032)
>>> nstx.addshot(date=20100817, verbose=True)
Note: You can reference shots even if the shots have not been loaded.
"""
if not iterable(shotlist):
shotlist = [shotlist]
if not iterable(xp):
xp = [xp]
if not iterable(date):
date = [date]
shots = []
if shotlist:
shots.extend([shotlist])
if xp:
shots.extend(self._logbook.get_shotlist(xp=xp,
verbose=verbose))
if date:
shots.extend(self._logbook.get_shotlist(date=date,
verbose=verbose))
for shot in np.unique(shots):
if shot not in self._shots:
self._shots[shot] = Shot(shot, root=self, parent=self)
def addxp(self, xp=[], verbose=False):
"""
Add all shots for one or more XPx
**Usage**
>>> nstx.addxp(1032)
>>> nstx.addxp(xp=1013)
>>> nstx.addxp([1042, 1016])
"""
self.addshot(xp=xp, verbose=verbose)
def adddate(self, date=[], verbose=False):
"""
Add all shots for one or more dates (format YYYYMMDD)
**Usage**
>>> nstx.adddate(date=20100817)
"""
self.addshot(date=date, verbose=verbose)
def list_shots(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
print('{} in XP {} on {}'.format(
shotObj.shot, shotObj.xp, shotObj.date))
def get_shotlist(self, date=[], xp=[], verbose=False):
"""
Get a list of shots
**Usage**
>>> shots = nstx.get_shotlist(xp=1013)
"""
return self._logbook.get_shotlist(date=date, xp=xp, verbose=verbose)
def filter_shots(self, date=[], xp=[]):
"""
Get a Machine-like object with an immutable shotlist for XP(s)
or date(s)
"""
self.addshot(xp=xp, date=date)
return ImmutableMachine(xp=xp, date=date, parent=self)
def setevent(self, event, shot_number=None, data=None):
event_data = bytearray()
if shot_number is not None:
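            # Prefix the event payload with the shot number packed as four little-endian bytes.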
shot_data = shot_number // 256**np.arange(4) % 256
event_data.extend(shot_data.astype(np.ubyte))
if data is not None:
event_data.extend(str(data))
mdsdata = mds.mdsdata.makeData(np.array(event_data))
event_string = 'setevent("{}", {})'.format(event, mdsdata)
status = self._eventConnection.get(event_string)
return status
def wfevent(self, event, timeout=0):
event_string = 'kind(_data=wfevent("{}",*,{})) == 0BU ? "timeout"' \
': _data'.format(event, timeout)
data = self._eventConnection.get(event_string).value
if type(data) is str:
raise FdfError('Timeout after {}s in wfevent'.format(timeout))
if not data.size:
return None
if data.size > 3:
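            # The first four bytes carry the little-endian shot number; the remainder is the text payload.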
shot_data = data[0:4]
shot_number = np.sum(shot_data * 256**np.arange(4))
data = data[4:]
return shot_number, ''.join(map(chr, data))
return data
def logbook(self):
"""
Print logbook entries for all shots
"""
for shotnum in self._shots:
shotObj = self._shots[shotnum]
shotObj.logbook()
class ImmutableMachine(Mapping):
def __init__(self, xp=[], date=[], parent=None):
self._shots = {}
self._parent = parent
shotlist = self._parent.get_shotlist(xp=xp, date=date)
for shot in shotlist:
self._shots[shot] = getattr(self._parent, 's{}'.format(shot))
def __getattr__(self, name):
try:
shot = int(name.split('s')[1])
return self._shots[shot]
except:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), name))
def __repr__(self):
return '<immutable machine {}>'.format(self._name.upper())
def __iter__(self):
return iter(self._shots.values())
def __contains__(self, value):
return value in self._shots
def __len__(self):
return len(self._shots.keys())
def __getitem__(self, item):
pass
def __dir__(self):
return ['s{}'.format(shot) for shot in self._shots]
def logbook(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
shotObj.logbook()
def list_shots(self):
for shotnum in self._shots:
shotObj = self._shots[shotnum]
print('{} in XP {} on {}'.format(
shotObj.shot, shotObj.xp, shotObj.date))
class Shot(MutableMapping):
def __init__(self, shot, root=None, parent=None):
self.shot = shot
self._shotobj = self
self._root = root
self._parent = parent
try:
self._logbook = self._root._logbook
except:
txt = 'No logbook connection for shot {}'.format(self.shot)
raise FdfError(txt)
self._logbook_entries = []
self._modules = {module: None for module in root._get_modules()}
self.xp = self._get_xp()
self.date = self._get_date()
self._efits = []
def __getattr__(self, attribute):
# first see if the attribute is in the Machine object
try:
attr = getattr(self._parent, attribute)
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
except:
pass # failed, so check other locations
if attribute in self._modules:
if self._modules[attribute] is None:
self._modules[attribute] = Factory(attribute, root=self._root,
shot=self.shot, parent=self)
return self._modules[attribute]
raise AttributeError("Shot object has no attribute '{}'".format(attribute))
def __repr__(self):
return '<Shot {}>'.format(self.shot)
def __iter__(self):
# return iter(self._modules.values())
return iter(self._modules)
def __contains__(self, value):
return value in self._modules
def __len__(self):
return len(self._modules.keys())
def __delitem__(self, item):
pass
def __getitem__(self, item):
return self._modules[item]
def __setitem__(self, item, value):
pass
def __dir__(self):
return self._modules.keys()
def _get_xp(self):
# query logbook for XP, return XP (list if needed)
if self._logbook and not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
xplist = []
for entry in self._logbook_entries:
xplist.append(entry['xp'])
return np.unique(xplist)
def _get_date(self):
# query logbook for rundate, return rundate
if self._logbook and not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
date = 0
if self._logbook_entries:
date = self._logbook_entries[0]['rundate']
return date
def logbook(self):
# print a list of logbook entries
print('Logbook entries for {}'.format(self.shot))
if not self._logbook_entries:
self._logbook_entries = self._logbook.get_entries(shot=self.shot)
for entry in self._logbook_entries:
print('************************************')
print(('{shot} on {rundate} in XP {xp}\n'
'{username} in topic {topic}\n\n'
'{text}').format(**entry))
print('************************************')
def plot(self, overwrite=False, label=None, multi=False):
if not overwrite and not multi:
plt.figure()
plt.subplot(1, 1, 1)
if self.shape != self.time.shape:
            msg = 'Dimension mismatch: {}\n shape data {} shape time {}'.format(
self._name, self.shape, self.time.shape)
raise FdfError(msg)
if self.size==0 or self.time.size==0:
msg = 'Empty data and/or time axis: {}\n shape data {} shape time {}'.format(
self._name, self.shape, self.time.shape)
raise FdfError(msg)
plt.plot(self.time[:], self[:], label=label)
title = self._title if self._title else self._name
if not overwrite or multi:
plt.suptitle('Shot #{}'.format(self.shot), x=0.5, y=1.00,
fontsize=12, horizontalalignment='center')
plt.ylabel('{} ({})'.format(self._name.upper(), self.units))
plt.title('{} {}'.format(self._container.upper(), title),
fontsize=12)
plt.xlabel('{} ({})'.format(self.time._name.capitalize(),
self.time.units))
plt.legend()
plt.show()
def check_efit(self):
if len(self._efits):
return self._efits
trees = ['efit{}'.format(str(index).zfill(2)) for index in range(1, 7)]
trees.extend(['lrdfit{}'.format(str(index).zfill(2))
for index in range(1, 13)])
tree_exists = []
for tree in trees:
data = None
connection = self._get_connection(self.shot, tree)
try:
data = connection.get('\{}::userid'.format(tree)).value
except:
pass
            if data and data != '*':
tree_exists.append(tree)
self._efits = tree_exists
return self._efits
class Logbook(object):
def __init__(self, name='nstx', root=None):
self._name = name.lower()
self._root = root
self._credentials = {}
self._table = ''
self._shotlist_query_prefix = ''
self._shot_query_prefix = ''
self._logbook_connection = None
self._make_logbook_connection()
# dict of cached logbook entries
# kw is shot, value is list of logbook entries
self.logbook = {}
def _make_logbook_connection(self):
self._credentials = LOGBOOK_CREDENTIALS[self._name]
self._table = self._credentials['table']
self._shotlist_query_prefix = (
'SELECT DISTINCT rundate, shot, xp, voided '
'FROM {} WHERE voided IS null').format(self._table)
self._shot_query_prefix = (
'SELECT dbkey, username, rundate, shot, xp, topic, text, entered, voided '
'FROM {} WHERE voided IS null').format(self._table)
try:
self._logbook_connection = pymssql.connect(
server=self._credentials['server'],
user=self._credentials['username'],
password=self._credentials['password'],
database=self._credentials['database'],
port=self._credentials['port'],
as_dict=True)
except:
print('Attempting logbook server connection as drsmith')
try:
self._logbook_connection = pymssql.connect(
server=self._credentials['server'],
user='drsmith',
password=self._credentials['password'],
database=self._credentials['database'],
port=self._credentials['port'],
as_dict=True)
except:
txt = '{} logbook connection failed. '.format(self._name.upper())
txt = txt + 'Server credentials:'
for key in self._credentials:
txt = txt + ' {0}:{1}'.format(key, self._credentials[key])
raise FdfError(txt)
def _get_cursor(self):
try:
cursor = self._logbook_connection.cursor()
cursor.execute('SET ROWCOUNT 500')
except:
raise FdfError('Cursor error')
return cursor
def _shot_query(self, shot=[]):
cursor = self._get_cursor()
if shot and not iterable(shot):
shot = [shot]
for sh in shot:
if sh not in self.logbook:
query = ('{0} and shot={1} '
'ORDER BY shot ASC, entered ASC'
).format(self._shot_query_prefix, sh)
cursor.execute(query)
rows = cursor.fetchall() # list of logbook entries
for row in rows:
rundate = repr(row['rundate'])
year = rundate[0:4]
month = rundate[4:6]
day = rundate[6:8]
row['rundate'] = dt.date(int(year), int(month), int(day))
self.logbook[sh] = rows
def get_shotlist(self, date=[], xp=[], verbose=False):
# return list of shots for date and/or XP
cursor = self._get_cursor()
rows = []
shotlist = [] # start with empty shotlist
date_list = date
if not iterable(date_list): # if it's just a single date
date_list = [date_list] # put it into a list
for date in date_list:
query = ('{0} and rundate={1} ORDER BY shot ASC'.
format(self._shotlist_query_prefix, date))
cursor.execute(query)
rows.extend(cursor.fetchall())
xp_list = xp
if not iterable(xp_list): # if it's just a single xp
xp_list = [xp_list] # put it into a list
for xp in xp_list:
query = ('{0} and xp={1} ORDER BY shot ASC'.
format(self._shotlist_query_prefix, xp))
cursor.execute(query)
rows.extend(cursor.fetchall())
for row in rows:
rundate = repr(row['rundate'])
year = rundate[0:4]
month = rundate[4:6]
day = rundate[6:8]
row['rundate'] = dt.date(int(year), int(month), int(day))
if verbose:
print('date {}'.format(rows[0]['rundate']))
for row in rows:
print(' {shot} in XP {xp}'.format(**row))
# add shots to shotlist
shotlist.extend([row['shot'] for row in rows
if row['shot'] is not None])
cursor.close()
return np.unique(shotlist)
def get_entries(self, shot=[], date=[], xp=[]):
# return list of lobgook entries (dictionaries) for shot(s)
if shot and not iterable(shot):
shot = [shot]
if xp or date:
shot.extend(self.get_shotlist(date=date, xp=xp))
if shot:
self._shot_query(shot=shot)
entries = []
for sh in np.unique(shot):
if sh in self.logbook:
entries.extend(self.logbook[sh])
return entries
_tree_dict = {}
def Factory(module_branch, root=None, shot=None, parent=None):
global _tree_dict
"""
Factory method
"""
try:
module_branch = module_branch.lower()
module_list = module_branch.split('.')
module = module_list[-1]
branch_str = ''.join([word.capitalize() for word in module_list])
if module_branch not in _tree_dict:
module_path = os.path.join(FDF_DIR, 'modules', *module_list)
parse_tree = ET.parse(os.path.join(module_path,
''.join([module, '.xml'])))
module_tree = parse_tree.getroot()
_tree_dict[module_branch] = module_tree
ContainerClassName = ''.join(['Container', branch_str])
if ContainerClassName not in Container._classes:
ContainerClass = type(ContainerClassName, (Container,), {})
init_class(ContainerClass, _tree_dict[module_branch], root=root,
container=module, classparent=parent.__class__)
Container._classes[ContainerClassName] = ContainerClass
else:
ContainerClass = Container._classes[ContainerClassName]
return ContainerClass(_tree_dict[module_branch], shot=shot,
parent=parent, top=True)
    except Exception:
print("{} not found in modules directory".format(module))
raise
class Container(object):
"""
Container class
"""
_instances = {}
_classes = {}
def __init__(self, module_tree, top=False, **kwargs):
cls = self.__class__
self._signals = {}
self._containers = {}
self._subcontainers = {}
self._title = module_tree.get('title')
self._desc = module_tree.get('desc')
for read_only in ['parent']:
setattr(self, '_'+read_only, kwargs.get(read_only, None))
try:
self.shot = kwargs['shot']
self._mdstree = kwargs['mdstree']
except:
pass
if self.shot is not None:
try:
cls._instances[cls][self.shot].append(self)
except:
cls._instances[cls][self.shot] = [self]
if top:
self._get_subcontainers()
for node in module_tree.findall('node'):
NodeClassName = ''.join(['Node', cls._name.capitalize()])
if NodeClassName not in cls._classes:
NodeClass = type(NodeClassName, (Node, cls), {})
cls._classes[NodeClassName] = NodeClass
else:
NodeClass = cls._classes[NodeClassName]
NodeClass._mdstree = parse_mdstree(self, node)
setattr(self, node.get('name'), NodeClass(node, parent=self))
for element in module_tree.findall('axis'):
signal_list = parse_signal(self, element)
branch_str = self._get_branchstr()
for signal_dict in signal_list:
SignalClassName = ''.join(['Axis', branch_str])
if SignalClassName not in cls._classes:
SignalClass = type(SignalClassName, (Signal, cls), {})
parse_method(SignalClass, element)
cls._classes[SignalClassName] = SignalClass
else:
SignalClass = cls._classes[SignalClassName]
SignalObj = SignalClass(**signal_dict)
refs = parse_refs(self, element, SignalObj._transpose)
if not refs:
refs = SignalObj.axes
for axis, ref in zip(SignalObj.axes, refs):
setattr(SignalObj, axis, getattr(self, '_'+ref))
setattr(self, ''.join(['_', signal_dict['_name']]), SignalObj)
for branch in module_tree.findall('container'):
name = branch.get('name')
branch_str = self._get_branchstr()
ContainerClassName = ''.join(['Container', branch_str,
name.capitalize()])
if ContainerClassName not in cls._classes:
ContainerClass = type(ContainerClassName, (cls, Container), {})
init_class(ContainerClass, branch, classparent=cls)
cls._classes[ContainerClassName] = ContainerClass
else:
ContainerClass = cls._classes[ContainerClassName]
ContainerObj = ContainerClass(branch, parent=self)
setattr(self, name, ContainerObj)
self._containers[name] = ContainerObj
for element in module_tree.findall('signal'):
signal_list = parse_signal(self, element)
branch_str = self._get_branchstr()
for signal_dict in signal_list:
# name = element.get('name').format('').capitalize()
SignalClassName = ''.join(['Signal', branch_str])
if SignalClassName not in cls._classes:
SignalClass = type(SignalClassName, (Signal, cls), {})
parse_method(SignalClass, element)
cls._classes[SignalClassName] = SignalClass
else:
SignalClass = cls._classes[SignalClassName]
SignalObj = SignalClass(**signal_dict)
refs = parse_refs(self, element, SignalObj._transpose)
if not refs:
refs = SignalObj.axes
for axis, ref in zip(SignalObj.axes, refs):
setattr(SignalObj, axis, getattr(self, '_'+ref))
setattr(self, signal_dict['_name'], SignalObj)
self._signals[signal_dict['_name']] = SignalObj
if top and hasattr(self, '_preprocess'):
self._preprocess()
def __getattr__(self, attribute):
try:
if self._subcontainers[attribute] is None:
branch_path = '.'.join([self._get_branch(), attribute])
self._subcontainers[attribute] = \
Factory(branch_path, root=self._root,
shot=self.shot, parent=self)
return self._subcontainers[attribute]
except KeyError:
pass
if not hasattr(self, '_parent') or self._parent is None:
raise AttributeError("Attribute '{}' not found".format(attribute))
if hasattr(self._parent, '_signals') and \
attribute in self._parent._signals:
raise AttributeError("Attribute '{}' not found".format(attribute))
attr = getattr(self._parent, attribute)
        if Container in attr.__class__.mro() and attribute[0] != '_':
raise AttributeError("Attribute '{}' not found".format(attribute))
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
def _get_subcontainers(self):
        if len(self._subcontainers) == 0:
container_dir = self._get_path()
if not os.path.isdir(container_dir):
return
files = os.listdir(container_dir)
self._subcontainers = {container: None for container in
files if os.path.isdir(
os.path.join(container_dir, container)) and
                                   container[0] != '_'}
@classmethod
def _get_path(cls):
branch = cls._get_branch().split('.')
path = os.path.join(FDF_DIR, 'modules')
for step in branch:
newpath = os.path.join(path, step)
if not os.path.isdir(newpath):
break
path = newpath
return path
def __dir__(self):
# print('in dir')
items = self.__dict__.keys()
items.extend(self.__class__.__dict__.keys())
if Signal not in self.__class__.mro():
items.extend(self._subcontainers.keys())
return [item for item in set(items).difference(self._base_items)
                if item[0] != '_']
def __iter__(self):
if not len(self._signals):
items = self._containers.values()
# items.extend(self._subcontainers.values())
else:
items = self._signals.values()
return iter(items)
@classmethod
def _get_branch(cls):
branch = cls._name
parent = cls._classparent
while parent is not Shot and parent.__class__ is not Shot:
branch = '.'.join([parent._name, branch])
parent = parent._classparent
return branch
@classmethod
def _get_branchstr(cls):
branch = cls._get_branch()
return ''.join([sub.capitalize() for sub in branch.split('.')])
def init_class(cls, module_tree, **kwargs):
cls._name = module_tree.get('name')
if cls not in cls._instances:
cls._instances[cls] = {}
for read_only in ['root', 'container', 'classparent']:
try:
setattr(cls, '_'+read_only, kwargs[read_only])
# print(cls._name, read_only, kwargs.get(read_only, 'Not there'))
except:
pass
for item in ['mdstree', 'mdspath', 'units']:
getitem = module_tree.get(item)
if getitem is not None:
setattr(cls, '_'+item, getitem)
cls._base_items = set(cls.__dict__.keys())
parse_method(cls, module_tree)
def parse_method(obj, module_tree):
objpath = obj._get_path()
sys.path.insert(0, objpath)
for method in module_tree.findall('method'):
method_name = method.text
if method_name is None:
method_name = method.get('name')
module = method.get('module')
if module is None:
module = method_name
method_in_module = method.get('method_in_module')
if method_in_module is None:
method_in_module = method_name
module_object = importlib.import_module(module)
method_from_object = module_object.__getattribute__(method_in_module)
setattr(obj, method_name, method_from_object)
sys.path.pop(0)
def base_container(container):
parent_container = container
while type(parent_container._parent) is not Shot:
parent_container = parent_container._parent
return parent_container
def parse_signal(obj, element):
units = parse_units(obj, element)
axes, transpose = parse_axes(obj, element)
number_range = element.get('range')
if number_range is None:
name = element.get('name')
title = element.get('title')
desc = element.get('desc')
mdspath, dim_of = parse_mdspath(obj, element)
mdstree = parse_mdstree(obj, element)
error = parse_error(obj, element)
signal_dict = [{'_name': name, 'units': units, 'axes': axes,
'_mdsnode': mdspath, '_mdstree': mdstree,
'_dim_of': dim_of, '_error': error, '_parent': obj,
'_transpose': transpose, '_title': title,
'_desc': desc}]
else:
number_list = number_range.split(',')
len_number_list = len(number_list)
if len_number_list == 1:
start = 0
end = int(number_list[0])
else:
start = int(number_list[0])
end = int(number_list[1])+1
signal_dict = []
if len_number_list == 3:
# 3rd item, if present, controls zero padding (cf. BES and magnetics)
digits = int(number_list[2])
else:
digits = int(np.ceil(np.log10(end-1)))
for index in range(start, end):
name = element.get('name').format(str(index).zfill(digits))
title = None
if element.get('title'):
title = element.get('title').format(str(index).zfill(digits))
desc = None
if element.get('desc'):
desc = element.get('desc').format(str(index).zfill(digits))
mdspath, dim_of = parse_mdspath(obj, element)
mdspath = mdspath.format(str(index).zfill(digits))
mdstree = parse_mdstree(obj, element)
error = parse_error(obj, element)
signal_dict.append({'_name': name, 'units': units, 'axes': axes,
'_mdsnode': mdspath, '_mdstree': mdstree,
'_dim_of': dim_of, '_error': error,
'_parent': obj, '_transpose': transpose,
'_title': title, '_desc': desc})
return signal_dict
def parse_axes(obj, element):
axes = []
transpose = None
time_ind = 0
try:
axes = [axis.strip() for axis in element.get('axes').split(',')]
if 'time' in axes:
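            # Build a transpose order that moves the time axis to the front of both the data and the axis list.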
time_ind = axes.index('time')
            if time_ind != 0:
transpose = list(range(len(axes)))
transpose.pop(time_ind)
transpose.insert(0, time_ind)
axes.pop(time_ind)
axes.insert(0, 'time')
except:
pass
return axes, transpose
def parse_refs(obj, element, transpose=None):
refs = None
try:
refs = [ref.strip() for ref in element.get('axes_refs').split(',')]
if transpose is not None:
refs = [refs[index] for index in transpose]
except:
pass
return refs
def parse_units(obj, element):
units = element.get('units')
if units is None:
try:
units = obj.units
except:
pass
return units
def parse_error(obj, element):
error = element.get('error')
if error is not None:
mdspath = element.get('mdspath')
if mdspath is None:
try:
mdspath = obj._mdspath
error = '.'.join([mdspath, error])
except:
pass
else:
error = '.'.join([mdspath, error])
return error
_path_dict = {}
def parse_mdspath(obj, element):
global _path_dict
key = (type(obj), element)
try:
return _path_dict[key]
except KeyError:
mdspath = element.get('mdspath')
try:
dim_of = int(element.get('dim_of'))
except:
dim_of = None
if mdspath is None:
try:
mdspath = obj._mdspath
except:
pass
if mdspath is not None:
mdspath = '.'.join([mdspath, element.get('mdsnode')])
else:
mdspath = element.get('mdsnode')
_path_dict[key] = (mdspath, dim_of)
return mdspath, dim_of
def parse_mdstree(obj, element):
mdstree = element.get('mdstree')
if mdstree is None and hasattr(obj, '_mdstree'):
mdstree = obj._mdstree
return mdstree
def iterable(obj):
try:
iter(obj)
if type(obj) is str:
return False
return True
except TypeError:
return False
class Node(object):
"""
Node class
"""
def __init__(self, element, parent=None):
self._parent = parent
self._name = element.get('name')
self._mdsnode = parse_mdspath(self, element)
self._data = None
self._title = element.get('title')
self._desc = element.get('desc')
self.units = element.get('units')
def __repr__(self):
if self._data is None:
self._data = self._get_mdsdata()
return str(self._data)
def __getattr__(self, attribute):
        if attribute == '_parent':
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), attribute))
if self._parent is None:
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self), attribute))
attr = getattr(self._parent, attribute)
if inspect.ismethod(attr):
return types.MethodType(attr.im_func, self)
else:
return attr
if __name__ == '__main__':
nstx = Machine()
s = nstx.s141000
s.bes.ch01.myfft()
# s.bes.ch01.fft2()
| mit | -7,257,784,443,417,488,000 | 33.264679 | 90 | 0.518648 | false | 4.147243 | false | false | false |
hanfang/scikit-ribo | scripts/NProt2013/Compare_RPM_norm_read_dens.py | 1 | 1911 | """
Supplementary Note 9: Compare RPM-normalized read densities
Author: Annemarie Becker
inputFile1:
RPM-normalized read densities along the whole genome or in protein coding regions on plus or minus strand from sample 1 (Supplementary Note 7 or 8)
col0: position along genome
col1: RPM-normalized read density at that position
inputFile2:
RPM-normalized read densities along the whole genome or in protein coding regions on plus or minus strand from sample 2 (Supplementary Note 7 or 8)
col0: position along genome
col1: RPM-normalized read density at that position
outputFile:
comparison of RPM-normalized read density files for protein coding regions on plus or minus strand from samples 1 and 2
col0: RPM-normalized read density of sample 1
col1: RPM-normalized read density of sample 2
"""
def matchRPM(inputFile1, inputFile2, outputFile):
# Upload list of sample 1
list1 = []
inFile1 = open(inputFile1, 'r')
line = inFile1.readline()
while line != '':
fields = line.split()
list1.append(fields)
line = inFile1.readline()
# Upload list of sample 2
list2 = []
inFile2 = open(inputFile2, 'r')
line = inFile2.readline()
while line != '':
fields = line.split()
list2.append(fields)
line = inFile2.readline()
# Compile both lists
listA = zip(list1, list2)
# Output files
outFile = open(outputFile, 'w')
for Z in listA:
position = int(Z[0][0])
read1 = float(Z[0][1])
read2 = float(Z[1][1])
outFile.write(str(read1) + '\t' + str(read2) + '\n')
if __name__ == '__main__':
inputFile1 = ''
inputFile2 = ''
outputFile = ''
matchRPM(inputFile1, inputFile2, outputFile)
| gpl-2.0 | -2,494,370,387,285,067,000 | 24.915493 | 147 | 0.608059 | false | 3.732422 | false | false | false |
eirannejad/pyRevit | extensions/pyRevitTools.extension/pyRevit.tab/Selection.panel/select.stack/Select.pulldown/Invert Selection.pushbutton/script.py | 1 | 1456 | """Inverts selection in active view.
Shift-Click:
Select group members instead of parent group elements.
"""
#pylint: disable=import-error,invalid-name,broad-except
from pyrevit import revit, DB
# get view elements
viewelements = DB.FilteredElementCollector(revit.doc, revit.active_view.Id)\
.WhereElementIsNotElementType()\
.ToElements()
# remove anything that is a direct DB.Element obj
# these are the weird internal objects that Revit uses like a camera obj
view_element_ids = \
{x.Id.IntegerValue for x in viewelements if x.GetType() is not DB.Element}
# get current selection
selection = revit.get_selection()
selected_element_ids = {x.Id.IntegerValue for x in selection}
# find elements that are not selected
invert_ids = view_element_ids.difference(selected_element_ids)
# if shiftclick, select all the invert elements
# otherwise do not select elements inside a group
filtered_invert_ids = invert_ids.copy()
if not __shiftclick__: #pylint: disable=undefined-variable
# collect ids of elements inside a group
grouped_element_ids = \
[x.Id.IntegerValue for x in viewelements
if x.GetType() is not DB.Element
and x.GroupId != DB.ElementId.InvalidElementId]
for element_id in invert_ids:
if element_id in grouped_element_ids:
filtered_invert_ids.remove(element_id)
# set selection
selection.set_to([DB.ElementId(x) for x in filtered_invert_ids])
| gpl-3.0 | 7,466,079,972,496,531,000 | 33.666667 | 78 | 0.724588 | false | 3.733333 | false | false | false |
MrLpk/nba | mode2/MTool.py | 1 | 2045 | #-*- coding: utf-8 -*-
'''
Created on 2013-8-27
@author: liaopengkai
'''
import urllib2
import os
import json
import re
import time
class MTool:
def isNum(self, tempStr):
"""判断字符串是否为数字,整型和浮点型皆适用"""
try:
float(tempStr)
return True
except Exception:
return False
def save(self, filename, contents, reNew = True, path = '', path2 = ''):
        '''Save a file. Args: file name, contents, whether to overwrite an existing file, path.'''
if not path == '':
if not os.path.isdir(path):
os.mkdir(path)
if not os.path.isdir(path + path2):
os.mkdir(path + path2)
filename = path + path2 + filename
if os.path.exists(filename):
if not reNew:
print 'You already have ' + filename
return
fh = open(filename, 'w')
fh.write(contents)
fh.close()
# print filename
print 'Save '+filename+' success...'
def download(self, url, path = '', reNew = True):
        '''Download a file from a URL and save it.'''
temp = url.split('/')
name = temp[len(temp)-1]
if path != '':
filename = path + name
if os.path.exists(filename):
if not reNew:
print 'You already have ' + filename
return
result = urllib2.urlopen(url).read()
self.save(name, result, reNew, path)
def getTime(self, _str = '%Y-%m-%d %H:%M:%S', _t = time.localtime()):
t = time.strftime(_str, _t)
return t
def sumTime(self, _hour = 0, _min = 0, _sec = 0):
t = time.time()
t += (3600*_hour + 60*_min + _sec)
return time.localtime(t)
def subTime(self, _hour = 0, _min = 0, _sec = 0):
t = time.time()
t -= (3600*_hour + 60*_min + _sec)
return time.localtime(t)
if __name__ == '__main__':
pass
| agpl-3.0 | -2,248,984,619,081,911,800 | 23.683544 | 76 | 0.475115 | false | 3.41331 | false | false | false |
dronir/EM | python/xrInspect_qt3.py | 1 | 4382 | #!/usr/bin/python
import sys
from PyQt4 import Qt
from PyQt4 import QtGui
from PyQt4 import QtCore
import Qwt5 as Qwt
import numpy as np
import math
from gather import *
class elementList(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle('Element List')
self.pt = QtGui.QCheckBox("Relative plots", self)
self.nt = QtGui.QCheckBox("Normalize plots", self)
self.nt.setEnabled(False)
self.el = QtGui.QListWidget(self)
self.el.addItems(hs.Elements)
self.el.setCurrentRow(0)
self.el.setEnabled(False)
self.el.setMinimumHeight(350)
self.el.setMaximumWidth(150)
self.tl = QtGui.QListWidget(self)
for i in range(hs.nThetaI):
self.tl.addItem("%6.2f" % math.degrees(hs.thetaI[i]))
self.tl.setCurrentRow(0)
self.tl.setMinimumHeight(150)
self.tl.setMaximumWidth(150)
self.qb = QtGui.QPushButton("Quit")
self.qb.setMaximumWidth(100)
## SET PLOT ATTRIBUTES
##
self.ratioPlots = []
for i in range(len(hs.Elements)):
self.ratioPlots.append( Qwt.QwtPlot(self))
self.ratioPlots[i].enableAxis(Qwt.QwtPlot.xBottom, False)
self.ratioPlots[i].setCanvasBackground(Qt.Qt.white)
#self.ratioPlots[i].plotLayout().setCanvasMargin(0)
#self.ratioPlots[i].plotLayout().setAlignCanvasToScales(True)
self.ratioPlots[i].setAxisScale(Qwt.QwtPlot.xBottom, 0, 90)
self.ratioPlots[i].setAxisMaxMajor(Qwt.QwtPlot.yLeft, 4)
#self.ratioPlots[i].axisWidget(Qwt.QwtPlot.yLeft).setBorderDist(50,60)
## LOAD DATA
##
self.data = []
for iTht in range(hs.nThetaI):
self.data.append([])
for iElem in range(len(hs.Elements)):
self.data[iTht].append((hs.toArray(set=iTht, lvl=iElem*2) + hs.toArray(set=iTht, lvl=iElem*2+1)).mean(1))
## PLOT
##
self.plotData = []
x = np.linspace(0, 90, hs.resTheta)
for iElem in range(len(hs.Elements)):
self.plotData.append(Qwt.QwtPlotCurve('y = sin(x)'))
self.plotData[iElem].setPen(Qt.QPen(Qt.Qt.red))
y = self.data[0][iElem]
self.plotData[iElem].setData(x, y)
self.plotData[iElem].attach(self.ratioPlots[iElem])
## SET LAYOUT
##
sbox = QtGui.QHBoxLayout()
rbox = QtGui.QVBoxLayout()
hbox = QtGui.QVBoxLayout()
hbox.addWidget(self.el)
hbox.addWidget(self.pt)
hbox.addWidget(self.nt)
hbox.addSpacing(50)
hbox.addWidget(self.tl)
hbox.addStretch(1)
hbox.addWidget(self.qb)
for i in range(len(hs.Elements)):
rbox.addWidget(self.ratioPlots[i])
sbox.addLayout(hbox)
sbox.addSpacing(50)
sbox.addLayout(rbox)
self.setLayout(sbox)
self.resize(800, 1000)
## SET CONNECTIONS
##
self.connect(self.el, QtCore.SIGNAL('itemSelectionChanged()'), self.plot)
self.connect(self.tl, QtCore.SIGNAL('itemSelectionChanged()'), self.plot)
self.connect(self.pt, QtCore.SIGNAL('stateChanged(int)'), self.changeRel)
self.connect(self.nt, QtCore.SIGNAL('stateChanged(int)'), self.changeRel)
self.connect(self.qb, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.SLOT('quit()'))
def plot(self):
iTht = self.tl.currentRow()
x = np.linspace(0, 90, hs.resTheta)
for iElem in range(len(hs.Elements)):
if(self.pt.isChecked()):
y = self.data[iTht][iElem] / self.data[iTht][self.el.currentRow()]
if(self.nt.isChecked()):
y /= y[0]
else:
y = self.data[iTht][iElem]
self.plotData[iElem].setData(x, y)
self.plotData[iElem].attach(self.ratioPlots[iElem])
self.ratioPlots[iElem].replot()
def changeRel(self):
self.nt.setEnabled(self.pt.isChecked())
self.el.setEnabled(self.pt.isChecked())
self.plot()
app = QtGui.QApplication(sys.argv)
hs = xrHemisphere()
hs.load(sys.argv[1])
hs.divideBySolidAngle()
icon = elementList()
icon.show()
app.exec_()
| gpl-3.0 | 2,774,238,473,935,634,400 | 26.734177 | 121 | 0.590826 | false | 3.314675 | false | false | false |
gridsync/gridsync | gridsync/network.py | 1 | 1321 | import errno
import logging
import socket
import sys
from random import randint
def get_local_network_ip() -> str:
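    # Connecting a UDP socket to an unroutable broadcast address sends no traffic but makes the OS pick the outbound interface, whose address is returned.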
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(("10.255.255.255", 1))
ip = s.getsockname()[0]
s.close()
return ip
def get_free_port(
port: int = 0, range_min: int = 49152, range_max: int = 65535
) -> int:
if not port:
port = randint(range_min, range_max)
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
logging.debug("Trying to bind to port: %i", port)
s.bind(("127.0.0.1", port))
except OSError as err:
logging.debug("Couldn't bind to port %i: %s", port, err)
if err.errno == errno.EADDRINUSE or (
# "[WinError 10013] An attempt was made to access a
# socket in a way forbidden by its access
# permissions"
sys.platform == "win32"
and err.winerror == 10013
):
port = randint(range_min, range_max)
continue
raise
logging.debug("Port %s is free", port)
return port
| gpl-3.0 | -780,984,637,094,630,900 | 32.025 | 72 | 0.531416 | false | 3.908284 | false | false | false |
drkatnz/CombinedOneClass | example.py | 1 | 1682 | import numpy as np
from scipy import stats
from oneclass import oneclass
from sklearn.tree import DecisionTreeClassifier
rng = np.random.RandomState(42)
# Example settings
n_samples = 2000
outliers_fraction = 0.25
clusters_separation = [0,1,2]
# define two or more outlier detection tools to be compared
classifiers = {
"One-Class": oneclass.OneClassClassifier(contamination=outliers_fraction,base_classifier=DecisionTreeClassifier(max_depth=2),density_only=True)
}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
scores_pred = clf.decision_function(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
y_pred = clf.predict(X)
n_errors = (y_pred != ground_truth).sum()
    # report the error count for this cluster separation
print "Cluster separation: ", offset, " Number of errors: ", n_errors | mit | 4,941,039,829,575,862,000 | 34.808511 | 147 | 0.664685 | false | 3.357285 | false | false | false |
SUSE/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/network_interface_dns_settings.py | 9 | 2698 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkInterfaceDnsSettings(Model):
"""DNS settings of a network interface.
:param dns_servers: List of DNS servers IP addresses. Use
'AzureProvidedDNS' to switch to azure provided DNS resolution.
'AzureProvidedDNS' value cannot be combined with other IPs, it must be the
only value in dnsServers collection.
:type dns_servers: list of str
:param applied_dns_servers: If the VM that uses this NIC is part of an
Availability Set, then this list will have the union of all DNS servers
from all NICs that are part of the Availability Set. This property is what
is configured on each of those VMs.
:type applied_dns_servers: list of str
:param internal_dns_name_label: Relative DNS name for this NIC used for
internal communications between VMs in the same virtual network.
:type internal_dns_name_label: str
:param internal_fqdn: Fully qualified DNS name supporting internal
communications between VMs in the same virtual network.
:type internal_fqdn: str
:param internal_domain_name_suffix: Even if internalDnsNameLabel is not
specified, a DNS entry is created for the primary NIC of the VM. This DNS
name can be constructed by concatenating the VM name with the value of
internalDomainNameSuffix.
:type internal_domain_name_suffix: str
"""
_attribute_map = {
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
'applied_dns_servers': {'key': 'appliedDnsServers', 'type': '[str]'},
'internal_dns_name_label': {'key': 'internalDnsNameLabel', 'type': 'str'},
'internal_fqdn': {'key': 'internalFqdn', 'type': 'str'},
'internal_domain_name_suffix': {'key': 'internalDomainNameSuffix', 'type': 'str'},
}
def __init__(self, dns_servers=None, applied_dns_servers=None, internal_dns_name_label=None, internal_fqdn=None, internal_domain_name_suffix=None):
self.dns_servers = dns_servers
self.applied_dns_servers = applied_dns_servers
self.internal_dns_name_label = internal_dns_name_label
self.internal_fqdn = internal_fqdn
self.internal_domain_name_suffix = internal_domain_name_suffix
| mit | -7,172,057,725,379,304,000 | 48.962963 | 151 | 0.669755 | false | 4.222222 | false | false | false |
tavultesoft/keymanweb | linux/keyman-config/experiments/list_kmp.py | 1 | 4610 | #!/usr/bin/python3
import datetime
import logging
import os
import requests
import requests_cache
import subprocess
import tempfile
import time
from dirlist import list_keyboards
#from keymankeyboards import get_api_keyboards
#from keyman_config import get_kmp, install_kmp
from keyman_config.get_kmp import get_keyboard_data, get_kmp_file, keyman_cache_dir
from keyman_config.install_kmp import get_metadata, get_infdata, extract_kmp
#TODO check for kmn and check if it is compilable
#TODO extra output files jsonkmpnokmn, jsonkmpbadkmn, goodjsonkmpkmn and for inf as well
def get_kmn(kbid, sourcePath):
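    # Fetch the keyboard's .kmn source from the keymanapp/keyboards GitHub repo through a one-week requests_cache kept in the Keyman cache directory.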
base_url = "https://raw.github.com/keymanapp/keyboards/master/" + sourcePath
kmn_url = base_url + "/source/" + kbid + ".kmn"
cache_dir = keyman_cache_dir()
current_dir = os.getcwd()
expire_after = datetime.timedelta(days=7)
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
os.chdir(cache_dir)
requests_cache.install_cache(cache_name='keyman_cache', backend='sqlite', expire_after=expire_after)
now = time.ctime(int(time.time()))
response = requests.get(kmn_url)
logging.debug("Time: {0} / Used Cache: {1}".format(now, response.from_cache))
os.chdir(current_dir)
requests_cache.core.uninstall_cache()
return requests.get(kmn_url)
def main():
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s:%(message)s')
keyboarddata = list_keyboards()
if keyboarddata:
with open('./nokmp.txt', 'wt') as nokmp, \
open('./infnokeyboard.txt', 'wt') as infnokeyboard, \
open('./goodjsonkmpkmn.txt', 'wt') as goodjsonkmpkmn, \
open('./jsonkmpnokmn.txt', 'wt') as jsonkmpnokmn, \
open('./jsonkmpbadkmn.txt', 'wt') as jsonkmpbadkmn, \
open('./jsonkmpmissingkmn.txt', 'wt') as jsonkmpmissingkmn, \
open('./brokeninf.txt', 'wt') as brokeninf, \
open('./nodata.txt', 'wt') as nodata, \
open('./goodinfkmp.txt', 'wt') as goodinfkmp:
print("Keyboard: will work in kmfl :)", file=goodjsonkmpkmn) # goodjsonkmpkmn
print("Keyboard: has uncompilable kmn", file=jsonkmpbadkmn) # jsonkmpbadkmn
print("Keyboard: has json in kmp but can't find the kmn on github", file=jsonkmpmissingkmn) # jsonkmpmissingkmn
print("Keyboard: has json in kmp but has no sourcePath to look for kmn on github", file=jsonkmpnokmn) # jsonkmpnokmn
print("Keyboard: has kmp with kmp.inf", file=goodinfkmp)
print("Keyboard: has kmp with kmp.inf but it has no Keyboard", file=infnokeyboard)
print("Keyboard: has kmp but no kmp.json and no or broken kmp.inf", file=brokeninf)
print("Keyboard: does not have kmp so mobile/web only", file=nokmp)
print("Keyboard: has no data", file=nodata)
for kbid in keyboarddata:
kbdata = get_keyboard_data(kbid, True)
print(kbid)
if kbdata:
if 'packageFilename' in kbdata:
kmpfile = get_kmp_file(kbdata, True)
with tempfile.TemporaryDirectory() as tmpdirname:
extract_kmp(kmpfile, tmpdirname)
try:
info, system, options, keyboards, files = get_metadata(tmpdirname)
if keyboards:
if 'sourcePath' in kbdata:
response = get_kmn(kbid, kbdata['sourcePath'])
if response.status_code == 200:
kmndownloadfile = os.path.join(tmpdirname, kbid + ".kmn")
with open(kmndownloadfile, 'wb') as f:
f.write(response.content)
subprocess.run(["kmflcomp", kmndownloadfile], stdout=subprocess.PIPE, stderr= subprocess.STDOUT)
kmfl_file = os.path.join(tmpdirname, kbid + ".kmfl")
if os.path.isfile(kmfl_file):
logging.debug("goodjsonkmpkmn")
print(kbid, file=goodjsonkmpkmn) # goodjsonkmpkmn
else:
logging.debug("jsonkmpbadkmn")
print(kbid, file=jsonkmpbadkmn) # jsonkmpbadkmn
else:
logging.debug("jsonkmpmissingkmn")
print(kbid, file=jsonkmpmissingkmn) # jsonkmpmissingkmn
else:
logging.debug("jsonkmpnokmn")
print(kbid, file=jsonkmpnokmn) # jsonkmpnokmn
else:
info, system, options, keyboards, files = get_infdata(tmpdirname)
if keyboards:
logging.debug("infnokeyboard")
print(kbid, file=goodinfkmp)
elif files:
logging.debug("goodinfkmp")
print(kbid, file=infnokeyboard)
else:
print(kbid, file=brokeninf)
except KeyError:
logging.debug("brokeninf")
print(kbid, file=brokeninf)
else:
logging.debug("nokmp")
print(kbid, file=nokmp)
else:
logging.debug("nodata")
print(kbid, file=nodata)
if __name__ == "__main__":
main()
| apache-2.0 | -5,219,260,245,291,101,000 | 38.067797 | 119 | 0.673536 | false | 2.930706 | false | false | false |
trakt/script.trakt | resources/lib/rating.py | 1 | 9061 | # -*- coding: utf-8 -*-
"""Module used to launch rating dialogues and send ratings to Trakt"""
import xbmc
import xbmcaddon
import xbmcgui
from resources.lib import utilities
from resources.lib import kodiUtilities
from resources.lib import globals
import logging
logger = logging.getLogger(__name__)
__addon__ = xbmcaddon.Addon("script.trakt")
def ratingCheck(media_type, items_to_rate, watched_time, total_time):
"""Check if a video should be rated and if so launches the rating dialog"""
logger.debug("Rating Check called for '%s'" % media_type)
if not kodiUtilities.getSettingAsBool("rate_%s" % media_type):
logger.debug("'%s' is configured to not be rated." % media_type)
return
if items_to_rate is None:
logger.debug("Summary information is empty, aborting.")
return
watched = (watched_time / total_time) * 100
if watched >= kodiUtilities.getSettingAsFloat("rate_min_view_time"):
rateMedia(media_type, items_to_rate)
else:
logger.debug("'%s' does not meet minimum view time for rating (watched: %0.2f%%, minimum: %0.2f%%)" % (
media_type, watched, kodiUtilities.getSettingAsFloat("rate_min_view_time")))
def rateMedia(media_type, itemsToRate, unrate=False, rating=None):
"""Launches the rating dialog"""
for summary_info in itemsToRate:
if not utilities.isValidMediaType(media_type):
logger.debug("Not a valid media type")
return
elif 'user' not in summary_info:
logger.debug("No user data")
return
s = utilities.getFormattedItemName(media_type, summary_info)
logger.debug("Summary Info %s" % summary_info)
if unrate:
rating = None
if summary_info['user']['ratings']['rating'] > 0:
rating = 0
if not rating is None:
logger.debug("'%s' is being unrated." % s)
__rateOnTrakt(rating, media_type, summary_info, unrate=True)
else:
logger.debug("'%s' has not been rated, so not unrating." % s)
return
rerate = kodiUtilities.getSettingAsBool('rate_rerate')
if rating is not None:
if summary_info['user']['ratings']['rating'] == 0:
logger.debug(
"Rating for '%s' is being set to '%d' manually." % (s, rating))
__rateOnTrakt(rating, media_type, summary_info)
else:
if rerate:
if not summary_info['user']['ratings']['rating'] == rating:
logger.debug(
"Rating for '%s' is being set to '%d' manually." % (s, rating))
__rateOnTrakt(rating, media_type, summary_info)
else:
kodiUtilities.notification(
kodiUtilities.getString(32043), s)
logger.debug(
"'%s' already has a rating of '%d'." % (s, rating))
else:
kodiUtilities.notification(
kodiUtilities.getString(32041), s)
logger.debug("'%s' is already rated." % s)
return
if summary_info['user']['ratings'] and summary_info['user']['ratings']['rating']:
if not rerate:
logger.debug("'%s' has already been rated." % s)
kodiUtilities.notification(kodiUtilities.getString(32041), s)
return
else:
logger.debug("'%s' is being re-rated." % s)
gui = RatingDialog(
"script-trakt-RatingDialog.xml",
__addon__.getAddonInfo('path'),
media_type,
summary_info,
rerate
)
gui.doModal()
if gui.rating:
rating = gui.rating
if rerate:
if summary_info['user']['ratings'] and summary_info['user']['ratings']['rating'] > 0 and rating == summary_info['user']['ratings']['rating']:
rating = 0
if rating == 0 or rating == "unrate":
__rateOnTrakt(rating, gui.media_type, gui.media, unrate=True)
else:
__rateOnTrakt(rating, gui.media_type, gui.media)
else:
logger.debug("Rating dialog was closed with no rating.")
del gui
# Reset rating and unrate for multi part episodes
unrate = False
rating = None
def __rateOnTrakt(rating, media_type, media, unrate=False):
logger.debug("Sending rating (%s) to Trakt.tv" % rating)
params = media
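    # Build the Trakt sync payload keyed by media type, mirroring the rating into the local Kodi library (userrating) when a library id is available.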
if utilities.isMovie(media_type):
key = 'movies'
params['rating'] = rating
if 'movieid' in media:
kodiUtilities.kodiJsonRequest({"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.SetMovieDetails", "params": {
"movieid": media['movieid'], "userrating": rating}})
elif utilities.isShow(media_type):
key = 'shows'
# we need to remove this key or trakt will be confused
del(params["seasons"])
params['rating'] = rating
if 'tvshowid' in media:
kodiUtilities.kodiJsonRequest({"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.SetTVShowDetails", "params": {
"tvshowid": media['tvshowid'], "userrating": rating}})
elif utilities.isSeason(media_type):
key = 'shows'
params['seasons'] = [{'rating': rating, 'number': media['season']}]
elif utilities.isEpisode(media_type):
key = 'episodes'
params['rating'] = rating
if 'episodeid' in media:
kodiUtilities.kodiJsonRequest({"jsonrpc": "2.0", "id": 1, "method": "VideoLibrary.SetEpisodeDetails", "params": {
"episodeid": media['episodeid'], "userrating": rating}})
else:
return
root = {key: [params]}
if not unrate:
data = globals.traktapi.addRating(root)
else:
data = globals.traktapi.removeRating(root)
if data:
s = utilities.getFormattedItemName(media_type, media)
if 'not_found' in data and not data['not_found']['movies'] and not data['not_found']['episodes'] and not data['not_found']['shows']:
if not unrate:
kodiUtilities.notification(kodiUtilities.getString(32040), s)
else:
kodiUtilities.notification(kodiUtilities.getString(32042), s)
else:
kodiUtilities.notification(kodiUtilities.getString(32044), s)
class RatingDialog(xbmcgui.WindowXMLDialog):
buttons = {
11030: 1,
11031: 2,
11032: 3,
11033: 4,
11034: 5,
11035: 6,
11036: 7,
11037: 8,
11038: 9,
11039: 10
}
focus_labels = {
11030: 32028,
11031: 32029,
11032: 32030,
11033: 32031,
11034: 32032,
11035: 32033,
11036: 32034,
11037: 32035,
11038: 32036,
11039: 32027
}
def __init__(self, xmlFile, resourcePath, media_type, media, rerate):
self.media_type = media_type
self.media = media
self.rating = None
self.rerate = rerate
self.default_rating = kodiUtilities.getSettingAsInt('rating_default')
def __new__(cls, xmlFile, resourcePath, media_type, media, rerate):
return super(RatingDialog, cls).__new__(cls, xmlFile, resourcePath)
def onInit(self):
s = utilities.getFormattedItemName(self.media_type, self.media)
self.getControl(10012).setLabel(s)
rateID = 11029 + self.default_rating
if self.rerate and self.media['user']['ratings'] and int(self.media['user']['ratings']['rating']) > 0:
rateID = 11029 + int(self.media['user']['ratings']['rating'])
self.setFocus(self.getControl(rateID))
def onClick(self, controlID):
if controlID in self.buttons:
self.rating = self.buttons[controlID]
self.close()
def onFocus(self, controlID):
if controlID in self.focus_labels:
s = kodiUtilities.getString(self.focus_labels[controlID])
if self.rerate:
if self.media['user']['ratings'] and self.media['user']['ratings']['rating'] == self.buttons[controlID]:
if utilities.isMovie(self.media_type):
s = kodiUtilities.getString(32037)
elif utilities.isShow(self.media_type):
s = kodiUtilities.getString(32038)
elif utilities.isEpisode(self.media_type):
s = kodiUtilities.getString(32039)
elif utilities.isSeason(self.media_type):
s = kodiUtilities.getString(32132)
else:
pass
self.getControl(10013).setLabel(s)
else:
self.getControl(10013).setLabel('')
| gpl-2.0 | -4,677,418,030,784,672,000 | 36.912134 | 157 | 0.556671 | false | 4.012843 | false | false | false |
ch1bo/ambicam | calibrate_capture.py | 1 | 3853 | import picamera
import picamera.array
import cv2
import time
import numpy as np
import multiprocessing as mp
import queue
import signal
def capture(q, stop, resolution=(640,480), framerate=30):
print('Start capturing...')
with picamera.PiCamera(resolution=resolution, framerate=framerate) as camera:
with picamera.array.PiRGBArray(camera, size=resolution) as raw:
time.sleep(2)
start = cv2.getTickCount()
for frame in camera.capture_continuous(raw, format="bgr", use_video_port=True):
try:
q.put(frame.array, False)
except queue.Full:
print('capture: full')
raw.truncate(0)
fps = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000
print('capture: ' + str(fps))
start = cv2.getTickCount()
if stop.is_set():
break
print('Capturing done')
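    # Do not block process exit on frames still sitting in the queue.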
q.cancel_join_thread()
def order_points(pts):
    """Order four corner points as top-left, top-right, bottom-right, bottom-left."""
    rect = np.zeros((4, 2), dtype = "float32")
    # top-left has the smallest x+y sum, bottom-right the largest
    s = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    # top-right has the smallest y-x difference, bottom-left the largest
    diff = np.diff(pts, axis = 1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    return rect
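# Illustrative example: order_points maps the arbitrarily ordered box returned
# by cv2.boxPoints to [top-left, top-right, bottom-right, bottom-left], e.g.
#   order_points(np.array([[639, 479], [0, 0], [639, 0], [0, 479]]))
#   -> [[0, 0], [639, 0], [639, 479], [0, 479]]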
def process(q, stop):
print('Start processing...')
M = np.load('M.npy')
def nothing(x):
pass
cv2.namedWindow('video', cv2.WINDOW_NORMAL)
cv2.createTrackbar('threshold', 'video', 58, 255, nothing)
cv2.createTrackbar('cannyLow', 'video', 50, 255, nothing)
cv2.createTrackbar('cannyHigh', 'video', 150, 255, nothing)
video = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc(*'MJPG'), 20.0, (640,480))
while not stop.is_set():
start = cv2.getTickCount()
frame = None
try:
while True: # clear queue
frame = q.get(False)
except queue.Empty:
if frame is None:
continue
threshold = cv2.getTrackbarPos('threshold','video')
cannyLow = cv2.getTrackbarPos('cannyLow','video')
cannyHigh = cv2.getTrackbarPos('cannyHigh','video')
frame = frame[:300, :320]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret, black = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
if not ret:
continue
edges = cv2.Canny(black, cannyLow, cannyHigh)
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if not contours:
continue
out = frame.copy()
allC = np.vstack(contours)
hull = cv2.convexHull(allC)
cv2.drawContours(out, [hull], 0, (0,0,255), 2)
rect = cv2.minAreaRect(allC)
box = np.int0(cv2.boxPoints(rect))
im = cv2.drawContours(out,[box],0,(0,255,0),2)
corners = order_points(box)
dst = np.array([[0, 0],
[639, 0],
[639, 479],
[0, 479]], dtype = "float32")
M = cv2.getPerspectiveTransform(corners, dst)
np.save("M", M)
# video.write(out)
cv2.imshow('video', out)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
        fps = (cv2.getTickCount() - start) / cv2.getTickFrequency() * 1000  # elapsed ms for this iteration, not frames/s
print('process: ' + str(fps))
print('Processing done')
def main():
q = mp.Queue(10)
stop = mp.Event()
def sigint(signal, frame):
stop.set()
signal.signal(signal.SIGINT, sigint)
p = mp.Process(target=capture, args=(q, stop))
p.start()
try:
process(q, stop)
finally:
stop.set()
p.join()
if __name__ == "__main__":
main()
| mpl-2.0 | 5,458,559,826,258,748,000 | 32.504348 | 104 | 0.540618 | false | 3.455605 | false | false | false |
TylerSandman/mopy | mopy/impl/nim/game.py | 1 | 2195 | """This module contains a concrete implementation of the game Nim."""
from random import choice
from mopy.game import Game
from mopy.impl.nim.state import NimState
from mopy.impl.nim.action import NimAction
class NimGame(Game):
def __init__(self):
pass
def new_game(self):
"""
Initialize a new game with 3 heaps with 3, 4, and 5 elements.
Initial state looks like the following:
Player 1's move
x
x x
x x x
x x x
x x x
----------
H1 H2 H3
"""
heaps = [3, 4, 5]
current_player = 0
return NimState(heaps, current_player)
def do_action(self, state, action):
"""Take a non-zero number of elements from a heap."""
state.heaps[action.heap_num] -= action.num_taken
state.current_player = 1 if state.current_player == 0 else 0
def is_over(self, state):
"""Game is only over when all heaps are empty."""
return sum(state.heaps) == 0
def get_result(self, state):
"""
If the game is over, the winner is the previous player.
This is because after we execute the final action, we still
advance the current player. Make sure to only call this when
the game is actually complete!
"""
done = self.is_over(state)
if not done:
raise Exception("Game is not done yet!")
return 1 if state.current_player == 0 else 0
def get_random_action(self, state):
"""Take a random number of elements from a random heap."""
return choice(self.get_legal_actions(state))
def get_legal_actions(self, state):
"""
Return all possible take actions the current player can take.
Note that you can take any number of elements from any heap
from 1 to the number of elements on that heap.
"""
actions = []
heaps = state.heaps
for i, h in enumerate(heaps):
for n in range(1, h + 1):
actions.append(NimAction(i, n))
return actions
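# Minimal random self-play sketch exercising the public API above (assumes the
# mopy package imports at the top of this module resolve).
if __name__ == "__main__":
    game = NimGame()
    state = game.new_game()
    while not game.is_over(state):
        game.do_action(state, game.get_random_action(state))
    print("Player {} wins".format(game.get_result(state)))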
| mit | 2,817,251,315,706,340,400 | 28.486111 | 69 | 0.560364 | false | 4.173004 | false | false | false |
secgroup/MTFGatheRing | code/web.py | 1 | 7006 | #!/usr/bin/env python3
import time
import random
import socket
from flask import Flask, render_template, redirect, url_for, request, jsonify
import config
log = None
# classes
class Agent():
def __init__(self, ip, cw=True, node=None, state='initial'):
self.ip = ip
self.cw = cw
self.state = state
self.node = node
def __repr__(self):
return 'Agent: ip {}, direction CW: {}, state: {}, node: {}'.format(self.ip, self.cw, self.state, self.node)
class Node():
def __init__(self, label):
assert isinstance(label, int), 'Node constructor accepts numeric label only'
self.label = label
# list of agent ips in the current node
self.agents = []
def add_agent(self, agent_ip):
# add an agent ip to the list of agents in the current node
self.agents.append(agent_ip)
def __repr__(self):
return '<Node {}: [{}]>'.format(self.label, ' | '.join(str(app.agents[ip]) for ip in self.agents))
class Ring():
def __init__(self, n_nodes):
self._nodes = [Node(i) for i in range(n_nodes)]
self.n_nodes = n_nodes
def get_node(self, label):
return self._nodes[label]
def next(self, agent):
"""Return next node."""
i = 1 if agent.cw else -1
return self._nodes[(agent.node+i) % self.n_nodes]
def prev(self, agent):
"""Return prev node."""
i = -1 if agent.cw else 1
return self._nodes[(agent.node+i) % self.n_nodes]
def blocked(self, agent):
"""Check if the next node is blocked."""
next_node = self.next(agent)
if agent.ip == app.malicious_ip:
return len(next_node.agents) > 0
else:
return app.malicious_ip in next_node.agents
def random_place_agents(self):
"""Randomly place agents in the ring."""
#a = app.agents[app.agents_ips[0]]
#a.node = 1
#self.get_node(1).add_agent(a.ip)
#a.cw = False
#a = app.agents[app.agents_ips[1]]
#a.node = 2
#self.get_node(2).add_agent(a.ip)
#a.cw = False
#a = app.agents[app.agents_ips[2]]
#a.node = 4
#self.get_node(4).add_agent(a.ip)
#a.cw = True
#a = app.agents[app.malicious_ip]
#a.node = 6
#self.get_node(6).add_agent(a.ip)
#a.cw = True
# True = clockwise
# False = counterclockwise
a = app.agents[app.agents_ips[0]]
a.node = 3
self.get_node(3).add_agent(a.ip)
a.cw = False
a = app.agents[app.agents_ips[1]]
a.node = 6
self.get_node(6).add_agent(a.ip)
a.cw = False
a = app.agents[app.agents_ips[2]]
a.node = 5
self.get_node(5).add_agent(a.ip)
a.cw = True
a = app.agents[app.malicious_ip]
a.node = 1
self.get_node(1).add_agent(a.ip)
a.cw = False
        return  # placement hard-coded above; the generic random placement below is never reached
# at most 1 agent per node, randomize direction in case of unoriented ring
for agent, node in zip(app.agents.values(), random.sample(self._nodes, len(app.agents.keys()))):
agent.cw = True if config.oriented else random.choice([True, False])
agent.node = node.label
self.get_node(node.label).add_agent(agent.ip)
def dump(self):
ring = dict()
for node in self._nodes:
ring[str(node.label)] = [(app.agents[a].ip, str(app.agents[a].cw), app.agents[a].state, app.agents[a].node) for a in node.agents]
return ring
def __repr__(self):
return ', '.join(str(node) for node in self._nodes)
class MTFGRServer(Flask):
'''Wrapper around the Flask class used to store additional information.'''
def __init__(self, *args, **kwargs):
super(MTFGRServer, self).__init__(*args, **kwargs)
self.ring = Ring(config.n_nodes)
self.agents_ips = config.agents_ips
self.agents = dict()
self.malicious_ip = config.malicious_ip
self.oriented = config.oriented
self.started = False
# instance of the web application
app = MTFGRServer(__name__)
# auxiliary functions
def _reset():
"""Reset the global variables by parsing again the config file."""
import config
global log
app.ring = Ring(config.n_nodes)
app.agents = {ip: Agent(ip) for ip in config.agents_ips}
app.malicious_ip = config.malicious_ip
app.agents[app.malicious_ip] = Agent(app.malicious_ip, state='malicious')
app.oriented = config.oriented
app.started = False
app.ring.random_place_agents()
log = open('/tmp/ev3.log', 'a')
log.write('\n\nIIIIIIIIIINNNNNNNNNIIIIIIIIIIITTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\n')
# views
def _communicate_start():
"""Instruct each bot to start."""
port = 31337
for ip in app.agents_ips[::-1] + [app.malicious_ip]:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
# s.sendall(b'Go!\n')
s.close()
@app.route('/start')
def start():
app.started = True
try:
_communicate_start()
except Exception:
pass
return redirect(url_for('index'))
@app.route('/reset')
def reset():
_reset()
return redirect(url_for('index'))
@app.route('/status')
def global_status():
"""Get the whole ring status."""
return jsonify(**app.ring.dump())
@app.route('/get/<agent_ip>')
def get_status(agent_ip):
"""Get the list of agents in the current node."""
agent = app.agents[agent_ip]
    # add blocked status to the response
return jsonify(agents=[app.agents[ip].state for ip in app.ring.get_node(agent.node).agents if ip != agent_ip],
blocked=app.ring.blocked(agent))
@app.route('/set/<agent_ip>', methods=['GET'])
def set_status(agent_ip):
global log
turned = request.args.get('turned') == '1'
state = request.args.get('state')
stopped = request.args.get('stopped') == '1'
# logging
sss = '\n\n[Request] {} - ip: {}, turned: {}, state: {}, stopped: {}\n'.format(time.time(), agent_ip, turned, state, stopped)
log.write(sss)
log.write('[Status pre]\n')
log.write(str(app.ring.dump()))
agent = app.agents[agent_ip]
agent.state = state
agent.cw = agent.cw if not turned else not agent.cw
blocked = app.ring.blocked(agent)
if not blocked and not stopped:
# advance to the next node if not blocked
node = app.ring.get_node(agent.node)
next_node = app.ring.next(agent)
agent.node = next_node.label
node.agents.remove(agent_ip)
next_node.add_agent(agent_ip)
log.write('\n[Status post]\n')
log.write(str(app.ring.dump()))
return jsonify(blocked=blocked)
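# Illustrative request (the IP and state value below are assumptions):
#   GET /set/192.168.1.12?turned=0&state=moving&stopped=0
# returns JSON of the form {"blocked": false} and, when the agent is neither
# blocked nor stopped, advances it one node along the ring.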
@app.route('/')
def index():
return render_template('base.html', started=app.started)
def main():
app.run(host='0.0.0.0', debug=config.debug)
if __name__ == '__main__':
main()
| mit | 5,087,806,801,212,509,000 | 27.024 | 140 | 0.58193 | false | 3.334603 | true | false | false |
Chukwunonso/ndi_anambra | blog/migrations/migrations/0001_initial.py | 1 | 6723 | # Generated by Django 2.1.7 on 2019-02-18 20:43
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtaildocs', '0010_document_file_hash'),
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('taggit', '0002_auto_20150616_2121'),
('wagtailimages', '0001_squashed_0021'),
]
operations = [
migrations.CreateModel(
name='BlogIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BlogIndexPageRelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('title', models.CharField(help_text='Link title', max_length=255)),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField()),
('body', wagtail.core.fields.StreamField([('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())])),
('date', models.DateField(verbose_name='Post date')),
('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BlogPageCarouselItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
('caption', wagtail.core.fields.RichTextField(blank=True)),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document')),
('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='blog.BlogPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='BlogPageRelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('title', models.CharField(help_text='Link title', max_length=255)),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document')),
('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='blog.BlogPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='BlogPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='blog.BlogPage')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_blogpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='blogpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='blog.BlogPageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='blogindexpagerelatedlink',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='blogindexpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='blog.BlogIndexPage'),
),
]
| mit | -8,735,230,153,706,615,000 | 53.658537 | 234 | 0.599435 | false | 4.111927 | false | false | false |
JorisBolsens/PYNQ | python/pynq/iop/iop_const.py | 2 | 7201 | # Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "[email protected]"
import os
# Microblaze constants
BIN_LOCATION = os.path.dirname(os.path.realpath(__file__))+"/"
MAILBOX_PROGRAM = 'mailbox.bin'
IOP_FREQUENCY = 100000000
# IOP mailbox constants
MAILBOX_OFFSET = 0xF000
MAILBOX_SIZE = 0x1000
MAILBOX_PY2IOP_CMD_OFFSET = 0xffc
MAILBOX_PY2IOP_ADDR_OFFSET = 0xff8
MAILBOX_PY2IOP_DATA_OFFSET = 0xf00
# IOP mailbox commands
WRITE_CMD = 0
READ_CMD = 1
IOP_MMIO_REGSIZE = 0x10000
# IOP Switch Register Map
PMOD_SWITCHCONFIG_BASEADDR = 0x44A00000
PMOD_SWITCHCONFIG_NUMREGS = 8
# Each Pmod pin can be tied to digital IO, SPI, or IIC
PMOD_SWCFG_DIO0 = 0
PMOD_SWCFG_DIO1 = 1
PMOD_SWCFG_DIO2 = 2
PMOD_SWCFG_DIO3 = 3
PMOD_SWCFG_DIO4 = 4
PMOD_SWCFG_DIO5 = 5
PMOD_SWCFG_DIO6 = 6
PMOD_SWCFG_DIO7 = 7
PMOD_SWCFG_IIC0_SCL = 8
PMOD_SWCFG_IIC0_SDA = 9
# Switch config - all digital IOs
PMOD_SWCFG_DIOALL = [ PMOD_SWCFG_DIO0, PMOD_SWCFG_DIO1,
PMOD_SWCFG_DIO2, PMOD_SWCFG_DIO3,
PMOD_SWCFG_DIO4, PMOD_SWCFG_DIO5,
PMOD_SWCFG_DIO6, PMOD_SWCFG_DIO7]
# Switch config - IIC0, top row
PMOD_SWCFG_IIC0_TOPROW = [ PMOD_SWCFG_DIO0, PMOD_SWCFG_DIO1,
PMOD_SWCFG_IIC0_SCL, PMOD_SWCFG_IIC0_SDA,
PMOD_SWCFG_DIO2, PMOD_SWCFG_DIO3,
PMOD_SWCFG_DIO4, PMOD_SWCFG_DIO5]
# Switch config - IIC0, bottom row
PMOD_SWCFG_IIC0_BOTROW = [ PMOD_SWCFG_DIO0, PMOD_SWCFG_DIO1,
PMOD_SWCFG_DIO2, PMOD_SWCFG_DIO3,
PMOD_SWCFG_DIO4, PMOD_SWCFG_DIO5,
PMOD_SWCFG_IIC0_SCL, PMOD_SWCFG_IIC0_SDA]
# IIC register map
PMOD_XIIC_0_BASEADDR = 0x40800000
PMOD_XIIC_DGIER_OFFSET = 0x1C
PMOD_XIIC_IISR_OFFSET = 0x20
PMOD_XIIC_IIER_OFFSET = 0x28
PMOD_XIIC_RESETR_OFFSET = 0x40
PMOD_XIIC_CR_REG_OFFSET = 0x100
PMOD_XIIC_SR_REG_OFFSET = 0x104
PMOD_XIIC_DTR_REG_OFFSET = 0x108
PMOD_XIIC_DRR_REG_OFFSET = 0x10C
PMOD_XIIC_ADR_REG_OFFSET = 0x110
PMOD_XIIC_TFO_REG_OFFSET = 0x114
PMOD_XIIC_RFO_REG_OFFSET = 0x118
PMOD_XIIC_TBA_REG_OFFSET = 0x11C
PMOD_XIIC_RFD_REG_OFFSET = 0x120
PMOD_XIIC_GPO_REG_OFFSET = 0x124
# SPI register map
PMOD_SPI_0_BASEADDR = 0x44A10000
PMOD_XSP_DGIER_OFFSET = 0x1C
PMOD_XSP_IISR_OFFSET = 0x20
PMOD_XSP_IIER_OFFSET = 0x28
PMOD_XSP_SRR_OFFSET = 0x40
PMOD_XSP_CR_OFFSET = 0x60
PMOD_XSP_SR_OFFSET = 0x64
PMOD_XSP_DTR_OFFSET = 0x68
PMOD_XSP_DRR_OFFSET = 0x6C
PMOD_XSP_SSR_OFFSET = 0x70
PMOD_XSP_TFO_OFFSET = 0x74
PMOD_XSP_RFO_OFFSET = 0x78
# IO register map
PMOD_DIO_BASEADDR = 0x40000000
PMOD_DIO_DATA_OFFSET = 0x0
PMOD_DIO_TRI_OFFSET = 0x4
PMOD_DIO_DATA2_OFFSET = 0x8
PMOD_DIO_TRI2_OFFSET = 0xC
PMOD_DIO_GIE_OFFSET = 0x11C
PMOD_DIO_ISR_OFFSET = 0x120
PMOD_DIO_IER_OFFSET = 0x128
# AXI IO direction constants
PMOD_CFG_DIO_ALLOUTPUT = 0x0
PMOD_CFG_DIO_ALLINPUT = 0xff
# IOP switch register map
ARDUINO_SWITCHCONFIG_BASEADDR = 0x44A20000
ARDUINO_SWITCHCONFIG_NUMREGS = 19
# Each arduino pin can be tied to digital IO, SPI, or IIC
ARDUINO_SWCFG_AIO = 0x0
ARDUINO_SWCFG_AINT = 0x0
ARDUINO_SWCFG_SDA = 0x2
ARDUINO_SWCFG_SCL = 0x3
ARDUINO_SWCFG_DIO = 0x0
ARDUINO_SWCFG_DUART = 0x1
ARDUINO_SWCFG_DINT = 0x1
ARDUINO_SWCFG_DPWM = 0x2
ARDUINO_SWCFG_DTIMERG = 0x3
ARDUINO_SWCFG_DSPICLK = 0x4
ARDUINO_SWCFG_DMISO = 0x5
ARDUINO_SWCFG_DMOSI = 0x6
ARDUINO_SWCFG_DSS = 0x7
ARDUINO_SWCFG_DTIMERIC = 0xB
# Switch config - all digital IOs
ARDUINO_SWCFG_DIOALL = [ ARDUINO_SWCFG_AIO, ARDUINO_SWCFG_AIO,
ARDUINO_SWCFG_AIO, ARDUINO_SWCFG_AIO,
ARDUINO_SWCFG_AIO, ARDUINO_SWCFG_AIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO, ARDUINO_SWCFG_DIO,
ARDUINO_SWCFG_DIO]
# IO register map
ARDUINO_AIO_BASEADDR = 0x40020000
ARDUINO_AIO_DATA_OFFSET = 0x8
ARDUINO_AIO_TRI_OFFSET = 0xc
ARDUINO_DIO_BASEADDR = 0x40020000
ARDUINO_DIO_DATA_OFFSET = 0x0
ARDUINO_DIO_TRI_OFFSET = 0x4
ARDUINO_UART_BASEADDR = 0x40600000
ARDUINO_UART_DATA_OFFSET = 0x0
ARDUINO_UART_TRI_OFFSET = 0x4
# AXI IO direction constants
ARDUINO_CFG_AIO_ALLOUTPUT = 0x0
ARDUINO_CFG_AIO_ALLINPUT = 0xffffffff
ARDUINO_CFG_DIO_ALLOUTPUT = 0x0
ARDUINO_CFG_DIO_ALLINPUT = 0xffffffff
ARDUINO_CFG_UART_ALLOUTPUT = 0x0
ARDUINO_CFG_UART_ALLINPUT = 0xffffffff
# IOP mapping
PMODA = 1
PMODB = 2
ARDUINO = 3
# Stickit Pmod to grove pin mapping
PMOD_GROVE_G1 = [0,4]
PMOD_GROVE_G2 = [1,5]
PMOD_GROVE_G3 = [7,3]
PMOD_GROVE_G4 = [6,2]
# Arduino shield to grove pin mapping
ARDUINO_GROVE_A1 = [0,1]
ARDUINO_GROVE_A2 = [2,3]
ARDUINO_GROVE_A3 = [3,4]
ARDUINO_GROVE_A4 = [4,5]
ARDUINO_GROVE_I2C = []
ARDUINO_GROVE_UART = [0,1]
ARDUINO_GROVE_G1 = [2,3]
ARDUINO_GROVE_G2 = [3,4]
ARDUINO_GROVE_G3 = [4,5]
ARDUINO_GROVE_G4 = [6,7]
ARDUINO_GROVE_G5 = [8,9]
ARDUINO_GROVE_G6 = [10,11]
ARDUINO_GROVE_G7 = [12,13]
| bsd-3-clause | 1,644,779,637,057,130,800 | 33.625 | 79 | 0.652409 | false | 2.472022 | true | false | false |
laginha/django-alo-forms | src/alo/decorators.py | 1 | 2238 | from django.http import HttpRequest
try:
from django.http import JsonResponse
native_json_response = True
except ImportError:
from easy_response.http import JsonResponse
native_json_response = False
from django.shortcuts import get_object_or_404
from django.forms import ModelForm
from functools import wraps
class validate(object):
def __init__(self, form_class, add_instance_using='pk'):#, extra=None):
self.form_class = form_class
self.add_instance_using = add_instance_using
# self.extra = extra
def __call__(self, view):
@wraps(view)
def wrapper(*args, **kwargs):
def get_form_kwargs(request):
data = request.GET if request.method=='GET' else request.POST
form_kwargs = {'data': data}
                if hasattr(request, "FILES"):  # include uploaded files when present
                    form_kwargs['files'] = request.FILES
if issubclass(self.form_class, ModelForm) and kwargs:
value = kwargs.get(self.add_instance_using, None)
if value != None:
model = self.form_class.Meta.model
instance = get_object_or_404(model, pk=value)
form_kwargs['instance'] = instance
# if self.extra != None:
# form_kwargs.update(self.extra(request))
return form_kwargs
def error_response(form):
content = {'Errors': form.errors}
if native_json_response:
return JsonResponse(content, status=400, safe=False)
else:
return JsonResponse(content, status=400)
def validate(request):
                request.form = self.form_class(**get_form_kwargs(request))
if request.form.is_valid():
return view(*args, **kwargs)
return error_response(request.form)
for each in args:
if isinstance(each, HttpRequest):
return validate( each )
return validate( args[0] )
return wrapper
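# Illustrative usage sketch (ArticleForm and the view below are assumptions
# standing in for a real ModelForm and endpoint):
#
#   from django.http import JsonResponse
#   from myapp.forms import ArticleForm
#
#   @validate(ArticleForm, add_instance_using='pk')
#   def update_article(request, pk):
#       article = request.form.save()
#       return JsonResponse({'id': article.pk})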
| mit | -6,294,782,326,722,869,000 | 37.603448 | 77 | 0.529491 | false | 4.844156 | false | false | false |
pexip/pygobject | examples/demo/demos/links.py | 2 | 2664 | #!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
title = "Links"
description = """
GtkLabel can show hyperlinks. The default action is to call gtk_show_uri() on
their URI, but it is possible to override this with a custom handler.
"""
from gi.repository import Gtk
class LinksApp:
def __init__(self):
self.window = Gtk.Window()
self.window.set_title('Links')
self.window.set_border_width(12)
self.window.connect('destroy', Gtk.main_quit)
label = Gtk.Label(label="""Some <a href="http://en.wikipedia.org/wiki/Text"
title="plain text">text</a> may be marked up
as hyperlinks, which can be clicked
or activated via <a href="keynav">keynav</a>""")
label.set_use_markup(True)
label.connect("activate-link", self.activate_link)
self.window.add(label)
label.show()
self.window.show()
def activate_link(self, label, uri):
if uri == 'keynav':
parent = label.get_toplevel()
markup = """The term <i>keynav</i> is a shorthand for
keyboard navigation and refers to the process of using
a program (exclusively) via keyboard input."""
dialog = Gtk.MessageDialog(transient_for=parent,
destroy_with_parent=True,
message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.OK,
text=markup,
use_markup=True)
dialog.present()
dialog.connect('response', self.response_cb)
return True
def response_cb(self, dialog, response_id):
dialog.destroy()
def main(demoapp=None):
LinksApp()
Gtk.main()
if __name__ == '__main__':
main()
| lgpl-2.1 | 4,680,339,128,670,448,000 | 33.597403 | 83 | 0.631006 | false | 3.982063 | false | false | false |
stevejefferies/robotframework-selenium2library | src/Selenium2Library/keywords/_browsermanagement.py | 8 | 20035 | import os
import robot
from robot.errors import DataError
from selenium import webdriver
from Selenium2Library import webdrivermonkeypatches
from Selenium2Library.utils import BrowserCache
from Selenium2Library.locators import WindowManager
from keywordgroup import KeywordGroup
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
FIREFOX_PROFILE_DIR = os.path.join(ROOT_DIR, 'resources', 'firefoxprofile')
BROWSER_NAMES = {'ff': "_make_ff",
'firefox': "_make_ff",
'ie': "_make_ie",
'internetexplorer': "_make_ie",
'googlechrome': "_make_chrome",
'gc': "_make_chrome",
'chrome': "_make_chrome",
'opera' : "_make_opera",
'htmlunit' : "_make_htmlunit",
'htmlunitwithjs' : "_make_htmlunitwithjs"
}
class _BrowserManagementKeywords(KeywordGroup):
def __init__(self):
self._cache = BrowserCache()
self._window_manager = WindowManager()
self._speed_in_secs = float(0)
self._timeout_in_secs = float(5)
self._implicit_wait_in_secs = float(0)
# Public, open and close
def close_all_browsers(self):
"""Closes all open browsers and resets the browser cache.
After this keyword new indexes returned from `Open Browser` keyword
are reset to 1.
This keyword should be used in test or suite teardown to make sure
all browsers are closed.
"""
self._debug('Closing all browsers')
self._cache.close_all()
def close_browser(self):
"""Closes the current browser."""
if self._cache.current:
self._debug('Closing browser with session id %s'
% self._cache.current.session_id)
self._cache.close()
def open_browser(self, url, browser='firefox', alias=None,remote_url=False,
desired_capabilities=None,ff_profile_dir=None):
"""Opens a new browser instance to given URL.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
Optional alias is an alias for the browser instance and it can be used
for switching between browsers (just as index can be used). See `Switch
Browser` for more details.
Possible values for `browser` are as follows:
| firefox | FireFox |
| ff | FireFox |
| internetexplorer | Internet Explorer |
| ie | Internet Explorer |
| googlechrome | Google Chrome |
| gc | Google Chrome |
| chrome | Google Chrome |
| opera | Opera |
| htmlunit | HTMLUnit |
        | htmlunitwithjs    | HTMLUnit with Javascript support  |
        Note that you will encounter strange behavior if you open
multiple Internet Explorer browser instances. That is also why
`Switch Browser` only works with one IE browser at most.
For more information see:
http://selenium-grid.seleniumhq.org/faq.html#i_get_some_strange_errors_when_i_run_multiple_internet_explorer_instances_on_the_same_machine
Optional 'remote_url' is the url for a remote selenium server for example
http://127.0.0.1/wd/hub. If you specify a value for remote you can
also specify 'desired_capabilities' which is a string in the form
key1:val1,key2:val2 that will be used to specify desired_capabilities
        to the remote server. This is useful for doing things like specifying a
        proxy server for Internet Explorer or specifying the browser and OS when
        using saucelabs.com.
        Optional 'ff_profile_dir' is the path to the firefox profile dir if you
        wish to override the default.
"""
if remote_url:
self._info("Opening browser '%s' to base url '%s' through remote server at '%s'"
% (browser, url, remote_url))
else:
self._info("Opening browser '%s' to base url '%s'" % (browser, url))
browser_name = browser
browser = self._make_browser(browser_name,desired_capabilities,ff_profile_dir,remote_url)
browser.get(url)
self._debug('Opened browser with session id %s'
% browser.session_id)
return self._cache.register(browser, alias)
def switch_browser(self, index_or_alias):
"""Switches between active browsers using index or alias.
Index is returned from `Open Browser` and alias can be given to it.
Example:
| Open Browser | http://google.com | ff |
| Location Should Be | http://google.com | |
| Open Browser | http://yahoo.com | ie | 2nd conn |
| Location Should Be | http://yahoo.com | |
| Switch Browser | 1 | # index |
| Page Should Contain | I'm feeling lucky | |
| Switch Browser | 2nd conn | # alias |
| Page Should Contain | More Yahoo! | |
| Close All Browsers | | |
Above example expects that there was no other open browsers when
opening the first one because it used index '1' when switching to it
later. If you aren't sure about that you can store the index into
a variable as below.
| ${id} = | Open Browser | http://google.com | *firefox |
| # Do something ... |
| Switch Browser | ${id} | | |
"""
try:
self._cache.switch(index_or_alias)
self._debug('Switched to browser with Selenium session id %s'
% self._cache.current.session_id)
except (RuntimeError, DataError): # RF 2.6 uses RE, earlier DE
raise RuntimeError("No browser with index or alias '%s' found."
% index_or_alias)
# Public, window management
def close_window(self):
"""Closes currently opened pop-up window."""
self._current_browser().close()
def get_window_identifiers(self):
"""Returns and logs id attributes of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_ids(self._current_browser()))
def get_window_names(self):
"""Returns and logs names of all windows known to the browser."""
values = self._window_manager.get_window_names(self._current_browser())
# for backward compatibility, since Selenium 1 would always
# return this constant value for the main window
if len(values) and values[0] == 'undefined':
values[0] = 'selenium_main_app_window'
return self._log_list(values)
def get_window_titles(self):
"""Returns and logs titles of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_titles(self._current_browser()))
def maximize_browser_window(self):
"""Maximizes current browser window."""
self._current_browser().maximize_window()
def select_frame(self, locator):
"""Sets frame identified by `locator` as current frame.
Key attributes for frames are `id` and `name.` See `introduction` for
details about locating elements.
"""
self._info("Selecting frame '%s'." % locator)
element = self._element_find(locator, True, True)
self._current_browser().switch_to_frame(element)
def select_window(self, locator=None):
"""Selects the window found with `locator` as the context of actions.
If the window is found, all subsequent commands use that window, until
this keyword is used again. If the window is not found, this keyword fails.
By default, when a locator value is provided,
it is matched against the title of the window and the
javascript name of the window. If multiple windows with
same identifier are found, the first one is selected.
Special locator `main` (default) can be used to select the main window.
It is also possible to specify the approach Selenium2Library should take
to find a window by specifying a locator strategy:
| *Strategy* | *Example* | *Description* |
| title | Select Window `|` title=My Document | Matches by window title |
| name | Select Window `|` name=${name} | Matches by window javascript name |
| url | Select Window `|` url=http://google.com | Matches by window's current URL |
Example:
| Click Link | popup_link | # opens new window |
| Select Window | popupName |
| Title Should Be | Popup Title |
| Select Window | | | # Chooses the main window again |
"""
self._window_manager.select(self._current_browser(), locator)
def unselect_frame(self):
"""Sets the top frame as the current frame."""
self._current_browser().switch_to_default_content()
# Public, browser/current page properties
def get_location(self):
"""Returns the current location."""
return self._current_browser().get_current_url()
def get_source(self):
"""Returns the entire html source of the current page or frame."""
return self._current_browser().get_page_source()
def get_title(self):
"""Returns title of current page."""
return self._current_browser().get_title()
def location_should_be(self, url):
"""Verifies that current URL is exactly `url`."""
actual = self.get_location()
if actual != url:
raise AssertionError("Location should have been '%s' but was '%s'"
% (url, actual))
self._info("Current location is '%s'." % url)
def location_should_contain(self, expected):
"""Verifies that current URL contains `expected`."""
actual = self.get_location()
if not expected in actual:
raise AssertionError("Location should have contained '%s' "
"but it was '%s'." % (expected, actual))
self._info("Current location contains '%s'." % expected)
def log_location(self):
"""Logs and returns the current location."""
url = self.get_location()
self._info(url)
return url
def log_source(self, loglevel='INFO'):
"""Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels are
`WARN`, `INFO` (default), `DEBUG`, `TRACE` and `NONE` (no logging).
"""
source = self.get_source()
self._log(source, loglevel.upper())
return source
def log_title(self):
"""Logs and returns the title of current page."""
title = self.get_title()
self._info(title)
return title
def title_should_be(self, title):
"""Verifies that current page title equals `title`."""
actual = self.get_title()
if actual != title:
raise AssertionError("Title should have been '%s' but was '%s'"
% (title, actual))
self._info("Page title is '%s'." % title)
# Public, navigation
def go_back(self):
"""Simulates the user clicking the "back" button on their browser."""
self._current_browser().back()
def go_to(self, url):
"""Navigates the active browser instance to the provided URL."""
self._info("Opening url '%s'" % url)
self._current_browser().get(url)
def reload_page(self):
"""Simulates user reloading page."""
self._current_browser().refresh()
# Public, execution properties
def get_selenium_speed(self):
"""Gets the delay in seconds that is waited after each Selenium command.
See `Set Selenium Speed` for an explanation."""
return robot.utils.secs_to_timestr(self._speed_in_secs)
def get_selenium_timeout(self):
"""Gets the timeout in seconds that is used by various keywords.
See `Set Selenium Timeout` for an explanation."""
return robot.utils.secs_to_timestr(self._timeout_in_secs)
def get_selenium_implicit_wait(self):
"""Gets the wait in seconds that is waited by Selenium.
See `Set Selenium Implicit Wait` for an explanation."""
return robot.utils.secs_to_timestr(self._implicit_wait_in_secs)
def set_selenium_speed(self, seconds):
"""Sets the delay in seconds that is waited after each Selenium command.
This is useful mainly in slowing down the test execution to be able to
view the execution. `seconds` may be given in Robot Framework time
format. Returns the previous speed value.
Example:
| Set Selenium Speed | .5 seconds |
"""
old_speed = self.get_selenium_speed()
self._speed_in_secs = robot.utils.timestr_to_secs(seconds)
for browser in self._cache.browsers:
browser.set_speed(self._speed_in_secs)
return old_speed
def set_selenium_timeout(self, seconds):
"""Sets the timeout in seconds used by various keywords.
There are several `Wait ...` keywords that take timeout as an
argument. All of these timeout arguments are optional. The timeout
used by all of them can be set globally using this keyword.
See `introduction` for more information about timeouts.
The previous timeout value is returned by this keyword and can
be used to set the old value back later. The default timeout
is 5 seconds, but it can be altered in `importing`.
Example:
| ${orig timeout} = | Set Selenium Timeout | 15 seconds |
| Open page that loads slowly |
| Set Selenium Timeout | ${orig timeout} |
"""
old_timeout = self.get_selenium_timeout()
self._timeout_in_secs = robot.utils.timestr_to_secs(seconds)
for browser in self._cache.get_open_browsers():
browser.set_script_timeout(self._timeout_in_secs)
return old_timeout
def set_selenium_implicit_wait(self, seconds):
"""Sets Selenium 2's default implicit wait in seconds and
sets the implicit wait for all open browsers.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| ${orig wait} = | Set Selenium Implicit Wait | 10 seconds |
| Perform AJAX call that is slow |
| Set Selenium Implicit Wait | ${orig wait} |
"""
old_wait = self.get_selenium_implicit_wait()
self._implicit_wait_in_secs = robot.utils.timestr_to_secs(seconds)
for browser in self._cache.get_open_browsers():
browser.implicitly_wait(self._implicit_wait_in_secs)
return old_wait
def set_browser_implicit_wait(self, seconds):
"""Sets current browser's implicit wait in seconds.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| Set Browser Implicit Wait | 10 seconds |
See also `Set Selenium Implicit Wait`.
"""
implicit_wait_in_secs = robot.utils.timestr_to_secs(seconds)
self._current_browser().implicitly_wait(implicit_wait_in_secs)
# Private
def _current_browser(self):
if not self._cache.current:
raise RuntimeError('No browser is open')
return self._cache.current
def _get_browser_token(self, browser_name):
return BROWSER_NAMES.get(browser_name.lower().replace(' ', ''), browser_name)
def _get_browser_creation_function(self,browser_name):
return BROWSER_NAMES.get(browser_name.lower().replace(' ', ''), browser_name)
def _make_browser(self , browser_name , desired_capabilities=None , profile_dir=None,
remote=None):
creation_func = self._get_browser_creation_function(browser_name)
browser = getattr(self,creation_func)(remote , desired_capabilities , profile_dir)
if browser is None:
raise ValueError(browser_name + " is not a supported browser.")
browser.set_speed(self._speed_in_secs)
browser.set_script_timeout(self._timeout_in_secs)
browser.implicitly_wait(self._implicit_wait_in_secs)
return browser
def _make_ff(self , remote , desired_capabilites , profile_dir):
if not profile_dir: profile_dir = FIREFOX_PROFILE_DIR
profile = webdriver.FirefoxProfile(profile_dir)
if remote:
browser = self._create_remote_web_driver(webdriver.DesiredCapabilities.FIREFOX ,
remote , desired_capabilites , profile)
else:
browser = webdriver.Firefox(firefox_profile=profile)
return browser
def _make_ie(self , remote , desired_capabilities , profile_dir):
return self._generic_make_browser(webdriver.Ie,
webdriver.DesiredCapabilities.INTERNETEXPLORER, remote, desired_capabilities)
def _make_chrome(self , remote , desired_capabilities , profile_dir):
return self._generic_make_browser(webdriver.Chrome,
webdriver.DesiredCapabilities.CHROME, remote, desired_capabilities)
def _make_opera(self , remote , desired_capabilities , profile_dir):
return self._generic_make_browser(webdriver.Opera,
webdriver.DesiredCapabilities.OPERA, remote, desired_capabilities)
def _make_htmlunit(self , remote , desired_capabilities , profile_dir):
return self._generic_make_browser(webdriver.Remote,
webdriver.DesiredCapabilities.HTMLUNIT, remote, desired_capabilities)
def _make_htmlunitwithjs(self , remote , desired_capabilities , profile_dir):
return self._generic_make_browser(webdriver.Remote,
webdriver.DesiredCapabilities.HTMLUNITWITHJS, remote, desired_capabilities)
def _generic_make_browser(self, webdriver_type , desired_cap_type, remote_url, desired_caps):
'''most of the make browser functions just call this function which creates the
appropriate web-driver'''
if not remote_url:
browser = webdriver_type()
else:
browser = self._create_remote_web_driver(desired_cap_type,remote_url , desired_caps)
return browser
def _create_remote_web_driver(self , capabilities_type , remote_url , desired_capabilities=None , profile=None):
'''parses the string based desired_capabilities which should be in the form
key1:val1,key2:val2 and creates the associated remote web driver'''
desired_cap = self._create_desired_capabilities(capabilities_type , desired_capabilities)
return webdriver.Remote(desired_capabilities=desired_cap , command_executor=str(remote_url) , browser_profile=profile)
def _create_desired_capabilities(self, capabilities_type, capabilities_string):
desired_capabilities = capabilities_type
if capabilities_string:
for cap in capabilities_string.split(","):
(key, value) = cap.split(":")
desired_capabilities[key.strip()] = value.strip()
return desired_capabilities
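# Illustrative example: given the capabilities string
# "browserName:chrome,platform:LINUX", _create_desired_capabilities merges
# {'browserName': 'chrome', 'platform': 'LINUX'} into the DesiredCapabilities
# dictionary passed in as capabilities_type.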
| apache-2.0 | -4,220,819,633,051,470,300 | 41.62766 | 164 | 0.615323 | false | 4.334704 | false | false | false |
vladan-m/ggrc-core | src/ggrc/models/object_control.py | 1 | 3050 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from .mixins import deferred, Mapping, Timeboxed
from .reflection import PublishOnly
class ObjectControl(Timeboxed, Mapping, db.Model):
__tablename__ = 'object_controls'
role = deferred(db.Column(db.String), 'ObjectControl')
notes = deferred(db.Column(db.Text), 'ObjectControl')
control_id = db.Column(
db.Integer, db.ForeignKey('controls.id'), nullable=False)
controllable_id = db.Column(db.Integer, nullable=False)
controllable_type = db.Column(db.String, nullable=False)
@property
def controllable_attr(self):
return '{0}_controllable'.format(self.controllable_type)
@property
def controllable(self):
return getattr(self, self.controllable_attr)
@controllable.setter
def controllable(self, value):
self.controllable_id = value.id if value is not None else None
self.controllable_type = value.__class__.__name__ if value is not None \
else None
return setattr(self, self.controllable_attr, value)
@staticmethod
def _extra_table_args(cls):
return (
db.UniqueConstraint(
'control_id', 'controllable_id', 'controllable_type'),
db.Index('ix_control_id', 'control_id'),
)
_publish_attrs = [
'role',
'notes',
'control',
'controllable',
]
_sanitize_html = [
'notes',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(ObjectControl, cls).eager_query()
return query.options(
orm.subqueryload('control'))
def _display_name(self):
return self.controllable.display_name + '<->' + self.control.display_name
class Controllable(object):
@declared_attr
def object_controls(cls):
cls.controls = association_proxy(
'object_controls', 'control',
creator=lambda control: ObjectControl(
control=control,
controllable_type=cls.__name__,
)
)
joinstr = 'and_(foreign(ObjectControl.controllable_id) == {type}.id, '\
'foreign(ObjectControl.controllable_type) == "{type}")'
joinstr = joinstr.format(type=cls.__name__)
return db.relationship(
'ObjectControl',
primaryjoin=joinstr,
backref='{0}_controllable'.format(cls.__name__),
cascade='all, delete-orphan',
)
_publish_attrs = [
PublishOnly('controls'),
'object_controls',
]
_include_links = [
#'object_controls',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Controllable, cls).eager_query()
return cls.eager_inclusions(query, Controllable._include_links).options(
orm.subqueryload('object_controls'))
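# Illustrative usage sketch (the Facility model below is an assumption):
#
#   class Facility(Controllable, db.Model):
#       __tablename__ = 'facilities'
#
# Mixing Controllable into a model exposes `facility.controls` (association
# proxy) and `facility.object_controls` (the mapping rows declared above).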
| apache-2.0 | 333,521,147,788,283,970 | 29.19802 | 78 | 0.661967 | false | 3.665865 | false | false | false |
cjahangir/geodash-new | geonode/contrib/metadataxsl/models.py | 21 | 2098 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db.models import signals
from geonode.base.models import Link
from geonode.layers.models import Layer
from geonode.documents.models import Document
ISO_XSL_NAME = 'ISO with XSL'
settings.DOWNLOAD_FORMATS_METADATA.append(ISO_XSL_NAME)
def xsl_post_save(instance, sender, **kwargs):
"""Add a link to the enriched ISO metadata
"""
add_xsl_link(instance.resourcebase_ptr)
def add_xsl_link(resourcebase):
"""Add a link to the enriched ISO metadata
"""
urlpath = reverse('prefix_xsl_line', args=[resourcebase.id])
url = '{}{}'.format(settings.SITEURL, urlpath)
link, created = Link.objects.get_or_create(
resource=resourcebase,
url=url,
defaults=dict(name=ISO_XSL_NAME,
extension='xml',
mime='text/xml',
link_type='metadata'))
return created
if 'geonode.catalogue' in settings.INSTALLED_APPS:
signals.post_save.connect(xsl_post_save, sender=Layer)
signals.post_save.connect(xsl_post_save, sender=Document)
# TODO: maps as well?
| gpl-3.0 | 6,332,501,313,276,306,000 | 32.301587 | 73 | 0.619638 | false | 4.221328 | false | false | false |
srgtrujillo/TuiterPy | controller/command_line.py | 1 | 1644 | # -*- encoding: utf-8 -*-
################################################################################
# TuiterPy - A Python Command Line Social Network Application
# Copyright (C) 2016 Sergio Trujillo ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
class CommandLine(object):
def __init__(self, view, processor):
self.view = view
self.processor = processor
self.exit = False
def start(self):
self.view.show_welcome()
def resume(self):
self.view.show_prompt()
command_line = self.view.read_command_line()
self.process(command_line)
def process(self, command):
if self.processor.exit(command):
self.view.show_farewell()
self.exit = True
else:
printer = self.processor.process(command)
printer.print_command()
def is_exit(self):
return self.exit
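# Illustrative usage sketch (ConsoleView and CommandProcessor are assumptions
# standing in for the project's real view and processor collaborators):
#
#   command_line = CommandLine(ConsoleView(), CommandProcessor(network))
#   command_line.start()
#   while not command_line.is_exit():
#       command_line.resume()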
| gpl-3.0 | -5,209,058,976,015,899,000 | 35.533333 | 80 | 0.596107 | false | 4.28125 | false | false | false |
djgroen/flee-release | flee/SimulationSettings.py | 1 | 4308 | import sys
import csv
class SimulationSettings:
Softening = 10.0 # KM added to every link distance to eliminate needless distinction between very short routes.
#TurnBackAllowed = True # feature disabled for now.
AgentLogLevel = 0 # set to 1 for basic agent information.
CampLogLevel = 0 # set to 1 to obtain average times for agents to reach camps at any time step (aggregate info).
InitLogLevel = 0 # set to 1 for basic information on locations added and conflict zones assigned.
TakeRefugeesFromPopulation = True
sqrt_ten = 3.16227766017 # square root of ten (10^0.5).
CampWeight = sqrt_ten # attraction factor for camps.
ConflictWeight = 1.0 / sqrt_ten # reduction factor for refugees entering conflict zones.
MaxMoveSpeed = 360 # most number of km that we expect refugees to traverse per time step (30 km/h * 12 hours).
MaxWalkSpeed = 35 # most number of km that we expect refugees to traverse per time step on foot (3.5 km/h * 10 hours).
MaxCrossingSpeed = 20 # most number of km that we expect refugees to traverse per time step on boat/walk to cross river (2 km/h * 10 hours).
StartOnFoot = True # Agents walk on foot when they travers their very first link.
CapacityBuffer = 1.0
# default move chance
ConflictMoveChance = 1.0
CampMoveChance = 0.001
DefaultMoveChance = 0.3
# Specific enhancements for the 2.0 ruleset.
# This includes a movespeed of 420 and a walk speed of 42.
AvoidShortStints = True # Displaced people will not take a break unless they at least travelled for a full day's distance in the last two days.
FlareConflictInputFile = ""
AwarenessLevel = 1 #-1, no weighting at all, 0 = road only, 1 = location, 2 = neighbours, 3 = region.
#NumProcs = 1 #This is not supported at the moment.
UseV1Rules = False
if UseV1Rules == True:
MaxMoveSpeed = 200
StartOnFoot = False
AvoidShortStints = False # Displaced people will not take a break unless they at least travelled for a full day's distance in the last two days.
CampWeight = 2.0 # attraction factor for camps.
ConflictWeight = 0.25 # reduction factor for refugees entering conflict zones.
def ReadFromCSV(csv_name):
"""
Reads simulation settings from CSV
"""
number_of_steps = -1
with open(csv_name, newline='') as csvfile:
values = csv.reader(csvfile)
for row in values:
if row[0][0] == "#":
pass
elif row[0].lower() == "agentloglevel":
SimulationSettings.AgentLogLevel = int(row[1])
elif row[0].lower() == "camploglevel":
SimulationSettings.CampLogLevel = int(row[1])
elif row[0].lower() == "initloglevel":
SimulationSettings.InitLogLevel = int(row[1])
elif row[0].lower() == "minmovespeed":
SimulationSettings.MinMoveSpeed = float(row[1])
elif row[0].lower() == "maxmovespeed":
SimulationSettings.MaxMoveSpeed = float(row[1])
elif row[0].lower() == "numberofsteps":
number_of_steps = int(row[1])
elif row[0].lower() == "campweight":
SimulationSettings.CampWeight = float(row[1])
elif row[0].lower() == "conflictweight":
SimulationSettings.ConflictWeight = float(row[1])
elif row[0].lower() == "conflictmovechance":
SimulationSettings.ConflictMoveChance = float(row[1])
elif row[0].lower() == "campmovechance":
SimulationSettings.CampMoveChance = float(row[1])
elif row[0].lower() == "defaultmovechance":
SimulationSettings.DefaultMoveChance = float(row[1])
elif row[0].lower() == "awarenesslevel":
SimulationSettings.AwarenessLevel = int(row[1])
elif row[0].lower() == "flareconflictinputfile":
SimulationSettings.FlareConflictInputFile = row[1]
elif row[0].lower() == "usev1rules":
SimulationSettings.UseV1Rules = (row[1].lower() == "true")
elif row[0].lower() == "startonfoot":
SimulationSettings.StartOnFoot = (row[1].lower() == "true")
elif row[0].lower() == "avoidshortstints":
SimulationSettings.AvoidShortStints = (row[1].lower() == "true")
else:
print("FLEE Initialization Error: unrecognized simulation parameter:",row[0])
sys.exit()
return number_of_steps
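# Illustrative settings file for ReadFromCSV (the values shown are assumptions,
# not recommended defaults):
#
#   #option,value
#   AgentLogLevel,0
#   MaxMoveSpeed,360
#   CampWeight,2.0
#   ConflictWeight,0.25
#   NumberOfSteps,300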
| bsd-3-clause | 5,661,824,005,492,261,000 | 43.875 | 148 | 0.668524 | false | 3.635443 | false | false | false |
kwameboame/newsdex | news/views/settings_views.py | 1 | 1307 | # coding=utf-8
import logging
from django.shortcuts import render, redirect
from news.models import Feed
from news.tasks import twitter_task
from news.utils.common import get_active_tasks
from newsproject import celery_app
__author__ = 'ilov3'
logger = logging.getLogger(__name__)
def settings(request):
feeds = Feed.objects.all()
tasks_list = get_active_tasks(name='news.tasks.twitter_task.twitter_task')
return render(request, 'settings/settings.html', {'streams': tasks_list, 'feeds': feeds})
def stop_stream(request):
if request.method == 'POST':
task_id = request.POST.get('task_id')
if task_id:
celery_app.control.revoke(task_id=task_id, terminate=True)
return redirect('settings')
def new_tweet_stream(request):
if request.method == "POST":
keyword = request.POST.get('keyword')
location = request.POST.get('location')
if location:
try:
location = [float(coordinate) for coordinate in location.split(',')]
except Exception as e:
logger.error('Could not convert location string (%s) into coordinates. Error: %s' % (location, e))
twitter_task.delay(keyword=keyword, location=location)
return redirect('settings')
return redirect('settings')
| bsd-2-clause | 4,652,564,497,210,808,000 | 31.675 | 114 | 0.664116 | false | 3.913174 | false | false | false |
adamacosta/hacks | school/course_availability.py | 1 | 5055 | """
IMPORTANT: DO NOT USE THIS SCRIPT TO DOS THE SCHOOL SERVER. THIS IS
INTENDED TO MAKE IT EASIER TO CHECK WHICH COURSES ARE OPEN WITHOUT
HAVING TO LOG IN AND MANUALLY GO THROUGH EACH STEP TO SELECT THE
TERM AND DEPARTMENT. USE IT ONCE AND THEN REGISTER. AT MOSE USE IT
ONCE EVERY 15 MINUTES, ABOUT THE RATE AT WHICH A HUMAN USING A MOUSE
WOULD REFRESH THEIR OWN SEARCHES.
This is a single-module script to scrape OSCAR for course openings.
It works for Summer and Fall of 2016. There is no guarantee it will
work for future terms unless you change the CRNs and term dates.
Additionally, if you are using this in the future, you will want to
ensure that the OSCAR API has not changed and this url structure still
works. I will likely maintain this myself until I have graduated and
no longer.
(c) Adam Acosta 2016
"""
from __future__ import print_function
import re
import sys
import argparse
from urllib2 import urlopen
from bs4 import BeautifulSoup
class TermDependent(argparse.Action):
"""Custom Action to ensure user selects a term if specifying crn."""
def __call__(self, parser, namespace, values, option_string=None):
term = getattr(namespace, 'term')
# User tried to specify a crn without specifying a term
if term == 'all':
parser.error("must specify term to use crn")
else:
setattr(namespace, self.dest, values)
parser = argparse.ArgumentParser()
parser.add_argument('--term', type=str, default='all',
help='the term you wish to check')
parser.add_argument('--crn', type=str, default=None,
action=TermDependent,
help='use this if you only want to check one CRN')
crns = {
'summer':
{'intro to info security': '56393',
'software dev process': '55424',
'software arch and design': '56394',
'software analysis and test': '56395',
'comp photography': '55805',
'knowledge-based ai': '55806',
'artificial intelligence for robotics': '55426',
'intro to operating systems': '55804',
'reinforcement learning': '56396',
'embedded software': '56397'},
'fall':
{'high performance computing': '89826',
'data and visual analytics': '91202',
'big data for health': '91201',
'intro to info security': '89823',
'adv operating systems': '88770',
'computer networks': '88771',
'network security': '91203',
'high performance computer arch': '88775',
'software dev process': '88772',
'software arch and design': '88776',
'software analysis and test': '91197',
'db sys concepts and design': '91198',
'intro health informatics': '88777',
'educ tech foundations': '90228',
'comp photography': '89821',
'computer vision': '90192',
'computability and algorithms': '88778',
'artificial intelligence': '91199',
'knowledge-based ai': '88779',
'machine learning': '88773',
'mach learning for trading': '89824',
'artificial intelligence for robotics': '88774',
'intro to operating systems': '89822',
'reinforcement learning': '89825',
'embedded software': '91200',
'cyber-physical systems': '91581'},
}
terms = {'summer': '201605', 'fall': '201608'}
def get_seats(term, crn):
"""Enter the term and crn and return the number of open seats."""
# This is the API as of April 2016
url = "https://oscar.gatech.edu/pls/bprod/bwckschd" + \
".p_disp_detail_sched?term_in={}&crn_in={}".format(term, crn)
html = urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
text = soup.get_text()
# Uncomment one of these to test
# return url
# return soup
# return text
# Available seats is the third column in the table
seats = re.search('(?<=Seats\n[0-9]{3}\n[0-9]{3}\n)[0-9]{1,3}', text)
if seats is not None:
return seats.group(0)
# In this case, the course only has double-digit enrollment
    # Do this twice because lookbehind assertions only accept fixed-length patterns
seats = re.search('(?<=Seats\n[0-9]{3}\n[0-9]{2}\n)[0-9]{1,3}', text)
return seats.group(0)
if __name__ == '__main__':
args = parser.parse_args()
# I am double-checking here that you are not DOSing the server
ans = raw_input("Have you checked in the last 15 minutes? (y/n): ")
if str(ans).lower() != 'n':
print("Please wait at least 15 minutes.")
sys.exit(0)
# Single CRN
if args.crn:
print(get_seats(terms[args.term], args.crn))
sys.exit(0)
# Single term
if args.term != 'all':
for course in crns[args.term]:
print(course, get_seats(terms[args.term], crns[args.term][course]))
sys.exit(0)
# Go ahead and check
for term in terms:
for course in crns[term]:
print(term, course, get_seats(terms[term], crns[term][course]))
sys.exit(0)
| mit | -1,649,449,341,379,516,400 | 33.104167 | 79 | 0.624332 | false | 3.481405 | false | false | false |
ravello/testmill | lib/testmill/inflect.py | 1 | 1307 | # Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
# These inflection functions are trivial and only work for regular nouns
# and verbs. Special cases can be added as and when needed.
def plural_noun(noun, count=2):
"""Takes a singular English noun ``noun`` and returns a plural version,
depending on ``count``."""
if count == 1:
return noun
else:
return noun + 's'
def plural_verb(verb, count=2):
"""Return the plural conjugation of the English verb ``verb``, depending
on ``count``."""
if count == 1:
if verb == 'was':
return 'was'
return verb + 's'
else:
if verb == 'was':
return 'were'
return verb
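# A minimal usage sketch of the two helpers above, kept under a __main__ guard
# so that importing the module is unaffected:
if __name__ == '__main__':
    # Regular noun: unchanged for one item, 's' appended otherwise.
    print(plural_noun('application', 1))  # -> application
    print(plural_noun('application', 3))  # -> applications
    # Regular verb, plus the special-cased 'was'/'were'.
    print(plural_verb('run', 1))  # -> runs
    print(plural_verb('was', 2))  # -> were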
| apache-2.0 | -2,928,847,715,319,305,700 | 32.512821 | 76 | 0.671767 | false | 3.913174 | false | false | false |
tylerlong/toolkit_library | toolkit_library/input_util.py | 1 | 1526 | # coding=utf-8
"""
toolkit_library.input_util
~~~~~~~~~~~~~~~~~~~~~~~~~~
Get inputs from user and validate them
"""
import re
class InputUtil(object):
"""get inputs from user and validate them"""
@staticmethod
def get_input(name, default = None, pattern = None):
"""get inputs from user and validate them
        If the user enters nothing and the default value is not None, the default value will be returned.
        If the user enters something and the pattern is not None, the input must match the regex pattern.
        Otherwise the user will be prompted to enter again.
"""
assert type(name) == str and len(name) > 0
prompt = name
if pattern is not None:
prompt = '{0} ({1})'.format(prompt, pattern)
if default is not None:
prompt = '{0} [{1}]'.format(prompt, default)
prompt = 'Please enter {0}: '.format(prompt)
while True:
result = raw_input(prompt)
if not result:
if default is not None:
return default
else:
print 'Please enter sth, as there is no default value available.'
else:
if pattern is None:
return result
else:
if re.match(pattern, result):
return result
else:
print 'What you just entered is not valid, please try again.'
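# A minimal usage sketch (the prompt names, defaults and pattern are made up);
# kept under a __main__ guard so that importing the module is unaffected:
if __name__ == '__main__':
    project = InputUtil.get_input('project name', default='demo')
    port = InputUtil.get_input('port', default='8080', pattern=r'^\d+$')
    print 'project: {0}, port: {1}'.format(project, port)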
| bsd-3-clause | -7,760,437,563,598,933,000 | 34.333333 | 100 | 0.516383 | false | 4.954545 | false | false | false |
ToontownUprising/src | otp/avatar/PositionExaminer.py | 5 | 4046 | from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from otp.otpbase import OTPGlobals
class PositionExaminer(DirectObject, NodePath):
def __init__(self):
try:
self.__initialized
return
except:
self.__initialized = 1
NodePath.__init__(self, hidden.attachNewNode('PositionExaminer'))
self.cRay = CollisionRay(0.0, 0.0, 6.0, 0.0, 0.0, -1.0)
self.cRayNode = CollisionNode('cRayNode')
self.cRayNode.addSolid(self.cRay)
self.cRayNodePath = self.attachNewNode(self.cRayNode)
self.cRayNodePath.hide()
self.cRayBitMask = OTPGlobals.FloorBitmask
self.cRayNode.setFromCollideMask(self.cRayBitMask)
self.cRayNode.setIntoCollideMask(BitMask32.allOff())
self.cSphere = CollisionSphere(0.0, 0.0, 0.0, 1.5)
self.cSphereNode = CollisionNode('cSphereNode')
self.cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.attachNewNode(self.cSphereNode)
self.cSphereNodePath.hide()
self.cSphereBitMask = OTPGlobals.WallBitmask
self.cSphereNode.setFromCollideMask(self.cSphereBitMask)
self.cSphereNode.setIntoCollideMask(BitMask32.allOff())
self.ccLine = CollisionSegment(0.0, 0.0, 0.0, 1.0, 0.0, 0.0)
self.ccLineNode = CollisionNode('ccLineNode')
self.ccLineNode.addSolid(self.ccLine)
self.ccLineNodePath = self.attachNewNode(self.ccLineNode)
self.ccLineNodePath.hide()
self.ccLineBitMask = OTPGlobals.CameraBitmask
self.ccLineNode.setFromCollideMask(self.ccLineBitMask)
self.ccLineNode.setIntoCollideMask(BitMask32.allOff())
self.cRayTrav = CollisionTraverser('PositionExaminer.cRayTrav')
self.cRayTrav.setRespectPrevTransform(False)
self.cRayQueue = CollisionHandlerQueue()
self.cRayTrav.addCollider(self.cRayNodePath, self.cRayQueue)
self.cSphereTrav = CollisionTraverser('PositionExaminer.cSphereTrav')
self.cSphereTrav.setRespectPrevTransform(False)
self.cSphereQueue = CollisionHandlerQueue()
self.cSphereTrav.addCollider(self.cSphereNodePath, self.cSphereQueue)
self.ccLineTrav = CollisionTraverser('PositionExaminer.ccLineTrav')
self.ccLineTrav.setRespectPrevTransform(False)
self.ccLineQueue = CollisionHandlerQueue()
self.ccLineTrav.addCollider(self.ccLineNodePath, self.ccLineQueue)
def delete(self):
del self.cRay
del self.cRayNode
self.cRayNodePath.removeNode()
del self.cRayNodePath
del self.cSphere
del self.cSphereNode
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
del self.ccLine
del self.ccLineNode
self.ccLineNodePath.removeNode()
del self.ccLineNodePath
del self.cRayTrav
del self.cRayQueue
del self.cSphereTrav
del self.cSphereQueue
del self.ccLineTrav
del self.ccLineQueue
def consider(self, node, pos, eyeHeight):
self.reparentTo(node)
self.setPos(pos)
result = None
self.cRayTrav.traverse(render)
if self.cRayQueue.getNumEntries() != 0:
self.cRayQueue.sortEntries()
floorPoint = self.cRayQueue.getEntry(0).getSurfacePoint(self.cRayNodePath)
if abs(floorPoint[2]) <= 4.0:
pos += floorPoint
self.setPos(pos)
self.cSphereTrav.traverse(render)
if self.cSphereQueue.getNumEntries() == 0:
self.ccLine.setPointA(0, 0, eyeHeight)
self.ccLine.setPointB(-pos[0], -pos[1], eyeHeight)
self.ccLineTrav.traverse(render)
if self.ccLineQueue.getNumEntries() == 0:
result = pos
self.reparentTo(hidden)
self.cRayQueue.clearEntries()
self.cSphereQueue.clearEntries()
self.ccLineQueue.clearEntries()
return result
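# A rough usage sketch (the node, offset and eye height are hypothetical;
# consider() returns the adjusted node-relative position, or None when the
# spot is blocked by walls, has no floor nearby, or breaks the camera line):
#
#   examiner = PositionExaminer()
#   offset = examiner.consider(avatarNode, Point3(0, 5, 0), 4.0)
#   if offset is not None:
#       avatarNode.setPos(avatarNode, offset)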
| mit | 7,802,838,456,947,292,000 | 42.042553 | 86 | 0.657934 | false | 3.434635 | false | false | false |
zlsun/XX-Net | code/default/gae_proxy/server/lib/google/appengine/ext/ndb/tasklets.py | 5 | 42437 | #
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tasklet decorator.
Tasklets are a way to write concurrently running functions without
threads; tasklets are executed by an event loop and can suspend
themselves blocking for I/O or some other operation using a yield
statement. The notion of a blocking operation is abstracted into the
Future class, but a tasklet may also yield an RPC in order to wait for
that RPC to complete.
The @tasklet decorator wraps a generator function so that when it is
called, a Future is returned while the generator is executed by the
event loop. Within the tasklet, any yield of a Future waits for and
returns the Future's result. For example:
@tasklet
def foo():
a = yield <some Future>
b = yield <another Future>
raise Return(a + b)
def main():
f = foo()
x = f.get_result()
print x
Note that blocking until the Future's result is available using
get_result() is somewhat inefficient (though not vastly -- it is not
busy-waiting). In most cases such code should be rewritten as a tasklet
instead:
@tasklet
def main_tasklet():
f = foo()
x = yield f
print x
Calling a tasklet automatically schedules it with the event loop:
def main():
f = main_tasklet()
eventloop.run() # Run until no tasklets left to do
f.done() # Returns True
As a special feature, if the wrapped function is not a generator
function, its return value is returned via the Future. This makes the
following two equivalent:
@tasklet
def foo():
return 42
@tasklet
def foo():
if False: yield # The presence of 'yield' makes foo a generator
raise Return(42) # Or, after PEP 380, return 42
This feature (inspired by Monocle) is handy in case you are
implementing an interface that expects tasklets but you have no need to
suspend -- there's no need to insert a dummy yield in order to make
the tasklet into a generator.
"""
import collections
import logging
import os
import sys
import types
from .google_imports import apiproxy_stub_map
from .google_imports import apiproxy_rpc
from .google_imports import datastore
from .google_imports import datastore_errors
from .google_imports import datastore_pbs
from .google_imports import datastore_rpc
from .google_imports import namespace_manager
from . import eventloop
from . import utils
__all__ = ['Return', 'tasklet', 'synctasklet', 'toplevel', 'sleep',
'add_flow_exception', 'get_return_value',
'get_context', 'set_context',
'make_default_context', 'make_context',
'Future', 'MultiFuture', 'QueueFuture', 'SerialQueueFuture',
'ReducingFuture',
]
_logging_debug = utils.logging_debug
def _is_generator(obj):
"""Helper to test for a generator object.
NOTE: This tests for the (iterable) object returned by calling a
generator function, not for a generator function.
"""
return isinstance(obj, types.GeneratorType)
class _State(utils.threading_local):
"""Hold thread-local state."""
current_context = None
def __init__(self):
super(_State, self).__init__()
self.all_pending = set()
def add_pending(self, fut):
_logging_debug('all_pending: add %s', fut)
self.all_pending.add(fut)
def remove_pending(self, fut, status='success'):
if fut in self.all_pending:
_logging_debug('all_pending: %s: remove %s', status, fut)
self.all_pending.remove(fut)
else:
_logging_debug('all_pending: %s: not found %s', status, fut)
def clear_all_pending(self):
if self.all_pending:
logging.info('all_pending: clear %s', self.all_pending)
self.all_pending.clear()
else:
_logging_debug('all_pending: clear no-op')
def dump_all_pending(self, verbose=False):
pending = []
for fut in self.all_pending:
if verbose:
line = fut.dump() + ('\n' + '-' * 40)
else:
line = fut.dump_stack()
pending.append(line)
return '\n'.join(pending)
_state = _State()
# Tuple of exceptions that should not be logged (except in debug mode).
_flow_exceptions = ()
def add_flow_exception(exc):
"""Add an exception that should not be logged.
The argument must be a subclass of Exception.
"""
global _flow_exceptions
if not isinstance(exc, type) or not issubclass(exc, Exception):
raise TypeError('Expected an Exception subclass, got %r' % (exc,))
as_set = set(_flow_exceptions)
as_set.add(exc)
_flow_exceptions = tuple(as_set)
def _init_flow_exceptions():
"""Internal helper to initialize _flow_exceptions.
This automatically adds webob.exc.HTTPException, if it can be imported.
"""
global _flow_exceptions
_flow_exceptions = ()
add_flow_exception(datastore_errors.Rollback)
try:
from webob import exc
except ImportError:
pass
else:
add_flow_exception(exc.HTTPException)
_init_flow_exceptions()
class Future(object):
"""A Future has 0 or more callbacks.
The callbacks will be called when the result is ready.
  NOTE: This is somewhat inspired by, but not conformant to, the Future interface
defined by PEP 3148. It is also inspired (and tries to be somewhat
compatible with) the App Engine specific UserRPC and MultiRpc classes.
"""
# TODO: Trim the API; there are too many ways to do the same thing.
# TODO: Compare to Monocle's much simpler Callback class.
# Constants for state property.
IDLE = apiproxy_rpc.RPC.IDLE # Not yet running (unused)
RUNNING = apiproxy_rpc.RPC.RUNNING # Not yet completed.
FINISHING = apiproxy_rpc.RPC.FINISHING # Completed.
# XXX Add docstrings to all methods. Separate PEP 3148 API from RPC API.
_geninfo = None # Extra info about suspended generator.
def __init__(self, info=None):
# TODO: Make done a method, to match PEP 3148?
# pylint: disable=invalid-name
__ndb_debug__ = 'SKIP' # Hide this frame from self._where
self._info = info # Info from the caller about this Future's purpose.
self._where = utils.get_stack()
self._context = None
self._reset()
def _reset(self):
self._done = False
self._result = None
self._exception = None
self._traceback = None
self._callbacks = []
self._immediate_callbacks = []
_state.add_pending(self)
self._next = None # Links suspended Futures together in a stack.
# TODO: Add a __del__ that complains if neither get_exception() nor
# check_success() was ever called? What if it's not even done?
def __repr__(self):
if self._done:
if self._exception is not None:
state = 'exception %s: %s' % (self._exception.__class__.__name__,
self._exception)
else:
state = 'result %r' % (self._result,)
else:
state = 'pending'
line = '?'
for line in self._where:
if 'tasklets.py' not in line:
break
if self._info:
line += ' for %s' % self._info
if self._geninfo:
line += ' %s' % self._geninfo
return '<%s %x created by %s; %s>' % (
self.__class__.__name__, id(self), line, state)
def dump(self):
return '%s\nCreated by %s' % (self.dump_stack(),
'\n called by '.join(self._where))
def dump_stack(self):
lines = []
fut = self
while fut is not None:
lines.append(str(fut))
fut = fut._next
return '\n waiting for '.join(lines)
def add_callback(self, callback, *args, **kwds):
if self._done:
eventloop.queue_call(None, callback, *args, **kwds)
else:
self._callbacks.append((callback, args, kwds))
def add_immediate_callback(self, callback, *args, **kwds):
if self._done:
callback(*args, **kwds)
else:
self._immediate_callbacks.append((callback, args, kwds))
def set_result(self, result):
if self._done:
raise RuntimeError('Result cannot be set twice.')
self._result = result
self._done = True
_state.remove_pending(self)
for callback, args, kwds in self._immediate_callbacks:
callback(*args, **kwds)
for callback, args, kwds in self._callbacks:
eventloop.queue_call(None, callback, *args, **kwds)
def set_exception(self, exc, tb=None):
if not isinstance(exc, BaseException):
raise TypeError('exc must be an Exception; received %r' % exc)
if self._done:
raise RuntimeError('Exception cannot be set twice.')
self._exception = exc
self._traceback = tb
self._done = True
_state.remove_pending(self, status='fail')
for callback, args, kwds in self._immediate_callbacks:
callback(*args, **kwds)
for callback, args, kwds in self._callbacks:
eventloop.queue_call(None, callback, *args, **kwds)
def done(self):
return self._done
@property
def state(self):
# This is just for compatibility with UserRPC and MultiRpc.
# A Future is considered running as soon as it is created.
if self._done:
return self.FINISHING
else:
return self.RUNNING
def wait(self):
if self._done:
return
ev = eventloop.get_event_loop()
while not self._done:
if not ev.run1():
logging.info('Deadlock in %s', self)
logging.info('All pending Futures:\n%s', _state.dump_all_pending())
_logging_debug('All pending Futures (verbose):\n%s',
_state.dump_all_pending(verbose=True))
self.set_exception(RuntimeError('Deadlock waiting for %s' % self))
def get_exception(self):
self.wait()
return self._exception
def get_traceback(self):
self.wait()
return self._traceback
def check_success(self):
self.wait()
if self._exception is not None:
raise self._exception.__class__, self._exception, self._traceback
def get_result(self):
self.check_success()
return self._result
# TODO: Have a tasklet that does this
@classmethod
def wait_any(cls, futures):
# TODO: Flatten MultiRpcs.
waiting_on = set(futures)
ev = eventloop.get_event_loop()
while waiting_on:
for f in waiting_on:
if f.state == cls.FINISHING:
return f
ev.run1()
return None
# TODO: Have a tasklet that does this
@classmethod
def wait_all(cls, futures):
# TODO: Flatten MultiRpcs.
waiting_on = set(futures)
ev = eventloop.get_event_loop()
while waiting_on:
waiting_on = set(f for f in waiting_on if f.state == cls.RUNNING)
ev.run1()
def _help_tasklet_along(self, ns, ds_conn, gen, val=None, exc=None, tb=None):
# XXX Docstring
info = utils.gen_info(gen)
# pylint: disable=invalid-name
__ndb_debug__ = info
try:
save_context = get_context()
save_namespace = namespace_manager.get_namespace()
save_ds_connection = datastore._GetConnection()
try:
set_context(self._context)
if ns != save_namespace:
namespace_manager.set_namespace(ns)
if ds_conn is not save_ds_connection:
datastore._SetConnection(ds_conn)
if exc is not None:
_logging_debug('Throwing %s(%s) into %s',
exc.__class__.__name__, exc, info)
value = gen.throw(exc.__class__, exc, tb)
else:
_logging_debug('Sending %r to %s', val, info)
value = gen.send(val)
self._context = get_context()
finally:
ns = namespace_manager.get_namespace()
ds_conn = datastore._GetConnection()
set_context(save_context)
if save_namespace != ns:
namespace_manager.set_namespace(save_namespace)
if save_ds_connection is not ds_conn:
datastore._SetConnection(save_ds_connection)
except StopIteration, err:
result = get_return_value(err)
_logging_debug('%s returned %r', info, result)
self.set_result(result)
return
except GeneratorExit:
# In Python 2.5, this derives from Exception, but we don't want
# to handle it like other Exception instances. So we catch and
# re-raise it immediately. See issue 127. http://goo.gl/2p5Pn
# TODO: Remove when Python 2.5 is no longer supported.
raise
except Exception, err:
_, _, tb = sys.exc_info()
if isinstance(err, _flow_exceptions):
# Flow exceptions aren't logged except in "heavy debug" mode,
# and then only at DEBUG level, without a traceback.
_logging_debug('%s raised %s(%s)',
info, err.__class__.__name__, err)
elif utils.DEBUG and logging.getLogger().level < logging.DEBUG:
# In "heavy debug" mode, log a warning with traceback.
# (This is the same condition as used in utils.logging_debug().)
logging.warning('%s raised %s(%s)',
info, err.__class__.__name__, err, exc_info=True)
else:
# Otherwise, log a warning without a traceback.
logging.warning('%s raised %s(%s)', info, err.__class__.__name__, err)
self.set_exception(err, tb)
return
else:
_logging_debug('%s yielded %r', info, value)
if isinstance(value, (apiproxy_stub_map.UserRPC,
datastore_rpc.MultiRpc)):
# TODO: Tail recursion if the RPC is already complete.
eventloop.queue_rpc(value, self._on_rpc_completion,
value, ns, ds_conn, gen)
return
if isinstance(value, Future):
# TODO: Tail recursion if the Future is already done.
if self._next:
raise RuntimeError('Future has already completed yet next is %r' %
self._next)
self._next = value
self._geninfo = utils.gen_info(gen)
_logging_debug('%s is now blocked waiting for %s', self, value)
value.add_callback(self._on_future_completion, value, ns, ds_conn, gen)
return
if isinstance(value, (tuple, list)):
# Arrange for yield to return a list of results (not Futures).
info = 'multi-yield from %s' % utils.gen_info(gen)
mfut = MultiFuture(info)
try:
for subfuture in value:
mfut.add_dependent(subfuture)
mfut.complete()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
mfut.set_exception(err, tb)
mfut.add_callback(self._on_future_completion, mfut, ns, ds_conn, gen)
return
if _is_generator(value):
# TODO: emulate PEP 380 here?
raise NotImplementedError('Cannot defer to another generator.')
raise RuntimeError('A tasklet should not yield a plain value: '
'%.200s yielded %.200r' % (info, value))
def _on_rpc_completion(self, rpc, ns, ds_conn, gen):
try:
result = rpc.get_result()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self._help_tasklet_along(ns, ds_conn, gen, exc=err, tb=tb)
else:
self._help_tasklet_along(ns, ds_conn, gen, result)
def _on_future_completion(self, future, ns, ds_conn, gen):
if self._next is future:
self._next = None
self._geninfo = None
_logging_debug('%s is no longer blocked waiting for %s', self, future)
exc = future.get_exception()
if exc is not None:
self._help_tasklet_along(ns, ds_conn, gen,
exc=exc, tb=future.get_traceback())
else:
val = future.get_result() # This won't raise an exception.
self._help_tasklet_along(ns, ds_conn, gen, val)
def sleep(dt):
"""Public function to sleep some time.
Example:
yield tasklets.sleep(0.5) # Sleep for half a sec.
"""
fut = Future('sleep(%.3f)' % dt)
eventloop.queue_call(dt, fut.set_result, None)
return fut
class MultiFuture(Future):
"""A Future that depends on multiple other Futures.
This is used internally by 'v1, v2, ... = yield f1, f2, ...'; the
semantics (e.g. error handling) are constrained by that use case.
The protocol from the caller's POV is:
mf = MultiFuture()
mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
mf.add_dependent(<some other Future>) -OR- mf.putq(<some value>)
.
. (More mf.add_dependent() and/or mf.putq() calls)
.
mf.complete() # No more dependents will be added.
.
. (Time passes)
.
results = mf.get_result()
Now, results is a list of results from all dependent Futures in
the order in which they were added.
It is legal to add the same dependent multiple times.
Callbacks can be added at any point.
From a dependent Future POV, there's nothing to be done: a callback
is automatically added to each dependent Future which will signal
its completion to the MultiFuture.
Error handling: if any dependent future raises an error, it is
propagated to mf. To force an early error, you can call
mf.set_exception() instead of mf.complete(). After this you can't
call mf.add_dependent() or mf.putq() any more.
"""
def __init__(self, info=None):
# pylint: disable=invalid-name
__ndb_debug__ = 'SKIP' # Hide this frame from self._where
self._full = False
self._dependents = set()
self._results = []
super(MultiFuture, self).__init__(info=info)
def __repr__(self):
# TODO: This may be invoked before __init__() returns,
# from Future.__init__(). Beware.
line = super(MultiFuture, self).__repr__()
lines = [line]
for fut in self._results:
lines.append(fut.dump_stack().replace('\n', '\n '))
return '\n waiting for '.join(lines)
# TODO: Maybe rename this method, since completion of a Future/RPC
# already means something else. But to what?
def complete(self):
if self._full:
raise RuntimeError('MultiFuture cannot complete twice.')
self._full = True
if not self._dependents:
self._finish()
# TODO: Maybe don't overload set_exception() with this?
def set_exception(self, exc, tb=None):
self._full = True
super(MultiFuture, self).set_exception(exc, tb)
def _finish(self):
if not self._full:
raise RuntimeError('MultiFuture cannot finish until completed.')
if self._dependents:
raise RuntimeError('MultiFuture cannot finish whilst waiting for '
'dependents %r' % self._dependents)
if self._done:
raise RuntimeError('MultiFuture done before finishing.')
try:
result = [r.get_result() for r in self._results]
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
else:
self.set_result(result)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if isinstance(fut, list):
mfut = MultiFuture()
map(mfut.add_dependent, fut)
mfut.complete()
fut = mfut
elif not isinstance(fut, Future):
raise TypeError('Expected Future, received %s: %r' % (type(fut), fut))
if self._full:
raise RuntimeError('MultiFuture cannot add a dependent once complete.')
self._results.append(fut)
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
self._dependents.remove(fut)
if self._full and not self._dependents and not self._done:
self._finish()
class QueueFuture(Future):
"""A Queue following the same protocol as MultiFuture.
However, instead of returning results as a list, it lets you
retrieve results as soon as they are ready, one at a time, using
getq(). The Future itself finishes with a result of None when the
last result is ready (regardless of whether it was retrieved).
The getq() method returns a Future which blocks until the next
result is ready, and then returns that result. Each getq() call
retrieves one unique result. Extra getq() calls after the last
result is already returned return EOFError as their Future's
  exception. (I.e., q.getq() returns a Future as always, but yielding
that Future raises EOFError.)
NOTE: Values can also be pushed directly via .putq(value). However
there is no flow control -- if the producer is faster than the
consumer, the queue will grow unbounded.
"""
# TODO: Refactor to share code with MultiFuture.
def __init__(self, info=None):
self._full = False
self._dependents = set()
self._completed = collections.deque()
self._waiting = collections.deque()
# Invariant: at least one of _completed and _waiting is empty.
# Also: _full and not _dependents <==> _done.
super(QueueFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
if self._full:
      raise RuntimeError('QueueFuture cannot complete twice.')
self._full = True
if not self._dependents:
self.set_result(None)
self._mark_finished()
def set_exception(self, exc, tb=None):
self._full = True
super(QueueFuture, self).set_exception(exc, tb)
if not self._dependents:
self._mark_finished()
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future instance; received %r' % fut)
if self._full:
      raise RuntimeError('QueueFuture cannot add a dependent once complete.')
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
if not fut.done():
      raise RuntimeError('Future not done before signalling dependent done.')
self._dependents.remove(fut)
exc = fut.get_exception()
tb = fut.get_traceback()
val = None
if exc is None:
val = fut.get_result()
if self._waiting:
waiter = self._waiting.popleft()
self._pass_result(waiter, exc, tb, val)
else:
self._completed.append((exc, tb, val))
if self._full and not self._dependents and not self._done:
self.set_result(None)
self._mark_finished()
def _mark_finished(self):
if not self.done():
raise RuntimeError('Future not done before marking as finished.')
while self._waiting:
waiter = self._waiting.popleft()
self._pass_eof(waiter)
def getq(self):
fut = Future()
if self._completed:
exc, tb, val = self._completed.popleft()
self._pass_result(fut, exc, tb, val)
elif self._full and not self._dependents:
self._pass_eof(fut)
else:
self._waiting.append(fut)
return fut
def _pass_eof(self, fut):
if not self._done:
raise RuntimeError('QueueFuture cannot pass EOF until done.')
exc = self.get_exception()
if exc is not None:
tb = self.get_traceback()
else:
exc = EOFError('Queue is empty')
tb = None
self._pass_result(fut, exc, tb, None)
def _pass_result(self, fut, exc, tb, val):
if exc is not None:
fut.set_exception(exc, tb)
else:
fut.set_result(val)
class SerialQueueFuture(Future):
"""Like QueueFuture but maintains the order of insertion.
This class is used by Query operations.
Invariants:
- At least one of _queue and _waiting is empty.
- The Futures in _waiting are always pending.
(The Futures in _queue may be pending or completed.)
In the discussion below, add_dependent() is treated the same way as
putq().
If putq() is ahead of getq(), the situation is like this:
putq()
v
_queue: [f1, f2, ...]; _waiting: []
^
getq()
Here, putq() appends a Future to the right of _queue, and getq()
removes one from the left.
If getq() is ahead of putq(), it's like this:
putq()
v
_queue: []; _waiting: [f1, f2, ...]
^
getq()
Here, putq() removes a Future from the left of _waiting, and getq()
appends one to the right.
When both are empty, putq() appends a Future to the right of _queue,
while getq() appends one to the right of _waiting.
The _full flag means that no more calls to putq() will be made; it
is set by calling either complete() or set_exception().
Calling complete() signals that no more putq() calls will be made.
If getq() is behind, subsequent getq() calls will eat up _queue
until it is empty, and after that will return a Future that passes
EOFError (note that getq() itself never raises EOFError). If getq()
is ahead when complete() is called, the Futures in _waiting are all
passed an EOFError exception (thereby eating up _waiting).
If, instead of complete(), set_exception() is called, the exception
and traceback set there will be used instead of EOFError.
"""
def __init__(self, info=None):
self._queue = collections.deque()
self._waiting = collections.deque()
super(SerialQueueFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
while self._waiting:
waiter = self._waiting.popleft()
waiter.set_exception(EOFError('Queue is empty'))
# When the writer is complete the future will also complete. If there are
# still pending queued futures, these futures are themselves in the pending
# list, so they will eventually be executed.
self.set_result(None)
def set_exception(self, exc, tb=None):
super(SerialQueueFuture, self).set_exception(exc, tb)
while self._waiting:
waiter = self._waiting.popleft()
waiter.set_exception(exc, tb)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
if self._waiting:
waiter = self._waiting.popleft()
waiter.set_result(value)
return
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future instance; received %r' % fut)
if self._done:
raise RuntimeError('SerialQueueFuture cannot add dependent '
'once complete.')
if self._waiting:
waiter = self._waiting.popleft()
fut.add_callback(_transfer_result, fut, waiter)
else:
self._queue.append(fut)
def getq(self):
if self._queue:
fut = self._queue.popleft()
else:
fut = Future()
if self._done:
err = self.get_exception()
if err is not None:
tb = self.get_traceback()
else:
err = EOFError('Queue is empty')
tb = None
fut.set_exception(err, tb)
else:
self._waiting.append(fut)
return fut
def _transfer_result(fut1, fut2):
"""Helper to transfer result or errors from one Future to another."""
exc = fut1.get_exception()
if exc is not None:
tb = fut1.get_traceback()
fut2.set_exception(exc, tb)
else:
val = fut1.get_result()
fut2.set_result(val)
class ReducingFuture(Future):
"""A Queue following the same protocol as MultiFuture.
However the result, instead of being a list of results of dependent
Futures, is computed by calling a 'reducer' tasklet. The reducer tasklet
takes a list of values and returns a single value. It may be called
multiple times on sublists of values and should behave like
e.g. sum().
NOTE: The reducer input values may be reordered compared to the
order in which they were added to the queue.
"""
# TODO: Refactor to reuse some code with MultiFuture.
def __init__(self, reducer, info=None, batch_size=20):
self._reducer = reducer
self._batch_size = batch_size
self._full = False
self._dependents = set()
self._completed = collections.deque()
self._queue = collections.deque()
super(ReducingFuture, self).__init__(info=info)
# TODO: __repr__
def complete(self):
if self._full:
raise RuntimeError('ReducingFuture cannot complete twice.')
self._full = True
if not self._dependents:
self._mark_finished()
def set_exception(self, exc, tb=None):
self._full = True
self._queue.clear()
super(ReducingFuture, self).set_exception(exc, tb)
def putq(self, value):
if isinstance(value, Future):
fut = value
else:
fut = Future()
fut.set_result(value)
self.add_dependent(fut)
def add_dependent(self, fut):
if self._full:
raise RuntimeError('ReducingFuture cannot add dependent once complete.')
self._internal_add_dependent(fut)
def _internal_add_dependent(self, fut):
if not isinstance(fut, Future):
raise TypeError('fut must be a Future; received %r' % fut)
if fut not in self._dependents:
self._dependents.add(fut)
fut.add_callback(self._signal_dependent_done, fut)
def _signal_dependent_done(self, fut):
if not fut.done():
      raise RuntimeError('Future not done before signalling dependent done.')
self._dependents.remove(fut)
if self._done:
return # Already done.
try:
val = fut.get_result()
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
self._queue.append(val)
if len(self._queue) >= self._batch_size:
todo = list(self._queue)
self._queue.clear()
try:
nval = self._reducer(todo)
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
if isinstance(nval, Future):
self._internal_add_dependent(nval)
else:
self._queue.append(nval)
if self._full and not self._dependents:
self._mark_finished()
def _mark_finished(self):
if not self._queue:
self.set_result(None)
elif len(self._queue) == 1:
self.set_result(self._queue.pop())
else:
todo = list(self._queue)
self._queue.clear()
try:
nval = self._reducer(todo)
except GeneratorExit:
raise
except Exception, err:
_, _, tb = sys.exc_info()
self.set_exception(err, tb)
return
if isinstance(nval, Future):
self._internal_add_dependent(nval)
else:
self.set_result(nval)
# Alias for StopIteration used to mark return values.
# To use this, raise Return(<your return value>). The semantics
# are exactly the same as raise StopIteration(<your return value>)
# but using Return clarifies that you are intending this to be the
# return value of a tasklet.
# TODO: According to Monocle authors Steve and Greg Hazel, Twisted
# used an exception to signal a return value from a generator early
# on, and they found out it was error-prone. Should I worry?
Return = StopIteration
def get_return_value(err):
# XXX Docstring
if not err.args:
result = None
elif len(err.args) == 1:
result = err.args[0]
else:
result = err.args
return result
def tasklet(func):
# XXX Docstring
@utils.wrapping(func)
def tasklet_wrapper(*args, **kwds):
# XXX Docstring
# TODO: make most of this a public function so you can take a bare
# generator and turn it into a tasklet dynamically. (Monocle has
# this I believe.)
# pylint: disable=invalid-name
__ndb_debug__ = utils.func_info(func)
fut = Future('tasklet %s' % utils.func_info(func))
fut._context = get_context()
try:
result = func(*args, **kwds)
except StopIteration, err:
# Just in case the function is not a generator but still uses
# the "raise Return(...)" idiom, we'll extract the return value.
result = get_return_value(err)
if _is_generator(result):
ns = namespace_manager.get_namespace()
ds_conn = datastore._GetConnection()
eventloop.queue_call(None, fut._help_tasklet_along, ns, ds_conn, result)
else:
fut.set_result(result)
return fut
return tasklet_wrapper
def synctasklet(func):
"""Decorator to run a function as a tasklet when called.
Use this to wrap a request handler function that will be called by
some web application framework (e.g. a Django view function or a
webapp.RequestHandler.get method).
"""
taskletfunc = tasklet(func) # wrap at declaration time.
@utils.wrapping(func)
def synctasklet_wrapper(*args, **kwds):
# pylint: disable=invalid-name
__ndb_debug__ = utils.func_info(func)
return taskletfunc(*args, **kwds).get_result()
return synctasklet_wrapper
def toplevel(func):
"""A sync tasklet that sets a fresh default Context.
Use this for toplevel view functions such as
webapp.RequestHandler.get() or Django view functions.
"""
synctaskletfunc = synctasklet(func) # wrap at declaration time.
@utils.wrapping(func)
def add_context_wrapper(*args, **kwds):
# pylint: disable=invalid-name
__ndb_debug__ = utils.func_info(func)
_state.clear_all_pending()
# Create and install a new context.
ctx = make_default_context()
try:
set_context(ctx)
return synctaskletfunc(*args, **kwds)
finally:
set_context(None)
ctx.flush().check_success()
eventloop.run() # Ensure writes are flushed, etc.
return add_context_wrapper
_CONTEXT_KEY = '__CONTEXT__'
_DATASTORE_APP_ID_ENV = 'DATASTORE_APP_ID'
_DATASTORE_PROJECT_ID_ENV = 'DATASTORE_PROJECT_ID'
_DATASTORE_ADDITIONAL_APP_IDS_ENV = 'DATASTORE_ADDITIONAL_APP_IDS'
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV = 'DATASTORE_USE_PROJECT_ID_AS_APP_ID'
def get_context():
# XXX Docstring
ctx = None
if os.getenv(_CONTEXT_KEY):
ctx = _state.current_context
if ctx is None:
ctx = make_default_context()
set_context(ctx)
return ctx
def make_default_context():
# XXX Docstring
datastore_app_id = os.environ.get(_DATASTORE_APP_ID_ENV, None)
datastore_project_id = os.environ.get(_DATASTORE_PROJECT_ID_ENV, None)
if datastore_app_id or datastore_project_id:
# We will create a Cloud Datastore context.
app_id_override = bool(os.environ.get(
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV, False))
if not datastore_app_id and not app_id_override:
raise ValueError('Could not determine app id. To use project id (%s) '
'instead, set %s=true. This will affect the '
'serialized form of entities and should not be used '
'if serialized entities will be shared between '
'code running on App Engine and code running off '
'App Engine. Alternatively, set %s=<app id>.'
% (datastore_project_id,
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV,
_DATASTORE_APP_ID_ENV))
elif datastore_app_id:
if app_id_override:
raise ValueError('App id was provided (%s) but %s was set to true. '
'Please unset either %s or %s.' %
(datastore_app_id,
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV,
_DATASTORE_APP_ID_ENV,
_DATASTORE_USE_PROJECT_ID_AS_APP_ID_ENV))
elif datastore_project_id:
# Project id and app id provided, make sure they are the same.
id_resolver = datastore_pbs.IdResolver([datastore_app_id])
if (datastore_project_id !=
id_resolver.resolve_project_id(datastore_app_id)):
raise ValueError('App id "%s" does not match project id "%s".'
% (datastore_app_id, datastore_project_id))
datastore_app_id = datastore_project_id or datastore_app_id
additional_app_str = os.environ.get(_DATASTORE_ADDITIONAL_APP_IDS_ENV, '')
additional_apps = (app.strip() for app in additional_app_str.split(','))
return _make_cloud_datastore_context(datastore_app_id, additional_apps)
return make_context()
@utils.positional(0)
def make_context(conn=None, config=None):
# XXX Docstring
from . import context # Late import to deal with circular imports.
return context.Context(conn=conn, config=config)
def _make_cloud_datastore_context(app_id, external_app_ids=()):
"""Creates a new context to connect to a remote Cloud Datastore instance.
This should only be used outside of Google App Engine.
Args:
app_id: The application id to connect to. This differs from the project
id as it may have an additional prefix, e.g. "s~" or "e~".
external_app_ids: A list of apps that may be referenced by data in your
application. For example, if you are connected to s~my-app and store keys
for s~my-other-app, you should include s~my-other-app in the external_apps
list.
Returns:
An ndb.Context that can connect to a Remote Cloud Datastore. You can use
this context by passing it to ndb.set_context.
"""
from . import model # Late import to deal with circular imports.
# Late import since it might not exist.
if not datastore_pbs._CLOUD_DATASTORE_ENABLED:
raise datastore_errors.BadArgumentError(
datastore_pbs.MISSING_CLOUD_DATASTORE_MESSAGE)
import googledatastore
try:
from google.appengine.datastore import cloud_datastore_v1_remote_stub
except ImportError:
from google3.apphosting.datastore import cloud_datastore_v1_remote_stub
current_app_id = os.environ.get('APPLICATION_ID', None)
if current_app_id and current_app_id != app_id:
# TODO(pcostello): We should support this so users can connect to different
# applications.
raise ValueError('Cannot create a Cloud Datastore context that connects '
'to an application (%s) that differs from the application '
'already connected to (%s).' % (app_id, current_app_id))
os.environ['APPLICATION_ID'] = app_id
id_resolver = datastore_pbs.IdResolver((app_id,) + tuple(external_app_ids))
project_id = id_resolver.resolve_project_id(app_id)
endpoint = googledatastore.helper.get_project_endpoint_from_env(project_id)
datastore = googledatastore.Datastore(
project_endpoint=endpoint,
credentials=googledatastore.helper.get_credentials_from_env())
conn = model.make_connection(_api_version=datastore_rpc._CLOUD_DATASTORE_V1,
_id_resolver=id_resolver)
# If necessary, install the stubs
try:
stub = cloud_datastore_v1_remote_stub.CloudDatastoreV1RemoteStub(datastore)
apiproxy_stub_map.apiproxy.RegisterStub(datastore_rpc._CLOUD_DATASTORE_V1,
stub)
except:
pass # The stub is already installed.
# TODO(pcostello): Ensure the current stub is connected to the right project.
return make_context(conn=conn)
def set_context(new_context):
# XXX Docstring
os.environ[_CONTEXT_KEY] = '1'
_state.current_context = new_context
# TODO: Rework the following into documentation.
# A tasklet/coroutine/generator can yield the following things:
# - Another tasklet/coroutine/generator; this is entirely equivalent to
# "for x in g: yield x"; this is handled entirely by the @tasklet wrapper.
# (Actually, not. @tasklet returns a function that when called returns
# a Future. You can use the pep380 module's @gwrap decorator to support
# yielding bare generators though.)
# - An RPC (or MultiRpc); the tasklet will be resumed when this completes.
# This does not use the RPC's callback mechanism.
# - A Future; the tasklet will be resumed when the Future is done.
# This uses the Future's callback mechanism.
# A Future can be used in several ways:
# - Yield it from a tasklet; see above.
# - Check (poll) its status via f.done.
# - Call its wait() method, perhaps indirectly via check_success()
# or get_result(). This invokes the event loop.
# - Call the Future.wait_any() or Future.wait_all() method.
#   This waits for any or all Futures and RPCs in the argument list.
# XXX HIRO XXX
# - A tasklet is a (generator) function decorated with @tasklet.
# - Calling a tasklet schedules the function for execution and returns a Future.
# - A function implementing a tasklet may:
# = yield a Future; this waits for the Future which returns f.get_result();
# = yield an RPC; this waits for the RPC and then returns rpc.get_result();
# = raise Return(result); this sets the outer Future's result;
# = raise StopIteration or return; this sets the outer Future's result;
# = raise another exception: this sets the outer Future's exception.
# - If a function implementing a tasklet is not a generator it will be
# immediately executed to completion and the tasklet wrapper will
# return a Future that is already done. (XXX Alternative behavior:
# it schedules the call to be run by the event loop.)
# - Code not running in a tasklet can call f.get_result() or f.wait() on
# a future. This is implemented by a simple loop like the following:
# while not self._done:
# eventloop.run1()
# - Here eventloop.run1() runs one "atomic" part of the event loop:
# = either it calls one immediately ready callback;
# = or it waits for the first RPC to complete;
# = or it sleeps until the first callback should be ready;
# = or it raises an exception indicating all queues are empty.
# - It is possible but suboptimal to call rpc.get_result() or
# rpc.wait() directly on an RPC object since this will not allow
# other callbacks to run as they become ready. Wrapping an RPC in a
# Future will take care of this issue.
# - The important insight is that when a generator function
# implementing a tasklet yields, raises or returns, there is always a
# wrapper that catches this event and either turns it into a
# callback sent to the event loop, or sets the result or exception
# for the tasklet's Future.
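# A small illustration of the flow described above (the key names are
# hypothetical; only tasklet, Return and the yield semantics come from this
# module):
#
#   @tasklet
#   def get_pair(key1, key2):
#     ent1, ent2 = yield key1.get_async(), key2.get_async()
#     raise Return((ent1, ent2))
#
#   fut = get_pair(k1, k2)    # schedules the generator on the event loop
#   pair = fut.get_result()   # runs the event loop until the Future is done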
| bsd-2-clause | 1,676,909,870,192,614,000 | 32.573576 | 80 | 0.650776 | false | 3.717327 | false | false | false |
Zaharid/reportengine | src/reportengine/configparser.py | 1 | 10215 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 11 15:31:29 2015
@author: Zahari Kassabov
"""
import inspect
import difflib
import logging
import functools
import yaml
from reportengine import namespaces
from reportengine.utils import ChainMap
log = logging.getLogger(__name__)
_config_token = 'parse_'
class ConfigError(Exception):
alternatives_header="Instead of '%s', did you mean one of the following?"
def __init__(self, message, bad_item = None, alternatives = None, *,
display_alternatives='best'):
super().__init__(message)
self.bad_item = bad_item
if alternatives:
alternatives = list(alternatives)
self.alternatives = alternatives
self.display_alternatives = display_alternatives
def alternatives_text(self):
if (self.display_alternatives=='none' or not self.display_alternatives
or not self.alternatives):
return ''
if self.display_alternatives == 'best':
alternatives = difflib.get_close_matches(self.bad_item,
self.alternatives)
elif self.display_alternatives == 'all':
alternatives = self.alternatives
else:
raise ValueError("Unrecognized display_alternatives option. "
"Must be one of: 'all', 'best' or 'none'.")
if not alternatives:
return ''
head = (self.alternatives_header
% (self.bad_item,))
txts = [' - {}'.format(alt) for alt in alternatives]
return '\n'.join((head, *txts))
class BadInputType(ConfigError, TypeError):
def __init__(self, param, val, input_type):
msg = ("Bad input type for parameter '{param}': Value '{val}' "
"is not of type {input_type}.").format(**locals())
super().__init__(msg)
class InputNotFoundError(ConfigError, KeyError):
alternatives_header = "Maybe you mistyped %s in one of the following keys?"
def element_of(paramname, elementname=None):
def inner(f):
nonlocal elementname
if elementname is None:
if f.__name__.startswith(_config_token):
elementname = f.__name__[len(_config_token):]
f._element_of = paramname
f._elementname = elementname
return f
return inner
def named_element_of(paramname, elementname=None):
def inner(f):
element_of(paramname, elementname)(f)
f._named = True
return f
return inner
def _make_element_of(f):
if getattr(f, '_named', False):
def parse_func(self, param:dict, **kwargs):
d = {k: f(self, v , **kwargs) for k,v in param.items()}
return namespaces.NSItemsDict(d, nskey=f._elementname)
else:
def parse_func(self, param:list, **kwargs):
l = [f(self, elem, **kwargs) for elem in param]
return namespaces.NSList(l, nskey=f._elementname)
#We replicate the same signature for the kwarg parameters, so that we can
#use that to build the graph.
list_params = list(inspect.signature(parse_func).parameters.values())[0:2]
kwarg_params = list(inspect.signature(f).parameters.values())[2:]
params = [*list_params, *kwarg_params]
parse_func.__signature__ = inspect.Signature(parameters=params)
return parse_func
def _parse_func(f):
"""Check that the function has at least one argument, and check that the
    argument corresponds to the type declared in the annotation, if any."""
sig = inspect.signature(f)
try:
first_param = list(sig.parameters.values())[1]
except IndexError:
raise TypeError(("Parser functiom must have at least one "
"parameter: %s")
% f.__qualname__)
input_type = first_param.annotation
@functools.wraps(f)
def f_(self, val, *args, **kwargs):
if input_type is not sig.empty:
if not isinstance(val, input_type):
raise BadInputType(f.__name__, val, input_type)
return f(self, val, *args, **kwargs)
return f_
class ElementOfResolver(type):
"""Generate a parsing function for collections of each 'atomic' parsing
function found in the class, and marked with the relevant decorator."""
def __new__(cls, name, bases, attrs):
newattrs = {}
_list_keys = {}
for attr, f in attrs.items():
if hasattr(f, '_element_of'):
newattr = _config_token + f._element_of
if newattr in attrs:
raise ValueError("Cannot construct {newattr} from "
"'_element_of' {attr} because it is "
"already declared.")
#We have to apply parse func in here as well.
newattrs[newattr] = _make_element_of(_parse_func(f))
_list_keys[f._element_of] = f._elementname
newattrs['_list_keys'] = _list_keys
attrs = {**newattrs, **attrs}
return super().__new__(cls, name, bases, attrs)
class AutoTypeCheck(type):
"""Apply automatically the _parse_func decorator
to every parsing method fouds in the class."""
def __new__(cls, name, bases, attrs):
for k,v in attrs.items():
if k.startswith(_config_token):
attrs[k] = _parse_func(v)
return super().__new__(cls, name, bases, attrs)
class ConfigMetaClass(ElementOfResolver, AutoTypeCheck):
pass
class Config(metaclass=ConfigMetaClass):
def __init__(self, input_params, environment=None):
self.environment = environment
self.input_params = input_params
#self.params = self.process_params(input_params)
def get_parse_func(self, param):
func_name = _config_token + param
try:
return getattr(self, func_name)
except AttributeError:
return lambda x : x
def resolve_key(self, key, ns, input_params=None, parents=None):
if key in ns:
return ns.get_where(key)
if parents is None:
parents = []
if input_params is None:
input_params = self.input_params
if not key in input_params:
msg = "A parameter is required: {key}.".format(key=key)
if parents:
msg += "\nThis is needed to process:\n"
                msg += '\nthrough:\n'.join(' - ' + str(p) for
p in reversed(parents))
#alternatives_text = "Note: The following similarly spelled "
# "params exist in the input:"
raise InputNotFoundError(msg, key, alternatives=input_params.keys())
input_val = input_params[key]
f = self.get_parse_func(key)
max_index = len(ns.maps) -1
put_index = max_index
sig = inspect.signature(f)
kwargs = {}
for pname, param in list(sig.parameters.items())[1:]:
if pname in ns:
index, pval = ns.get_where(pname)
else:
try:
index, pval = self.resolve_key(pname, ns, parents=[*parents, key])
except KeyError:
if param.default is not sig.empty:
pval = param.default
index = max_index
else:
raise
if index < put_index:
put_index = index
kwargs[pname] = pval
val = f(input_val, **kwargs)
ns.maps[put_index][key] = val
return put_index, val
def process_fuzzyspec(self, fuzzy, ns, parents=None):
if not parents:
parents = []
gen = namespaces.expand_fuzzyspec_partial(fuzzy, ns)
while True:
try:
key, currspec, currns = next(gen)
except StopIteration as e:
return e.value
else:
self.resolve_key(key, currns, parents=[*parents, currspec])
def process_all_params(self, input_params=None):
"""Simple shortcut to process all paams in a simple namespace, if
possible."""
if input_params is None:
input_params = self.input_params
ns = ChainMap()
for param in input_params:
if param not in ns:
self.resolve_key(param, ns, input_params=input_params)
return ns
def _parse_actions_gen(self, actions, currspec=()):
if isinstance(actions, dict):
for k,v in actions.items():
yield from self._parse_actions_gen(v, (*currspec, k))
elif isinstance(actions, list):
for v in actions:
if isinstance(v, dict):
if len(v) != 1:
raise ConfigError(("Invalid action specification %s. "
"Must be a scalar or a mapping with exactly one key") % v)
k = next(iter(v.keys()))
args = v[k]
if not isinstance(args, dict):
raise ConfigError("Action arguments must be "
"a mapping if present" % k)
yield k, currspec, tuple(args.items())
elif isinstance(v, str):
yield v, currspec, ()
else:
raise ConfigError("Unrecognized format for actions. "
"Must be a string or mapping, not '%s'" %v)
else:
raise ConfigError("Unrecognized format for actions")
def parse_actions_(self, actions):
return list(self._parse_actions_gen(actions))
def __getitem__(self, item):
return self.input_params[item]
def __iter__(self):
return iter(self.input_params)
def __len__(self):
return len(self.input_params)
def __contains__(self, item):
return item in self.input_params
@classmethod
def from_yaml(cls, o, *args, **kwargs):
try:
return cls(yaml.load(o), *args, **kwargs)
except yaml.error.YAMLError as e:
raise ConfigError("Failed to parse yaml file: %s" % e)
| gpl-2.0 | 5,674,606,203,002,205,000 | 33.744898 | 86 | 0.558982 | false | 4.18476 | true | false | false |
reneetrei/agile-bayou-76491 | snipts/views.py | 1 | 11881 | from annoying.decorators import render_to
from annoying.functions import get_object_or_None
from blogs.views import blog_list
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.paginator import Paginator, InvalidPage
from django.db.models import Count
from django.db.models import Q
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.decorators.cache import never_cache
from haystack.forms import ModelSearchForm
from haystack.query import EmptySearchQuerySet, SearchQuerySet
from pygments.lexers import get_lexer_by_name
from snipts.models import Favorite, Snipt, SniptSecureView
from taggit.models import Tag
from teams.models import Team
RESULTS_PER_PAGE = getattr(settings, 'HAYSTACK_SEARCH_RESULTS_PER_PAGE', 20)
@render_to('snipts/detail.html')
def detail(request, username, snipt_slug):
snipt = get_object_or_404(Snipt, user__username=username, slug=snipt_slug)
user = snipt.user
if snipt.lexer != 'markdown':
if 'linenos' not in snipt.stylized:
snipt.save()
if user != request.user:
if not snipt.public:
if 'key' not in request.GET:
raise Http404
else:
if request.GET.get('key') != snipt.key:
raise Http404
if snipt.secure and not request.user.is_authenticated():
raise Http404
snipt.views = snipt.views + 1
snipt.save()
if snipt.secure:
secure_view = SniptSecureView(user=request.user, snipt=snipt)
secure_view.save()
tags = Tag.objects
if user == request.user:
tags = tags.filter(snipt__user=user)
public = False
else:
tags = tags.filter(snipt__user=user, snipt__public=True)
public = True
tags = tags.annotate(count=Count('taggit_taggeditem_items__id'))
tags = tags.order_by('-count', 'name')
return {
'detail': True,
'has_snipts': True,
'public': public,
'snipt': snipt,
'tags': tags,
'user': user,
}
def download(request, snipt_key):
snipt = get_object_or_404(Snipt, key=snipt_key)
return HttpResponse(snipt.code, content_type='application/x-download')
def embed(request, snipt_key):
snipt = get_object_or_404(Snipt, key=snipt_key)
lines = snipt.embedded.split('\n')
return render_to_response('snipts/embed.html',
{'lines': lines, 'snipt': snipt},
context_instance=RequestContext(request),
content_type='application/javascript')
def report_spam(request, snipt_id):
if not request.user.is_authenticated():
return HttpResponseBadRequest()
snipt = get_object_or_404(Snipt, pk=snipt_id)
send_mail('[Snipt] Spam reported',
"""
Snipt: https://snipt.net/admin/snipts/snipt/{}/
User: https://snipt.net/admin/auth/user/{}/delete/
Reporter: https://snipt.net/{}/
""".format(snipt.id, snipt.user.id, request.user.username),
'[email protected]',
['[email protected]'],
fail_silently=False)
return HttpResponse("""Thanks! Your report has been
submitted to the site admins.""")
@render_to('snipts/list-user.html')
def blog_posts(request, username):
if request.blog_user:
raise Http404
if request.user.username == username:
public = False
public_user = False
user = request.user
snipts = Snipt.objects.filter(user=request.user, blog_post=True)
tags = Tag.objects.filter(snipt__user=request.user).distinct()
else:
public = True
public_user = True
user = get_object_or_404(User, username=username)
snipts = Snipt.objects.filter(blog_post=True, user=user, public=True)
tags = Tag.objects.filter(snipt__user=user,
snipt__public=True).distinct()
tags = tags.order_by('name')
snipts = snipts.order_by('-created')
context = {
'has_snipts': True,
'public': public,
'public_user': public_user,
'snipts': snipts,
'tags': tags,
'user': user,
}
if 'rss' in request.GET:
context['snipts'] = context['snipts'][:20]
return rss(request, context)
return context
@render_to('snipts/list-user.html')
def favorites(request, username):
if request.user.username != username:
raise Http404
if request.blog_user:
raise Http404
public = False
favorites = Favorite.objects.filter(user=request.user).values('snipt')
favorites = [f['snipt'] for f in favorites]
snipts = Snipt.objects.filter(Q(pk__in=favorites))
tags = Tag.objects.filter(snipt__user=request.user).distinct()
tags = tags.order_by('name')
snipts = snipts.order_by('-created')
context = {
'favorites': favorites,
'has_snipts': True,
'public': public,
'public_user': False,
'snipts': snipts,
'tags': tags,
'user': request.user,
}
if 'rss' in request.GET:
context['snipts'] = context['snipts'][:20]
return rss(request, context)
return context
@render_to('snipts/list-public.html')
def list_public(request, tag_slug=None):
if request.blog_user:
return blog_list(request)
snipts = Snipt.objects.filter(public=True).order_by('-created')
if tag_slug:
snipts = snipts.filter(tags__slug__in=[tag_slug])
tag = get_object_or_404(Tag, slug=tag_slug)
else:
tag = None
context = {
'has_snipts': True,
'public': True,
'snipts': snipts,
'tag': tag,
}
if 'rss' in request.GET:
context['snipts'] = context['snipts'][:20]
return rss(request, context)
return context
@render_to('snipts/list-user.html')
def list_user(request, username_or_custom_slug, tag_slug=None):
if request.blog_user:
return blog_list(request, username_or_custom_slug)
user = get_object_or_None(User, username=username_or_custom_slug)
if user is None:
snipt = get_object_or_404(Snipt, custom_slug=username_or_custom_slug)
return detail(request, snipt.user, snipt.slug)
tags = Tag.objects
snipts = Snipt.objects
if user == request.user or \
(request.GET.get('api_key') == user.api_key.key) or \
(user.profile.is_a_team and
user.team.user_is_member(request.user)):
public = False
favorites = Favorite.objects.filter(user=user).values('snipt')
favorites = [f['snipt'] for f in favorites]
snipts = snipts.filter(Q(user=user) | Q(pk__in=favorites))
tags = tags.filter(snipt__user=user).distinct()
else:
tags = tags.filter(snipt__user=user, snipt__public=True).distinct()
snipts = snipts.filter(user=user, public=True)
public = True
tags = tags.order_by('name')
snipts = snipts.order_by('-created')
if tag_slug:
snipts = snipts.filter(tags__slug__in=[tag_slug])
tag = get_object_or_404(Tag, slug=tag_slug)
else:
tag = None
if tag is None:
snipts = snipts.exclude(tags__name__in=['tmp'])
context = {
'has_snipts': True,
'public': public,
'public_user': (public and user),
'snipts': snipts,
'tags': tags,
'tag': tag,
'user': user,
}
if 'rss' in request.GET:
context['snipts'] = context['snipts'][:20]
return rss(request, context)
return context
def raw(request, snipt_key, lexer=None):
snipt = get_object_or_404(Snipt, key=snipt_key)
if request.user == snipt.user:
if lexer:
lexer = lexer.strip('/')
if lexer != snipt.lexer:
try:
lexer_obj = get_lexer_by_name(lexer)
                except Exception:
lexer_obj = None
if lexer_obj:
snipt.lexer = lexer
snipt.save()
content_type = 'text/plain'
if 'nice' in request.GET:
content_type = 'text/html'
return render_to_response('snipts/raw.html',
{'snipt': snipt},
context_instance=RequestContext(request),
content_type=content_type)
def rss(request, context):
return render_to_response('rss.xml',
context,
context_instance=RequestContext(request),
content_type="application/rss+xml")
@never_cache
def search(request, template='search/search.html', load_all=True,
form_class=ModelSearchForm, searchqueryset=None,
context_class=RequestContext, extra_context=None,
results_per_page=None):
query = ''
results = EmptySearchQuerySet()
if request.GET.get('q'):
searchqueryset = SearchQuerySet() \
.filter(Q(public=True) | Q(author=request.user)) \
.order_by('-pub_date')
if request.user.is_authenticated() and \
'mine-only' in request.GET:
searchqueryset = SearchQuerySet().filter(author=request.user) \
.order_by('-pub_date')
elif request.user.is_authenticated() and \
('author' in request.GET and
request.GET.get('author')):
author = request.GET.get('author')
if author == request.user.username:
searchqueryset = SearchQuerySet().filter(author=request.user) \
.order_by('-pub_date')
else:
team = get_object_or_None(Team, slug=author)
if team and team.user_is_member(request.user):
searchqueryset = SearchQuerySet().filter(author=team) \
.order_by('-pub_date')
form = ModelSearchForm(request.GET,
searchqueryset=searchqueryset,
load_all=load_all)
if form.is_valid():
query = form.cleaned_data['q']
results = form.search()
else:
form = form_class(searchqueryset=searchqueryset, load_all=load_all)
paginator = Paginator(results, results_per_page or RESULTS_PER_PAGE)
try:
page = paginator.page(int(request.GET.get('page', 1)))
except InvalidPage:
raise Http404("No such page of results!")
context = {
'form': form,
'has_snipts': True,
'page': page,
'paginator': paginator,
'query': query,
'suggestion': None,
}
if results.query.backend.include_spelling:
context['suggestion'] = form.get_suggestion()
if extra_context:
context.update(extra_context)
return render_to_response(template,
context,
context_instance=context_class(request))
def redirect_snipt(request, snipt_key, lexer=None):
snipt = get_object_or_404(Snipt, key=snipt_key)
return HttpResponseRedirect(snipt.get_absolute_url())
def redirect_public_tag_feed(request, tag_slug):
return HttpResponseRedirect('/public/tag/{}/?rss'.format(tag_slug))
def redirect_user_feed(request, username):
user = get_object_or_404(User, username=username)
return HttpResponseRedirect(user.get_absolute_url() + '?rss')
def redirect_user_tag_feed(request, username, tag_slug):
return HttpResponseRedirect(u'/{}/tag/{}/?rss'.format(username, tag_slug))
| mit | 8,332,129,242,457,065,000 | 28.628429 | 91 | 0.591533 | false | 3.67719 | false | false | false |
glasslion/SPF | spf/extra/sitecloner.py | 8 | 8290 | import json
import sys
import urllib2
import re
import os
from HTMLParser import HTMLParser
class htmltagparser(HTMLParser):
def __init__(self):
self.reset()
self.NEWATTRS = []
def handle_starttag(self, tag, attrs):
self.NEWATTRS = attrs
def clean(self):
self.NEWATTRS = []
class Cloner(object):
def __init__(self, url, path, maxdepth=3):
self.start_url = url
self.path = os.getcwd() + "/" + path
self.maxdepth = maxdepth
self.seenurls = []
self.user_agent="Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)"
    # ######################################
    # Utility Functions
    # ######################################
# http get request
def get_url(self, url):
headers = { 'User-Agent' : self.user_agent }
try:
req = urllib2.Request(url, None, headers)
return urllib2.urlopen(req).read()
except urllib2.HTTPError, e:
print 'We failed with error code - %s.' % e.code
if e.code == 404:
return ""
else:
return ""
# download a binary file
def download_binary(self, url):
filename = ""
if url.startswith(self.start_url):
filename = url[len(self.start_url):]
else:
return
data = self.get_url(url)
if (data == ""):
return
self.write_outfile(data, filename)
return
# writeout a file
def write_outfile(self, data, filename):
if filename.startswith("/"):
filename = filename[1:]
fullfilename = self.path + "/" + filename
if not os.path.exists(os.path.dirname(fullfilename)):
os.makedirs(os.path.dirname(fullfilename))
print "WRITING OUT FILE [%s]" % (filename)
f = open(fullfilename, 'a')
f.write(data)
f.close()
# unique a list
def unique_list(self, old_list):
new_list = []
if old_list != []:
for x in old_list:
if x not in new_list:
new_list.append(x)
return new_list
    # ######################################
    # html and link processing functions
    # ######################################
def find_forms(self, html):
form_regex = re.compile('<form[^>]+>')
return self.unique_list(form_regex.findall(html))
# convert all forms to contain hooks
def process_forms(self, html, method="get", action="index"):
# find all forms in page
forms = self.find_forms(html)
parser = htmltagparser()
# loop over each form
for form in forms:
print "FOUND A FORM [%s]" % (form)
# parse out parts of old form tag
parser.feed(form)
attrs = parser.NEWATTRS
parser.clean()
# build new form
new_form = "<form method=\"%s\" action=\"%s\"" % (method, action)
for (name, value) in attrs:
if ((name.lower() != "method") and (name.lower() != "action")):
new_form += " %s=\"%s\"" % (name, value)
new_form += ">"
print "REWROTE FORM TO BE [%s]" % (new_form)
# rewrite html with new form
html = html.replace(form, new_form)
return html
# build new list of only the link types we are interested in
def process_links(self, links):
new_links = []
for link in links:
link = link.lower()
if (link.endswith(".css") or
link.endswith(".html") or
link.endswith(".php") or
link.endswith(".asp") or
link.endswith(".aspx") or
link.endswith(".js") or
link.endswith(".ico") or
link.endswith(".png") or
link.endswith(".jpg") or
link.endswith(".jpeg") or
link.endswith(".bmp") or
link.endswith(".gif")
# ("." not in os.path.basename(link))
):
new_links.append(link)
return new_links
    # primary recursive function used to clone and crawl the site
def clone(self, depth=0, url="", base="", method="get", action="index"):
# early out if max depth is reached
if (depth > self.maxdepth):
print "MAX URL DEPTH [%s]" % (url)
return
# if no url is specified, then assume the starting url
if (url == ""):
url = self.start_url
# if no base is specified, then assume the starting url
if (base == ""):
base = self.start_url
# check to see if we have processed this url before
if (url in self.seenurls):
print "ALREADY SEEN URL [%s]" % (url)
return
else:
self.seenurls.append(url)
# get the url and return if nothing was returned
html = self.get_url(url)
if (html == ""):
return
# determine the websites script/filename
filename = ""
# we are only interested in urls on the same site
if url.startswith(base):
filename = url[len(base):]
# if filename is blank, assume index.html
if (filename == ""):
filename = "index.html"
else:
print "BAD URL [%s]" % (url)
return
print "CLONING URL [%s]" % (url)
# find links
links = re.findall(r"<link.*?\s*href=\"(.*?)\".*?>", html)
links += re.findall(r"<script.*?\s*src=\"(.*?)\".*?>", html)
links += re.findall(r"<img.*?\s*src=\"(.*?)\".*?>", html)
links += re.findall(r"\"(.*?)\"", html)
links += re.findall(r"url\(\"?(.*?)\"?\);", html)
links = self.process_links(self.unique_list(links))
# loop over the links
for link in links:
link = link.lower()
new_link = link
if link.startswith("http"):
new_link = link
elif link.startswith("//"):
new_link = "http:" + link
elif link.startswith("/"):
new_link = base + link
elif link.startswith("../"):
new_link = base + "/" + link[3:]
else:
new_link = base + "/" + link
good_link = new_link
if (new_link.startswith(self.start_url)):
good_link = new_link[len(self.start_url):]
print "FOUND A NEW LINK [%s]" % (new_link)
print "FOUND A NEW LINK * [%s]" % (good_link)
# switch out new_link for link
html = html.replace("\"" + link + "\"", "\"" + good_link + "\"")
            # determine if we need to call clone recursively
if (link.endswith(".css") or
link.endswith(".html") or
link.endswith(".php") or
link.endswith(".asp") or
link.endswith(".aspx") or
link.endswith(".js")
# ("." not in os.path.basename(link))
):
                # recursively call clone on each non-image link
if base != self.start_url:
self.clone(url=new_link, base=os.path.dirname(url), depth=depth+1)
else:
self.clone(url=new_link, depth=depth+1)
else:
# must be a binary file, so just download it
self.download_binary(new_link)
# update any forms within the page
html = self.process_forms(html, action=action)
# write out the html for the page we have been processing
self.write_outfile(html, filename)
return
if __name__ == "__main__":
def usage():
print "%s <URL> <outdirectory> (optional <form action>)" % (sys.argv[0])
if ((len(sys.argv) < 3) or (len(sys.argv) > 4)):
usage()
sys.exit(0)
c = Cloner(sys.argv[1], sys.argv[2])
if len(sys.argv) == 4:
c.clone(action=sys.argv[3])
else:
c.clone()
| bsd-3-clause | -7,408,070,258,081,257,000 | 31.896825 | 89 | 0.483353 | false | 4.120278 | false | false | false |
Iristyle/ChocolateyPackages | EthanBrown.SublimeText2.WebPackages/tools/PackageCache/Tag/tag_indent.py | 3 | 5979 | import sublime, sublime_plugin
import re
# to find on which indentation level we currently are
current_indentation_re = re.compile("^\s*")
# to leave additional new lines as is
aditional_new_lines_re = re.compile("^\s*\n+\s*\n+\s*$")
# no indentation
no_indent = re.compile("^</?(head|body)[>| ]", re.I)
# possible self closing tags: XML-------HTML------------------------------------------------HTML5----------------
self_closing_tags = re.compile("^<(\?|\!|%|#|area|base|br|col|frame|hr|img|input|link|meta|param|command|embed|source)", re.I)
skip_content_of_this_tags_re = re.compile("^<(script|style|pre|code)(>| )", re.I)
trim_outter_left = "abbr|acronym|dfn|em|strong|b|i|u|font|del|ins|sub|sup".split('|')
trim_outter_right = []  # no tags trimmed on the outer right ("".split('|') would yield [''])
trim_inner_left = "abbr|acronym|dfn|em|strong|b|i|u|font|del|ins|sub|sup|title".split('|')
trim_inner_right = "abbr|acronym|dfn|em|strong|b|i|u|font|del|ins|sub|sup|title".split('|')
def TagIndentBlock(data, view):
# User settings
settings = sublime.load_settings('Tag Package.sublime-settings')
preserve_additional_new_lines = bool(settings.get('preserve_additional_new_lines', True))
num_chars_considered_little_content = str(int(settings.get('little_content_means_this_number_of_characters', 60)))
# the indent character
if view.settings().get('translate_tabs_to_spaces') :
indent_character = ' '*int(view.settings().get('tab_size', 4))
else:
indent_character = '\t'
# on which indentation level we currently are?
indentation_level = (current_indentation_re.search(data).group(0)).split("\n")
current_indentation = indentation_level.pop()
if len(indentation_level) == 1:
beauty = "\n"+indentation_level[0]
elif len(indentation_level) > 1:
beauty = "\n".join(indentation_level)
else:
beauty = ''
# pre processing
if preserve_additional_new_lines == False:
#fix comments
data = re.sub(r'(\n\s*<\!--)', '\n\t\n\\1', data)
# first newline should be skipped
starting = True
# inspiration from http://jyro.blogspot.com/2009/08/makeshift-xml-beautifier-in-python.html
level = 0
tags = re.split('(<[^>]+>)',data)
lenght = len(tags)
i = 0
while i < lenght:
f = tags[i]
no_indent_match = no_indent.match(f[:20])
if f.strip() == '':
if preserve_additional_new_lines and aditional_new_lines_re.match(f):
beauty += '\n'
elif f[0]=='<' and f[1] != '/':
# beauty += '1'
if starting == False:
beauty += '\n'
starting = False
beauty += current_indentation
if not no_indent_match:
beauty += indent_character*level
if skip_content_of_this_tags_re.match(f[:20]):
tag_is = re.sub(r'<([^ ]+)(>| ).*', '\\1', f[:20], 1)
tag_is = re.compile("/"+tag_is+">$", re.I)
beauty += f
i = i+1
while i < lenght:
f = tags[i]
if not tag_is.search(f[-20:]):
beauty += f
i = i+1
else:
beauty += f
break
else:
beauty += f.strip()
if not no_indent_match:
level = level + 1
#self closing tag
if f[-2:] == '/>' or self_closing_tags.match(f):
#beauty += '2'
beauty += current_indentation
if not no_indent_match:
level = level - 1
elif f[:2]=='</':
if not no_indent_match:
level = level - 1
#beauty += '3'
if starting == False:
beauty += '\n'
starting = False
beauty += current_indentation
if not no_indent_match:
beauty += indent_character*level
beauty += f.strip()
else:
#beauty += '4'
if starting == False:
beauty += '\n'
starting = False
beauty += current_indentation
if not no_indent_match:
beauty += indent_character*level
beauty += f.strip()
i = i+1
if bool(settings.get('empty_tags_close_on_same_line', True)):
# put empty tags on same line
beauty = re.sub(r'<([^/!][^>]*[^/])>\s+</', '<\\1></', beauty)
# put empty tags on same line for tags with one character
beauty = re.sub(r'<([^/!])>\s+</', '<\\1></', beauty)
if bool(settings.get('tags_with_little_content_on_same_line', True)):
# put tags with little content on same line
beauty = re.sub(r'<([^/][^>]*[^/])>\s*([^<\t\n]{1,'+num_chars_considered_little_content+'})\s*</', '<\\1>\\2</', beauty)
# put tags with little content on same line for tags with one character
beauty = re.sub(r'<([^/])>\s*([^<\t\n]{1,'+num_chars_considered_little_content+'})\s*</', '<\\1>\\2</', beauty)
for tag in trim_outter_left:
beauty = re.sub(r'\s+<'+tag+'(>| )', ' <'+tag+'\\1', beauty, re.I)
for tag in trim_outter_right:
beauty = re.sub(r'</'+tag+'>\s+([^\s])', '</'+tag+'> \\1', beauty, re.I)
for tag in trim_inner_left:
beauty = re.sub(r'<'+tag+'(>| [^>]*>)\s+([^\s])', '<'+tag+'\\1\\2', beauty, re.I)
for tag in trim_inner_right:
beauty = re.sub(r'\s+</'+tag+'>', '</'+tag+'> ', beauty, re.I)
return beauty
class TagIndentCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if region.empty():
continue
if self.view.score_selector(region.a, 'text.html | text.xml') <= 0:
dataRegion = region
else:
dataRegion = sublime.Region(self.view.line(region.begin()).begin(), region.end())
data = TagIndentBlock(self.view.substr(dataRegion), self.view)
self.view.replace(edit, dataRegion, data);
def is_visible(self):
for region in self.view.sel():
if not region.empty():
return True
return False
class TagIndentDocumentCommand(sublime_plugin.TextCommand):
def run(self, edit):
dataRegion = sublime.Region(0, self.view.size())
data = TagIndentBlock(self.view.substr(dataRegion).strip(), self.view)
self.view.replace(edit, dataRegion, data);
def is_visible(self):
value = False
for region in self.view.sel():
if region.empty():
continue
if self.view.score_selector(region.a, 'text.html | text.xml') <= 0:
return False
else:
value = True
return value or self.view.score_selector(0, 'text.html | text.xml') > 0 | mit | -2,178,400,988,483,608,800 | 32.595506 | 126 | 0.607459 | false | 2.751496 | false | false | false |
aaboffill/django-allmedia | media/utils.py | 1 | 2102 | # coding=utf-8
import os
import re
import unicodedata
from django.utils import six
from .settings import (MEDIA_IMAGE_EXTENSION, MEDIA_IMAGE_FORMAT, MEDIA_IMAGE_QUALITY,
MEDIA_NORMALIZE_FILENAME, MEDIA_CONVERT_FILENAME)
def thumbnail_path(path, size, method):
"""
Returns the path for the resized image.
"""
directory, name = os.path.split(path)
image_name, ext = name.rsplit('.', 1)
return os.path.join(directory, '%s_%s_%s.%s' % (image_name, method, size, MEDIA_IMAGE_EXTENSION))
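# Illustrative example (path and settings are hypothetical): assuming
# MEDIA_IMAGE_EXTENSION is 'jpg', a call such as
#     thumbnail_path('uploads/photos/cat.png', '200x150', 'crop')
# returns 'uploads/photos/cat_crop_200x150.jpg' -- the thumbnail sits next to
# the source file with the method and size encoded in its name.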
def generate_thumbnail(path, size, method):
try:
import Image
except ImportError:
try:
from PIL import Image
except ImportError:
raise ImportError('Cannot import the Python Image Library.')
image = Image.open(path)
# normalize image mode
if image.mode != 'RGB':
image = image.convert('RGB')
# parse size string 'WIDTHxHEIGHT'
width, height = [int(i) for i in size.split('x')]
# use PIL methods to edit images
if method == 'scale':
image.thumbnail((width, height), Image.ANTIALIAS)
image.save(thumbnail_path(path, size, method), MEDIA_IMAGE_FORMAT, quality=MEDIA_IMAGE_QUALITY)
elif method == 'crop':
try:
import ImageOps
except ImportError:
from PIL import ImageOps
ImageOps.fit(
image, (width, height), Image.ANTIALIAS
).save(thumbnail_path(path, size, method), MEDIA_IMAGE_FORMAT, quality=MEDIA_IMAGE_QUALITY)
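# Minimal usage sketch (illustrative; the path is hypothetical): the size
# string is always 'WIDTHxHEIGHT' and method is either 'scale' (fit inside the
# box, preserving aspect ratio) or 'crop' (fill the box exactly):
#     generate_thumbnail('uploads/photos/cat.png', '200x150', 'scale')
#     generate_thumbnail('uploads/photos/cat.png', '64x64', 'crop')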
def process_filename(value):
"""
Convert Filename. # from django-filebrowser
"""
if MEDIA_NORMALIZE_FILENAME:
chunks = value.split(os.extsep)
normalized = []
for v in chunks:
v = unicodedata.normalize('NFKD', six.text_type(v)).encode('ascii', 'ignore').decode('ascii')
v = re.sub(r'[^\w\s-]', '', v).strip()
normalized.append(v)
value = '.'.join(normalized) if len(normalized) > 1 else normalized[0]
return value.replace(" ", "_").lower() if MEDIA_CONVERT_FILENAME else value
| bsd-3-clause | 8,028,434,819,355,424,000 | 29.028571 | 105 | 0.619886 | false | 3.885397 | false | false | false |
timothyryanwalsh/cca-diskimageprocessor | main.py | 1 | 6150 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import subprocess
import sys
import design
class StartScanThread(QThread):
def __init__(self, process_list):
QThread.__init__(self)
self.process_list = process_list
def start_scan(self):
print(self.process_list) # for debugging
subprocess.check_output(self.process_list)
def run(self):
self.start_scan()
class DiskImageProcessorApp(QMainWindow, design.Ui_DiskImageProcessor):
def __init__(self, parent=None):
super(DiskImageProcessorApp, self).__init__(parent)
self.setupUi(self)
# build browse functionality buttons
self.analysisSourceBtn.clicked.connect(self.browse_analysis_source)
self.procSourceBtn.clicked.connect(self.browse_processing_source)
self.analysisDestBtn.clicked.connect(self.browse_analysis_dest)
self.procDestBtn.clicked.connect(self.browse_processing_dest)
# build start functionality
self.analysisStartBtn.clicked.connect(self.start_analysis)
self.procStartBtn.clicked.connect(self.start_processing)
# about dialog
self.actionAbout.triggered.connect(self.about_dialog)
def about_dialog(self):
QMessageBox.information(
self,
"About",
"Disk Image Processor v1.0.0\nCanadian Centre for Architecture\nDeveloper: Tessa Walsh\n2018\nMIT License\nhttps://github.com/CCA-Public/cca-diskimageprocessor",
)
def browse_analysis_source(self):
self.analysisSource.clear() # clear directory source text
directory = QFileDialog.getExistingDirectory(self, "Select folder")
if directory: # if user didn't pick directory don't continue
self.analysisSource.setText(directory)
def browse_processing_source(self):
self.procSource.clear() # clear directory source text
directory = QFileDialog.getExistingDirectory(self, "Select folder")
if directory: # if user didn't pick directory don't continue
self.procSource.setText(directory)
def browse_analysis_dest(self):
self.analysisDest.clear() # clear directory source text
directory = QFileDialog.getExistingDirectory(self, "Select folder")
if directory: # if user didn't pick directory don't continue
self.analysisDest.setText(directory)
def browse_processing_dest(self):
self.procDest.clear() # clear directory source text
directory = QFileDialog.getExistingDirectory(self, "Select folder")
if directory: # if user didn't pick directory don't continue
self.procDest.setText(directory)
def done_analysis(self):
self.analysisCancelBtn.setEnabled(False)
self.analysisStartBtn.setEnabled(True)
QMessageBox.information(self, "Finished", "Analysis complete.")
self.analysisStatus.setText("Completed")
def done_processing(self):
self.procCancelBtn.setEnabled(False)
self.procStartBtn.setEnabled(True)
QMessageBox.information(self, "Finished", "Processing complete.")
self.procStatus.setText("Completed")
def start_analysis(self):
# clear status
self.analysisStatus.clear()
# create list for process
self.process_list = list()
self.process_list.append("python3")
self.process_list.append(
"/usr/share/ccatools/diskimageprocessor/diskimageanalyzer.py"
)
# give indication process has started
self.analysisStatus.setText("Processing. Please be patient.")
# option handling
if self.quietLogBtn.isChecked():
self.process_list.append("--quiet")
if self.retainFilesBtn.isChecked():
self.process_list.append("-k")
if self.unallocBtn.isChecked():
self.process_list.append("-e")
if self.resForksBtn.isChecked():
self.process_list.append("-r")
# add source and dest
self.process_list.append(self.analysisSource.text())
self.process_list.append(self.analysisDest.text())
# process
self.get_thread = StartScanThread(self.process_list)
self.get_thread.finished.connect(self.done_analysis)
self.get_thread.start()
self.analysisCancelBtn.setEnabled(True)
self.analysisCancelBtn.clicked.connect(self.get_thread.terminate)
self.analysisStartBtn.setEnabled(False)
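        # Illustrative result (paths are made up): with "quiet logging" and
        # "retain files" checked, the command assembled above is roughly
        #     ['python3', '/usr/share/ccatools/diskimageprocessor/diskimageanalyzer.py',
        #      '--quiet', '-k', '/path/to/images', '/path/to/output']
        # i.e. the same flags a user could pass on the command line.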
def start_processing(self):
# clear status
self.procStatus.clear()
# create list for process
self.process_list = list()
self.process_list.append("python3")
self.process_list.append(
"/usr/share/ccatools/diskimageprocessor/diskimageprocessor.py"
)
# give indication process has started
self.procStatus.setText("Processing. Please be patient.")
# option handling
if self.quietLogBtn.isChecked():
self.process_list.append("--quiet")
if self.unallocBtn.isChecked():
self.process_list.append("-e")
if self.resForksBtn.isChecked():
self.process_list.append("-r")
if self.bagBtn.isChecked():
self.process_list.append("-b")
if self.logicalFilesOnlyBtn.isChecked():
self.process_list.append("-f")
if self.bulkExtBtn.isChecked():
self.process_list.append("-p")
# add source and dest
self.process_list.append(self.procSource.text())
self.process_list.append(self.procDest.text())
# process
self.get_thread = StartScanThread(self.process_list)
self.get_thread.finished.connect(self.done_processing)
self.get_thread.start()
self.procCancelBtn.setEnabled(True)
self.procCancelBtn.clicked.connect(self.get_thread.terminate)
self.procStartBtn.setEnabled(False)
def main():
app = QApplication(sys.argv)
form = DiskImageProcessorApp()
form.show()
app.exec_()
if __name__ == "__main__":
main()
| mit | 3,563,083,440,929,152,000 | 33.943182 | 173 | 0.658049 | false | 4.119223 | false | false | false |
UManPychron/pychron | pychron/monitors/pumping_monitor.py | 2 | 2731 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# from traits.api import HasTraits
# from traitsui.api import View,Item,Group,HGroup,VGroup
# ============= standard library imports ========================
from __future__ import absolute_import
import time
from .monitor import Monitor
# from threading import Thread
# ============= local library imports ==========================
cnt = 0
class PumpingMonitor(Monitor):
'''
G{classtree}
'''
gauge_manager = None
tank_gauge_name = 'gauge1'
pump_gauge_name = 'gauge2'
# pumping_duration=Float
# idle_duration=Float
name = 'AnalyticalPumpingMonitor'
def _monitor_(self):
'''
'''
pump_start = 0
idle_start = 0
def get_time(_start_):
ct = time.time()
_dur_ = 0
if _start_ == 0:
_start_ = ct
else:
_dur_ = ct - _start_
return _start_, _dur_
while self.gauge_manager is not None:
state = self._get_pumping_state()
if state == 'pumping':
idle_start = 0
pump_start, pump_duration = get_time(pump_start)
self.parent.update_pumping_duration(self.name, pump_duration)
else: # state=='idle'
pump_start = 0
idle_start, idle_duration = get_time(idle_start)
# print 'idle duir',idle_duration
self.parent.update_idle_duration(self.name, idle_duration)
time.sleep(1)
def _get_pumping_state(self):
'''
'''
state = 'idle'
# gm=self.gauge_manager
global cnt
if cnt >= 5 and cnt < 10:
state = 'pumping'
# tankgauge=gm.get_gauge_by_name(self.tank_gauge_name)
# if tankgauge.pressure<1:
# state='pumping'
cnt += 1
return state
# ============= EOF ====================================
| apache-2.0 | 1,754,242,747,121,334,000 | 29.344444 | 81 | 0.517759 | false | 4.208012 | false | false | false |
wodore/wodore-ng | main/model/icon.py | 1 | 11758 | # coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
#from api import fields
import model
import util
import config
from .counter import CountableLazy
from .collection import Collection, AddCollection
import config
import cloudstorage as gcs
from google.appengine.api import images
from google.appengine.ext import blobstore
"""
An icon consists of two model classes:
  IconStructure: which holds all icon-specific data but no additional information.
  Icon: the Icon model contains an IconStructure as an icon and additional information
  such as a counter and a collection.
For each icon there exists a toplevel icon which can have children grouped by collection.
Once an icon is created it should not be changed anymore.
If one of the children's counters is updated, the toplevel icon's counter is updated
as well.
The highest toplevel has the default collection Collection.top_key().
"""
class IconValidator(model.BaseValidator):
name = [2,30]
class Icon(CountableLazy, AddCollection, model.Base):
name = ndb.StringProperty(required=True,\
validator=IconValidator.create('name'))
#icon = ndb.StructuredProperty(IconStructure)
icon = ndb.BlobProperty(required=True)
icon_url = ndb.StringProperty(required=True,default="",indexed=False)
private = ndb.BooleanProperty(required=True,default=False) # not shown for others
# private means inside its collection
replaced_by = ndb.KeyProperty(kind='Icon') # if the icon should not be used anymore
fallback = ndb.KeyProperty(kind='Icon') # fallback icon, for example a png for a svg
external_source = ndb.StringProperty(indexed=False) # not recommended
filetype = ndb.StringProperty(choices=['svg','pixel','external'],indexed=True,
default='svg', required=True)
#see: http://support.flaticon.com/hc/en-us/articles/202798381-How-to-attribute-the-icons-to-their-authors
# this would be the author link
author_html = ndb.StringProperty()
comment = ndb.TextProperty()
# take as keywords the tags from flaticon
keywords = ndb.StringProperty(indexed=True,repeated=True)
@classmethod
def create(cls,icon,name,icon_url=None,collection=Collection.top_key(),\
toplevel=None, private=False, author_html=None,\
fallback=None, external_source=None, \
filetype=None, keywords=None, comment=None, auto=True):
""" Creates and puts a new icon to the database.
        The icon argument is expected to be the icon's source code (svg or image data).
Keywords should be a list.
Returns Icon key"""
new_icon = Icon(icon = icon,
name=name,
collection=collection,
private=private,
icon_url=icon_url)
if toplevel:
new_icon.toplevel = toplevel
if fallback:
new_icon.toplevel = fallback
if author_html:
new_icon.author_html = author_html
if external_source:
new_icon.external_source = external_source
if filetype:
new_icon.filetype = filetype
if comment:
new_icon.comment = comment
if keywords:
# TODO check keywords (tag validator) and make list unique
new_icon.keywords = model.TagValidator.name(keywords)
# SAVE TO CLOUD STORAGE
adr = "{}/{}/{}/{}".format(config.BUCKET, collection.urlsafe(), 'icons', name)
write_retry_params = gcs.RetryParams(backoff_factor=1.1)
gcs_file = gcs.open(adr, 'w',
content_type="image/svg+xml",
options={
'x-goog-meta-name': name
},
retry_params=write_retry_params)
gcs_file.write(icon) # saves file to cloud storage
gcs_file.close()
blob_key = blobstore.create_gs_key('/gs' + adr)
img_url = images.get_serving_url(blob_key=blob_key)
if not icon_url:
new_icon.icon_url = img_url
if not external_source:
new_icon.external_source = img_url
key = new_icon._add_and_put(auto=auto)
return key
@classmethod
def add(cls,key,collection=None, as_child=False):
""" Add a icon which already exists by key.
If no collection or the same belonging to the key is given the icon
counter is increased by one.
If the collection is different two things can happen:
1. If the key's collection is Collection.top_key() (no toplevel) or 'as_child' is true:
The key is assigned as toplevel.
('as_child' means the icon is added with 'key' as 'toplevel')
2. It is not a toplevel key:
The property 'toplevel' is assigned as key.
In both cases a toplevel is set. The next step is to look for a icon with
the same toplevel and collection, if one exists its counter is increased.
If none exists a new one is created.
"""
icon_db = key.get()
if icon_db.collection == collection or not collection:
icon_db.incr()
icon_db.put()
return key
else:
if collection == Collection.top_key():
                return cls.add(icon_db.toplevel, collection)
elif icon_db.collection == Collection.top_key() or as_child:
toplevel = key
else:
toplevel = icon_db.toplevel
## Look for icons with same toplevel and collection
keys = Icon.get_by_toplevel(toplevel, collection=collection, keys_only=True, limit=1)
if keys:
#for key in keys:
key = keys[0]
return Icon.add(key,collection)
else:
return Icon.create(icon_db.icon,icon_db.name,collection=collection,toplevel=toplevel)
@classmethod
def remove(cls,id):
"""Removes a icon by its key
Remove means its counter is decreased by one"""
key = cls.id_to_key(id)
icon_db = key.get()
icon_db.decr()
icon_db.put()
def get_tags(self,limit=10):
"""Fetches tags which are used together with this icon
returns a tag dbs and a variable more if more tags are available."""
#TODO write test
dbs = model.Tag.query(model.Tag.icon_id==self.key.id())\
.order(-model.Tag.cnt).fetch(limit+1)
if len(dbs) > limit:
more = True
else:
more = False
return dbs, more
@classmethod
def qry(cls, toplevel=None, name=None, collection=None, private=False,
replaced_by=None, order_by_count=True, **kwargs):
"""Query for the icon model"""
qry = cls.query(**kwargs)
if toplevel:
qry_tmp = qry
qry = qry.filter(cls.toplevel==toplevel)
if name:
qry_tmp = qry
qry = qry.filter(cls.name==name,)
if collection:
qry_tmp = qry
qry = qry.filter(cls.collection == collection)
if not private:
qry_tmp = qry
qry = qry_tmp.filter(cls.private==False)
if order_by_count:
qry_tmp = qry
qry = qry.order(-cls.cnt)
#else filter for private True and False
return qry
@classmethod
def get_by_toplevel(cls, toplevel=None, collection=None, private=False,
keys_only=False, limit=100):
"""Returns icon dbs or keys defined by its toplevel and some addition parameters"""
return cls.qry(toplevel=toplevel,collection=collection,private=private).\
fetch(keys_only=keys_only, limit=limit)
@classmethod
def get_dbs(
cls, name=None, private=None, \
replaced_by=None, **kwargs
):
kwargs = cls.get_col_dbs(**kwargs)
kwargs = cls.get_counter_dbs(**kwargs)
return super(Icon, cls).get_dbs(
name=name or util.param('name', None),
private=private or util.param('private', bool),
replaced_by=replaced_by or util.param('replaced_by', ndb.Key),
**kwargs
)
def _add_and_put(self, auto=True):
""" Adds and puts an icon to the DB
If 'auto' is true it automatically creates a toplevel icon if none is given.
This only works for one level, if a higher hierarchy is required it needs to be
done manually.
"""
if not getattr(self,'toplevel',None) \
and self.collection != Collection.top_key() \
and auto \
and not self.private: #no toplevel if private
#top = Icon(icon=self.icon,name=self.name)
top = Icon(icon=self.icon,name=self.name,\
private=False, icon_url=self.icon_url, \
external_source=self.external_source, \
filetype=self.filetype, keywords=self.keywords)
if getattr(self,'fallback',None) : # TODO test fallbacks
            fallback_db = self.fallback.get()
fallback_key = getattr(fallback_db,'toplevel',None) # take toplevel if available
if not fallback_key:
fallback_key = self.fallback
top.fallback=fallback_key
top_key = top.put()
self.toplevel = top_key
self.incr()
self.put()
#self.get_icon()
return self.key
class Iconize(ndb.Model):
"""Adds an icon property
    Icons are managed in the 'Icon' model; this mixin
    adds two methods to deal with icons:
    'add_icon': if an icon already exists it can be added by its key
    'create_icon': create a new icon
    The two methods 'put' the icons automatically, which means it is recommended to
    put the iconized model as well, or to remove the icon again if something went wrong.
"""
#icon = ndb.StructuredProperty(IconStructure)
icon_id = ndb.IntegerProperty(indexed=True,required=True, default=0)
icon_url = ndb.StringProperty(required=True,default="",indexed=False)
def add_icon(self, key=None, id=None):
"""Adds an icon by key or id, the key is either a toplevel key or an icon key.
'id' needs to be a integer."""
if id:
key = Icon.id_to_key(id)
elif key:
id = key.id()
else:
return False
if not getattr(self,'collection',None):
col = Collection.top_key()
else:
col = self.collection
key = Icon.add(key,collection=col)
#self.icon = key.get().get_icon()
self.icon_id = key.id()
self.icon_url = key.get().icon_url
def create_icon(self,icon,name,private=False):
if not getattr(self,'collection',None):
col = Collection.top_key()
else:
col = self.collection
key = Icon.create(icon=icon,name=name,collection=col,private=private)
#icon.icon_key = key
#self.icon = icon
self.icon_id = key.id()
self.icon_url = key.get().icon_url
def remove_icon(self):
if getattr(self,'icon_id',None):
Icon.remove(self.icon_id)
self.icon_id = 0
self.icon_url = ""
## TODO write test
# shuld not be used anymore, replaced by get_icon_id
def get_icon_key(self):
if getattr(self,'icon',None):
return self.icon.icon_key
elif getattr(self,'toplevel',None):
top_db = self.toplevel.get()
if getattr(top_db,'icon',None):
return top_db.icon.icon_key
else:
None
def get_icon_id(self):
if getattr(self,'icon_id',None):
return self.icon_id
elif getattr(self,'toplevel',None):
top_db = self.toplevel.get()
if getattr(top_db,'icon_id',None):
return top_db.icon_id
else:
None
| mit | 4,735,112,402,842,403,000 | 35.178462 | 109 | 0.609117 | false | 3.950941 | false | false | false |
Splawik/pytigon | pytigon/prj/_schtools/schtools/views.py | 1 | 8305 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest
from django.shortcuts import render, redirect
from django import forms
from django.template.loader import render_to_string
from django.template import Context, Template
from django.template import RequestContext
from django.conf import settings
from django.views.generic import TemplateView
from pytigon_lib.schviews.form_fun import form_with_perms
from pytigon_lib.schviews.viewtools import dict_to_template, dict_to_odf, dict_to_pdf, dict_to_json, dict_to_xml
from pytigon_lib.schviews.viewtools import render_to_response
from pytigon_lib.schdjangoext.tools import make_href
from pytigon_lib.schviews import actions
from django.utils.translation import ugettext_lazy as _
from . import models
import os
import sys
import datetime
import time
from pytigon_lib.schdjangoext.tools import import_model
from pyexcel_odsr import get_data
from pytigon_lib.schtools.schjson import json_dumps, json_loads
from pytigon_lib.schfs.vfstools import get_temp_filename
import openpyxl
import csv
PFORM = form_with_perms('schtools')
class ImportTableForm(forms.Form):
import_file = forms.FileField(label=_('File to import'), required=True, )
def view_importtableform(request, *argi, **argv):
return PFORM(request, ImportTableForm, 'schtools/formimporttableform.html', {})
def autocomplete_search(request, type):
q = request.GET.get('query', request.POST.get('query', None))
if not q:
return HttpResponse(content_type='text/plain')
limit = request.GET.get('limit', request.POST.get('limit', 15))
try:
limit = int(limit)
except ValueError:
return HttpResponseBadRequest()
    if q != ' ':
        # Autocomplete is assumed to be defined in this app's models module
        tab = models.Autocomplete.objects.filter(type=type, label__istartswith=q)[:limit]
    else:
        tab = models.Autocomplete.objects.filter(type=type)[:limit]
out_tab = []
for pos in tab:
out_tab.append({'id': pos.id, 'label': pos.label, 'name': pos.label, 'value': pos.value})
    json_data = json_dumps(out_tab)
return HttpResponse(json_data, content_type='application/x-javascript')
def set_user_param(request, **argv):
key = request.POST.get('param', None)
value = request.POST.get('value', None)
user = request.user.username
p = models.Parameter.objects.filter(type='sys_user', subtype=user, key=key)
if len(p)>0:
obj = p[0]
else:
obj = models.Parameter()
obj.type = 'sys_user'
obj.subtype = user
obj.key = key
obj.value = value
obj.save()
return HttpResponse("OK")
def get_user_param(request, **argv):
key = request.POST.get('param', None)
user = request.user.username
p = models.Parameter.objects.filter(type='sys_user', subtype=user, key=key)
if len(p)>0:
obj = p[0]
return HttpResponse(obj.value)
else:
return HttpResponse("")
@dict_to_template('schtools/v_import_table.html')
def import_table(request, app, table):
if request.FILES:
if 'import_file' in request.FILES:
data = request.FILES['import_file']
name = data.name
ext = name.split('.')[-1].lower()
model = import_model(app, table)
table = []
if ext in ('xlsx', 'xls', 'ods'):
if ext == 'ods':
d = get_data(data)
#print("F0", d)
#buf = json_dumps(d)
for key in d:
table = d[key]
break
else:
first_line = True
width = 0
file_name = get_temp_filename("temp.xlsx")
f = open(file_name, 'wb')
f.write(data.read())
f.close()
workbook = openpyxl.load_workbook(filename=file_name, read_only=True)
worksheets = workbook.get_sheet_names()
worksheet = workbook.get_sheet_by_name(worksheets[0])
for row in list(worksheet.rows):
if first_line:
first_line = False
buf = []
i = 0;
for pos in row:
value = pos.value
if value:
buf.append(value)
else:
break
i += 1
if len(buf)>0:
count = len(buf)
table.append(buf)
else:
break
else:
if row[0].value:
buf = []
i = 0
for pos in row:
if i >= count:
break
buf.append(pos.value)
i += 1
table.append(buf)
else:
break
os.remove(file_name)
elif ext in ('txt', 'csv'):
first_line = True
sep_list = ['\t', ';', ',', '|', ]
sep = None
txt = data.read().decode('utf-8').replace('\r','').split('\n')
for line in txt:
for pos in sep_list:
if pos in line:
sep = pos
break
break
if sep:
csv_reader = csv.reader(txt, delimiter=sep)
for row in csv_reader:
table.append(row)
if table and len(table)>1:
header = list([pos.strip() for pos in table[0] if pos])
tree = False
tmp = []
for pos in header:
if not pos in tmp:
tmp.append(pos)
else:
tree = True
id1 = tmp.index(pos)
id2 = len(tmp)
break
for row in table[1:]:
if len(row) == len(header):
x = model()
parent = None
for index, (attr_name, value) in enumerate(zip(header,row)):
if tree:
if index == id1:
if row[id2]:
objs = model.objects.filter(**{ attr_name: value })
if len(objs)==1:
parent = objs[0]
else:
setattr(x, attr_name, value)
elif index == id2:
if row[id2]:
setattr(x, attr_name, value)
if parent:
setattr(x, 'parent', parent)
else:
setattr(x, attr_name, value)
else:
setattr(x, attr_name, value)
x.save()
return { 'redirect': '/schsys/ok/' }
else:
form = ImportTableForm(request.POST, request.FILES)
else:
form = ImportTableForm()
return { 'form': form }
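# Note on the expected spreadsheet layout (illustrative): the first row holds
# the model field names. If one field name appears twice (e.g. 'name', ...,
# 'name'), the rows are imported as a tree: when the second duplicated column
# is filled, the first one identifies the parent record and the second one is
# the new record's own value; when it is empty, the row is a top-level record.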
| lgpl-3.0 | 3,185,471,773,918,943,700 | 31.065637 | 112 | 0.429741 | false | 4.82288 | false | false | false |
cmsdaq/hltd | lib/elasticsearch-py-7.0.0/elasticsearch/transport.py | 2 | 15172 | import time
from itertools import chain
from .connection import Urllib3HttpConnection
from .connection_pool import ConnectionPool, DummyConnectionPool
from .serializer import JSONSerializer, Deserializer, DEFAULT_SERIALIZERS
from .exceptions import ConnectionError, TransportError, SerializationError, \
ConnectionTimeout
def get_host_info(node_info, host):
"""
    Simple callback that takes the node info from `/_cluster/nodes` and the
    parsed connection information, and returns the connection information. If
    `None` is returned this node will be skipped.
Useful for filtering nodes (by proximity for example) or if additional
information needs to be provided for the :class:`~elasticsearch.Connection`
class. By default master only nodes are filtered out since they shouldn't
typically be used for API operations.
:arg node_info: node information from `/_cluster/nodes`
:arg host: connection information (host, port) extracted from the node info
"""
# ignore master only nodes
if node_info.get('roles', []) == ['master']:
return None
return host
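# Illustrative custom callback (the 'attributes'/'zone' fields are made up):
# the same hook can filter nodes by any criterion, e.g. keep only one zone.
#
#     def same_zone_only(node_info, host):
#         if node_info.get('attributes', {}).get('zone') != 'zone-a':
#             return None
#         return host
#
# and pass it as Transport(..., host_info_callback=same_zone_only).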
class Transport(object):
"""
    Encapsulation of transport-related logic. Handles instantiation of the
individual connections as well as creating a connection pool to hold them.
Main interface is the `perform_request` method.
"""
def __init__(self, hosts, connection_class=Urllib3HttpConnection,
connection_pool_class=ConnectionPool, host_info_callback=get_host_info,
sniff_on_start=False, sniffer_timeout=None, sniff_timeout=.1,
sniff_on_connection_fail=False, serializer=JSONSerializer(), serializers=None,
default_mimetype='application/json', max_retries=3, retry_on_status=(502, 503, 504, ),
retry_on_timeout=False, send_get_body_as='GET', **kwargs):
"""
:arg hosts: list of dictionaries, each containing keyword arguments to
create a `connection_class` instance
:arg connection_class: subclass of :class:`~elasticsearch.Connection` to use
:arg connection_pool_class: subclass of :class:`~elasticsearch.ConnectionPool` to use
:arg host_info_callback: callback responsible for taking the node information from
            `/_cluster/nodes`, along with already extracted information, and
producing a list of arguments (same as `hosts` parameter)
:arg sniff_on_start: flag indicating whether to obtain a list of nodes
            from the cluster at startup time
:arg sniffer_timeout: number of seconds between automatic sniffs
:arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff
:arg sniff_timeout: timeout used for the sniff request - it should be a
fast api call and we are talking potentially to more nodes so we want
to fail quickly. Not used during initial sniffing (if
``sniff_on_start`` is on) when the connection still isn't
initialized.
:arg serializer: serializer instance
:arg serializers: optional dict of serializer instances that will be
used for deserializing data coming from the server. (key is the mimetype)
:arg default_mimetype: when no mimetype is specified by the server
response assume this mimetype, defaults to `'application/json'`
:arg max_retries: maximum number of retries before an exception is propagated
:arg retry_on_status: set of HTTP status codes on which we should retry
on a different node. defaults to ``(502, 503, 504)``
:arg retry_on_timeout: should timeout trigger a retry on different
node? (default `False`)
:arg send_get_body_as: for GET requests with body this option allows
you to specify an alternate way of execution for environments that
don't support passing bodies with GET requests. If you set this to
'POST' a POST method will be used instead, if to 'source' then the body
will be serialized and passed as a query parameter `source`.
Any extra keyword arguments will be passed to the `connection_class`
when creating and instance unless overridden by that connection's
options provided as part of the hosts parameter.
"""
# serialization config
_serializers = DEFAULT_SERIALIZERS.copy()
# if a serializer has been specified, use it for deserialization as well
_serializers[serializer.mimetype] = serializer
# if custom serializers map has been supplied, override the defaults with it
if serializers:
_serializers.update(serializers)
# create a deserializer with our config
self.deserializer = Deserializer(_serializers, default_mimetype)
self.max_retries = max_retries
self.retry_on_timeout = retry_on_timeout
self.retry_on_status = retry_on_status
self.send_get_body_as = send_get_body_as
# data serializer
self.serializer = serializer
# store all strategies...
self.connection_pool_class = connection_pool_class
self.connection_class = connection_class
# ...save kwargs to be passed to the connections
self.kwargs = kwargs
self.hosts = hosts
# ...and instantiate them
self.set_connections(hosts)
# retain the original connection instances for sniffing
self.seed_connections = self.connection_pool.connections[:]
# sniffing data
self.sniffer_timeout = sniffer_timeout
self.sniff_on_connection_fail = sniff_on_connection_fail
self.last_sniff = time.time()
self.sniff_timeout = sniff_timeout
# callback to construct host dict from data in /_cluster/nodes
self.host_info_callback = host_info_callback
if sniff_on_start:
self.sniff_hosts(True)
def add_connection(self, host):
"""
Create a new :class:`~elasticsearch.Connection` instance and add it to the pool.
:arg host: kwargs that will be used to create the instance
"""
self.hosts.append(host)
self.set_connections(self.hosts)
def set_connections(self, hosts):
"""
Instantiate all the connections and create new connection pool to hold them.
Tries to identify unchanged hosts and re-use existing
:class:`~elasticsearch.Connection` instances.
:arg hosts: same as `__init__`
"""
# construct the connections
def _create_connection(host):
# if this is not the initial setup look at the existing connection
# options and identify connections that haven't changed and can be
# kept around.
if hasattr(self, 'connection_pool'):
for (connection, old_host) in self.connection_pool.connection_opts:
if old_host == host:
return connection
# previously unseen params, create new connection
kwargs = self.kwargs.copy()
kwargs.update(host)
return self.connection_class(**kwargs)
connections = map(_create_connection, hosts)
connections = list(zip(connections, hosts))
if len(connections) == 1:
self.connection_pool = DummyConnectionPool(connections)
else:
# pass the hosts dicts to the connection pool to optionally extract parameters from
self.connection_pool = self.connection_pool_class(connections, **self.kwargs)
def get_connection(self):
"""
        Retrieve a :class:`~elasticsearch.Connection` instance from the
:class:`~elasticsearch.ConnectionPool` instance.
"""
if self.sniffer_timeout:
if time.time() >= self.last_sniff + self.sniffer_timeout:
self.sniff_hosts()
return self.connection_pool.get_connection()
def _get_sniff_data(self, initial=False):
"""
        Perform the request to get sniffing information. Returns a list of
dictionaries (one per node) containing all the information from the
cluster.
It also sets the last_sniff attribute in case of a successful attempt.
In rare cases it might be possible to override this method in your
custom Transport class to serve data from alternative source like
configuration management.
"""
previous_sniff = self.last_sniff
try:
# reset last_sniff timestamp
self.last_sniff = time.time()
# go through all current connections as well as the
# seed_connections for good measure
for c in chain(self.connection_pool.connections, self.seed_connections):
try:
# use small timeout for the sniffing request, should be a fast api call
_, headers, node_info = c.perform_request(
'GET', '/_nodes/_all/http',
timeout=self.sniff_timeout if not initial else None)
node_info = self.deserializer.loads(node_info, headers.get('content-type'))
break
except (ConnectionError, SerializationError):
pass
else:
raise TransportError("N/A", "Unable to sniff hosts.")
except:
# keep the previous value on error
self.last_sniff = previous_sniff
raise
return list(node_info['nodes'].values())
def _get_host_info(self, host_info):
host = {}
address = host_info.get('http', {}).get('publish_address')
# malformed or no address given
if not address or ':' not in address:
return None
host['host'], host['port'] = address.rsplit(':', 1)
host['port'] = int(host['port'])
return self.host_info_callback(host_info, host)
def sniff_hosts(self, initial=False):
"""
Obtain a list of nodes from the cluster and create a new connection
pool using the information retrieved.
To extract the node connection parameters use the ``nodes_to_host_callback``.
:arg initial: flag indicating if this is during startup
(``sniff_on_start``), ignore the ``sniff_timeout`` if ``True``
"""
node_info = self._get_sniff_data(initial)
hosts = list(filter(None, (self._get_host_info(n) for n in node_info)))
# we weren't able to get any nodes or host_info_callback blocked all -
# raise error.
if not hosts:
raise TransportError("N/A", "Unable to sniff hosts - no viable hosts found.")
self.set_connections(hosts)
def mark_dead(self, connection):
"""
Mark a connection as dead (failed) in the connection pool. If sniffing
on failure is enabled this will initiate the sniffing process.
:arg connection: instance of :class:`~elasticsearch.Connection` that failed
"""
# mark as dead even when sniffing to avoid hitting this host during the sniff process
self.connection_pool.mark_dead(connection)
if self.sniff_on_connection_fail:
self.sniff_hosts()
def perform_request(self, method, url, headers=None, params=None, body=None):
"""
Perform the actual request. Retrieve a connection from the connection
        pool, pass all the information to its perform_request method and
return the data.
If an exception was raised, mark the connection as failed and retry (up
to `max_retries` times).
        If the operation was successful and the connection used was previously
        marked as dead, mark it as live, resetting its failure count.
:arg method: HTTP method to use
:arg url: absolute url (without host) to target
:arg headers: dictionary of headers, will be handed over to the
underlying :class:`~elasticsearch.Connection` class
:arg params: dictionary of query parameters, will be handed over to the
underlying :class:`~elasticsearch.Connection` class for serialization
        :arg body: body of the request, will be serialized using the serializer and
passed to the connection
"""
if body is not None:
body = self.serializer.dumps(body)
# some clients or environments don't support sending GET with body
if method in ('HEAD', 'GET') and self.send_get_body_as != 'GET':
# send it as post instead
if self.send_get_body_as == 'POST':
method = 'POST'
# or as source parameter
elif self.send_get_body_as == 'source':
if params is None:
params = {}
params['source'] = body
body = None
if body is not None:
try:
body = body.encode('utf-8', 'surrogatepass')
except (UnicodeDecodeError, AttributeError):
# bytes/str - no need to re-encode
pass
ignore = ()
timeout = None
if params:
timeout = params.pop('request_timeout', None)
ignore = params.pop('ignore', ())
if isinstance(ignore, int):
ignore = (ignore, )
for attempt in range(self.max_retries + 1):
connection = self.get_connection()
try:
# add a delay before attempting the next retry
# 0, 1, 3, 7, etc...
delay = 2**attempt - 1
time.sleep(delay)
status, headers_response, data = connection.perform_request(method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
except TransportError as e:
if method == 'HEAD' and e.status_code == 404:
return False
retry = False
if isinstance(e, ConnectionTimeout):
retry = self.retry_on_timeout
elif isinstance(e, ConnectionError):
retry = True
elif e.status_code in self.retry_on_status:
retry = True
if retry:
# only mark as dead if we are retrying
self.mark_dead(connection)
# raise exception on last retry
if attempt == self.max_retries:
raise
else:
raise
else:
# connection didn't fail, confirm it's live status
self.connection_pool.mark_live(connection)
if method == 'HEAD':
return 200 <= status < 300
if data:
data = self.deserializer.loads(data, headers_response.get('content-type'))
return data
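    # Illustrative call (endpoint and values are made up): per-request options
    # such as 'request_timeout' and 'ignore' are popped from params above; the
    # rest is forwarded to the connection:
    #     transport.perform_request('GET', '/_count',
    #                               params={'request_timeout': 10, 'ignore': 404})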
def close(self):
"""
Explicitly closes connections
"""
self.connection_pool.close()
| lgpl-3.0 | -6,250,744,510,277,125,000 | 41.617978 | 151 | 0.615476 | false | 4.719129 | false | false | false |
treejames/viewfinder | backend/services/sms_util.py | 13 | 2242 | # -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""SMS utilities.
Helper methods used when sending SMS messages.
"""
__author__ = '[email protected] (Andy Kimball)'
import re
from tornado import escape
# Regular expression used to identify valid GSM characters, which is the 7-bit character set
# that is widely supported by SMS systems across the world (i.e. *not* ASCII):
# https://en.wikipedia.org/wiki/GSM_03.38
_good_gsm_chars = u'@£$¥èéùìòÇ\nØø\rÅå_ÆæßÉ !"#%&\'()*+,-./0123456789:;<=>?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà'
assert len(_good_gsm_chars) == 116
_gsm_re = re.compile(u'^[%s]*$' % re.escape(_good_gsm_chars))
# Greek capital letters contained in the GSM character set, and the currency symbol don't get
# sent properly in the GSM encoding (they get mapped into other chars by some intermediary).
_bad_gsm_chars = u'¤ΔΦΓΛΩΠΨΣΘΞ'
assert len(escape.to_unicode(_bad_gsm_chars)) == 11
_force_unicode_re = re.compile(u'^[%s%s]*$' % (re.escape(_bad_gsm_chars), re.escape(_good_gsm_chars)))
# Maximum number of GSM encoded chars that Twilio can send.
MAX_GSM_CHARS = 160
# Maximum number of UTF-16 encoded chars that Twilio can send. The SMS spec really uses the
# UCS-2 encoding, but many/most devices allow UTF-16, which allows non-BMP chars to be used
# (such as Emoji).
MAX_UTF16_CHARS = 70
def ForceUnicode(value):
"""Returns true if the value contains only GSM chars, but also contains at least one
problematic GSM char, such as a Greek capital letter. In this case, the caller should
force the UCS-2 SMS encoding so that GSM will not be attempted.
"""
value = escape.to_unicode(value)
return _force_unicode_re.search(value) and not _gsm_re.search(value)
def IsOneSMSMessage(value):
"""Returns true if the value can be sent in a single SMS message. If the value contains
only GSM chars, then it can be up to 160 chars. Otherwise, it must be sent as Unicode and
can only be up to 70 chars.
"""
value = escape.to_unicode(value)
utf16_count = len(value.encode('utf-16-be')) / 2
if _gsm_re.search(value):
return utf16_count <= MAX_GSM_CHARS
return utf16_count <= MAX_UTF16_CHARS
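# Minimal usage sketch added for illustration (message text and helper name are
# made up); it shows how callers are expected to combine the helpers above.
def _example_sms_checks():
  msg = u'Meet at the caf\u00e9 at 9'
  if not IsOneSMSMessage(msg):
    raise ValueError('message does not fit in a single SMS')
  # Callers should force the UCS-2 encoding when this is truthy, so problematic
  # GSM chars (e.g. Greek capitals) are not mangled in transit.
  return bool(ForceUnicode(msg))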
| apache-2.0 | -8,735,502,104,594,132,000 | 36.982759 | 140 | 0.719927 | false | 3.001362 | false | false | false |
wangzitian0/BOJ-V4 | submission/migrations/0001_initial.py | 1 | 1249 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('problem', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('datetime', models.DateTimeField(auto_now_add=True)),
('score', models.IntegerField(default=0)),
('status', models.CharField(default=b'QUE', max_length=3)),
('running_time', models.IntegerField(default=0)),
('running_memory', models.IntegerField(default=0)),
('info', models.TextField(blank=True)),
('code', models.TextField()),
('Language', models.ForeignKey(related_name='submissions', to='problem.Language')),
('problem', models.ForeignKey(to='problem.Problem')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | -6,486,124,797,967,729,000 | 38.03125 | 114 | 0.578062 | false | 4.525362 | false | false | false |
brclark-usgs/flopy | flopy/modflow/mflpf.py | 1 | 25430 | """
mflpf module. Contains the ModflowLpf class. Note that the user can access
the ModflowLpf class as `flopy.modflow.ModflowLpf`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?lpf.htm>`_.
"""
import sys
import numpy as np
from .mfpar import ModflowPar as mfpar
from ..pakbase import Package
from ..utils import Util2d, Util3d, read1d
from ..utils.flopy_io import line_parse
class ModflowLpf(Package):
"""
MODFLOW Layer Property Flow Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
        (default is None).
hdry : float
Is the head that is assigned to cells that are converted to dry during
a simulation. Although this value plays no role in the model
calculations, it is useful as an indicator when looking at the
resulting heads that are output from the model. HDRY is thus similar
to HNOFLO in the Basic Package, which is the value assigned to cells
that are no-flow cells at the start of a model simulation.
(default is -1.e30).
laytyp : int or array of ints (nlay)
Layer type, contains a flag for each layer that specifies the layer type.
0 confined
>0 convertible
<0 convertible unless the THICKSTRT option is in effect.
(default is 0).
layavg : int or array of ints (nlay)
Layer average
0 is harmonic mean
1 is logarithmic mean
2 is arithmetic mean of saturated thickness and logarithmic mean of
of hydraulic conductivity
(default is 0).
chani : float or array of floats (nlay)
contains a value for each layer that is a flag or the horizontal
anisotropy. If CHANI is less than or equal to 0, then variable HANI
defines horizontal anisotropy. If CHANI is greater than 0, then CHANI
is the horizontal anisotropy for the entire layer, and HANI is not
read. If any HANI parameters are used, CHANI for all layers must be
less than or equal to 0. Use as many records as needed to enter a
value of CHANI for each layer. The horizontal anisotropy is the ratio
of the hydraulic conductivity along columns (the Y direction) to the
hydraulic conductivity along rows (the X direction).
(default is 1).
layvka : float or array of floats (nlay)
a flag for each layer that indicates whether variable VKA is vertical
hydraulic conductivity or the ratio of horizontal to vertical
hydraulic conductivity.
0: VKA is vertical hydraulic conductivity
not 0: VKA is the ratio of horizontal to vertical hydraulic conductivity
(default is 0).
laywet : float or array of floats (nlay)
contains a flag for each layer that indicates if wetting is active.
0 wetting is inactive
not 0 wetting is active
(default is 0).
wetfct : float
is a factor that is included in the calculation of the head that is
initially established at a cell when it is converted from dry to wet.
(default is 0.1).
iwetit : int
is the iteration interval for attempting to wet cells. Wetting is
attempted every IWETIT iteration. If using the PCG solver
(Hill, 1990), this applies to outer iterations, not inner iterations.
If IWETIT less than or equal to 0, it is changed to 1.
(default is 1).
ihdwet : int
is a flag that determines which equation is used to define the
initial head at cells that become wet.
(default is 0)
hk : float or array of floats (nlay, nrow, ncol)
is the hydraulic conductivity along rows. HK is multiplied by
horizontal anisotropy (see CHANI and HANI) to obtain hydraulic
conductivity along columns.
(default is 1.0).
hani : float or array of floats (nlay, nrow, ncol)
is the ratio of hydraulic conductivity along columns to hydraulic
conductivity along rows, where HK of item 10 specifies the hydraulic
conductivity along rows. Thus, the hydraulic conductivity along
columns is the product of the values in HK and HANI.
(default is 1.0).
vka : float or array of floats (nlay, nrow, ncol)
is either vertical hydraulic conductivity or the ratio of horizontal
to vertical hydraulic conductivity depending on the value of LAYVKA.
(default is 1.0).
ss : float or array of floats (nlay, nrow, ncol)
is specific storage unless the STORAGECOEFFICIENT option is used.
When STORAGECOEFFICIENT is used, Ss is confined storage coefficient.
(default is 1.e-5).
sy : float or array of floats (nlay, nrow, ncol)
is specific yield.
(default is 0.15).
vkcb : float or array of floats (nlay, nrow, ncol)
is the vertical hydraulic conductivity of a Quasi-three-dimensional
confining bed below a layer. (default is 0.0). Note that if an array
is passed for vkcb it must be of size (nlay, nrow, ncol) even though
the information for the bottom layer is not needed.
wetdry : float or array of floats (nlay, nrow, ncol)
is a combination of the wetting threshold and a flag to indicate
which neighboring cells can cause a cell to become wet.
(default is -0.01).
storagecoefficient : boolean
indicates that variable Ss and SS parameters are read as storage
coefficient rather than specific storage.
(default is False).
constantcv : boolean
indicates that vertical conductance for an unconfined cell is
computed from the cell thickness rather than the saturated thickness.
The CONSTANTCV option automatically invokes the NOCVCORRECTION
option. (default is False).
thickstrt : boolean
indicates that layers having a negative LAYTYP are confined, and their
cell thickness for conductance calculations will be computed as
STRT-BOT rather than TOP-BOT. (default is False).
nocvcorrection : boolean
indicates that vertical conductance is not corrected when the vertical
flow correction is applied. (default is False).
novfc : boolean
turns off the vertical flow correction under dewatered conditions.
This option turns off the vertical flow calculation described on p.
5-8 of USGS Techniques and Methods Report 6-A16 and the vertical
conductance correction described on p. 5-18 of that report.
(default is False).
extension : string
Filename extension (default is 'lpf')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
the model name and .cbc extension (for example, modflowtest.cbc),
if ipakcbc is a number greater than zero. If a single string is passed
the package will be set to the string and cbc output name will be
created using the model name and .cbc extension, if ipakcbc is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lpf = flopy.modflow.ModflowLpf(m)
"""
'Layer-property flow package class\n'
def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0,
laywet=0, ipakcb=None, hdry=-1E+30, iwdflg=0, wetfct=0.1,
iwetit=1, ihdwet=0, hk=1.0, hani=1.0, vka=1.0, ss=1e-5,
sy=0.15, vkcb=0.0, wetdry=-0.01, storagecoefficient=False,
constantcv=False, thickstrt=False, nocvcorrection=False,
novfc=False, extension='lpf',
unitnumber=None, filenames=None):
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowLpf.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(ipakcb, fname=fname,
package=ModflowLpf.ftype())
else:
ipakcb = 0
# Fill namefile items
name = [ModflowLpf.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'lpf.htm'
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# item 1
self.ipakcb = ipakcb
self.hdry = hdry # Head in cells that are converted to dry during a simulation
self.nplpf = 0 # number of LPF parameters
self.laytyp = Util2d(model, (nlay,), np.int, laytyp, name='laytyp')
self.layavg = Util2d(model, (nlay,), np.int, layavg, name='layavg')
self.chani = Util2d(model, (nlay,), np.float32, chani, name='chani')
self.layvka = Util2d(model, (nlay,), np.int, layvka, name='layvka')
self.laywet = Util2d(model, (nlay,), np.int, laywet, name='laywet')
self.wetfct = wetfct # Factor that is included in the calculation of the head when a cell is converted from dry to wet
self.iwetit = iwetit # Iteration interval for attempting to wet cells
self.ihdwet = ihdwet # Flag that determines which equation is used to define the initial head at cells that become wet
self.options = ' '
if storagecoefficient:
self.options = self.options + 'STORAGECOEFFICIENT '
if constantcv: self.options = self.options + 'CONSTANTCV '
if thickstrt: self.options = self.options + 'THICKSTRT '
if nocvcorrection: self.options = self.options + 'NOCVCORRECTION '
if novfc: self.options = self.options + 'NOVFC '
self.hk = Util3d(model, (nlay, nrow, ncol), np.float32, hk, name='hk',
locat=self.unit_number[0])
self.hani = Util3d(model, (nlay, nrow, ncol), np.float32, hani,
name='hani', locat=self.unit_number[0])
keys = []
for k in range(nlay):
key = 'vka'
if self.layvka[k] != 0:
key = 'vani'
keys.append(key)
self.vka = Util3d(model, (nlay, nrow, ncol), np.float32, vka,
name=keys, locat=self.unit_number[0])
tag = 'ss'
if storagecoefficient:
tag = 'storage'
self.ss = Util3d(model, (nlay, nrow, ncol), np.float32, ss, name=tag,
locat=self.unit_number[0])
self.sy = Util3d(model, (nlay, nrow, ncol), np.float32, sy, name='sy',
locat=self.unit_number[0])
self.vkcb = Util3d(model, (nlay, nrow, ncol), np.float32, vkcb,
name='vkcb', locat=self.unit_number[0])
self.wetdry = Util3d(model, (nlay, nrow, ncol), np.float32, wetdry,
name='wetdry', locat=self.unit_number[0])
self.parent.add_package(self)
return
def write_file(self, check=True, f=None):
"""
Write the package file.
Parameters
----------
check : boolean
Check package data for common errors. (default True)
Returns
-------
None
"""
if check: # allows turning off package checks when writing files at model level
self.check(f='{}.chk'.format(self.name[0]),
verbose=self.parent.verbose, level=1)
# get model information
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
dis = self.parent.get_package('DIS')
if dis is None:
dis = self.parent.get_package('DISU')
# Open file for writing
if f is None:
f = open(self.fn_path, 'w')
# Item 0: text
f.write('{}\n'.format(self.heading))
# Item 1: IBCFCB, HDRY, NPLPF
f.write('{0:10d}{1:10.6G}{2:10d} {3:s}\n'.format(self.ipakcb,
self.hdry,
self.nplpf,
self.options))
# LAYTYP array
f.write(self.laytyp.string)
# LAYAVG array
f.write(self.layavg.string)
# CHANI array
f.write(self.chani.string)
# LAYVKA array
f.write(self.layvka.string)
# LAYWET array
f.write(self.laywet.string)
# Item 7: WETFCT, IWETIT, IHDWET
iwetdry = self.laywet.sum()
if iwetdry > 0:
f.write('{0:10f}{1:10d}{2:10d}\n'.format(self.wetfct,
self.iwetit,
self.ihdwet))
transient = not dis.steady.all()
for k in range(nlay):
f.write(self.hk[k].get_file_entry())
if self.chani[k] <= 0.:
f.write(self.hani[k].get_file_entry())
f.write(self.vka[k].get_file_entry())
if transient == True:
f.write(self.ss[k].get_file_entry())
if self.laytyp[k] != 0:
f.write(self.sy[k].get_file_entry())
if dis.laycbd[k] > 0:
f.write(self.vkcb[k].get_file_entry())
if (self.laywet[k] != 0 and self.laytyp[k] != 0):
f.write(self.wetdry[k].get_file_entry())
f.close()
return
@staticmethod
def load(f, model, ext_unit_dict=None, check=True):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
check : boolean
Check package data for common errors. (default True)
Returns
-------
lpf : ModflowLpf object
ModflowLpf object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lpf = flopy.modflow.ModflowLpf.load('test.lpf', m)
"""
if model.verbose:
sys.stdout.write('loading lpf package file...\n')
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# determine problem dimensions
nr, nc, nlay, nper = model.get_nrow_ncol_nlay_nper()
dis = model.get_package('DIS')
if dis is None:
dis = model.get_package('DISU')
# Item 1: IBCFCB, HDRY, NPLPF - line already read above
if model.verbose:
print(' loading IBCFCB, HDRY, NPLPF...')
t = line_parse(line)
ipakcb, hdry, nplpf = int(t[0]), float(t[1]), int(t[2])
#if ipakcb != 0:
# model.add_pop_key_list(ipakcb)
# ipakcb = 53
# options
storagecoefficient = False
constantcv = False
thickstrt = False
nocvcorrection = False
novfc = False
if len(t) > 3:
for k in range(3, len(t)):
if 'STORAGECOEFFICIENT' in t[k].upper():
storagecoefficient = True
elif 'CONSTANTCV' in t[k].upper():
constantcv = True
elif 'THICKSTRT' in t[k].upper():
thickstrt = True
elif 'NOCVCORRECTION' in t[k].upper():
nocvcorrection = True
elif 'NOVFC' in t[k].upper():
novfc = True
# LAYTYP array
if model.verbose:
print(' loading LAYTYP...')
laytyp = np.empty((nlay), dtype=np.int)
laytyp = read1d(f, laytyp)
# LAYAVG array
if model.verbose:
print(' loading LAYAVG...')
layavg = np.empty((nlay), dtype=np.int)
layavg = read1d(f, layavg)
# CHANI array
if model.verbose:
print(' loading CHANI...')
chani = np.empty((nlay), dtype=np.float32)
chani = read1d(f, chani)
# LAYVKA array
if model.verbose:
print(' loading LAYVKA...')
layvka = np.empty((nlay), dtype=np.float32)
layvka = read1d(f, layvka)
# LAYWET array
if model.verbose:
print(' loading LAYWET...')
laywet = np.empty((nlay), dtype=np.int)
laywet = read1d(f, laywet)
# Item 7: WETFCT, IWETIT, IHDWET
wetfct, iwetit, ihdwet = None, None, None
iwetdry = laywet.sum()
if iwetdry > 0:
if model.verbose:
print(' loading WETFCT, IWETIT, IHDWET...')
line = f.readline()
t = line.strip().split()
wetfct, iwetit, ihdwet = float(t[0]), int(t[1]), int(t[2])
# parameters data
par_types = []
if nplpf > 0:
par_types, parm_dict = mfpar.load(f, nplpf, model.verbose)
# print parm_dict
# non-parameter data
transient = not dis.steady.all()
hk = [0] * nlay
hani = [0] * nlay
vka = [0] * nlay
ss = [0] * nlay
sy = [0] * nlay
vkcb = [0] * nlay
wetdry = [0] * nlay
# load by layer
for k in range(nlay):
# allow for unstructured changing nodes per layer
if nr is None:
nrow = 1
ncol = nc[k]
else:
nrow = nr
ncol = nc
# hk
if model.verbose:
print(' loading hk layer {0:3d}...'.format(k + 1))
if 'hk' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hk',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict,
findlayer=k)
hk[k] = t
# hani
if chani[k] <= 0.:
if model.verbose:
print(' loading hani layer {0:3d}...'.format(k + 1))
if 'hani' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hani',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hani',
parm_dict, findlayer=k)
hani[k] = t
# vka
if model.verbose:
print(' loading vka layer {0:3d}...'.format(k + 1))
key = 'vk'
if layvka[k] != 0:
key = 'vani'
if 'vk' not in par_types and 'vani' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, key,
ext_unit_dict)
else:
line = f.readline()
key = 'vk'
if 'vani' in par_types:
key = 'vani'
t = mfpar.parameter_fill(model, (nrow, ncol), key, parm_dict,
findlayer=k)
vka[k] = t
# storage properties
if transient:
# ss
if model.verbose:
print(' loading ss layer {0:3d}...'.format(k + 1))
if 'ss' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'ss',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'ss',
parm_dict, findlayer=k)
ss[k] = t
# sy
if laytyp[k] != 0:
if model.verbose:
print(' loading sy layer {0:3d}...'.format(k + 1))
if 'sy' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'sy',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'sy',
parm_dict, findlayer=k)
sy[k] = t
# vkcb
if dis.laycbd[k] > 0:
if model.verbose:
print(' loading vkcb layer {0:3d}...'.format(k + 1))
if 'vkcb' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vkcb',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'vkcb',
parm_dict, findlayer=k)
vkcb[k] = t
# wetdry
if (laywet[k] != 0 and laytyp[k] != 0):
if model.verbose:
print(' loading wetdry layer {0:3d}...'.format(k + 1))
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'wetdry',
ext_unit_dict)
wetdry[k] = t
# set package unit number
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowLpf.ftype())
if ipakcb > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# create instance of lpf class
lpf = ModflowLpf(model, ipakcb=ipakcb, laytyp=laytyp, layavg=layavg,
chani=chani, layvka=layvka, laywet=laywet, hdry=hdry,
iwdflg=iwetdry, wetfct=wetfct, iwetit=iwetit,
ihdwet=ihdwet, hk=hk, hani=hani, vka=vka, ss=ss,
sy=sy, vkcb=vkcb, wetdry=wetdry,
storagecoefficient=storagecoefficient,
constantcv=constantcv, thickstrt=thickstrt,
novfc=novfc,
unitnumber=unitnumber, filenames=filenames)
if check:
lpf.check(f='{}.chk'.format(lpf.name[0]),
verbose=lpf.parent.verbose, level=0)
return lpf
@staticmethod
def ftype():
return 'LPF'
@staticmethod
def defaultunit():
return 15
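def _example_lpf_roundtrip():
    """
    Illustrative sketch only (not part of flopy): build a minimal model with a
    DIS package, attach an LPF package using the constructor documented above,
    write it to disk and load it back. Workspace and file names are the flopy
    defaults; the parameter values are made up.
    """
    import flopy
    m = flopy.modflow.Modflow()
    flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10)
    lpf = flopy.modflow.ModflowLpf(m, hk=10., vka=10., ipakcb=53)
    lpf.write_file(check=False)
    return flopy.modflow.ModflowLpf.load(lpf.fn_path, m, check=False)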
| bsd-3-clause | -4,485,107,904,453,336,000 | 38.950081 | 127 | 0.527133 | false | 3.827514 | false | false | false |
stadtgestalten/stadtgestalten | grouprise/features/subscriptions/migrations/0014_auto_20171102_1045.py | 1 | 1144 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-02 09:45
from __future__ import unicode_literals
from django.db import migrations
from django.db.transaction import atomic
from django.db.utils import IntegrityError
def add_subscriptions_for_memberships(apps, schema_editor):
ContentType = apps.get_model('contenttypes', 'ContentType')
Membership = apps.get_model('memberships', 'Membership')
Subscription = apps.get_model('subscriptions', 'Subscription')
for membership in Membership.objects.all():
try:
with atomic():
Subscription.objects.create(
subscribed_to_id=membership.group.id,
subscribed_to_type=ContentType.objects.get_for_model(membership.group),
subscriber=membership.member)
except IntegrityError:
pass
class Migration(migrations.Migration):
dependencies = [
('memberships', '0014_auto_20170609_1029'),
('subscriptions', '0013_auto_20170918_1340'),
]
operations = [
migrations.RunPython(add_subscriptions_for_memberships),
]
| agpl-3.0 | 4,482,073,682,586,433,500 | 32.647059 | 95 | 0.651224 | false | 4.333333 | false | false | false |
gavinjackson/junkcode | eco_scrapy/ecocrackenback.py | 1 | 3671 | import scrapy
import time
from openpyxl import Workbook
import sqlite3
# This app prints data to standard out, creates a spreadsheet and updates an sqlite3 database
class EcocrackenbackSpider(scrapy.Spider):
name = 'Ecocrackenback Availability'
wb = Workbook()
properties = {
'33': 'Ecocrackenback 2',
'34': 'Ecocrackenback 3',
'35': 'Ecocrackenback 4',
'36': 'Ecocrackenback 5',
'37': 'Ecocrackenback 7',
'38': 'Ecocrackenback 9',
'39': 'Ecocrackenback 10',
'40': 'Ecocrackenback 11',
'41': 'Ecocrackenback 12',
'42': 'Ecocrackenback 13',
'43': 'Ecocrackenback 14',
'46': 'Ecocrackenback 15',
'44': 'Ecocrackenback 16',
'50': 'Ecocrackenback 17',
'45': 'Ecocrackenback 18',
'49': 'Ecocrackenback 19'
}
ws1 = wb.active
ws1.append(["Ecocrackenback bookings last extracted {0}".format(time.strftime("%c"))])
start_urls = [ 'http://www.jindabyneaccommodationcentre.com.au/accommodation/{0}'.format(p) for p in properties.keys() ]
conn = sqlite3.connect('./eco.db')
c = conn.cursor()
c.execute("insert into eco_execution_run values (NULL, '{0}');".format(time.strftime("%Y-%m-%d %H:%M:%S")))
eid = c.lastrowid
conn.commit()
def parse(self, response):
print('\n= {0} ='.format(self.properties[response.url.split('/')[-1:][0]]))
self.c.execute("insert into eco_property values (NULL, {0}, '{1}', '{2}');".format(self.eid, self.properties[response.url.split('/')[-1:][0]], response.url.split('/')[-1:][0]))
pid = self.c.lastrowid
self.conn.commit()
ws = self.wb.create_sheet(title="{0}".format(self.properties[response.url.split('/')[-1:][0]]))
print('*'*80)
attributes = {}
rows = response.xpath('//*[@id="ipage"]/div[4]/table/tr')
for index, row in enumerate(rows):
if index > 0:
print('== {0} =='.format(row.xpath('td[1]/text()').extract()[0]))
self.c.execute("insert into eco_month values (NULL, {0}, {1}, '{2}');".format(self.eid, pid, row.xpath('td[1]/text()').extract()[0]))
mid = self.c.lastrowid
self.conn.commit()
ws.append([row.xpath('td[1]/text()').extract()[0]])
print('AVAILABLE {0}'.format(row.css('.available').xpath('@title').extract()))
for str_date in row.css('.available').xpath('@title').extract():
from datetime import datetime
date_object = datetime.strptime(str_date, '%a %d-%b-%Y')
self.c.execute("insert into eco_day values (NULL, {0}, 'AVAILABLE', '{1}', '{2}')".format(mid, str_date.split(' ')[0], date_object.strftime('%Y-%m-%d')))
self.conn.commit()
ws.append(['AVAILABLE'] + row.css('.available').xpath('@title').extract())
print('BOOKED {0}'.format(row.css('.booked').xpath('@title').extract()))
for str_date in row.css('.booked').xpath('@title').extract():
from datetime import datetime
date_object = datetime.strptime(str_date, '%a %d-%b-%Y')
self.c.execute("insert into eco_day values (NULL, {0}, 'BOOKED', '{1}', '{2}')".format(mid, str_date.split(' ')[0], date_object.strftime('%Y-%m-%d')))
self.conn.commit()
ws.append(['BOOKED'] + row.css('.booked').xpath('@title').extract())
ws.append([''])
def closed(self, reason):
self.wb.save(filename = "./output.xlsx")
self.conn.commit()
self.c.close()
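# Illustrative note (added): a standalone spider like this is typically run with
# "scrapy runspider ecocrackenback.py"; it assumes an existing ./eco.db that
# already holds the eco_execution_run, eco_property, eco_month and eco_day
# tables used above, and it writes ./output.xlsx when the crawl closes.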
| unlicense | 6,534,433,803,781,908,000 | 48.608108 | 184 | 0.550804 | false | 3.364803 | false | false | false |
breunigs/livestreamer | src/livestreamer/plugins/livestream.py | 4 | 3365 | import re
from collections import defaultdict
from livestreamer.compat import urljoin
from livestreamer.exceptions import PluginError, NoStreamsError
from livestreamer.plugin import Plugin
from livestreamer.stream import AkamaiHDStream, HLSStream
from livestreamer.utils import urlget, verifyjson, res_xml, parse_json
SWF_URL = "http://cdn.livestream.com/swf/hdplayer-2.0.swf"
class Livestream(Plugin):
@classmethod
def default_stream_types(cls, streams):
return ["akamaihd", "hls"]
@classmethod
def can_handle_url(self, url):
return "new.livestream.com" in url
def _get_stream_info(self):
res = urlget(self.url)
match = re.search("window.config = ({.+})", res.text)
if match:
config = match.group(1)
return parse_json(config, "config JSON")
def _parse_smil(self, url, swfurl):
res = urlget(url)
smil = res_xml(res, "SMIL config")
streams = {}
httpbase = smil.find("{http://www.w3.org/2001/SMIL20/Language}head/"
"{http://www.w3.org/2001/SMIL20/Language}meta[@name='httpBase']")
if not (httpbase is not None and httpbase.attrib.get("content")):
raise PluginError("Missing HTTP base in SMIL")
httpbase = httpbase.attrib.get("content")
videos = smil.findall("{http://www.w3.org/2001/SMIL20/Language}body/"
"{http://www.w3.org/2001/SMIL20/Language}switch/"
"{http://www.w3.org/2001/SMIL20/Language}video")
for video in videos:
url = urljoin(httpbase, video.attrib.get("src"))
bitrate = int(video.attrib.get("system-bitrate"))
streams[bitrate] = AkamaiHDStream(self.session, url,
swf=swfurl)
return streams
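    # Illustrative shape (added; values are made up) of the SMIL document
    # consumed above:
    # <smil xmlns="http://www.w3.org/2001/SMIL20/Language">
    #   <head><meta name="httpBase" content="http://example.invalid/live/"/></head>
    #   <body><switch><video src="stream_1396k.mp4" system-bitrate="1396000"/></switch></body>
    # </smil>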
def _get_streams(self):
self.logger.debug("Fetching stream info")
info = self._get_stream_info()
if not info:
raise NoStreamsError(self.url)
event = verifyjson(info, "event")
streaminfo = verifyjson(event, "stream_info")
if not streaminfo or not streaminfo.get("is_live"):
raise NoStreamsError(self.url)
streams = defaultdict(list)
play_url = streaminfo.get("play_url")
if play_url:
swfurl = info.get("hdPlayerSwfUrl") or SWF_URL
if not swfurl.startswith("http://"):
swfurl = "http://" + swfurl
qualities = streaminfo.get("qualities", [])
smil = self._parse_smil(streaminfo["play_url"], swfurl)
for bitrate, stream in smil.items():
name = "{0}k".format(bitrate/1000)
for quality in qualities:
if quality["bitrate"] == bitrate:
name = "{0}p".format(quality["height"])
streams[name].append(stream)
m3u8_url = streaminfo.get("m3u8_url")
if m3u8_url:
hls_streams = HLSStream.parse_variant_playlist(self.session,
m3u8_url,
namekey="pixels")
for name, stream in hls_streams.items():
streams[name].append(stream)
return streams
__plugin__ = Livestream
| bsd-2-clause | 2,662,027,426,692,755,500 | 34.421053 | 94 | 0.564636 | false | 3.828214 | false | false | false |
DRL/GenomeBiology2016_globodera_rostochiensis | scripts/extractRegionFromCoordinates.py | 1 | 9707 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File : extractRegionFromCoordinates.py
Author : Dominik R. Laetsch, dominik.laetsch at gmail dot com
Version : 0.1
Description :
- INPUT: fasta file, gff file (with intron features), coordinates
- OUTPUT: line for each intron
"""
from __future__ import division
import sys, time
class DataObj():
def __init__(self, filename):
self.filename = filename
self.geneObj_order = []
self.geneObj_dict = {}
def add_geneObj(self, geneObj):
if not geneObj.name in self.geneObj_dict:
self.geneObj_order.append(geneObj.name)
self.geneObj_dict[geneObj.name] = geneObj
def add_intronObj(self, intronObj):
gene_name = intronObj.name
if gene_name in self.geneObj_dict:
self.geneObj_dict[gene_name].add_intronObj(intronObj)
else:
sys.exit("ERROR1")
def yield_introns(self):
for gene_name in self.geneObj_order:
geneObj = self.geneObj_dict[gene_name]
introns = ""
if geneObj.strand == "+":
for idx, intronObj in enumerate(geneObj.introns):
introns += "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( \
geneObj.contig, \
intronObj.start, \
intronObj.stop, \
geneObj.strand, \
geneObj.name + "_" + str(idx + 1), \
geneObj.name, \
idx + 1, \
len(geneObj.introns) \
)
elif geneObj.strand == "-":
for idx, intronObj in enumerate(reversed(geneObj.introns)):
introns += "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( \
geneObj.contig, \
intronObj.start, \
intronObj.stop, \
geneObj.strand, \
geneObj.name + "_" + str(idx + 1), \
geneObj.name, \
idx + 1, \
len(geneObj.introns) \
)
yield introns
def write_intron_pos(self):
out_f = self.filename + ".intronpos.txt"
print "Writing %s" % out_f
with open(out_f, "w") as fh:
for string in dataObj.yield_introns():
fh.write(string)
class GeneObj():
def __init__(self, contig, strand, name):
self.contig = contig
self.strand = strand
self.name = name
self.introns = []
def add_intronObj(self, intronObj):
self.introns.append(intronObj)
class IntronObj():
def __init__(self, name, start, stop):
self.name = name
self.start = start
self.stop = stop
def parse_gff(gff_f):
dataObj = DataObj(gff_f)
with open(gff_f) as fh:
for line in fh:
if not line.startswith("#"):
temp = line.rstrip("\n").split()
if temp[2] == "intron":
contig = temp[0]
start = int(temp[3])
stop = int(temp[4])
strand = temp[6]
name = temp[8].replace("Parent=", "")
geneObj = GeneObj(contig, strand, name)
intronObj = IntronObj(name, start, stop)
dataObj.add_geneObj(geneObj)
dataObj.add_intronObj(intronObj)
#print "%s\t%s\t%s\t%s\t%s" % (contig, start, stop, strand, name)
return dataObj
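# Example of a GFF intron line consumed by parse_gff above (illustrative values):
# scaffold_1	gt	intron	1200	1388	.	+	.	Parent=gene0001.t1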
def parse_fasta(fasta_f):
fasta_dict = {}
header, seq = '', ''
	with open(fasta_f) as fh:
for line in fh:
if line.startswith(">"):
if (seq):
fasta_dict[header] = seq.upper()
seq = ''
header = line.rstrip("\n").lstrip(">").split(" ")[0]
else:
seq += line.rstrip("\n")
	fasta_dict[header] = seq.upper()
return fasta_dict
def compute_splice_sites(fasta_dict, dataObj, upstream_start, downstream_start, upstream_end, downstream_end):
'''
1234567890 2 9 + 4 7 -
-23----89- 0 1 1 0 D=2:4 A=8:10 => D=(start-1)-UP:(start-1)+DOWN A=(end-1)-UP:(end-1)
---4567--- 0 1 1 0 A=4:6 D=6:8
0123456789
AGTGATGAGG D=1:3 A=7:9 D=(start-1)-UP:(start)+DOWN A=(end-1)-UP:(end)+DOWN
GCACTACTCC A=3:5 D=5:7 A=(start-1)-UP:(start)+DOWN D=(end-1)-UP:end+DOWN
0123456789
'''
for introns in dataObj.yield_introns():
for line in introns.split("\n"):
if (line):
field = line.rstrip("\n").split("\t") # LOCATION\tSTART\tSTOP\tORIENTATION\tNAME
location = field[0]
start = int(field[1])
end = int(field[2])
strand = field[3]
name = field[4]
gene = field[5]
intron_pos = field[6]
intron_count = field[7]
donor_start, donor_end, acceptor_start, acceptor_end = 0,0,0,0
if location in fasta_dict:
if end - start > MIN_INTRON_LENGTH:
if strand == '+':
donor_start = (start-1)-upstream_start
donor_end = start + downstream_start
acceptor_start = (end-1)-upstream_end
acceptor_end = end + downstream_end
if donor_start < 0:
donor_start = 0
if acceptor_end > len(fasta_dict[location]):
acceptor_end = len(fasta_dict[location])
elif strand == '-':
acceptor_start = (start-1) - downstream_end
acceptor_end = start + upstream_end
donor_start = (end-1) - downstream_start
donor_end = end + upstream_start
if donor_start > len(fasta_dict[location]):
donor_start = len(fasta_dict[location])
if acceptor_end < 0:
acceptor_end = 0
else:
sys.exit("[ERROR] - strand should be +/-, not : %s" % (strand))
#print "Start", donor_start, donor_end
#print str(donor_start) + ":" + str(donor_end) + "," + str(acceptor_start) + ":" + str(acceptor_end)
#donor_header = ">donor;"+ str(start) + "|" + str(donor_start) + ":" + str(donor_end) + ":" + strand #+ " " + fasta_dict[location]
donor_seq = getRegion(fasta_dict[location], donor_start, donor_end, strand)
#acceptor_header = ">acceptor;"+ str(end) + "_" + str(acceptor_start) + ":" + str(acceptor_end) + ":" + strand #+ " " + fasta_dict[location]
acceptor_seq = getRegion(fasta_dict[location], acceptor_start, acceptor_end, strand)
print "%s\t%s\t%s" % ("\t".join(field), donor_seq, acceptor_seq)
#print ">Donor_%s\n%s\n>Acceptor_%s\n%s" % (name, donor_seq, name, acceptor_seq)
else:
print "[WARN] - %s\t : Intron length is below threshold of %s " % ("\t".join(field), MIN_INTRON_LENGTH)
else:
print line
print fasta_dict.keys()
sys.exit("[ERROR] %s from coordinate %s not found in fasta %s" % (location, coordinates, fasta))
#with open(coordinates) as fh:
# for line in fh:
# if line.startswith("#"):
# pass
# else:
# field = line.rstrip("\n").split("\t") # LOCATION\tSTART\tSTOP\tORIENTATION\tNAME
# location = field[0]
# start = int(field[1])
# end = int(field[2])
# strand = field[3]
# name = field[4]
# gene = field[5]
# intron_pos = field[6]
# intron_count = field[7]
# #print field
#
# donor_start, donor_end, acceptor_start, acceptor_end = 0,0,0,0
# if location in fasta_dict:
# if end - start > MIN_INTRON_LENGTH:
# if strand == '+':
# donor_start = (start-1)-upstream_start
# donor_end = start + downstream_start
# acceptor_start = (end-1)-upstream_end
# acceptor_end = end + downstream_end
# if donor_start < 0:
# donor_start = 0
# if acceptor_end > len(fasta_dict[location]):
# acceptor_end = len(fasta_dict[location])
# elif strand == '-':
#
#
# acceptor_start = (start-1) - downstream_end
# acceptor_end = start + upstream_end
# donor_start = (end-1) - downstream_start
# donor_end = end + upstream_start
#
# if donor_start > len(fasta_dict[location]):
# donor_start = len(fasta_dict[location])
# if acceptor_end < 0:
# acceptor_end = 0
# else:
# sys.exit("[ERROR] - strand should be +/-, not : %s" % (strand))
# #print "Start", donor_start, donor_end
# #print str(donor_start) + ":" + str(donor_end) + "," + str(acceptor_start) + ":" + str(acceptor_end)
#
# #donor_header = ">donor;"+ str(start) + "|" + str(donor_start) + ":" + str(donor_end) + ":" + strand #+ " " + fasta_dict[location]
# donor_seq = getRegion(fasta_dict[location], donor_start, donor_end, strand)
#
#
# #acceptor_header = ">acceptor;"+ str(end) + "_" + str(acceptor_start) + ":" + str(acceptor_end) + ":" + strand #+ " " + fasta_dict[location]
# acceptor_seq = getRegion(fasta_dict[location], acceptor_start, acceptor_end, strand)
# print "%s\t%s\t%s" % ("\t".join(field), donor_seq, acceptor_seq)
# #print ">Donor_%s\n%s\n>Acceptor_%s\n%s" % (name, donor_seq, name, acceptor_seq)
# else:
# print "[WARN] - %s\t : Intron length is below threshold of %s " % ("\t".join(field), MIN_INTRON_LENGTH)
# else:
# print line
# print fasta_dict.keys()
# sys.exit("[ERROR] %s from coordinate %s not found in fasta %s" % (location, coordinates, fasta))
def getRegion(seq, start, stop, strand):
region = seq[int(start):int(stop)]
if strand == '-':
complement = {'A':'T','C':'G','G':'C','T':'A','N':'N'}
region = "".join([complement.get(nt.upper(), '') for nt in region[::-1]])
elif strand == '+':
pass
else :
sys.exit("[ERROR] - strand should be +/-, not : %s" % (strand))
return region
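# Worked example (added; sequence and coordinates are made up): with the
# splice-site settings from the usage text (US=0, DS=1, UE=1, DE=0) and a
# '+' strand intron at start=3, end=7 on contig sequence "CCGTAAGCC":
# donor    = getRegion("CCGTAAGCC", (3-1)-0, 3+1, '+') -> "GT"
# acceptor = getRegion("CCGTAAGCC", (7-1)-1, 7+0, '+') -> "AG"
# i.e. the canonical GT...AG dinucleotides at the intron boundaries.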
if __name__ == "__main__":
MIN_INTRON_LENGTH = 4
try:
gff_f = sys.argv[1]
fasta = sys.argv[2]
upstream_start = int(sys.argv[3])
downstream_start = int(sys.argv[4])
upstream_end = int(sys.argv[5])
downstream_end = int(sys.argv[6])
except:
sys.exit("Usage: ./extractRegionFromCoordinates.py [GFF] [FASTA] [US] [DS] [UE] [DE] \n\n\
[GFF] : Intron features have to be present in GFF (use Genometools)\n\
[US] : Positions upstream of start of intron feature in GFF\n\
[DS] : Positions downstream of start of intron feature in GFF\n\
[UE] : Positions upstream of end of intron feature in GFF\n\
[DS] : Positions downstream of end of intron feature in GFF\n\n - Extracting splice sites : \n\n ./extractRegionFromCoordinates.py nGr.v1.0.gff3 nGr.v1.0.fa 0 1 1 0 \n\n")
dataObj = parse_gff(gff_f)
#dataObj.write_intron_pos()
fasta_dict = parse_fasta(fasta)
compute_splice_sites(fasta_dict, dataObj, upstream_start, downstream_start, upstream_end, downstream_end)
| gpl-3.0 | -4,000,169,579,499,924,000 | 34.043321 | 176 | 0.606882 | false | 2.596149 | false | false | false |
rshk/clitools | clitools/__init__.py | 1 | 11875 | """
CLI Tools - Command Line Interface building tools
Example usage::
from clitools import CliApp
cli = CliApp()
    @cli.command
    def hello():
        print("Hello, world!")
    @cli.command
    def hello2(name='world'):
        print("Hello, {0}!".format(name))
    if __name__ == '__main__':
        cli.run()
"""
import argparse
import logging
import sys
__version__ = '0.4a2' # sync with setup.py!
logger = logging.getLogger('clitools')
class Command(object):
def __init__(self, func, func_info):
self.func = func
self.func_info = func_info
logger.debug('-- New CliApp instance')
def __call__(self, parsed_args):
"""
We need to map parsed arguments to function arguments
before calling..
"""
args = []
kwargs = {}
for argname in self.func_info['positional_args']:
args.append(getattr(parsed_args, argname))
for argname, default in self.func_info['keyword_args']:
kwargs[argname] = getattr(parsed_args, argname, default)
return self.func(*args, **kwargs)
class CliApp(object):
class arg(object):
"""Class used to wrap arguments as function defaults"""
def __init__(self, *a, **kw):
self.args = a
self.kwargs = kw
def __init__(self, prog_name='cli-app'):
self.prog_name = prog_name
self.parser = argparse.ArgumentParser(prog=prog_name)
self.subparsers = self.parser.add_subparsers(help='sub-commands')
def command(self, func=None, **kwargs):
"""
Decorator to register a command function
:param name: Name for the command
:param help: Help text for the function
"""
def decorator(func):
self._register_command(func, **kwargs)
return func
if func is None:
return decorator
return decorator(func)
def _register_command(self, func, **kwargs):
"""
Register a command function. We need to hack things a bit here:
- we need to change argument defaults in the function (copying it)
- The original function is copied, and default values changed
- The new function is copied in the subparser object
WARNING! variable arguments / keyword arguments are not supported
(yet)! They are just stripped & ignored, ATM..
"""
func_info = self._analyze_function(func)
## WARNING! We're not supporting things like this, right now:
## def func(a, ((b, c), d)=((1, 2), 3)): pass
## Maybe, we should fallback to requiring "flat" arguments,
## at least for the moment?
## Read keyword arguments
name = kwargs.get('name')
if name is None:
name = func_info['name']
## Strip the command_ prefix from function name
if name.startswith('command_'):
name = name[len('command_'):]
help_text = kwargs.get('help')
if help_text is None:
help_text = func_info['help_text']
## Create the new subparser
subparser = self.subparsers.add_parser(name, help=help_text)
## Process required positional arguments
for argname in func_info['positional_args']:
logger.debug('New argument: {0}'.format(argname))
subparser.add_argument(argname)
## Process optional keyword arguments
func_new_defaults = []
for argname, argvalue in func_info['keyword_args']:
if isinstance(argvalue, self.arg):
## We already have args / kwargs for this argument
a = (['--' + argname] + list(argvalue.args))
kw = argvalue.kwargs
func_new_defaults.append(kw.get('default'))
else:
## We need to guess args / kwargs from default value
a, kw = self._arg_from_free_value(argname, argvalue)
func_new_defaults.append(argvalue) # just use the old one
logger.debug('New argument: {0!r} {1!r}'.format(a, kwargs))
subparser.add_argument(*a, **kw)
func.func_defaults = tuple(func_new_defaults)
## todo: replace defaults on the original function, to strip
## any instance of ``self.arg``?
new_function = Command(func=func, func_info=func_info)
## Positional arguments are treated as required values
subparser.set_defaults(func=new_function)
return subparser # for further analysis during tests
def _analyze_function(self, func):
"""
Extract information from a function:
- positional argument names
- optional argument names / default values
- does it accept *args?
- does it accept **kwargs?
"""
import inspect
info = {}
info['name'] = func.func_name
# todo extract arguments docs too!
info['help_text'] = inspect.getdoc(func)
argspec = inspect.getargspec(func)
is_generator = inspect.isgeneratorfunction(func)
info['accepts_varargs'] = argspec.varargs is not None
info['varargs_name'] = argspec.varargs
info['accepts_kwargs'] = argspec.keywords is not None
info['kwargs_name'] = argspec.keywords
info['is_generator'] = is_generator
arg_defaults = argspec.defaults or []
akw_limit = len(argspec.args) - len(arg_defaults)
info['positional_args'] = argspec.args[:akw_limit]
kwargs_names = argspec.args[akw_limit:]
assert len(kwargs_names) == len(arg_defaults)
info['keyword_args'] = zip(kwargs_names, arg_defaults)
return info
def _arg_from_free_value(self, name, value):
"""
Guess the correct argument type to be built for free-form
arguments (default values)
"""
logger.debug('_arg_from_free_value({0!r}, {1!r})'.format(name, value))
arg_name = '--' + name
def o(*a, **kw):
return a, kw
if value is None:
## None: this is just a generic argument, accepting any value
logger.debug('None -> generic optional argument')
return o(arg_name, default=value)
elif (value is True) or (value is False):
## Boolean value: on/off flag
logger.debug('bool -> flag')
action = 'store_false' if value else 'store_true'
return o(arg_name, action=action, default=value)
elif isinstance(value, (list, tuple)):
## List/tuple: if has at least two items, it will
## be used for a 'choice' option, else for an 'append'
## list.
if len(value) > 1:
## Choices
logger.debug('List with length >= 2 -> choices')
                return o(arg_name, choices=value, default=value[0])
else:
## Append (of type)
type_ = None
logger.debug('List with length < 2 -> list of items')
if len(value) > 0:
## This is [<type>]
type_ = (value[0]
if isinstance(value[0], type)
else type(value[0]))
return o(arg_name, type=type_, action='append', default=[])
else:
## Anything of this type will fit..
## todo: make sure the type is a supported one?
if isinstance(value, type):
type_ = value
default = None
else:
type_ = type(value)
default = value
logger.debug('Generic object of type {0!r} (default: {1!r})'
.format(type_, default))
# import ipdb; ipdb.set_trace()
return o(arg_name, type=type_, default=default)
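    # Illustrative mapping (added): for a command defined as
    #     def cmd(name=None, force=False, level=3): ...
    # the guesses above yield roughly:
    #     --name   generic option, default None
    #     --force  store_true flag, default False
    #     --level  type=int, default 3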
def run(self, args=None):
"""Handle running from the command line"""
parsed_args = self.parser.parse_args(args)
function = getattr(parsed_args, 'func', None)
if function is None:
## Emulate Python2 behavior..
self.parser.print_help(sys.stderr)
sys.exit(2)
# function = parsed_args.func
return parsed_args.func(parsed_args)
## Utility methods
##----------------------------------------
def split_function_doc(doc):
"""
Performs a very simple splitting of a function documentation:
- separate blocks starting with :name from the rest of the
function documentation
Note: this function expects the passed-in docstring
to be already cleaned, usually via pydoc.getdoc().
:yields: two-tuples (block_info, block_data).
- block info is a tuple of strings describing the workds
between the first two colons, or None
- block data is the block data without any prefix
"""
def tokenize_blocks(lines):
## We need to loop until we find a line starting with :
## or the end of the docstring.
buf = []
for line in lines:
if line.startswith(':'):
if len(buf):
yield buf
buf = []
buf.append(line)
if len(buf):
yield buf
for block in tokenize_blocks(doc.splitlines()):
block_data = '\n'.join(block).strip()
if block_data.startswith(':'):
_, args, block_data = block_data.split(':', 2)
block_info = tuple(args.split())
else:
block_info = None
yield block_info, block_data.strip()
def extract_arguments_info(doc):
"""
Extract (organized) arguments information from a docstring.
This will extract all the :param: and :type: arguments
from the function docstring and return them in a dictionary,
along with function docstring.
>>> extract_arguments_info('''
... My example function.
...
... :param spam: Some spam argument
... :type spam: str
... :param int eggs: Some eggs argument
... :param bacon: Yummy!
... ''') == {
... 'function_help': 'My example function.\\n',
... 'params_help': {
... 'spam': {'help': 'Some spam argument', 'type': 'str'},
... 'eggs': {'help': 'Some eggs argument', 'type': 'int'},
... 'bacon': {'help': 'Yummy!'}
... }
... }
True
"""
from collections import defaultdict
func_doc = []
args_doc = defaultdict(dict)
for block_info, block_data in split_function_doc(doc):
if block_info is None:
func_doc.append(block_data)
else:
block_type = block_info[0]
# :param <type> <name>: <doc>
# :param <name>: <doc>
# :type <name>: <type>
if block_type in ('param', 'type'):
if block_type == 'param' and len(block_info) == 3:
p_type, p_name = block_info[1:3]
p_help = block_data
args_doc[p_name]['type'] = p_type
args_doc[p_name]['help'] = p_help
elif block_type == 'param' and len(block_info) == 2:
p_name = block_info[1]
p_help = block_data
args_doc[p_name]['help'] = p_help
elif block_type == 'type' and len(block_info) == 2:
p_name = block_info[1]
p_type = block_data
args_doc[p_name]['type'] = p_type
else:
raise ValueError("Wrong block information")
return {
'function_help': '\n'.join(func_doc).strip() + '\n',
'params_help': dict(args_doc),
}
| bsd-3-clause | -4,145,025,209,179,156,500 | 31.181572 | 78 | 0.544 | false | 4.205028 | false | false | false |
kurtraschke/camelot | camelot/core/constants.py | 1 | 1553 | # ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / [email protected]
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact [email protected]
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# [email protected]
#
# ============================================================================
"""
Created on Jul 19, 2009
@author: tw55413
"""
camelot_maxint = 2147483647
camelot_minint = -2147483648
camelot_maxfloat = 1e15
camelot_minfloat = -1e15
camelot_time_format = 'hh:mm'
strftime_time_format = '%H:%M'
camelot_datetime_format = 'dd-MM-yyyy hh:mm'
strftime_datetime_format = '%d-%m-%Y %H:%M'
camelot_date_format = 'dd-MM-yyyy'
strftime_date_format = '%d-%m-%Y'
camelot_small_icon_width = 16
camelot_small_icon_height = 16
MAXIMIZED = 'maximized'
MINIMIZED = 'minimized'
| gpl-2.0 | -5,293,704,356,363,386,000 | 34.295455 | 79 | 0.671603 | false | 3.428256 | false | false | false |
python/pythondotorg | blogs/parser.py | 3 | 1552 | import datetime
import feedparser
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.timezone import make_aware, utc
from boxes.models import Box
from .models import BlogEntry, Feed
def get_all_entries(feed_url):
""" Retrieve all entries from a feed URL """
d = feedparser.parse(feed_url)
entries = []
for e in d['entries']:
published = make_aware(
            datetime.datetime(*e['published_parsed'][:6]), timezone=utc
)
entry = {
'title': e['title'],
'summary': e.get('summary', ''),
'pub_date': published,
'url': e['link'],
}
entries.append(entry)
return entries
def _render_blog_supernav(entry):
""" Utility to make testing update_blogs management command easier """
return render_to_string('blogs/supernav.html', {'entry': entry})
def update_blog_supernav():
"""Retrieve latest entry and update blog supernav item """
try:
latest_entry = BlogEntry.objects.filter(
feed=Feed.objects.get(
feed_url=settings.PYTHON_BLOG_FEED_URL,
)
).latest()
except (BlogEntry.DoesNotExist, Feed.DoesNotExist):
pass
else:
rendered_box = _render_blog_supernav(latest_entry)
box, _ = Box.objects.update_or_create(
label='supernav-python-blog',
defaults={
'content': rendered_box,
'content_markup_type': 'html',
}
)
| apache-2.0 | -3,155,940,080,421,918,000 | 26.22807 | 74 | 0.590206 | false | 4.052219 | false | false | false |
NeoGeographyToolkit/Tools | pbs_parallel_stereo.py | 1 | 7453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
import sys
import os, glob, re, shutil, subprocess, string, time, errno, optparse
import IrgFileFunctions, IrgIsisFunctions, IrgPbsFunctions, IrgSystemFunctions
def man(option, opt, value, parser):
print >>sys.stderr, parser.usage
print >>sys.stderr, '''\
Writes chained PBS scripts that run the stages of parallel_stereo across multiple nodes.
'''
sys.exit()
#------------------------------------------------------------------------------
def main(argsIn):
try:
usage = "usage: pbs_parallel_stereo.py (same inputs as parallel_stereo plus new options)"
parser = IrgSystemFunctions.PassThroughOptionParser(usage=usage) # Use parser that ignores unknown options
parser.add_option("--num-correlation-nodes", dest="numCorrelationNodes", type='int', default=1,
help="Number of nodes to use for the two correlation steps")
parser.add_option("--num-triangulation-nodes", dest="numTriangulationNodes", type='int', default=1,
help="Number of nodes to use for the triangulation steps")
parser.add_option('--node-type', dest='nodeType', default='wes',
help='Type of processing node to request (wes, san, or ivy)')
parser.add_option('--group-id', dest='groupId', default='',
help='GID to charge the hours to [REQUIRED]')
# This call handles all the specific options for this code.
(options, args) = parser.parse_args(argsIn)
# 'args' contains everything for parallel_stereo
# Check the required positional arguments.
if not options.groupId:
parser.error("Must input a group ID to charge to!")
# Any additional arguments need to be forwarded to the mapproject function
options.extraArgs = args
except optparse.OptionError, msg:
        parser.error(str(msg))
#startTime = time.time()
# Currently all the outputs are written to the current directory!
scriptFolder = os.getcwd()
pbsPath = os.path.abspath(os.path.join(scriptFolder, 'mainPbsScript.sh'))
stdLogPaths = []
errLogPaths = []
scriptCalls = []
for i in range(4):
stdLogPaths.append(os.path.abspath(os.path.join(scriptFolder, 'stdLog' +str(i)+'.txt')))
errLogPaths.append(os.path.abspath(os.path.join(scriptFolder, 'errLog' +str(i)+'.txt')))
scriptCalls.append(os.path.abspath(os.path.join(scriptFolder, 'pbsScript'+str(i)+'.sh' )))
# Generate the core PBS string
cpusPerNode = 12
if options.nodeType == 'san':
cpusPerNode = 18
elif options.nodeType == 'ivy':
cpusPerNode = 24
# TODO: Allow users to input these times!
stepHours = ['5:00:00', '40:00:00', "30:00:00", "40:00:00"]
corePbsString = ('qsub -q long -W group_list='+ options.groupId+
' -m eb -S /bin/bash -V -j oe -C '+ scriptFolder)
# Generate all of the individual PBS calls
pbsStrings = []
# Preprocessing stage
pbsStrings.append('subjob1=$( ' + corePbsString + ' -N pbs_stereo1 -l walltime="'+stepHours[0]+'"'
+ ' -e '+ errLogPaths[0] +' -o '+ stdLogPaths[0]
+ ' -l select='+str(1)+':ncpus='+str(cpusPerNode)+':model='+options.nodeType
+ ' -- '+ scriptCalls[0] +')')
# Correlation stage
pbsStrings.append('subjob2=$( ' + corePbsString + ' -N pbs_stereo2 -l walltime="'+stepHours[1]+'"'
+ ' -e '+ errLogPaths[1] +' -o '+ stdLogPaths[1]
+ ' -l select='+str(options.numCorrelationNodes)+':ncpus='+str(cpusPerNode)+':model='+options.nodeType
+ ' -W depend=afterok:$subjob1 -- '+ scriptCalls[1] +')')
# Filtering stage
pbsStrings.append('subjob3=$( ' + corePbsString + ' -N pbs_stereo3 -l walltime="'+stepHours[2]+'"'
+ ' -e '+ errLogPaths[2] +' -o '+ stdLogPaths[2]
+ ' -l select='+str(1)+':ncpus='+str(cpusPerNode)+':model='+options.nodeType
+ ' -W depend=afterok:$subjob2 -- '+ scriptCalls[2] +')')
# Triangulation stage
pbsStrings.append(corePbsString + ' -N pbs_stereo4 -l walltime="'+stepHours[3]+'"'
+ ' -e '+ errLogPaths[3] +' -o '+ stdLogPaths[3]
+ ' -l select='+str(options.numTriangulationNodes)+':ncpus='+str(cpusPerNode)+':model='+options.nodeType
+ ' -W depend=afterok:$subjob3 -- '+ scriptCalls[3])
# Set up the command line for parallel_stereo
commandList = ['parallel_stereo', '--nodes-list', '$PBS_NODEFILE']
commandList = commandList + options.extraArgs # Append other options
commandString = IrgSystemFunctions.argListToString(commandList)
phases = [' --entry-point 0 --stop-point 1', # Init
' --entry-point 1 --stop-point 3', # Correlation
' --entry-point 3 --stop-point 4', # Filtering
' --entry-point 4 --stop-point 6'] # Triangulation
# Generate a set of four script files
for i in range(4):
print 'Writing script file ' + scriptCalls[i]
scriptFile = open(scriptCalls[i], 'w')
scriptFile.write('#!/bin/bash\n\n')
thisCommandString = commandString + phases[i]
scriptFile.write(thisCommandString)
scriptFile.close()
# Set the script file to be executable
os.system('chmod +x ' + scriptCalls[i])
# Write the PBS script
print 'Writing main PBS script ' + pbsPath
scriptFile = open(pbsPath, 'w')
scriptFile.write('#!/bin/bash\n\n\n')
scriptFile.write('# The parallel_stereo command which is implemented:\n')
scriptFile.write('# '+ commandString) # Show the implemented command in comments
for i in range(4):
scriptFile.write('\n\n\n' + pbsStrings[i])
scriptFile.close()
# Set the PBS file to be executable
os.system('chmod +x ' + pbsPath)
## Clean up temporary files
#if not options.keep:
# IrgFileFunctions.removeFolderIfExists(tempFolder)
#endTime = time.time()
#
#print "Finished in " + str(endTime - startTime) + " seconds."
print 'Finished! To run parallel stereo, run the file ' + pbsPath
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
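# Example invocation (added for illustration; cube and output names are
# placeholders):
#   pbs_parallel_stereo.py --group-id s1234 --node-type ivy \
#       --num-correlation-nodes 4 --num-triangulation-nodes 2 \
#       left.cub right.cub run/run
# Any option not listed above is forwarded verbatim to parallel_stereo.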
| apache-2.0 | 8,849,569,182,561,258,000 | 42.080925 | 140 | 0.584999 | false | 3.810327 | false | false | false |
kernelci/kernelci-backend | app/models/report.py | 1 | 4253 | # Copyright (C) Linaro Limited 2015,2017
# Author: Milo Casagrande <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""The model that represents a report document to store in the db.
Reports here refer to boot or build email reports as sent (or not sent).
"""
import bson
import copy
import datetime
import types
import models
import models.base as modb
# pylint: disable=too-many-instance-attributes
class ReportDocument(modb.BaseDocument):
"""A report document that should be stored in the db.
This is used to provide some historic data about email reports sent or
which error they had when sending.
"""
def __init__(self, name, version="1.1"):
self._created_on = None
self._id = None
self._name = name
self._version = version
self.errors = []
self.job = None
self.kernel = None
self.git_branch = None
# The report type.
self.report_type = None
self.status = None
self.updated_on = None
@property
def collection(self):
return models.REPORT_COLLECTION
@property
def name(self):
"""The name of the report."""
return self._name
# pylint: disable=invalid-name
@property
def id(self):
"""The ID of this object as returned by mongodb."""
return self._id
# pylint: disable=invalid-name
@id.setter
def id(self, value):
"""Set the ID of this object with the ObjectID from mongodb.
:param value: The ID of this object.
:type value: str
"""
self._id = value
@property
def version(self):
"""The schema version of this document."""
return self._version
@version.setter
def version(self, value):
"""Set the schema version of this document."""
self._version = value
@property
def created_on(self):
"""When this object was created."""
if self._created_on is None:
self._created_on = datetime.datetime.now(tz=bson.tz_util.utc)
return self._created_on
@created_on.setter
def created_on(self, value):
"""Set the creation date of this lab object.
:param value: The lab creation date, in UTC time zone.
:type value: datetime
"""
self._created_on = value
def to_dict(self):
report_dict = {
models.CREATED_KEY: self.created_on,
models.ERRORS_KEY: self.errors,
models.GIT_BRANCH_KEY: self.git_branch,
models.JOB_KEY: self.job,
models.KERNEL_KEY: self.kernel,
models.NAME_KEY: self.name,
models.STATUS_KEY: self.status,
models.TYPE_KEY: self.report_type,
models.UPDATED_KEY: self.updated_on,
models.VERSION_KEY: self.version
}
if self.id:
report_dict[models.ID_KEY] = self.id
return report_dict
@staticmethod
def from_json(json_obj):
report_doc = None
if json_obj and isinstance(json_obj, types.DictionaryType):
local_obj = copy.deepcopy(json_obj)
j_pop = local_obj.pop
report_doc = ReportDocument(
j_pop(models.NAME_KEY), version=j_pop(models.VERSION_KEY))
report_doc.report_type = j_pop(models.TYPE_KEY)
for key, val in local_obj.iteritems():
setattr(report_doc, key, val)
report_doc.updated_on = datetime.datetime.now(tz=bson.tz_util.utc)
return report_doc
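# Illustrative round-trip (not part of the original module). The literal keys
# below assume the models.*_KEY constants resolve to the obvious lowercase
# names ("name", "version", "type", ...); treat them as placeholders.
#
#   doc = ReportDocument("boot-report", version="1.1")
#   doc.report_type = "boot"
#   doc.job, doc.kernel, doc.git_branch = "next", "v4.9-rc1", "master"
#   as_dict = doc.to_dict()                       # dict ready to store in mongodb
#   restored = ReportDocument.from_json(as_dict)  # rebuilds an equivalent doc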
| lgpl-2.1 | 2,941,570,709,900,832,000 | 28.950704 | 79 | 0.626852 | false | 4.004708 | false | false | false |
mlperf/training_results_v0.5 | v0.5.0/nvidia/submission/code/single_stage_detector/pytorch/eval.py | 1 | 7472 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import xml.etree.ElementTree as ET
import pickle
import os
def voc_ap(rec, prec, use_07_metric=True):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:True).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
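# Illustrative usage (not part of the original file): average precision for a
# small hand-made precision/recall curve; the numbers below are made up.
def _example_voc_ap():
    rec = np.array([0.1, 0.4, 0.7, 1.0])
    prec = np.array([1.0, 0.8, 0.6, 0.5])
    # With use_07_metric=True the 11-point interpolation averages the best
    # precision available at recall thresholds 0.0, 0.1, ..., 1.0.
    return voc_ap(rec, prec, use_07_metric=True)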
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
if any(lines) == 1:
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
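# Example call (illustrative only; the paths below are hypothetical and must
# point at real files on disk):
#
#   rec, prec, ap = voc_eval(
#       'results/det_test_{:s}.txt',         # detpath, formatted with classname
#       'VOC2007/Annotations/%s.xml',        # annopath, formatted with image id
#       'VOC2007/ImageSets/Main/test.txt',   # imagesetfile, one image id per line
#       'car',                               # classname
#       'annotations_cache',                 # cachedir
#       ovthresh=0.5, use_07_metric=True)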
| apache-2.0 | -6,352,871,810,177,470,000 | 35.096618 | 78 | 0.534261 | false | 3.630709 | false | false | false |
cqychen/quants | quants/loaddata/skyeye_ods_invest_refer_sh_margins.py | 1 | 2056 | #coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import sys
sys.path.append('../') # add path for the shared configuration module
from common_function import *
def create_table(table_name):
cmd='''
create table if not exists %s
(
		opDate VARCHAR (63) comment 'margin trading date'
		,rzye BIGINT comment 'financing balance today (yuan)'
		,rzmre BIGINT comment 'financing purchase amount today (yuan)'
		,rqyl BIGINT comment 'securities lending volume today'
		,rqylje BIGINT comment 'securities lending balance today (yuan)'
		,rqmcl BIGINT comment 'securities lending sell volume today'
		,rzrqjyzl BIGINT comment 'total margin trading balance today (yuan)'
,PRIMARY KEY(`opDate`)
,index(opDate)
)DEFAULT CHARSET=utf8
'''%table_name
print (cmd)
run_mysql_cmd(cmd,conn)
def load_data():
start_date=get_date_add_days(get_max_date_sh_margins(),1)
print(start_date,end_date)
rs=ts.sh_margins(start=start_date, end=end_date)
pd.DataFrame.to_sql(rs, table_name, con=conn, flavor='mysql', if_exists='append', index=False)
if __name__ == '__main__':
	#-------------------- set basic information ---------------------------------
	print("-------------- loading SH margin trading data -----------------------------")
startTime=dt.time()
iphost,user,passwd=get_mysql_conn()
db='ods_data'
charset='utf8'
table_name='ods_invest_refer_sh_margins'
conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
end_date= dt.strftime('%Y-%m-%d',dt.localtime(dt.time()))
#--------------------脚本运行开始--------------------------------
create_table(table_name=table_name)
load_data()
endTime=dt.time()
print("---------------脚本运行完毕,共计耗费时间%sS------------------"%(endTime-startTime))
| epl-1.0 | -8,458,707,878,542,335,000 | 34.471698 | 98 | 0.583511 | false | 2.822823 | false | false | false |
fboers/jumegX | connectivity/con_viz.py | 1 | 10426 | #!/usr/bin/env python
""" Visualization functions for connectivity analysis. """
import sys
import os.path as op
import numpy as np
import scipy as sci
import matplotlib.pyplot as pl
import mne
import yaml
import pickle
def sensor_connectivity_3d(raw, picks, con, idx, n_con=20, min_dist=0.05,
scale_factor=0.005, tube_radius=0.001):
""" Function to plot sensor connectivity showing strongest
    connections (n_con), excluding sensors that are less than min_dist apart.
https://github.com/mne-tools/mne-python/blob/master/examples/connectivity/plot_sensor_connectivity.py
Parameters
----------
raw : Raw object
Instance of mne.io.Raw
picks : list
Picks to be included.
con : ndarray (n_channels, n_channels)
Connectivity matrix.
idx : list
List of indices of sensors of interest.
n_con : int
Number of connections of interest.
min_dist : float
Minimum distance between sensors allowed.
Note: Please modify scale factor and tube radius to appropriate sizes
if the plot looks scrambled.
"""
# Now, visualize the connectivity in 3D
try:
from enthought.mayavi import mlab
except:
from mayavi import mlab
mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
# Plot the sensor location
sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx]
sens_loc = np.array(sens_loc)
pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2],
color=(1, 1, 1), opacity=1, scale_factor=scale_factor)
# Get the strongest connections
threshold = np.sort(con, axis=None)[-n_con]
ii, jj = np.where(con >= threshold)
# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
if sci.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
con_nodes.append((i, j))
con_val.append(con[i, j])
con_val = np.array(con_val)
# Show the connections as tubes between sensors
vmax = np.max(con_val)
vmin = np.min(con_val)
for val, nodes in zip(con_val, con_nodes):
x1, y1, z1 = sens_loc[nodes[0]]
x2, y2, z2 = sens_loc[nodes[1]]
points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
vmin=vmin, vmax=vmax, tube_radius=tube_radius,
colormap='RdBu')
points.module_manager.scalar_lut_manager.reverse_lut = True
mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4)
# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
[n[1] for n in con_nodes]))
for node in nodes_shown:
x, y, z = sens_loc[node]
mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=0.005,
color=(0, 0, 0))
view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2]))
mlab.view(*view)
def plot_grouped_connectivity_circle(yaml_fname, con, orig_labels,
node_order_size=68, indices=None,
out_fname='circle.png', title=None,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True,
vmin=None, vmax=None, colormap='hot',
colorbar=False):
'''
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided.
orig_labels : list of str
Label names in the order as appears in con.
'''
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.load(f)
else:
print '%s - File not found.' % yaml_fname
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g',
'g', 'r', 'c', 'y', 'b', 'm']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
lh_labels = [name + '-lh' for name in label_names]
rh_labels = [name + '-rh' for name in label_names]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
    assert len(node_order) == node_order_size, 'Node order length does not match.'
# the respective no. of regions in each cortex
group_bound = [len(labels[key]) for key in labels.keys()]
group_bound = [0] + group_bound[::-1] + group_bound
group_boundaries = [sum(group_bound[:i+1]) for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
    assert len(label_colors) == len(node_order), 'Number of colours does not match'
# remove the last total sum of the list
group_boundaries.pop()
from mne.viz.circle import circular_layout
node_angles = circular_layout(orig_labels, node_order, start_pos=90,
group_boundaries=group_boundaries)
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# Plot the graph using node_order and colours
# orig_labels is the order of nodes in the con matrix (important)
from mne.viz import plot_connectivity_circle
plot_connectivity_circle(con, orig_labels, n_lines=n_lines,
facecolor='white', textcolor='black',
node_angles=node_angles, colormap=colormap,
node_colors=reordered_colors,
node_edgecolor='white', fig=fig,
fontsize_names=6, vmax=vmax, vmin=vmin,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
colorbar=colorbar, show=show, subplot=subplot,
indices=indices, title=title)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
labels.keys())]
pl.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
pl.savefig(out_fname, facecolor='white', dpi=300)
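# The yaml grouping file is assumed (from the way `labels` is used above) to
# map cortex groups to lists of label names without the '-lh'/'-rh' suffix,
# for example:
#
#   occipital:
#     - lateraloccipital
#     - lingual
#   temporal:
#     - superiortemporal
#     - middletemporal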
def plot_generic_grouped_circle(yaml_fname, con, orig_labels,
node_order_size,
out_fname='circle.png', title=None,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True,
vmin=None, vmax=None,
colorbar=False):
'''
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided. This is not specific to aparc and
does not automatically split the labels into left and right hemispheres.
orig_labels : list of str
Label names in the order as appears in con.
'''
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.load(f)
else:
print '%s - File not found.' % yaml_fname
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
# here label_names are the node_order
node_order = label_names
    assert len(node_order) == node_order_size, 'Node order length does not match.'
# the respective no. of regions in each cortex
group_bound = [len(labels[key]) for key in labels.keys()]
group_bound = [0] + group_bound
group_boundaries = [sum(group_bound[:i+1]) for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
    assert len(label_colors) == len(node_order), 'Number of colours does not match'
# remove the last total sum of the list
group_boundaries.pop()
from mne.viz.circle import circular_layout
node_angles = circular_layout(orig_labels, label_names, start_pos=90,
group_boundaries=group_boundaries)
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# Plot the graph using node_order and colours
    # orig_labels is the order of nodes in the con matrix (important)
from mne.viz import plot_connectivity_circle
plot_connectivity_circle(con, orig_labels, n_lines=n_lines,
facecolor='white', textcolor='black',
node_angles=node_angles,
node_colors=reordered_colors,
node_edgecolor='white', fig=fig,
fontsize_names=8, vmax=vmax, vmin=vmin,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
colorbar=colorbar, show=show, subplot=subplot,
title=title)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
labels.keys())]
pl.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
pl.savefig(out_fname, facecolor='white', dpi=300)
| bsd-3-clause | -2,334,218,166,791,869,000 | 38.642586 | 109 | 0.573182 | false | 3.792652 | false | false | false |
alivecor/tensorflow | tensorflow/contrib/framework/python/framework/__init__.py | 43 | 1458 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module containing TensorFlow ops whose API may change in the future."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.framework.python.framework.checkpoint_utils import *
from tensorflow.contrib.framework.python.framework.experimental import experimental
from tensorflow.contrib.framework.python.framework.tensor_util import *
# pylint: enable=wildcard-import
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.deprecation import deprecated_args
| apache-2.0 | 921,204,177,520,799,200 | 47.6 | 83 | 0.758573 | false | 4.643312 | false | false | false |
giancds/attentive_lm | utils.py | 1 | 3590 | # -*- coding: utf-8 -*-
"""
utility functions to train the RNN-based VariationalAutoencoder
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import numpy as np
import tensorflow as tf
import attention
import cells
# TODO: pydocs
TRAIN_INFO_LM = {
"epoch": 0,
"best_valid_ppx": np.inf,
"best_epoch": 0,
"estop_counter": 0,
"current_cost": 0.0
}
def get_2d_tensor_shapes(tensor):
""" """
length = tensor.get_shape()[0].value
if length is None:
length = tf.shape(tensor)[0]
dim = tensor.get_shape()[1].value
return length, dim
def get_3d_tensor_shapes(tensor):
""" """
batch = tensor.get_shape()[0].value
length = tensor.get_shape()[1].value
if length is None:
    length = tf.shape(tensor)[1]
dim = tensor.get_shape()[2].value
return batch, length, dim
def reshape_attention(attention_states):
""" """
_, attn_length, attn_dim = get_3d_tensor_shapes(attention_states)
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = tf.reshape(attention_states, [-1, attn_length, 1, attn_dim])
return hidden, attn_dim
def convolve(tensor, matrix):
""" """
return tf.nn.conv2d(tensor, matrix, [1, 1, 1, 1], "SAME")
def build_lm_layers(num_layers,
size,
is_training=False,
decoding_function_name=None,
keep_prob=1.0,
keep_attention_weights=False):
""" Helper to build recurrent layers for he LM. """
decoding_function = None
# building the layers
lstm_cell0 = tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=1.0, reuse=not is_training)
# lstm_cell0 = tf.contrib.rnn.LSTMBlockCell(
# size, forget_bias=1.0)
lstm_cell1 = tf.contrib.rnn.DropoutWrapper(
lstm_cell0, output_keep_prob=keep_prob
) if is_training and keep_prob < 1.0 else lstm_cell0
if decoding_function_name is not None:
decoding_function = attention.get_decoder_content_function(
decoding_function_name)
lstm_cellA = cells.AttentionLSTMCell(
size, forget_bias=1.0, state_is_tuple=True,
init_constant_output_bias=False,
decoding_function=decoding_function,
keep_attention_weights=keep_attention_weights,
reuse=tf.get_variable_scope().reuse)
lstm_cellA = tf.contrib.rnn.DropoutWrapper(
lstm_cellA, output_keep_prob=keep_prob
) if is_training and keep_prob < 1.0 else lstm_cellA
# internal_cell = [lstm_cell1] * (num_layers - 1)
internal_cell = [lstm_cell1 for _ in range(num_layers - 1)]
internal_cell = internal_cell + [lstm_cellA]
else:
internal_cell = [lstm_cell1 for _ in range(num_layers)]
cell = tf.contrib.rnn.MultiRNNCell(internal_cell, state_is_tuple=True)
return cell
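# Minimal usage sketch (illustrative, not part of the original module): a plain
# 2-layer LSTM stack; passing a decoding_function_name would put the
# AttentionLSTMCell on top instead. The sizes below are arbitrary.
def _example_build_lm_layers():
  return build_lm_layers(num_layers=2, size=128, is_training=True,
                         keep_prob=0.75)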
def create_queue(data_size,
num_steps,
capacity=128,
dtype=tf.int32):
""" Create the queue and related ops and placeholders to be used to
feed the network.
"""
# Feeds for inputs.
input_data = tf.placeholder(
dtype, shape=[data_size, num_steps], name="input_data")
targets = tf.placeholder(
dtype, shape=[data_size, num_steps], name="targets")
queue = tf.FIFOQueue(
capacity=capacity,
# min_after_dequeue=min_after_dequeue,
dtypes=[dtype, dtype],
shapes=[[num_steps]] * 2)
enqueue_op = queue.enqueue_many(
[input_data, targets])
placeholders = {
"input_data": input_data,
"targets": targets
}
return queue, enqueue_op, placeholders
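# Illustrative usage (not part of the original module); sess, x, y and
# batch_size below are placeholders supplied by the caller.
#
#   queue, enqueue_op, placeholders = create_queue(data_size=1000, num_steps=35)
#   inputs, targets = queue.dequeue_many(batch_size)
#   sess.run(enqueue_op, feed_dict={placeholders["input_data"]: x,
#                                   placeholders["targets"]: y})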
| apache-2.0 | -6,731,724,072,265,585,000 | 22.933333 | 78 | 0.645961 | false | 3.314866 | false | false | false |
wcainboundary/boundary-api-cli | boundary/alarm_create.py | 1 | 1368 | #
# Copyright 2014-2015 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import AlarmModify
class AlarmCreate(AlarmModify):
def __init__(self):
AlarmModify.__init__(self, False)
self.method = "POST"
        self.cli_description = "Creates a new alarm definition in a Boundary account"
def addArguments(self):
self.parser.add_argument('-n', '--alarm-name', dest='alarm_name', action='store', required=True,
metavar='alarm_name', help='Name of the alarm')
AlarmModify.addArguments(self)
def getArguments(self):
"""
Extracts the specific arguments of this CLI
"""
AlarmModify.getArguments(self)
self.path = 'v1/alarms'
def getDescription(self):
        return 'Creates an alarm definition in a Boundary account'
| apache-2.0 | -1,871,054,217,826,630,000 | 29.4 | 104 | 0.678363 | false | 4.145455 | false | false | false |
Finntack/pootle | tests/models/translationproject.py | 1 | 10435 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import shutil
import pytest
from translate.filters import checks
from django.db import IntegrityError
from pytest_pootle.factories import (
LanguageDBFactory, ProjectDBFactory, TranslationProjectFactory)
from pootle.core.plugin import getter
from pootle.core.delegate import tp_tool
from pootle_app.models import Directory
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_store.models import Store
from pootle_translationproject.models import TranslationProject
from pootle_translationproject.utils import TPTool
@pytest.mark.django_db
def test_tp_create_fail(tutorial, english):
# Trying to create a TP with no Language raises a RelatedObjectDoesNotExist
# which can be caught with Language.DoesNotExist
with pytest.raises(Language.DoesNotExist):
TranslationProject.objects.create()
# TP needs a project set too...
with pytest.raises(Project.DoesNotExist):
TranslationProject.objects.create(language=english)
# There is already an english tutorial was automagically set up
with pytest.raises(IntegrityError):
TranslationProject.objects.create(project=tutorial, language=english)
@pytest.mark.django_db
def test_tp_create_templates(tutorial, klingon_vpw, templates):
# As there is a tutorial template it will automatically create stores for
# our new TP
template_tp = TranslationProject.objects.get(
language=templates, project=tutorial)
tp = TranslationProject.objects.create(
project=tutorial, language=klingon_vpw)
tp.init_from_templates()
assert tp.stores.count() == template_tp.stores.count()
assert (
[(s, t)
for s, t
in template_tp.stores.first().units.values_list("source_f",
"target_f")]
== [(s, t)
for s, t
in tp.stores.first().units.values_list("source_f",
"target_f")])
@pytest.mark.django_db
def test_tp_create_with_files(tutorial, klingon, settings):
# lets add some files by hand
trans_dir = settings.POOTLE_TRANSLATION_DIRECTORY
shutil.copytree(
os.path.join(trans_dir, "tutorial/en"),
os.path.join(trans_dir, "tutorial/kl"))
TranslationProject.objects.create(project=tutorial, language=klingon)
@pytest.mark.django_db
def test_tp_empty_stats():
"""Tests if empty stats is initialized when translation project (new language)
is added for a project with existing but empty template translation project.
"""
# Create an empty template translation project for project0.
project = Project.objects.get(code="project0")
english = Language.objects.get(code="en")
TranslationProjectFactory(project=project, language=english)
# Create a new language to test.
language = LanguageDBFactory()
tp = TranslationProject.objects.create(language=language, project=project)
tp.init_from_templates()
# There are no files on disk so TP was not automagically filled.
assert list(tp.stores.all()) == []
# Check if zero stats is calculated and available.
stats = tp.get_stats()
assert stats['total'] == 0
assert stats['translated'] == 0
assert stats['fuzzy'] == 0
assert stats['suggestions'] == 0
assert stats['critical'] == 0
assert not tp.is_dirty()
@pytest.mark.django_db
def test_tp_stats_created_from_template(tutorial, templates):
language = LanguageDBFactory()
tp = TranslationProject.objects.create(language=language, project=tutorial)
tp.init_from_templates()
assert tp.stores.all().count() == 1
stats = tp.get_stats()
assert stats['total'] == 2 # there are 2 words in test template
assert stats['translated'] == 0
assert stats['fuzzy'] == 0
assert stats['suggestions'] == 0
assert stats['critical'] == 0
assert not tp.is_dirty()
@pytest.mark.django_db
def test_can_be_inited_from_templates(tutorial, templates):
language = LanguageDBFactory()
tp = TranslationProject(project=tutorial, language=language)
assert tp.can_be_inited_from_templates()
@pytest.mark.django_db
def test_cannot_be_inited_from_templates():
language = LanguageDBFactory()
project = Project.objects.get(code='project0')
tp = TranslationProject(project=project, language=language)
assert not tp.can_be_inited_from_templates()
@pytest.mark.django_db
def test_tp_checker(tp_checker_tests):
language = Language.objects.get(code="language0")
checker_name_, project = tp_checker_tests
tp = TranslationProject.objects.create(project=project, language=language)
checkerclasses = [
checks.projectcheckers.get(tp.project.checkstyle,
checks.StandardChecker)
]
assert [x.__class__ for x in tp.checker.checkers] == checkerclasses
@pytest.mark.django_db
def test_tp_create_with_none_treestyle(english, templates, settings):
project = ProjectDBFactory(
source_language=english,
treestyle="none")
language = LanguageDBFactory()
TranslationProjectFactory(
language=templates, project=project)
tp = TranslationProject.objects.create(
project=project, language=language)
assert not tp.abs_real_path
assert not os.path.exists(
os.path.join(
settings.POOTLE_TRANSLATION_DIRECTORY,
project.code))
tp.save()
assert not tp.abs_real_path
assert not os.path.exists(
os.path.join(
settings.POOTLE_TRANSLATION_DIRECTORY,
project.code))
@pytest.mark.django_db
def test_tp_tool_move(language0, project0, templates):
tp = project0.translationproject_set.get(language=language0)
original_stores = list(tp.stores.all())
TPTool(project0).move(tp, templates)
assert tp.language == templates
assert (
tp.pootle_path
== tp.directory.pootle_path
== "/%s/%s/" % (templates.code, project0.code))
assert tp.directory.parent == templates.directory
# all of the stores and their directories are updated
for store in original_stores:
store = Store.objects.get(pk=store.pk)
assert store.pootle_path.startswith(tp.pootle_path)
assert store.parent.pootle_path.startswith(tp.pootle_path)
assert not Store.objects.filter(
pootle_path__startswith="/%s/%s"
% (language0.code, project0.code))
assert not Directory.objects.filter(
pootle_path__startswith="/%s/%s/"
% (language0.code, project0.code))
# calling with already set language does nothing
assert TPTool(project0).move(tp, templates) is None
@pytest.mark.django_db
def test_tp_tool_bad(tp0, templates, english):
other_project = ProjectDBFactory(source_language=english)
other_tp = TranslationProjectFactory(
project=other_project,
language=LanguageDBFactory())
tp_tool = TPTool(tp0.project)
with pytest.raises(ValueError):
tp_tool.check_tp(other_tp)
with pytest.raises(ValueError):
tp_tool.set_parents(tp0.directory, other_tp.directory)
with pytest.raises(ValueError):
tp_tool.set_parents(other_tp.directory, tp0.directory)
with pytest.raises(ValueError):
tp_tool.move(other_tp, templates)
with pytest.raises(ValueError):
tp_tool.clone(other_tp, templates)
with pytest.raises(ValueError):
# cant set tp to a language if a tp already exists
tp_tool.move(
tp0, Language.objects.get(code="language1"))
with pytest.raises(ValueError):
# cant clone tp to a language if a tp already exists
tp_tool.clone(
tp0, Language.objects.get(code="language1"))
def _test_tp_match(source_tp, target_tp):
source_stores = []
for store in source_tp.stores.live():
source_stores.append(store.pootle_path)
update_path = (
"/%s/%s"
% (target_tp.language.code,
store.pootle_path[(len(source_tp.language.code) + 2):]))
updated = Store.objects.get(pootle_path=update_path)
assert store.state == updated.state
updated_units = updated.units
for i, unit in enumerate(store.units):
updated_unit = updated_units[i]
assert unit.source == updated_unit.source
assert unit.target == updated_unit.target
assert unit.state == updated_unit.state
for store in target_tp.stores.live():
source_path = (
"/%s/%s"
% (source_tp.language.code,
store.pootle_path[(len(target_tp.language.code) + 2):]))
assert source_path in source_stores
@pytest.mark.django_db
def test_tp_tool_clone(tp0, templates):
new_lang = LanguageDBFactory()
tp_tool = TPTool(tp0.project)
_test_tp_match(tp0, tp_tool.clone(tp0, new_lang))
@pytest.mark.django_db
def test_tp_tool_update(tp0, templates):
new_lang = LanguageDBFactory()
tp0_tool = TPTool(tp0.project)
new_tp = tp0.project.translationproject_set.create(
language=new_lang)
# this will clone stores/directories as new_tp is empty
tp0_tool.update_from_tp(tp0, new_tp)
_test_tp_match(tp0, new_tp)
tp0_tool.update_from_tp(tp0, new_tp)
tp0.stores.first().delete()
tp0.stores.first().units.first().delete()
unit = tp0.stores.first().units.first()
unit.target = "NEW TARGET"
unit.save()
tp0_tool.update_from_tp(tp0, new_tp)
_test_tp_match(tp0, new_tp)
# doing another update does nothing
tp0_tool.update_from_tp(tp0, new_tp)
_test_tp_match(tp0, new_tp)
@pytest.mark.django_db
def test_tp_tool_getter(project0):
assert tp_tool.get(Project) is TPTool
assert isinstance(project0.tp_tool, TPTool)
@pytest.mark.django_db
def test_tp_tool_custom_getter(project0, no_tp_tool_):
class CustomTPTool(TPTool):
pass
@getter(tp_tool, sender=Project, weak=False)
def custom_tp_tool_getter(**kwargs_):
return CustomTPTool
assert tp_tool.get(Project) is CustomTPTool
assert isinstance(project0.tp_tool, CustomTPTool)
| gpl-3.0 | -3,743,231,149,240,998,000 | 31.711599 | 82 | 0.676186 | false | 3.714845 | true | false | false |
schollz/extract_recipe | build_database.py | 1 | 6286 | from dbcommands import *
import logging
import json
from recipe import *
import os.path
import sys
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename='log',
filemode='a')
DATABASE_PATH = 'cookbook'
DB = DataBase(DATABASE_PATH)
'''
recipes = "'title':title,\\\n"
recipes = recipes + "'url':url,\\\n"
recipes = recipes + "'source':source,\\"
recipes = recipes + "'directions':directions,\\\n"
recipes = recipes + "'time':time,\\\n"
recipes = recipes + "'total_cost':total_cost,\\\n"
recipes = recipes + "'total_cost_per_serving':total_cost_per_serving,\\\n"
recipes = recipes + "'serving_size':serving_size,\\\n"
recipes = recipes + "'total_grams':total_grams,\\\n"
recipes = recipes + "'num_ingredients':num_ingredients,\\\n"
with open('list_of_nutrients.txt','r') as f:
for line in f:
recipes = recipes + "'" + formatIngredientKey(line) + "':0,\\\n"
print(recipes)
'''
logger = logging.getLogger('build_database.createTable')
recipes = 'recipes (id INTEGER PRIMARY KEY AUTOINCREMENT, '
recipes = recipes + 'title TEXT, '
recipes = recipes + 'url TEXT UNIQUE, '
recipes = recipes + 'source TEXT, '
recipes = recipes + 'directions TEXT, '
recipes = recipes + 'time TEXT, '
recipes = recipes + 'total_cost REAL, '
recipes = recipes + 'total_cost_per_serving REAL, '
recipes = recipes + 'serving_size REAL, '
recipes = recipes + 'total_grams REAL, '
recipes = recipes + 'num_ingredients INTEGER, '
with open('list_of_nutrients.txt','r') as f:
for line in f:
recipes = recipes + formatIngredientKey(line) + " REAL,"
recipes = recipes[:-1] + ')'
if not DB.tableExists('recipes'):
logger.warning('"recipes" table not found')
logger.info('Creating "recipes" table...')
DB.createTable(recipes)
else:
logger.debug('Table "recipes" found')
logger = logging.getLogger('build_database.createIngredients')
recipes = 'ingredients (id INTEGER PRIMARY KEY AUTOINCREMENT, '
recipes = recipes + 'recipe_id INTEGER, '
recipes = recipes + 'ingredient_uuid TEXT UNIQUE, '
recipes = recipes + 'actual TEXT, '
recipes = recipes + 'measurement TEXT, '
recipes = recipes + 'description TEXT, '
recipes = recipes + 'ndb_no TEXT, '
recipes = recipes + 'cost REAL, '
recipes = recipes + 'grams REAL)'
if not DB.tableExists('ingredients'):
logger.warning('"ingredients" table not found')
logger.info('Creating "ingredients" table...')
DB.createTable(recipes)
else:
logger.debug('Table "ingredients" found')
'''
def newRecipe( self,\
title,\
url,\
source,\
directions,\
time,\
total_cost,\
total_cost_per_serving,\
serving_size,\
total_grams,\
num_ingredients):
'''
'''
'recipe_id':recipe_id,\
'ingredient_uuid':ingredient_uuid,\
'actual':actual,\
'measurement':measurement,\
'description':description,\
'ndb_no':ndb_no,\
'cost':cost,\
'grams':grams\
'''
startNum = 9620
logger = logging.getLogger('build_database.building')
with open('get_recipes/recipes/index0_10.txt','r') as f:
for line in f:
#try:
try:
data = line.strip().split()
recipeNum = int(data[0])
url = data[1]
title = ' '.join(data[2:])
except:
recipeNum = 0
file = 'get_recipes/recipes/' + str(recipeNum/500) + '/' + str(recipeNum) + '.md'
if recipeNum>startNum and os.path.isfile(file):
logger.info(line)
try:
a = Recipe('get_recipes/recipes/' + str(recipeNum/500) + '/' + str(recipeNum) + '.md')
recipe = a.returnJson()
recipe['url'] = url
recipe['title'] = title
# Insert the new recipe
try:
recipeID = DB.newRecipe(recipe['title'],recipe['url'],recipe['source'],recipe['directions'],recipe['time'],recipe['total_cost'],recipe['total_cost_per_serving'],recipe['serving_size'],recipe['total_grams'],len(recipe['ingredients']))
except:
recipeID = DB.getRecipeIDfromURL(recipe['url'])
# Update the nutrients
for nutritionClass in recipe['nutrition'].keys():
for nutrient in recipe['nutrition'][nutritionClass].keys():
DB.updateIngredient(nutrient,recipe['nutrition'][nutritionClass][nutrient],recipeID)
# Insert the ingredients
for ingredient in recipe['ingredients']:
try:
actual = ingredient['actual']
ingredient_uuid = recipe['url']+ingredient['ndb_no']
measurement = ingredient['measurement']
description = ingredient['description']
ndb_no = ingredient['ndb_no']
cost = ingredient['cost']
grams = ingredient['grams']
foo = DB.addIngredient(recipeID,ingredient_uuid,actual,measurement,description,ndb_no,cost,grams)
except:
logger.warning("ingredient already exists")
except:
				logger.error("Unexpected error: %s", sys.exc_info()[0])
'''
recipe = Recipe(sys.argv[1])
recipe['url']='asdlfkj'
try:
recipeID = DB.newRecipe(recipe['title'],recipe['url'],recipe['source'],recipe['directions'],recipe['time'],recipe['total_cost'],recipe['total_cost_per_serving'],recipe['serving_size'],recipe['total_grams'],len(recipe['ingredients']))
except:
recipeID = DB.getRecipeIDfromURL(recipe['url'])
for nutritionClass in recipe['nutrition'].keys():
for nutrient in recipe['nutrition'][nutritionClass].keys():
DB.updateIngredient(nutrient,recipe['nutrition'][nutritionClass][nutrient],recipeID)
for ingredient in recipe['ingredients']:
print(ingredient)
actual = ingredient['actual']
ingredient_uuid = recipe['url']+ingredient['ndb_no']
measurement = ingredient['measurement']
description = ingredient['description']
ndb_no = ingredient['ndb_no']
cost = ingredient['cost']
grams = ingredient['grams']
foo = DB.addIngredient(recipeID,ingredient_uuid,actual,measurement,description,ndb_no,cost,grams)
break
'''
| apache-2.0 | 6,716,819,749,854,889,000 | 34.546512 | 243 | 0.61629 | false | 3.340064 | false | false | false |
sigmunau/nav | python/nav/ipdevpoll/plugins/virtualrouter.py | 1 | 2946 | #
# Copyright (C) 2012 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""ipdevpoll plugin to poll Cisco HSRP address information"""
from IPy import IP
from twisted.internet import defer
from nav.ipdevpoll import Plugin
from nav.ipdevpoll.shadows import GwPortPrefix
from nav.mibs.vrrp_mib import VRRPMib
from nav.mibs.cisco_hsrp_mib import CiscoHSRPMib
class VirtualRouter(Plugin):
"""ipdevpoll plugin to collect Virtual Router addresses from VRRP and
HSRP routers.
These addresses are marked as virtual in NAV database,
and will ensure that networks with redundant routers aren't classified
incorrectly as link networks.
This plugin will only update existing addresses that were collected by a
plugin that ran before this one in the same job (such as the Prefix
plugin). This is to ensure we don't create addresses that aren't active
on the router.
"""
@classmethod
def can_handle(cls, netbox):
daddy_says_ok = super(VirtualRouter, cls).can_handle(netbox)
return daddy_says_ok and netbox.category.id in ('GW', 'GSW')
def __init__(self, *args, **kwargs):
super(VirtualRouter, self).__init__(*args, **kwargs)
self.mibs = [mib(self.agent) for mib in (CiscoHSRPMib, VRRPMib)]
@defer.inlineCallbacks
def handle(self):
"""Handles address collection"""
if self.gwportprefixes_found():
mibs = []
virtual_addrs = set()
for mib in self.mibs:
addrs_from_mib = yield mib.get_virtual_addresses()
virtual_addrs.update(addrs_from_mib)
if addrs_from_mib:
mibs.append(mib.mib['moduleName'])
self.update_containers_with(virtual_addrs, mibs)
def gwportprefixes_found(self):
if GwPortPrefix not in self.containers:
self._logger.debug("there are no collected GwPortPrefixes to "
"update")
return False
else:
return True
def update_containers_with(self, addresses, from_mib=None):
if addresses:
self._logger.debug("Found virtual addresses from %s: %r",
from_mib, addresses)
for gwp_prefix in self.containers[GwPortPrefix].values():
gwp_prefix.virtual = IP(gwp_prefix.gw_ip) in addresses
| gpl-2.0 | -2,532,902,995,663,710,000 | 36.769231 | 77 | 0.668364 | false | 3.959677 | false | false | false |
Jazende/ProjectEuler | problem_038.py | 1 | 1397 | ##Take the number 192 and multiply it by each of 1, 2, and 3:
##
##192 × 1 = 192
##192 × 2 = 384
##192 × 3 = 576
##
##By concatenating each product we get the 1 to 9 pandigital, 192384576.
##We will call 192384576 the concatenated product of 192 and (1,2,3)
##
##The same can be achieved by starting with 9 and multiplying by
##1, 2, 3, 4, and 5, giving the pandigital, 918273645,
##which is the concatenated product of 9 and (1,2,3,4,5).
##
##What is the largest 1 to 9 pandigital 9-digit number that can be
##formed as the concatenated product of an integer with (1,2, ... , n)
##where n > 1?
check_set = [x for x in range(1, 10)]
def concat(number):
result = ""
count = 1
while True:
result += str(number*count)
count += 1
if len(result) > 9:
return False
if len(result) == 9 == len(set([int(x) for x in result])) and count > 1:
if sorted([int(x) for x in list(result)]) == sorted(check_set):
return int(result)
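# Worked example: concat(192) concatenates 192*1, 192*2, 192*3 into
# "192384576", which uses each digit 1-9 exactly once, so 192384576 is
# returned; concat(11) grows past 9 digits without ever being pandigital and
# returns False.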
def problem_38():
cur_max = 0
cur_value = 0
for x in range(1, 99999):
if x % 1000000 == 0:
print(x)
value = concat(x)
if not value is False:
if value > cur_max:
cur_max = value
cur_value = x
print(cur_value, cur_max)
return (cur_value, cur_max)
problem_38()
| gpl-3.0 | 3,013,663,619,253,840,400 | 28.659574 | 80 | 0.563845 | false | 3.295508 | false | false | false |
vallemrv/tpvB3 | receptor/valle_libs/components/pagenavigations.py | 2 | 3118 | # @Author: Manuel Rodriguez <valle>
# @Date: 14-Jul-2017
# @Email: [email protected]
# @Filename: pagenavigations.py
# @Last modified by: valle
# @Last modified time: 13-Aug-2017
# @License: Apache license vesion 2.0
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import (StringProperty, ListProperty, NumericProperty,
ObjectProperty, DictProperty)
from kivy.animation import Animation
from kivy.lang import Builder
import components.resources as res
Builder.load_file(res.get_kv("pagenavigations"))
class MainPage(RelativeLayout):
title = StringProperty('')
title_bgColor = StringProperty("#ffffff")
page_manager = ObjectProperty(None)
show = ObjectProperty(None)
def __init__(self, **kargs):
super(MainPage, self).__init__(**kargs)
def add_widget(self, widget):
if len(self.children) < 1:
super(MainPage, self).add_widget(widget)
else:
self.content_page.add_widget(widget)
class Page(RelativeLayout):
title = StringProperty('')
title_bgColor = StringProperty("#ffffff")
id_page = StringProperty("")
bgColor = StringProperty("#ffffff")
show = ObjectProperty(None)
def add_widget(self, widget):
if len(self.children) < 1:
super(Page, self).add_widget(widget)
else:
self.content_page.add_widget(widget)
def collide_point(self, x, y):
return (x > self.x and x < self.x +self.width) and (y > self.y and y < self.y +self.height)
def on_touch_down(self, touch, *args):
super(Page, self).on_touch_down(touch)
if self.collide_point(touch.x, touch.y):
return True
class PageManager(FloatLayout):
pages = DictProperty({})
stack_pages = ListProperty([])
bgColor = StringProperty('#FFFFFF')
def __init__(self, **kargs):
super(PageManager, self).__init__(**kargs)
def add_widget(self, widget):
widget.page_manager = self
if self.__esPage__(widget, MainPage):
self.stack_pages.append(widget)
elif self.__esPage__(widget, Page):
widget.bind(id_page=self.on_id_pages)
super(PageManager,self).add_widget(widget)
def on_width(self, w, val):
for child in self.pages.values():
child.pos = val +10, 0
def on_id_pages(self, w, val):
self.pages[val] = w
def navigate(self, nav):
if nav in self.pages:
w = self.pages[nav]
self.stack_pages.append(self.pages[nav])
self.remove_widget(w)
self.add_widget(w)
ai = Animation(x=0, duration=.1)
ai.start(w)
if w.show:
w.show(self)
def back_page(self):
w = self.stack_pages.pop()
ai = Animation(x=self.width+10, duration=.1)
ai.start(w)
def __esPage__(self, widget, clase):
esPage = type(widget) == clase
for base in widget.__class__.__bases__:
esPage = esPage or (base == clase)
return esPage
| apache-2.0 | 1,133,843,800,251,685,900 | 28.695238 | 98 | 0.609044 | false | 3.555302 | false | false | false |
dlzhangxg/cloud-ml-sdk | cloud_ml_sdk/cloud_ml_sdk/models/train_job.py | 1 | 15082 | # Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from . import constant
from . import util
class TrainJob(object):
"""Class for train method.
A TrainJob instance provides variables getter and setter apis. After
specifying the necessary parameters, users can call start_run func to start
the train job.
"""
def __init__(self,
job_name,
module_name,
trainer_uri,
job_args=None,
cpu_limit=None,
gpu_limit=None,
memory_limit=None,
ps_count=None,
worker_count=None,
framework=None,
framework_version=None,
docker_image=None,
docker_command=None,
volume_type=None,
volume_path=None,
mount_path=None,
mount_read_only=None,
prepare_command=None,
finish_command=None,
node_selector_key=None,
node_selector_value=None):
"""Creates a new TrainJob with given definition.
The `job_name`, `module_name` and `trainer_uri` arguments must be provided
    when the object is created.
Args:
job_name: The name of specific job.
module_name: The name of module.
trainer_uri: The uri that save the source code of job.
"""
self.job_name = job_name
self.module_name = module_name
self.trainer_uri = trainer_uri
self.job_args = job_args
self.cpu_limit = cpu_limit
self.memory_limit = memory_limit
self.gpu_limit = gpu_limit
self.ps_count = ps_count
self.worker_count = worker_count
self.framework = framework
self.framework_version = framework_version
self.docker_image = docker_image
self.docker_command = docker_command
self.volume_type = volume_type
self.volume_path = volume_path
self.mount_path = mount_path
self.mount_read_only = mount_read_only
self.prepare_command = prepare_command
self.finish_command = finish_command
self.node_selector_key = node_selector_key
self.node_selector_value = node_selector_value
@property
def job_name(self):
return self._job_name
@job_name.setter
def job_name(self, value):
"""Function for setting job_name.
Args:
value: String type value that is going to be set to job_name. Which
cannot be empty.
Raises:
ValueError: If value is not str instance or empty.
"""
if not isinstance(value, str):
raise ValueError("job_name must be a string!")
if value == "":
raise ValueError("job_name cannot be None!")
if not util.check_kube_resource_name_regex(value):
raise StandardError("job_name must match {}.".format(
util.kube_resource_name_regex))
self._job_name = value
@property
def module_name(self):
return self._module_name
@module_name.setter
def module_name(self, value):
"""Function for setting module_name.
Args:
value: String type value that is going to be set to module_name. Which
cannot be empty.
Raises:
ValueError: If value is not str instance or empty.
"""
if not isinstance(value, str):
raise ValueError("module_name must be a string!")
if value == "":
raise ValueError("module_name cannot be None!")
self._module_name = value
@property
def trainer_uri(self):
return self._trainer_uri
@trainer_uri.setter
def trainer_uri(self, value):
"""Function for setting trainer_uri.
Args:
value: String type value that is going to be set to trainer_uri. Which
cannot be empty.
Raises:
      ValueError: If value is not a str instance.
"""
if not isinstance(value, str):
raise ValueError("trainer_uri must be a string!")
self._trainer_uri = value
@property
def job_args(self):
return self._job_args
@job_args.setter
def job_args(self, value):
"""Function for setting job_args.
Args:
value: The job arguments.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("job_args must be a string!")
self._job_args = value
@property
def cpu_limit(self):
return self._cpu_limit
@cpu_limit.setter
def cpu_limit(self, value):
"""Function for setting cpu_limit.
Args:
value: Cpu limit.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("cpu_limit must be a string!")
if not value.replace(".", "", 1).isdigit():
raise ValueError("cpu_limit must be a number!")
digits = value.split(".")
if len(digits) == 2 and len(digits[1]) > constant.QUOTA_ACCURACY_PLACE:
raise StandardError(
"The value of cpu_limit accurate to two decimal places, for example: {}".format(
round(
float(value), constant.QUOTA_ACCURACY_PLACE)))
self._cpu_limit = value
@property
def memory_limit(self):
return self._memory_limit
@memory_limit.setter
def memory_limit(self, value):
"""Function for setting memory_limit.
Args:
value: Memory limit.
Raises:
ValueError: Doesn't end with K, M or G.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("memory_limit must be a string")
unit = value[-1:]
float_value = value[:-1]
if unit not in constant.CLOUDML_MEMORY_UNITS:
raise ValueError("memory_limit unit must be one of %s!" %
constant.CLOUDML_MEMORY_UNITS)
if not float_value.replace(".", "", 1).isdigit():
raise ValueError("memory_limit must be a number!")
digits = float_value.split(".")
if len(digits) == 2 and len(digits[1]) > constant.QUOTA_ACCURACY_PLACE:
raise StandardError(
"The value of memory_limit accurate to two decimal places, for example: {}".format(
round(
float(float_value), constant.QUOTA_ACCURACY_PLACE)))
self._memory_limit = value
@property
def gpu_limit(self):
return self._gpu_limit
@gpu_limit.setter
def gpu_limit(self, value):
"""Function for setting gpu_limit.
Args:
value: GPU limit.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("gpu_limit must be a postive integer!")
self._gpu_limit = value
@property
def ps_count(self):
return self._ps_count
@ps_count.setter
def ps_count(self, value):
"""Function for setting ps_count.
Args:
value: TensorFlow PS count.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("ps_count must be a positive integer!")
self._ps_count = value
@property
def worker_count(self):
return self._worker_count
@worker_count.setter
def worker_count(self, value):
"""Function for setting worker_count.
Args:
value: TensorFlow worker count.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("worker_count must be a positive integer!")
self._worker_count = value
@property
def framework(self):
return self._framework
@framework.setter
def framework(self, value):
"""Function for setting framework.
Args:
value: The framework.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Must be a string!")
self._framework = value
@property
def framework_version(self):
return self._framework_version
@framework_version.setter
def framework_version(self, value):
"""Function for setting version of framework.
Args:
value: The version of framework.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Must be a string!")
self._framework_version = value
@property
def docker_image(self):
return self._docker_image
@docker_image.setter
def docker_image(self, value):
"""Function for setting docker_image.
Args:
value: The docker_image.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Must be a string!")
self._docker_image = value
@property
def docker_command(self):
return self._docker_command
@docker_command.setter
def docker_command(self, value):
"""Function for setting docker_command.
Args:
value: The docker_command.
Raises:
ValueError: If value is not string instance.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Must be a string!")
self._docker_command = value
@property
def volume_type(self):
return self._volume_type
@volume_type.setter
def volume_type(self, value):
"""Function for set.
Args:
value: String value.
Raises:
ValueError: If value is not str instance or empty.
"""
if value == "":
raise ValueError("Volume type can not be None!")
self._volume_type = value
@property
def volume_path(self):
return self._volume_path
@volume_path.setter
def volume_path(self, value):
"""Function for set.
Args:
value: String value.
Raises:
ValueError: If value is not str instance or empty.
"""
if value == "":
raise ValueError("Volume path can not be None!")
self._volume_path = value
@property
def mount_path(self):
return self._mount_path
@mount_path.setter
def mount_path(self, value):
"""Function for set.
Args:
value: String value.
Raises:
ValueError: If value is not str instance or empty.
"""
if value == "":
raise ValueError("Mount path can not be None!")
self._mount_path = value
@property
def mount_read_only(self):
return self._mount_read_only
@mount_read_only.setter
def mount_read_only(self, value):
"""Function for set.
Args:
value: Boolean value.
Raises:
ValueError: If value is not boolean instance or empty.
"""
if value != None and type(value) != bool:
raise ValueError("Mount read only should be boolean!")
self._mount_read_only = value
@property
def prepare_command(self):
return self._prepare_command
@prepare_command.setter
def prepare_command(self, value):
"""Function for set prepare_command.
Args:
value: String value.
Raises:
ValueError: If value is not string instance or empty.
"""
if value == "":
raise ValueError("Prepare command can not be None!")
self._prepare_command = value
@property
def finish_command(self):
return self._finish_command
@finish_command.setter
def finish_command(self, value):
"""Function for set finish_command.
Args:
value: String value.
Raises:
ValueError: If value is not string instance or empty.
"""
if value == "":
raise ValueError("Finish command can not be None!")
self._finish_command = value
@property
def node_selector_key(self):
return self._node_selector_key
@node_selector_key.setter
def node_selector_key(self, value):
"""Function for set node_selector_key.
Args:
value: String value.
Raises:
ValueError: If value is not string instance or empty.
"""
if value == "":
raise ValueError("Node selector key can not be None!")
self._node_selector_key = value
@property
def node_selector_value(self):
return self._node_selector_value
@node_selector_value.setter
def node_selector_value(self, value):
"""Function for set node_selector_value.
Args:
value: String value.
Raises:
ValueError: If value is not string instance or empty.
"""
if value == "":
raise ValueError("Node selector value can not be None!")
self._node_selector_value = value
    def get_json_data(self):
        """Get the needed train job data after setting necessary variables.
        Returns:
            data: The json data which is necessary for the train job.
        Raises:
            ValueError: If endpoint is not a string starting with `http://`.
                If _job_name, _module_name or _trainer_uri is empty.
        """
        data = {
            "job_name": self._job_name,
            "module_name": self._module_name,
            "trainer_uri": self._trainer_uri,
        }
        if self._job_args is not None:
            data["job_args"] = self._job_args
        if self._cpu_limit is not None:
            data["cpu_limit"] = self._cpu_limit
        if self._memory_limit is not None:
            data["memory_limit"] = self._memory_limit
        if self._gpu_limit is not None:
            data["gpu_limit"] = self._gpu_limit
        if self._ps_count is not None:
            data["ps_count"] = self._ps_count
        if self._worker_count is not None:
            data["worker_count"] = self._worker_count
        if self._docker_image is not None:
            data["docker_image"] = self._docker_image
        if self._docker_command is not None:
            data["docker_command"] = self._docker_command
        if self._framework is not None:
            data["framework"] = self._framework
        if self._framework_version is not None:
            data["framework_version"] = self._framework_version
        if self._volume_type is not None:
            data["volume_type"] = self._volume_type
        if self._volume_path is not None:
            data["volume_path"] = self._volume_path
        if self._mount_path is not None:
            data["mount_path"] = self._mount_path
        if self._mount_read_only is not None:
            data["mount_read_only"] = self._mount_read_only
        if self._prepare_command:
            data["prepare_command"] = self._prepare_command
        if self._finish_command:
            data["finish_command"] = self._finish_command
        if self._node_selector_key:
            data["node_selector_key"] = self._node_selector_key
        if self._node_selector_value:
            data["node_selector_value"] = self._node_selector_value
        return json.dumps(data)
| apache-2.0 | -5,845,657,298,114,418,000 | 26.174775 | 95 | 0.628763 | false | 4.006908 | false | false | false |
lrt512/emol | emol/emol/initialize/errors.py | 1 | 1517 | # -*- coding: utf-8 -*-
"""Set up custom error handlers."""
# standard library imports
# third-party imports
from flask import render_template, request, Response, current_app
# application imports
def init_error_handlers():
"""Custom error pages for the app."""
current_app.logger.info('Initialize error handling')
# pylint really hates these 'unused' decorated functions.
# In reality, they are callbacks for Flask
# pylint: disable=unused-variable
@current_app.errorhandler(404)
def not_found(error):
"""Custom 404 handler to return error page."""
current_app.logger.debug(error)
if len(request.form) > 0:
# Requests with form data are likely AJAX
return Response(None, 404)
return render_template('errors/404.html', http_error=True), 404
@current_app.errorhandler(403)
def forbidden(error):
"""Custom 404 handler to return error page."""
        current_app.logger.debug(error)
        return render_template('errors/403.html', http_error=True), 403
    @current_app.errorhandler(401)
    def unauthorized(error):
        """Custom 401 handler to return error page."""
        current_app.logger.debug(error)
        return render_template('errors/401.html', http_error=True), 401
    @current_app.errorhandler(500)
    def uhoh(error):
        """Custom 500 handler to return error page."""
        current_app.logger.error(error)
        return render_template('errors/500.html', http_error=True), 500
| mit | 3,753,175,237,952,431,000 | 32.711111 | 71 | 0.66381 | false | 4.023873 | false | false | false |
numbas/editor | first_setup.py | 1 | 12406 | import random
import re
import os
import traceback
import urllib.parse
import importlib
def print_notice(s):
    print('\033[92m'+s+'\033[0m\n')
def path_exists(path):
    if not os.path.exists(path):
        answer = input("That path doesn't exist. Create it? [y/n]").strip().lower()
        if answer=='y':
            os.makedirs(path)
            return True
        else:
            return False
    else:
        return True
class Question(object):
    def __init__(self, key, question, default, validation=None):
        self.key = key
        self.question = question
        self.default = default
        self.validation = validation
    def get_default(self, values):
        if callable(self.default):
            return self.default(values)
        else:
            return self.default
    def validate(self, value):
        return self.validation is None or self.validation(value)
class Command(object):
    questions = [
        Question('DEBUG', 'Is this installation for development?', False),
        Question('NUMBAS_PATH', 'Path of the Numbas compiler:','/srv/numbas/compiler/', validation=path_exists),
        Question('DB_ENGINE', 'Which database engine are you using? (Common options: postgres, mysql, sqlite3)', lambda v: 'sqlite3' if v['DEBUG'] else 'mysql'),
        Question('STATIC_ROOT', 'Where are static files stored?','/srv/numbas/static/', validation=path_exists),
        Question('MEDIA_ROOT', 'Where are uploaded files stored?','/srv/numbas/media/', validation=path_exists),
        Question('PREVIEW_PATH', 'Where are preview exams stored?','/srv/numbas/previews/', validation=path_exists),
        Question('PREVIEW_URL', 'Base URL of previews:','/numbas-previews/'),
        Question('PYTHON_EXEC', 'Python command:','python3'),
        Question('SITE_TITLE', 'Title of the site:','Numbas'),
        Question('ALLOW_REGISTRATION', 'Allow new users to register themselves?', True),
        Question('DEFAULT_FROM_EMAIL', 'Address to send emails from:', ''),
    ]
    db_questions = [
        Question('DB_NAME', 'Name of the database:','numbas_editor'),
        Question('DB_USER', 'Database user:', 'numbas_editor'),
        Question('DB_PASSWORD', 'Database password:', ''),
        Question('DB_HOST', 'Database host:', 'localhost'),
    ]
    sqlite_template = """DATABASES = {{
    'default': {{
        'ENGINE': 'django.db.backends.{DB_ENGINE}',
        'NAME': os.path.join(BASE_DIR, '{DB_NAME}'),
    }}
}}"""
    other_db_template = """DATABASES = {{
    'default': {{
        'ENGINE': 'django.db.backends.{DB_ENGINE}',
        'NAME': '{DB_NAME}',
        'USER': '{DB_USER}',
        'PASSWORD': '{DB_PASSWORD}',
        'HOST': '{DB_HOST}',
    }}
}}"""
    def __init__(self):
        self.written_files = []
    def handle(self):
        print_notice("This script will configure the Numbas editor up to a point where you can open it in a web browser, based on your answers to the following questions.")
        self.get_values()
        self.write_files()
        import numbas.settings
        importlib.reload(numbas.settings)
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "numbas.settings")
        print_notice("Now we'll check that everything works properly")
        self.run_management_command('check')
        if self.get_input('Would you like to automatically set up the database now?',True):
            self.run_management_command('migrate')
        import django
        django.setup()
        self.setup_site()
        from django.contrib.auth.models import User
        superusers = User.objects.filter(is_superuser=True)
        if superusers.exists():
            if self.get_input("There's already at least one admin user.\nWould you like to create another admin user now?",False):
                self.run_management_command('createsuperuser')
        else:
            if self.get_input('Would you like to create an admin user now?',True):
                self.run_management_command('createsuperuser')
        print_notice("Done!")
        if self.values['DEBUG']:
            print_notice("Run\n python manage.py runserver\nto start a development server at http://localhost:8000.")
        else:
            self.run_management_command('collectstatic')
            print_notice("The Numbas editor is now set up. Once you've configured your web server, it'll be ready to use at http://{}".format(self.domain))
    def setup_site(self):
        from django.contrib.sites.models import Site
        try:
            domain = Site.objects.first().domain
        except Site.DoesNotExist:
            domain = 'numbas.example.com'
        domain = self.get_input('What domain will the site be accessed from?', domain)
        try:
            url = urllib.parse.urlparse(domain)
            self.domain = url.netloc if url.netloc else domain
        except ValueError:
            self.domain = domain
        s, created = Site.objects.get_or_create(domain=self.domain)
        s.name = self.values['SITE_TITLE']
        self.rvalues['SITE_ID'] = str(s.id)
        s.save()
        self.sub_settings(confirm_overwrite=False)
        import numbas.settings
        importlib.reload(numbas.settings)
    def get_values(self):
        self.values = {}
        self.values['SECRET_KEY'] =''.join(random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50))
        self.values['PWD'] = os.getcwd()
        for question in self.questions:
            self.get_value(question)
            if question.key=='DB_ENGINE':
                if 'sqlite' not in self.values[question.key]:
                    for question in self.db_questions:
                        self.get_value(question)
                else:
                    self.get_value(Question('DB_NAME', 'Name of the database file:','db.sqlite3'))
        def enrep(value):
            rep = repr(value)
            if isinstance(value,str):
                rep = rep[1:-1]
            return rep
        self.values['SITE_ID'] = self.get_default_value(Question('SITE_ID','','1'))
        self.rvalues = {key: enrep(value) for key, value in self.values.items()}
    def get_default_value(self, question):
        default = question.get_default(self.values)
        if os.path.exists('numbas/settings.py'):
            import numbas.settings
            try:
                if question.key=='DB_ENGINE':
                    default = numbas.settings.DATABASES['default']['ENGINE'].replace('django.db.backends.', '')
                elif question.key[:3]=='DB_' and question.key[3:] in numbas.settings.DATABASES['default']:
                    default = numbas.settings.DATABASES['default'][question.key[3:]]
                else:
                    try:
                        default = getattr(numbas.settings, question.key)
                    except AttributeError:
                        default = numbas.settings.GLOBAL_SETTINGS[question.key]
                    if isinstance(default,list):
                        default = default[0] if len(default)==1 else ''
            except (AttributeError,KeyError):
                pass
        return default
    def get_value(self, question):
        self.values[question.key] = self.get_input(question.question, self.get_default_value(question), question.validation)
    def write_files(self):
        self.sub_settings()
        if not self.values['DEBUG']:
            self.sub_file('web/django.wsgi',[ (r"sys.path.append\('(.*?)'\)", 'PWD') ])
        index_subs = [
            (r"Welcome to (the Numbas editor)", 'SITE_TITLE'),
        ]
        self.sub_file('editor/templates/index_message.html', index_subs)
        self.sub_file('editor/templates/terms_of_use_content.html', [])
        self.sub_file('editor/templates/privacy_policy_content.html', [])
        if len(self.written_files):
            print_notice("The following files have been written. You should look at them now to see if you need to make any more changes.")
            for f in self.written_files:
                print_notice(' * '+f)
            print('')
    def sub_settings(self, confirm_overwrite=True):
        def set_database(m, rvalues):
            template = self.sqlite_template if 'sqlite' in rvalues['DB_ENGINE'] else self.other_db_template
            return template.format(**rvalues)
        settings_subs = [
            (r"^DEBUG = (True)", 'DEBUG'),
            (r"'NUMBAS_PATH': '(.*?)',", 'NUMBAS_PATH'),
            (r"^STATIC_ROOT = '(static/)'", 'STATIC_ROOT'),
            (r"^MEDIA_ROOT = '(media/)'", 'MEDIA_ROOT'),
            (r"'PREVIEW_PATH': '(.*?)'", 'PREVIEW_PATH'),
            (r"'PREVIEW_URL': '(.*?)',", 'PREVIEW_URL'),
            (r"'PYTHON_EXEC': '(.*?)',", 'PYTHON_EXEC'),
            (r"^SITE_TITLE = '(.*?)'", 'SITE_TITLE'),
            (r"^DATABASES = {.*?^}", set_database),
            (r"^SECRET_KEY = '(.*?)'", 'SECRET_KEY'),
            (r"^ALLOW_REGISTRATION = (True|False)", 'ALLOW_REGISTRATION'),
            (r"^DEFAULT_FROM_EMAIL = '(.*?)'", 'DEFAULT_FROM_EMAIL'),
            (r"^SITE_ID = (\d+)", 'SITE_ID'),
        ]
        self.sub_file('numbas/settings.py', settings_subs, confirm_overwrite)
    def sub_file(self, fname, subs, confirm_overwrite=True):
        if os.path.exists(fname) and confirm_overwrite:
            overwrite = self.get_input("{} already exists. Overwrite it?".format(fname),True)
            if not overwrite:
                return
        self.written_files.append(fname)
        with open(fname+'.dist') as f:
            text = f.read()
        for pattern, key in subs:
            pattern = re.compile(pattern, re.MULTILINE | re.DOTALL)
            if callable(key):
                text = self.sub_fn(text, pattern, key)
            else:
                text = self.sub(text,pattern,self.rvalues[key])
        with open(fname,'w') as f:
            f.write(text)
        print("Wrote",fname)
    def sub_fn(self, source, pattern, fn):
        m = pattern.search(source)
        if not m:
            raise Exception("Didn't find {}".format(pattern.pattern))
        start, end = m.span(0)
        out = fn(m, self.rvalues)
        return source[:start]+out+source[end:]
    def sub(self, source, pattern, value):
        def fix(m):
            t = m.group(0)
            start, end = m.span(1)
            ts,te = m.span(0)
            start -= ts
            end -= ts
            return t[:start]+value+t[end:]
        if not pattern.search(source):
            raise Exception("Didn't find {}".format(pattern.pattern))
        return pattern.sub(fix, source)
    def run_management_command(self, *args):
        from django.core.management import ManagementUtility
        args = ['manage.py'] + list(args)
        utility = ManagementUtility(args)
        try:
            utility.execute()
        except SystemExit:
            pass
        print('')
    def get_input(self, question, default, validation=None):
        v = None
        try:
            while v is None:
                if isinstance(default,bool):
                    if default is not None:
                        q = question+(' [Y/n]' if default else ' [y/N]')
                    else:
                        q = question
                    t = input(q+' ').strip().lower()
                    if t=='' and default is not None:
                        v = default
                    if t=='y':
                        v = True
                    if t=='n':
                        v = False
                else:
                    if default is not None:
                        q = "{} ['{}']".format(question,str(default))
                    else:
                        q = question
                    t = input(q+' ').strip()
                    if t=='' and default is not None:
                        v = default
                    if t:
                        v = t
                if validation is not None and not validation(v):
                    v = None
        except KeyboardInterrupt:
            print('')
            raise SystemExit
        print('')
        return v
if __name__ == '__main__':
    command = Command()
    try:
        command.handle()
    except Exception as e:
        traceback.print_exc()
        print_notice("The setup script failed. Look at the error message above for a description of why.")
| apache-2.0 | -3,493,529,359,381,949,000 | 36.593939 | 172 | 0.550298 | false | 4.145005 | false | false | false |