repo_name stringlengths 6 61 | path stringlengths 4 230 | copies stringlengths 1 3 | size stringlengths 4 6 | text stringlengths 1.01k 850k | license stringclasses 15 values | hash int64 -9,220,477,234,079,998,000 9,219,060,020B | line_mean float64 11.6 96.6 | line_max int64 32 939 | alpha_frac float64 0.26 0.9 | autogenerated bool 1 class | ratio float64 1.62 6.1 | config_test bool 2 classes | has_no_keywords bool 2 classes | few_assignments bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ejfresch/qdc | taxi_driver.py | 1 | 4234 | #!/usr/bin/python
# Copyright 2009-2012 - Luca Freschi <[email protected]>
# This file is part of QDC.
# QDC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os.path
import commands
def checks():
msg=":: Preliminary checks..."
if os.path.isdir('results'):
status, out = commands.getstatusoutput('rm -rf results')
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
if os.path.isfile('engine'):
status, out = commands.getstatusoutput('rm engine')
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
status, out = commands.getstatusoutput('mkdir results')
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
status=0
out="[ok]\n"
return status, msg, out
def compile_model(file):
msg=":: Compilation of the model..."
parse_file='./parser '+str(file)
status, out = commands.getstatusoutput(parse_file)
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
status, out = commands.getstatusoutput('make engine')
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
status=0
out="[ok]\n"
return status, msg, out
def simulation():
msg=":: Simulation..."
status, out = commands.getstatusoutput('./engine '+' 0.1')
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
status=0
out="[ok]\n"
return status, msg, out
def write_results(i,model):
msg=":: Output files..."
base_name=os.path.basename(model)
current=base_name+'_reagents'+str(i)+'.csv'
cmd='mv '+model+'_reagents.csv '+'results/'+current
status, out = commands.getstatusoutput(cmd)
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
current=base_name+'_reactions'+str(i)+'.csv'
cmd='mv '+model+'_reactions.csv '+'results/'+current
status, out = commands.getstatusoutput(cmd)
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
current=base_name+'_reactioncounts'+str(i)+'.csv'
cmd='mv '+model+'_reactioncounts.csv '+'results/'+current
status, out = commands.getstatusoutput(cmd)
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
current=base_name+'_log'+str(i)+'.txt'
cmd='mv '+model+'_log.txt '+'results/'+current
status, out = commands.getstatusoutput(cmd)
if status !=0:
msg=msg+"[ERROR]\n"
return status, msg, out
status=0
out="[ok]\n"
return status, msg, out
if __name__ == '__main__':
if len(sys.argv) != 3:
print "usage: %s <file_input> <number_of_simulations>" % sys.argv[0]
sys.exit()
file=sys.argv[1]
print file+"\n"
#I cut the extension
model=os.path.splitext(file)[0]
print model+"\n"
n_of_simulations=int(sys.argv[2])
s, m, o =checks()
print m+o
if s !=0:
sys.exit()
s, m, o=compile_model(file)
print m+o
if s !=0:
sys.exit()
for i in range(n_of_simulations):
print "Run "+str(i+1)
s, m, o=simulation()
print m+o
if s !=0:
break
s, m, o=write_results(i,model)
print m+o
if s!=0:
break
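# Porting note (a hedged sketch, not from the original script): the `commands`
# module used throughout this file exists only on Python 2 and was removed in
# Python 3. The same (status, output) pattern is available on Python 3 via
# `subprocess.getstatusoutput`, which keeps the return shape used above, e.g.:
#
#     import subprocess
#     status, out = subprocess.getstatusoutput('mkdir results')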
| gpl-3.0 | -4,987,247,999,163,791,000 | 28.816901 | 129 | 0.548181 | false | 3.740283 | false | false | false |
shuoli84/gevent_socketio2 | socketio/binary.py | 1 | 2907 | # coding=utf-8
"""
Binary class deconstruct, reconstruct packet
"""
import copy
class Binary(object):
@staticmethod
def deconstruct_packet(packet):
"""
Replaces every bytearray in packet with a numbered placeholder.
:param packet:
:return: dict with packet and list of buffers
"""
buffers = []
packet_data = packet.get('data', None)
def _deconstruct_packet(data):
if type(data) is bytearray:
place_holder = {
'_placeholder': True,
'num': len(buffers)
}
buffers.append(data)
return place_holder
if type(data) is list:
new_data = []
for d in data:
new_data.append(_deconstruct_packet(d))
return new_data
if type(data) is dict:
new_data = {}
for k, v in data.items():
new_data[k] = _deconstruct_packet(v)
return new_data
return data
pack = copy.copy(packet)
pack['data'] = _deconstruct_packet(packet_data)
pack['attachments'] = len(buffers)
return {
'packet': pack,
'buffers': buffers
}
@staticmethod
def reconstruct_packet(packet, buffers):
def _reconstruct_packet(data):
if type(data) is dict:
if '_placeholder' in data:
buf = buffers[data['num']]
return buf
else:
for k, v in data.items():
data[k] = _reconstruct_packet(v)
return data
if type(data) is list:
for i in xrange(len(data)):
data[i] = _reconstruct_packet(data[i])
return data
return data
packet['data'] = _reconstruct_packet(packet['data'])
del packet['attachments']
return packet
@staticmethod
def remove_blobs(data):
def _remove_blobs(obj, cur_key=None, containing_obj=None):
if not obj:
return obj
try:
# Try to read it as a file
buf = bytearray(obj.read())
if containing_obj is not None and cur_key is not None:
containing_obj[cur_key] = buf
else:
return buf
except AttributeError:
pass
if type(obj) is list:
for index, item in enumerate(obj):
_remove_blobs(item, index, obj)
if type(obj) is dict:
for k, v in obj.items():
_remove_blobs(v, k, obj)
return obj
blobless_data = _remove_blobs(data)
return blobless_data
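# Minimal usage sketch (illustrative only; the packet shape {'type': ..., 'data': ...}
# is an assumption based on the docstrings above, not something this module defines).
# It round-trips a packet containing a bytearray through the two helpers.
if __name__ == '__main__':
    original = {'type': 2, 'data': ['file', bytearray(b'\x00\x01\x02')]}
    deconstructed = Binary.deconstruct_packet(original)
    # 'data' now holds {'_placeholder': True, 'num': 0} in place of the bytearray
    print(deconstructed['packet'])
    print(deconstructed['buffers'])
    restored = Binary.reconstruct_packet(deconstructed['packet'],
                                         deconstructed['buffers'])
    # the placeholder has been swapped back for the original bytearray
    print(restored['data'])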
| mit | -4,149,683,405,712,881,000 | 25.669725 | 71 | 0.471276 | false | 4.734528 | false | false | false |
SimpleITK/SimpleITK | Examples/ImageRegistrationMethodExhaustive/ImageRegistrationMethodExhaustive.py | 4 | 4049 | #!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
"""
This script demonstrates the use of the Exhaustive optimizer in the
ImageRegistrationMethod to estimate a good initial rotation position.
Because gradient descent based optimization can get stuck in local
minima, a good initial transform is critical for reasonable
results. Searching a reasonable space on a grid with brute force may be a
reliable way to get a starting location for further optimization.
The initial translation and center of rotation for the transform is
initialized based on the first principle moments of the intensities of
the image. Then in either 2D or 3D a Euler transform is used to
exhaustively search a grid of the rotation space at a certain step
size. The resulting transform is a reasonable guess where to start
further registration.
"""
import SimpleITK as sitk
import sys
import os
from math import pi
def command_iteration(method):
if (method.GetOptimizerIteration() == 0):
print("Scales: ", method.GetOptimizerScales())
print(f"{method.GetOptimizerIteration():3} = {method.GetMetricValue():7.5f} : {method.GetOptimizerPosition()}")
if len(sys.argv) < 4:
print("Usage:", sys.argv[0], "<fixedImageFilter> <movingImageFile>",
"<outputTransformFile>")
sys.exit(1)
fixed = sitk.ReadImage(sys.argv[1], sitk.sitkFloat32)
moving = sitk.ReadImage(sys.argv[2], sitk.sitkFloat32)
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
sample_per_axis = 12
if fixed.GetDimension() == 2:
tx = sitk.Euler2DTransform()
# Set the number of samples (radius) in each dimension, with a
# default step size of 1.0
R.SetOptimizerAsExhaustive([sample_per_axis // 2, 0, 0])
# Utilize the scale to set the step size for each dimension
R.SetOptimizerScales([2.0 * pi / sample_per_axis, 1.0, 1.0])
elif fixed.GetDimension() == 3:
tx = sitk.Euler3DTransform()
R.SetOptimizerAsExhaustive([sample_per_axis // 2, sample_per_axis // 2,
sample_per_axis // 4, 0, 0, 0])
R.SetOptimizerScales(
[2.0 * pi / sample_per_axis, 2.0 * pi / sample_per_axis,
2.0 * pi / sample_per_axis, 1.0, 1.0, 1.0])
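# Illustration (a hedged reading of the setup above, not part of the original example):
# with sample_per_axis = 12 the exhaustive optimizer is asked for
# sample_per_axis // 2 = 6 steps either side of the initial angle, i.e. 2 * 6 + 1 = 13
# rotation samples per free axis, spaced 2*pi/12 (30 degrees) apart -- roughly a full
# sweep of the circle before any gradient-based refinement would take over.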
# Initialize the transform with a translation and the center of
# rotation from the moments of intensity.
tx = sitk.CenteredTransformInitializer(fixed, moving, tx)
R.SetInitialTransform(tx)
R.SetInterpolator(sitk.sitkLinear)
R.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(R))
outTx = R.Execute(fixed, moving)
print("-------")
print(outTx)
print(f"Optimizer stop condition: {R.GetOptimizerStopConditionDescription()}")
print(f" Iteration: {R.GetOptimizerIteration()}")
print(f" Metric value: {R.GetMetricValue()}")
sitk.WriteTransform(outTx, sys.argv[3])
if ("SITK_NOSHOW" not in os.environ):
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(fixed)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetDefaultPixelValue(1)
resampler.SetTransform(outTx)
out = resampler.Execute(moving)
simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8)
simg2 = sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8)
cimg = sitk.Compose(simg1, simg2, simg1 // 2. + simg2 // 2.)
sitk.Show(cimg, "ImageRegistrationExhaustive Composition")
| apache-2.0 | 3,926,961,951,353,954,300 | 35.809091 | 115 | 0.70289 | false | 3.454778 | false | false | false |
erwilan/ansible | lib/ansible/plugins/terminal/dellos6.py | 5 | 2788 | # 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error: (?:(?!\bdoes not exist\b)(?!\balready exists\b)(?!\bHost not found\b)(?!\bnot active\b).)*$"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
]
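    # Illustration (the prompt and error strings here are made-up examples): the stdout
    # patterns above are meant to match Dell OS6 prompts such as b"switch-01>" or
    # b"switch-01(config)#", while the stderr patterns flag CLI failures such as
    # b"% Error: ..." or b"Invalid input ...".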
def on_authorize(self, passwd=None):
if self._get_prompt().endswith('#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd['prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict')
cmd['answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
# in dellos6 the terminal settings are accepted after the privilege mode
try:
self._exec_cli_command(b'terminal length 0')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_deauthorize(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if prompt.strip().endswith(b')#'):
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| gpl-3.0 | 3,488,803,437,115,528,000 | 36.675676 | 126 | 0.628407 | false | 3.757412 | false | false | false |
zorna/zorna | zorna/site/urls.py | 1 | 1743 | from django.conf.urls.defaults import url, patterns
from zorna.site import views
urlpatterns = patterns('',
url(r'^options/$',
views.admin_list_options,
name='admin_list_options'),
url(r'^registration/$',
views.admin_site_registration,
name='admin_site_registration'),
url(r'^version/$',
views.admin_site_version,
name='admin_site_version'),
url(r'^alerts/$',
views.admin_list_alerts,
name='admin_list_alerts'),
url(r'^alerts/add/$',
views.admin_add_alert,
name='admin_add_alert'),
url(r'^edit/(?P<alert>\d+)/$',
views.admin_edit_alert,
name='admin_edit_alert'),
url(r'^calendar/categories/$',
views.admin_list_calendar_categories,
name='admin_list_calendar_categories'),
url(r'^calendar/categories/add/$',
views.admin_add_calendar_category,
name='admin_add_calendar_category'),
url(r'^calendar/categories/edit/(?P<category>\d+)/$',
views.admin_edit_calendar_category,
name='admin_edit_calendar_category'),
)
| bsd-3-clause | 5,005,190,076,417,932,000 | 50.264706 | 89 | 0.383247 | false | 5.829431 | false | true | false |
petersanchez/django-allauth | allauth/utils.py | 1 | 7725 | import re
import unicodedata
import json
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import validate_email, ValidationError
from django.core import urlresolvers
from django.contrib.sites.models import Site
from django.db.models import FieldDoesNotExist
from django.db.models.fields import (DateTimeField, DateField,
EmailField, TimeField)
from django.utils import six, dateparse
from django.utils.datastructures import SortedDict
from django.core.serializers.json import DjangoJSONEncoder
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
import importlib
except:
from django.utils import importlib
def _generate_unique_username_base(txts, regex=None):
username = None
regex = regex or '[^\w\s@+.-]'
for txt in txts:
if not txt:
continue
username = unicodedata.normalize('NFKD', force_text(txt))
username = username.encode('ascii', 'ignore').decode('ascii')
username = force_text(re.sub(regex, '', username).lower())
        # Django allows for '@' in usernames in order to accommodate
        # projects wanting to use e-mail for username. In allauth we don't
# use this, we already have a proper place for putting e-mail
# addresses (EmailAddress), so let's not use the full e-mail
# address and only take the part leading up to the '@'.
username = username.split('@')[0]
username = username.strip()
username = re.sub('\s+', '_', username)
if username:
break
return username or 'user'
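# Illustrative outputs (the inputs are made-up examples, traced through the
# normalisation steps above):
#   _generate_unique_username_base(['John Doe'])              -> 'john_doe'
#   _generate_unique_username_base(['jane.doe@example.org'])  -> 'jane.doe'
#   _generate_unique_username_base([None, ''])                -> 'user'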
def get_username_max_length():
from .account.app_settings import USER_MODEL_USERNAME_FIELD
if USER_MODEL_USERNAME_FIELD is not None:
User = get_user_model()
max_length = User._meta.get_field(USER_MODEL_USERNAME_FIELD).max_length
else:
max_length = 0
return max_length
def generate_unique_username(txts, regex=None):
from .account.app_settings import USER_MODEL_USERNAME_FIELD
username = _generate_unique_username_base(txts, regex)
User = get_user_model()
max_length = get_username_max_length()
i = 0
while True:
try:
if i:
pfx = str(i + 1)
else:
pfx = ''
ret = username[0:max_length - len(pfx)] + pfx
query = {USER_MODEL_USERNAME_FIELD + '__iexact': ret}
User.objects.get(**query)
i += 1
except User.MultipleObjectsReturned:
i += 1
except User.DoesNotExist:
return ret
def valid_email_or_none(email):
ret = None
try:
if email:
validate_email(email)
if len(email) <= EmailField().max_length:
ret = email
except ValidationError:
pass
return ret
def email_address_exists(email, exclude_user=None):
from .account import app_settings as account_settings
from .account.models import EmailAddress
emailaddresses = EmailAddress.objects
if exclude_user:
emailaddresses = emailaddresses.exclude(user=exclude_user)
ret = emailaddresses.filter(email__iexact=email).exists()
if not ret:
email_field = account_settings.USER_MODEL_EMAIL_FIELD
if email_field:
users = get_user_model().objects
if exclude_user:
users = users.exclude(pk=exclude_user.pk)
ret = users.filter(**{email_field+'__iexact': email}).exists()
return ret
def import_attribute(path):
assert isinstance(path, six.string_types)
pkg, attr = path.rsplit('.', 1)
ret = getattr(importlib.import_module(pkg), attr)
return ret
def import_callable(path_or_callable):
if not hasattr(path_or_callable, '__call__'):
ret = import_attribute(path_or_callable)
else:
ret = path_or_callable
return ret
try:
from django.contrib.auth import get_user_model
except ImportError:
# To keep compatibility with Django 1.4
def get_user_model():
from . import app_settings
from django.db.models import get_model
try:
app_label, model_name = app_settings.USER_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the"
" form 'app_label.model_name'")
user_model = get_model(app_label, model_name)
if user_model is None:
raise ImproperlyConfigured("AUTH_USER_MODEL refers to model"
" '%s' that has not been installed"
% app_settings.USER_MODEL)
return user_model
def get_current_site(request=None):
"""Wrapper around ``Site.objects.get_current`` to handle ``Site`` lookups
by request in Django >= 1.8.
:param request: optional request object
:type request: :class:`django.http.HttpRequest`
"""
# >= django 1.8
if request and hasattr(Site.objects, '_get_site_by_request'):
site = Site.objects.get_current(request=request)
else:
site = Site.objects.get_current()
return site
def resolve_url(to):
"""
Subset of django.shortcuts.resolve_url (that one is 1.5+)
"""
try:
return urlresolvers.reverse(to)
except urlresolvers.NoReverseMatch:
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
def serialize_instance(instance):
"""
Since Django 1.6 items added to the session are no longer pickled,
but JSON encoded by default. We are storing partially complete models
in the session (user, account, token, ...). We cannot use standard
    Django serialization, as these models are not "complete" yet.
Serialization will start complaining about missing relations et al.
"""
ret = dict([(k, v)
for k, v in instance.__dict__.items()
if not (k.startswith('_') or callable(v))])
return json.loads(json.dumps(ret, cls=DjangoJSONEncoder))
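# Hedged illustration: any object exposing a plain __dict__ passes through
# serialize_instance; private attributes and callables are dropped. The class and
# attribute names below are invented for the example.
#
#     class _Stub(object):
#         pass
#     stub = _Stub()
#     stub.email = 'user@example.org'
#     stub._secret = 'hidden'
#     serialize_instance(stub)  # -> {'email': 'user@example.org'}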
def deserialize_instance(model, data):
ret = model()
for k, v in data.items():
if v is not None:
try:
f = model._meta.get_field(k)
if isinstance(f, DateTimeField):
v = dateparse.parse_datetime(v)
elif isinstance(f, TimeField):
v = dateparse.parse_time(v)
elif isinstance(f, DateField):
v = dateparse.parse_date(v)
except FieldDoesNotExist:
pass
setattr(ret, k, v)
return ret
def set_form_field_order(form, fields_order):
if isinstance(form.fields, SortedDict):
form.fields.keyOrder = fields_order
else:
# Python 2.7+
from collections import OrderedDict
assert isinstance(form.fields, OrderedDict)
form.fields = OrderedDict((f, form.fields[f])
for f in fields_order)
def build_absolute_uri(request, location, protocol=None):
uri = request.build_absolute_uri(location)
if protocol:
uri = protocol + ':' + uri.partition(':')[2]
return uri
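# Illustration (the URL is a made-up example): with protocol='https', a URI built as
# 'http://example.org/accounts/login/' is rewritten to
# 'https://example.org/accounts/login/' by the partition above.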
def get_form_class(forms, form_id, default_form):
form_class = forms.get(form_id, default_form)
if isinstance(form_class, six.string_types):
form_class = import_attribute(form_class)
return form_class
def get_request_param(request, param, default=None):
return request.POST.get(param) or request.GET.get(param, default)
| mit | -8,072,015,998,628,125,000 | 32.154506 | 79 | 0.621489 | false | 4.113419 | false | false | false |
branchard/django-react-scrapy-sample | djangoapp/components/migrations/0001_initial.py | 1 | 7888 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-21 23:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Brand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Component',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('photoUrl', models.URLField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='HardDriveType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='MotherBoardFormFactor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='PciType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='PowerSupplyFormFactor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='RamFrequency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('frequency', models.IntegerField()),
],
),
migrations.CreateModel(
name='RamType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('typeName', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Socket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Case',
fields=[
('component_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='components.Component')),
('weight', models.FloatField()),
('width', models.IntegerField()),
('height', models.IntegerField()),
('depth', models.IntegerField()),
('motherBoardFormFactors', models.ManyToManyField(to='components.MotherBoardFormFactor')),
('powerSupplyFormFactor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.PowerSupplyFormFactor')),
],
bases=('components.component',),
),
migrations.CreateModel(
name='GraphicCard',
fields=[
('component_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='components.Component')),
('memory', models.IntegerField()),
('pcitype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.PciType')),
],
bases=('components.component',),
),
migrations.CreateModel(
name='HardDrive',
fields=[
('component_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='components.Component')),
('capacity', models.IntegerField()),
('hardDriveType', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.HardDriveType')),
],
bases=('components.component',),
),
migrations.CreateModel(
name='Motherboard',
fields=[
('component_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='components.Component')),
('ramSlots', models.IntegerField()),
('maxRam', models.IntegerField()),
('formfactor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.MotherBoardFormFactor')),
('pcitypes', models.ManyToManyField(to='components.PciType')),
('ramfrequency', models.ManyToManyField(to='components.RamFrequency')),
('ramtype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.RamType')),
('socket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.Socket')),
],
bases=('components.component',),
),
migrations.CreateModel(
name='PowerSupply',
fields=[
('component_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='components.Component')),
('watts', models.IntegerField()),
('modular', models.BooleanField()),
('factorForm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.PowerSupplyFormFactor')),
],
bases=('components.component',),
),
migrations.CreateModel(
name='Processor',
fields=[
('component_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='components.Component')),
('frequency', models.FloatField()),
('cores', models.IntegerField()),
('socket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.Socket')),
],
bases=('components.component',),
),
migrations.CreateModel(
name='Ram',
fields=[
('component_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='components.Component')),
('capacity', models.IntegerField()),
('quantity', models.IntegerField()),
('frequency', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.RamFrequency')),
('ramtype', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.RamType')),
],
bases=('components.component',),
),
migrations.AddField(
model_name='component',
name='brand',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='components.Brand'),
),
]
| gpl-3.0 | 1,862,894,931,031,018,800 | 47.691358 | 200 | 0.574797 | false | 4.54902 | false | false | false |
doctaphred/projecteuler | projecteuler/problems/problem12.py | 1 | 1814 | # -*- coding: utf-8 -*-
"""
Project Euler: Problem 12
=========================
https://projecteuler.net/problem=12
Highly divisible triangular number
----------------------------------
The sequence of triangle numbers is generated by adding the natural
numbers. So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 =
28. The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five
divisors.
What is the value of the first triangle number to have over five hundred
divisors?
"""
from collections import Counter
from itertools import count, islice
from projecteuler.problems.problem5 import factorize
number = 12
target = 500
answer = 76576500
def triangle_numbers():
"""Generate the triangle numbers (sums of the natural numbers).
    >>> list(islice(triangle_numbers(), 10))
[1, 3, 6, 10, 15, 21, 28, 36, 45, 55]
"""
current = 0
for i in count(1):
current += i
yield current
assert (list(islice(triangle_numbers(), 10)) ==
[1, 3, 6, 10, 15, 21, 28, 36, 45, 55])
def num_divisors(n):
"""Find the number of unique divisors of n."""
c = Counter(factorize(n))
total = 1
for n in c.values():
total *= n + 1
return total
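# Why this works (explanatory note): if n = p1^e1 * p2^e2 * ... then every divisor
# picks an exponent between 0 and ei for each prime, so the divisor count is the
# product of (ei + 1). For example 28 = 2^2 * 7 gives (2 + 1) * (1 + 1) = 6 divisors,
# matching the table in the module docstring and the tests below.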
divisor_tests = {
1: [1],
3: [1, 3],
6: [1, 2, 3, 6],
10: [1, 2, 5, 10],
15: [1, 3, 5, 15],
21: [1, 3, 7, 21],
28: [1, 2, 4, 7, 14, 28],
}
for k, expected in divisor_tests.items():
assert num_divisors(k) == len(expected)
def solution():
return next(n for n in triangle_numbers() if num_divisors(n) > target)
| gpl-3.0 | 85,330,717,630,826,640 | 21.121951 | 74 | 0.5871 | false | 3.023333 | false | false | false |
TeamspykBot/Plugins | Plugins/CommandHelp.py | 1 | 2230 | from operator import itemgetter
from Bot.Plugins.Base import PluginBase
class HelpCommandPlugin(PluginBase):
"""
This plugin sends a list of all available commands to the user when he types a certain command
help - Sends a list of all available commands
optional example json ( can be inserted in config.json under plugins ):
"mydayyy_help_command": {
"commands": {
"help": {
"command": "help",
"accesslevel": 0
}
}
}
"""
def __init__(self, bot_instance):
super().__init__(bot_instance)
# init command variables
self.command_help_cmd = bot_instance.get_user_setting("mydayyy_help_command.commands.help.command") or "help"
self.command_help_al = bot_instance.get_user_setting("mydayyy_help_command.commands.help.accesslevel") or 0
self.bot_instance.add_chat_command(self.command_help_cmd,
"Subscribes the client to receive links",
self.command_help_al,
self.command_help,
[])
def command_help(self, invokerid, invokername, invokeruid, msg_splitted):
client_access_level = self.bot_instance.get_client_accesslevel(invokerid)
chat_commands = self.bot_instance.get_all_commands()
sorted_commands = []
idx = 0
for key, command in chat_commands.items():
idx += 1
color = "[COLOR=green]" if client_access_level >= command.accesslevel else "[COLOR=red]"
args = " ".join(command.args)
if args == "" or args is None:
answer = color + "" + key + " - " + command.description + " [" + str(command.accesslevel) + "][/COLOR]"
else:
answer = color + "" + key + " " + args + " - " + command.description + " [" + str(command.accesslevel) + "][/COLOR]"
sorted_commands.append([idx, answer])
sorted_commands = sorted(sorted_commands, key=itemgetter(0))
for answer in sorted_commands:
self.bot_instance.send_text_to_client(invokerid, answer[1])
| gpl-3.0 | -1,927,694,817,722,317,600 | 41.075472 | 132 | 0.560987 | false | 4.183865 | false | false | false |
jonparrott/gcloud-python | storage/google/cloud/storage/bucket.py | 2 | 66912 | # Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Storage buckets."""
import base64
import copy
import datetime
import json
import warnings
import six
from google.api_core import page_iterator
from google.api_core import datetime_helpers
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _NOW
from google.cloud._helpers import _rfc3339_to_datetime
from google.cloud.exceptions import NotFound
from google.cloud.iam import Policy
from google.cloud.storage import _signing
from google.cloud.storage._helpers import _PropertyMixin
from google.cloud.storage._helpers import _scalar_property
from google.cloud.storage._helpers import _validate_name
from google.cloud.storage.acl import BucketACL
from google.cloud.storage.acl import DefaultObjectACL
from google.cloud.storage.blob import Blob
from google.cloud.storage.blob import _get_encryption_headers
from google.cloud.storage.notification import BucketNotification
from google.cloud.storage.notification import NONE_PAYLOAD_FORMAT
_LOCATION_SETTER_MESSAGE = (
"Assignment to 'Bucket.location' is deprecated, as it is only "
"valid before the bucket is created. Instead, pass the location "
"to `Bucket.create`.")
def _blobs_page_start(iterator, page, response):
"""Grab prefixes after a :class:`~google.cloud.iterator.Page` started.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type page: :class:`~google.cloud.api.core.page_iterator.Page`
:param page: The page that was just created.
:type response: dict
:param response: The JSON API response for a page of blobs.
"""
page.prefixes = tuple(response.get('prefixes', ()))
iterator.prefixes.update(page.prefixes)
def _item_to_blob(iterator, item):
"""Convert a JSON blob to the native object.
.. note::
This assumes that the ``bucket`` attribute has been
added to the iterator after being created.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a blob.
:rtype: :class:`.Blob`
:returns: The next blob in the page.
"""
name = item.get('name')
blob = Blob(name, bucket=iterator.bucket)
blob._set_properties(item)
return blob
def _item_to_notification(iterator, item):
"""Convert a JSON blob to the native object.
.. note::
This assumes that the ``bucket`` attribute has been
added to the iterator after being created.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type item: dict
:param item: An item to be converted to a blob.
:rtype: :class:`.BucketNotification`
:returns: The next notification being iterated.
"""
return BucketNotification.from_api_repr(item, bucket=iterator.bucket)
class LifecycleRuleConditions(dict):
"""Map a single lifecycle rule for a bucket.
See: https://cloud.google.com/storage/docs/lifecycle
:type age: int
    :param age: (optional) apply rule action to items whose age, in days,
exceeds this value.
:type created_before: datetime.date
:param created_before: (optional) apply rule action to items created
before this date.
:type is_live: bool
:param is_live: (optional) if true, apply rule action to non-versioned
items, or to items with no newer versions. If false, apply
rule action to versioned items with at least one newer
version.
:type matches_storage_class: list(str), one or more of
:attr:`Bucket._STORAGE_CLASSES`.
    :param matches_storage_class: (optional) apply rule action to items
                                  whose storage class matches this value.
:type number_of_newer_versions: int
:param number_of_newer_versions: (optional) apply rule action to versioned
items having N newer versions.
:raises ValueError: if no arguments are passed.
"""
def __init__(self, age=None, created_before=None, is_live=None,
matches_storage_class=None, number_of_newer_versions=None,
_factory=False):
conditions = {}
if age is not None:
conditions['age'] = age
if created_before is not None:
conditions['createdBefore'] = created_before.isoformat()
if is_live is not None:
conditions['isLive'] = is_live
if matches_storage_class is not None:
conditions['matchesStorageClass'] = matches_storage_class
if number_of_newer_versions is not None:
conditions['numNewerVersions'] = number_of_newer_versions
if not _factory and not conditions:
raise ValueError("Supply at least one condition")
super(LifecycleRuleConditions, self).__init__(conditions)
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct instance from resource.
:type resource: dict
:param resource: mapping as returned from API call.
:rtype: :class:`LifecycleRuleConditions`
:returns: Instance created from resource.
"""
instance = cls(_factory=True)
instance.update(resource)
return instance
@property
def age(self):
"""Conditon's age value."""
return self.get('age')
@property
def created_before(self):
"""Conditon's created_before value."""
before = self.get('createdBefore')
if before is not None:
return datetime_helpers.from_iso8601_date(before)
@property
def is_live(self):
"""Conditon's 'is_live' value."""
return self.get('isLive')
@property
def matches_storage_class(self):
"""Conditon's 'matches_storage_class' value."""
return self.get('matchesStorageClass')
@property
def number_of_newer_versions(self):
"""Conditon's 'number_of_newer_versions' value."""
return self.get('numNewerVersions')
class LifecycleRuleDelete(dict):
"""Map a lifecycle rule deleting matching items.
:type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
"""
def __init__(self, **kw):
conditions = LifecycleRuleConditions(**kw)
rule = {
'action': {
'type': 'Delete',
},
'condition': dict(conditions),
}
super(LifecycleRuleDelete, self).__init__(rule)
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct instance from resource.
:type resource: dict
:param resource: mapping as returned from API call.
:rtype: :class:`LifecycleRuleDelete`
:returns: Instance created from resource.
"""
instance = cls(_factory=True)
instance.update(resource)
return instance
class LifecycleRuleSetStorageClass(dict):
"""Map a lifecycle rule upating storage class of matching items.
:type storage_class: str, one of :attr:`Bucket._STORAGE_CLASSES`.
:param storage_class: new storage class to assign to matching items.
:type kw: dict
    :param kw: arguments passed to :class:`LifecycleRuleConditions`.
"""
def __init__(self, storage_class, **kw):
conditions = LifecycleRuleConditions(**kw)
rule = {
'action': {
'type': 'SetStorageClass',
'storageClass': storage_class,
},
'condition': dict(conditions),
}
super(LifecycleRuleSetStorageClass, self).__init__(rule)
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct instance from resource.
:type resource: dict
:param resource: mapping as returned from API call.
        :rtype: :class:`LifecycleRuleSetStorageClass`
:returns: Instance created from resource.
"""
action = resource['action']
instance = cls(action['storageClass'], _factory=True)
instance.update(resource)
return instance
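# Hedged usage sketch: the rule classes above serialize straight to the JSON mapping
# the Storage API expects. Attaching them to a bucket (for example through a
# lifecycle-rules setter) happens elsewhere and is outside this excerpt.
#
#     delete_old = LifecycleRuleDelete(age=365)
#     to_coldline = LifecycleRuleSetStorageClass(
#         'COLDLINE', matches_storage_class=['MULTI_REGIONAL', 'REGIONAL'])
#     dict(delete_old)
#     # {'action': {'type': 'Delete'}, 'condition': {'age': 365}}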
class Bucket(_PropertyMixin):
"""A class representing a Bucket on Cloud Storage.
:type client: :class:`google.cloud.storage.client.Client`
:param client: A client which holds credentials and project configuration
for the bucket (which requires a project).
:type name: str
:param name: The name of the bucket. Bucket names must start and end with a
number or letter.
:type user_project: str
:param user_project: (Optional) the project ID to be billed for API
requests made via this instance.
"""
_MAX_OBJECTS_FOR_ITERATION = 256
"""Maximum number of existing objects allowed in iteration.
This is used in Bucket.delete() and Bucket.make_public().
"""
_STORAGE_CLASSES = (
'MULTI_REGIONAL',
'REGIONAL',
'NEARLINE',
'COLDLINE',
'STANDARD', # alias for MULTI_REGIONAL/REGIONAL, based on location
'DURABLE_REDUCED_AVAILABILITY', # deprecated
)
"""Allowed values for :attr:`storage_class`.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets#storageClass
https://cloud.google.com/storage/docs/storage-classes
"""
def __init__(self, client, name=None, user_project=None):
name = _validate_name(name)
super(Bucket, self).__init__(name=name)
self._client = client
self._acl = BucketACL(self)
self._default_object_acl = DefaultObjectACL(self)
self._label_removals = set()
self._user_project = user_project
def __repr__(self):
return '<Bucket: %s>' % (self.name,)
@property
def client(self):
"""The client bound to this bucket."""
return self._client
def _set_properties(self, value):
"""Set the properties for the current object.
:type value: dict or :class:`google.cloud.storage.batch._FutureDict`
:param value: The properties to be set.
"""
self._label_removals.clear()
return super(Bucket, self)._set_properties(value)
@property
def user_project(self):
"""Project ID to be billed for API requests made via this bucket.
If unset, API requests are billed to the bucket owner.
:rtype: str
"""
return self._user_project
def blob(self, blob_name, chunk_size=None,
encryption_key=None, kms_key_name=None):
"""Factory constructor for blob object.
.. note::
This will not make an HTTP request; it simply instantiates
a blob object owned by this bucket.
:type blob_name: str
:param blob_name: The name of the blob to be instantiated.
:type chunk_size: int
:param chunk_size: The size of a chunk of data whenever iterating
(in bytes). This must be a multiple of 256 KB per
the API specification.
:type encryption_key: bytes
:param encryption_key:
Optional 32 byte encryption key for customer-supplied encryption.
:type kms_key_name: str
:param kms_key_name:
Optional resource name of KMS key used to encrypt blob's content.
:rtype: :class:`google.cloud.storage.blob.Blob`
:returns: The blob object created.
"""
return Blob(name=blob_name, bucket=self, chunk_size=chunk_size,
encryption_key=encryption_key, kms_key_name=kms_key_name)
def notification(self, topic_name,
topic_project=None,
custom_attributes=None,
event_types=None,
blob_name_prefix=None,
payload_format=NONE_PAYLOAD_FORMAT):
"""Factory: create a notification resource for the bucket.
See: :class:`.BucketNotification` for parameters.
:rtype: :class:`.BucketNotification`
"""
return BucketNotification(
self, topic_name,
topic_project=topic_project,
custom_attributes=custom_attributes,
event_types=event_types,
blob_name_prefix=blob_name_prefix,
payload_format=payload_format,
)
def exists(self, client=None):
"""Determines whether or not this bucket exists.
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: bool
:returns: True if the bucket exists in Cloud Storage.
"""
client = self._require_client(client)
# We only need the status code (200 or not) so we seek to
# minimize the returned payload.
query_params = {'fields': 'name'}
if self.user_project is not None:
query_params['userProject'] = self.user_project
try:
# We intentionally pass `_target_object=None` since fields=name
# would limit the local properties.
client._connection.api_request(
method='GET', path=self.path,
query_params=query_params, _target_object=None)
# NOTE: This will not fail immediately in a batch. However, when
# Batch.finish() is called, the resulting `NotFound` will be
# raised.
return True
except NotFound:
return False
def create(self, client=None, project=None, location=None):
"""Creates current bucket.
If the bucket already exists, will raise
:class:`google.cloud.exceptions.Conflict`.
This implements "storage.buckets.insert".
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:type project: str
:param project: Optional. The project under which the bucket is to
be created. If not passed, uses the project set on
the client.
:raises ValueError: if :attr:`user_project` is set.
:raises ValueError: if ``project`` is None and client's
:attr:`project` is also None.
:type location: str
:param location: Optional. The location of the bucket. If not passed,
the default location, US, will be used. See
https://cloud.google.com/storage/docs/bucket-locations
"""
if self.user_project is not None:
raise ValueError("Cannot create bucket with 'user_project' set.")
client = self._require_client(client)
if project is None:
project = client.project
if project is None:
raise ValueError(
"Client project not set: pass an explicit project.")
query_params = {'project': project}
properties = {key: self._properties[key] for key in self._changes}
properties['name'] = self.name
if location is not None:
properties['location'] = location
api_response = client._connection.api_request(
method='POST', path='/b', query_params=query_params,
data=properties, _target_object=self)
self._set_properties(api_response)
def patch(self, client=None):
"""Sends all changed properties in a PATCH request.
Updates the ``_properties`` with the response from the backend.
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current object.
"""
# Special case: For buckets, it is possible that labels are being
# removed; this requires special handling.
if self._label_removals:
self._changes.add('labels')
self._properties.setdefault('labels', {})
for removed_label in self._label_removals:
self._properties['labels'][removed_label] = None
# Call the superclass method.
return super(Bucket, self).patch(client=client)
@property
def acl(self):
"""Create our ACL on demand."""
return self._acl
@property
def default_object_acl(self):
"""Create our defaultObjectACL on demand."""
return self._default_object_acl
@staticmethod
def path_helper(bucket_name):
"""Relative URL path for a bucket.
:type bucket_name: str
:param bucket_name: The bucket name in the path.
:rtype: str
:returns: The relative URL path for ``bucket_name``.
"""
return '/b/' + bucket_name
@property
def path(self):
"""The URL path to this bucket."""
if not self.name:
raise ValueError('Cannot determine path without bucket name.')
return self.path_helper(self.name)
def get_blob(self, blob_name, client=None, encryption_key=None, **kwargs):
"""Get a blob object by name.
This will return None if the blob doesn't exist:
.. literalinclude:: snippets.py
:start-after: [START get_blob]
:end-before: [END get_blob]
If :attr:`user_project` is set, bills the API request to that project.
:type blob_name: str
:param blob_name: The name of the blob to retrieve.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:type encryption_key: bytes
:param encryption_key:
Optional 32 byte encryption key for customer-supplied encryption.
See
https://cloud.google.com/storage/docs/encryption#customer-supplied.
:type kwargs: dict
:param kwargs: Keyword arguments to pass to the
:class:`~google.cloud.storage.blob.Blob` constructor.
:rtype: :class:`google.cloud.storage.blob.Blob` or None
:returns: The blob object if it exists, otherwise None.
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params['userProject'] = self.user_project
blob = Blob(bucket=self, name=blob_name, encryption_key=encryption_key,
**kwargs)
try:
headers = _get_encryption_headers(encryption_key)
response = client._connection.api_request(
method='GET',
path=blob.path,
query_params=query_params,
headers=headers,
_target_object=blob,
)
# NOTE: We assume response.get('name') matches `blob_name`.
blob._set_properties(response)
# NOTE: This will not fail immediately in a batch. However, when
# Batch.finish() is called, the resulting `NotFound` will be
# raised.
return blob
except NotFound:
return None
def list_blobs(self, max_results=None, page_token=None, prefix=None,
delimiter=None, versions=None,
projection='noAcl', fields=None, client=None):
"""Return an iterator used to find blobs in the bucket.
If :attr:`user_project` is set, bills the API request to that project.
:type max_results: int
:param max_results: (Optional) Maximum number of blobs to return.
:type page_token: str
:param page_token: (Optional) Opaque marker for the next "page" of
blobs. If not passed, will return the first page
of blobs.
:type prefix: str
:param prefix: (Optional) prefix used to filter blobs.
:type delimiter: str
:param delimiter: (Optional) Delimiter, used with ``prefix`` to
emulate hierarchy.
:type versions: bool
:param versions: (Optional) Whether object versions should be returned
as separate blobs.
:type projection: str
:param projection: (Optional) If used, must be 'full' or 'noAcl'.
Defaults to ``'noAcl'``. Specifies the set of
properties to return.
:type fields: str
:param fields: (Optional) Selector specifying which fields to include
in a partial response. Must be a list of fields. For
example to get a partial response with just the next
page token and the language of each blob returned:
``'items/contentLanguage,nextPageToken'``.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of all :class:`~google.cloud.storage.blob.Blob`
in this bucket matching the arguments.
"""
extra_params = {'projection': projection}
if prefix is not None:
extra_params['prefix'] = prefix
if delimiter is not None:
extra_params['delimiter'] = delimiter
if versions is not None:
extra_params['versions'] = versions
if fields is not None:
extra_params['fields'] = fields
if self.user_project is not None:
extra_params['userProject'] = self.user_project
client = self._require_client(client)
path = self.path + '/o'
iterator = page_iterator.HTTPIterator(
client=client,
api_request=client._connection.api_request,
path=path,
item_to_value=_item_to_blob,
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
page_start=_blobs_page_start)
iterator.bucket = self
iterator.prefixes = set()
return iterator
def list_notifications(self, client=None):
"""List Pub / Sub notifications for this bucket.
See:
https://cloud.google.com/storage/docs/json_api/v1/notifications/list
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: list of :class:`.BucketNotification`
:returns: notification instances
"""
client = self._require_client(client)
path = self.path + '/notificationConfigs'
iterator = page_iterator.HTTPIterator(
client=client,
api_request=client._connection.api_request,
path=path,
item_to_value=_item_to_notification)
iterator.bucket = self
return iterator
def delete(self, force=False, client=None):
"""Delete this bucket.
The bucket **must** be empty in order to submit a delete request. If
``force=True`` is passed, this will first attempt to delete all the
objects / blobs in the bucket (i.e. try to empty the bucket).
If the bucket doesn't exist, this will raise
:class:`google.cloud.exceptions.NotFound`. If the bucket is not empty
(and ``force=False``), will raise
:class:`google.cloud.exceptions.Conflict`.
If ``force=True`` and the bucket contains more than 256 objects / blobs
this will cowardly refuse to delete the objects (or the bucket). This
is to prevent accidental bucket deletion and to prevent extremely long
runtime of this method.
If :attr:`user_project` is set, bills the API request to that project.
:type force: bool
:param force: If True, empties the bucket's objects then deletes it.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises: :class:`ValueError` if ``force`` is ``True`` and the bucket
contains more than 256 objects / blobs.
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params['userProject'] = self.user_project
if force:
blobs = list(self.list_blobs(
max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
client=client))
if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
message = (
'Refusing to delete bucket with more than '
'%d objects. If you actually want to delete '
'this bucket, please delete the objects '
'yourself before calling Bucket.delete().'
) % (self._MAX_OBJECTS_FOR_ITERATION,)
raise ValueError(message)
# Ignore 404 errors on delete.
self.delete_blobs(blobs, on_error=lambda blob: None,
client=client)
# We intentionally pass `_target_object=None` since a DELETE
# request has no response value (whether in a standard request or
# in a batch request).
client._connection.api_request(
method='DELETE',
path=self.path,
query_params=query_params,
_target_object=None)
def delete_blob(self, blob_name, client=None):
"""Deletes a blob from the current bucket.
If the blob isn't found (backend 404), raises a
:class:`google.cloud.exceptions.NotFound`.
For example:
.. literalinclude:: snippets.py
:start-after: [START delete_blob]
:end-before: [END delete_blob]
If :attr:`user_project` is set, bills the API request to that project.
:type blob_name: str
:param blob_name: A blob name to delete.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises: :class:`google.cloud.exceptions.NotFound` (to suppress
the exception, call ``delete_blobs``, passing a no-op
``on_error`` callback, e.g.:
.. literalinclude:: snippets.py
:start-after: [START delete_blobs]
:end-before: [END delete_blobs]
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params['userProject'] = self.user_project
blob_path = Blob.path_helper(self.path, blob_name)
# We intentionally pass `_target_object=None` since a DELETE
# request has no response value (whether in a standard request or
# in a batch request).
client._connection.api_request(
method='DELETE',
path=blob_path,
query_params=query_params,
_target_object=None)
def delete_blobs(self, blobs, on_error=None, client=None):
"""Deletes a list of blobs from the current bucket.
Uses :meth:`delete_blob` to delete each individual blob.
If :attr:`user_project` is set, bills the API request to that project.
:type blobs: list
:param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
blob names to delete.
:type on_error: callable
        :param on_error: (Optional) Takes single argument: ``blob``. Called
                          once for each blob raising
:class:`~google.cloud.exceptions.NotFound`;
otherwise, the exception is propagated.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises: :class:`~google.cloud.exceptions.NotFound` (if
`on_error` is not passed).
"""
for blob in blobs:
try:
blob_name = blob
if not isinstance(blob_name, six.string_types):
blob_name = blob.name
self.delete_blob(blob_name, client=client)
except NotFound:
if on_error is not None:
on_error(blob)
else:
raise
def copy_blob(self, blob, destination_bucket, new_name=None,
client=None, preserve_acl=True, source_generation=None):
"""Copy the given blob to the given bucket, optionally with a new name.
If :attr:`user_project` is set, bills the API request to that project.
:type blob: :class:`google.cloud.storage.blob.Blob`
:param blob: The blob to be copied.
:type destination_bucket: :class:`google.cloud.storage.bucket.Bucket`
:param destination_bucket: The bucket into which the blob should be
copied.
:type new_name: str
:param new_name: (optional) the new name for the copied file.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:type preserve_acl: bool
:param preserve_acl: Optional. Copies ACL from old blob to new blob.
Default: True.
:type source_generation: long
:param source_generation: Optional. The generation of the blob to be
copied.
:rtype: :class:`google.cloud.storage.blob.Blob`
:returns: The new Blob.
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params['userProject'] = self.user_project
if source_generation is not None:
query_params['sourceGeneration'] = source_generation
if new_name is None:
new_name = blob.name
new_blob = Blob(bucket=destination_bucket, name=new_name)
api_path = blob.path + '/copyTo' + new_blob.path
copy_result = client._connection.api_request(
method='POST',
path=api_path,
query_params=query_params,
_target_object=new_blob,
)
if not preserve_acl:
new_blob.acl.save(acl={}, client=client)
new_blob._set_properties(copy_result)
return new_blob
def rename_blob(self, blob, new_name, client=None):
"""Rename the given blob using copy and delete operations.
If :attr:`user_project` is set, bills the API request to that project.
Effectively, copies blob to the same bucket with a new name, then
deletes the blob.
.. warning::
This method will first duplicate the data and then delete the
old blob. This means that with very large objects renaming
could be a very (temporarily) costly or a very slow operation.
:type blob: :class:`google.cloud.storage.blob.Blob`
:param blob: The blob to be renamed.
:type new_name: str
:param new_name: The new name for this blob.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`Blob`
:returns: The newly-renamed blob.
"""
same_name = blob.name == new_name
new_blob = self.copy_blob(blob, self, new_name, client=client)
if not same_name:
blob.delete(client=client)
return new_blob
@property
def cors(self):
"""Retrieve or set CORS policies configured for this bucket.
See http://www.w3.org/TR/cors/ and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. note::
The getter for this property returns a list which contains
*copies* of the bucket's CORS policy mappings. Mutating the list
or one of its dicts has no effect unless you then re-assign the
dict via the setter. E.g.:
>>> policies = bucket.cors
>>> policies.append({'origin': '/foo', ...})
>>> policies[1]['maxAgeSeconds'] = 3600
>>> del policies[0]
>>> bucket.cors = policies
>>> bucket.update()
:setter: Set CORS policies for this bucket.
:getter: Gets the CORS policies for this bucket.
:rtype: list of dictionaries
:returns: A sequence of mappings describing each CORS policy.
"""
return [copy.deepcopy(policy)
for policy in self._properties.get('cors', ())]
@cors.setter
def cors(self, entries):
"""Set CORS policies configured for this bucket.
See http://www.w3.org/TR/cors/ and
https://cloud.google.com/storage/docs/json_api/v1/buckets
:type entries: list of dictionaries
:param entries: A sequence of mappings describing each CORS policy.
"""
self._patch_property('cors', entries)
default_event_based_hold = _scalar_property('defaultEventBasedHold')
"""Are uploaded objects automatically placed under an even-based hold?
If True, uploaded objects will be placed under an event-based hold to
be released at a future time. When released an object will then begin
the retention period determined by the policy retention period for the
object bucket.
See https://cloud.google.com/storage/docs/json_api/v1/buckets
If the property is not set locally, returns ``None``.
:rtype: bool or ``NoneType``
"""
@property
def default_kms_key_name(self):
"""Retrieve / set default KMS encryption key for objects in the bucket.
See https://cloud.google.com/storage/docs/json_api/v1/buckets
:setter: Set default KMS encryption key for items in this bucket.
:getter: Get default KMS encryption key for items in this bucket.
:rtype: str
:returns: Default KMS encryption key, or ``None`` if not set.
"""
encryption_config = self._properties.get('encryption', {})
return encryption_config.get('defaultKmsKeyName')
@default_kms_key_name.setter
def default_kms_key_name(self, value):
"""Set default KMS encryption key for objects in the bucket.
:type value: str or None
:param value: new KMS key name (None to clear any existing key).
"""
encryption_config = self._properties.get('encryption', {})
encryption_config['defaultKmsKeyName'] = value
self._patch_property('encryption', encryption_config)
@property
def labels(self):
"""Retrieve or set labels assigned to this bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
.. note::
The getter for this property returns a dict which is a *copy*
of the bucket's labels. Mutating that dict has no effect unless
you then re-assign the dict via the setter. E.g.:
>>> labels = bucket.labels
>>> labels['new_key'] = 'some-label'
>>> del labels['old_key']
>>> bucket.labels = labels
>>> bucket.update()
:setter: Set labels for this bucket.
:getter: Gets the labels for this bucket.
:rtype: :class:`dict`
:returns: Name-value pairs (string->string) labelling the bucket.
"""
labels = self._properties.get('labels')
if labels is None:
return {}
return copy.deepcopy(labels)
@labels.setter
def labels(self, mapping):
"""Set labels assigned to this bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets#labels
:type mapping: :class:`dict`
:param mapping: Name-value pairs (string->string) labelling the bucket.
"""
# If any labels have been expressly removed, we need to track this
# so that a future .patch() call can do the correct thing.
existing = set([k for k in self.labels.keys()])
incoming = set([k for k in mapping.keys()])
self._label_removals = self._label_removals.union(
existing.difference(incoming),
)
# Actually update the labels on the object.
self._patch_property('labels', copy.deepcopy(mapping))
@property
def etag(self):
"""Retrieve the ETag for the bucket.
See https://tools.ietf.org/html/rfc2616#section-3.11 and
https://cloud.google.com/storage/docs/json_api/v1/buckets
:rtype: str or ``NoneType``
:returns: The bucket etag or ``None`` if the bucket's
resource has not been loaded from the server.
"""
return self._properties.get('etag')
@property
def id(self):
"""Retrieve the ID for the bucket.
See https://cloud.google.com/storage/docs/json_api/v1/buckets
:rtype: str or ``NoneType``
:returns: The ID of the bucket or ``None`` if the bucket's
resource has not been loaded from the server.
"""
return self._properties.get('id')
@property
def lifecycle_rules(self):
"""Retrieve or set lifecycle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. note::
The getter for this property returns a list which contains
*copies* of the bucket's lifecycle rules mappings. Mutating the
list or one of its dicts has no effect unless you then re-assign
the dict via the setter. E.g.:
>>> rules = bucket.lifecycle_rules
           >>> rules.append({'action': {'type': 'Delete'}, 'condition': {'age': 365}})
           >>> rules[1]['action']['type'] = 'Delete'
>>> del rules[0]
>>> bucket.lifecycle_rules = rules
>>> bucket.update()
        :setter: Set lifecycle rules for this bucket.
        :getter: Gets the lifecycle rules for this bucket.
:rtype: generator(dict)
:returns: A sequence of mappings describing each lifecycle rule.
"""
info = self._properties.get('lifecycle', {})
for rule in info.get('rule', ()):
action_type = rule['action']['type']
if action_type == 'Delete':
yield LifecycleRuleDelete.from_api_repr(rule)
elif action_type == 'SetStorageClass':
yield LifecycleRuleSetStorageClass.from_api_repr(rule)
else:
raise ValueError("Unknown lifecycle rule: {}".format(rule))
@lifecycle_rules.setter
def lifecycle_rules(self, rules):
"""Set lifestyle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
        :type rules: list of dictionaries
        :param rules: A sequence of mappings describing each lifecycle rule.
"""
rules = [dict(rule) for rule in rules] # Convert helpers if needed
self._patch_property('lifecycle', {'rule': rules})
def clear_lifecyle_rules(self):
"""Set lifestyle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
"""
self.lifecycle_rules = []
def add_lifecycle_delete_rule(self, **kw):
"""Add a "delete" rule to lifestyle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. literalinclude:: snippets.py
:start-after: [START add_lifecycle_delete_rule]
:end-before: [END add_lifecycle_delete_rule]
:type kw: dict
:params kw: arguments passed to :class:`LifecycleRuleConditions`.
"""
rules = list(self.lifecycle_rules)
rules.append(LifecycleRuleDelete(**kw))
self.lifecycle_rules = rules
def add_lifecycle_set_storage_class_rule(self, storage_class, **kw):
"""Add a "delete" rule to lifestyle rules configured for this bucket.
See https://cloud.google.com/storage/docs/lifecycle and
https://cloud.google.com/storage/docs/json_api/v1/buckets
.. literalinclude:: snippets.py
:start-after: [START add_lifecycle_set_storage_class_rule]
:end-before: [END add_lifecycle_set_storage_class_rule]
:type storage_class: str, one of :attr:`_STORAGE_CLASSES`.
:param storage_class: new storage class to assign to matching items.
:type kw: dict
:params kw: arguments passed to :class:`LifecycleRuleConditions`.
"""
rules = list(self.lifecycle_rules)
rules.append(LifecycleRuleSetStorageClass(storage_class, **kw))
self.lifecycle_rules = rules
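    # A short sketch combining the two helpers above (illustrative only):
    # delete objects after 365 days, downgrade to COLDLINE after 30 days,
    # then persist the change with a patch request.
    #
    #   bucket.add_lifecycle_delete_rule(age=365)
    #   bucket.add_lifecycle_set_storage_class_rule("COLDLINE", age=30)
    #   bucket.patch()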
_location = _scalar_property('location')
@property
def location(self):
"""Retrieve location configured for this bucket.
See https://cloud.google.com/storage/docs/json_api/v1/buckets and
https://cloud.google.com/storage/docs/bucket-locations
Returns ``None`` if the property has not been set before creation,
or if the bucket's resource has not been loaded from the server.
:rtype: str or ``NoneType``
"""
return self._location
@location.setter
def location(self, value):
"""(Deprecated) Set `Bucket.location`
This can only be set at bucket **creation** time.
See https://cloud.google.com/storage/docs/json_api/v1/buckets and
https://cloud.google.com/storage/docs/bucket-locations
.. warning::
Assignment to 'Bucket.location' is deprecated, as it is only
valid before the bucket is created. Instead, pass the location
to `Bucket.create`.
"""
warnings.warn(
_LOCATION_SETTER_MESSAGE, DeprecationWarning, stacklevel=2)
self._location = value
def get_logging(self):
"""Return info about access logging for this bucket.
See https://cloud.google.com/storage/docs/access-logs#status
:rtype: dict or None
        :returns: a dict with keys ``logBucket`` and ``logObjectPrefix``
(if logging is enabled), or None (if not).
"""
info = self._properties.get('logging')
return copy.deepcopy(info)
def enable_logging(self, bucket_name, object_prefix=''):
"""Enable access logging for this bucket.
See https://cloud.google.com/storage/docs/access-logs
:type bucket_name: str
:param bucket_name: name of bucket in which to store access logs
:type object_prefix: str
:param object_prefix: prefix for access log filenames
"""
info = {'logBucket': bucket_name, 'logObjectPrefix': object_prefix}
self._patch_property('logging', info)
def disable_logging(self):
"""Disable access logging for this bucket.
See https://cloud.google.com/storage/docs/access-logs#disabling
"""
self._patch_property('logging', None)
@property
def metageneration(self):
"""Retrieve the metageneration for the bucket.
See https://cloud.google.com/storage/docs/json_api/v1/buckets
:rtype: int or ``NoneType``
:returns: The metageneration of the bucket or ``None`` if the bucket's
resource has not been loaded from the server.
"""
metageneration = self._properties.get('metageneration')
if metageneration is not None:
return int(metageneration)
@property
def owner(self):
"""Retrieve info about the owner of the bucket.
See https://cloud.google.com/storage/docs/json_api/v1/buckets
:rtype: dict or ``NoneType``
:returns: Mapping of owner's role/ID. Returns ``None`` if the bucket's
resource has not been loaded from the server.
"""
return copy.deepcopy(self._properties.get('owner'))
@property
def project_number(self):
"""Retrieve the number of the project to which the bucket is assigned.
See https://cloud.google.com/storage/docs/json_api/v1/buckets
:rtype: int or ``NoneType``
:returns: The project number that owns the bucket or ``None`` if
the bucket's resource has not been loaded from the server.
"""
project_number = self._properties.get('projectNumber')
if project_number is not None:
return int(project_number)
@property
def retention_policy_effective_time(self):
"""Retrieve the effective time of the bucket's retention policy.
:rtype: datetime.datetime or ``NoneType``
        :returns: point-in-time at which the bucket's retention policy is
effective, or ``None`` if the property is not
set locally.
"""
policy = self._properties.get('retentionPolicy')
if policy is not None:
timestamp = policy.get('effectiveTime')
if timestamp is not None:
return _rfc3339_to_datetime(timestamp)
@property
def retention_policy_locked(self):
"""Retrieve whthere the bucket's retention policy is locked.
:rtype: bool
:returns: True if the bucket's policy is locked, or else False
if the policy is not locked, or the property is not
set locally.
"""
policy = self._properties.get('retentionPolicy')
if policy is not None:
return policy.get('isLocked')
@property
def retention_period(self):
"""Retrieve or set the retention period for items in the bucket.
:rtype: int or ``NoneType``
:returns: number of seconds to retain items after upload or release
from event-based lock, or ``None`` if the property is not
set locally.
"""
policy = self._properties.get('retentionPolicy')
if policy is not None:
period = policy.get('retentionPeriod')
if period is not None:
return int(period)
@retention_period.setter
def retention_period(self, value):
"""Set the retention period for items in the bucket.
:type value: int
:param value:
number of seconds to retain items after upload or release from
event-based lock.
:raises ValueError: if the bucket's retention policy is locked.
"""
policy = self._properties.setdefault('retentionPolicy', {})
if value is not None:
policy['retentionPeriod'] = str(value)
else:
policy = None
self._patch_property('retentionPolicy', policy)
@property
def self_link(self):
"""Retrieve the URI for the bucket.
See https://cloud.google.com/storage/docs/json_api/v1/buckets
:rtype: str or ``NoneType``
:returns: The self link for the bucket or ``None`` if
the bucket's resource has not been loaded from the server.
"""
return self._properties.get('selfLink')
@property
def storage_class(self):
"""Retrieve or set the storage class for the bucket.
See https://cloud.google.com/storage/docs/storage-classes
:setter: Set the storage class for this bucket.
        :getter: Gets the storage class for this bucket.
:rtype: str or ``NoneType``
:returns: If set, one of "MULTI_REGIONAL", "REGIONAL",
"NEARLINE", "COLDLINE", "STANDARD", or
"DURABLE_REDUCED_AVAILABILITY", else ``None``.
"""
return self._properties.get('storageClass')
@storage_class.setter
def storage_class(self, value):
"""Set the storage class for the bucket.
See https://cloud.google.com/storage/docs/storage-classes
:type value: str
:param value: one of "MULTI_REGIONAL", "REGIONAL", "NEARLINE",
"COLDLINE", "STANDARD", or "DURABLE_REDUCED_AVAILABILITY"
"""
if value not in self._STORAGE_CLASSES:
raise ValueError('Invalid storage class: %s' % (value,))
self._patch_property('storageClass', value)
@property
def time_created(self):
"""Retrieve the timestamp at which the bucket was created.
See https://cloud.google.com/storage/docs/json_api/v1/buckets
:rtype: :class:`datetime.datetime` or ``NoneType``
:returns: Datetime object parsed from RFC3339 valid timestamp, or
``None`` if the bucket's resource has not been loaded
from the server.
"""
value = self._properties.get('timeCreated')
if value is not None:
return _rfc3339_to_datetime(value)
@property
def versioning_enabled(self):
"""Is versioning enabled for this bucket?
See https://cloud.google.com/storage/docs/object-versioning for
details.
:setter: Update whether versioning is enabled for this bucket.
:getter: Query whether versioning is enabled for this bucket.
:rtype: bool
:returns: True if enabled, else False.
"""
versioning = self._properties.get('versioning', {})
return versioning.get('enabled', False)
@versioning_enabled.setter
def versioning_enabled(self, value):
"""Enable versioning for this bucket.
See https://cloud.google.com/storage/docs/object-versioning for
details.
:type value: convertible to boolean
:param value: should versioning be enabled for the bucket?
"""
self._patch_property('versioning', {'enabled': bool(value)})
@property
def requester_pays(self):
"""Does the requester pay for API requests for this bucket?
See https://cloud.google.com/storage/docs/requester-pays for
details.
:setter: Update whether requester pays for this bucket.
:getter: Query whether requester pays for this bucket.
:rtype: bool
:returns: True if requester pays for API requests for the bucket,
else False.
"""
        billing = self._properties.get('billing', {})
        return billing.get('requesterPays', False)
@requester_pays.setter
def requester_pays(self, value):
"""Update whether requester pays for API requests for this bucket.
        See https://cloud.google.com/storage/docs/requester-pays for
details.
:type value: convertible to boolean
:param value: should requester pay for API requests for the bucket?
"""
self._patch_property('billing', {'requesterPays': bool(value)})
def configure_website(self, main_page_suffix=None, not_found_page=None):
"""Configure website-related properties.
See https://cloud.google.com/storage/docs/hosting-static-website
.. note::
This (apparently) only works
if your bucket name is a domain name
(and to do that, you need to get approved somehow...).
If you want this bucket to host a website, just provide the name
of an index page and a page to use when a blob isn't found:
.. literalinclude:: snippets.py
:start-after: [START configure_website]
:end-before: [END configure_website]
You probably should also make the whole bucket public:
.. literalinclude:: snippets.py
:start-after: [START make_public]
:end-before: [END make_public]
This says: "Make the bucket public, and all the stuff already in
the bucket, and anything else I add to the bucket. Just make it
all public."
:type main_page_suffix: str
:param main_page_suffix: The page to use as the main page
of a directory.
Typically something like index.html.
:type not_found_page: str
:param not_found_page: The file to use when a page isn't found.
"""
data = {
'mainPageSuffix': main_page_suffix,
'notFoundPage': not_found_page,
}
self._patch_property('website', data)
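    # A minimal sketch of the website setup described above (hypothetical
    # page names; ``patch()`` persists the new properties):
    #
    #   bucket.configure_website(
    #       main_page_suffix="index.html", not_found_page="404.html")
    #   bucket.patch()
    #   bucket.make_public(recursive=True, future=True)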
def disable_website(self):
"""Disable the website configuration for this bucket.
This is really just a shortcut for setting the website-related
attributes to ``None``.
"""
return self.configure_website(None, None)
def get_iam_policy(self, client=None):
"""Retrieve the IAM policy for the bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy
If :attr:`user_project` is set, bills the API request to that project.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`google.cloud.iam.Policy`
:returns: the policy instance, based on the resource returned from
the ``getIamPolicy`` API request.
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params['userProject'] = self.user_project
info = client._connection.api_request(
method='GET',
path='%s/iam' % (self.path,),
query_params=query_params,
_target_object=None)
return Policy.from_api_repr(info)
def set_iam_policy(self, policy, client=None):
"""Update the IAM policy for the bucket.
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy
If :attr:`user_project` is set, bills the API request to that project.
:type policy: :class:`google.cloud.iam.Policy`
:param policy: policy instance used to update bucket's IAM policy.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: :class:`google.cloud.iam.Policy`
:returns: the policy instance, based on the resource returned from
the ``setIamPolicy`` API request.
"""
client = self._require_client(client)
query_params = {}
if self.user_project is not None:
query_params['userProject'] = self.user_project
resource = policy.to_api_repr()
resource['resourceId'] = self.path
info = client._connection.api_request(
method='PUT',
path='%s/iam' % (self.path,),
query_params=query_params,
data=resource,
_target_object=None)
return Policy.from_api_repr(info)
def test_iam_permissions(self, permissions, client=None):
"""API call: test permissions
See
https://cloud.google.com/storage/docs/json_api/v1/buckets/testIamPermissions
If :attr:`user_project` is set, bills the API request to that project.
:type permissions: list of string
:param permissions: the permissions to check
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: list of string
:returns: the permissions returned by the ``testIamPermissions`` API
request.
"""
client = self._require_client(client)
query_params = {'permissions': permissions}
if self.user_project is not None:
query_params['userProject'] = self.user_project
path = '%s/iam/testPermissions' % (self.path,)
resp = client._connection.api_request(
method='GET',
path=path,
query_params=query_params)
return resp.get('permissions', [])
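    # An illustrative sketch (not part of the original module): granting a
    # hypothetical member read access via the IAM helpers above.
    #
    #   policy = bucket.get_iam_policy()
    #   policy["roles/storage.objectViewer"].add("user:[email protected]")
    #   bucket.set_iam_policy(policy)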
def make_public(self, recursive=False, future=False, client=None):
"""Update bucket's ACL, granting read access to anonymous users.
:type recursive: bool
:param recursive: If True, this will make all blobs inside the bucket
public as well.
:type future: bool
:param future: If True, this will make all objects created in the
future public as well.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises ValueError:
If ``recursive`` is True, and the bucket contains more than 256
blobs. This is to prevent extremely long runtime of this
method. For such buckets, iterate over the blobs returned by
:meth:`list_blobs` and call
:meth:`~google.cloud.storage.blob.Blob.make_public`
for each blob.
"""
self.acl.all().grant_read()
self.acl.save(client=client)
if future:
doa = self.default_object_acl
if not doa.loaded:
doa.reload(client=client)
doa.all().grant_read()
doa.save(client=client)
if recursive:
blobs = list(self.list_blobs(
projection='full',
max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
client=client))
if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
message = (
"Refusing to make public recursively with more than "
"%d objects. If you actually want to make every object "
"in this bucket public, iterate through the blobs "
"returned by 'Bucket.list_blobs()' and call "
"'make_public' on each one."
) % (self._MAX_OBJECTS_FOR_ITERATION,)
raise ValueError(message)
for blob in blobs:
blob.acl.all().grant_read()
blob.acl.save(client=client)
def make_private(self, recursive=False, future=False, client=None):
"""Update bucket's ACL, revoking read access for anonymous users.
:type recursive: bool
:param recursive: If True, this will make all blobs inside the bucket
private as well.
:type future: bool
:param future: If True, this will make all objects created in the
future private as well.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises ValueError:
If ``recursive`` is True, and the bucket contains more than 256
blobs. This is to prevent extremely long runtime of this
method. For such buckets, iterate over the blobs returned by
:meth:`list_blobs` and call
:meth:`~google.cloud.storage.blob.Blob.make_private`
for each blob.
"""
self.acl.all().revoke_read()
self.acl.save(client=client)
if future:
doa = self.default_object_acl
if not doa.loaded:
doa.reload(client=client)
doa.all().revoke_read()
doa.save(client=client)
if recursive:
blobs = list(self.list_blobs(
projection='full',
max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
client=client))
if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
message = (
'Refusing to make private recursively with more than '
'%d objects. If you actually want to make every object '
"in this bucket private, iterate through the blobs "
"returned by 'Bucket.list_blobs()' and call "
"'make_private' on each one."
) % (self._MAX_OBJECTS_FOR_ITERATION,)
raise ValueError(message)
for blob in blobs:
blob.acl.all().revoke_read()
blob.acl.save(client=client)
def generate_upload_policy(
self, conditions, expiration=None, client=None):
"""Create a signed upload policy for uploading objects.
This method generates and signs a policy document. You can use
`policy documents`_ to allow visitors to a website to upload files to
Google Cloud Storage without giving them direct write access.
For example:
.. literalinclude:: snippets.py
:start-after: [START policy_document]
:end-before: [END policy_document]
.. _policy documents:
https://cloud.google.com/storage/docs/xml-api\
/post-object#policydocument
:type expiration: datetime
:param expiration: Optional expiration in UTC. If not specified, the
policy will expire in 1 hour.
:type conditions: list
:param conditions: A list of conditions as described in the
`policy documents`_ documentation.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:rtype: dict
:returns: A dictionary of (form field name, form field value) of form
fields that should be added to your HTML upload form in order
to attach the signature.
"""
client = self._require_client(client)
credentials = client._base_connection.credentials
_signing.ensure_signed_credentials(credentials)
if expiration is None:
expiration = _NOW() + datetime.timedelta(hours=1)
conditions = conditions + [
{'bucket': self.name},
]
policy_document = {
'expiration': _datetime_to_rfc3339(expiration),
'conditions': conditions,
}
encoded_policy_document = base64.b64encode(
json.dumps(policy_document).encode('utf-8'))
signature = base64.b64encode(
credentials.sign_bytes(encoded_policy_document))
fields = {
'bucket': self.name,
'GoogleAccessId': credentials.signer_email,
'policy': encoded_policy_document.decode('utf-8'),
'signature': signature.decode('utf-8'),
}
return fields
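    # A rough sketch of consuming the returned fields (assumption, not from
    # the original module); the conditions, key and endpoint are illustrative:
    #
    #   fields = bucket.generate_upload_policy(
    #       [["starts-with", "$key", "uploads/"], {"acl": "private"}])
    #   fields["key"] = "uploads/example.txt"
    #   # POST ``fields`` plus the file data to the XML API endpoint,
    #   # e.g. https://storage.googleapis.com/<bucket-name>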
def lock_retention_policy(self, client=None):
"""Lock the bucket's retention policy.
:raises ValueError:
if the bucket has no metageneration (i.e., new or never reloaded);
if the bucket has no retention policy assigned;
if the bucket's retention policy is already locked.
"""
if 'metageneration' not in self._properties:
raise ValueError(
"Bucket has no retention policy assigned: try 'reload'?")
policy = self._properties.get('retentionPolicy')
if policy is None:
raise ValueError(
"Bucket has no retention policy assigned: try 'reload'?")
if policy.get('isLocked'):
raise ValueError("Bucket's retention policy is already locked.")
client = self._require_client(client)
query_params = {'ifMetagenerationMatch': self.metageneration}
if self.user_project is not None:
query_params['userProject'] = self.user_project
path = '/b/{}/lockRetentionPolicy'.format(self.name)
api_response = client._connection.api_request(
method='POST', path=path, query_params=query_params,
_target_object=self)
self._set_properties(api_response)
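    # An end-to-end sketch of the retention helpers defined above
    # (illustrative only; locking is irreversible):
    #
    #   bucket.retention_period = 86400   # one day, in seconds
    #   bucket.patch()
    #   bucket.reload()                   # pick up the current metageneration
    #   bucket.lock_retention_policy()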
| apache-2.0 | -8,911,138,920,853,945,000 | 35.66411 | 84 | 0.60058 | false | 4.391992 | false | false | false |
Yukarumya/Yukarum-Redfoxes | security/generate_mapfile.py | 1 | 1861 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This script processes NSS .def files according to the rules defined in
# a comment at the top of each one. The files are used to define the
# exports from NSS shared libraries, with -DEFFILE on Windows, a linker
# script on Linux, or with -exported_symbols_list on OS X.
#
# The NSS build system processes them using a series of sed replacements,
# but the Mozilla build system is already running a Python script to generate
# the file so it's simpler to just do the replacement in Python.
import buildconfig
def main(output, input):
is_darwin = buildconfig.substs['OS_ARCH'] == 'Darwin'
with open(input, 'rb') as f:
for line in f:
line = line.rstrip()
# Remove all lines containing ';-'
if ';-' in line:
continue
# On OS X, remove all lines containing ';+'
if is_darwin and ';+' in line:
continue
# Remove the string ' DATA '.
line = line.replace(' DATA ', '')
# Remove the string ';+'
line = line.replace(';+', '')
# Remove the string ';;'
line = line.replace(';;', '')
# If a ';' is present, remove everything after it,
# and on OS X, remove it as well.
i = line.find(';')
if i != -1:
if is_darwin:
line = line[:i]
else:
line = line[:i+1]
# On OS X, symbols get an underscore in front.
if line and is_darwin:
output.write('_')
output.write(line)
output.write('\n')
| mpl-2.0 | 3,943,185,764,499,847,700 | 36.979592 | 77 | 0.558302 | false | 4.210407 | false | false | false |
rackerlabs/pycon-workshop | pyrax/monitoring.py | 1 | 3994 | #!/usr/bin/env python
import argparse
import os
import sys
import pyrax
pyrax.set_setting("identity_type", "rackspace")
pyrax.set_credential_file(os.path.expanduser("~/.pyraxcreds"))
cm = pyrax.cloud_monitoring
auto = pyrax.autoscale
def get_entity(ip):
"""Create or get an entity."""
entities = cm.list_entities()
matches = [entity for entity in entities if ip in entity.ip_addresses]
if len(matches) == 1:
return matches[0]
else:
ent = cm.create_entity(label="%s-entity" % ip,
ip_addresses={"ip": ip})
return ent
def create_email_notification(args):
"""Create an email notification."""
entity = get_entity(args.ip)
# Create a check on our entity.
# This will do an HTTP GET request on the API every 60 seconds with
# a 10 second timeout.
check = cm.create_check(entity, label="my-check",
check_type="remote.http",
details={"url": "http://bikeshed.io/api/v1.0/color",
"method": "GET"},
period=60, timeout=10, # How often to check, and what timeout
monitoring_zones_poll=["mzdfw"], # Which DCs to check from
target_alias="ip" # The public IP for our entity
)
# Create an email notification.
email = cm.create_notification("email", label="my-email",
details={"address": "[email protected]"})
# Create a notification plan that will email for all states.
plan = cm.create_notification_plan("my-plan", ok_state=email,
warning_state=email, critical_state=email)
# Create an alarm that will cause a critical state to be reached
    # if our HTTP GET check returns a status code of "111".
alarm = cm.create_alarm(entity, check, plan,
"if (metric[\"code\"] == \"111\") { return new AlarmStatus(CRITICAL); }")
def create_webhook_notification(args):
"""Create a webhook notification."""
entity = get_entity(args.ip)
# Create a check on our entity.
# This will do an HTTP GET request on the API every 60 seconds with
# a 10 second timeout.
check = cm.create_check(entity, label="my-check",
check_type="remote.http",
details={"url": "http://bikeshed.io/api/v1.0/color",
"method": "GET"},
period=60, timeout=10, # How often to check, and what timeout
monitoring_zones_poll=["mzdfw"], # Which DCs to check from
target_alias="ip" # The public IP for our entity
)
# Now we bring up our autoscale scaling group.
group = auto.list()[0]
# Get our policy, which has the webhook.
policy = group.list_policies()[0]
# Get the hook out of the policy.
hook = policy.list_webhooks()[0]
# Create an email notification.
email = cm.create_notification("email", label="my-email",
details={"address": "[email protected]"})
# Create a web hook notification with the HREF link in the hook.
webhook = cm.create_notification("webhook", label="my-webhook",
details={"url": hook.links[1]["href"]})
# Create another notification plan which will call our hook
plan = cm.create_notification_plan("my-webhook", ok_state=email,
warning_state=email, critical_state=webhook)
# Create an alarm
alarm = cm.create_alarm(entity, check, plan,
"if (metric[\"code\"] == \"111\") { return new AlarmStatus(CRITICAL); }")
def _main():
parser = argparse.ArgumentParser()
parser.add_argument("--ip")
subparsers = parser.add_subparsers()
email_notify = subparsers.add_parser("email-notify")
email_notify.set_defaults(func=create_email_notification)
webhook_notify = subparsers.add_parser("webhook-notify")
webhook_notify.set_defaults(func=create_webhook_notification)
args = parser.parse_args()
args.func(args)
return 0
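# Example invocations (a sketch; the address below is hypothetical):
#   python monitoring.py --ip 203.0.113.10 email-notify
#   python monitoring.py --ip 203.0.113.10 webhook-notify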
if __name__ == "__main__":
sys.exit(_main())
| apache-2.0 | -880,856,842,529,316,000 | 31.737705 | 78 | 0.619179 | false | 3.870155 | false | false | false |
jscott1989/happening | src/events/templatetags/tickets.py | 2 | 2585 | """Template tags relating to tickets."""
from django import template
import json
register = template.Library()
@register.filter()
def has_tickets(user, event):
"""Return True if the user has tickets for this event."""
if user.is_authenticated():
return event.tickets.filter(user=user, cancelled=False).count() > 0
return False
@register.filter()
def tickets(user, event):
"""Return the tickets the user has for the event."""
if user.is_authenticated():
return event.tickets.filter(user=user, cancelled=False)
return []
@register.filter()
def orders(user, event):
"""Return the orders the user has for the event."""
if user.is_authenticated():
return [o for o in event.orders.filter(user=user) if not o.cancelled]
return []
@register.filter()
def other_tickets(user, event):
"""Return the tickets the user has for the event that have no order."""
# All of this functionality is legacy and will be removed
if user.is_authenticated():
return event.tickets.filter(user=user, cancelled=False, order=None)
return []
@register.filter()
def visible_tickets_json(event, user):
"""Return json of available tickets for ticket widget."""
def ticket_type_to_dict(ticket_type, purchasable):
ret = {
"name": ticket_type.name,
"remaining_tickets": ticket_type.remaining_tickets,
"price": ticket_type.price,
"pk": ticket_type.pk}
if not purchasable:
ret["remaining_tickets"] = 0
return ret
return json.dumps([ticket_type_to_dict(t, t.purchasable_by(user)) for t in
event.ticket_types.visible_to(user)])
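# For illustration only (not part of the original module), the JSON built
# above is a list shaped roughly like:
#   [{"name": "Standard", "remaining_tickets": 10, "price": 5.0, "pk": 1}]
# with "remaining_tickets" forced to 0 for types the user cannot purchase.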
@register.filter()
def purchasable_by(ticket_type, user):
"""The ticket is/not purchasable by the user."""
return ticket_type.purchasable_by(user)
@register.filter()
def purchasable_tickets_no(event, user):
"""Return the number of tickets purchasable by a user for an event."""
return sum([t.remaining_tickets for t in
event.ticket_types.purchasable_by(user)])
@register.filter()
def waiting_list_available(event, user):
"""Return if waiting lists are available for this user."""
return len([t for t in event.ticket_types.waiting_list_available()
if t.visible_to(user)]) > 0
@register.filter()
def rsvp_going(user, event):
"""Return True if the user has indicated they will attend this event."""
if user.is_authenticated():
return user.rsvps.filter(event=event, going=True).count() > 0
return False
| mit | -5,112,925,305,039,460,000 | 29.77381 | 78 | 0.661122 | false | 3.661473 | false | false | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/types/customer_service.py | 1 | 7684 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.enums.types import access_role as gage_access_role
from google.ads.googleads.v8.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v8.resources.types import customer as gagr_customer
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.services',
marshal='google.ads.googleads.v8',
manifest={
'GetCustomerRequest',
'MutateCustomerRequest',
'CreateCustomerClientRequest',
'CustomerOperation',
'CreateCustomerClientResponse',
'MutateCustomerResponse',
'MutateCustomerResult',
'ListAccessibleCustomersRequest',
'ListAccessibleCustomersResponse',
},
)
class GetCustomerRequest(proto.Message):
r"""Request message for
[CustomerService.GetCustomer][google.ads.googleads.v8.services.CustomerService.GetCustomer].
Attributes:
resource_name (str):
Required. The resource name of the customer
to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateCustomerRequest(proto.Message):
r"""Request message for
[CustomerService.MutateCustomer][google.ads.googleads.v8.services.CustomerService.MutateCustomer].
Attributes:
customer_id (str):
Required. The ID of the customer being
modified.
operation (google.ads.googleads.v8.services.types.CustomerOperation):
Required. The operation to perform on the
customer
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v8.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operation = proto.Field(
proto.MESSAGE,
number=4,
message='CustomerOperation',
)
validate_only = proto.Field(
proto.BOOL,
number=5,
)
response_content_type = proto.Field(
proto.ENUM,
number=6,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class CreateCustomerClientRequest(proto.Message):
r"""Request message for
[CustomerService.CreateCustomerClient][google.ads.googleads.v8.services.CustomerService.CreateCustomerClient].
Attributes:
customer_id (str):
Required. The ID of the Manager under whom
client customer is being created.
customer_client (google.ads.googleads.v8.resources.types.Customer):
Required. The new client customer to create.
The resource name on this customer will be
ignored.
email_address (str):
Email address of the user who should be
invited on the created client customer.
Accessible only to customers on the allow-list.
access_role (google.ads.googleads.v8.enums.types.AccessRoleEnum.AccessRole):
The proposed role of user on the created
client customer. Accessible only to customers on
the allow-list.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
customer_client = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_customer.Customer,
)
email_address = proto.Field(
proto.STRING,
number=5,
optional=True,
)
access_role = proto.Field(
proto.ENUM,
number=4,
enum=gage_access_role.AccessRoleEnum.AccessRole,
)
validate_only = proto.Field(
proto.BOOL,
number=6,
)
class CustomerOperation(proto.Message):
r"""A single update on a customer.
Attributes:
update (google.ads.googleads.v8.resources.types.Customer):
Mutate operation. Only updates are supported
for customer.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
FieldMask that determines which resource
fields are modified in an update.
"""
update = proto.Field(
proto.MESSAGE,
number=1,
message=gagr_customer.Customer,
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
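# A usage sketch (assumption, not part of the generated code): building an
# update operation for a hypothetical customer.
#
#   from google.protobuf import field_mask_pb2
#   op = CustomerOperation()
#   op.update.resource_name = "customers/1234567890"
#   op.update.descriptive_name = "New descriptive name"
#   op.update_mask = field_mask_pb2.FieldMask(paths=["descriptive_name"])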
class CreateCustomerClientResponse(proto.Message):
r"""Response message for CreateCustomerClient mutate.
Attributes:
resource_name (str):
The resource name of the newly created
customer client.
invitation_link (str):
Link for inviting user to access the created
customer. Accessible to allowlisted customers
only.
"""
resource_name = proto.Field(
proto.STRING,
number=2,
)
invitation_link = proto.Field(
proto.STRING,
number=3,
)
class MutateCustomerResponse(proto.Message):
r"""Response message for customer mutate.
Attributes:
result (google.ads.googleads.v8.services.types.MutateCustomerResult):
Result for the mutate.
"""
result = proto.Field(
proto.MESSAGE,
number=2,
message='MutateCustomerResult',
)
class MutateCustomerResult(proto.Message):
r"""The result for the customer mutate.
Attributes:
resource_name (str):
Returned for successful operations.
customer (google.ads.googleads.v8.resources.types.Customer):
The mutated customer with only mutable fields after mutate.
The fields will only be returned when response_content_type
is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
customer = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_customer.Customer,
)
class ListAccessibleCustomersRequest(proto.Message):
r"""Request message for
[CustomerService.ListAccessibleCustomers][google.ads.googleads.v8.services.CustomerService.ListAccessibleCustomers].
"""
class ListAccessibleCustomersResponse(proto.Message):
r"""Response message for
[CustomerService.ListAccessibleCustomers][google.ads.googleads.v8.services.CustomerService.ListAccessibleCustomers].
Attributes:
resource_names (Sequence[str]):
Resource name of customers directly
accessible by the user authenticating the call.
"""
resource_names = proto.RepeatedField(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 1,351,810,789,427,220,700 | 29.736 | 120 | 0.653826 | false | 4.326577 | false | false | false |
HenrikPoulsen/Json2Class | convert/py/generator.py | 1 | 5455 | import re
from convert.base.generator import BaseGenerator
from convert.base.parsedobject import *
import datetime
class Generator(BaseGenerator):
def _generate_default_constructor(self):
if self.data.type == ParsedObjectType.Enum:
return ""
constructor = " def __init__(self):\n"
if self.data.data.__len__() == 0:
constructor += " pass\n"
else:
for member in self.data.data:
if member.type == ParsedObjectType.Array:
constructor += " self._{0} = []\n".format(_camel_case(member.name))
elif member.type == ParsedObjectType.String:
constructor += " self._{0} = \"\"\n".format(_camel_case(member.name))
elif member.type == ParsedObjectType.Int:
constructor += " self._{0} = 0\n".format(_camel_case(member.name))
elif member.type == ParsedObjectType.Float:
constructor += " self._{0} = 0.0\n".format(_camel_case(member.name))
elif member.type == ParsedObjectType.Object:
constructor += " self._{0} = None\n".format(_camel_case(member.name))
elif member.type == ParsedObjectType.Bool:
constructor += " self._{0} = False\n".format(_camel_case(member.name))
elif member.type == ParsedObjectType.Enum:
constructor += " self._{0} = {1}(0)\n".format(_camel_case(member.name), _capitalize(member.type_name))
constructor += "\n"
return constructor
def _generate_footer(self):
return ""
def _generate_member_access(self):
result = ""
for member in self.data.data:
result += self._generate_getter_setter(member)
return result
def _generate_header(self):
result = ""
# Enums only need to import enum, and won't have a factory
if self.data.type == ParsedObjectType.Enum:
result += "from enum import Enum\n"
else:
for factory in self.factories:
result += factory.generate_import()
for member in self.data.data:
if _capitalize(member.type_name) == _capitalize(self.data.type_name):
# if the member is the same class as the current class then we shouldn't import it
continue
if member.type == ParsedObjectType.Object or member.type == ParsedObjectType.Enum:
result += "from {0} import {1}\n".format(member.type_name.lower(), _capitalize(member.type_name))
elif member.type == ParsedObjectType.Array:
child = member.data[0]
if _capitalize(child.type_name) == _capitalize(self.data.type_name):
continue
if child.type == ParsedObjectType.Object:
result += "from {0} import {1}\n".format(child.type_name.lower(), _capitalize(child.type_name))
date_str = "Date: {0}".format(datetime.date.today())
if BaseGenerator.skip_date_comment:
date_str = ""
date_str = date_str.ljust(82)
result += ("#####################################################################################\n"
"# This file is generated by Json2Class (https://github.com/DragonSpawn/Json2Class) #\n"
"# Modifications to this file will be lost the next time you run the tool. #\n"
"# {0}#\n"
"#####################################################################################\n\n").format(date_str)
inheritance_str = "object"
if self.data.type == ParsedObjectType.Enum:
inheritance_str = "Enum"
result += "\nclass {0}({1}):\n".format(_capitalize(self.data.type_name), inheritance_str)
return result
def file_name(self, json_name):
return json_name.lower() + ".py"
def _generate_getter_setter(self, member):
if self.data.type == ParsedObjectType.Enum:
return " {0} = {1}\n".format(_capitalize(member.name), member.data)
return (" @property\n"
" def {0}(self):\n"
" \"\"\":rtype: {1}\"\"\"\n"
" return self._{0}\n\n"
" @{0}.setter\n"
" def {0}(self, value):\n"
" \"\"\":type value: {1}\n"
" :rtype: None\"\"\"\n"
" self._{0} = value\n\n").format(_camel_case(member.name), _get_type_name(member))
def _camel_case(obj):
    """Convert a CamelCase identifier to its snake_case equivalent."""
    a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
    return a.sub(r'_\1', obj).lower()
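# For illustration (not part of the original module): despite its name, the
# helper above produces snake_case, e.g. "PlayerName" -> "player_name" and
# "HTTPCode" -> "http_code".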
def _capitalize(obj):
"""
Returns the object name with the first letter capitalized (all other untouched).
:param obj:
:return:
"""
if obj.__len__() < 2:
return obj
if obj == "string" or obj == "float" or obj == "int":
return obj
return obj[0].upper() + obj[1:]
def _get_type_name(obj):
if obj.type == ParsedObjectType.String:
return "str"
if obj.type == ParsedObjectType.Object or obj.type == ParsedObjectType.Enum:
return _capitalize(obj.type_name)
if obj.type == ParsedObjectType.Array:
return "list of [{0}]".format(_get_type_name(obj.data[0]))
    return obj.type.name.lower()
| mit | -1,390,719,304,453,153,000 | 42.301587 | 129 | 0.519707 | false | 3.893647 | false | false | false
LuoZijun/solidity-sc2-replay-reader | solidity.py | 1 | 5965 | #!/usr/bin/env python
#coding: utf8
import os, sys, json
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
# Solc Compiler
SOLC = "solc"
build_dir = os.path.join(BASE_PATH, "build")
src_dir = os.path.join(BASE_PATH, "src")
dst_dir = os.path.join(BASE_PATH, "build/src")
bin_dir = os.path.join(BASE_PATH, "build/bin")
abi_dir = os.path.join(BASE_PATH, "build/abi")
ast_dir = os.path.join(BASE_PATH, "build/ast")
src_entry = os.path.join(src_dir, "main.sol")
def rmdir(path):
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(path)
def diff_path():
if not os.path.exists(build_dir):
os.mkdir(build_dir)
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
if not os.path.exists(bin_dir):
os.mkdir(bin_dir)
if not os.path.exists(abi_dir):
os.mkdir(abi_dir)
if not os.path.exists(ast_dir):
os.mkdir(ast_dir)
assert(os.path.exists(build_dir) and os.path.isdir(build_dir) )
assert(os.path.exists(src_dir) and os.path.isdir(src_dir) )
assert(os.path.exists(dst_dir) and os.path.isdir(dst_dir) )
assert(os.path.exists(bin_dir) and os.path.isdir(bin_dir) )
assert(os.path.exists(abi_dir) and os.path.isdir(abi_dir) )
assert(os.path.exists(ast_dir) and os.path.isdir(ast_dir) )
src_paths = map(lambda (root, dirs, files): root.replace(src_dir, ""), os.walk(src_dir) )
dst_paths = map(lambda (root, dirs, files): root.replace(dst_dir, ""), os.walk(dst_dir) )
_paths = filter(lambda p: p not in src_paths, dst_paths)
paths = map(lambda p: os.path.join(dst_dir, p[1:] if p.startswith("/") else p ), _paths )
map(lambda p: rmdir(p), paths )
_paths = filter(lambda p: p not in dst_paths, src_paths)
paths = map(lambda p: os.path.join(dst_dir, p[1:] if p.startswith("/") else p ), _paths )
map(lambda p: os.mkdir(p), paths)
def clean_dst_path():
rmdir(dst_dir)
os.mkdir(dst_dir)
def find_compilers():
paths = os.environ["PATH"].split(":")
solc = filter(lambda p: os.path.exists(os.path.join(p, "solc")) and os.path.isfile(os.path.join(p, "solc")), paths)
# os.path.exists(os.path.join(p, "solcjs")) and os.path.isfile(os.path.join(p, "solcjs"))
serpent = filter(lambda p: os.path.exists(os.path.join(p, "serpent")) and os.path.isfile(os.path.join(p, "serpent")), paths)
lllc = filter(lambda p: os.path.exists(os.path.join(p, "lllc")) and os.path.isfile(os.path.join(p, "lllc")), paths)
result = []
if len(solc) > 0:
result.append("Solidity")
if len(serpent) > 0:
result.append("Serpent")
if len(lllc) > 0:
result.append("LLL")
return result
def compile_solidity():
"""
solc --optimize --bin -o ./build/bin contract.sol
solc --optimize --ast -o ./build/ast contract.sol
solc --optimize --abi -o ./build contract.sol
"""
assert(os.path.exists(src_entry) and os.path.isfile(src_entry) )
commands = [
[SOLC, "--optimize", "--bin", "-o", os.path.relpath(bin_dir), os.path.relpath(src_entry) ]
, [SOLC, "--optimize", "--ast", "-o", os.path.relpath(ast_dir), os.path.relpath(src_entry) ]
, [SOLC, "--optimize", "--abi", "-o", os.path.relpath(build_dir), os.path.relpath(src_entry) ]
]
print("======================Complie Solidity Language=========================")
for cmd in commands:
command = " ".join(cmd)
print(command)
os.system(command)
# result = map(lambda cmd: os.system(" ".join(cmd)), commands )
# print(result)
def restruct():
contract = {}
bin_files = reduce(lambda a, (root, dirs, files): a + map(lambda filename: os.path.join(root, filename), files ), os.walk(bin_dir), [] )
abi_files = reduce(lambda a, (root, dirs, files): a + map(lambda filename: os.path.join(root, filename), files ), os.walk(dst_dir), [] )
def path_handle(data, filepath):
_, filename = os.path.split(filepath)
assert(filename.endswith(".bin") or filename.endswith(".abi") )
if filename.endswith(".bin"):
key = "code"
elif filename.endswith(".abi"):
key = "interface"
else:
pass
object_name = filename[:-4]
_tmp = object_name.split(":")
if len(_tmp) > 1:
object_name = _tmp[-1]
if object_name not in data or type(data[object_name]) != dict:
data[object_name] = {}
if key not in data[object_name]:
res = open(filepath, "rb").read()
if key == "interface":
open(os.path.join(abi_dir, object_name+".abi"), "wb").write(res)
data[object_name][key] = json.loads(res)
elif key == "code":
res = "0x" + res
data[object_name][key] = res
else:
data[object_name][key] = res
return data
data = reduce(path_handle, abi_files, reduce(path_handle, bin_files, {}) )
print("======================Contract=========================")
output = json.dumps(data)
open(os.path.join(build_dir, "contract.json"), "wb").write(output)
print(output)
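# The resulting build/contract.json maps each contract name to its bytecode
# and ABI, roughly (illustrative values only):
#   {"MyContract": {"code": "0x6060...", "interface": [...]}}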
def usage():
message = """
$ python solidity.py -src ./src -entry main.sol -out ./build -target contract.json
-src solidity source dir
-entry source entry file
-out output dir
-target solidity bytecode and interface file (JSON Format)
--help show this help text
"""
print(message)
def main():
compilers = find_compilers()
print("====================Compilers====================")
print(compilers)
assert("Solidity" in compilers)
clean_dst_path()
diff_path()
    compile_solidity()
restruct()
if __name__ == '__main__':
main()
| gpl-3.0 | 1,277,137,600,390,265,900 | 34.295858 | 140 | 0.57435 | false | 3.142782 | false | false | false |
bert/geda-gaf | xorn/src/python/base64.py | 1 | 7731 | # xorn.geda - Python library for manipulating gEDA files
#**********************************************************************
# _ _ __ _ _
# __ _ _ __ ___| |_ | |__ __ _ ___ ___ / /_ | or |
# / _` | '_ \ / _ \ __| | '_ \ / _` / __|/ _ \ '_ \| or |_
# | (_| | | | | __/ |_ | |_) | (_| \__ \ __/ (_) |__ _|
# \__, |_| |_|\___|\__| |_.__/ \__,_|___/\___|\___/ |_|
# |___/
#
# created by Alfred Reibenschuh <[email protected]>,
# under the "GNU Library General Public License" (see below).
#
#**********************************************************************
# Copyright (C) 2003 Free Software Foundation
# Copyright (C) 2013-2017 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
## \namespace xorn.base64
## Reading and writing base64-encoded data
from gettext import gettext as _
BASE64 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
PAD64 = '='
RANK = [
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0x00-0x0f
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0x10-0x1f
255,255,255,255,255,255,255,255,255,255,255, 62,255,255,255, 63, # 0x20-0x2f
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,255,255,255,255,255,255, # 0x30-0x3f
255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, # 0x40-0x4f
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,255,255,255,255,255, # 0x50-0x5f
255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, # 0x60-0x6f
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,255,255,255,255,255, # 0x70-0x7f
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0x80-0x8f
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0x90-0x9f
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0xa0-0xaf
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0xb0-0xbf
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0xc0-0xcf
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0xd0-0xdf
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0xe0-0xef
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 0xf0-0xff
]
## Write a binary string to a file in %base64 representation.
#
# If \a columns is not \c None, insert a newline every \a columns
# characters. This is required by RFC 2045, but some applications
# don't require it. \a columns must be positive and a multiple of \c 4.
#
# If \a delim is not \c None, it is written on a separate line after
# the data. This argument is provided for symmetry with \ref decode.
#
# \return \c None.
def encode(f, src, columns = 72, delim = None):
# bulk encoding
blen = len(src) - len(src) % 3
ocnt = 0
for pos in xrange(0, blen, 3):
# Convert 3 bytes of src to 4 bytes of output
#
# output[0] = input[0] 7:2
# output[1] = input[0] 1:0 input[1] 7:4
# output[2] = input[1] 3:0 input[2] 7:6
# output[3] = input[1] 5:0
i0, i1, i2 = [ord(ch) for ch in src[pos:pos + 3]]
# Map output to the Base64 alphabet
f.write(BASE64[i0 >> 2] +
BASE64[((i0 & 0x03) << 4) + (i1 >> 4)] +
BASE64[((i1 & 0x0f) << 2) + (i2 >> 6)] +
BASE64[i2 & 0x3f])
if columns is not None:
ocnt += 1
if ocnt % (columns / 4) == 0 and pos != len(src) - 3:
f.write('\n')
# Now worry about padding with remaining 1 or 2 bytes
if blen != len(src):
i0 = ord(src[blen])
if blen == len(src) - 1:
i1 = 0
else:
i1 = ord(src[blen + 1])
i2 = 0
f.write(BASE64[i0 >> 2] +
BASE64[((i0 & 0x03) << 4) + (i1 >> 4)])
if blen == len(src) - 1:
f.write(PAD64)
else:
f.write(BASE64[((i1 & 0x0f) << 2) + (i2 >> 6)])
f.write(PAD64)
if src:
f.write('\n')
if delim is not None:
f.write(delim + '\n')
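# A small round-trip sketch (not part of the original module), assuming a
# Python 2 file-like object such as StringIO:
#
#   from StringIO import StringIO
#   buf = StringIO()
#   encode(buf, '\x00\x01\x02', delim='ENDDATA')
#   buf.seek(0)
#   assert decode(buf, delim='ENDDATA') == '\x00\x01\x02'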
## Raised when reading invalid or unterminated base64-encoded data.
class DecodingError(Exception):
pass
## Read a string in %base64 representation from a file.
#
# This function is liberal in what it will accept. It ignores
# non-base64 symbols.
#
# If \a delim is \c None, read until the end of the file. If \a delim
# is not \c None, read until a line containing exactly \a delim is
# found.
#
# \return A string containing the decoded data.
#
# \throw DecodingError if reading something that is not valid
# base64-encoded data
# \throw DecodingError if the end of the file is hit and \a delim is
# not \c None
def decode(f, delim = None):
ch = 0
state = 0
res = 0
dst = []
pad = 0
while True:
try:
line = f.next()
except StopIteration:
if delim is not None:
raise DecodingError, _("Unexpected end-of-file")
break
if delim is not None and line == delim + '\n':
break
for ch in line:
if ch == PAD64:
pad += 1
continue
pos = RANK[ord(ch)]
if pos == 255:
# Skip any non-base64 anywhere
continue
if pad != 0:
raise DecodingError
if state == 0:
dst += [pos << 2]
state = 1
elif state == 1:
dst[-1] |= pos >> 4
res = (pos & 0x0f) << 4
state = 2
elif state == 2:
dst += [res | (pos >> 2)]
res = (pos & 0x03) << 6
state = 3
elif state == 3:
dst += [res | pos]
state = 0
# We are done decoding Base-64 chars. Let's see if we ended
# on a byte boundary, and/or with erroneous trailing characters.
if pad != 0:
# We got a pad char.
if state == 0:
# Invalid = in first position
raise DecodingError
elif state == 1:
# Invalid = in second position
raise DecodingError
elif state == 2:
# Valid, means one byte of info
# Make sure there is another trailing = sign.
if pad != 2:
raise DecodingError
elif state == 3:
# Valid, means two bytes of info
# We know this char is an =. Is there anything but
# whitespace after it?
if pad != 1:
raise DecodingError
if state == 2 or state == 3:
# Now make sure for cases 2 and 3 that the "extra"
# bits that slopped past the last full byte were
# zeros. If we don't check them, they become a
# subliminal channel.
if res != 0:
raise DecodingError
else:
# We ended by seeing the end of the string. Make sure we
# have no partial bytes lying around.
if state != 0:
raise DecodingError
return ''.join(chr(b) for b in dst)
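# Illustrative round-trip (a sketch, not executed here), assuming the standard
# base64 alphabet in BASE64/RANK above:
#
#     import StringIO
#     buf = StringIO.StringIO()
#     encode(buf, 'Man')          # buf now holds 'TWFu\n'
#     assert decode(StringIO.StringIO(buf.getvalue())) == 'Man'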
| gpl-2.0 | -3,761,338,956,071,024,000 | 34.95814 | 80 | 0.542621 | false | 3.164552 | false | false | false |
leigh123linux/cinnamon-screensaver | src/stage.py | 1 | 36783 | #!/usr/bin/python3
import gi
gi.require_version('CDesktopEnums', '3.0')
from gi.repository import Gtk, Gdk, CScreensaver, CDesktopEnums, GObject
import random
import status
import constants as c
import singletons
from monitorView import MonitorView
from unlock import UnlockDialog
from clock import ClockWidget
from albumArt import AlbumArt
from audioPanel import AudioPanel
from infoPanel import InfoPanel
from osk import OnScreenKeyboard
from floating import ALIGNMENTS
from util import utils, trackers, settings
from util.eventHandler import EventHandler
class Stage(Gtk.Window):
"""
The Stage is the toplevel window of the entire screensaver while
in Active mode.
It's the first thing made, the last thing destroyed, and all other
widgets live inside of it (or rather, inside the GtkOverlay below)
It is Gtk.WindowType.POPUP to avoid being managed/composited by muffin,
and to prevent animation during its creation and destruction.
    The Stage responds pretty much only to the instructions of the
ScreensaverManager.
"""
def __init__(self, manager, away_message):
if status.InteractiveDebug:
Gtk.Window.__init__(self,
type=Gtk.WindowType.TOPLEVEL,
decorated=True,
skip_taskbar_hint=False)
else:
Gtk.Window.__init__(self,
type=Gtk.WindowType.POPUP,
decorated=False,
skip_taskbar_hint=True)
self.get_style_context().add_class("csstage")
trackers.con_tracker_get().connect(singletons.Backgrounds,
"changed",
self.on_bg_changed)
self.destroying = False
self.manager = manager
status.screen = CScreensaver.Screen.new(status.Debug)
self.away_message = away_message
self.monitors = []
self.last_focus_monitor = -1
self.overlay = None
self.clock_widget = None
self.albumart_widget = None
self.unlock_dialog = None
self.audio_panel = None
self.info_panel = None
self.stage_refresh_id = 0
self.floaters = []
self.event_handler = EventHandler(manager)
self.get_style_context().remove_class("background")
self.set_events(self.get_events() |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.VISIBILITY_NOTIFY_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.FOCUS_CHANGE_MASK)
c = Gdk.RGBA(0, 0, 0, 0)
self.override_background_color (Gtk.StateFlags.NORMAL, c);
self.update_geometry()
self.overlay = Gtk.Overlay()
trackers.con_tracker_get().connect(self.overlay,
"realize",
self.on_realized)
trackers.con_tracker_get().connect(self.overlay,
"get-child-position",
self.position_overlay_child)
self.overlay.show_all()
self.add(self.overlay)
# We hang onto the UPowerClient here so power events can
# trigger changes to the info panel.
self.power_client = singletons.UPowerClient
trackers.con_tracker_get().connect(self.power_client,
"power-state-changed",
self.on_power_state_changed)
# This filter suppresses any other windows that might share
# our window group in muffin, from showing up over the Stage.
# For instance: Chrome and Firefox native notifications.
self.gdk_filter = CScreensaver.GdkEventFilter()
trackers.con_tracker_get().connect(status.screen,
"size-changed",
self.on_screen_size_changed)
trackers.con_tracker_get().connect(status.screen,
"monitors-changed",
self.on_monitors_changed)
trackers.con_tracker_get().connect(status.screen,
"composited-changed",
self.on_composited_changed)
trackers.con_tracker_get().connect(self,
"grab-broken-event",
self.on_grab_broken_event)
if status.InteractiveDebug:
self.set_interactive_debugging(True)
def update_monitors(self):
self.destroy_monitor_views()
try:
self.setup_monitors()
for monitor in self.monitors:
self.sink_child_widget(monitor)
except Exception as e:
print("Problem updating monitor views views: %s" % str(e))
def on_screen_size_changed(self, screen, data=None):
"""
The screen changing size should be acted upon immediately, to ensure coverage.
Wallpapers are secondary.
"""
if status.Debug:
print("Stage: Received screen size-changed signal, refreshing stage")
self.update_geometry()
self.move_onscreen()
self.overlay.queue_resize()
def on_monitors_changed(self, screen, data=None):
"""
Updating monitors also will trigger an immediate stage coverage update (same
as on_screen_size_changed), and follow up at idle with actual monitor view
refreshes (wallpapers.)
"""
if status.Debug:
print("Stage: Received screen monitors-changed signal, refreshing stage")
self.update_geometry()
self.move_onscreen()
self.overlay.queue_resize()
Gdk.flush()
self.queue_refresh_stage()
def on_composited_changed(self, screen, data=None):
if self.get_realized():
user_time = self.get_display().get_user_time()
self.hide()
self.unrealize()
self.realize()
self.get_window().set_user_time(user_time)
self.show()
GObject.idle_add(self.manager.grab_stage)
def on_grab_broken_event(self, widget, event, data=None):
GObject.idle_add(self.manager.grab_stage)
return False
def queue_refresh_stage(self):
"""
Queues a complete refresh of the stage, resizing the screen if necessary,
reconstructing the individual monitor objects, etc...
"""
if self.stage_refresh_id > 0:
GObject.source_remove(self.stage_refresh_id)
self.stage_refresh_id = 0
self.stage_refresh_id = GObject.idle_add(self._update_full_stage_on_idle)
def _update_full_stage_on_idle(self, data=None):
self.stage_refresh_id = 0
self._refresh()
return False
def _refresh(self):
Gdk.flush()
if status.Debug:
print("Stage: refresh callback")
self.update_geometry()
self.move_onscreen()
self.update_monitors()
self.overlay.queue_resize()
def activate(self, callback):
"""
This is the primary way of making the Stage visible.
"""
self.set_opacity(1.0)
self.move_onscreen()
self.show()
callback()
def deactivate(self, callback):
"""
This is the primary way of destroying the stage.
"""
self.hide()
callback()
def on_realized(self, widget):
"""
Repositions the window when it is realized, to cover the entire
GdkScreen (a rectangle exactly encompassing all monitors.)
From here we also proceed to construct all overlay children and
activate our window suppressor.
"""
window = self.get_window()
utils.override_user_time(window)
self.setup_children()
self.gdk_filter.start(self)
trackers.con_tracker_get().disconnect(self.overlay,
"realize",
self.on_realized)
def move_onscreen(self):
w = self.get_window()
if w:
w.move_resize(self.rect.x,
self.rect.y,
self.rect.width,
self.rect.height)
self.move(self.rect.x, self.rect.y)
self.resize(self.rect.width, self.rect.height)
def deactivate_after_timeout(self):
self.manager.set_active(False)
def setup_children(self):
"""
Creates all of our overlay children. If a new 'widget' gets added,
this should be the setup point for it.
We bail if something goes wrong on a critical widget - a monitor view or
unlock widget.
"""
total_failure = False
try:
self.setup_monitors()
except Exception as e:
print("Problem setting up monitor views: %s" % str(e))
total_failure = True
try:
self.setup_unlock()
except Exception as e:
print("Problem setting up unlock dialog: %s" % str(e))
total_failure = True
if not total_failure:
try:
self.setup_clock()
except Exception as e:
print("Problem setting up clock widget: %s" % str(e))
self.clock_widget = None
try:
self.setup_albumart()
except Exception as e:
print("Problem setting up albumart widget: %s" % str(e))
self.albumart_widget = None
try:
self.setup_status_bars()
except Exception as e:
print("Problem setting up status bars: %s" % str(e))
self.audio_panel = None
self.info_panel = None
try:
self.setup_osk()
except Exception as e:
print("Problem setting up on-screen keyboard: %s" % str(e))
self.osk = None
if total_failure:
print("Total failure somewhere, deactivating screensaver.")
GObject.idle_add(self.deactivate_after_timeout)
def destroy_children(self):
try:
self.destroy_monitor_views()
except Exception as e:
print(e)
try:
if self.unlock_dialog != None:
self.unlock_dialog.destroy()
except Exception as e:
print(e)
try:
if self.clock_widget != None:
self.clock_widget.stop_positioning()
self.clock_widget.destroy()
except Exception as e:
print(e)
try:
if self.albumart_widget != None:
self.albumart_widget.stop_positioning()
self.albumart_widget.destroy()
except Exception as e:
print(e)
try:
if self.info_panel != None:
self.info_panel.destroy()
except Exception as e:
print(e)
try:
            if self.audio_panel != None:
self.audio_panel.destroy()
except Exception as e:
print(e)
try:
if self.osk != None:
self.osk.destroy()
except Exception as e:
print(e)
self.unlock_dialog = None
self.clock_widget = None
self.albumart_widget = None
self.info_panel = None
self.audio_panel = None
self.osk = None
self.away_message = None
self.monitors = []
self.floaters = []
def destroy_stage(self):
"""
Performs all tear-down necessary to destroy the Stage, destroying
all children in the process, and finally destroying itself.
"""
trackers.con_tracker_get().disconnect(singletons.Backgrounds,
"changed",
self.on_bg_changed)
trackers.con_tracker_get().disconnect(self.power_client,
"power-state-changed",
self.on_power_state_changed)
trackers.con_tracker_get().disconnect(self,
"grab-broken-event",
self.on_grab_broken_event)
self.set_timeout_active(None, False)
self.destroy_children()
self.gdk_filter.stop()
self.gdk_filter = None
trackers.con_tracker_get().disconnect(status.screen,
"size-changed",
self.on_screen_size_changed)
trackers.con_tracker_get().disconnect(status.screen,
"monitors-changed",
self.on_monitors_changed)
trackers.con_tracker_get().disconnect(self.overlay,
"get-child-position",
self.position_overlay_child)
self.destroy()
status.screen = None
def setup_monitors(self):
"""
Iterate through the monitors, and create MonitorViews for each one
to cover them.
"""
self.monitors = []
status.Spanned = settings.bg_settings.get_enum("picture-options") == CDesktopEnums.BackgroundStyle.SPANNED
if status.InteractiveDebug or status.Spanned:
monitors = (status.screen.get_primary_monitor(),)
else:
n = status.screen.get_n_monitors()
monitors = ()
for i in range(n):
monitors += (i,)
for index in monitors:
monitor = MonitorView(index)
image = Gtk.Image()
singletons.Backgrounds.create_and_set_gtk_image (image,
monitor.rect.width,
monitor.rect.height)
monitor.set_next_wallpaper_image(image)
self.monitors.append(monitor)
self.add_child_widget(monitor)
self.update_monitor_views()
def on_bg_changed(self, bg):
"""
Callback for our GnomeBackground instance, this tells us when
the background settings have changed, so we can update our wallpaper.
"""
for monitor in self.monitors:
image = Gtk.Image()
singletons.Backgrounds.create_and_set_gtk_image (image,
monitor.rect.width,
monitor.rect.height)
monitor.set_next_wallpaper_image(image)
def on_power_state_changed(self, client, data=None):
"""
Callback for UPower changes, this will make our MonitorViews update
themselves according to user setting and power state.
"""
if status.Debug:
print("stage: Power state changed, updating info panel")
self.info_panel.update_visibility()
def setup_clock(self):
"""
Construct the clock widget and add it to the overlay, but only actually
show it if we're a) Not running a plug-in, and b) The user wants it via
preferences.
Initially invisible, regardless - its visibility is controlled via its
own positioning timer.
"""
self.clock_widget = ClockWidget(self.away_message, status.screen.get_mouse_monitor(), status.screen.get_low_res_mode())
self.add_child_widget(self.clock_widget)
self.floaters.append(self.clock_widget)
if settings.get_show_clock():
self.clock_widget.start_positioning()
def setup_albumart(self):
"""
Construct the AlbumArt widget and add it to the overlay, but only actually
show it if we're a) Not running a plug-in, and b) The user wants it via
preferences.
Initially invisible, regardless - its visibility is controlled via its
own positioning timer.
"""
self.albumart_widget = AlbumArt(None, status.screen.get_mouse_monitor())
self.add_child_widget(self.albumart_widget)
        self.floaters.append(self.albumart_widget)
if settings.get_show_albumart():
self.albumart_widget.start_positioning()
def setup_osk(self):
self.osk = OnScreenKeyboard()
self.add_child_widget(self.osk)
def setup_unlock(self):
"""
Construct the unlock dialog widget and add it to the overlay. It will always
initially be invisible.
Any time the screensaver is awake, and the unlock dialog is raised, a timer runs.
After a certain elapsed time, the state will be reset, and the dialog will be hidden
once more. Mouse and key events reset this timer, and the act of authentication
temporarily suspends it - the unlock widget accomplishes this via its inhibit- and
uninhibit-timeout signals
We also listen to actual authentication events, to destroy the stage if there is success,
and to do something cute if we fail (for now, this consists of 'blinking' the unlock
dialog.)
"""
self.unlock_dialog = UnlockDialog()
self.set_default(self.unlock_dialog.auth_unlock_button)
self.add_child_widget(self.unlock_dialog)
# Prevent a dialog timeout during authentication
trackers.con_tracker_get().connect(self.unlock_dialog,
"inhibit-timeout",
self.set_timeout_active, False)
trackers.con_tracker_get().connect(self.unlock_dialog,
"uninhibit-timeout",
self.set_timeout_active, True)
# Respond to authentication success/failure
trackers.con_tracker_get().connect(self.unlock_dialog,
"authenticate-success",
self.authentication_result_callback, True)
trackers.con_tracker_get().connect(self.unlock_dialog,
"authenticate-failure",
self.authentication_result_callback, False)
trackers.con_tracker_get().connect(self.unlock_dialog,
"authenticate-cancel",
self.authentication_cancel_callback)
def setup_status_bars(self):
"""
Constructs the AudioPanel and InfoPanel and adds them to the overlay.
"""
self.audio_panel = AudioPanel()
self.add_child_widget(self.audio_panel)
self.info_panel = InfoPanel()
self.add_child_widget(self.info_panel)
self.info_panel.update_visibility()
def queue_dialog_key_event(self, event):
"""
Sent from our EventHandler via the ScreensaverManager, this catches
initial key events before the unlock dialog is made visible, so that
the user doesn't have to first jiggle the mouse to wake things up before
beginning to type their password. They can just start typing, and no
keystrokes will be lost.
"""
self.unlock_dialog.queue_key_event(event)
# Timer stuff - after a certain time, the unlock dialog will cancel itself.
# This timer is suspended during authentication, and any time a new user event is received
def reset_timeout(self):
"""
This is called when any user event is received in our EventHandler.
This restarts our dialog timeout.
"""
self.set_timeout_active(None, True)
def set_timeout_active(self, dialog, active):
"""
Start or stop the dialog timer
"""
if active and not status.InteractiveDebug:
trackers.timer_tracker_get().start("wake-timeout",
c.UNLOCK_TIMEOUT * 1000,
self.on_wake_timeout)
else:
trackers.timer_tracker_get().cancel("wake-timeout")
def on_wake_timeout(self):
"""
Go back to Sleep if we hit our timer limit
"""
self.set_timeout_active(None, False)
self.manager.cancel_unlock_widget()
return False
def authentication_result_callback(self, dialog, success):
"""
Called by authentication success or failure. Either starts
the stage despawning process or simply 'blinks' the unlock
widget, depending on the outcome.
"""
if success:
if self.clock_widget != None:
self.clock_widget.hide()
if self.albumart_widget != None:
self.albumart_widget.hide()
self.unlock_dialog.hide()
self.manager.unlock()
else:
self.unlock_dialog.blink()
def authentication_cancel_callback(self, dialog):
self.cancel_unlock_widget()
def set_message(self, msg):
"""
Passes along an away-message to the clock.
"""
if self.clock_widget != None:
self.clock_widget.set_message(msg)
def initialize_pam(self):
return self.unlock_dialog.initialize_auth_client()
def raise_unlock_widget(self):
"""
Bring the unlock widget to the front and make sure it's visible.
"""
self.reset_timeout()
if status.Awake:
return
status.screen.place_pointer_in_primary_monitor ()
utils.clear_clipboards(self.unlock_dialog)
if self.clock_widget != None:
self.clock_widget.stop_positioning()
if self.albumart_widget != None:
self.albumart_widget.stop_positioning()
status.Awake = True
if self.info_panel:
self.info_panel.refresh_power_state()
if self.clock_widget != None:
self.clock_widget.show()
if self.albumart_widget != None:
self.albumart_widget.show()
self.unlock_dialog.show()
if self.audio_panel != None:
self.audio_panel.show_panel()
if self.info_panel != None:
self.info_panel.update_visibility()
if self.osk != None:
self.osk.show()
def cancel_unlocking(self):
if self.unlock_dialog:
self.unlock_dialog.cancel_auth_client()
def cancel_unlock_widget(self):
"""
Hide the unlock widget (and others) if the unlock has been canceled
"""
if not status.Awake:
return
self.set_timeout_active(None, False)
utils.clear_clipboards(self.unlock_dialog)
self.unlock_dialog.hide()
if self.clock_widget != None:
self.clock_widget.hide()
if self.albumart_widget != None:
self.albumart_widget.hide()
if self.audio_panel != None:
self.audio_panel.hide()
if self.info_panel != None:
self.info_panel.hide()
if self.osk != None:
self.osk.hide()
self.unlock_dialog.cancel()
status.Awake = False
self.update_monitor_views()
self.info_panel.update_visibility()
def update_monitor_views(self):
"""
Updates all of our MonitorViews based on the power
or Awake states.
"""
if not status.Awake:
if self.clock_widget != None and settings.get_show_clock():
self.clock_widget.start_positioning()
if self.albumart_widget != None and settings.get_show_albumart():
self.albumart_widget.start_positioning()
for monitor in self.monitors:
monitor.show()
def destroy_monitor_views(self):
"""
Destroy all MonitorViews
"""
for monitor in self.monitors:
monitor.destroy()
del monitor
def do_motion_notify_event(self, event):
"""
GtkWidget class motion-event handler. Delegate to EventHandler
"""
return self.event_handler.on_motion_event(event)
def do_key_press_event(self, event):
"""
GtkWidget class key-press-event handler. Delegate to EventHandler
"""
return self.event_handler.on_key_press_event(event)
def do_button_press_event(self, event):
"""
GtkWidget class button-press-event handler. Delegate to EventHandler
"""
return self.event_handler.on_button_press_event(event)
def update_geometry(self):
"""
Override BaseWindow.update_geometry() - the Stage should always be the
GdkScreen size, unless status.InteractiveDebug is True
"""
if status.InteractiveDebug:
monitor_n = status.screen.get_primary_monitor()
self.rect = status.screen.get_monitor_geometry(monitor_n)
else:
self.rect = status.screen.get_screen_geometry()
if status.Debug:
print("Stage.update_geometry - new backdrop position: %d, %d new size: %d x %d" % (self.rect.x, self.rect.y, self.rect.width, self.rect.height))
hints = Gdk.Geometry()
hints.min_width = self.rect.width
hints.min_height = self.rect.height
hints.max_width = self.rect.width
hints.max_height = self.rect.height
hints.base_width = self.rect.width
hints.base_height = self.rect.height
self.set_geometry_hints(self, hints, Gdk.WindowHints.MIN_SIZE | Gdk.WindowHints.MAX_SIZE | Gdk.WindowHints.BASE_SIZE)
# Overlay window management
def get_mouse_monitor(self):
if status.InteractiveDebug:
return status.screen.get_primary_monitor()
else:
return status.screen.get_mouse_monitor()
def maybe_update_layout(self):
"""
Called on all user events, moves widgets to the currently
focused monitor if it changes (whichever monitor the mouse is in)
"""
current_focus_monitor = status.screen.get_mouse_monitor()
if self.last_focus_monitor == -1:
self.last_focus_monitor = current_focus_monitor
return
if self.unlock_dialog and current_focus_monitor != self.last_focus_monitor:
self.last_focus_monitor = current_focus_monitor
self.overlay.queue_resize()
def add_child_widget(self, widget):
"""
Add a new child to the overlay
"""
self.overlay.add_overlay(widget)
def sink_child_widget(self, widget):
"""
Move a child to the bottom of the overlay
"""
self.overlay.reorder_overlay(widget, 0)
def position_overlay_child(self, overlay, child, allocation):
"""
Callback for our GtkOverlay, think of this as a mini-
window manager for our Stage.
Depending on what type child is, we position it differently.
We always call child.get_preferred_size() whether we plan to use
it or not - this prevents allocation warning spew, particularly in
Gtk >= 3.20.
Returning True says, yes draw it. Returning False tells it to skip
drawing.
If a new widget type is introduced that spawns directly on the stage,
it must have its own handling code here.
"""
if isinstance(child, MonitorView):
"""
MonitorView is always the size and position of its assigned monitor.
            This is calculated and stored by the child in child.rect.
"""
w, h = child.get_preferred_size()
allocation.x = child.rect.x
allocation.y = child.rect.y
allocation.width = child.rect.width
allocation.height = child.rect.height
return True
if isinstance(child, UnlockDialog):
"""
UnlockDialog always shows on the currently focused monitor (the one the
mouse is currently in), and is kept centered.
"""
monitor = status.screen.get_mouse_monitor()
monitor_rect = status.screen.get_monitor_geometry(monitor)
min_rect, nat_rect = child.get_preferred_size()
allocation.width = nat_rect.width
allocation.height = nat_rect.height
allocation.x = monitor_rect.x + (monitor_rect.width / 2) - (allocation.width / 2)
allocation.y = monitor_rect.y + (monitor_rect.height / 2) - (allocation.height / 2)
return True
if isinstance(child, ClockWidget) or isinstance(child, AlbumArt):
"""
ClockWidget and AlbumArt behave differently depending on if status.Awake is True or not.
The widgets' halign and valign properties are used to store their gross position on the
monitor. This limits the number of possible positions to (3 * 3 * n_monitors) when our
screensaver is not Awake, and the widgets have an internal timer that randomizes halign,
valign, and current monitor every so many seconds, calling a queue_resize on itself after
each timer tick (which forces this function to run).
"""
min_rect, nat_rect = child.get_preferred_size()
if status.Awake:
current_monitor = status.screen.get_mouse_monitor()
else:
current_monitor = child.current_monitor
monitor_rect = status.screen.get_monitor_geometry(current_monitor)
region_w = monitor_rect.width / 3
region_h = monitor_rect.height
if status.Awake:
"""
If we're Awake, force the clock to track to the active monitor, and be aligned to
the left-center. The albumart widget aligns right-center.
"""
unlock_mw, unlock_nw = self.unlock_dialog.get_preferred_width()
"""
If, for whatever reason, we need more than 1/3 of the screen to fully display
the unlock dialog, reduce our available region width to accomodate it, reducing
the allocation for the floating widgets as required.
"""
if (unlock_nw > region_w):
region_w = (monitor_rect.width - unlock_nw) / 2
region_h = monitor_rect.height
if isinstance(child, ClockWidget):
child.set_halign(Gtk.Align.START)
else:
child.set_halign(Gtk.Align.END)
child.set_valign(Gtk.Align.CENTER)
else:
if settings.get_allow_floating():
for floater in self.floaters:
"""
Don't let our floating widgets end up in the same spot.
"""
if floater is child:
continue
if floater.get_halign() != child.get_halign() and floater.get_valign() != child.get_valign():
continue
region_h = monitor_rect.height / 3
fa = floater.get_halign()
ca = child.get_halign()
while fa == ca:
ca = ALIGNMENTS[random.randint(0, 2)]
child.set_halign(ca)
fa = floater.get_valign()
ca = child.get_valign()
while fa == ca:
ca = ALIGNMENTS[random.randint(0, 2)]
child.set_valign(ca)
# Restrict the widget size to the allowable region sizes if necessary.
allocation.width = min(nat_rect.width, region_w)
allocation.height = min(nat_rect.height, region_h)
# Calculate padding required to center widgets within their particular 1/9th of the monitor
padding_left = padding_right = (region_w - allocation.width) / 2
padding_top = padding_bottom = (region_h - allocation.height) / 2
halign = child.get_halign()
valign = child.get_valign()
if halign == Gtk.Align.START:
allocation.x = monitor_rect.x + padding_left
elif halign == Gtk.Align.CENTER:
allocation.x = monitor_rect.x + (monitor_rect.width / 2) - (allocation.width / 2)
elif halign == Gtk.Align.END:
allocation.x = monitor_rect.x + monitor_rect.width - allocation.width - padding_right
if valign == Gtk.Align.START:
allocation.y = monitor_rect.y + padding_top
elif valign == Gtk.Align.CENTER:
allocation.y = monitor_rect.y + (monitor_rect.height / 2) - (allocation.height / 2)
elif valign == Gtk.Align.END:
allocation.y = monitor_rect.y + monitor_rect.height - allocation.height - padding_bottom
return True
if isinstance(child, AudioPanel):
"""
The AudioPanel is only shown when Awake, and attaches
itself to the upper-left corner of the active monitor.
"""
min_rect, nat_rect = child.get_preferred_size()
if status.Awake:
current_monitor = status.screen.get_mouse_monitor()
monitor_rect = status.screen.get_monitor_geometry(current_monitor)
allocation.x = monitor_rect.x
allocation.y = monitor_rect.y
allocation.width = nat_rect.width
allocation.height = nat_rect.height
else:
allocation.x = child.rect.x
allocation.y = child.rect.y
allocation.width = nat_rect.width
allocation.height = nat_rect.height
return True
if isinstance(child, InfoPanel):
"""
The InfoPanel can be shown while not Awake, but will only appear if a) We have received
notifications while the screensaver is running, or b) we're either on battery
or plugged in but with a non-full battery. It attaches itself to the upper-right
corner of the monitor.
"""
min_rect, nat_rect = child.get_preferred_size()
if status.Awake:
current_monitor = status.screen.get_mouse_monitor()
monitor_rect = status.screen.get_monitor_geometry(current_monitor)
allocation.x = monitor_rect.x + monitor_rect.width - nat_rect.width
allocation.y = monitor_rect.y
allocation.width = nat_rect.width
allocation.height = nat_rect.height
else:
allocation.x = child.rect.x + child.rect.width - nat_rect.width
allocation.y = child.rect.y
allocation.width = nat_rect.width
allocation.height = nat_rect.height
return True
if isinstance(child, OnScreenKeyboard):
"""
            The OnScreenKeyboard is shown while the screensaver is Awake. It attaches
            itself to the bottom of the currently focused monitor, spanning the full
            monitor width and the bottom third of its height.
"""
min_rect, nat_rect = child.get_preferred_size()
current_monitor = status.screen.get_mouse_monitor()
monitor_rect = status.screen.get_monitor_geometry(current_monitor)
allocation.x = monitor_rect.x
allocation.y = monitor_rect.y + monitor_rect.height - (monitor_rect.height / 3)
allocation.width = monitor_rect.width
allocation.height = monitor_rect.height / 3
return True
return False
| gpl-2.0 | 449,252,281,052,089,700 | 35.061765 | 157 | 0.558764 | false | 4.489016 | false | false | false |
lasa/website | app/post.py | 1 | 3761 | import time
import datetime
from app import db, utils
from app.models import Post, Message
from flask import redirect, request
from flask_login import current_user
from flask_wtf import Form
from wtforms import validators, StringField, TextAreaField, HiddenField
class NewPostForm(Form):
title = StringField('Title:', validators=[validators.DataRequired(), validators.Length(min=0, max=1000)])
body = TextAreaField('Body:', validators=[validators.Length(min=0, max=30000)], widget=utils.TinyMCE)
bodyhtml = HiddenField()
def validate(self):
is_valid = Form.validate(self)
self.body.data = self.bodyhtml.data # preserve what has already been entered
return is_valid
def new_post():
form = NewPostForm()
if form.validate_on_submit():
data = {"title": form.title.data,
"body": form.bodyhtml.data,
"author": current_user.id_,
"timestamp": datetime.datetime.now()}
newpost = Post(**data)
db.session.add(newpost)
db.session.commit()
time.sleep(0.5)
return redirect("/news")
return utils.render_with_navbar("post/form.html", form=form, heading="News Item")
def new_message():
form = NewPostForm()
if form.validate_on_submit():
data = {"title": form.title.data,
"body": form.bodyhtml.data,
"author": current_user.id_,
"timestamp": datetime.datetime.now()}
newpost = Message(**data)
db.session.add(newpost)
db.session.commit()
time.sleep(0.5)
return redirect("/message")
return utils.render_with_navbar("post/form.html", form=form, heading="Principal's Message")
def edit_post():
postid = request.args.get("postid")
if not postid:
return redirect("/newpost")
current_post = Post.query.filter_by(id_=postid).first()
if not current_post:
return redirect("/newpost")
data = {"title": current_post.title,
"body": current_post.body}
form = NewPostForm(**data)
if form.validate_on_submit():
new_data = {"title": form.title.data,
"body": form.body.data}
for key, value in new_data.items():
setattr(current_post, key, value)
db.session.commit()
time.sleep(0.5)
return redirect("/news?postid="+postid)
return utils.render_with_navbar("post/form.html", form=form, heading="News Item")
def edit_message():
postid = request.args.get("postid")
if not postid:
return redirect("/messages")
current_post = Message.query.filter_by(id_=postid).first()
if not current_post:
return redirect("/messages")
data = {"title": current_post.title,
"body": current_post.body}
form = NewPostForm(**data)
if form.validate_on_submit():
new_data = {"title": form.title.data,
"body": form.body.data}
for key, value in new_data.items():
setattr(current_post, key, value)
db.session.commit()
time.sleep(0.5)
return redirect("/messages?postid="+postid)
return utils.render_with_navbar("post/form.html", form=form, heading="Principal's Message")
def delete_post():
postid = request.args.get("postid")
if not postid:
return redirect("/news")
post = Post.query.filter_by(id_=postid)
post.delete()
db.session.commit()
time.sleep(0.5)
return redirect("/news")
def delete_message():
postid = request.args.get("postid")
if not postid:
return redirect("/messages")
post = Message.query.filter_by(id_=postid)
post.delete()
db.session.commit()
time.sleep(0.5)
return redirect("/messages")
| agpl-3.0 | -3,114,586,670,228,496,000 | 27.492424 | 109 | 0.616325 | false | 3.709073 | false | false | false |
Weiya-CF/reco_gesture | dataAcquisition.py | 1 | 4190 | from recoDataStructure import *
class DataReceiver:
"""This class helps us to read data into the program.
During the training stage, it can read data from file
and during recognition stage, it can get real time tracking data and
pass it to the Feature Extraction module."""
def __init__(self, l_or_r):
# 0 or 1, whether it's for the left or right hand
self._l_or_r = l_or_r
# data structure for real time training or recognition
self._gloveData = None
# data structure for training from file
self._gloveDataList = list()
def readDataFromFile(self, filePath):
"""Read a sample file and create a list of ARTGlove data samples"""
# read the file into a list
f = open(filePath, 'r')
lines = f.readlines()
f.close()
print(len(lines), "are read")
# create glove data and add it into the glove data list
indice = 0
limit = len(lines)
print(limit)
n = 0
while indice + 53 <= limit:
glove = self.createGloveFromFile(lines[indice:indice+53])
n += 1
self._gloveDataList.append(glove)
indice += 53
print(n,"samples are created.")
def createFingerFromFile(self, n, lines):
"""Function called by the createGloveFromFile function"""
pos_str = lines[0][0:-1].split(' ')
pos = list()
for p in pos_str:
pos.append(float(p))
ori_str = lines[1][0:-1] + ' ' + lines[2][0:-1] + ' ' + lines[3][0:-1]
ori_str = ori_str.split(' ')
ori = list()
for o in ori_str:
ori.append(float(o))
phalen_str = lines[5][0:-1].split(' ')
phalen = list()
for p in phalen_str:
phalen.append(float(p))
#print("lines[6]:",lines[6])
phaang_str = lines[6][0:-1].split(' ')
phaang = list()
for p in phaang_str:
phaang.append(float(p))
f = Finger(n, pos, ori, float(lines[4][0:-1]), phalen, phaang)
return f
def createGloveFromFile(self, lines):
"""Function called by the readDataFromFile function"""
pos_str = lines[5][0:-1].split(' ')
pos = list()
for p in pos_str:
pos.append(float(p))
ori_str = lines[6][0:-1] + ' ' + lines[7][0:-1] + ' ' + lines[8][0:-1]
ori_str = ori_str.split(' ')
ori = list()
for o in ori_str:
ori.append(float(o))
finger_name_list = ['pouce','index','majeur','annulaire','auriculaire']
i = 11
n = 0
fingers = list()
while n < 5:
fingers.append(self.createFingerFromFile(finger_name_list[n],lines[i+n*8:i+7+n*8]))
n += 1
lr = -1
if lines[3][0:-1] == 'left':
lr = 0
else:
lr = 1
g = Glove(lines[1][0:-1], 0, lines[2][0:-1], lr, int(lines[4][0:-1]), fingers, pos, ori)
return g
def readRealTimeData(self, g_frame):
""" Add a glove frame to pass later to the feature extractor """
for glove in g_frame._glove_list:
if glove._l_or_r == 1:
# use only right hand for now
self._gloveData = glove
def getOneSampleFrameFile(self):
"""Data from file, return the first data frame in the list"""
if len(self._gloveDataList) != 0:
return self._gloveDataList.pop(0)
else:
return None
def getOneSampleFrameRT(self):
return self._gloveData
def showGlovesFromFile(self):
for g in self._gloveDataList:
print(g._timestamp)
def getGloveNumberFromFile(self):
"""Return the number of samples that we create from file"""
return len(self._gloveDataList)
if __name__ == "__main__":
dr_left = DataReceiver(0)
dr_right = DataReceiver(1)
dr_left.readDataFromFile("data/final_dataset2.txt")
dr_right.readDataFromFile("data/final_dataset2.txt")
print("finish for left hand", dr_left.getGloveNumberFromFile())
print("finish for right hand", dr_right.getGloveNumberFromFile())
| mit | -8,908,244,186,650,463,000 | 32.52 | 96 | 0.553222 | false | 3.428805 | false | false | false |
vaporry/pydevp2p | devp2p/crypto.py | 1 | 9987 | #!/usr/bin/python
CIPHERNAMES = set(('aes-128-ctr',))
import os
import sys
if sys.platform not in ('darwin',):
import pyelliptic
else:
# FIX PATH ON OS X ()
# https://github.com/yann2192/pyelliptic/issues/11
_openssl_lib_paths = ['/usr/local/Cellar/openssl/']
for p in _openssl_lib_paths:
if os.path.exists(p):
p = os.path.join(p, os.listdir(p)[-1], 'lib')
os.environ['DYLD_LIBRARY_PATH'] = p
import pyelliptic
if CIPHERNAMES.issubset(set(pyelliptic.Cipher.get_all_cipher())):
break
if 'pyelliptic' not in dir() or not CIPHERNAMES.issubset(set(pyelliptic.Cipher.get_all_cipher())):
print 'required ciphers %r not available in openssl library' % CIPHERNAMES
if sys.platform == 'darwin':
print 'use homebrew or macports to install newer openssl'
print '> brew install openssl / > sudo port install openssl'
sys.exit(1)
import bitcoin
from sha3 import sha3_256
from hashlib import sha256
import struct
import random
import devp2p.utils as utils
try:
from ecdsa_recover import ecdsa_raw_sign, ecdsa_raw_verify, ecdsa_raw_recover
from ecdsa_recover import ecdsa_sign, ecdsa_verify
except:
ecdsa_raw_sign = bitcoin.ecdsa_raw_sign
ecdsa_raw_verify = bitcoin.ecdsa_raw_verify
ecdsa_raw_recover = bitcoin.ecdsa_raw_recover
ecdsa_sign = bitcoin.ecdsa_sign
ecdsa_verify = bitcoin.ecdsa_verify
hmac_sha256 = pyelliptic.hmac_sha256
class ECIESDecryptionError(Exception):
pass
class ECCx(pyelliptic.ECC):
"""
Modified to work with raw_pubkey format used in RLPx
and binding default curve and cipher
"""
ecies_ciphername = 'aes-128-ctr'
curve = 'secp256k1'
ecies_encrypt_overhead_length = 113
def __init__(self, raw_pubkey=None, raw_privkey=None):
if raw_privkey:
assert not raw_pubkey
raw_pubkey = privtopub(raw_privkey)
if raw_pubkey:
assert len(raw_pubkey) == 64
_, pubkey_x, pubkey_y, _ = self._decode_pubkey(raw_pubkey)
else:
pubkey_x, pubkey_y = None, None
while True:
pyelliptic.ECC.__init__(self, pubkey_x=pubkey_x, pubkey_y=pubkey_y,
raw_privkey=raw_privkey, curve=self.curve)
try:
if self.raw_privkey:
bitcoin.get_privkey_format(self.raw_privkey) # failed for some keys
valid_priv_key = True
except AssertionError:
valid_priv_key = False
if len(self.raw_pubkey) == 64 and valid_priv_key:
break
elif raw_privkey or raw_pubkey:
raise Exception('invalid priv or pubkey')
assert len(self.raw_pubkey) == 64
@property
def raw_pubkey(self):
return self.pubkey_x + self.pubkey_y
@classmethod
def _decode_pubkey(cls, raw_pubkey):
assert len(raw_pubkey) == 64
pubkey_x = raw_pubkey[:32]
pubkey_y = raw_pubkey[32:]
return cls.curve, pubkey_x, pubkey_y, 64
    def get_ecdh_key(self, raw_pubkey):
        "Compute the ECDH shared key (256 bits) between the given raw public key and the local private key"
_, pubkey_x, pubkey_y, _ = self._decode_pubkey(raw_pubkey)
key = self.raw_get_ecdh_key(pubkey_x, pubkey_y)
assert len(key) == 32
return key
@property
def raw_privkey(self):
return self.privkey
def is_valid_key(self, raw_pubkey, raw_privkey=None):
try:
assert len(raw_pubkey) == 64
failed = bool(self.raw_check_key(raw_privkey, raw_pubkey[:32], raw_pubkey[32:]))
except (AssertionError, Exception):
failed = True
return not failed
@classmethod
def ecies_encrypt(cls, data, raw_pubkey):
"""
ECIES Encrypt, where P = recipient public key is:
1) generate r = random value
2) generate shared-secret = kdf( ecdhAgree(r, P) )
3) generate R = rG [same op as generating a public key]
4) send 0x04 || R || AsymmetricEncrypt(shared-secret, plaintext) || tag
currently used by go:
ECIES_AES128_SHA256 = &ECIESParams{
Hash: sha256.New,
hashAlgo: crypto.SHA256,
Cipher: aes.NewCipher,
BlockSize: aes.BlockSize,
KeyLen: 16,
}
"""
# 1) generate r = random value
ephem = ECCx()
# 2) generate shared-secret = kdf( ecdhAgree(r, P) )
key_material = ephem.raw_get_ecdh_key(pubkey_x=raw_pubkey[:32], pubkey_y=raw_pubkey[32:])
assert len(key_material) == 32
key = eciesKDF(key_material, 32)
assert len(key) == 32
key_enc, key_mac = key[:16], key[16:]
key_mac = sha256(key_mac).digest() # !!!
assert len(key_mac) == 32
# 3) generate R = rG [same op as generating a public key]
ephem_pubkey = ephem.raw_pubkey
# encrypt
iv = pyelliptic.Cipher.gen_IV(cls.ecies_ciphername)
assert len(iv) == 16
ctx = pyelliptic.Cipher(key_enc, iv, 1, cls.ecies_ciphername)
ciphertext = ctx.ciphering(data)
assert len(ciphertext) == len(data)
# 4) send 0x04 || R || AsymmetricEncrypt(shared-secret, plaintext) || tag
msg = chr(0x04) + ephem_pubkey + iv + ciphertext
# the MAC of a message (called the tag) as per SEC 1, 3.5.
tag = hmac_sha256(key_mac, msg[1 + 64:])
assert len(tag) == 32
msg += tag
assert len(msg) == 1 + 64 + 16 + 32 + len(data) == 113 + len(data)
assert len(msg) - cls.ecies_encrypt_overhead_length == len(data)
return msg
def ecies_decrypt(self, data):
"""
Decrypt data with ECIES method using the local private key
ECIES Decrypt (performed by recipient):
1) generate shared-secret = kdf( ecdhAgree(myPrivKey, msg[1:65]) )
2) verify tag
3) decrypt
ecdhAgree(r, recipientPublic) == ecdhAgree(recipientPrivate, R)
[where R = r*G, and recipientPublic = recipientPrivate*G]
"""
if data[0] != chr(0x04):
raise ECIESDecryptionError("wrong ecies header")
# 1) generate shared-secret = kdf( ecdhAgree(myPrivKey, msg[1:65]) )
_shared = data[1:1 + 64]
# FIXME, check that _shared_pub is a valid one (on curve)
key_material = self.raw_get_ecdh_key(pubkey_x=_shared[:32], pubkey_y=_shared[32:])
assert len(key_material) == 32
key = eciesKDF(key_material, 32)
assert len(key) == 32
key_enc, key_mac = key[:16], key[16:]
key_mac = sha256(key_mac).digest()
assert len(key_mac) == 32
tag = data[-32:]
assert len(tag) == 32
# 2) verify tag
if not pyelliptic.equals(hmac_sha256(key_mac, data[1 + 64:- 32]), tag):
raise ECIESDecryptionError("Fail to verify data")
# 3) decrypt
blocksize = pyelliptic.OpenSSL.get_cipher(self.ecies_ciphername).get_blocksize()
iv = data[1 + 64:1 + 64 + blocksize]
assert len(iv) == 16
ciphertext = data[1 + 64 + blocksize:- 32]
assert 1 + len(_shared) + len(iv) + len(ciphertext) + len(tag) == len(data)
ctx = pyelliptic.Cipher(key_enc, iv, 0, self.ecies_ciphername)
return ctx.ciphering(ciphertext)
encrypt = ecies_encrypt
decrypt = ecies_decrypt
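    # Illustrative round-trip (a sketch, not executed here; 'bob-secret' is just
    # example seed material):
    #
    #     bob = ECCx(raw_privkey=mk_privkey('bob-secret'))
    #     ciphertext = ECCx.ecies_encrypt('hello', bob.raw_pubkey)
    #     assert bob.ecies_decrypt(ciphertext) == 'hello'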
def sign(self, data):
"""
pyelliptic.ECC.sign is DER-encoded
https://bitcoin.stackexchange.com/questions/12554
"""
signature = ecdsa_sign(data, self.raw_privkey)
assert len(signature) == 65
return signature
def verify(self, signature, message):
assert len(signature) == 65
return ecdsa_verify(self.raw_pubkey, signature, message)
def lzpad32(x):
return '\x00' * (32 - len(x)) + x
def _encode_sig(v, r, s):
assert isinstance(v, (int, long))
assert v in (27, 28)
vb, rb, sb = chr(v - 27), bitcoin.encode(r, 256), bitcoin.encode(s, 256)
return lzpad32(rb) + lzpad32(sb) + vb
def _decode_sig(sig):
return ord(sig[64]) + 27, bitcoin.decode(sig[0:32], 256), bitcoin.decode(sig[32:64], 256)
def ecdsa_verify(pubkey, signature, message):
assert len(signature) == 65
assert len(pubkey) == 64
return ecdsa_raw_verify(message, _decode_sig(signature), pubkey)
verify = ecdsa_verify
def ecdsa_sign(message, privkey):
s = _encode_sig(*ecdsa_raw_sign(message, privkey))
return s
sign = ecdsa_sign
def ecdsa_recover(message, signature):
assert len(signature) == 65
pub = ecdsa_raw_recover(message, _decode_sig(signature))
assert pub, 'pubkey could not be recovered'
pub = bitcoin.encode_pubkey(pub, 'bin_electrum')
assert len(pub) == 64
return pub
recover = ecdsa_recover
def sha3(seed):
return sha3_256(seed).digest()
def mk_privkey(seed):
return sha3(seed)
def privtopub(raw_privkey):
raw_pubkey = bitcoin.encode_pubkey(bitcoin.privtopub(raw_privkey), 'bin_electrum')
assert len(raw_pubkey) == 64
return raw_pubkey
def encrypt(data, raw_pubkey):
"""
Encrypt data with ECIES method using the public key of the recipient.
"""
assert len(raw_pubkey) == 64, 'invalid pubkey of len {}'.format(len(raw_pubkey))
return ECCx.encrypt(data, raw_pubkey)
def eciesKDF(key_material, key_len):
"""
interop w/go ecies implementation
for sha3, blocksize is 136 bytes
for sha256, blocksize is 64 bytes
NIST SP 800-56a Concatenation Key Derivation Function (see section 5.8.1).
"""
s1 = ""
key = ""
hash_blocksize = 64
reps = ((key_len + 7) * 8) / (hash_blocksize * 8)
counter = 0
while counter <= reps:
counter += 1
ctx = sha256()
ctx.update(struct.pack('>I', counter))
ctx.update(key_material)
ctx.update(s1)
key += ctx.digest()
return key[:key_len]
| mit | -468,143,978,608,255,500 | 31.009615 | 98 | 0.603685 | false | 3.38887 | false | false | false |
aanunez/tortilla8 | tortilla8/instructions.py | 1 | 8266 | #!/usr/bin/env python3
from random import randint
from . import EmulationError
from .constants.reg_rom_stack import STACK_ADDRESS, STACK_SIZE
from .constants.graphics import GFX_FONT_ADDRESS, GFX_RESOLUTION, GFX_ADDRESS, \
GFX_WIDTH, GFX_HEIGHT_PX, GFX_WIDTH_PX, \
SET_VF_ON_GFX_OVERFLOW
# Instructions - All 20 mnemonics, 35 total instructions
# Add-3 SE-2 SNE-2 LD-11 JP-2 (mnemonics w/ extra instructions)
def i_cls(emu):
emu.ram[GFX_ADDRESS:GFX_ADDRESS + GFX_RESOLUTION] = [0x00] * GFX_RESOLUTION
emu.draw_flag = True
def i_ret(emu):
emu.stack_pointer -= 1
if emu.stack_pointer < 0:
emu.log("Stack underflow", EmulationError._Fatal)
emu.program_counter = emu.stack.pop()
def i_sys(emu):
emu.log("RCA 1802 call to " + hex( get_address(emu) ) + " was ignored.", EmulationError._Warning)
def i_call(emu):
if STACK_ADDRESS:
        emu.ram[emu.stack_pointer] = emu.program_counter
emu.stack_pointer += 1
emu.stack.append(emu.program_counter)
if emu.stack_pointer > STACK_SIZE:
        emu.log("Stack overflow. Stack is now size " + str(emu.stack_pointer), EmulationError._Warning)
emu.program_counter = get_address(emu) - 2
def i_skp(emu):
if emu.keypad[ get_reg1_val(emu) & 0x0F ]:
emu.program_counter += 2
def i_sknp(emu):
if not emu.keypad[ get_reg1_val(emu) & 0x0F ]:
emu.program_counter += 2
def i_se(emu):
comp = get_lower_byte(emu) if 'byte' is emu.dis_ins.mnemonic_arg_types[1] else get_reg2_val(emu)
if get_reg1_val(emu) == comp:
emu.program_counter += 2
def i_sne(emu):
comp = get_lower_byte(emu) if 'byte' is emu.dis_ins.mnemonic_arg_types[1] else get_reg2_val(emu)
if get_reg1_val(emu) != comp:
emu.program_counter += 2
def i_shl(emu):
if emu.legacy_shift:
emu.register[0xF] = 0x01 if get_reg2_val(emu) >= 0x80 else 0x0
emu.register[ get_reg1(emu) ] = ( get_reg2_val(emu) << 1 ) & 0xFF
else:
emu.register[0xF] = 0x01 if get_reg1_val(emu) >= 0x80 else 0x0
emu.register[ get_reg1(emu) ] = ( get_reg1_val(emu) << 1 ) & 0xFF
def i_shr(emu):
if emu.legacy_shift:
emu.register[0xF] = 0x01 if ( get_reg2_val(emu) % 2) == 1 else 0x0
emu.register[ get_reg1(emu) ] = get_reg2_val(emu) >> 1
else:
emu.register[0xF] = 0x01 if ( get_reg1_val(emu) % 2) == 1 else 0x0
emu.register[ get_reg1(emu) ] = get_reg1_val(emu) >> 1
def i_or(emu):
emu.register[ get_reg1(emu) ] = get_reg1_val(emu) | get_reg2_val(emu)
def i_and(emu):
emu.register[ get_reg1(emu) ] = get_reg1_val(emu) & get_reg2_val(emu)
def i_xor(emu):
emu.register[ get_reg1(emu) ] = get_reg1_val(emu) ^ get_reg2_val(emu)
def i_sub(emu):
emu.register[0xF] = 0x01 if get_reg1_val(emu) >= get_reg2_val(emu) else 0x00
emu.register[ get_reg1(emu) ] = get_reg1_val(emu) - get_reg2_val(emu)
emu.register[ get_reg1(emu) ] &= 0xFF
def i_subn(emu):
emu.register[0xF] = 0x01 if get_reg2_val(emu) >= get_reg1_val(emu) else 0x00
emu.register[ get_reg1(emu) ] = get_reg2_val(emu) - get_reg1_val(emu)
emu.register[ get_reg1(emu) ] &= 0xFF
def i_jp(emu):
init_pc = emu.program_counter
numb_args = len(emu.dis_ins.mnemonic_arg_types)
if 'v0' is emu.dis_ins.mnemonic_arg_types[0] and numb_args == 2:
emu.program_counter = get_address(emu) + emu.register[0] - 2
elif numb_args == 1:
emu.program_counter = get_address(emu) - 2
else:
emu.log("Unknown argument at address " + hex(emu.program_counter), EmulationError._Fatal)
if init_pc == emu.program_counter + 2:
emu.spinning = True
def i_rnd(emu):
emu.register[ get_reg1(emu) ] = randint(0, 255) & get_lower_byte(emu)
def i_add(emu):
arg1 = emu.dis_ins.mnemonic_arg_types[0]
arg2 = emu.dis_ins.mnemonic_arg_types[1]
if 'reg' is arg1:
if 'byte' is arg2:
emu.register[ get_reg1(emu) ] = get_reg1_val(emu) + get_lower_byte(emu)
emu.register[ get_reg1(emu) ] &= 0xFF
elif 'reg' is arg2:
emu.register[ get_reg1(emu) ] = get_reg1_val(emu) + get_reg2_val(emu)
emu.register[0xF] = 0x01 if emu.register[ get_reg1(emu) ] > 0xFF else 0x00
emu.register[ get_reg1(emu) ] &= 0xFF
else:
emu.log("Unknown argument at address " + hex(emu.program_counter), EmulationError._Fatal)
elif 'i' in arg1 and 'reg' is arg2:
emu.index_register += get_reg1_val(emu)
if (emu.index_register > 0xFF) and SET_VF_ON_GFX_OVERFLOW:
emu.register[0xF] = 0x01
emu.index_register &= 0xFFF
else:
emu.log("Unknown argument at address " + hex(emu.program_counter), EmulationError._Fatal)
def i_ld(emu):
arg1 = emu.dis_ins.mnemonic_arg_types[0]
arg2 = emu.dis_ins.mnemonic_arg_types[1]
if 'reg' is arg1:
if 'byte' is arg2:
emu.register[ get_reg1(emu) ] = get_lower_byte(emu)
elif 'reg' is arg2:
emu.register[ get_reg1(emu) ] = get_reg2_val(emu)
elif 'dt' is arg2:
emu.register[ get_reg1(emu) ] = emu.delay_timer_register
elif 'k' is arg2:
emu.waiting_for_key = True
emu.program_counter -= 2
elif '[i]' == arg2:
emu.register[0: get_reg1(emu) + 1] = emu.ram[ emu.index_register : emu.index_register + get_reg1(emu) + 1]
else:
emu.log("Loads with second argument type '" + arg2 + \
"' are not supported.", EmulationError._Fatal)
elif 'reg' is arg2:
if 'dt' is arg1:
emu.delay_timer_register = get_reg1_val(emu)
elif 'st' is arg1:
emu.sound_timer_register = get_reg1_val(emu)
elif 'f' is arg1:
emu.index_register = GFX_FONT_ADDRESS + ( 5 * get_reg1_val(emu) )
elif 'b' is arg1:
bcd = [int(f) for f in list(str( get_reg1_val(emu) ).zfill(3))]
emu.ram[ emu.index_register : emu.index_register + len(bcd)] = bcd
elif '[i]' == arg1:
emu.ram[ emu.index_register : emu.index_register + get_reg1(emu) + 1] = emu.register[0: get_reg1(emu) + 1]
else:
emu.log("Unknown argument at address " + hex(emu.program_counter), EmulationError._Fatal)
elif 'i' is arg1 and 'addr' is arg2:
emu.index_register = get_address(emu)
else:
emu.log("Unknown argument at address " + hex(emu.program_counter), EmulationError._Fatal)
def i_drw(emu):
emu.draw_flag = True
height = int(emu.dis_ins.hex_instruction[3],16)
x_origin_byte = int( get_reg1_val(emu) / 8 ) % GFX_WIDTH
y_origin_byte = (get_reg2_val(emu) % GFX_HEIGHT_PX) * GFX_WIDTH
shift_amount = get_reg1_val(emu) % GFX_WIDTH_PX % 8
next_byte_offset = 1 if x_origin_byte + 1 != GFX_WIDTH else 1-GFX_WIDTH
emu.register[0xF] = 0x00
for y in range(height):
sprite = emu.ram[ emu.index_register + y ] << (8-shift_amount)
working_bytes = (
GFX_ADDRESS + (( x_origin_byte + y_origin_byte + (y * GFX_WIDTH) ) % GFX_RESOLUTION) ,
GFX_ADDRESS + (( x_origin_byte + y_origin_byte + (y * GFX_WIDTH) + next_byte_offset ) % GFX_RESOLUTION)
)
original = ( emu.ram[ working_bytes[0] ], emu.ram[ working_bytes[1] ] )
xor = (original[0]*256 + original[1]) ^ sprite
emu.ram[ working_bytes[0] ], emu.ram[ working_bytes[1] ] = xor >> 8, xor & 0x00FF
if (bin( ( emu.ram[ working_bytes[0] ] ^ original[0] ) & original[0] ) + \
bin( ( emu.ram[ working_bytes[1] ] ^ original[1] ) & original[1] )).find('1') != -1:
emu.register[0xF] = 0x01
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Hex Extraction
def get_address(emu):
return int(emu.dis_ins.hex_instruction[1:4], 16)
def get_reg1(emu):
return int(emu.dis_ins.hex_instruction[1],16)
def get_reg2(emu):
return int(emu.dis_ins.hex_instruction[2],16)
def get_reg1_val(emu):
return emu.register[int(emu.dis_ins.hex_instruction[1],16)]
def get_reg2_val(emu):
return emu.register[int(emu.dis_ins.hex_instruction[2],16)]
def get_lower_byte(emu):
return int(emu.dis_ins.hex_instruction[2:4], 16)
| gpl-3.0 | -7,325,783,240,247,029,000 | 37.092166 | 118 | 0.594967 | false | 2.673351 | false | false | false |
cinayc/crawler | crawler/pipelines.py | 1 | 2611 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
import pymysql
import hashlib
from scrapy.exceptions import DropItem
class CrawlerPipeline(object):
def __init__(self, my_settings):
self.settings = my_settings
db_host = self.settings.get('DB_HOST')
db_port = self.settings.get('DB_PORT')
db_user = self.settings.get('DB_USER')
db_pass = self.settings.get('DB_PASS')
db_db = self.settings.get('DB_DB')
db_charset = self.settings.get('DB_CHARSET')
self.conn = pymysql.connect(
host=db_host,
port=db_port,
user=db_user,
passwd=db_pass,
database=db_db,
use_unicode=True,
charset=db_charset)
self.cursor = self.conn.cursor()
@classmethod
def from_crawler(cls, crawler):
my_settings = crawler.settings
return cls(my_settings)
def process_item(self, item, spider):
url = item['url']
id = self.get_doc_id(url)
is_visited = item['is_visited'] is None and 'N' or item['is_visited']
raw = item['raw']
parsed = item['parsed']
rvrsd_domain = item['rvrsd_domain']
status = item['status']
if is_visited == "N":
sql = """
INSERT INTO DOC (id, c_time, url, is_visited, rvrsd_domain, visit_cnt)
SELECT %s, now(), %s, %s, %s, 0 FROM DUAL
WHERE NOT EXISTS (SELECT * FROM DOC WHERE id=%s)
"""
self.cursor.execute(sql, (id, url, is_visited, rvrsd_domain, id))
print("Save new URL: [%s] %s" % (id, url))
elif is_visited == "Y":
sql = """
INSERT INTO DOC (id, c_time, v_time, raw, parsed, url, is_visited, rvrsd_domain)
VALUES (%s, now(), now(), %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE raw = %s, is_visited = %s, parsed = %s, v_time = now(), visit_cnt = visit_cnt + 1, status = %s
"""
self.cursor.execute(sql, (id, raw, parsed, url, is_visited, rvrsd_domain, raw, is_visited, parsed, status))
print("Update URL: [%s] %s" % (id, url))
else:
print("Pass URL: [%s] %s" % (id, url))
pass
self.conn.commit()
return item
def get_doc_id(self, url):
return hashlib.md5(url.encode('utf-8')).hexdigest()[0:16]
def open_spider(self, spider):
pass
def close_spider(self, spider):
self.cursor.close()
self.conn.close()
| unlicense | 7,208,958,898,935,078,000 | 31.6375 | 122 | 0.561854 | false | 3.29256 | false | false | false |
daira/zcash | qa/zcash/updatecheck.py | 3 | 16150 | #!/usr/bin/env python3
#
# This script checks for updates to zcashd's dependencies.
#
# The SOURCE_ROOT constant specifies the location of the zcashd codebase to
# check, and the GITHUB_API_* constants specify a personal access token for the
# GitHub API, which need not have any special privileges.
#
# All dependencies must be specified inside the get_dependency_list() function
# below. A dependency is specified by:
#
# (a) A way to fetch a list of current releases.
#
# This is usually regular-expression-based parsing of GitHub tags, but
# might otherwise parse version numbers out of the project's webpage.
#
# GitHub tag regexps can be tested by specifying test cases in the third
# argument to GithubTagReleaseLister's constructor.
#
# (b) A way to fetch the currently-used version out of the source tree.
#
# This is typically parsed out of the depends/packages/*.mk files.
#
# If any dependency is found to be out-of-date, or there are un-accounted-for
# .mk files in depends/packages, this script will exit with
# a nonzero status. The latter case would suggest someone added a new dependency
# without adding a corresponding entry to get_dependency_list() below.
#
# To test the script itself, run it with --functionality-test as the only
# argument. This will exercise the full functionality of the script, but will
# only return a non-zero exit status when there's something wrong with the
# script itself, for example if a new file was added to depends/packages/ but
# wasn't added to this script.
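#
# As an illustration (not a real dependency), a package tracked via GitHub tags
# and a depends/packages/foo.mk file would be registered in get_dependency_list()
# below roughly like this:
#
#   Dependency("foo",
#              GithubTagReleaseLister("fooorg", "foo", "^v(\d+)\.(\d+)\.(\d+)$",
#                  { "v1.2.3": (1, 2, 3), "v2.0.0-rc1": None }),
#              DependsVersionGetter("foo")),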
import requests
import os
import re
import sys
import datetime
SOURCE_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")
def get_dependency_list():
dependencies = [
Dependency("bdb",
BerkeleyDbReleaseLister(),
DependsVersionGetter("bdb")),
Dependency("boost",
GithubTagReleaseLister("boostorg", "boost", "^boost-(\d+)\.(\d+)\.(\d+)$",
{ "boost-1.69.0": (1, 69, 0), "boost-1.69.0-beta1": None }),
DependsVersionGetter("boost")),
Dependency("googletest",
GithubTagReleaseLister("google", "googletest", "^release-(\d+)\.(\d+)\.(\d+)$",
{ "release-1.8.1": (1, 8, 1) }),
DependsVersionGetter("googletest")),
# libc++ matches the Clang version
Dependency("libcxx",
GithubTagReleaseLister("llvm", "llvm-project", "^llvmorg-(\d+)\.(\d+).(\d+)$",
{ "llvmorg-11.0.0": (11, 0, 0), "llvmorg-9.0.1-rc3": None}),
DependsVersionGetter("native_clang")),
Dependency("libevent",
GithubTagReleaseLister("libevent", "libevent", "^release-(\d+)\.(\d+)\.(\d+)-stable$",
{ "release-2.0.22-stable": (2, 0, 22), "release-2.1.9-beta": None }),
DependsVersionGetter("libevent")),
Dependency("libsodium",
GithubTagReleaseLister("jedisct1", "libsodium", "^(\d+)\.(\d+)\.(\d+)$",
{ "1.0.17": (1, 0, 17) }),
DependsVersionGetter("libsodium")),
# b2 matches the Boost version
Dependency("native_b2",
GithubTagReleaseLister("boostorg", "boost", "^boost-(\d+)\.(\d+)\.(\d+)$",
{ "boost-1.69.0": (1, 69, 0), "boost-1.69.0-beta1": None }),
DependsVersionGetter("boost")),
Dependency("native_ccache",
GithubTagReleaseLister("ccache", "ccache", "^v?(\d+)\.(\d+)(?:\.(\d+))?$",
{ "v3.5.1": (3, 5, 1), "v3.6": (3, 6)}),
DependsVersionGetter("native_ccache")),
Dependency("native_clang",
GithubTagReleaseLister("llvm", "llvm-project", "^llvmorg-(\d+)\.(\d+).(\d+)$",
{ "llvmorg-11.0.0": (11, 0, 0), "llvmorg-9.0.1-rc3": None}),
DependsVersionGetter("native_clang")),
Dependency("native_rust",
GithubTagReleaseLister("rust-lang", "rust", "^(\d+)\.(\d+)(?:\.(\d+))?$",
{ "1.33.0": (1, 33, 0), "0.9": (0, 9) }),
DependsVersionGetter("native_rust")),
Dependency("zeromq",
GithubTagReleaseLister("zeromq", "libzmq", "^v(\d+)\.(\d+)(?:\.(\d+))?$",
{ "v4.3.1": (4, 3, 1), "v4.2.0-rc1": None }),
DependsVersionGetter("zeromq")),
Dependency("leveldb",
GithubTagReleaseLister("google", "leveldb", "^v(\d+)\.(\d+)$",
{ "v1.13": (1, 13) }),
LevelDbVersionGetter()),
Dependency("univalue",
GithubTagReleaseLister("bitcoin-core", "univalue", "^v(\d+)\.(\d+)\.(\d+)$",
{ "v1.0.1": (1, 0, 1) }),
UnivalueVersionGetter()),
Dependency("utfcpp",
GithubTagReleaseLister("nemtrif", "utfcpp", "^v(\d+)\.(\d+)(?:\.(\d+))?$",
{ "v3.1": (3, 1), "v3.0.3": (3, 0, 3) }),
DependsVersionGetter("utfcpp"))
]
return dependencies
class GitHubToken:
def __init__(self):
token_path = os.path.join(SOURCE_ROOT, ".updatecheck-token")
try:
with open(token_path, encoding='utf8') as f:
token = f.read().strip()
self._user = token.split(":")[0]
self._password = token.split(":")[1]
except:
print("Please make sure a GitHub API token is in .updatecheck-token in the root of this repository.")
print("The format is username:hex-token.")
sys.exit(1)
    def user(self):
        return self._user
    def password(self):
        return self._password
class Version(list):
def __init__(self, version_tuple):
for part in version_tuple:
if part: # skip None's which can come from optional regexp groups
if str(part).isdigit():
self.append(int(part))
else:
self.append(part)
def __str__(self):
return '.'.join(map(str, self))
def __hash__(self):
return hash(tuple(self))
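# Version turns regexp capture groups into a comparable list, e.g. (for
# illustration) Version(("1", "8", "1")) behaves like [1, 8, 1]; None groups
# from optional regexp parts are skipped, and non-numeric parts (such as a
# trailing letter suffix) are kept as strings.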
class Dependency:
def __init__(self, name, release_lister, current_getter):
self.name = name
self.release_lister = release_lister
self.current_getter = current_getter
self.cached_known_releases = None
def current_version(self):
return self.current_getter.current_version()
def known_releases(self):
if self.cached_known_releases is None:
self.cached_known_releases = sorted(self.release_lister.known_releases())
return self.cached_known_releases
def released_versions_after_current_version(self):
current_version = self.current_version()
releases_after_current = []
for release in self.known_releases():
if release > current_version:
releases_after_current.append(release)
return releases_after_current
def is_up_to_date(self):
return len(self.released_versions_after_current_version()) == 0
class GithubTagReleaseLister:
def __init__(self, org, repo, regex, testcases={}):
self.org = org
self.repo = repo
self.regex = regex
self.testcases = testcases
self.token = GitHubToken()
for tag, expected in testcases.items():
match = re.match(self.regex, tag)
if (expected and not match) or (match and not expected) or (match and Version(match.groups()) != list(expected)):
groups = str(match.groups())
raise RuntimeError("GitHub tag regex test case [" + tag + "] failed, got [" + groups + "].")
def known_releases(self):
release_versions = []
all_tags = self.all_tag_names()
# sanity check against the test cases
for tag, expected in self.testcases.items():
if tag not in all_tags:
raise RuntimeError("Didn't find expected tag [" + tag + "].")
for tag_name in all_tags:
match = re.match(self.regex, tag_name)
if match:
release_versions.append(Version(match.groups()))
return release_versions
def all_tag_names(self):
url = "https://api.github.com/repos/" + safe(self.org) + "/" + safe(self.repo) + "/git/refs/tags"
r = requests.get(url, auth=requests.auth.HTTPBasicAuth(self.token.user(), self.token.password()))
if r.status_code != 200:
raise RuntimeError("Request to GitHub tag API failed.")
json = r.json()
return list(map(lambda t: t["ref"].split("/")[-1], json))
class BerkeleyDbReleaseLister:
def known_releases(self):
url = "https://www.oracle.com/database/technologies/related/berkeleydb-downloads.html"
r = requests.get(url)
if r.status_code != 200:
raise RuntimeError("Request to Berkeley DB download directory failed.")
page = r.text
# We use a set because the search will result in duplicates.
release_versions = set()
for match in re.findall("Berkeley DB (\d+)\.(\d+)\.(\d+)\.tar.gz", page):
release_versions.add(Version(match))
if len(release_versions) == 0:
raise RuntimeError("Missing expected version from Oracle web page.")
return list(release_versions)
class DependsVersionGetter:
def __init__(self, name):
self.name = name
def current_version(self):
mk_file_path = os.path.join(SOURCE_ROOT, "depends", "packages", safe_depends(self.name) + ".mk")
mk_file = open(mk_file_path, 'r', encoding='utf8').read()
regexp_whitelist = [
"package\)_version=(\d+)\.(\d+)\.(\d+)$",
"package\)_version=(\d+)\.(\d+)$",
"package\)_version=(\d+)_(\d+)_(\d+)$",
"package\)_version=(\d+)\.(\d+)\.(\d+)([a-z])$",
# Workaround for wasi 0.9.0 preview
"package\)_version=(\d+)\.(\d+)\.(\d+)\+wasi-snapshot-preview1$",
]
current_version = None
for regexp in regexp_whitelist:
match = re.search(regexp, mk_file, re.MULTILINE)
if match:
current_version = Version(match.groups())
if not current_version:
raise RuntimeError("Couldn't parse version number from depends .mk file.")
return current_version
class LevelDbVersionGetter:
def current_version(self):
header_path = os.path.join(SOURCE_ROOT, "src", "leveldb", "include", "leveldb", "db.h")
header_contents = open(header_path, 'r', encoding='utf8').read()
match = re.search("kMajorVersion\s*=\s*(\d+);\s*.*kMinorVersion\s*=\s*(\d+);\s*$", header_contents, re.MULTILINE)
if match:
return Version(match.groups())
else:
raise RuntimeError("Couldn't parse LevelDB's version from db.h")
class UnivalueVersionGetter:
def current_version(self):
configure_path = os.path.join(SOURCE_ROOT, "src", "univalue", "configure.ac")
configure_contents = open(configure_path, 'r', encoding='utf8').read()
match = re.search("AC_INIT.*univalue.*\[(\d+)\.(\d+)\.(\d+)\]", configure_contents)
if match:
return Version(match.groups())
else:
raise RuntimeError("Couldn't parse univalue's version from its configure.ac")
class PostponedUpdates():
def __init__(self):
self.postponedlist = dict()
postponedlist_path = os.path.join(
os.path.dirname(__file__),
"postponed-updates.txt"
)
file = open(postponedlist_path, 'r', encoding='utf8')
for line in file.readlines():
stripped = re.sub('#.*$', '', line).strip()
if stripped != "":
match = re.match('^(\S+)\s+(\S+)\s+(\S+)$', stripped)
if match:
postponed_name = match.groups()[0]
postponed_version = Version(match.groups()[1].split("."))
postpone_expiration = datetime.datetime.strptime(match.groups()[2], '%Y-%m-%d')
if datetime.datetime.utcnow() < postpone_expiration:
self.postponedlist[(postponed_name, str(postponed_version))] = True
else:
raise RuntimeError("Could not parse line in postponed-updates.txt:" + line)
def is_postponed(self, name, version):
return (name, str(version)) in self.postponedlist
def safe(string):
if re.match('^[a-zA-Z0-9_-]*$', string):
return string
else:
raise RuntimeError("Potentially-dangerous string encountered.")
def safe_depends(string):
if re.match('^[a-zA-Z0-9._-]*$', string):
return string
else:
raise RuntimeError("Potentially-dangerous string encountered.")
def print_row(name, status, current_version, known_versions):
COL_FMT_LARGE = "{:<35}"
COL_FMT_SMALL = "{:<18}"
print(COL_FMT_LARGE.format(name) +
COL_FMT_SMALL.format(status) +
COL_FMT_SMALL.format(current_version) +
COL_FMT_SMALL.format(known_versions))
def main():
# Get a list of all depends-system dependencies so we can verify that we're
# checking them all for updates.
unchecked_dependencies = [f[:-3] for f in os.listdir(os.path.join(SOURCE_ROOT, "depends", "packages")) if f.endswith(".mk")]
untracked = [
# packages.mk is not a dependency, it just specifies the list of them all.
"packages",
# This package doesn't have conventional version numbers
"native_cctools"
]
print_row("NAME", "STATUS", "CURRENT VERSION", "NEWER VERSIONS")
status = 0
for dep in untracked:
print_row(dep, "skipped", "", "")
if dep in unchecked_dependencies:
unchecked_dependencies.remove(dep)
else:
print("Error: Please remove " + dep + " from the list of unchecked dependencies.")
status = 3
# Exit early so the problem is clear from the output.
if status != 0:
sys.exit(status)
deps = get_dependency_list()
postponed = PostponedUpdates()
for dependency in deps:
if dependency.name in unchecked_dependencies:
unchecked_dependencies.remove(dependency.name)
if dependency.is_up_to_date():
print_row(
dependency.name,
"up to date",
str(dependency.current_version()),
"")
else:
# The status can either be POSTPONED or OUT OF DATE depending
# on whether or not all the new versions are whitelisted.
status_text = "POSTPONED"
newver_list = "["
for newver in dependency.released_versions_after_current_version():
if postponed.is_postponed(dependency.name, newver):
newver_list += str(newver) + " (postponed),"
else:
newver_list += str(newver) + ","
status_text = "OUT OF DATE"
status = 1
newver_list = newver_list[:-1] + "]"
print_row(
dependency.name,
status_text,
str(dependency.current_version()),
newver_list
)
if len(unchecked_dependencies) > 0:
unchecked_dependencies.sort()
print("WARNING: The following dependencies are not being checked for updates by this script: " + ', '.join(unchecked_dependencies))
sys.exit(2)
if len(sys.argv) == 2 and sys.argv[1] == "--functionality-test":
print("We're only testing this script's functionality. The exit status will only be nonzero if there's a problem with the script itself.")
sys.exit(0)
if status == 0:
print("All non-Rust dependencies are up-to-date or postponed.")
elif status == 1:
print("Release is BLOCKED. There are new dependency updates that have not been postponed.")
print("""
You should also check the Rust dependencies using cargo:
cargo install cargo-outdated cargo-audit
cargo outdated
cargo audit
""")
if status == 0:
print("After checking those, you'll be ready for release! :-)")
sys.exit(status)
main()
| mit | -5,905,578,815,122,862,000 | 38.583333 | 146 | 0.579009 | false | 3.776011 | true | false | false |
lyoshida/conference-central | main.py | 1 | 2418 | #!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.ext import ndb
from models import Conference
from models import Session
from conference import ConferenceApi
MEMCACHE_SPEAKER_KEY = 'SPEAKER'
SPEAKER_TPL = 'More sessions from %s: %s.'
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
            'Hi, you have created the following '        # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
class SetFeatureSpeakerHandler(webapp2.RequestHandler):
def post(self):
"""Sets the featured speaker in memcache"""
# Retrieves a list of sessions from the same speaker at this conference
p_key = ndb.Key(urlsafe=self.request.get('websafeConferenceKey'))
sessions_by_speaker = Session.query(ancestor=p_key)\
.filter(Session.speaker == self.request.get('speaker'))
if sessions_by_speaker.count() > 0:
sessions_str = ''
for session in sessions_by_speaker:
sessions_str += session.name + ', '
sessions_str = sessions_str[:-2]
speaker_memcache_message = SPEAKER_TPL % (self.request.get('speaker'), sessions_str)
memcache.set(MEMCACHE_SPEAKER_KEY, speaker_memcache_message)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/setFeaturedSpeaker', SetFeatureSpeakerHandler),
], debug=True)
| apache-2.0 | 4,636,045,090,729,896,000 | 32.123288 | 96 | 0.648056 | false | 3.86262 | false | false | false |
imincik/medobs | medobs/reservations/views.py | 1 | 14838 | import json
from datetime import datetime, date, time, timedelta
from view_utils import get_offices, get_reservations_data, is_reservation_on_date
from view_utils import send_reservation_notification, send_reschedule_notificaion, send_cancel_notificaion
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from medobs.reservations.forms import PatientForm, PatientDetailForm
from medobs.reservations.models import Office, Patient, Reservation
def front_page(request):
try:
if request.user.is_authenticated():
office = Office.objects.filter(published=True)[0]
else:
office = Office.objects.filter(published=True, authenticated_only=False)[0]
except IndexError:
return render_to_response(
"missing_config.html",
{},
context_instance=RequestContext(request)
)
return HttpResponseRedirect("/office/%d/" % office.id)
class DateInPast(Exception):
pass
class BadStatus(Exception):
pass
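# office_page renders the reservation calendar for a single office and also
# handles the POST actions: rescheduling an existing reservation and booking
# a new slot for a patient via PatientForm, sending the corresponding e-mail
# notifications on success.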
def office_page(request, office_id, for_date=None):
office = get_object_or_404(Office, published=True, pk=office_id)
if not request.user.is_authenticated() and office.authenticated_only: # authentication required
return HttpResponseRedirect("/")
reschedule_reservation = request.GET.get('reschedule')
if reschedule_reservation:
try:
reschedule_reservation = Reservation.objects.get(pk=reschedule_reservation)
except Reservation.DoesNotExist:
raise Http404
form = None
message = None
start_date = date.today()
end_date = start_date + timedelta(office.days_to_generate)
dates = list(Reservation.objects.filter(date__gte=date.today()).dates("date", "day"))
if dates:
if not request.user.is_authenticated():
start_date = dates[0]
end_date = dates[-1]
if for_date:
actual_date = datetime.strptime(for_date, "%Y-%m-%d").date()
if actual_date < start_date:
actual_date = start_date
else:
actual_date = start_date
reservation_id = 0
if request.method == 'POST':
action = request.POST.get("action")
if action == "reschedule":
old_reservation = get_object_or_404(Reservation, pk=request.POST.get("old_reservation"))
new_reservation = get_object_or_404(Reservation, pk=request.POST.get("reservation"))
if new_reservation.patient or new_reservation.get_actual_status() == Reservation.STATUS_DISABLED:
messages.error(
request,
render_to_string(
"messages/reschedule_failed.html", {
"old_reservation": old_reservation,
"new_reservation": new_reservation,
}
)
)
return HttpResponseRedirect("/status/%d/" % new_reservation.pk)
actual_date = new_reservation.date
new_reservation.patient = old_reservation.patient
new_reservation.exam_kind = old_reservation.exam_kind
old_reservation.cancel()
new_reservation.save()
old_reservation.save()
send_reschedule_notificaion(old_reservation, new_reservation)
messages.success(
request,
render_to_string(
"messages/rescheduled.html", {
"old_reservation": old_reservation,
"new_reservation": new_reservation,
}
)
)
return HttpResponseRedirect("/status/%d/" % new_reservation.pk)
else:
form = PatientForm(request.POST)
form.fields["exam_kind"].queryset = office.exam_kinds.all()
if form.is_valid():
try:
reservation = form.cleaned_data["reservation"]
actual_date = reservation.date
reservation_id = reservation.id
if request.user.is_authenticated():
if reservation.status not in (Reservation.STATUS_ENABLED, Reservation.STATUS_IN_HELD):
raise BadStatus()
else:
if reservation.status != Reservation.STATUS_ENABLED:
raise BadStatus()
datetime_limit = datetime.combine(date.today() + timedelta(1), time(0, 0))
if reservation.starting_time < datetime_limit:
raise DateInPast()
hexdigest = Patient.get_ident_hash(form.cleaned_data["ident_hash"])
patient, patient_created = Patient.objects.get_or_create(
ident_hash=hexdigest,
defaults={
"first_name": form.cleaned_data["first_name"],
"last_name": form.cleaned_data["last_name"],
"ident_hash": form.cleaned_data["ident_hash"],
"phone_number": form.cleaned_data["phone_number"],
"email": form.cleaned_data["email"],
}
)
if not patient_created and patient.has_reservation():
messages.error(
request,
render_to_string(
"messages/creation_failed.html", {
"reservations": patient.actual_reservations(),
"user": request.user,
}
)
)
return HttpResponseRedirect("/status/%d/" % reservation.pk)
if not patient_created:
patient.first_name = form.cleaned_data["first_name"]
patient.last_name = form.cleaned_data["last_name"]
patient.phone_number = form.cleaned_data["phone_number"]
patient.email = form.cleaned_data["email"]
patient.save()
reservation.patient = patient
reservation.exam_kind = form.cleaned_data["exam_kind"]
reservation.status = Reservation.STATUS_ENABLED # clean 'in held' state
reservation.reservation_time = datetime.now()
reservation.reserved_by = request.user.username
reservation.save()
send_reservation_notification(reservation)
messages.success(
request,
render_to_string(
"messages/created.html", {
"reservation": reservation,
}
)
)
return HttpResponseRedirect("/status/%d/" % reservation.pk)
except DateInPast:
message = _("Can't make reservation for current day or day in the past.")
except BadStatus:
message = _("Can't make reservation. Please try again.")
reservation_id = 0
else:
r_val = form["reservation"].value()
if r_val:
reservation_id = int(r_val)
actual_date = Reservation.objects.get(pk=reservation_id).date
if form is None:
form = PatientForm()
form.fields["exam_kind"].queryset = office.exam_kinds.all()
office_data = {
"id": office.id,
"name": office.name,
"reservations": json.dumps(
get_reservations_data(
office.reservations(actual_date),
all_attrs=request.user.is_authenticated()
)
),
"days_status": json.dumps(office.days_status(start_date, end_date))
}
data = {
"offices": get_offices(request.user),
"office": office_data,
"form": form,
"message": message,
"start_date": start_date,
"actual_date": actual_date,
"end_date": end_date,
"reservation_id": reservation_id,
"reschedule_mode": reschedule_reservation is not None
}
if reschedule_reservation:
data.update({
"reschedule_mode": True,
"reservation": reschedule_reservation
})
return render_to_response(
"index.html",
data,
context_instance=RequestContext(request)
)
def date_reservations(request, for_date, office_id):
office = get_object_or_404(Office, pk=office_id)
for_date = datetime.strptime(for_date, "%Y-%m-%d").date()
data = get_reservations_data(
office.reservations(for_date),
all_attrs=request.user.is_authenticated()
)
response = HttpResponse(json.dumps(data), "application/json")
response["Cache-Control"] = "no-cache"
return response
@login_required
def patient_details(request):
response_data = {
"first_name": "",
"last_name": "",
"phone_number": "",
"email": "",
}
if request.method == 'POST':
form = PatientDetailForm(request.POST)
if form.is_valid():
hexdigest = Patient.get_ident_hash(form.cleaned_data["ident_hash"])
try:
patient = Patient.objects.get(ident_hash=hexdigest)
response_data = {
"pk": patient.pk,
"first_name": patient.first_name,
"last_name": patient.last_name,
"phone_number": patient.phone_number,
"email": patient.email,
}
except Patient.DoesNotExist:
pass
return HttpResponse(json.dumps(response_data), "application/json")
@login_required
def hold_reservation(request, r_id):
reservation = get_object_or_404(Reservation, pk=r_id)
if reservation.status == Reservation.STATUS_ENABLED:
reservation.status = Reservation.STATUS_IN_HELD
reservation.reservation_time = datetime.now()
reservation.reserved_by = request.user.username
reservation.save()
response_data = {"status_ok": True}
else:
response_data = {"status_ok": False}
response = HttpResponse(json.dumps(response_data), "application/json")
response["Cache-Control"] = "no-cache"
return response
@login_required
def unhold_reservation(request, r_id):
reservation = get_object_or_404(Reservation, pk=r_id)
if reservation.status == Reservation.STATUS_IN_HELD:
reservation.status = Reservation.STATUS_ENABLED
reservation.reservation_time = None
reservation.reserved_by = ""
reservation.save()
response_data = {"status_ok": True}
else:
response_data = {"status_ok": False}
response = HttpResponse(json.dumps(response_data), "application/json")
response["Cache-Control"] = "no-cache"
return response
@login_required
def cancel_reservation(request):
reservation = get_object_or_404(Reservation, pk=request.POST.get('reservation_id'))
tmp_reservation = Reservation(
office=reservation.office,
patient=reservation.patient,
date=reservation.date,
time=reservation.time,
exam_kind=reservation.exam_kind
)
if reservation.patient is not None:
reservation.cancel()
reservation.save()
send_cancel_notificaion(tmp_reservation)
messages.success(
request,
render_to_string(
"messages/canceled.html", {
"reservation": tmp_reservation,
}
)
)
else:
messages.error(
request,
render_to_string(
"messages/cancel_failed.html", {
"reservation": tmp_reservation
}
)
)
return HttpResponseRedirect("/status/%d/" % reservation.pk)
@login_required
def disable_reservation(request, r_id):
reservation = get_object_or_404(Reservation, pk=r_id)
if reservation.status in (Reservation.STATUS_ENABLED, Reservation.STATUS_IN_HELD) and request.user.is_staff:
reservation.status = Reservation.STATUS_DISABLED
reservation.reservation_time = datetime.now()
reservation.reserved_by = request.user.username
reservation.save()
response_data = {"status_ok": True}
else:
response_data = {"status_ok": False}
response = HttpResponse(json.dumps(response_data), "application/json")
response["Cache-Control"] = "no-cache"
return response
@login_required
def enable_reservation(request, r_id):
reservation = get_object_or_404(Reservation, pk=r_id)
if reservation.status == Reservation.STATUS_DISABLED and request.user.is_staff:
reservation.status = Reservation.STATUS_ENABLED
reservation.reservation_time = None
reservation.reserved_by = ""
reservation.save()
response_data = {"status_ok": True}
else:
response_data = {"status_ok": False}
response = HttpResponse(json.dumps(response_data), "application/json")
response["Cache-Control"] = "no-cache"
return response
@login_required
def list_reservations(request, for_date, office_id):
for_date = datetime.strptime(for_date, "%Y-%m-%d").date()
office = get_object_or_404(Office, pk=office_id)
return render_to_response(
"list/office.html",
{
"for_date": for_date,
"office": office,
"reservations": get_reservations_data(office.reservations(for_date)),
},
context_instance=RequestContext(request)
)
@login_required
def reservation_details(request, r_id):
reservation = get_object_or_404(Reservation, pk=r_id)
response_data = {
"first_name": reservation.patient.first_name,
"last_name": reservation.patient.last_name,
"phone_number": reservation.patient.phone_number,
"email": reservation.patient.email,
"exam_kind": reservation.exam_kind_id,
}
response = HttpResponse(json.dumps(response_data), "application/json")
response["Cache-Control"] = "no-cache"
return response
@login_required
def patient_reservations(request):
response_data = {"patient": None}
if request.method == 'POST':
ident_hash = request.POST.get("ident_hash", "")
if len(ident_hash) < 12:
ident_hash = Patient.get_ident_hash(ident_hash)
try:
response_data["patient"] = Patient.objects.get(ident_hash=ident_hash)
except Patient.DoesNotExist:
raise Http404
return render_to_response(
"list/patient.html",
response_data,
context_instance=RequestContext(request)
)
raise Http404
def days_status(request, year, month, office_id):
office = get_object_or_404(Office, pk=office_id)
year = int(year)
month = int(month)
start_date = date(year, month, 1)
if month == 12:
		end_date = date(year, 12, 31)
else:
end_date = date(year, month + 1, 1) - timedelta(1)
response_data = office.days_status(start_date, end_date)
response = HttpResponse(json.dumps(response_data), "application/json")
response["Cache-Control"] = "no-cache"
return response
@csrf_exempt
def login(request):
try:
if request.POST:
username = request.POST["username"]
password = request.POST["password"]
if username and password:
user = authenticate(username=username, password=password)
if user and user.is_authenticated():
django_login(request, user)
return HttpResponse(status=200)
except:
pass
return HttpResponse(status=401)
@login_required
def logout(request):
django_logout(request)
return HttpResponse(status=200)
@login_required
def list_offices(request):
response_data = [{
"id": office.pk,
"name": office.name,
"street": office.street,
"zip_code": office.zip_code,
"city": office.city,
"email": office.email,
"order": office.order,
"authenticated_only": office.authenticated_only,
"phones": [phone.number for phone in office.phone_numbers.all()],
} for office in Office.objects.filter(published=True)]
return HttpResponse(json.dumps(response_data), "application/json")
@login_required
def enable_auth_only(request, r_id):
reservation = get_object_or_404(Reservation, pk=r_id)
reservation.authenticated_only = True
reservation.save()
response_data = {"status_ok": True}
response = HttpResponse(json.dumps(response_data), "application/json")
response["Cache-Control"] = "no-cache"
return response
@login_required
def disable_auth_only(request, r_id):
reservation = get_object_or_404(Reservation, pk=r_id)
reservation.authenticated_only = False
reservation.save()
response_data = {"status_ok": True}
response = HttpResponse(json.dumps(response_data), "application/json")
response["Cache-Control"] = "no-cache"
return response
# vim: set ts=4 sts=4 sw=4 noet:
| gpl-3.0 | -6,299,085,958,533,279,000 | 29.530864 | 109 | 0.707845 | false | 3.236205 | false | false | false |
informatics-isi-edu/hatrac | hatrac/model/storage/filesystem.py | 1 | 8769 |
#
# Copyright 2015-2019 University of Southern California
# Distributed under the Apache License, Version 2.0. See LICENSE for more info.
#
"""Filesystem-backed object bulk storage for Hatrac.
This module handles only low-level byte storage. Object and
object-version lifecycle and authorization are handled by the caller.
"""
import os
import hashlib
import base64
import binascii
import random
import struct
import io
from ...core import BadRequest, Conflict, coalesce
def make_file(dirname, relname, accessmode):
"""Create and open file with accessmode, including missing parents.
Returns fp.
"""
# TODO: test for conflicts during creation?
filename = "%s/%s" % (dirname, relname)
if not os.path.exists(dirname):
os.makedirs(dirname, mode=0o755)
return open(filename, accessmode, 0)
class HatracStorage (object):
"""Implement HatracStorage API using basic POSIX filesystem mapping.
A configured storage rootdir, object name, and object version
are combined to form one filename to store the immutable
object:
/ rootdir / object_name : object_version
consistent with Hatrac rules. The incoming name may include
RFC3986 percent-encoded URL characters, which we assume our
filesystem can tolerate.
"""
track_chunks = False
_bufsize = 1024**2
def __init__(self, config):
self.root = config.get('storage_path', '/var/www/hatrac')
def _dirname_relname(self, name, version):
"""Map Hatrac identifiers to backend storage."""
# TODO: consider hashing if too many namespaces exist at top level
assert name
assert version
assert ':' not in version
dirname = self.root
nameparts = [ n for n in name.split('/') if n ]
dirparts = nameparts[0:-1]
relpart = nameparts[-1]
relname = "%s:%s" % (relpart, version)
assert relpart
if dirparts:
dirname = "%s/%s" % (self.root, "/".join(dirparts))
else:
dirname = self.root
return (dirname, relname)
def create_from_file(self, name, input, nbytes, metadata={}):
"""Create an entire file-version object from input content, returning version ID."""
version = base64.b32encode(
(struct.pack('Q', random.getrandbits(64))
+ struct.pack('Q', random.getrandbits(64)))[0:26]
).decode().replace('=', '') # strip off '=' padding
dirname, relname = self._dirname_relname(name, version)
f = make_file(dirname, relname, 'wb')
# upload whole content at offset 0 (for code reuse)
self.upload_chunk_from_file(None, None, 0, 0, input, nbytes, metadata, f)
return version
def create_upload(self, name, nbytes=None, metadata={}):
upload_id = self.create_from_file(name, io.BytesIO(b''), 0)
return upload_id
def cancel_upload(self, name, upload_id):
# this backend uses upload_id as version_id
self.delete(name, upload_id)
return None
def finalize_upload(self, name, upload_id, chunk_data, metadata={}):
# nothing changes in storage for this backend strategy
version_id = upload_id
assert chunk_data is None
# aggressively validate uploaded content against pre-defined MD5 if it was given at job start
if 'content-md5' in metadata:
dirname, relname = self._dirname_relname(name, version_id)
fullname = "%s/%s" % (dirname, relname)
f = open(fullname, "rb")
hasher = hashlib.md5()
eof = False
while not eof:
buf = f.read(self._bufsize)
if len(buf) != 0:
hasher.update(buf)
else:
eof = True
stored_md5 = hasher.digest()
if metadata['content-md5'] != stored_md5:
raise Conflict(
'Current uploaded content MD5 %s does not match expected %s.'
% (binascii.hexlify(stored_md5), binascii.hexlify(metadata['content-md5']))
)
return version_id
def upload_chunk_from_file(self, name, version, position, chunksize, input, nbytes, metadata={}, f=None):
"""Save chunk data into storage.
If self.track_chunks, return value must be None or a value
that can be serialized using webauthn2.util.jsonWriteRaw,
i.e. dict, array, or scalar values.
"""
if f is None:
dirname, relname = self._dirname_relname(name, version)
fullname = "%s/%s" % (dirname, relname)
f = open(fullname, "r+b")
f.seek(position*chunksize)
if 'content-md5' in metadata:
hasher = hashlib.md5()
else:
hasher = None
rbytes = 0
eof = False
while not eof:
if nbytes is not None:
bufsize = min(nbytes-rbytes, self._bufsize)
else:
bufsize = self._bufsize
buf = input.read(bufsize)
f.write(buf)
bufsize = len(buf)
rbytes += bufsize
if hasher:
hasher.update(buf)
if nbytes is not None:
if rbytes >= nbytes:
eof = True
elif bufsize == 0:
f.close()
raise BadRequest('Only received %s of %s expected bytes.' % (rbytes, nbytes))
elif bufsize == 0:
eof = True
if hasher:
received_md5 = hasher.digest()
if metadata['content-md5'] != received_md5:
raise BadRequest(
'Received content MD5 %r does not match expected %r.'
% (received_md5, metadata['content-md5'])
#% (binascii.hexlify(received_md5), binascii.hexlify(metadata['content-md5'].encode()))
)
return "test"
def get_content(self, name, version, metadata={}):
return self.get_content_range(name, version, metadata)
def get_content_range(self, name, version, metadata={}, get_slice=None):
"""Return (nbytes, metadata, data_iterator) tuple for existing file-version object."""
dirname, relname = self._dirname_relname(name, version)
fullname = "%s/%s" % (dirname, relname)
nbytes = os.path.getsize(fullname)
if get_slice is not None:
pos = coalesce(get_slice.start, 0)
limit = coalesce(get_slice.stop, nbytes)
else:
pos = 0
limit = nbytes
if pos != 0 or limit != nbytes:
# most object metadata does not apply to partial read content
metadata = {
k: v
for k, v in metadata.items()
if k in {'content-type'}
}
length = limit - pos
def helper():
if 'content-md5' in metadata:
hasher = hashlib.md5()
else:
hasher = None
rpos = pos
eof = False
with open(fullname, 'rb') as f:
f.seek(rpos)
while not eof:
buf = f.read(min(limit-rpos, self._bufsize))
buflen = len(buf)
rpos += buflen
if hasher:
hasher.update(buf)
if rpos >= (limit-1):
eof = True
elif buflen == 0:
raise IOError('Read truncated at %s when %s expected.' % (rpos, limit))
if eof and hasher:
retrieved_md5 = hasher.digest()
if metadata['content-md5'] != retrieved_md5:
raise IOError(
'Retrieved content MD5 %s does not match expected %s.'
% (binascii.hexlify(retrieved_md5), binascii.hexlify(metadata['content-md5']))
)
yield buf
return (length, metadata, helper())
def delete(self, name, version):
"""Delete object version."""
dirname, relname = self._dirname_relname(name, version)
fullname = "%s/%s" % (dirname, relname)
os.remove(fullname)
def delete_namespace(self, name):
"""Tidy up after an empty namespace that has been deleted."""
dirname, relname = self._dirname_relname(name, 'dummy')
try:
os.removedirs(dirname)
except OSError:
pass
| apache-2.0 | -6,930,290,742,175,459,000 | 32.342205 | 110 | 0.542707 | false | 4.360517 | false | false | false |
Shaffi08/yowsup | yowsup/layers/protocol_contacts/protocolentities/notificiation_contacts_sync.py | 59 | 1237 | from yowsup.structs import ProtocolTreeNode
from .notification_contact import ContactNotificationProtocolEntity
class ContactsSyncNotificationProtocolEntity(ContactNotificationProtocolEntity):
'''
<notification from="[email protected]" t="1437251557" offline="0" type="contacts" id="4174521704">
<sync after="1437251557"></sync>
</notification>
'''
def __init__(self, _id, _from, timestamp, notify, offline, after):
super(ContactsSyncNotificationProtocolEntity, self).__init__(_id, _from, timestamp, notify, offline)
self.setData(after)
def setData(self, after):
self.after = int(after)
def toProtocolTreeNode(self):
node = super(ContactsSyncNotificationProtocolEntity, self).toProtocolTreeNode()
syncNode = ProtocolTreeNode("sync", {"after": str(self.after)}, None, None)
node.addChild(syncNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = ContactNotificationProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = ContactsSyncNotificationProtocolEntity
syncNode = node.getChild("sync")
entity.setData(syncNode.getAttributeValue("after"))
return entity | gpl-3.0 | -5,041,676,321,133,361,000 | 41.689655 | 113 | 0.709782 | false | 4.221843 | false | false | false |
vtemian/kruncher | crunch/jobs/add.py | 1 | 2347 | import sys
import rethinkdb as r
from disco.core import Job
import csv
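# GroupSum is a Disco map/reduce job: the mapper groups CSV rows by the
# `group_by` column index and folds the columns listed in `fields` into a
# single per-row total (other numeric columns are carried through); the
# reducer then sums those per-row vectors element-wise for each group key.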
class GroupSum(Job):
def __init__(self, group_by, fields, *args, **kwargs):
self.group_by = int(group_by)
self.fields = map(int, fields)
super(GroupSum, self).__init__(*args, **kwargs)
@staticmethod
def map_reader(fd, size, url, params):
reader = csv.reader(fd, delimiter=',')
for row in reader:
if len(row) <= 1:
continue
yield row
def map(self, line, params):
words = line
total = 0
result = []
for word in range(len(words)):
if word == self.group_by:
continue
try:
if word in self.fields:
total += int(words[word])
else:
result.append(int(words[word]))
except:
pass
result.insert(0, total)
yield words[self.group_by], result
def reduce(self, rows_iter, out, params):
from disco.util import kvgroup
final = {}
for key, result in kvgroup(rows_iter):
if key not in final:
final[key] = []
for line in result:
for value in range(len(line)):
if len(final[key]) <= value:
final[key].append(line[value])
else:
final[key][value] += line[value]
out.add(final, "a")
if __name__ == '__main__':
from add import GroupSum
db = r.connect(**{
'host': 'batman.krunchr.net',
'port': 28019,
'auth_key': '',
'db': 'krunchr'
})
dataset = r.db("krunchr").table('datasets').get(sys.argv[1]).run(db)
fields = [str(dataset['fields'].index(field)) for field in sys.argv[2:]]
group_by = dataset['fields'].index(sys.argv[2])
job = GroupSum(group_by, fields)
job.run(input=['data:%s' % sys.argv[1]])
from disco.core import result_iterator
table_name = sys.argv[1].replace('-', '_')
try:
r.db("krunchr").table_create(table_name).run(db)
except:
pass
lines = []
fields = dataset['fields']
fields.remove(sys.argv[2])
for line in result_iterator(job.wait(show=True)):
for key in line[0]:
insert = {sys.argv[2]: key}
if len(line[0][key]) < len(fields):
continue
insert.update({field: line[0][key][fields.index(field)-1] for field in fields})
r.table(table_name).insert(insert).run(db)
r.table('datasets').filter({'id': sys.argv[1]}).update({'ready': True}).run(db)
| apache-2.0 | 5,782,297,061,048,121,000 | 25.370787 | 85 | 0.585428 | false | 3.282517 | false | false | false |
openstack/octavia | octavia/tests/unit/common/sample_configs/sample_configs_split.py | 1 | 42888 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
from oslo_config import cfg
from octavia.common import constants
from octavia.tests.common import sample_certs
CONF = cfg.CONF
def sample_amphora_tuple(id='sample_amphora_id_1', lb_network_ip='10.0.1.1',
vrrp_ip='10.1.1.1', ha_ip='192.168.10.1',
vrrp_port_id='1234', ha_port_id='1234', role=None,
status='ACTIVE', vrrp_interface=None,
vrrp_priority=None, api_version='0.5'):
in_amphora = collections.namedtuple(
'amphora', 'id, lb_network_ip, vrrp_ip, ha_ip, vrrp_port_id, '
'ha_port_id, role, status, vrrp_interface,'
'vrrp_priority, api_version')
return in_amphora(
id=id,
lb_network_ip=lb_network_ip,
vrrp_ip=vrrp_ip,
ha_ip=ha_ip,
vrrp_port_id=vrrp_port_id,
ha_port_id=ha_port_id,
role=role,
status=status,
vrrp_interface=vrrp_interface,
vrrp_priority=vrrp_priority,
api_version=api_version)
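# The RET_* dictionaries below are the expected ("return") structures that the
# unit tests compare rendered/parsed configuration data against; the
# sample_*_tuple helpers further down build the matching input objects.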
RET_PERSISTENCE = {
'type': 'HTTP_COOKIE',
'cookie_name': None}
RET_MONITOR_1 = {
'id': 'sample_monitor_id_1',
'type': 'HTTP',
'delay': 30,
'timeout': 31,
'fall_threshold': 3,
'rise_threshold': 2,
'http_method': 'GET',
'url_path': '/index.html',
'expected_codes': '418',
'enabled': True,
'http_version': 1.0,
'domain_name': None}
RET_MONITOR_2 = {
'id': 'sample_monitor_id_2',
'type': 'HTTP',
'delay': 30,
'timeout': 31,
'fall_threshold': 3,
'rise_threshold': 2,
'http_method': 'GET',
'url_path': '/healthmon.html',
'expected_codes': '418',
'enabled': True,
'http_version': 1.0,
'domain_name': None}
RET_MEMBER_1 = {
'id': 'sample_member_id_1',
'address': '10.0.0.99',
'protocol_port': 82,
'weight': 13,
'subnet_id': '10.0.0.1/24',
'enabled': True,
'operating_status': 'ACTIVE',
'monitor_address': None,
'monitor_port': None,
'backup': False}
RET_MEMBER_2 = {
'id': 'sample_member_id_2',
'address': '10.0.0.98',
'protocol_port': 82,
'weight': 13,
'subnet_id': '10.0.0.1/24',
'enabled': True,
'operating_status': 'ACTIVE',
'monitor_address': None,
'monitor_port': None,
'backup': False}
RET_MEMBER_3 = {
'id': 'sample_member_id_3',
'address': '10.0.0.97',
'protocol_port': 82,
'weight': 13,
'subnet_id': '10.0.0.1/24',
'enabled': True,
'operating_status': 'ACTIVE',
'monitor_address': None,
'monitor_port': None,
'backup': False}
RET_POOL_1 = {
'id': 'sample_pool_id_1',
'protocol': 'http',
'lb_algorithm': 'roundrobin',
'members': [RET_MEMBER_1, RET_MEMBER_2],
'health_monitor': RET_MONITOR_1,
'session_persistence': RET_PERSISTENCE,
'enabled': True,
'operating_status': 'ACTIVE',
'stick_size': '10k',
constants.HTTP_REUSE: False,
'ca_tls_path': '',
'crl_path': '',
'tls_enabled': False}
RET_POOL_2 = {
'id': 'sample_pool_id_2',
'protocol': 'http',
'lb_algorithm': 'roundrobin',
'members': [RET_MEMBER_3],
'health_monitor': RET_MONITOR_2,
'session_persistence': RET_PERSISTENCE,
'enabled': True,
'operating_status': 'ACTIVE',
'stick_size': '10k',
constants.HTTP_REUSE: False,
'ca_tls_path': '',
'crl_path': '',
'tls_enabled': False}
RET_DEF_TLS_CONT = {'id': 'cont_id_1', 'allencompassingpem': 'imapem',
'primary_cn': 'FakeCn'}
RET_SNI_CONT_1 = {'id': 'cont_id_2', 'allencompassingpem': 'imapem2',
'primary_cn': 'FakeCn'}
RET_SNI_CONT_2 = {'id': 'cont_id_3', 'allencompassingpem': 'imapem3',
'primary_cn': 'FakeCn2'}
RET_L7RULE_1 = {
'id': 'sample_l7rule_id_1',
'type': constants.L7RULE_TYPE_PATH,
'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
'key': None,
'value': '/api',
'invert': False,
'enabled': True}
RET_L7RULE_2 = {
'id': 'sample_l7rule_id_2',
'type': constants.L7RULE_TYPE_HEADER,
'compare_type': constants.L7RULE_COMPARE_TYPE_CONTAINS,
'key': 'Some-header',
'value': 'This\\ string\\\\\\ with\\ stuff',
'invert': True,
'enabled': True}
RET_L7RULE_3 = {
'id': 'sample_l7rule_id_3',
'type': constants.L7RULE_TYPE_COOKIE,
'compare_type': constants.L7RULE_COMPARE_TYPE_REGEX,
'key': 'some-cookie',
'value': 'this.*|that',
'invert': False,
'enabled': True}
RET_L7RULE_4 = {
'id': 'sample_l7rule_id_4',
'type': constants.L7RULE_TYPE_FILE_TYPE,
'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO,
'key': None,
'value': 'jpg',
'invert': False,
'enabled': True}
RET_L7RULE_5 = {
'id': 'sample_l7rule_id_5',
'type': constants.L7RULE_TYPE_HOST_NAME,
'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH,
'key': None,
'value': '.example.com',
'invert': False,
'enabled': True}
RET_L7RULE_6 = {
'id': 'sample_l7rule_id_6',
'type': constants.L7RULE_TYPE_HOST_NAME,
'compare_type': constants.L7RULE_COMPARE_TYPE_ENDS_WITH,
'key': None,
'value': '.example.com',
'invert': False,
'enabled': False}
RET_L7POLICY_1 = {
'id': 'sample_l7policy_id_1',
'action': constants.L7POLICY_ACTION_REDIRECT_TO_POOL,
'redirect_pool': RET_POOL_2,
'redirect_url': None,
'redirect_prefix': None,
'enabled': True,
'l7rules': [RET_L7RULE_1],
'redirect_http_code': None}
RET_L7POLICY_2 = {
'id': 'sample_l7policy_id_2',
'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
'redirect_pool': None,
'redirect_url': 'http://www.example.com',
'redirect_prefix': None,
'enabled': True,
'l7rules': [RET_L7RULE_2, RET_L7RULE_3],
'redirect_http_code': 302}
RET_L7POLICY_3 = {
'id': 'sample_l7policy_id_3',
'action': constants.L7POLICY_ACTION_REJECT,
'redirect_pool': None,
'redirect_url': None,
'redirect_prefix': None,
'enabled': True,
'l7rules': [RET_L7RULE_4, RET_L7RULE_5],
'redirect_http_code': None}
RET_L7POLICY_4 = {
'id': 'sample_l7policy_id_4',
'action': constants.L7POLICY_ACTION_REJECT,
'redirect_pool': None,
'redirect_url': None,
'redirect_prefix': None,
'enabled': True,
'l7rules': [],
'redirect_http_code': None}
RET_L7POLICY_5 = {
'id': 'sample_l7policy_id_5',
'action': constants.L7POLICY_ACTION_REJECT,
'redirect_pool': None,
'redirect_url': None,
'redirect_prefix': None,
'enabled': False,
'l7rules': [RET_L7RULE_5],
'redirect_http_code': None}
RET_L7POLICY_6 = {
'id': 'sample_l7policy_id_6',
'action': constants.L7POLICY_ACTION_REJECT,
'redirect_pool': None,
'redirect_url': None,
'redirect_prefix': None,
'enabled': True,
'l7rules': [],
'redirect_http_code': None}
RET_L7POLICY_7 = {
'id': 'sample_l7policy_id_7',
'action': constants.L7POLICY_ACTION_REDIRECT_PREFIX,
'redirect_pool': None,
'redirect_url': None,
'redirect_prefix': 'https://example.com',
'enabled': True,
'l7rules': [RET_L7RULE_2, RET_L7RULE_3],
'redirect_http_code': 302}
RET_L7POLICY_8 = {
'id': 'sample_l7policy_id_8',
'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL,
'redirect_pool': None,
'redirect_url': 'http://www.example.com',
'redirect_prefix': None,
'enabled': True,
'l7rules': [RET_L7RULE_2, RET_L7RULE_3],
'redirect_http_code': None}
RET_LISTENER = {
'id': 'sample_listener_id_1',
'protocol_port': '80',
'protocol': 'HTTP',
'protocol_mode': 'http',
'default_pool': RET_POOL_1,
'connection_limit': constants.HAPROXY_MAX_MAXCONN,
'amphorae': [sample_amphora_tuple()],
'peer_port': 1024,
'topology': 'SINGLE',
'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ '
'%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ '
'%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc',
'pools': [RET_POOL_1],
'l7policies': [],
'enabled': True,
'insert_headers': {},
'timeout_client_data': 50000,
'timeout_member_connect': 5000,
'timeout_member_data': 50000,
'timeout_tcp_inspect': 0,
}
RET_LISTENER_L7 = {
'id': 'sample_listener_id_1',
'protocol_port': '80',
'protocol': 'HTTP',
'protocol_mode': 'http',
'default_pool': RET_POOL_1,
'connection_limit': constants.HAPROXY_MAX_MAXCONN,
'amphorae': [sample_amphora_tuple()],
'peer_port': 1024,
'topology': 'SINGLE',
'user_log_format': '12345\\ sample_loadbalancer_id_1\\ %f\\ %ci\\ %cp\\ '
'%t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ %[ssl_c_verify]\\ '
'%{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ %tsc',
'pools': [RET_POOL_1, RET_POOL_2],
'l7policies': [RET_L7POLICY_1, RET_L7POLICY_2, RET_L7POLICY_3,
RET_L7POLICY_4, RET_L7POLICY_5, RET_L7POLICY_6,
RET_L7POLICY_7],
'enabled': True,
'insert_headers': {},
'timeout_client_data': 50000,
'timeout_member_connect': 5000,
'timeout_member_data': 50000,
'timeout_tcp_inspect': 0,
}
RET_LISTENER_TLS = {
'id': 'sample_listener_id_1',
'protocol_port': '443',
'protocol': 'TERMINATED_HTTPS',
'protocol_mode': 'http',
'default_pool': RET_POOL_1,
'connection_limit': constants.HAPROXY_MAX_MAXCONN,
'tls_certificate_id': 'cont_id_1',
'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem',
'default_tls_container': RET_DEF_TLS_CONT,
'pools': [RET_POOL_1],
'l7policies': [],
'enabled': True,
'insert_headers': {}}
RET_LISTENER_TLS_SNI = {
'id': 'sample_listener_id_1',
'protocol_port': '443',
'protocol': 'TERMINATED_HTTPS',
'default_pool': RET_POOL_1,
'connection_limit': constants.HAPROXY_MAX_MAXCONN,
'tls_certificate_id': 'cont_id_1',
'default_tls_path': '/etc/ssl/sample_loadbalancer_id_1/fakeCN.pem',
'default_tls_container': RET_DEF_TLS_CONT,
'crt_dir': '/v2/sample_loadbalancer_id_1',
'sni_container_ids': ['cont_id_2', 'cont_id_3'],
'sni_containers': [RET_SNI_CONT_1, RET_SNI_CONT_2],
'pools': [RET_POOL_1],
'l7policies': [],
'enabled': True,
'insert_headers': {}}
RET_AMPHORA = {
'id': 'sample_amphora_id_1',
'lb_network_ip': '10.0.1.1',
'vrrp_ip': '10.1.1.1',
'ha_ip': '192.168.10.1',
'vrrp_port_id': '1234',
'ha_port_id': '1234',
'role': None,
'status': 'ACTIVE',
'vrrp_interface': None,
'vrrp_priority': None}
RET_LB = {
'host_amphora': RET_AMPHORA,
'id': 'sample_loadbalancer_id_1',
'vip_address': '10.0.0.2',
'listener': RET_LISTENER,
'topology': 'SINGLE',
'enabled': True,
'global_connection_limit': constants.HAPROXY_MAX_MAXCONN}
RET_LB_L7 = {
'host_amphora': RET_AMPHORA,
'id': 'sample_loadbalancer_id_1',
'vip_address': '10.0.0.2',
'listener': RET_LISTENER_L7,
'topology': 'SINGLE',
'enabled': True,
'global_connection_limit': constants.HAPROXY_MAX_MAXCONN}
UDP_SOURCE_IP_BODY = {
'type': constants.SESSION_PERSISTENCE_SOURCE_IP,
'persistence_timeout': 33,
'persistence_granularity': '255.0.0.0'
}
RET_UDP_HEALTH_MONITOR = {
'id': 'sample_monitor_id_1',
'type': constants.HEALTH_MONITOR_UDP_CONNECT,
'delay': 30,
'timeout': 31,
'enabled': True,
'fall_threshold': 3,
'check_script_path': (CONF.haproxy_amphora.base_path +
'/lvs/check/udp_check.sh')
}
UDP_HEALTH_MONITOR_NO_SCRIPT = {
'id': 'sample_monitor_id_1',
'check_script_path': None,
'delay': 30,
'enabled': True,
'fall_threshold': 3,
'timeout': 31,
'type': 'UDP'
}
RET_UDP_MEMBER = {
'id': 'member_id_1',
'address': '192.0.2.10',
'protocol_port': 82,
'weight': 13,
'enabled': True,
'monitor_address': None,
'monitor_port': None
}
RET_UDP_MEMBER_MONITOR_IP_PORT = {
'id': 'member_id_1',
'address': '192.0.2.10',
'protocol_port': 82,
'weight': 13,
'enabled': True,
'monitor_address': '192.168.1.1',
'monitor_port': 9000
}
UDP_MEMBER_1 = {
'id': 'sample_member_id_1',
'address': '10.0.0.99',
'enabled': True,
'protocol_port': 82,
'weight': 13,
'monitor_address': None,
'monitor_port': None
}
UDP_MEMBER_2 = {
'id': 'sample_member_id_2',
'address': '10.0.0.98',
'enabled': True,
'protocol_port': 82,
'weight': 13,
'monitor_address': None,
'monitor_port': None
}
RET_UDP_POOL = {
'id': 'sample_pool_id_1',
'enabled': True,
'health_monitor': UDP_HEALTH_MONITOR_NO_SCRIPT,
'lb_algorithm': 'rr',
'members': [UDP_MEMBER_1, UDP_MEMBER_2],
'protocol': 'udp',
'session_persistence': UDP_SOURCE_IP_BODY
}
RET_UDP_LISTENER = {
'connection_limit': 98,
'default_pool': {
'id': 'sample_pool_id_1',
'enabled': True,
'health_monitor': RET_UDP_HEALTH_MONITOR,
'lb_algorithm': 'rr',
'members': [UDP_MEMBER_1, UDP_MEMBER_2],
'protocol': 'udp',
'session_persistence': UDP_SOURCE_IP_BODY
},
'enabled': True,
'id': 'sample_listener_id_1',
'protocol_mode': 'udp',
'protocol_port': '80'
}
def sample_loadbalancer_tuple(proto=None, monitor=True, persistence=True,
persistence_type=None, tls=False, sni=False,
topology=None, l7=False, enabled=True):
proto = 'HTTP' if proto is None else proto
topology = 'SINGLE' if topology is None else topology
in_lb = collections.namedtuple(
'load_balancer', 'id, name, protocol, vip, listeners, amphorae,'
' enabled')
return in_lb(
id='sample_loadbalancer_id_1',
name='test-lb',
protocol=proto,
vip=sample_vip_tuple(),
topology=topology,
listeners=[sample_listener_tuple(proto=proto, monitor=monitor,
persistence=persistence,
persistence_type=persistence_type,
tls=tls,
sni=sni,
l7=l7,
enabled=enabled)],
enabled=enabled
)
def sample_listener_loadbalancer_tuple(proto=None, topology=None,
enabled=True):
proto = 'HTTP' if proto is None else proto
if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']:
more_amp = True
else:
more_amp = False
topology = constants.TOPOLOGY_SINGLE
in_lb = collections.namedtuple(
'load_balancer', 'id, name, protocol, vip, amphorae, topology, '
'listeners, enabled, project_id')
return in_lb(
id='sample_loadbalancer_id_1',
name='test-lb',
protocol=proto,
vip=sample_vip_tuple(),
amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER),
sample_amphora_tuple(
id='sample_amphora_id_2',
lb_network_ip='10.0.1.2',
vrrp_ip='10.1.1.2',
role=constants.ROLE_BACKUP)]
if more_amp else [sample_amphora_tuple()],
topology=topology,
listeners=[],
enabled=enabled,
project_id='12345'
)
def sample_lb_with_udp_listener_tuple(
proto=None, topology=None, enabled=True, pools=None):
proto = 'HTTP' if proto is None else proto
if topology and topology in ['ACTIVE_STANDBY', 'ACTIVE_ACTIVE']:
more_amp = True
else:
more_amp = False
topology = constants.TOPOLOGY_SINGLE
listeners = [sample_listener_tuple(
proto=constants.PROTOCOL_UDP,
persistence_type=constants.SESSION_PERSISTENCE_SOURCE_IP,
persistence_timeout=33,
persistence_granularity='255.255.0.0',
monitor_proto=constants.HEALTH_MONITOR_UDP_CONNECT)]
in_lb = collections.namedtuple(
'load_balancer', 'id, name, protocol, vip, amphorae, topology, '
'pools, enabled, project_id, listeners')
return in_lb(
id='sample_loadbalancer_id_1',
name='test-lb',
protocol=proto,
vip=sample_vip_tuple(),
amphorae=[sample_amphora_tuple(role=constants.ROLE_MASTER),
sample_amphora_tuple(
id='sample_amphora_id_2',
lb_network_ip='10.0.1.2',
vrrp_ip='10.1.1.2',
role=constants.ROLE_BACKUP)]
if more_amp else [sample_amphora_tuple()],
topology=topology,
listeners=listeners,
pools=pools or [],
enabled=enabled,
project_id='12345'
)
def sample_vrrp_group_tuple():
in_vrrp_group = collections.namedtuple(
'vrrp_group', 'load_balancer_id, vrrp_auth_type, vrrp_auth_pass, '
'advert_int, smtp_server, smtp_connect_timeout, '
'vrrp_group_name')
return in_vrrp_group(
vrrp_group_name='sample_loadbalancer_id_1',
load_balancer_id='sample_loadbalancer_id_1',
vrrp_auth_type='PASS',
vrrp_auth_pass='123',
advert_int='1',
smtp_server='',
smtp_connect_timeout='')
def sample_vip_tuple():
vip = collections.namedtuple('vip', 'ip_address')
return vip(ip_address='10.0.0.2')
def sample_listener_tuple(proto=None, monitor=True, alloc_default_pool=True,
persistence=True, persistence_type=None,
persistence_cookie=None, persistence_timeout=None,
persistence_granularity=None,
tls=False, sni=False, peer_port=None, topology=None,
l7=False, enabled=True, insert_headers=None,
be_proto=None, monitor_ip_port=False,
monitor_proto=None, monitor_expected_codes=None,
backup_member=False, disabled_member=False,
connection_limit=-1,
timeout_client_data=50000,
timeout_member_connect=5000,
timeout_member_data=50000,
timeout_tcp_inspect=0,
client_ca_cert=False, client_crl_cert=False,
ssl_type_l7=False, pool_cert=False,
pool_ca_cert=False, pool_crl=False,
tls_enabled=False, hm_host_http_check=False,
id='sample_listener_id_1', recursive_nest=False,
provisioning_status=constants.ACTIVE):
proto = 'HTTP' if proto is None else proto
if be_proto is None:
be_proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto
topology = 'SINGLE' if topology is None else topology
port = '443' if proto in ['HTTPS', 'TERMINATED_HTTPS'] else '80'
peer_port = 1024 if peer_port is None else peer_port
insert_headers = insert_headers or {}
in_listener = collections.namedtuple(
'listener', 'id, project_id, protocol_port, protocol, default_pool, '
'connection_limit, tls_certificate_id, '
'sni_container_ids, default_tls_container, '
'sni_containers, load_balancer, peer_port, pools, '
'l7policies, enabled, insert_headers, timeout_client_data,'
'timeout_member_connect, timeout_member_data, '
'timeout_tcp_inspect, client_ca_tls_certificate_id, '
'client_ca_tls_certificate, client_authentication, '
'client_crl_container_id, provisioning_status')
if l7:
pools = [
sample_pool_tuple(
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie,
monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto,
pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
pool_crl=pool_crl, tls_enabled=tls_enabled,
hm_host_http_check=hm_host_http_check),
sample_pool_tuple(
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie, sample_pool=2,
monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto,
pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
pool_crl=pool_crl, tls_enabled=tls_enabled,
hm_host_http_check=hm_host_http_check)]
l7policies = [
sample_l7policy_tuple('sample_l7policy_id_1', sample_policy=1),
sample_l7policy_tuple('sample_l7policy_id_2', sample_policy=2),
sample_l7policy_tuple('sample_l7policy_id_3', sample_policy=3),
sample_l7policy_tuple('sample_l7policy_id_4', sample_policy=4),
sample_l7policy_tuple('sample_l7policy_id_5', sample_policy=5),
sample_l7policy_tuple('sample_l7policy_id_6', sample_policy=6),
sample_l7policy_tuple('sample_l7policy_id_7', sample_policy=7)]
if ssl_type_l7:
l7policies.append(sample_l7policy_tuple(
'sample_l7policy_id_8', sample_policy=8))
else:
pools = [
sample_pool_tuple(
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie,
monitor_ip_port=monitor_ip_port, monitor_proto=monitor_proto,
backup_member=backup_member, disabled_member=disabled_member,
pool_cert=pool_cert, pool_ca_cert=pool_ca_cert,
pool_crl=pool_crl, tls_enabled=tls_enabled,
hm_host_http_check=hm_host_http_check)]
l7policies = []
listener = in_listener(
id=id,
project_id='12345',
protocol_port=port,
protocol=proto,
load_balancer=sample_listener_loadbalancer_tuple(proto=proto,
topology=topology),
peer_port=peer_port,
default_pool=sample_pool_tuple(
proto=be_proto, monitor=monitor, persistence=persistence,
persistence_type=persistence_type,
persistence_cookie=persistence_cookie,
persistence_timeout=persistence_timeout,
persistence_granularity=persistence_granularity,
monitor_ip_port=monitor_ip_port,
monitor_proto=monitor_proto,
monitor_expected_codes=monitor_expected_codes,
pool_cert=pool_cert,
pool_ca_cert=pool_ca_cert,
pool_crl=pool_crl,
tls_enabled=tls_enabled,
hm_host_http_check=hm_host_http_check
) if alloc_default_pool else '',
connection_limit=connection_limit,
tls_certificate_id='cont_id_1' if tls else '',
sni_container_ids=['cont_id_2', 'cont_id_3'] if sni else [],
default_tls_container=sample_tls_container_tuple(
id='cont_id_1', certificate=sample_certs.X509_CERT,
private_key=sample_certs.X509_CERT_KEY,
intermediates=sample_certs.X509_IMDS_LIST,
primary_cn=sample_certs.X509_CERT_CN
) if tls else '',
sni_containers=[
sample_tls_sni_container_tuple(
tls_container_id='cont_id_2',
tls_container=sample_tls_container_tuple(
id='cont_id_2', certificate=sample_certs.X509_CERT_2,
private_key=sample_certs.X509_CERT_KEY_2,
intermediates=sample_certs.X509_IMDS_LIST,
primary_cn=sample_certs.X509_CERT_CN_2)),
sample_tls_sni_container_tuple(
tls_container_id='cont_id_3',
tls_container=sample_tls_container_tuple(
id='cont_id_3', certificate=sample_certs.X509_CERT_3,
private_key=sample_certs.X509_CERT_KEY_3,
intermediates=sample_certs.X509_IMDS_LIST,
primary_cn=sample_certs.X509_CERT_CN_3))]
if sni else [],
pools=pools,
l7policies=l7policies,
enabled=enabled,
insert_headers=insert_headers,
timeout_client_data=timeout_client_data,
timeout_member_connect=timeout_member_connect,
timeout_member_data=timeout_member_data,
timeout_tcp_inspect=timeout_tcp_inspect,
client_ca_tls_certificate_id='cont_id_ca' if client_ca_cert else '',
client_ca_tls_certificate=sample_tls_container_tuple(
id='cont_id_ca', certificate=sample_certs.X509_CA_CERT,
primary_cn=sample_certs.X509_CA_CERT_CN
) if client_ca_cert else '',
client_authentication=(
constants.CLIENT_AUTH_MANDATORY if client_ca_cert else
constants.CLIENT_AUTH_NONE),
client_crl_container_id='cont_id_crl' if client_crl_cert else '',
provisioning_status=provisioning_status,
)
if recursive_nest:
listener.load_balancer.listeners.append(listener)
return listener
def sample_tls_sni_container_tuple(tls_container_id=None, tls_container=None):
sc = collections.namedtuple('sni_container', 'tls_container_id, '
'tls_container')
return sc(tls_container_id=tls_container_id, tls_container=tls_container)
def sample_tls_sni_containers_tuple(tls_container_id=None, tls_container=None):
sc = collections.namedtuple('sni_containers', 'tls_container_id, '
'tls_container')
return [sc(tls_container_id=tls_container_id, tls_container=tls_container)]
def sample_tls_container_tuple(id='cont_id_1', certificate=None,
private_key=None, intermediates=None,
primary_cn=None):
sc = collections.namedtuple(
'tls_container',
'id, certificate, private_key, intermediates, primary_cn')
return sc(id=id, certificate=certificate, private_key=private_key,
intermediates=intermediates or [], primary_cn=primary_cn)
def sample_pool_tuple(proto=None, monitor=True, persistence=True,
persistence_type=None, persistence_cookie=None,
persistence_timeout=None, persistence_granularity=None,
sample_pool=1, monitor_ip_port=False,
monitor_proto=None, monitor_expected_codes=None,
backup_member=False,
disabled_member=False, has_http_reuse=True,
pool_cert=False, pool_ca_cert=False, pool_crl=False,
tls_enabled=False, hm_host_http_check=False,
provisioning_status=constants.ACTIVE):
proto = 'HTTP' if proto is None else proto
monitor_proto = proto if monitor_proto is None else monitor_proto
in_pool = collections.namedtuple(
'pool', 'id, protocol, lb_algorithm, members, health_monitor, '
'session_persistence, enabled, operating_status, '
'tls_certificate_id, ca_tls_certificate_id, '
'crl_container_id, tls_enabled, provisioning_status, ' +
constants.HTTP_REUSE)
if (proto == constants.PROTOCOL_UDP and
persistence_type == constants.SESSION_PERSISTENCE_SOURCE_IP):
kwargs = {'persistence_type': persistence_type,
'persistence_timeout': persistence_timeout,
'persistence_granularity': persistence_granularity}
else:
kwargs = {'persistence_type': persistence_type,
'persistence_cookie': persistence_cookie}
persis = sample_session_persistence_tuple(**kwargs)
mon = None
if sample_pool == 1:
id = 'sample_pool_id_1'
members = [sample_member_tuple('sample_member_id_1', '10.0.0.99',
monitor_ip_port=monitor_ip_port),
sample_member_tuple('sample_member_id_2', '10.0.0.98',
monitor_ip_port=monitor_ip_port,
backup=backup_member,
enabled=not disabled_member)]
if monitor is True:
mon = sample_health_monitor_tuple(
proto=monitor_proto,
host_http_check=hm_host_http_check,
expected_codes=monitor_expected_codes)
elif sample_pool == 2:
id = 'sample_pool_id_2'
members = [sample_member_tuple('sample_member_id_3', '10.0.0.97',
monitor_ip_port=monitor_ip_port)]
if monitor is True:
mon = sample_health_monitor_tuple(
proto=monitor_proto, sample_hm=2,
host_http_check=hm_host_http_check,
expected_codes=monitor_expected_codes)
return in_pool(
id=id,
protocol=proto,
lb_algorithm='ROUND_ROBIN',
members=members,
health_monitor=mon,
session_persistence=persis if persistence is True else None,
enabled=True,
operating_status='ACTIVE', has_http_reuse=has_http_reuse,
tls_certificate_id='pool_cont_1' if pool_cert else None,
ca_tls_certificate_id='pool_ca_1' if pool_ca_cert else None,
crl_container_id='pool_crl' if pool_crl else None,
tls_enabled=tls_enabled, provisioning_status=provisioning_status)
def sample_member_tuple(id, ip, enabled=True,
operating_status=constants.ACTIVE,
provisioning_status=constants.ACTIVE,
monitor_ip_port=False, backup=False):
in_member = collections.namedtuple('member',
'id, ip_address, protocol_port, '
'weight, subnet_id, '
'enabled, operating_status, '
'monitor_address, monitor_port, '
'backup, provisioning_status')
monitor_address = '192.168.1.1' if monitor_ip_port else None
monitor_port = 9000 if monitor_ip_port else None
return in_member(
id=id,
ip_address=ip,
protocol_port=82,
weight=13,
subnet_id='10.0.0.1/24',
enabled=enabled,
operating_status=operating_status,
monitor_address=monitor_address,
monitor_port=monitor_port,
backup=backup, provisioning_status=provisioning_status)
def sample_session_persistence_tuple(persistence_type=None,
persistence_cookie=None,
persistence_timeout=None,
persistence_granularity=None):
spersistence = collections.namedtuple('SessionPersistence',
'type, cookie_name, '
'persistence_timeout, '
'persistence_granularity')
pt = 'HTTP_COOKIE' if persistence_type is None else persistence_type
return spersistence(type=pt,
cookie_name=persistence_cookie,
persistence_timeout=persistence_timeout,
persistence_granularity=persistence_granularity)
def sample_health_monitor_tuple(proto='HTTP', sample_hm=1,
host_http_check=False, expected_codes=None,
provisioning_status=constants.ACTIVE):
proto = 'HTTP' if proto == 'TERMINATED_HTTPS' else proto
monitor = collections.namedtuple(
'monitor', 'id, type, delay, timeout, fall_threshold, rise_threshold,'
'http_method, url_path, expected_codes, enabled, '
'check_script_path, http_version, domain_name, '
'provisioning_status')
if sample_hm == 1:
id = 'sample_monitor_id_1'
url_path = '/index.html'
elif sample_hm == 2:
id = 'sample_monitor_id_2'
url_path = '/healthmon.html'
kwargs = {
'id': id,
'type': proto,
'delay': 30,
'timeout': 31,
'fall_threshold': 3,
'rise_threshold': 2,
'http_method': 'GET',
'url_path': url_path,
'expected_codes': '418',
'enabled': True,
'provisioning_status': provisioning_status,
}
if host_http_check:
kwargs.update({'http_version': 1.1, 'domain_name': 'testlab.com'})
else:
kwargs.update({'http_version': 1.0, 'domain_name': None})
if expected_codes:
kwargs.update({'expected_codes': expected_codes})
if proto == constants.HEALTH_MONITOR_UDP_CONNECT:
kwargs['check_script_path'] = (CONF.haproxy_amphora.base_path +
'lvs/check/' + 'udp_check.sh')
else:
kwargs['check_script_path'] = None
return monitor(**kwargs)
def sample_l7policy_tuple(id,
action=constants.L7POLICY_ACTION_REJECT,
redirect_pool=None, redirect_url=None,
redirect_prefix=None,
enabled=True, redirect_http_code=302,
sample_policy=1,
provisioning_status=constants.ACTIVE):
in_l7policy = collections.namedtuple('l7policy',
'id, action, redirect_pool, '
'redirect_url, redirect_prefix, '
'l7rules, enabled, '
'redirect_http_code, '
'provisioning_status')
l7rules = []
if sample_policy == 1:
action = constants.L7POLICY_ACTION_REDIRECT_TO_POOL
redirect_pool = sample_pool_tuple(sample_pool=2)
l7rules = [sample_l7rule_tuple('sample_l7rule_id_1')]
elif sample_policy == 2:
action = constants.L7POLICY_ACTION_REDIRECT_TO_URL
redirect_url = 'http://www.example.com'
l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2),
sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)]
elif sample_policy == 3:
action = constants.L7POLICY_ACTION_REJECT
l7rules = [sample_l7rule_tuple('sample_l7rule_id_4', sample_rule=4),
sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)]
elif sample_policy == 4:
action = constants.L7POLICY_ACTION_REJECT
elif sample_policy == 5:
action = constants.L7POLICY_ACTION_REJECT
enabled = False
l7rules = [sample_l7rule_tuple('sample_l7rule_id_5', sample_rule=5)]
elif sample_policy == 6:
action = constants.L7POLICY_ACTION_REJECT
l7rules = [sample_l7rule_tuple('sample_l7rule_id_6', sample_rule=6)]
elif sample_policy == 7:
action = constants.L7POLICY_ACTION_REDIRECT_PREFIX
redirect_prefix = 'https://example.com'
l7rules = [sample_l7rule_tuple('sample_l7rule_id_2', sample_rule=2),
sample_l7rule_tuple('sample_l7rule_id_3', sample_rule=3)]
elif sample_policy == 8:
action = constants.L7POLICY_ACTION_REDIRECT_TO_URL
redirect_url = 'http://www.ssl-type-l7rule-test.com'
l7rules = [sample_l7rule_tuple('sample_l7rule_id_7', sample_rule=7),
sample_l7rule_tuple('sample_l7rule_id_8', sample_rule=8),
sample_l7rule_tuple('sample_l7rule_id_9', sample_rule=9),
sample_l7rule_tuple('sample_l7rule_id_10', sample_rule=10),
sample_l7rule_tuple('sample_l7rule_id_11', sample_rule=11)]
return in_l7policy(
id=id,
action=action,
redirect_pool=redirect_pool,
redirect_url=redirect_url,
redirect_prefix=redirect_prefix,
l7rules=l7rules,
enabled=enabled,
redirect_http_code=redirect_http_code
if (action in [constants.L7POLICY_ACTION_REDIRECT_TO_URL,
constants.L7POLICY_ACTION_REDIRECT_PREFIX] and
redirect_http_code) else None,
provisioning_status=provisioning_status)
def sample_l7rule_tuple(id,
type=constants.L7RULE_TYPE_PATH,
compare_type=constants.L7RULE_COMPARE_TYPE_STARTS_WITH,
key=None, value='/api',
invert=False, enabled=True,
sample_rule=1, provisioning_status=constants.ACTIVE):
in_l7rule = collections.namedtuple('l7rule',
'id, type, compare_type, '
'key, value, invert, enabled, '
'provisioning_status')
if sample_rule == 2:
type = constants.L7RULE_TYPE_HEADER
compare_type = constants.L7RULE_COMPARE_TYPE_CONTAINS
key = 'Some-header'
value = 'This string\\ with stuff'
invert = True
enabled = True
if sample_rule == 3:
type = constants.L7RULE_TYPE_COOKIE
compare_type = constants.L7RULE_COMPARE_TYPE_REGEX
key = 'some-cookie'
value = 'this.*|that'
invert = False
enabled = True
if sample_rule == 4:
type = constants.L7RULE_TYPE_FILE_TYPE
compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO
key = None
value = 'jpg'
invert = False
enabled = True
if sample_rule == 5:
type = constants.L7RULE_TYPE_HOST_NAME
compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH
key = None
value = '.example.com'
invert = False
enabled = True
if sample_rule == 6:
type = constants.L7RULE_TYPE_HOST_NAME
compare_type = constants.L7RULE_COMPARE_TYPE_ENDS_WITH
key = None
value = '.example.com'
invert = False
enabled = False
if sample_rule == 7:
type = constants.L7RULE_TYPE_SSL_CONN_HAS_CERT
compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO
key = None
value = 'tRuE'
invert = False
enabled = True
if sample_rule == 8:
type = constants.L7RULE_TYPE_SSL_VERIFY_RESULT
compare_type = constants.L7RULE_COMPARE_TYPE_EQUAL_TO
key = None
value = '1'
invert = True
enabled = True
if sample_rule == 9:
type = constants.L7RULE_TYPE_SSL_DN_FIELD
compare_type = constants.L7RULE_COMPARE_TYPE_REGEX
key = 'STREET'
value = r'^STREET.*NO\.$'
invert = True
enabled = True
if sample_rule == 10:
type = constants.L7RULE_TYPE_SSL_DN_FIELD
compare_type = constants.L7RULE_COMPARE_TYPE_STARTS_WITH
key = 'OU-3'
value = 'Orgnization Bala'
invert = True
enabled = True
return in_l7rule(
id=id,
type=type,
compare_type=compare_type,
key=key,
value=value,
invert=invert,
enabled=enabled, provisioning_status=provisioning_status)
def sample_base_expected_config(frontend=None, logging=None, backend=None,
peers=None, global_opts=None, defaults=None):
if frontend is None:
frontend = ("frontend sample_listener_id_1\n"
" maxconn {maxconn}\n"
" bind 10.0.0.2:80\n"
" mode http\n"
" default_backend sample_pool_id_1\n"
" timeout client 50000\n").format(
maxconn=constants.HAPROXY_MAX_MAXCONN)
if logging is None:
logging = (" log-format 12345\\ sample_loadbalancer_id_1\\ %f\\ "
"%ci\\ %cp\\ %t\\ %{+Q}r\\ %ST\\ %B\\ %U\\ "
"%[ssl_c_verify]\\ %{+Q}[ssl_c_s_dn]\\ %b\\ %s\\ %Tt\\ "
"%tsc\n\n")
if backend is None:
backend = ("backend sample_pool_id_1\n"
" mode http\n"
" balance roundrobin\n"
" cookie SRV insert indirect nocache\n"
" timeout check 31s\n"
" option httpchk GET /index.html HTTP/1.0\\r\\n\n"
" http-check expect rstatus 418\n"
" fullconn {maxconn}\n"
" option allbackups\n"
" timeout connect 5000\n"
" timeout server 50000\n"
" server sample_member_id_1 10.0.0.99:82 weight 13 "
"check inter 30s fall 3 rise 2 cookie sample_member_id_1\n"
" server sample_member_id_2 10.0.0.98:82 weight 13 "
"check inter 30s fall 3 rise 2 cookie sample_member_id_2\n"
"\n").format(maxconn=constants.HAPROXY_MAX_MAXCONN)
if peers is None:
peers = "\n\n"
if global_opts is None:
global_opts = " maxconn {maxconn}\n\n".format(
maxconn=constants.HAPROXY_MAX_MAXCONN)
if defaults is None:
defaults = ("defaults\n"
" log global\n"
" retries 3\n"
" option redispatch\n"
" option splice-request\n"
" option splice-response\n"
" option http-keep-alive\n\n")
return ("# Configuration for loadbalancer sample_loadbalancer_id_1\n"
"global\n"
" daemon\n"
" user nobody\n"
" log /run/rsyslog/octavia/log local0\n"
" log /run/rsyslog/octavia/log local1 notice\n"
" stats socket /var/lib/octavia/sample_listener_id_1.sock"
" mode 0666 level user\n" +
global_opts + defaults + peers + frontend + logging + backend)
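# Illustrative composition of the helpers above (a sketch, not part of the
# original module): a renderer test would typically build sample objects and
# compare the generated haproxy text against the expected template, e.g.
#     pool = sample_pool_tuple(proto='HTTP', monitor=True)
#     expected = sample_base_expected_config()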
| apache-2.0 | 8,750,399,087,215,040,000 | 36.9876 | 79 | 0.562162 | false | 3.49792 | false | false | false |
fossilet/6.00x | week8/lecture15/L15P5/histogramfun.py | 1 | 1308 | import pylab
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# wordList: list of strings
wordList = []
for line in inFile:
wordList.append(line.strip().lower())
print " ", len(wordList), "words loaded."
return wordList
def plotVowelProportionHistogram(wordList, numBins=15):
"""
Plots a histogram of the proportion of vowels in each word in wordList
using the specified number of bins in numBins
"""
vowels = [c for c in 'aeiou']
fracs = []
for word in wordList:
frac = sum(1 if c in vowels else 0 for c in word)/float(len(word))
fracs.append(frac)
pylab.figure()
pylab.hist(fracs, bins=numBins)
pylab.title('Histogram of the proportion of vowels in each word')
pylab.xlabel('Vowel proportions')
pylab.ylabel('Number of words with the vowel proportion')
pylab.show()
if __name__ == '__main__':
wordList = loadWords()
plotVowelProportionHistogram(wordList)
| mit | -5,151,276,438,908,687,000 | 28.418605 | 74 | 0.630734 | false | 3.726496 | false | false | false |
redapple/sketchtml | sketchtml/lzw.py | 1 | 1198 | import binascii
def fingerprint(tokens, dict_limit=25, token_limit=None, debug=False):
'''
Paper: "Locality Sensitive Hashing for Scalable Structural
Classification and Clustering of Web Documents"
Hachenberg, Christian; Gottron, Thomas (2013)
https://west.uni-koblenz.de/de/forschung/datensaetze/template-detection
https://dl.acm.org/citation.cfm?id=2505673
'''
d = {}
dict_entry_id = 1
buff = tuple()
prefix_id = 0
output = []
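    # LZW-style phrase dictionary: extend ``buff`` while the phrase is known;
    # when an unseen phrase appears, emit the id of its longest known prefix
    # (0 if none), register the new phrase, and stop once ``dict_limit``
    # entries have been added.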
for cnt, tok in enumerate(tokens, start=1):
if token_limit is not None and cnt > token_limit:
break
token = (tok,)
buffer_token = buff + token
if buffer_token in d:
buff = buffer_token
else:
prefix_id = d.get(buff)
if prefix_id is not None:
output.append(prefix_id)
else:
output.append(0)
d[buffer_token] = dict_entry_id
dict_entry_id += 1
buff = tuple()
if dict_entry_id > dict_limit:
break
return output
def hexfp(fingerprint):
_bytes = bytearray(fingerprint)
return binascii.hexlify(_bytes).decode('ascii')
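# Illustrative usage (a sketch, not part of the original module); the token
# stream below is made up -- real tokens would come from a tag/DOM traversal
# as in the paper cited in ``fingerprint``.
if __name__ == '__main__':
    sample_tokens = ['html', 'head', 'title', 'body', 'div', 'p',
                     'div', 'p', 'div', 'p', 'div', 'p']
    fp = fingerprint(sample_tokens, dict_limit=25)
    print(fp)         # ids of the longest known prefixes (0 when none)
    print(hexfp(fp))  # compact hexadecimal form of the fingerprint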
| mit | -7,910,488,060,922,732,000 | 26.860465 | 75 | 0.583472 | false | 3.708978 | false | false | false |
aselle/tensorflow | tensorflow/compiler/tests/rmsprop_test.py | 5 | 5189 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RMSProp optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import rmsprop
class RmspropTest(xla_test.XLATestCase):
def _rmsprop_update_numpy(self,
var,
g,
mg,
rms,
mom,
lr,
decay=0.9,
momentum=0.0,
epsilon=1e-10,
centered=False):
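    # One RMSProp step, as computed below:
    #   rms_t   = decay * rms + (1 - decay) * g^2
    #   denom_t = rms_t + epsilon          (minus mg_t^2 when centered)
    #   mom_t   = momentum * mom + lr * g / sqrt(denom_t)
    #   var_t   = var - mom_t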
rms_t = rms * decay + (1 - decay) * g * g
denom_t = rms_t + epsilon
if centered:
mg_t = mg * decay + (1 - decay) * g
denom_t -= mg_t * mg_t
else:
mg_t = mg
mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
var_t = var - mom_t
return var_t, mg_t, rms_t, mom_t
def testBasic(self):
for dtype in self.float_types:
for centered in [False, True]:
with self.test_session(), self.test_scope():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype)
mg0_np = np.array([0.0, 0.0], dtype=dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype)
rms0_np = np.array([1.0, 1.0], dtype=dtype)
rms1_np = np.array([1.0, 1.0], dtype=dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
rms_opt = rmsprop.RMSPropOptimizer(learning_rate, centered=centered)
rms_update = rms_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
mg0 = rms_opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = rms_opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = rms_opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = rms_opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = rms_opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = rms_opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of RMSProp
for _ in range(3):
rms_update.run()
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np,
grads0_np,
mg0_np,
rms0_np,
mom0_np,
learning_rate,
centered=centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np,
grads1_np,
mg1_np,
rms1_np,
mom1_np,
learning_rate,
centered=centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
if __name__ == "__main__":
test.main()
| apache-2.0 | 4,907,214,135,932,618,000 | 38.310606 | 80 | 0.564849 | false | 3.706429 | true | false | false |
humenda/GladTeX | gleetex/parser.py | 1 | 2397 | # (c) 2013-2018 Sebastian Humenda
# This code is licenced under the terms of the LGPL-3+, see the file COPYING for
# more details.
"""Top-level API to parse input documents.
The main point of the parsing is to extract formulas from a given input
document, while preserving the remaining formatting.
The returned parsed document structure is highly dependent on the input format
and is hence documented in the respective parsing functions."""
import enum
import json
import sys
from . import htmlhandling
from . import pandoc
ParseException = (
htmlhandling.ParseException
) # re-export for consistent API from outside
class Format(enum.Enum):
HTML = 0
# while this is json, we never know what other applications might decide to
# use json as their intermediate representation ;)
PANDOCFILTER = 1
@staticmethod
def parse(string):
string = string.lower()
if string == "html":
return Format.HTML
elif string == "pandocfilter":
return Format.PANDOCFILTER
else:
raise ValueError("unrecognised format: %s" % string)
def parse_document(doc, fmt):
"""This function parses an input document (string or bytes) with the given
format specifier. For HTML, the returned "parsed" document is a list of
chunks, where raw chunks are just plain HTML instructions and data and
formula chunks are parsed from the '<eq/>' tags.
If the input document is a pandoc AST, the formulas will be extracted and
the document is a tuple of (pandoc AST, formulas).
:param doc input of bytes or string to parse
:param fmt either the enum type `Format` or a string understood by Format.parse
:return (encoding, document) (a tuple)"""
if isinstance(fmt, str):
fmt = Format.parse(fmt)
encoding = None
if fmt == Format.HTML:
docparser = htmlhandling.EqnParser()
docparser.feed(doc)
encoding = docparser.get_encoding()
encoding = encoding if encoding else "utf-8"
doc = docparser.get_data()
elif fmt == Format.PANDOCFILTER:
if isinstance(doc, bytes):
doc = doc.decode(sys.getdefaultencoding())
ast = json.loads(doc)
formulas = pandoc.extract_formulas(ast)
doc = (ast, formulas) # ← see doc string
if not encoding:
encoding = sys.getdefaultencoding()
return encoding, doc
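# Illustrative usage (a sketch, not part of the original module); the HTML
# snippet is made up:
#     enc, chunks = parse_document('<p><eq>x^2</eq></p>', Format.HTML)
# ``chunks`` mixes raw HTML pieces with parsed formula chunks, and
# ``enc`` falls back to utf-8 when the document declares no encoding.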
| lgpl-3.0 | 1,394,008,949,783,631,000 | 34.220588 | 84 | 0.68142 | false | 4.094017 | false | false | false |
Arvedui/picuplib | picuplib/checks.py | 1 | 3800 | # -*- coding:utf8 -*-
# ####################### BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
# ######################## END LICENSE BLOCK #########################
"""
module for some argument checking
"""
from json import loads
from requests import head
from .exceptions import (MallformedResize, UnsupportedRotation,
UnsupportedFormat, UnkownError, ServerError,
EmptyResponse, PercentageOutOfRange)
from .globals import ALLOWED_ROTATION, USER_AGENT
def check_rotation(rotation):
"""checks rotation parameter if illegal value raises exception"""
if rotation not in ALLOWED_ROTATION:
allowed_rotation = ', '.join(ALLOWED_ROTATION)
        raise UnsupportedRotation('Rotation %s is not allowed. Allowed are %s'
% (rotation, allowed_rotation))
def check_resize(resize):
"""checks resize parameter if illegal value raises exception"""
if resize is None:
return
resize = resize.lower().strip()
if 'x' in resize:
        tmp = [x.strip() for x in resize.split('x')]
if len(tmp) == 2 and tmp[0].isdigit() and tmp[1].isdigit():
return
elif '%' in resize:
tmp = resize.split('%')[0]
if tmp.isnumeric():
tmp = int(tmp)
if 1 <= tmp <= 1000:
return
else:
raise PercentageOutOfRange("percentage must be between 1 and 1000")
    raise MallformedResize('Resize value "%s" is malformed. '
'Desired format is: {width}x{height} or {percentage}%%' % resize)
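# Illustrative calls (a sketch, not part of the original module):
#     check_resize('800x600')   # accepted, returns None
#     check_resize('80%')       # accepted, returns None
#     check_resize('1500%')     # raises PercentageOutOfRange
#     check_resize('big')       # raises MallformedResize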
def check_noexif(noexif):
"""checks if noexif parameter is boolean"""
if not isinstance(noexif, bool):
raise TypeError('noexif must be boolean')
def check_callback(callback):
"""checks if callback is callable"""
if not callable(callback) and callback is not None:
raise TypeError('%s is not callable' % callback)
def check_response(response):
"""
    Checks the response; raises an exception if the server returned an error.
"""
if response.status_code < 200 or response.status_code > 300:
raise ServerError('API requests returned with error: %s'
% response.status_code)
try:
response_text = loads(response.text)
except ValueError:
raise ServerError('The API did not returned a JSON string.')
if not response_text:
raise EmptyResponse()
if 'failure' in response_text:
if response_text['failure'] == 'Falscher Dateityp':
            raise UnsupportedFormat('Please look at picflash.org '
                                    'to see which formats are supported')
else:
raise UnkownError(response_text['failure'])
def check_if_redirect(url):
"""
checks if server redirects url
"""
response = head(url, headers={'User-Agent': USER_AGENT})
if response.status_code >= 300 and response.status_code < 400:
return response.headers['location']
return None
| lgpl-2.1 | 927,483,443,651,689,300 | 33.862385 | 92 | 0.627895 | false | 4.418605 | false | false | false |
Xeralux/tensorflow | tensorflow/contrib/eager/python/checkpointable_utils_test.py | 1 | 55336 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import six
from tensorflow.contrib.eager.python import checkpointable_utils
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras._impl.keras.engine import training
from tensorflow.python.layers import core
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import adam
from tensorflow.python.training import checkpointable
from tensorflow.python.training import saver as core_saver
from tensorflow.python.training import training_util
class NonLayerCheckpointable(checkpointable.Checkpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = checkpointable_utils.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class InterfaceTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAddVariable(self):
obj = NonLayerCheckpointable()
with self.assertRaisesRegexp(ValueError, "do not specify shape"):
checkpointable_utils.add_variable(
obj, name="shape_specified_twice", shape=[], initializer=1)
constant_initializer = checkpointable_utils.add_variable(
obj, name="constant_initializer", initializer=1)
with variable_scope.variable_scope("some_variable_scope"):
ones_initializer = checkpointable_utils.add_variable(
obj,
name="ones_initializer",
shape=[2],
initializer=init_ops.ones_initializer(dtype=dtypes.float32))
bare_initializer = checkpointable_utils.add_variable(
obj,
name="bare_initializer",
shape=[2, 2],
dtype=dtypes.float64,
initializer=init_ops.zeros_initializer)
# Even in graph mode, there are no naming conflicts between objects, only
# naming conflicts within an object.
other_duplicate = resource_variable_ops.ResourceVariable(
name="duplicate", initial_value=1.)
duplicate = checkpointable_utils.add_variable(
obj, name="duplicate", shape=[])
with self.assertRaisesRegexp(ValueError, "'duplicate' already exists"):
checkpointable_utils.add_variable(obj, name="duplicate", shape=[])
self.evaluate(checkpointable_utils.gather_initializers(obj))
self.assertEqual("constant_initializer:0", constant_initializer.name)
self.assertEqual(1, self.evaluate(constant_initializer))
self.assertEqual("some_variable_scope/ones_initializer:0",
ones_initializer.name)
self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
self.assertAllEqual([[0., 0.],
[0., 0.]], self.evaluate(bare_initializer))
self.assertEqual("a_variable:0", obj.a_variable.name)
self.assertEqual("duplicate:0", other_duplicate.name)
if context.executing_eagerly():
# When executing eagerly, there's no uniquification of variable names. The
# checkpoint name will be the same.
self.assertEqual("duplicate:0", duplicate.name)
else:
# The .name attribute may be globally influenced, but the checkpoint name
# won't be (tested below).
self.assertEqual("duplicate_1:0", duplicate.name)
named_variables, _ = checkpointable_utils._serialize_object_graph(obj)
expected_checkpoint_names = (
"a_variable/.ATTRIBUTES/VARIABLE_VALUE",
"bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"duplicate/.ATTRIBUTES/VARIABLE_VALUE",
"ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
)
six.assertCountEqual(
self, expected_checkpoint_names, named_variables.keys())
def testInitNotCalled(self):
class NoInit(checkpointable.Checkpointable):
def __init__(self):
pass
# __init__ for Checkpointable will be called implicitly.
checkpointable_utils.add_variable(NoInit(), "var", shape=[])
def testShapeDtype(self):
root = checkpointable.Checkpointable()
v1 = checkpointable_utils.add_variable(
root, name="v1", initializer=3., dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v1.dtype)
v2 = checkpointable_utils.add_variable(
root,
name="v2",
shape=[3],
initializer=init_ops.ones_initializer,
dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v2.dtype)
self.assertAllEqual([1., 1., 1.], self.evaluate(v2))
class _MirroringSaveable(core_saver.BaseSaverBuilder.SaveableObject):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
tensor = self._primary_variable.read_value()
spec = core_saver.BaseSaverBuilder.SaveSpec(
tensor=tensor,
slice_spec="",
name=name)
super(_MirroringSaveable, self).__init__(
tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(checkpointable.CheckpointableBase):
"""A Checkpointable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {checkpointable.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value),
global_step=optimizer_step)
optimizer.minimize(
lambda: other_model(input_value),
global_step=optimizer_step)
else:
train_op = optimizer.minimize(
model(input_value), global_step=optimizer_step)
optimizer.minimize(
other_model(input_value),
global_step=optimizer_step)
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
named_variables, serialized_graph = (
checkpointable_utils._serialize_object_graph(root_checkpointable))
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"optimizer_step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
# The optimizer creates two non-slot variables
"optimizer/beta1_power",
"optimizer/beta2_power",
# Slot variables
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step:0",
named_variables["optimizer_step" + suffix].name)
self.assertEqual(
"my_model/dense_1/kernel:0",
named_variables["model/_second/kernel" + suffix].name)
self.assertEqual(
"my_model/dense/kernel:0",
named_variables["model/_named_dense/kernel" + suffix].name)
self.assertEqual(
"beta1_power:0",
named_variables["optimizer/beta1_power" + suffix].name)
self.assertEqual(
"beta2_power:0",
named_variables["optimizer/beta2_power" + suffix].name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
self.assertEqual("beta1_power",
optimizer_node.children[0].local_name)
self.assertEqual("beta1_power",
serialized_graph.nodes[optimizer_node.children[0].node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.original_variable_node_id]
.attributes[0].full_name)
# We strip off the :0 suffix, as variable.name-based saving does.
self.assertEqual(
"my_model/dense/kernel/Adam",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.slot_variable_node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(
var=named_variables["model/_named_dense/kernel" + suffix],
name="m").name)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.original_variable_node_id].attributes[0].checkpoint_key)
self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
self.assertEqual(
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
checkpoint = checkpointable_utils.Checkpoint(v=v)
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
self.evaluate(v.non_dep_variable.assign(44.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(45.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(44., self.evaluate(v.non_dep_variable))
self.assertEqual(44., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes()
def testMoreComplexSaveableReturnedWithGlobalName(self):
# The same object can also be saved using the name-based saver.
v = _OwnsMirroredVariables()
saver = core_saver.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
with self.test_session() as sess:
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes()
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value))
else:
train_op = optimizer.minimize(model(input_value))
# TODO(allenl): Make initialization more pleasant when graph building.
root_checkpointable.save_counter # pylint: disable=pointless-statement
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_checkpointable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
optimizer_variables = self.evaluate(optimizer.variables())
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_checkpointable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.AdamOptimizer(
0.001,
        # Preserve beta1_power and beta2_power when applying gradients so we can
# test that they've been restored correctly.
beta1=1.0, beta2=1.0)
on_create_root = checkpointable_utils.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
self.assertAllEqual(optimizer_variables[2:],
self.evaluate(on_create_optimizer.variables()))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value)
status.assert_consumed()
beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(core_saver.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
optimizer.minimize(
lambda: model(input_value), # pylint: disable=cell-var-from-loop
global_step=root.optimizer_step)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
input_value = constant_op.constant([[3.]])
train_op = optimizer.minimize(
model(input_value),
global_step=root.global_step)
checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
with self.test_session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
else:
status.assert_consumed()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.global_step))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes()
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
def _get_checkpoint_name(self, name):
root = checkpointable.Checkpointable()
checkpointable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
named_variables, _ = checkpointable_utils._serialize_object_graph(root)
checkpoint_name, = named_variables.keys()
with ops.name_scope("root/" + checkpoint_name):
pass # Make sure we can use this as an op name if we prefix it.
return checkpoint_name
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableNameEscaping(self):
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c"))
self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b"))
self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/"))
self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S"))
self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix,
self._get_checkpoint_name(r"d/.ATTRIBUTES/f"))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNumberedPath(self):
root = checkpointable.Checkpointable()
leaf = checkpointable.Checkpointable()
root.leaf = leaf
checkpointable_utils.add_variable(leaf, name="v", shape=[])
named_variables, _ = checkpointable_utils._serialize_object_graph(root)
variable_name, = named_variables.keys()
self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", variable_name)
@test_util.run_in_graph_and_eager_modes()
def testLocalNameValidation(self):
root = checkpointable.Checkpointable()
leaf = checkpointable.Checkpointable()
# Dots are escaped, which avoids conflicts with reserved names.
root._track_checkpointable(leaf, name=".ATTRIBUTES")
checkpointable_utils.add_variable(checkpointable=leaf, name="a", shape=[])
named_variables, _ = checkpointable_utils._serialize_object_graph(root)
name, = named_variables.keys()
self.assertEqual(name, "..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE")
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.AdamOptimizer(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = checkpointable_utils.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes()
def testLateDependencyTracking(self):
class Dependency(checkpointable.Checkpointable):
def build(self):
self.var = checkpointable_utils.add_variable(
self, "var", initializer=0.)
class LateDependencies(checkpointable.Checkpointable):
def add_dep(self):
self.dep = Dependency()
self.dep.build()
original = LateDependencies()
original.add_dep()
self.evaluate(state_ops.assign(original.dep.var, 123.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpointable_utils.CheckpointableSaver(
original).save(checkpoint_prefix)
load_into = LateDependencies()
status = checkpointable_utils.CheckpointableSaver(
load_into).restore(save_path)
with self.assertRaises(AssertionError):
status.assert_consumed()
load_into.add_dep()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(123., self.evaluate(load_into.dep.var))
@test_util.run_in_graph_and_eager_modes()
def testDepAfterVar(self):
class Dependency(checkpointable.Checkpointable):
def build(self):
self.var = checkpointable_utils.add_variable(
self, "var", initializer=0.)
class DepAfterVar(checkpointable.Checkpointable):
def add_dep(self):
dep = Dependency()
dep.build()
self.dep = dep
dep_after_var = DepAfterVar()
dep_after_var.add_dep()
self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpointable_utils.CheckpointableSaver(dep_after_var).save(
checkpoint_prefix)
loaded_dep_after_var = DepAfterVar()
status = checkpointable_utils.CheckpointableSaver(
loaded_dep_after_var).restore(save_path)
loaded_dep_after_var.add_dep()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = checkpointable.Checkpointable()
root.var = checkpointable_utils.add_variable(
root, name="var", initializer=0.)
optimizer = adam.AdamOptimizer(0.1)
if context.executing_eagerly():
optimizer.minimize(root.var.read_value)
else:
train_op = optimizer.minimize(root.var)
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(checkpointable_utils.gather_initializers(
checkpointable_utils.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = checkpointable_utils.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
14.))
slots_path = checkpointable_utils.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "with_slots"))
new_root = checkpointable.Checkpointable()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = checkpointable_utils.CheckpointableSaver(
new_root).restore(slots_path)
no_slot_status = checkpointable_utils.CheckpointableSaver(
new_root).restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = checkpointable_utils.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.AdamOptimizer(0.1)
with self.assertRaisesRegexp(AssertionError, "beta1_power"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
else:
self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
None)
if context.executing_eagerly():
new_root.optimizer.minimize(new_root.var.read_value)
else:
train_op = new_root.optimizer.minimize(new_root.var)
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testOverlappingRestores(self):
checkpoint_directory = self.get_temp_dir()
save_root = checkpointable.Checkpointable()
save_root.dep = checkpointable.Checkpointable()
save_root.dep.var = checkpointable_utils.add_variable(
save_root.dep, name="var", initializer=0.)
self.evaluate(state_ops.assign(save_root.dep.var, 12.))
saver = checkpointable_utils.CheckpointableSaver(save_root)
first_path = saver.save(os.path.join(checkpoint_directory, "first"))
self.evaluate(state_ops.assign(save_root.dep.var, 13.))
second_path = saver.save(os.path.join(checkpoint_directory, "second"))
first_root = checkpointable.Checkpointable()
second_root = checkpointable.Checkpointable()
first_status = checkpointable_utils.CheckpointableSaver(
first_root).restore(first_path)
second_status = checkpointable_utils.CheckpointableSaver(
second_root).restore(second_path)
load_dep = checkpointable.Checkpointable()
load_dep.var = checkpointable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(13., self.evaluate(load_dep.var))
# Try again with the order of the restore() reversed. The last restore
# determines the final value.
first_root = checkpointable.Checkpointable()
second_root = checkpointable.Checkpointable()
second_status = checkpointable_utils.CheckpointableSaver(
second_root).restore(second_path)
first_status = checkpointable_utils.CheckpointableSaver(
first_root).restore(first_path)
load_dep = checkpointable.Checkpointable()
load_dep.var = checkpointable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAmbiguousLoad(self):
# Not OK to split one checkpoint object into two
checkpoint_directory = self.get_temp_dir()
save_root = checkpointable.Checkpointable()
save_root.dep_one = checkpointable.Checkpointable()
save_root.dep_two = checkpointable.Checkpointable()
dep_three = checkpointable.Checkpointable()
save_root.dep_one.dep_three = dep_three
save_root.dep_two.dep_three = dep_three
checkpointable_utils.add_variable(dep_three, name="var", initializer=0.)
self.evaluate(checkpointable_utils.gather_initializers(save_root))
save_path = checkpointable_utils.CheckpointableSaver(save_root).save(
os.path.join(checkpoint_directory, "ckpt"))
load_root = checkpointable.Checkpointable()
checkpointable_utils.CheckpointableSaver(load_root).restore(save_path)
load_root.dep_one = checkpointable.Checkpointable()
load_root.dep_two = checkpointable.Checkpointable()
load_root.dep_one.dep_three = checkpointable.Checkpointable()
with self.assertRaisesRegexp(AssertionError,
"resolved to different objects"):
load_root.dep_two.dep_three = checkpointable.Checkpointable()
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testObjectsCombined(self):
# Currently fine to load two checkpoint objects into one Python object
checkpoint_directory = self.get_temp_dir()
save_root = checkpointable.Checkpointable()
save_root.dep_one = checkpointable.Checkpointable()
save_root.dep_two = checkpointable.Checkpointable()
checkpointable_utils.add_variable(
save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64)
checkpointable_utils.add_variable(
save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64)
self.evaluate(checkpointable_utils.gather_initializers(save_root))
save_path = checkpointable_utils.CheckpointableSaver(save_root).save(
os.path.join(checkpoint_directory, "ckpt"))
load_root = checkpointable.Checkpointable()
load_root.dep_one = checkpointable.Checkpointable()
load_root.dep_two = load_root.dep_one
v1 = checkpointable_utils.add_variable(
load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64)
v2 = checkpointable_utils.add_variable(
load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64)
status = checkpointable_utils.CheckpointableSaver(load_root).restore(
save_path).assert_consumed()
status.run_restore_ops()
self.assertEqual(32., self.evaluate(v1))
self.assertEqual(64., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes()
def testDependencyLoop(self):
# Note: this test creates garbage during eager execution because it
# purposefully creates a reference cycle.
first = checkpointable.Checkpointable()
second = checkpointable.Checkpointable()
first.second = second
second.first = first
first.v = checkpointable_utils.add_variable(
first, "v1", initializer=[3., 1., 4.])
second.v = checkpointable_utils.add_variable(
second, "v2", initializer=[1., 1., 2., 3.])
self.evaluate(checkpointable_utils.gather_initializers(first))
checkpoint_directory = self.get_temp_dir()
save_path = checkpointable_utils.CheckpointableSaver(first).save(
os.path.join(checkpoint_directory, "ckpt"))
# Test deferred loading
first_load = checkpointable.Checkpointable()
status = checkpointable_utils.CheckpointableSaver(
first_load).restore(save_path)
second_load = checkpointable.Checkpointable()
first_load.second = second_load
second_load.first = first_load
with self.assertRaises(AssertionError):
status.assert_consumed()
first_load.v = checkpointable_utils.add_variable(
first_load, "v1", shape=[3])
second_load.v = checkpointable_utils.add_variable(
second_load, "v2", shape=[4])
status.assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
# Test loading when variables have already been created
self.evaluate(first_load.v.assign([2., 7., 1.]))
self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v))
self.evaluate(second_load.v.assign([2., 7., 1., 8.]))
self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v))
status = checkpointable_utils.CheckpointableSaver(first_load).restore(
save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
@test_util.run_in_graph_and_eager_modes()
def testRestoreOnAssign(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(save_graph):
first = checkpointable.Checkpointable()
first.var1 = variable_scope.get_variable(
name="outside_var", initializer=0.)
first.var2 = variable_scope.get_variable(
name="blah", initializer=0.)
self.evaluate(first.var1.assign(4.))
self.evaluate(first.var2.assign(8.))
save_path = checkpointable_utils.CheckpointableSaver(first).save(
checkpoint_prefix)
restore_graph = ops.Graph()
with restore_graph.as_default(), self.test_session(restore_graph):
second = checkpointable.Checkpointable()
second.var2 = variable_scope.get_variable(
name="blah", initializer=0.)
status = checkpointable_utils.CheckpointableSaver(
second).restore(save_path)
recreated_var1 = variable_scope.get_variable(
name="outside_var", initializer=0.)
status.run_restore_ops()
self.assertEqual(8., self.evaluate(second.var2))
self.evaluate(recreated_var1.assign(-2.))
self.assertEqual(-2., self.evaluate(recreated_var1))
second.var1 = recreated_var1
status.run_restore_ops()
self.assertEqual(4., self.evaluate(recreated_var1))
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = checkpointable.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.CheckpointableSaver(obj)
saver.save(checkpoint_prefix)
before_ops = graph.get_operations()
saver.save(checkpoint_prefix)
self.assertEqual(before_ops, graph.get_operations())
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testCheckpointCleanup(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = checkpointable.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.Checkpoint(obj=obj)
for _ in range(10):
saver.save(checkpoint_prefix)
expected_filenames = ["checkpoint"]
for checkpoint_number in range(6, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
expected_filenames.append(
"ckpt-%d.data-00000-of-00001" % (checkpoint_number,))
six.assertCountEqual(
self,
expected_filenames,
os.listdir(checkpoint_directory))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testCheckpointCleanupChangingVarList(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = checkpointable.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(checkpointable_utils.gather_initializers(obj))
checkpoint = checkpointable_utils.Checkpoint(obj=obj)
looped_variables = []
for iteration in range(10):
new_variable = resource_variable_ops.ResourceVariable(iteration)
self.evaluate(new_variable.initializer)
setattr(checkpoint, "var_%d" % iteration, new_variable)
checkpoint.save(checkpoint_prefix)
looped_variables.append(new_variable)
expected_filenames = ["checkpoint"]
# We've copied the saver each time, but checkpoint management should still
# be consistent.
for checkpoint_number in range(6, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
expected_filenames.append(
"ckpt-%d.data-00000-of-00001" % (checkpoint_number,))
six.assertCountEqual(
self,
expected_filenames,
os.listdir(checkpoint_directory))
for v in looped_variables:
self.evaluate(v.assign(314))
checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops()
self.assertEqual(314, self.evaluate(checkpoint.var_9))
self.assertEqual(314, self.evaluate(checkpoint.var_8))
self.assertEqual(314, self.evaluate(checkpoint.var_6))
self.assertEqual(5, self.evaluate(checkpoint.var_5))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
if context.executing_eagerly():
checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops()
self.assertEqual(9, self.evaluate(checkpoint.var_9))
self.assertEqual(8, self.evaluate(checkpoint.var_8))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
else:
# Restoring into modified graphs is an error while graph building.
with self.assertRaises(NotImplementedError):
checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops()
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = checkpointable.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(checkpointable_utils.gather_initializers(obj))
saver = checkpointable_utils.CheckpointableSaver(obj)
save_path = saver.save(checkpoint_prefix)
saver.restore(save_path)
before_ops = graph.get_operations()
saver.restore(save_path)
self.assertEqual(before_ops, graph.get_operations())
def testMultipleGraphsNonSlotVariables(self):
with context.graph_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer = adam.AdamOptimizer(0.001)
# Construct a model in one graph
first_graph = ops.Graph()
first_session = session_lib.Session(graph=first_graph)
with first_graph.as_default(), first_session.as_default():
first_variable = resource_variable_ops.ResourceVariable([1.])
first_root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, variable=first_variable)
train_op = optimizer.minimize(first_variable.read_value)
self.evaluate(checkpointable_utils.gather_initializers(
first_root_checkpointable))
self.evaluate(train_op)
self.evaluate(first_variable.assign([1.]))
self.evaluate(optimizer.get_slot(
var=first_variable, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
# Save and load in a second graph
second_graph = ops.Graph()
with second_graph.as_default(), session_lib.Session(graph=second_graph):
second_variable = resource_variable_ops.ResourceVariable([1.])
second_root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, variable=second_variable)
train_op = optimizer.minimize(second_variable.read_value)
second_root_checkpointable.restore(None).initialize_or_restore()
self.evaluate(train_op)
self.evaluate(second_variable.assign([4.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([5.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(6.))
save_path = second_root_checkpointable.save(checkpoint_prefix)
self.evaluate(second_variable.assign([7.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([8.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
status = second_root_checkpointable.restore(save_path)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([4.], self.evaluate(second_variable))
self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
var=second_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
# Check that the first graph is unmolested
with first_graph.as_default(), first_session.as_default():
self.assertAllEqual([1.], self.evaluate(first_variable))
self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
var=first_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
class TemplateTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_checkpointable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer())
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer())
return v, v + 1., v2
save_template = template.make_template("s1", _templated)
save_root = checkpointable_utils.Checkpoint(my_template=save_template)
v1_save, _, v2_save = save_template()
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_root = checkpointable_utils.Checkpoint(my_template=load_template)
status = load_root.restore(save_path)
var, var_plus_one, var2 = load_template()
self.assertEqual(2, len(load_template._checkpoint_dependencies))
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_checkpointable_save_restore_nested(self):
def _inner_template():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer())
return v
def _outer_template():
first_inner = template.make_template("i1", _inner_template)
second_inner = template.make_template("i2", _inner_template)
v1 = first_inner()
v2 = second_inner()
v3 = second_inner()
return (first_inner, second_inner), (v1, v2, v3)
with variable_scope.variable_scope("ignored"):
save_template = template.make_template("s1", _outer_template)
save_root = checkpointable_utils.Checkpoint(my_template=save_template)
(inner_template_one, inner_template_two), _ = save_template()
self.evaluate(inner_template_one.variables[0].assign([20.]))
self.evaluate(inner_template_two.variables[0].assign([25.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _outer_template)
load_root = checkpointable_utils.Checkpoint(my_template=load_template)
status = load_root.restore(save_path)
(inner_template_one, inner_template_two), (v1, v2, v3) = load_template()
outer_template_dependencies = load_root.my_template._checkpoint_dependencies
self.assertEqual(2, len(outer_template_dependencies))
self.assertEqual("i1", outer_template_dependencies[0].name)
self.assertIs(inner_template_one, outer_template_dependencies[0].ref)
self.assertEqual("i2", outer_template_dependencies[1].name)
self.assertIs(inner_template_two, outer_template_dependencies[1].ref)
self.assertEqual(1, len(inner_template_one._checkpoint_dependencies))
self.assertEqual("v", inner_template_one._checkpoint_dependencies[0].name)
self.assertEqual(1, len(inner_template_two._checkpoint_dependencies))
self.assertEqual("v", inner_template_two._checkpoint_dependencies[0].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([20.], self.evaluate(v1))
self.assertAllEqual([25.], self.evaluate(v2))
self.assertAllEqual([25.], self.evaluate(v3))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = checkpointable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(checkpointable_utils.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = core_saver.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes()
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = checkpointable_utils.CheckpointableSaver(root)
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_consumed()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status.initialize_or_restore()
self._check_sentinels(root)
# TODO(allenl): Test for the core name-based saver loading object-based
# checkpoints once object-based checkpointing is in core.
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(
session=session, file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
object_saver = checkpointable_utils.CheckpointableSaver(root)
save_path = object_saver.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
| apache-2.0 | -1,253,107,922,935,844,900 | 43.988618 | 80 | 0.683533 | false | 3.760772 | true | false | false |
walletmona/coinunifier | script/unify_coins_simple.py | 1 | 3723 | #!/usr/bin/env python
import sys
from optparse import OptionParser
from bisect import bisect_left, bisect_right
from coinunifier.wallet.factory import load_wallet
##
## Process arguments
##
USAGE = '''
% unify_coins_simple.py [OPTIONS] KIND THRESHOLD ADDRESS
KIND: kind of coin (e.g. bitcoin, litecoin, ...)
THRESHOLD: threshold amount
ADDRESS: address to send coins'''
DESCRIPTION = \
    'Make a free transaction with sub-THRESHOLD coins and a single' \
    ' large-amount-and-high-priority coin. Then, send a minimal amount of' \
' coins (== DUST_SOFT_LIMIT) to the ADDRESS by using the inputs and' \
' deposit the change. This script is useful to unify sub-threshold coins' \
' into one without fee.'
optparser = OptionParser(USAGE, description=DESCRIPTION)
optparser.add_option('', '--no-dry-run',
action='store_false', dest='dryrun', default=True,
help='Broadcast a transaction to nodes')
(opts, args) = optparser.parse_args()
if len(args) != 3:
optparser.error("Incorrect number of arguments.")
kind = args[0]
theta = int(float(args[1]) * 10**8)
address = args[2]
##
## Functions
##
def coins2inputs(coins):
res = []
for c in coins:
res.append({"txid": c['txid'], "vout": c['vout']})
return res
def cumsum(ls):
res = list(ls) # shallow copy
for i in range(1, len(res)): res[i] += res[i-1]
return res
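# (illustrative note) cumsum([1, 2, 3]) returns [1, 3, 6]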
# Unify sub-threshold coins to a large-amount-and-high-priority coin
#
# O(n log n)
def unify_coins_simple(wallet, coins):
n = len(coins)
remain = wallet.free_tx_size-1 - wallet.base_size - 2*wallet.output_size
maxin = min(n, int(remain / wallet.input_size))
coins.sort(key=lambda x: x['amount'])
amounts = [c['amount'] for c in coins]
prios = [c['prio'] for c in coins]
camounts = cumsum(amounts)
cprios = cumsum(prios)
hiprios = list(prios)
for i in range(len(prios)-1, 0, -1):
hiprios[i-1] = max(hiprios[i-1], hiprios[i])
num = min(bisect_right(amounts, theta), maxin-1)
if num == 0:
print('No sub-threshold coins found')
return
# Determine included sub-threshold coins by binary search in (left, right]
left = 0
right = num
while left < right:
# use coins in range [0, m) and a large coin
m = int((left + right + 1) / 2)
size = wallet.base_size + (m+1)*wallet.input_size + 2*wallet.output_size
index = bisect_left(amounts, 2*wallet.dust_soft_limit - camounts[m-1],
lo=m)
if cprios[m-1]+hiprios[index] < wallet.prio_threshold*size:
# decrease size
right = m-1
else:
# increase size
left = m
num = left
if num == 0:
print('No large coin found')
return
size = wallet.base_size + (num+1)*wallet.input_size + 2*wallet.output_size
# Find a large coin
index = bisect_left(amounts, 2*wallet.dust_soft_limit - camounts[num-1],
lo=num)
while cprios[num-1]+prios[index] < wallet.prio_threshold*size:
index += 1
res = coins[0:num]
res.append(coins[index])
inputs = coins2inputs(res)
if opts.dryrun:
print('Inputs (confirmations amount)')
for c in res:
print(' %6d %.8f' % (c['confirmations'],
float(c['amount']) / 10**8))
wallet.show_send_info(inputs, address, wallet.dust_soft_limit)
print('Add --no-dry-run option to proceed')
else:
print(wallet.send(inputs, address, wallet.dust_soft_limit))
##
## Main
##
wallet = load_wallet(kind)
wallet.connect()
unify_coins_simple(wallet, wallet.unspent_coins())
| gpl-2.0 | 6,460,143,289,946,502,000 | 26.375 | 80 | 0.607037 | false | 3.223377 | false | false | false |
cloudify-cosmo/cloudify-plugins-common | cloudify/workflows/tasks.py | 1 | 29721 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import sys
import time
import uuid
import Queue
from cloudify import utils
from cloudify import exceptions
from cloudify.workflows import api
from cloudify.celery.app import get_celery_app
from cloudify.manager import get_node_instance
from cloudify.constants import MGMTWORKER_QUEUE
INFINITE_TOTAL_RETRIES = -1
DEFAULT_TOTAL_RETRIES = INFINITE_TOTAL_RETRIES
DEFAULT_RETRY_INTERVAL = 30
DEFAULT_SUBGRAPH_TOTAL_RETRIES = 0
DEFAULT_SEND_TASK_EVENTS = True
TASK_PENDING = 'pending'
TASK_SENDING = 'sending'
TASK_SENT = 'sent'
TASK_STARTED = 'started'
TASK_RESCHEDULED = 'rescheduled'
TASK_SUCCEEDED = 'succeeded'
TASK_FAILED = 'failed'
TERMINATED_STATES = [TASK_RESCHEDULED, TASK_SUCCEEDED, TASK_FAILED]
DISPATCH_TASK = 'cloudify.dispatch.dispatch'
INSPECT_TIMEOUT = 30
def retry_failure_handler(task):
"""Basic on_success/on_failure handler that always returns retry"""
return HandlerResult.retry()
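# Illustrative sketch (added; not part of the original module): any on_success/on_failure
# handler follows the same protocol -- it receives the task and returns a HandlerResult.
def example_ignore_failure_handler(task):
    """Example on_failure handler that ignores the failure and lets the workflow continue."""
    return HandlerResult.ignore()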
class WorkflowTask(object):
"""A base class for workflow tasks"""
def __init__(self,
workflow_context,
task_id=None,
info=None,
on_success=None,
on_failure=None,
total_retries=DEFAULT_TOTAL_RETRIES,
retry_interval=DEFAULT_RETRY_INTERVAL,
send_task_events=DEFAULT_SEND_TASK_EVENTS):
"""
:param task_id: The id of this task (generated if none is provided)
:param info: A short description of this task (for logging)
:param on_success: A handler called when the task's execution
terminates successfully.
Expected to return one of
[HandlerResult.retry(), HandlerResult.cont()]
to indicate whether this task should be re-executed.
:param on_failure: A handler called when the task's execution
fails.
Expected to return one of
[HandlerResult.retry(), HandlerResult.ignore(),
HandlerResult.fail()]
to indicate whether this task should be re-executed,
cause the engine to terminate workflow execution
immediately or simply ignore this task failure and
move on.
        :param total_retries: Maximum number of retry attempts for this task,
                              in case the handlers request a retry.
:param retry_interval: Number of seconds to wait between retries
:param workflow_context: the CloudifyWorkflowContext instance
"""
self.id = task_id or str(uuid.uuid4())
self._state = TASK_PENDING
self.async_result = None
self.on_success = on_success
self.on_failure = on_failure
self.info = info
self.error = None
self.total_retries = total_retries
self.retry_interval = retry_interval
self.terminated = Queue.Queue(maxsize=1)
self.is_terminated = False
self.workflow_context = workflow_context
self.send_task_events = send_task_events
self.containing_subgraph = None
self.current_retries = 0
# timestamp for which the task should not be executed
# by the task graph before reached, overridden by the task
# graph during retries
self.execute_after = time.time()
def dump(self):
return {
'id': self.id,
'state': self.get_state(),
'info': self.info,
'error': self.error,
'current_retries': self.current_retries,
'cloudify_context': self.cloudify_context
}
def is_remote(self):
"""
:return: Is this a remote task
"""
return not self.is_local()
def is_local(self):
"""
:return: Is this a local task
"""
raise NotImplementedError('Implemented by subclasses')
def is_nop(self):
"""
:return: Is this a NOP task
"""
return False
def get_state(self):
"""
Get the task state
:return: The task state [pending, sending, sent, started,
rescheduled, succeeded, failed]
"""
return self._state
def set_state(self, state):
"""
Set the task state
:param state: The state to set [pending, sending, sent, started,
rescheduled, succeeded, failed]
"""
if state not in [TASK_PENDING, TASK_SENDING, TASK_SENT, TASK_STARTED,
TASK_RESCHEDULED, TASK_SUCCEEDED, TASK_FAILED]:
raise RuntimeError('Illegal state set on task: {0} '
'[task={1}]'.format(state, str(self)))
self._state = state
if state in TERMINATED_STATES:
self.is_terminated = True
self.terminated.put_nowait(True)
def wait_for_terminated(self, timeout=None):
if self.is_terminated:
return
self.terminated.get(timeout=timeout)
def handle_task_terminated(self):
if self.get_state() in (TASK_FAILED, TASK_RESCHEDULED):
handler_result = self._handle_task_not_succeeded()
else:
handler_result = self._handle_task_succeeded()
if handler_result.action == HandlerResult.HANDLER_RETRY:
if any([self.total_retries == INFINITE_TOTAL_RETRIES,
self.current_retries < self.total_retries,
handler_result.ignore_total_retries]):
if handler_result.retry_after is None:
handler_result.retry_after = self.retry_interval
if handler_result.retried_task is None:
new_task = self.duplicate_for_retry(
time.time() + handler_result.retry_after)
handler_result.retried_task = new_task
else:
handler_result.action = HandlerResult.HANDLER_FAIL
if self.containing_subgraph:
subgraph = self.containing_subgraph
retried_task = None
if handler_result.action == HandlerResult.HANDLER_FAIL:
handler_result.action = HandlerResult.HANDLER_IGNORE
# It is possible that two concurrent tasks failed.
# we will only consider the first one handled
if not subgraph.failed_task:
subgraph.failed_task = self
subgraph.set_state(TASK_FAILED)
elif handler_result.action == HandlerResult.HANDLER_RETRY:
retried_task = handler_result.retried_task
subgraph.task_terminated(task=self, new_task=retried_task)
return handler_result
def _handle_task_succeeded(self):
"""Call handler for task success"""
if self.on_success:
return self.on_success(self)
else:
return HandlerResult.cont()
def _handle_task_not_succeeded(self):
"""
Call handler for task which hasn't ended in 'succeeded' state
(i.e. has either failed or been rescheduled)
"""
try:
exception = self.async_result.result
except Exception as e:
exception = exceptions.NonRecoverableError(
'Could not de-serialize '
'exception of task {0} --> {1}: {2}'
.format(self.name,
type(e).__name__,
str(e)))
if isinstance(exception, exceptions.OperationRetry):
# operation explicitly requested a retry, so we ignore
# the handler set on the task.
handler_result = HandlerResult.retry()
elif self.on_failure:
handler_result = self.on_failure(self)
else:
handler_result = HandlerResult.retry()
if handler_result.action == HandlerResult.HANDLER_RETRY:
if isinstance(exception, exceptions.NonRecoverableError):
handler_result = HandlerResult.fail()
elif isinstance(exception, exceptions.RecoverableError):
handler_result.retry_after = exception.retry_after
if not self.is_subgraph:
causes = []
if isinstance(exception, (exceptions.RecoverableError,
exceptions.NonRecoverableError)):
causes = exception.causes or []
if isinstance(self, LocalWorkflowTask):
tb = self.async_result._holder.error[1]
causes.append(utils.exception_to_error_cause(exception, tb))
self.workflow_context.internal.send_task_event(
state=self.get_state(),
task=self,
event={'exception': exception, 'causes': causes})
return handler_result
def __str__(self):
suffix = self.info if self.info is not None else ''
return '{0}({1})'.format(self.name, suffix)
def duplicate_for_retry(self, execute_after):
"""
:return: A new instance of this task with a new task id
"""
dup = self._duplicate()
dup.execute_after = execute_after
dup.current_retries = self.current_retries + 1
if dup.cloudify_context and 'operation' in dup.cloudify_context:
op_ctx = dup.cloudify_context['operation']
op_ctx['retry_number'] = dup.current_retries
return dup
def _duplicate(self):
raise NotImplementedError('Implemented by subclasses')
@property
def cloudify_context(self):
raise NotImplementedError('Implemented by subclasses')
@property
def name(self):
"""
:return: The task name
"""
raise NotImplementedError('Implemented by subclasses')
@property
def is_subgraph(self):
return False
class RemoteWorkflowTask(WorkflowTask):
"""A WorkflowTask wrapping a celery based task"""
    # cache for registered task queries to celery workers
cache = {}
def __init__(self,
kwargs,
cloudify_context,
workflow_context,
task_queue=None,
task_target=None,
task_id=None,
info=None,
on_success=None,
on_failure=retry_failure_handler,
total_retries=DEFAULT_TOTAL_RETRIES,
retry_interval=DEFAULT_RETRY_INTERVAL,
send_task_events=DEFAULT_SEND_TASK_EVENTS):
"""
:param kwargs: The keyword argument this task will be invoked with
:param cloudify_context: the cloudify context dict
        :param task_queue: the name of the queue this task should be sent to
        :param task_target: the name of the worker this task should be sent to
:param task_id: The id of this task (generated if none is provided)
:param info: A short description of this task (for logging)
:param on_success: A handler called when the task's execution
terminates successfully.
Expected to return one of
[HandlerResult.retry(), HandlerResult.cont()]
to indicate whether this task should be re-executed.
:param on_failure: A handler called when the task's execution
fails.
Expected to return one of
[HandlerResult.retry(), HandlerResult.ignore(),
HandlerResult.fail()]
to indicate whether this task should be re-executed,
cause the engine to terminate workflow execution
immediately or simply ignore this task failure and
move on.
        :param total_retries: Maximum number of retry attempts for this task,
                              in case the handlers request a retry.
:param retry_interval: Number of seconds to wait between retries
:param workflow_context: the CloudifyWorkflowContext instance
"""
super(RemoteWorkflowTask, self).__init__(
workflow_context,
task_id,
info=info,
on_success=on_success,
on_failure=on_failure,
total_retries=total_retries,
retry_interval=retry_interval,
send_task_events=send_task_events)
self._task_target = task_target
self._task_queue = task_queue
self._kwargs = kwargs
self._cloudify_context = cloudify_context
self._cloudify_agent = None
def apply_async(self):
"""
Call the underlying celery tasks apply_async. Verify the worker
is alive and send an event before doing so.
:return: a RemoteWorkflowTaskResult instance wrapping the
celery async result
"""
try:
self._set_queue_kwargs()
self._verify_worker_alive()
task = self.workflow_context.internal.handler.get_task(
self, queue=self._task_queue, target=self._task_target)
self.workflow_context.internal.send_task_event(TASK_SENDING, self)
async_result = self.workflow_context.internal.handler.send_task(
self, task)
self.async_result = RemoteWorkflowTaskResult(self, async_result)
self.set_state(TASK_SENT)
except (exceptions.NonRecoverableError,
exceptions.RecoverableError) as e:
self.set_state(TASK_FAILED)
self.async_result = RemoteWorkflowErrorTaskResult(self, e)
return self.async_result
def is_local(self):
return False
def _duplicate(self):
dup = RemoteWorkflowTask(kwargs=self._kwargs,
task_queue=self.queue,
task_target=self.target,
cloudify_context=self.cloudify_context,
workflow_context=self.workflow_context,
task_id=None, # we want a new task id
info=self.info,
on_success=self.on_success,
on_failure=self.on_failure,
total_retries=self.total_retries,
retry_interval=self.retry_interval,
send_task_events=self.send_task_events)
dup.cloudify_context['task_id'] = dup.id
return dup
@property
def name(self):
"""The task name"""
return self.cloudify_context['task_name']
@property
def cloudify_context(self):
return self._cloudify_context
@property
def target(self):
"""The task target (worker name)"""
return self._task_target
@property
def queue(self):
"""The task queue"""
return self._task_queue
@property
def kwargs(self):
"""kwargs to pass when invoking the task"""
return self._kwargs
def _verify_worker_alive(self):
verify_worker_alive(self.name,
self.target,
self._get_registered)
def _get_registered(self):
tenant = self.workflow_context.tenant
with get_celery_app(tenant=tenant, target=self.target) as app:
worker_name = 'celery@{0}'.format(self.target)
inspect = app.control.inspect(destination=[worker_name],
timeout=INSPECT_TIMEOUT)
registered = inspect.registered()
if registered is None or worker_name not in registered:
return None
return set(registered[worker_name])
def _set_queue_kwargs(self):
if self._task_queue is None:
self._task_queue = self._derive('queue')
if self._task_target is None:
self._task_target = self._derive('name')
self.kwargs['__cloudify_context']['task_queue'] = self._task_queue
self.kwargs['__cloudify_context']['task_target'] = self._task_target
def _derive(self, property_name):
executor = self.cloudify_context['executor']
host_id = self.cloudify_context['host_id']
if executor == 'host_agent':
if self._cloudify_agent is None:
host_node_instance = get_node_instance(host_id)
cloudify_agent = host_node_instance.runtime_properties.get(
'cloudify_agent', {})
if property_name not in cloudify_agent:
raise exceptions.NonRecoverableError(
'Missing cloudify_agent.{0} runtime information. '
'This most likely means that the Compute node was '
'never started successfully'.format(property_name))
self._cloudify_agent = cloudify_agent
return self._cloudify_agent[property_name]
else:
return MGMTWORKER_QUEUE
class LocalWorkflowTask(WorkflowTask):
"""A WorkflowTask wrapping a local callable"""
def __init__(self,
local_task,
workflow_context,
node=None,
info=None,
on_success=None,
on_failure=retry_failure_handler,
total_retries=DEFAULT_TOTAL_RETRIES,
retry_interval=DEFAULT_RETRY_INTERVAL,
send_task_events=DEFAULT_SEND_TASK_EVENTS,
kwargs=None,
task_id=None,
name=None):
"""
:param local_task: A callable
:param workflow_context: the CloudifyWorkflowContext instance
:param node: The CloudifyWorkflowNode instance (if in node context)
:param info: A short description of this task (for logging)
:param on_success: A handler called when the task's execution
terminates successfully.
Expected to return one of
[HandlerResult.retry(), HandlerResult.cont()]
to indicate whether this task should be re-executed.
:param on_failure: A handler called when the task's execution
fails.
Expected to return one of
[HandlerResult.retry(), HandlerResult.ignore(),
HandlerResult.fail()]
to indicate whether this task should be re-executed,
cause the engine to terminate workflow execution
immediately or simply ignore this task failure and
move on.
        :param total_retries: Maximum number of retry attempts for this task,
                              in case the handlers request a retry.
:param retry_interval: Number of seconds to wait between retries
:param kwargs: Local task keyword arguments
:param name: optional parameter (default: local_task.__name__)
"""
super(LocalWorkflowTask, self).__init__(
info=info,
on_success=on_success,
on_failure=on_failure,
total_retries=total_retries,
retry_interval=retry_interval,
task_id=task_id,
workflow_context=workflow_context,
send_task_events=send_task_events)
self.local_task = local_task
self.node = node
self.kwargs = kwargs or {}
self._name = name or local_task.__name__
def dump(self):
super_dump = super(LocalWorkflowTask, self).dump()
super_dump.update({
'name': self._name
})
return super_dump
def apply_async(self):
"""
Execute the task in the local task thread pool
:return: A wrapper for the task result
"""
def local_task_wrapper():
try:
self.workflow_context.internal.send_task_event(TASK_STARTED,
self)
result = self.local_task(**self.kwargs)
self.workflow_context.internal.send_task_event(
TASK_SUCCEEDED, self, event={'result': str(result)})
self.async_result._holder.result = result
self.set_state(TASK_SUCCEEDED)
except BaseException as e:
new_task_state = TASK_RESCHEDULED if isinstance(
e, exceptions.OperationRetry) else TASK_FAILED
exc_type, exception, tb = sys.exc_info()
self.async_result._holder.error = (exception, tb)
self.set_state(new_task_state)
self.async_result = LocalWorkflowTaskResult(self)
self.workflow_context.internal.send_task_event(TASK_SENDING, self)
self.set_state(TASK_SENT)
self.workflow_context.internal.add_local_task(local_task_wrapper)
return self.async_result
def is_local(self):
return True
def _duplicate(self):
dup = LocalWorkflowTask(local_task=self.local_task,
workflow_context=self.workflow_context,
node=self.node,
info=self.info,
on_success=self.on_success,
on_failure=self.on_failure,
total_retries=self.total_retries,
retry_interval=self.retry_interval,
send_task_events=self.send_task_events,
kwargs=self.kwargs,
name=self.name)
return dup
@property
def name(self):
"""The task name"""
return self._name
@property
def cloudify_context(self):
return self.kwargs.get('__cloudify_context')
# NOP tasks class
class NOPLocalWorkflowTask(LocalWorkflowTask):
def __init__(self, workflow_context):
super(NOPLocalWorkflowTask, self).__init__(lambda: None,
workflow_context)
@property
def name(self):
"""The task name"""
return 'NOP'
def apply_async(self):
self.set_state(TASK_SUCCEEDED)
return LocalWorkflowTaskResult(self)
def is_nop(self):
return True
# Dry run tasks class
class DryRunLocalWorkflowTask(LocalWorkflowTask):
def apply_async(self):
self.workflow_context.internal.send_task_event(TASK_SENDING, self)
self.workflow_context.internal.send_task_event(TASK_STARTED, self)
self.workflow_context.internal.send_task_event(
TASK_SUCCEEDED,
self,
event={'result': 'dry run'}
)
self.set_state(TASK_SUCCEEDED)
return LocalWorkflowTaskResult(self)
def is_nop(self):
return True
class WorkflowTaskResult(object):
"""A base wrapper for workflow task results"""
def __init__(self, task):
self.task = task
def _process(self, retry_on_failure):
if self.task.workflow_context.internal.graph_mode:
return self._get()
task_graph = self.task.workflow_context.internal.task_graph
while True:
self._wait_for_task_terminated()
handler_result = self.task.handle_task_terminated()
task_graph.remove_task(self.task)
try:
result = self._get()
if handler_result.action != HandlerResult.HANDLER_RETRY:
return result
except Exception:
if (not retry_on_failure or
handler_result.action == HandlerResult.HANDLER_FAIL):
raise
self._sleep(handler_result.retry_after)
self.task = handler_result.retried_task
task_graph.add_task(self.task)
self._check_execution_cancelled()
self.task.apply_async()
self._refresh_state()
@staticmethod
def _check_execution_cancelled():
if api.has_cancel_request():
raise api.ExecutionCancelled()
def _wait_for_task_terminated(self):
while True:
self._check_execution_cancelled()
try:
self.task.wait_for_terminated(timeout=1)
break
except Queue.Empty:
continue
def _sleep(self, seconds):
while seconds > 0:
self._check_execution_cancelled()
sleep_time = 1 if seconds > 1 else seconds
time.sleep(sleep_time)
seconds -= sleep_time
def get(self, retry_on_failure=True):
"""
Get the task result.
Will block until the task execution ends.
:return: The task result
"""
return self._process(retry_on_failure)
def _get(self):
raise NotImplementedError('Implemented by subclasses')
def _refresh_state(self):
raise NotImplementedError('Implemented by subclasses')
class RemoteWorkflowErrorTaskResult(WorkflowTaskResult):
def __init__(self, task, exception):
super(RemoteWorkflowErrorTaskResult, self).__init__(task)
self.exception = exception
def _get(self):
raise self.exception
@property
def result(self):
return self.exception
class RemoteWorkflowTaskResult(WorkflowTaskResult):
"""A wrapper for celery's AsyncResult"""
def __init__(self, task, async_result):
super(RemoteWorkflowTaskResult, self).__init__(task)
self.async_result = async_result
def _get(self):
return self.async_result.get()
def _refresh_state(self):
self.async_result = self.task.async_result.async_result
@property
def result(self):
return self.async_result.result
class LocalWorkflowTaskResult(WorkflowTaskResult):
"""A wrapper for local workflow task results"""
class ResultHolder(object):
def __init__(self, result=None, error=None):
self.result = result
self.error = error
def __init__(self, task):
"""
:param task: The LocalWorkflowTask instance
"""
super(LocalWorkflowTaskResult, self).__init__(task)
self._holder = self.ResultHolder()
def _get(self):
if self._holder.error is not None:
exception, traceback = self._holder.error
raise exception, None, traceback
return self._holder.result
def _refresh_state(self):
self._holder = self.task.async_result._holder
@property
def result(self):
if self._holder.error:
return self._holder.error[0]
else:
return self._holder.result
class StubAsyncResult(object):
"""Stub async result that always returns None"""
result = None
class HandlerResult(object):
HANDLER_RETRY = 'handler_retry'
HANDLER_FAIL = 'handler_fail'
HANDLER_IGNORE = 'handler_ignore'
HANDLER_CONTINUE = 'handler_continue'
def __init__(self,
action,
ignore_total_retries=False,
retry_after=None):
self.action = action
self.ignore_total_retries = ignore_total_retries
self.retry_after = retry_after
        # this field is filled by WorkflowTask.handle_task_terminated() after
        # duplicating the task and updating the relevant task fields,
        # or by a subgraph on_XXX handler
self.retried_task = None
@classmethod
def retry(cls, ignore_total_retries=False, retry_after=None):
return HandlerResult(cls.HANDLER_RETRY,
ignore_total_retries=ignore_total_retries,
retry_after=retry_after)
@classmethod
def fail(cls):
return HandlerResult(cls.HANDLER_FAIL)
@classmethod
def cont(cls):
return HandlerResult(cls.HANDLER_CONTINUE)
@classmethod
def ignore(cls):
return HandlerResult(cls.HANDLER_IGNORE)
def verify_worker_alive(name, target, get_registered):
cache = RemoteWorkflowTask.cache
registered = cache.get(target)
if not registered:
registered = get_registered()
cache[target] = registered
if registered is None:
raise exceptions.RecoverableError(
'Timed out querying worker celery@{0} for its registered '
'tasks. [timeout={1} seconds]'.format(target, INSPECT_TIMEOUT))
if DISPATCH_TASK not in registered:
raise exceptions.NonRecoverableError(
'Missing {0} task in worker {1} \n'
'Registered tasks are: {2}. (This probably means the agent '
'configuration is invalid) [{3}]'.format(
DISPATCH_TASK, target, registered, name))
| apache-2.0 | -3,218,372,813,292,323,300 | 35.467485 | 79 | 0.573635 | false | 4.565438 | false | false | false |
p-ho/emailIdx | emailidx/contentfilter/SMimeDecryption.py | 1 | 3901 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# emailIdx - Synchronizes emails from IMAP to Elasticsearch
# Copyright (C) 2015 Paul Hofmann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#########################################################################################
# Imports #
#########################################################################################
from M2Crypto import SMIME, BIO
from emailidx import SslKeystore, EmailSerializer
#########################################################################################
# Actual Decryption #
#########################################################################################
def actual_decrypt_message(message, content_type=None, content_transfer_encoding=None):
msg_str = ""
if content_type is not None:
msg_str += "Content-Type: %s\r\n" % content_type
if content_transfer_encoding is not None:
msg_str += "Content-Transfer-Encoding: %s\r\n" % content_transfer_encoding
msg_str += "\r\n%s\r\n" % message
msg_buf = BIO.MemoryBuffer(msg_str)
p7 = SMIME.smime_load_pkcs7_bio(msg_buf)[0]
decrypted_data = None
s = SMIME.SMIME()
for key_pair in SslKeystore.keystore:
s.pkey = key_pair['key']
s.x509 = key_pair['cert']
try:
decrypted_data = s.decrypt(p7)
print "[S/MIME] decrypt with %s : SUCCESS" % key_pair['email']
break
except SMIME.PKCS7_Error:
print "[S/MIME] decrypt with %s : FAILED" % key_pair['email']
continue
return EmailSerializer.serialize_email_raw_message(decrypted_data) if decrypted_data is not None else None
#########################################################################################
# Exposed Functions #
#########################################################################################
def try_decrypt_smime(message_part, crypto_method):
message_part['crypto_method'] = crypto_method
msg_headers = message_part['headers']
content_type = msg_headers['Content-Type'][0] \
if ('Content-Type' in msg_headers) and (len(msg_headers['Content-Type']) > 0) \
else None
content_transfer_encoding = msg_headers['Content-Transfer-Encoding'][0] \
if ('Content-Transfer-Encoding' in msg_headers) and (len(msg_headers['Content-Transfer-Encoding']) > 0) \
else None
msg_dec = actual_decrypt_message(message_part['content'], content_type, content_transfer_encoding)
message_part['message_decrypted'] = msg_dec
message_part['crypto_success'] = msg_dec is not None
def is_smime(message_part, crypto_method):
content_type = message_part['content_type']
if 'smime-type' not in content_type:
return False
return (content_type['_type'] == 'application/pkcs7-mime') and (content_type['smime-type'] == 'enveloped-data')
def __get_content_filter_functions__(settings):
return (is_smime, try_decrypt_smime)
| gpl-3.0 | 1,187,557,389,535,473,200 | 43.83908 | 125 | 0.524737 | false | 4.339266 | false | false | false |
itsnotmyfault1/kimcopter2 | crazyflie-pc-client/lib/cfclient/ui/dialogs/about.py | 3 | 2518 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
The about dialog.
"""
__author__ = 'Bitcraze AB'
__all__ = ['AboutDialog']
import sys
from PyQt4 import Qt, QtCore, QtGui, uic
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qt import *
import cfclient
import cflib.crtp
(about_widget_class,
about_widget_base_class) = (uic.loadUiType(sys.path[0] +
'/cfclient/ui/dialogs/about.ui'))
debuginfo = """
<b>Cfclient version:</b> {version}<br>
<b>System:</b> {system}<br>
<br>
<b>Interface status</b><br>
{interface_status}
"""
class AboutDialog(QtGui.QWidget, about_widget_class):
def __init__(self, helper, *args):
super(AboutDialog, self).__init__(*args)
self.setupUi(self)
self._close_button.clicked.connect(self.close)
self._name_label.setText(
self._name_label.text().replace('#version#',
cfclient.VERSION))
def showEvent(self, ev):
status_text = ""
interface_status = cflib.crtp.get_interfaces_status()
for s in interface_status.keys():
status_text += "<b>{}</b>: {}<br>\n".format(s, interface_status[s])
self._debug_out.setHtml(debuginfo.format(version=cfclient.VERSION,
system=sys.platform,
interface_status=status_text))
| gpl-2.0 | 8,543,637,364,166,004,000 | 32.131579 | 82 | 0.560763 | false | 3.416554 | false | false | false |
airilyan/nescient | nescient/crypto/galois.py | 2 | 10839 | # Nescient: A Python program for packing/unpacking encrypted, salted, and authenticated file containers.
# Copyright (C) 2018 Ariel Antonitis. Licensed under the MIT license.
#
# nescient/crypto/galois.py
""" Classes for creating and interacting with Galois fields (otherwise known as finite fields)
A Galois field of order q exists iff q is a prime power.
Elements in fields are represented as integers in the range 0...q-1, or alternatively, polynomials of the form:
x_0*p^0+x_1*p^1+...+x_(n-1)*p^(n-1)
"""
# TODO: Make better class docstrings
import math
class GaloisField:
""" Defines a finite field of order q=p**n, with optional generator g and irreducible polynomial r
Elements are consider to be normal integers in the range 0...q-1 (inclusive)
Can perform the standard operations (add, mult, exponentiation, inversion), optionally using lookup tables
"""
def __init__(self, p, n=1, r=None, g=None, maxMem=2 ** 30):
if p < 2 or n < 1:
raise ValueError('Unable to instantiate a finite field with these arguments')
self.p, self.n = p, n
self.q = self.p ** self.n # Order of the field
self.f = range(self.q) # Iterator for elements in the field
self.g = g
self.r = p if n == 1 else r # Technically reduce by p if this is a prime field
if r is None and n > 1: # If an r was not provided and is required (n > 1), find one
self.r = self.findR()
self.expTable = {}
self.logTable = {}
self.haveTables = False
# If the memory needed to make lookup tables is less than 1 GB (default), calculate them now
if self.q * math.log(self.q, 2) / 8 <= maxMem:
self.makeLookupTables()
self.haveTables = True
# Calculate the unique set of prime factors of n
@staticmethod
def prime_factors(n):
i = 2
factors = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(i)
if n > 1:
factors.add(n)
return factors
# Euclidean algorithm for gcd
@staticmethod
def gcd(a, b):
while b > 0:
a, b = b, a % b
return a
# Euler's totient function
@staticmethod
def phi(a):
b = a - 1
c = 0
while b > 0:
if not GaloisField.gcd(a, b) - 1:
c += 1
b -= 1
return c
# Given an element x, returns an n+1-element vector representing x as polynomials in GF(p)
def intToPoly(self, x):
return [(x // self.p ** i) % self.p for i in range(self.n + 1)]
# Given a vector of polynomials in GF(p), return the corresponding element (as an integer)
def polyToInt(self, poly):
return sum([self.p ** i * poly[i] for i in range(len(poly))])
# Generates exp & log lookup tables, for increased multiplication speed
def makeLookupTables(self):
if self.g is None or self.generate(self.g) is False: # If a generator was not provided or was invalid, find one
if self.n == 1: # If this is a prime field we can find a generator faster than brute force
pfs = GaloisField.prime_factors(self.q - 1) # Calculate the prime factors of phi(p), equal to p-1
for g in self.f:
s = set()
isGen = True
for pf in pfs:
y = self.pow(g, (self.q - 1) / pf)
if y in s or y == 1:
isGen = False
break
s.add(y)
if isGen:
self.generate(g, False) # g is known to be valid, so no need to double check
self.g = g
return
else: # Otherwise use the brute force method
for g in self.f:
                    if self.generate(g): # When this is true, tables will be generated as part of the method call
self.g = g
return
else:
return
raise RuntimeError('Unable to find a generator for the specified field')
# Returns whether g is a generator for the field, also updates exp and log tables accordingly
def generate(self, g, check=True):
if check: # If using this method to check whether the generator is valid, use dictionaries
self.expTable = {}
self.logTable = {}
else: # Otherwise assume g is valid and use lists to optimize for speed
self.expTable = [0] * self.q
self.logTable = [0] * self.q
y = 1
for x in self.f:
if check and y in self.logTable and x != self.q - 1:
return False
self.expTable[x] = y
self.logTable[y] = x
y = self.mult(g, y)
if check and len(self.logTable) != self.q - 1:
return False
self.logTable[1] = 0
return True
# Attempts to find the smallest degree n irreducible polynomial over the field
def findR(self):
for r in range(self.q + self.p, self.q * self.p): # Search only for degree n polynomials
if self.isIrreducible(r):
return r
raise RuntimeError('Unable to find an irreducible polynomial for the specified field')
# Checks whether a given polynomial is irreducible
def isIrreducible(self, r):
for i in range(self.p, self.q):
if self.modP(r, i) == 0:
return False
return True
# Multiplies two elements, without reducing if the product is outside of the field
def multPoly(self, a, b):
if self.n == 1: # Multiplication in a prime field without reduction
return a * b
if self.p == 2: # We can use bitwise operations when p==2
# Multiply each polynomial via bit shifts and xors
c = 0
for i in range(self.n):
if b & (1 << i):
c ^= a * 1 << i
return c
else: # Otherwise operate on polynomial representations of integers
p_a = self.intToPoly(a)
p_b = self.intToPoly(b)
p_c = [0] * 2 * self.n # Need enough space for the x**n * x**n term
# Multiply polynomials mod P (naively)
for i in range(self.n):
for j in range(self.n):
p_c[i + j] += p_a[i] * p_b[j]
p_c[i + j] %= self.p
return self.polyToInt(p_c)
# Calculates the remainder a mod b, performing subtraction of polynomials mod p
# Optionally, continues until the remainder is below some bound
def modP(self, a, b, bound=None):
if self.n == 1: # Mod in prime fields is easy!
return a % b
if bound is None:
bound = b
if self.p == 2: # Mod in 2**n fields is also easy (bitwise)
while a >= bound:
aBits = int(math.log2(a))
bBits = int(math.log2(b))
a ^= b << (aBits - bBits)
return a
else: # Otherwise use the slower polynomial method
p_a = self.intToPoly(a)
p_b = self.intToPoly(b)
while a >= bound:
aPits = int(math.log(a, self.p))
bPits = int(math.log(b, self.p))
for i in range(bPits + 1):
p_a[aPits - bPits + i] -= p_b[i]
p_a[aPits - bPits + i] %= self.p
a = self.polyToInt(p_a)
return a
# Adds two elements in the field
def add(self, a, b):
if self.n == 1: # Addition in a prime field is just modulo p
return (a + b) % self.p
if self.p == 2: # Special case, when p=2, addition is bitwise XOR
return (a ^ b) & (self.q - 1)
else: # Otherwise we need to break integers into polynomial representations and add modulo p
a_p = self.intToPoly(a)
b_p = self.intToPoly(b)
c_p = [(a_p[i] + b_p[i]) % self.p for i in range(self.n)]
return self.polyToInt(c_p)
# Multiplies two elements in the field
def mult(self, a, b):
if self.haveTables: # Use lookup tables if possible
return 0 if (a == 0 or b == 0) else self.expTable[(self.logTable[a] + self.logTable[b]) % (self.q - 1)]
else: # Otherwise use the slower reduction method
return self.modP(self.multPoly(a, b), self.r, bound=self.q)
# Returns the multiplicative inverse of an element using lookup tables
def inverse(self, x):
if self.haveTables: # Use lookup tables if possible
# Technically speaking, 0 has no multiplicative inverse, so just define it as itself
return 0 if x == 0 else self.expTable[self.q - 1 - self.logTable[x]]
else: # TODO Otherwise, well, give up (might do this later, there's an easy way for prime fields)
raise NotImplementedError
# Raise an element in the field to a power
def pow(self, a, b):
if self.haveTables: # Use lookup tables if possible
return 0 if a == 0 else self.expTable[(self.logTable[a] * b) % (self.q - 1)]
elif self.n == 1: # If this is a prime field use Python's modular exponentiation
return pow(a, b, self.p)
else: # Otherwise use exponentiation by repeated squaring
c = 1
while b > 0:
if b % 2 == 0:
a = self.mult(a, a)
b /= 2
else:
c = self.mult(a, c)
b -= 1
return c
# Allows for grabbing GfElement representations by indexing
def __getitem__(self, item):
if 0 <= item < self.q:
return GfElement(item, self)
raise IndexError
class GfElement:
""" Object representation of a GaloisField element.
Allows one to perform intuitive operations on the elements and get the correct results
"""
def __init__(self, val, f):
assert (0 <= val < f.q)
self.f = f
self.val = val
def __add__(self, other):
assert (self.f == other.f)
return self.f.add(self.val, other.val)
def __mul__(self, other):
assert (self.f == other.f)
return self.f.mult(self.val, other.val)
def __pow__(self, power): # Note that power is considered to be an integer, not a GfElement
return self.f.pow(self.val, power)
def __invert__(self):
return self.f.inverse(self.val)
def __str__(self):
return str(self.val)
def __index__(self):
return int(self.val)
def __int__(self):
return int(self.val)
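# Minimal usage sketch (added for illustration; the parameters below are assumptions,
# not part of the original module -- they describe the AES field GF(2**8)):
if __name__ == '__main__':
    gf = GaloisField(2, 8, r=0x11b)                  # x^8 + x^4 + x^3 + x + 1
    assert gf.mult(0x57, 0x83) == 0xc1               # the FIPS-197 example product
    assert gf.mult(gf.inverse(0x57), 0x57) == 1      # inverse() really inverts
    a, b = gf[0x57], gf[0x83]                        # GfElement wrappers support operators
    print('0x57 * 0x83 = %#x' % (a * b))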
| mit | -5,047,232,777,198,569,000 | 39.144444 | 120 | 0.548021 | false | 3.774025 | false | false | false |
hkkwok/MachOTool | mach_o/non_headers/symbol_table_block.py | 1 | 2304 | from utils.header import Header, NonEncodingField
from utils.commafy import commafy
class SymbolTableBase(Header):
FIELDS = (
NonEncodingField('desc'),
)
def __init__(self, entry_type, num_entries=None):
if num_entries is not None:
desc = '%s %s' % (commafy(num_entries), entry_type)
else:
desc = entry_type
super(SymbolTableBase, self).__init__('SymbolTable: %s' % desc, desc=desc)
class SymbolTable(SymbolTableBase):
SYM_INDEX = 0
N_STRX = 1
N_TYPE = 2
N_SECT = 3
N_DESC = 4
N_VALUE = 5
SYM_NAME = 6
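    # (illustrative note) each entry appended by add() below is a tuple laid out by the
    # indices above: (sym_index, n_strx, n_type, n_sect, n_desc, n_value, sym_name)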
def __init__(self, num_symbols):
super(SymbolTable, self).__init__('symbol entries', num_symbols)
self.symbols = list()
def add(self, nlist):
idx = len(self.symbols)
self.symbols.append((idx, nlist.n_strx, nlist.n_type, nlist.n_sect, nlist.n_desc, nlist.n_value, None))
def correlate_string_table(self, sym_str_tab):
assert isinstance(sym_str_tab, SymbolStringTable)
for idx in xrange(len(self.symbols)):
n_strx = self.symbols[idx][self.N_STRX]
if n_strx == 0:
continue
sym_name = sym_str_tab.symbol_strings.get(n_strx, None)
if sym_name is not None:
self.symbols[idx] = self.symbols[idx][:self.SYM_NAME] + (sym_name,)
def filter(self, pattern=None):
if pattern is None:
return range(len(self.symbols))
indices = list()
for (sym_idx, (index, n_strx, n_type, n_sect, n_desc, n_value, symbol_name)) in enumerate(self.symbols):
if pattern in symbol_name:
indices.append(sym_idx)
return indices
class SymbolStringTable(SymbolTableBase):
def __init__(self):
super(SymbolStringTable, self).__init__('string table')
self.symbol_strings = dict()
def add(self, n_strx, s):
self.symbol_strings[n_strx] = s
class IndirectSymbolTable(SymbolTableBase):
def __init__(self, num_indirect_symbols):
super(IndirectSymbolTable, self).__init__('indirect symbols', num_indirect_symbols)
class ExtRefSymbolTable(SymbolTableBase):
def __init__(self, num_ext_ref):
super(ExtRefSymbolTable, self).__init__('external references', num_ext_ref)
| apache-2.0 | 5,418,894,315,636,065,000 | 31 | 112 | 0.607639 | false | 3.46988 | false | false | false |
shurain/archiver | archiver/sink.py | 1 | 4439 | # -*- coding: utf-8 -*-
import hashlib
import binascii
from thrift.transport.THttpClient import THttpClient
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from evernote.edam.userstore import UserStore
from evernote.edam.notestore import NoteStore
import evernote.edam.type.ttypes as Types
import evernote.edam.error.ttypes as Errors
from evernote.api.client import EvernoteClient
from .settings import EVERNOTE_NOTEBOOK
import logging
class Sink(object):
pass
class EvernoteSink(Sink):
def __init__(self, token, sandbox=False):
"""Initialize evernote connection.
Client connection handle is assigned to the client property.
Two properties user_store and note_store are provided for the convenience.
"""
self.token = token
self.client = EvernoteClient(token=self.token, sandbox=sandbox)
self.user_store = self.client.get_user_store()
self.note_store = self.client.get_note_store()
def image_resource(self, item):
#FIXME create pdf resource
md5 = hashlib.md5()
md5.update(item.content)
hashvalue = md5.digest()
data = Types.Data()
data.size = len(item.content) #FIXME better ways of doing this calculation?
data.bodyHash = hashvalue
data.body = item.content
resource = Types.Resource()
resource.mime = item.content_type
resource.data = data
return resource
def pdf_resource(self, item):
#FIXME create pdf resource
md5 = hashlib.md5()
md5.update(item.content)
hashvalue = md5.digest()
data = Types.Data()
data.size = len(item.content) #FIXME better ways of doing this calculation?
data.bodyHash = hashvalue
data.body = item.content
resource = Types.Resource()
resource.mime = 'application/pdf'
resource.data = data
return resource
def note_attribute(self, source_url=''):
attributes = Types.NoteAttributes()
attributes.sourceURL = source_url
return attributes
def create_note(self, title, content, notebook_name='', tags='', attributes=None, resources=None):
note = Types.Note()
note.title = title
if attributes:
note.attributes = attributes
if tags:
note.tagNames = [t.encode('utf-8', 'xmlcharrefreplace') for t in tags.split()] # Assuming no spaces in tags
logging.debug(note.tagNames)
if notebook_name:
notebooks = self.note_store.listNotebooks(self.token)
for notebook in notebooks:
if notebook.name == notebook_name:
note.notebookGuid = notebook.guid
break
else:
pass # create a note in default notebook
note.content = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">
<en-note>{}""".format(content.encode('utf-8', 'xmlcharrefreplace'))
if resources:
note.resources = resources
for r in resources:
note.content += """<en-media type="{}" hash="{}"/>""".format(r.mime, binascii.hexlify(r.data.bodyHash))
note.content += "</en-note>"
logging.debug(note.content)
created_note = self.note_store.createNote(self.token, note)
return created_note
def push(self, item):
kwargs = {
'title': item.title.encode('utf-8', 'xmlcharrefreplace'),
'content': item.body,
'tags': item.tags,
'notebook_name': EVERNOTE_NOTEBOOK,
'attributes': self.note_attribute(item.url),
}
if item.itemtype == 'PDF':
resource = self.pdf_resource(item)
kwargs['resources'] = [resource]
elif item.itemtype == 'image':
resource = self.image_resource(item)
kwargs['resources'] = [resource]
elif item.itemtype == 'HTML':
#FIXME check for image inside and create image resources
kwargs['content'] = item.content
elif item.itemtype == 'text':
kwargs['content'] = item.content
else:
# XXX Assuming plaintext type
# Should I raise exception for unknown items?
item.itemtype = 'text'
self.create_note(**kwargs)
class Database(Sink):
pass
| mit | 2,208,738,899,252,046,300 | 32.37594 | 120 | 0.609597 | false | 4.203598 | false | false | false |
haanme/FinnBrain | generate_anon_list.py | 1 | 1369 | #!/usr/bin/env python
experiment_dir = '/Users/eija/Documents/FinnBrain/pipelinedata'
DTIprep_protocol = '/Users/eija/Documents/FinnBrain/scripts/default.xml'
from argparse import ArgumentParser
import os
import math
import numpy as np
import glob
import dicom
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--basedir", dest="basedir", help="base directory for image data", required=True)
args = parser.parse_args()
# Go through all patient subdirectories
DICOMbasedirs = glob.glob(args.basedir + os.sep + '*')
for DICOMbasedir in DICOMbasedirs:
#print "READING BASE DICOM [" + DICOMbasedir + "]"
StudyDirs = glob.glob(DICOMbasedir + os.sep + '*')
# Take first file of first subdirectory
for StudyDir in StudyDirs:
SeriesDirs = glob.glob(StudyDir + os.sep + '*')
break;
SeriesDir = SeriesDirs[0]
#print "READING DTI DICOM STUDY [" + SeriesDir + "]"
try:
filenames = os.listdir(SeriesDir)
ds = dicom.read_file(os.path.join(SeriesDir, filenames[0]))
except Exception as inst:
print type(inst) # the exception instance
print inst.args # arguments stored in .args
print inst # __str__ allows args to be printed directly
print ds.PatientsName
| mit | -411,932,060,426,310,850 | 36 | 105 | 0.6355 | false | 3.740437 | false | false | false |
jendrikseipp/rednotebook | rednotebook/gui/browser_cef.py | 1 | 5589 | # -----------------------------------------------------------------------
# Copyright (c) 2018 Jendrik Seipp
#
# RedNotebook is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RedNotebook is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with RedNotebook; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------
import ctypes
import logging
import sys
from gi.repository import Gdk, GObject, Gtk
from rednotebook.util import filesystem
try:
from cefpython3 import cefpython as cef
except ImportError as err:
cef = None
if filesystem.IS_WIN:
logging.info(
"CEF Python not found. Disabling clouds and"
' in-app previews. Error message: "{}"'.format(err)
)
if cef:
class HtmlView(Gtk.DrawingArea):
NOTEBOOK_URL = "file:///"
"""
Loading HTML strings only works if we pass the `url` parameter to
CreateBrowserSync.
When we call load_html() the first time, the browser is not yet
created. Therefore, we store the initial html and load it when
the browser is created.
"""
def __init__(self):
super().__init__()
self._browser = None
self._win32_handle = None
self._initial_html = ""
sys.excepthook = cef.ExceptHook # To shutdown CEF processes on error.
cef.Initialize(settings={"context_menu": {"enabled": False}})
GObject.threads_init()
GObject.timeout_add(10, self.on_timer)
self.connect("configure-event", self.on_configure)
self.connect("size-allocate", self.on_size_allocate)
self.connect("focus-in-event", self.on_focus_in)
self.connect("realize", self.on_realize)
def load_html(self, html):
if self._browser:
self._browser.GetMainFrame().LoadString(html, self.NOTEBOOK_URL)
else:
self._initial_html = html
def set_font_size(self, size):
pass
def get_handle(self):
Gdk.threads_enter()
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object]
gpointer = ctypes.pythonapi.PyCapsule_GetPointer(
self.get_property("window").__gpointer__, None
)
# The GTK 3.22 stack needs "gdk-3-3.0.dll".
libgdk = ctypes.CDLL("libgdk-3-0.dll")
handle = libgdk.gdk_win32_window_get_handle(gpointer)
Gdk.threads_leave()
return handle
def on_timer(self):
cef.MessageLoopWork()
return True
def on_realize(self, *_):
self._embed_browser()
def _embed_browser(self):
window_info = cef.WindowInfo()
self._win32_handle = self.get_handle()
window_info.SetAsChild(self._win32_handle)
self._browser = cef.CreateBrowserSync(window_info, url=self.NOTEBOOK_URL)
self._browser.SetClientCallback("OnBeforeBrowse", self.on_before_browse)
self._browser.SetClientCallback("OnAddressChange", self.on_address_change)
self.load_html(self._initial_html)
self._initial_html = None
@GObject.Signal(name="on-url-clicked", arg_types=(str,))
def url_clicked_signal(self, url):
logging.debug("Emitting on-url-clicked signal: %s", url)
def on_before_browse(self, browser, frame, request, **_):
url = request.GetUrl()
# For some reason GetUrl() appends slash to the returned URL so we need to compensate for it:
# (https://bugs.chromium.org/p/chromium/issues/detail?id=339054 might be the cause)
if url == self.NOTEBOOK_URL + "/":
# On first invocation the url points to dummy NOTEBOOK_URL.
# There is no reason to emit signal for it.
return False
self.url_clicked_signal.emit(url)
return True
def on_address_change(self, browser, frame, url):
if url == self.NOTEBOOK_URL:
return
self.url_clicked_signal.emit(url)
def on_configure(self, *_):
if self._browser:
self._browser.NotifyMoveOrResizeStarted()
return False
def on_size_allocate(self, _, data):
if self._browser:
cef.WindowUtils().OnSize(self._win32_handle, 0, 0, 0)
def on_focus_in(self, *_):
if self._browser:
self._browser.SetFocus(True)
return True
return False
def shutdown(self, *_):
if self._browser:
self._browser.CloseBrowser(True)
# Clear browser references that you keep anywhere in your
# code. All references must be cleared for CEF to shutdown cleanly.
self._browser = None
cef.Shutdown()
| gpl-2.0 | -4,047,744,652,660,251,000 | 36.013245 | 105 | 0.582752 | false | 4.152303 | false | false | false |
mtjvc/gpew | examples/single_line.py | 1 | 3029 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import inspect
import numpy as np
import emcee
import george
from george import kernels
import os
import sys
currentframe = inspect.currentframe()
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(currentframe)))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import profiles
import gpew
def single_kernel_noisemodel(p):
"""
Simple one squared-exponential kernel noise model.
"""
return george.GP(p[0] * kernels.ExpSquaredKernel(p[1]))
def single_kernel_lnprior(p):
amp, xcen, sigma, lna, lnalpha = p
if (-50. < lna < 0. and amp > 0. and sigma > 0. and xcen > 8685 and
xcen < 8690):
return 0.0
return -np.inf
# Load the spectrum
d = np.loadtxt('spec.txt').T
# Select the region around the line
sel = (d[0] > 8680) & (d[0] < 8696)
# Come up with uncertainties for S/N = 100
yerr = np.ones_like(d[0][sel]) * 0.01
# Store the line in the lines array
lines = [(d[0][sel], d[1][sel], yerr)]
# Define the profile for the line
pfiles = [profiles.gaussian]
# Generate the array that stores how many parameters each profile
# has. There is only one and we are using a Gaussian profile so we
# now we have 3 parameters but this way we don't need to think about it.
pparn = np.cumsum([0] +\
[len(inspect.getargspec(i)[0]) - 1 for i in pfiles])
# Initial values for the parameters. The first three are for the Gaussian
# profile, the next two for the one kernel GP noise model. The values
# should be close to the optimal (this is important).
initial = [0.28, # profile amplitude
8687.82, # profile center wavelength
1.53, # profile sigma
-6.1, # kernel amplitude
0.3 # kernel scale-length
]
# Sampler initialization
nwalkers = 128
ndim = len(initial)
# 100 is not enough! Make sure the convergence is satisfacory before
# accepting any results!
niter = 100
# Replace with None to get a trivial chi2 like noise model
noisemodel = single_kernel_noisemodel
data = [lines, pfiles, pparn, noisemodel, single_kernel_lnprior]
# Initial states of the walkers - N-dim Gaussian around the initial values
p0 = np.array([np.array(initial) + 1e-2 * np.random.randn(ndim)
for i in xrange(nwalkers)])
# Sampler object
sampler = emcee.EnsembleSampler(nwalkers, ndim, gpew.lnprob, args=data)
# Let's run it!
p0, lnp, _ = sampler.run_mcmc(p0, niter)
sampler.reset()
# Let's get the best lnp value, re-initialize it and run it again.
p = p0[np.argmax(lnp)]
p0 = [p + 1e-2 * np.random.randn(ndim) for i in xrange(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, niter)
# Collect the samples
samples = sampler.flatchain
# Plot stuff:
# error bars: observed line
# red: +-1 sigma of the complete model
# blue: +-1 sigma of the profile model
# gpew.plot_lines(lines, pfiles, pparn, single_kernel_noisemodel, samples,
gpew.plot_lines(lines, pfiles, pparn, noisemodel, samples,
nwalkers, wlwidth=8.1, gpsamples=100)
| mit | -5,366,648,903,730,181,000 | 26.536364 | 76 | 0.685705 | false | 3.109856 | false | false | false |
jamesiter/JimV-C | jimvc/api/guest.py | 1 | 56141 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
from math import ceil
from IPy import IP
import requests
import json
from uuid import uuid4
import random
import time
import jimit as ji
from flask import Blueprint, url_for, request
from jimvc.api.base import Base
from jimvc.models.initialize import dev_table
from jimvc.models import app_config
from jimvc.models import GuestState
from jimvc.models import Service
from jimvc.models import IPPool
from jimvc.models import ReservedIP
from jimvc.models import DiskState, Host
from jimvc.models import Database as db
from jimvc.models import Config
from jimvc.models import Disk
from jimvc.models import Rules
from jimvc.models import Utils
from jimvc.models import Guest
from jimvc.models import OSTemplateImage
from jimvc.models import OSTemplateProfile
from jimvc.models import OSTemplateInitializeOperate
from jimvc.models import GuestXML
from jimvc.models import SSHKeyGuestMapping
from jimvc.models import SSHKey
from jimvc.models import Snapshot
from jimvc.models import status
__author__ = 'James Iter'
__date__ = '2017/3/22'
__contact__ = '[email protected]'
__copyright__ = '(c) 2017 by James Iter.'
blueprint = Blueprint(
'api_guest',
__name__,
url_prefix='/api/guest'
)
blueprints = Blueprint(
'api_guests',
__name__,
url_prefix='/api/guests'
)
guest_base = Base(the_class=Guest, the_blueprint=blueprint, the_blueprints=blueprints)
os_template_image_base = Base(the_class=OSTemplateImage, the_blueprint=blueprint, the_blueprints=blueprints)
os_template_profile_base = Base(the_class=OSTemplateProfile, the_blueprint=blueprint, the_blueprints=blueprints)
@Utils.dumps2response
def r_create():
args_rules = [
Rules.CPU.value,
Rules.MEMORY.value,
Rules.BANDWIDTH.value,
Rules.BANDWIDTH_UNIT.value,
Rules.OS_TEMPLATE_IMAGE_ID.value,
Rules.QUANTITY.value,
Rules.REMARK.value,
Rules.PASSWORD.value,
Rules.LEASE_TERM.value
]
if 'node_id' in request.json:
args_rules.append(
Rules.NODE_ID.value
)
if 'ssh_keys_id' in request.json:
args_rules.append(
Rules.SSH_KEYS_ID.value
)
if 'service_id' in request.json:
args_rules.append(
Rules.SERVICE_ID.value
)
if 'autostart' in request.json:
args_rules.append(
Rules.AUTOSTART.value
)
try:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ji.Check.previewing(args_rules, request.json)
config = Config()
config.id = 1
config.get()
os_template_image = OSTemplateImage()
os_template_profile = OSTemplateProfile()
os_template_image.id = request.json.get('os_template_image_id')
if not os_template_image.exist():
ret['state'] = ji.Common.exchange_state(40450)
ret['state']['sub']['zh-cn'] = ''.join([ret['state']['sub']['zh-cn'], ': ', os_template_image.id.__str__()])
return ret
os_template_image.get()
os_template_profile.id = os_template_image.os_template_profile_id
os_template_profile.get()
os_template_initialize_operates, os_template_initialize_operates_count = \
OSTemplateInitializeOperate.get_by_filter(
filter_str='os_template_initialize_operate_set_id:eq:' +
os_template_profile.os_template_initialize_operate_set_id.__str__())
node_id = request.json.get('node_id', None)
# 默认只取可随机分配虚拟机的 hosts
available_hosts = Host.get_available_hosts(nonrandom=False)
# 当指定了 host 时,取全部活着的 hosts
if node_id is not None:
available_hosts = Host.get_available_hosts(nonrandom=None)
if available_hosts.__len__() == 0:
ret['state'] = ji.Common.exchange_state(50351)
return ret
available_hosts_mapping_by_node_id = dict()
for host in available_hosts:
if host['node_id'] not in available_hosts_mapping_by_node_id:
available_hosts_mapping_by_node_id[host['node_id']] = host
if node_id is not None and node_id not in available_hosts_mapping_by_node_id:
ret['state'] = ji.Common.exchange_state(50351)
return ret
ssh_keys_id = request.json.get('ssh_keys_id', list())
ssh_keys = list()
ssh_key_guest_mapping = SSHKeyGuestMapping()
if ssh_keys_id.__len__() > 0:
rows, _ = SSHKey.get_by_filter(
filter_str=':'.join(['id', 'in', ','.join(_id.__str__() for _id in ssh_keys_id)]))
for row in rows:
ssh_keys.append(row['public_key'])
# 确保目标 服务组 存在
service = Service()
service.id = request.json.get('service_id', 1)
service.get()
bandwidth = request.json.get('bandwidth')
bandwidth_unit = request.json.get('bandwidth_unit')
if bandwidth_unit == 'k':
bandwidth = bandwidth * 1000
elif bandwidth_unit == 'm':
bandwidth = bandwidth * 1000 ** 2
elif bandwidth_unit == 'g':
bandwidth = bandwidth * 1000 ** 3
else:
ret = dict()
ret['state'] = ji.Common.exchange_state(41203)
raise ji.PreviewingError(json.dumps(ret, ensure_ascii=False))
# http://man7.org/linux/man-pages/man8/tc.8.html
# 如果带宽大于 tc 所控最大速率,则置其为无限带宽
# 34359738360 等于 tc 最大可控字节速率,换算出的比特位
if bandwidth > 34359738360:
bandwidth = 0
quantity = request.json.get('quantity')
occupied_ips = list()
occupied_vnc_ports = list()
rows, count = Guest.get_all()
for row in rows:
occupied_ips.append(row['ip'])
occupied_vnc_ports.append(row['vnc_port'])
rows, count = ReservedIP.get_all()
for row in rows:
occupied_ips.append(row['ip'])
rows, count = IPPool.get_by_filter(filter_str=':'.join(['activity', 'eq', '1']))
if count < 1:
ret['state'] = ji.Common.exchange_state(50350)
return ret
ip_pool = IPPool()
ip_pool.id = rows[0]['id']
ip_pool.get()
guest_ip_generator = ip_pool.ip_generator(occupied_ips=occupied_ips)
guest_vnc_port_generator = ip_pool.vnc_port_generator(occupied_vnc_ports=occupied_vnc_ports)
while quantity:
quantity -= 1
guest = Guest()
guest.uuid = uuid4().__str__()
guest.cpu = request.json.get('cpu')
# 虚拟机内存单位,模板生成方法中已置其为GiB
guest.memory = request.json.get('memory')
guest.bandwidth = bandwidth
guest.os_template_image_id = request.json.get('os_template_image_id')
guest.label = ji.Common.generate_random_code(length=8)
guest.remark = request.json.get('remark', '')
guest.autostart = request.json.get('autostart', False)
guest.password = request.json.get('password')
if guest.password is None or guest.password.__len__() < 1:
guest.password = ji.Common.generate_random_code(length=16)
guest.ip = guest_ip_generator.next()
guest.vnc_port = guest_vnc_port_generator.next()
guest.network = config.vm_network
guest.manage_network = config.vm_manage_network
guest.vnc_password = ji.Common.generate_random_code(length=16)
disk = Disk()
disk.uuid = guest.uuid
disk.remark = guest.label.__str__() + '_SystemImage'
disk.format = 'qcow2'
disk.sequence = 0
disk.size = 0
disk.path = config.storage_path + '/' + disk.uuid + '.' + disk.format
disk.guest_uuid = ''
# disk.node_id 由 guest 事件处理机更新。涉及迁移时,其所属 node_id 会变更。参见 @models/event_processory.py:111 附近。
disk.node_id = 0
disk.quota(config=config)
disk.create()
if node_id is None:
# 在可用计算节点中平均分配任务
chosen_host = available_hosts[quantity % available_hosts.__len__()]
else:
chosen_host = available_hosts_mapping_by_node_id[node_id]
guest.node_id = chosen_host['node_id']
guest.service_id = service.id
guest_xml = GuestXML(host=chosen_host, guest=guest, disk=disk, config=config,
os_type=os_template_profile.os_type)
guest.xml = guest_xml.get_domain()
guest.node_id = int(guest.node_id)
guest.create()
ssh_key_guest_mapping.guest_uuid = guest.uuid
if ssh_keys_id.__len__() > 0:
for ssh_key_id in ssh_keys_id:
ssh_key_guest_mapping.ssh_key_id = ssh_key_id
ssh_key_guest_mapping.create()
if os_template_profile.os_distro == 'coreos':
ip_pool.netmask = IP(guest.ip).make_net(ip_pool.netmask).prefixlen().__str__()
# 替换占位符为有效内容
_os_template_initialize_operates = copy.deepcopy(os_template_initialize_operates)
for k, v in enumerate(_os_template_initialize_operates):
_os_template_initialize_operates[k]['content'] = v['content'].replace('{IP}', guest.ip).\
replace('{HOSTNAME}', guest.label). \
replace('{PASSWORD}', guest.password). \
replace('{NETMASK}', ip_pool.netmask).\
replace('{GATEWAY}', ip_pool.gateway).\
replace('{DNS1}', ip_pool.dns1).\
replace('{DNS2}', ip_pool.dns2). \
replace('{SSH-KEY}', '\n'.join(ssh_keys))
_os_template_initialize_operates[k]['command'] = v['command'].replace('{IP}', guest.ip). \
replace('{HOSTNAME}', guest.label). \
replace('{PASSWORD}', guest.password). \
replace('{NETMASK}', ip_pool.netmask). \
replace('{GATEWAY}', ip_pool.gateway). \
replace('{DNS1}', ip_pool.dns1). \
replace('{DNS2}', ip_pool.dns2). \
replace('{SSH-KEY}', '\n'.join(ssh_keys))
message = {
'_object': 'guest',
'action': 'create',
'uuid': guest.uuid,
'storage_mode': config.storage_mode,
'dfs_volume': config.dfs_volume,
'node_id': guest.node_id,
'autostart': guest.autostart,
'name': guest.label,
'template_path': os_template_image.path,
'os_type': os_template_profile.os_type,
'disks': [disk.__dict__],
'xml': guest_xml.get_domain(),
'os_template_initialize_operates': _os_template_initialize_operates,
'passback_parameters': {}
}
Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_autostart(uuids, autostart):
args_rules = [
Rules.UUIDS.value,
Rules.AUTOSTART.value
]
if str(autostart).lower() in ['false', '0']:
autostart = False
else:
autostart = True
try:
ji.Check.previewing(args_rules, {'uuids': uuids, 'autostart': autostart})
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
message = {
'_object': 'guest',
'action': 'autostart',
'uuid': uuid,
'node_id': guest.node_id,
'autostart': autostart,
'passback_parameters': {'autostart': autostart}
}
Utils.emit_instruction(message=json.dumps(message))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_reboot(uuids):
args_rules = [
Rules.UUIDS.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids})
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
message = {
'_object': 'guest',
'action': 'reboot',
'uuid': uuid,
'node_id': guest.node_id
}
Utils.emit_instruction(message=json.dumps(message))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_force_reboot(uuids):
args_rules = [
Rules.UUIDS.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids})
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
disks, _ = Disk.get_by_filter(filter_str=':'.join(['guest_uuid', 'eq', guest.uuid]))
message = {
'_object': 'guest',
'action': 'force_reboot',
'uuid': uuid,
'node_id': guest.node_id,
'disks': disks
}
Utils.emit_instruction(message=json.dumps(message))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_shutdown(uuids):
args_rules = [
Rules.UUIDS.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids})
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
message = {
'_object': 'guest',
'action': 'shutdown',
'uuid': uuid,
'node_id': guest.node_id
}
Utils.emit_instruction(message=json.dumps(message))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_force_shutdown(uuids):
args_rules = [
Rules.UUIDS.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids})
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
message = {
'_object': 'guest',
'action': 'force_shutdown',
'uuid': uuid,
'node_id': guest.node_id
}
Utils.emit_instruction(message=json.dumps(message))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_boot(uuids):
# TODO: 做好关系依赖判断,比如boot不可以对suspend的实例操作。
args_rules = [
Rules.UUIDS.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids})
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
config = Config()
config.id = 1
config.get()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
disks, _ = Disk.get_by_filter(filter_str=':'.join(['guest_uuid', 'eq', guest.uuid]))
message = {
'_object': 'guest',
'action': 'boot',
'uuid': uuid,
'node_id': guest.node_id,
'passback_parameters': {},
'disks': disks
}
Utils.emit_instruction(message=json.dumps(message))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_suspend(uuids):
args_rules = [
Rules.UUIDS.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids})
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
message = {
'_object': 'guest',
'action': 'suspend',
'uuid': uuid,
'node_id': guest.node_id
}
Utils.emit_instruction(message=json.dumps(message))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_resume(uuids):
args_rules = [
Rules.UUIDS.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids})
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
message = {
'_object': 'guest',
'action': 'resume',
'uuid': uuid,
'node_id': guest.node_id
}
Utils.emit_instruction(message=json.dumps(message))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_delete(uuids):
args_rules = [
Rules.UUIDS.value
]
# TODO: 加入是否删除使用的数据磁盘开关,如果为True,则顺便删除使用的磁盘。否则解除该磁盘被使用的状态。
try:
ji.Check.previewing(args_rules, {'uuids': uuids})
guest = Guest()
# 检测所指定的 UUDIs 实例都存在
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
config = Config()
config.id = 1
config.get()
# 执行删除操作
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
message = {
'_object': 'guest',
'action': 'delete',
'uuid': uuid,
'storage_mode': config.storage_mode,
'dfs_volume': config.dfs_volume,
'node_id': guest.node_id
}
Utils.emit_instruction(message=json.dumps(message))
# 删除创建失败的 Guest
if guest.status == status.GuestState.dirty.value:
disk = Disk()
disk.uuid = guest.uuid
disk.get_by('uuid')
if disk.state == status.DiskState.pending.value:
disk.delete()
guest.delete()
SSHKeyGuestMapping.delete_by_filter(filter_str=':'.join(['guest_uuid', 'eq', guest.uuid]))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_attach_disk(uuid, disk_uuid):
args_rules = [
Rules.UUID.value,
Rules.DISK_UUID.value
]
try:
ji.Check.previewing(args_rules, {'uuid': uuid, 'disk_uuid': disk_uuid})
guest = Guest()
guest.uuid = uuid
guest.get_by('uuid')
disk = Disk()
disk.uuid = disk_uuid
disk.get_by('uuid')
config = Config()
config.id = 1
config.get()
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
# 判断欲挂载的磁盘是否空闲
if disk.guest_uuid.__len__() > 0 or disk.state != DiskState.idle.value:
ret['state'] = ji.Common.exchange_state(41258)
return ret
# 判断 Guest 是否处于可用状态
if guest.status in (status.GuestState.no_state.value, status.GuestState.dirty.value):
ret['state'] = ji.Common.exchange_state(41259)
return ret
# 判断 Guest 与 磁盘是否在同一宿主机上
if config.storage_mode in [status.StorageMode.local.value, status.StorageMode.shared_mount.value]:
if guest.node_id != disk.node_id:
ret['state'] = ji.Common.exchange_state(41260)
return ret
# 通过检测未被使用的序列,来确定当前磁盘在目标 Guest 身上的序列
disk.guest_uuid = guest.uuid
disks, count = disk.get_by_filter(filter_str='guest_uuid:in:' + guest.uuid)
already_used_sequence = list()
for _disk in disks:
already_used_sequence.append(_disk['sequence'])
for sequence in range(0, dev_table.__len__()):
if sequence not in already_used_sequence:
disk.sequence = sequence
break
disk.state = DiskState.mounting.value
guest_xml = GuestXML(guest=guest, disk=disk, config=config)
message = {
'_object': 'guest',
'action': 'attach_disk',
'uuid': uuid,
'node_id': guest.node_id,
'xml': guest_xml.get_disk(),
'passback_parameters': {'disk_uuid': disk.uuid, 'sequence': disk.sequence},
'disks': [disk.__dict__]
}
Utils.emit_instruction(message=json.dumps(message))
disk.update()
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_detach_disk(disk_uuid):
args_rules = [
Rules.DISK_UUID.value
]
try:
ji.Check.previewing(args_rules, {'disk_uuid': disk_uuid})
disk = Disk()
disk.uuid = disk_uuid
disk.get_by('uuid')
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
if disk.state != DiskState.mounted.value or disk.sequence == 0:
# 表示未被任何实例使用,已被分离
# 序列为 0 的表示实例系统盘,系统盘不可以被分离
# TODO: 系统盘单独范围其它状态
return ret
guest = Guest()
guest.uuid = disk.guest_uuid
guest.get_by('uuid')
# 判断 Guest 是否处于可用状态
if guest.status in (status.GuestState.no_state.value, status.GuestState.dirty.value):
ret['state'] = ji.Common.exchange_state(41259)
return ret
config = Config()
config.id = 1
config.get()
guest_xml = GuestXML(guest=guest, disk=disk, config=config)
message = {
'_object': 'guest',
'action': 'detach_disk',
'uuid': disk.guest_uuid,
'node_id': guest.node_id,
'xml': guest_xml.get_disk(),
'passback_parameters': {'disk_uuid': disk.uuid}
}
Utils.emit_instruction(message=json.dumps(message))
disk.state = DiskState.unloading.value
disk.update()
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_migrate(uuids, node_id):
args_rules = [
Rules.UUIDS.value,
Rules.NODE_ID.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids, 'node_id': node_id})
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
config = Config()
config.id = 1
config.get()
# 取全部活着的 hosts
available_hosts = Host.get_available_hosts(nonrandom=None)
if available_hosts.__len__() == 0:
ret['state'] = ji.Common.exchange_state(50351)
return ret
available_hosts_mapping_by_node_id = dict()
for host in available_hosts:
if host['node_id'] not in available_hosts_mapping_by_node_id:
available_hosts_mapping_by_node_id[host['node_id']] = host
dst_ip = available_hosts_mapping_by_node_id[node_id]['interfaces'][config.vm_manage_network]['ip']
guest = Guest()
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
# 忽略宕机计算节点 上面的 虚拟机 迁移请求
# 忽略目标计算节点 等于 当前所在 计算节点 的虚拟机 迁移请求
if guest.node_id.__str__() not in available_hosts_mapping_by_node_id or guest.node_id.__str__() == node_id:
continue
message = {
'_object': 'guest',
'action': 'migrate',
'uuid': uuid,
'node_id': guest.node_id,
'storage_mode': config.storage_mode,
'duri': 'qemu+ssh://' + dst_ip + '/system'
}
Utils.emit_instruction(message=json.dumps(message))
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_get(uuids):
ret = guest_base.get(ids=uuids, ids_rule=Rules.UUIDS.value, by_field='uuid')
if '200' != ret['state']['code']:
return ret
rows, _ = SSHKeyGuestMapping.get_by_filter(filter_str=':'.join(['guest_uuid', 'in', uuids]))
guest_uuid_ssh_key_id_mapping = dict()
ssh_keys_id = list()
for row in rows:
if row['ssh_key_id'] not in ssh_keys_id:
ssh_keys_id.append(row['ssh_key_id'].__str__())
if row['guest_uuid'] not in guest_uuid_ssh_key_id_mapping:
guest_uuid_ssh_key_id_mapping[row['guest_uuid']] = list()
guest_uuid_ssh_key_id_mapping[row['guest_uuid']].append(row['ssh_key_id'])
rows, _ = SSHKey.get_by_filter(filter_str=':'.join(['id', 'in', ','.join(ssh_keys_id)]))
ssh_key_id_mapping = dict()
for row in rows:
row['url'] = url_for('v_ssh_keys.show')
ssh_key_id_mapping[row['id']] = row
hosts_url = url_for('api_hosts.r_get_by_filter', _external=True)
hosts_ret = requests.get(url=hosts_url, cookies=request.cookies)
hosts_ret = json.loads(hosts_ret.content)
hosts_mapping_by_node_id = dict()
for host in hosts_ret['data']:
hosts_mapping_by_node_id[int(host['node_id'])] = host
if -1 == uuids.find(','):
if 'ssh_keys' not in ret['data']:
ret['data']['ssh_keys'] = list()
if ret['data']['uuid'] in guest_uuid_ssh_key_id_mapping:
for ssh_key_id in guest_uuid_ssh_key_id_mapping[ret['data']['uuid']]:
if ssh_key_id not in ssh_key_id_mapping:
continue
ret['data']['ssh_keys'].append(ssh_key_id_mapping[ssh_key_id])
if not hosts_mapping_by_node_id[ret['data']['node_id']]['alive']:
ret['data']['status'] = GuestState.no_state.value
else:
for i, guest in enumerate(ret['data']):
if 'ssh_keys' not in ret['data'][i]:
ret['data'][i]['ssh_keys'] = list()
if ret['data'][i]['uuid'] in guest_uuid_ssh_key_id_mapping:
for ssh_key_id in guest_uuid_ssh_key_id_mapping[ret['data'][i]['uuid']]:
if ssh_key_id not in ssh_key_id_mapping:
continue
ret['data'][i]['ssh_keys'].append(ssh_key_id_mapping[ssh_key_id])
if not hosts_mapping_by_node_id[ret['data'][i]['node_id']]['alive']:
ret['data'][i]['status'] = GuestState.no_state.value
return ret
def exchange_guest_os_templates_logo(os_templates_image_mapping_by_id=None, os_templates_profile_mapping_by_id=None,
os_template_image_id=None):
assert isinstance(os_templates_image_mapping_by_id, dict)
assert isinstance(os_templates_profile_mapping_by_id, dict)
assert isinstance(os_template_image_id, int)
if os_templates_image_mapping_by_id[os_template_image_id]['logo'] == "":
logo = os_templates_profile_mapping_by_id[os_templates_image_mapping_by_id[os_template_image_id]['os_template_profile_id']]['icon']
else:
logo = os_templates_image_mapping_by_id[os_template_image_id]['logo']
label = os_templates_image_mapping_by_id[os_template_image_id]['label']
return logo, label
def format_guest_status(_status, progress):
from jimvc.models import GuestState
color = 'FF645B'
icon = 'glyph-icon icon-bolt'
desc = '未知状态'
if _status == GuestState.booting.value:
color = '00BBBB'
icon = 'glyph-icon icon-circle'
desc = '启动中'
elif _status == GuestState.running.value:
color = '00BB00'
icon = 'glyph-icon icon-circle'
desc = '运行中'
elif _status == GuestState.creating.value:
color = 'FFC543'
icon = 'glyph-icon icon-spinner'
desc = ' '.join(['创建中', str(progress) + '%'])
elif _status == GuestState.blocked.value:
color = '3D4245'
icon = 'glyph-icon icon-minus-square'
desc = '被阻塞'
elif _status == GuestState.paused.value:
color = 'B7B904'
icon = 'glyph-icon icon-pause'
desc = '暂停'
elif _status == GuestState.shutdown.value:
color = '4E5356'
icon = 'glyph-icon icon-terminal'
desc = '关闭'
elif _status == GuestState.shutoff.value:
color = 'FFC543'
icon = 'glyph-icon icon-plug'
desc = '断电'
elif _status == GuestState.crashed.value:
color = '9E2927'
icon = 'glyph-icon icon-question'
desc = '已崩溃'
elif _status == GuestState.pm_suspended.value:
color = 'FCFF07'
icon = 'glyph-icon icon-anchor'
desc = '悬挂'
elif _status == GuestState.migrating.value:
color = '1CF5E7'
icon = 'glyph-icon icon-space-shuttle'
desc = '迁移中'
elif _status == GuestState.dirty.value:
color = 'FF0707'
icon = 'glyph-icon icon-remove'
desc = '创建失败,待清理'
else:
pass
return '<span class="{icon}" style="color: #{color};"> {desc}</span>'.format(
icon=icon, color=color, desc=desc)
def exchange_guest_bandwidth(bandwidth=None):
assert isinstance(bandwidth, int)
if bandwidth == 0:
bandwidth = '<span style="font-size: 16px;" title="无限带宽"> ∞</span>'
elif 0 < bandwidth < 1000 ** 2:
bandwidth = str(bandwidth // 1000) + ' Kbps'
elif 1000 ** 2 <= bandwidth < 1000 ** 3:
bandwidth = str(bandwidth // 1000 ** 2) + ' Mbps'
else:
bandwidth = str(bandwidth // 1000 ** 3) + ' Gbps'
return bandwidth
@Utils.dumps2response
def r_get_by_filter():
ret = guest_base.get_by_filter()
uuids = list()
for guest in ret['data']:
uuids.append(guest['uuid'])
rows, _ = SSHKeyGuestMapping.get_by_filter(filter_str=':'.join(['guest_uuid', 'in', ','.join(uuids)]))
guest_uuid_ssh_key_id_mapping = dict()
ssh_keys_id = list()
for row in rows:
if row['ssh_key_id'] not in ssh_keys_id:
ssh_keys_id.append(row['ssh_key_id'].__str__())
if row['guest_uuid'] not in guest_uuid_ssh_key_id_mapping:
guest_uuid_ssh_key_id_mapping[row['guest_uuid']] = list()
guest_uuid_ssh_key_id_mapping[row['guest_uuid']].append(row['ssh_key_id'])
rows, _ = SSHKey.get_by_filter(filter_str=':'.join(['id', 'in', ','.join(ssh_keys_id)]))
ssh_key_id_mapping = dict()
for row in rows:
row['url'] = url_for('v_ssh_keys.show')
ssh_key_id_mapping[row['id']] = row
rows, _ = Snapshot.get_by_filter(filter_str=':'.join(['guest_uuid', 'in', ','.join(uuids)]))
snapshots_guest_uuid_mapping = dict()
for row in rows:
guest_uuid = row['guest_uuid']
if guest_uuid not in snapshots_guest_uuid_mapping:
snapshots_guest_uuid_mapping[guest_uuid] = list()
snapshots_guest_uuid_mapping[guest_uuid].append(row)
hosts_url = url_for('api_hosts.r_get_by_filter', _external=True)
hosts_ret = requests.get(url=hosts_url, cookies=request.cookies)
hosts_ret = json.loads(hosts_ret.content)
hosts_mapping_by_node_id = dict()
for host in hosts_ret['data']:
hosts_mapping_by_node_id[int(host['node_id'])] = host
os_templates_image, _ = OSTemplateImage.get_by_filter()
os_templates_image_mapping_by_id = dict()
for os_template_image in os_templates_image:
os_templates_image_mapping_by_id[os_template_image['id']] = os_template_image
os_templates_profile, _ = OSTemplateProfile.get_by_filter()
os_templates_profile_mapping_by_id = dict()
for os_template_profile in os_templates_profile:
os_templates_profile_mapping_by_id[os_template_profile['id']] = os_template_profile
for i, guest in enumerate(ret['data']):
guest_uuid = ret['data'][i]['uuid']
if 'ssh_keys' not in ret['data'][i]:
ret['data'][i]['ssh_keys'] = list()
if guest_uuid in guest_uuid_ssh_key_id_mapping:
for ssh_key_id in guest_uuid_ssh_key_id_mapping[guest_uuid]:
if ssh_key_id not in ssh_key_id_mapping:
continue
ret['data'][i]['ssh_keys'].append(ssh_key_id_mapping[ssh_key_id])
if 'snapshot' not in ret['data'][i]:
ret['data'][i]['snapshot'] = {
'creatable': True,
'mapping': list()
}
if guest_uuid in snapshots_guest_uuid_mapping:
ret['data'][i]['snapshot']['mapping'] = snapshots_guest_uuid_mapping[guest_uuid]
for snapshot in snapshots_guest_uuid_mapping[guest_uuid]:
if snapshot['progress'] == 100:
continue
else:
ret['data'][i]['snapshot']['creatable'] = False
if not hosts_mapping_by_node_id[ret['data'][i]['node_id']]['alive']:
ret['data'][i]['status'] = GuestState.no_state.value
ret['data'][i]['hostname'] = hosts_mapping_by_node_id[guest['node_id']]['hostname']
ret['data'][i]['html'] = dict()
ret['data'][i]['html']['logo'], ret['data'][i]['html']['os_template_label'] = exchange_guest_os_templates_logo(
os_templates_image_mapping_by_id=os_templates_image_mapping_by_id,
os_templates_profile_mapping_by_id=os_templates_profile_mapping_by_id,
os_template_image_id=guest['os_template_image_id'])
ret['data'][i]['html']['status'] = format_guest_status(_status=guest['status'], progress=guest['progress'])
ret['data'][i]['html']['bandwidth'] = exchange_guest_bandwidth(bandwidth=guest['bandwidth'])
return ret
@Utils.dumps2response
def r_content_search():
ret = guest_base.content_search()
uuids = list()
for guest in ret['data']:
uuids.append(guest['uuid'])
rows, _ = SSHKeyGuestMapping.get_by_filter(filter_str=':'.join(['guest_uuid', 'in', ','.join(uuids)]))
guest_uuid_ssh_key_id_mapping = dict()
ssh_keys_id = list()
for row in rows:
if row['ssh_key_id'] not in ssh_keys_id:
ssh_keys_id.append(row['ssh_key_id'].__str__())
if row['guest_uuid'] not in guest_uuid_ssh_key_id_mapping:
guest_uuid_ssh_key_id_mapping[row['guest_uuid']] = list()
guest_uuid_ssh_key_id_mapping[row['guest_uuid']].append(row['ssh_key_id'])
rows, _ = SSHKey.get_by_filter(filter_str=':'.join(['id', 'in', ','.join(ssh_keys_id)]))
ssh_key_id_mapping = dict()
for row in rows:
row['url'] = url_for('v_ssh_keys.show')
ssh_key_id_mapping[row['id']] = row
rows, _ = Snapshot.get_by_filter(filter_str=':'.join(['guest_uuid', 'in', ','.join(uuids)]))
snapshots_guest_uuid_mapping = dict()
for row in rows:
guest_uuid = row['guest_uuid']
if guest_uuid not in snapshots_guest_uuid_mapping:
snapshots_guest_uuid_mapping[guest_uuid] = list()
snapshots_guest_uuid_mapping[guest_uuid].append(row)
hosts_url = url_for('api_hosts.r_get_by_filter', _external=True)
hosts_ret = requests.get(url=hosts_url, cookies=request.cookies)
hosts_ret = json.loads(hosts_ret.content)
hosts_mapping_by_node_id = dict()
for host in hosts_ret['data']:
hosts_mapping_by_node_id[int(host['node_id'])] = host
os_templates_image, _ = OSTemplateImage.get_by_filter()
os_templates_image_mapping_by_id = dict()
for os_template_image in os_templates_image:
os_templates_image_mapping_by_id[os_template_image['id']] = os_template_image
os_templates_profile, _ = OSTemplateProfile.get_by_filter()
os_templates_profile_mapping_by_id = dict()
for os_template_profile in os_templates_profile:
os_templates_profile_mapping_by_id[os_template_profile['id']] = os_template_profile
for i, guest in enumerate(ret['data']):
guest_uuid = ret['data'][i]['uuid']
if 'ssh_keys' not in ret['data'][i]:
ret['data'][i]['ssh_keys'] = list()
if guest_uuid in guest_uuid_ssh_key_id_mapping:
for ssh_key_id in guest_uuid_ssh_key_id_mapping[guest_uuid]:
if ssh_key_id not in ssh_key_id_mapping:
continue
ret['data'][i]['ssh_keys'].append(ssh_key_id_mapping[ssh_key_id])
if 'snapshot' not in ret['data'][i]:
ret['data'][i]['snapshot'] = {
'creatable': True,
'mapping': list()
}
if guest_uuid in snapshots_guest_uuid_mapping:
ret['data'][i]['snapshot']['mapping'] = snapshots_guest_uuid_mapping[guest_uuid]
for snapshot in snapshots_guest_uuid_mapping[guest_uuid]:
if snapshot['progress'] == 100:
continue
else:
ret['data'][i]['snapshot']['creatable'] = False
if not hosts_mapping_by_node_id[ret['data'][i]['node_id']]['alive']:
ret['data'][i]['status'] = GuestState.no_state.value
ret['data'][i]['hostname'] = hosts_mapping_by_node_id[guest['node_id']]['hostname']
ret['data'][i]['html'] = dict()
ret['data'][i]['html']['logo'], ret['data'][i]['html']['os_template_label'] = exchange_guest_os_templates_logo(
os_templates_image_mapping_by_id=os_templates_image_mapping_by_id,
os_templates_profile_mapping_by_id=os_templates_profile_mapping_by_id,
os_template_image_id=guest['os_template_image_id'])
ret['data'][i]['html']['status'] = format_guest_status(_status=guest['status'], progress=guest['progress'])
ret['data'][i]['html']['bandwidth'] = exchange_guest_bandwidth(bandwidth=guest['bandwidth'])
return ret
@Utils.dumps2response
def r_distribute_count():
from jimvc.models import Guest
rows, count = Guest.get_all()
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = {
'os_template_image_id': dict(),
'status': dict(),
'node_id': dict(),
'cpu_memory': dict(),
'cpu': 0,
'memory': 0,
'guests': rows.__len__()
}
for guest in rows:
if guest['os_template_image_id'] not in ret['data']['os_template_image_id']:
ret['data']['os_template_image_id'][guest['os_template_image_id']] = 0
if guest['status'] not in ret['data']['status']:
ret['data']['status'][guest['status']] = 0
if guest['node_id'] not in ret['data']['node_id']:
ret['data']['node_id'][guest['node_id']] = 0
cpu_memory = '_'.join([str(guest['cpu']), str(guest['memory'])])
if cpu_memory not in ret['data']['cpu_memory']:
ret['data']['cpu_memory'][cpu_memory] = 0
ret['data']['os_template_image_id'][guest['os_template_image_id']] += 1
ret['data']['status'][guest['status']] += 1
ret['data']['node_id'][guest['node_id']] += 1
ret['data']['cpu_memory'][cpu_memory] += 1
ret['data']['cpu'] += guest['cpu']
ret['data']['memory'] += guest['memory']
return ret
@Utils.dumps2response
def r_update(uuids):
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = list()
args_rules = [
Rules.UUIDS.value
]
if 'remark' in request.json:
args_rules.append(
Rules.REMARK.value,
)
if args_rules.__len__() < 2:
return ret
request.json['uuids'] = uuids
try:
ji.Check.previewing(args_rules, request.json)
guest = Guest()
# 检测所指定的 UUDIs 实例都存在
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
guest.remark = request.json.get('remark', guest.remark)
guest.update()
guest.get()
ret['data'].append(guest.__dict__)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_revise_ip(uuid, ip):
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
args_rules = [
Rules.UUID.value,
Rules.IP.value
]
try:
ji.Check.previewing(args_rules, {'uuid': uuid, 'ip': ip})
guest = Guest()
guest.uuid = uuid
guest.get_by('uuid')
guest.ip = ip
guest.update()
guest.get()
ret['data'] = guest.__dict__
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_reset_password(uuids, password):
args_rules = [
Rules.UUIDS.value,
Rules.PASSWORD.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids, 'password': password})
guest = Guest()
os_template_image = OSTemplateImage()
os_template_profile = OSTemplateProfile()
# 检测所指定的 UUDIs 实例都存在
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
os_template_image.id = guest.os_template_image_id
os_template_image.get()
os_template_profile.id = os_template_image.os_template_profile_id
os_template_profile.get()
user = 'root'
if os_template_profile.os_type == 'windows':
user = 'administrator'
# guest.password 由 guest 事件处理机更新。参见 @models/event_processory.py:189 附近。
message = {
'_object': 'guest',
'action': 'reset_password',
'uuid': guest.uuid,
'node_id': guest.node_id,
'os_type': os_template_profile.os_type,
'user': user,
'password': password,
'passback_parameters': {'password': password}
}
Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_allocate_bandwidth(uuids, bandwidth, bandwidth_unit):
args_rules = [
Rules.UUIDS.value,
Rules.BANDWIDTH_IN_URL.value,
Rules.BANDWIDTH_UNIT.value,
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids, 'bandwidth': bandwidth, 'bandwidth_unit': bandwidth_unit})
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
bandwidth = int(bandwidth)
if bandwidth_unit == 'k':
bandwidth = bandwidth * 1000
elif bandwidth_unit == 'm':
bandwidth = bandwidth * 1000 ** 2
elif bandwidth_unit == 'g':
bandwidth = bandwidth * 1000 ** 3
else:
ret['state'] = ji.Common.exchange_state(41203)
return ret
# http://man7.org/linux/man-pages/man8/tc.8.html
# 如果带宽大于 tc 所控最大速率,则置其为无限带宽
# 34359738360 等于 tc 最大可控字节速率,换算出的比特位
if bandwidth > 34359738360:
bandwidth = 0
guest = Guest()
# 检测所指定的 UUDIs 实例都存在
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
guest.bandwidth = bandwidth
message = {
'_object': 'guest',
'action': 'allocate_bandwidth',
'uuid': guest.uuid,
'node_id': guest.node_id,
'bandwidth': guest.bandwidth,
'passback_parameters': {'bandwidth': guest.bandwidth}
}
Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_adjust_ability(uuids, cpu, memory):
args_rules = [
Rules.UUIDS.value,
Rules.CPU.value,
Rules.MEMORY.value,
]
try:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
cpu = int(cpu)
memory = int(memory)
ji.Check.previewing(args_rules, {'uuids': uuids, 'cpu': cpu, 'memory': memory})
not_ready_yet_of_guests = list()
guest = Guest()
# 检测所指定的 UUDIs 实例都存在。且状态都为可以操作状态(即关闭状态)。
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
if guest.status != status.GuestState.shutoff.value:
not_ready_yet_of_guests.append(guest.__dict__)
if not_ready_yet_of_guests.__len__() > 0:
ret['state'] = ji.Common.exchange_state(41261)
ret['data'] = not_ready_yet_of_guests
return ret
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
guest.cpu = cpu
guest.memory = memory
message = {
'_object': 'guest',
'action': 'adjust_ability',
'uuid': guest.uuid,
'node_id': guest.node_id,
'cpu': guest.cpu,
'memory': guest.memory,
'passback_parameters': {'cpu': guest.cpu, 'memory': guest.memory}
}
Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_change_prepared_by(uuids, service_id):
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = list()
args_rules = [
Rules.UUIDS.value,
Rules.SERVICE_ID_IN_URL.value
]
try:
ji.Check.previewing(args_rules, {'uuids': uuids, 'service_id': service_id})
guest = Guest()
# 检测所指定的 UUDIs 实例都存在
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
for uuid in uuids.split(','):
guest.uuid = uuid
guest.get_by('uuid')
guest.service_id = int(service_id)
guest.update()
guest.get()
ret['data'].append(guest.__dict__)
return ret
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_refresh_guest_state():
try:
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
# 取全部活着的 hosts
available_hosts = Host.get_available_hosts(nonrandom=None)
if available_hosts.__len__() == 0:
ret['state'] = ji.Common.exchange_state(50351)
return ret
for host in available_hosts:
message = {
'_object': 'global',
'action': 'refresh_guest_state',
'node_id': host['node_id']
}
Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False))
except ji.PreviewingError, e:
return json.loads(e.message)
@Utils.dumps2response
def r_show():
args = list()
page = int(request.args.get('page', 1))
page_size = int(request.args.get('page_size', 20))
keyword = request.args.get('keyword', None)
if page is not None:
args.append('page=' + page.__str__())
if page_size is not None:
args.append('page_size=' + page_size.__str__())
if keyword is not None:
args.append('keyword=' + keyword.__str__())
hosts_url = url_for('api_hosts.r_get_by_filter', _external=True)
guests_url = url_for('api_guests.r_get_by_filter', _external=True)
if keyword is not None:
guests_url = url_for('api_guests.r_content_search', _external=True)
if args.__len__() > 0:
guests_url = guests_url + '?' + '&'.join(args)
hosts_ret = requests.get(url=hosts_url, cookies=request.cookies)
hosts_ret = json.loads(hosts_ret.content)
hosts_mapping_by_node_id = dict()
for host in hosts_ret['data']:
hosts_mapping_by_node_id[int(host['node_id'])] = host
guests_ret = requests.get(url=guests_url, cookies=request.cookies)
guests_ret = json.loads(guests_ret.content)
os_templates_image, _ = OSTemplateImage.get_by_filter()
os_templates_image_mapping_by_id = dict()
for os_template_image in os_templates_image:
os_templates_image_mapping_by_id[os_template_image['id']] = os_template_image
os_templates_profile, _ = OSTemplateProfile.get_by_filter()
os_templates_profile_mapping_by_id = dict()
for os_template_profile in os_templates_profile:
os_templates_profile_mapping_by_id[os_template_profile['id']] = os_template_profile
last_page = int(ceil(guests_ret['paging']['total'] / float(page_size)))
page_length = 5
pages = list()
if page < int(ceil(page_length / 2.0)):
for i in range(1, page_length + 1):
pages.append(i)
if i == last_page or last_page == 0:
break
elif last_page - page < page_length / 2:
for i in range(last_page - page_length + 1, last_page + 1):
if i < 1:
continue
pages.append(i)
else:
for i in range(page - page_length / 2, page + int(ceil(page_length / 2.0))):
pages.append(i)
if i == last_page or last_page == 0:
break
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = {
'guests': guests_ret['data'],
'os_templates_image_mapping_by_id': os_templates_image_mapping_by_id,
'os_templates_profile_mapping_by_id': os_templates_profile_mapping_by_id,
'hosts_mapping_by_node_id': hosts_mapping_by_node_id,
'paging': guests_ret['paging'],
'page': page,
'page_size': page_size,
'keyword': keyword,
'pages': pages,
'last_page': last_page
}
return ret
@Utils.dumps2response
def r_vnc(uuid):
guest_ret = guest_base.get(ids=uuid, ids_rule=Rules.UUID.value, by_field='uuid')
if '200' != guest_ret['state']['code']:
return guest_ret
hosts_url = url_for('api_hosts.r_get_by_filter', _external=True)
hosts_ret = requests.get(url=hosts_url, cookies=request.cookies)
hosts_ret = json.loads(hosts_ret.content)
hosts_mapping_by_node_id = dict()
for host in hosts_ret['data']:
hosts_mapping_by_node_id[int(host['node_id'])] = host
port = random.randrange(50000, 60000)
while True:
if not Utils.port_is_opened(port=port):
break
port = random.randrange(50000, 60000)
payload = {'listen_port': port, 'target_host': hosts_mapping_by_node_id[guest_ret['data']['node_id']]['hostname'],
'target_port': guest_ret['data']['vnc_port']}
db.r.rpush(app_config['ipc_queue'], json.dumps(payload, ensure_ascii=False))
time.sleep(1)
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = {
'port': port,
'vnc_password': guest_ret['data']['vnc_password']
}
return ret
@Utils.dumps2response
def r_detail(uuid):
hosts_url = url_for('api_hosts.r_get_by_filter', _external=True)
hosts_ret = requests.get(url=hosts_url, cookies=request.cookies)
hosts_ret = json.loads(hosts_ret.content)
hosts_mapping_by_node_id = dict()
for host in hosts_ret['data']:
hosts_mapping_by_node_id[int(host['node_id'])] = host
guest = Guest()
guest.uuid = uuid
guest.get_by(field='uuid')
guest.ssh_keys = list()
rows, _ = SSHKeyGuestMapping.get_by_filter(filter_str=':'.join(['guest_uuid', 'in', guest.uuid]))
ssh_keys_id = list()
for row in rows:
if row['ssh_key_id'] not in ssh_keys_id:
ssh_keys_id.append(row['ssh_key_id'].__str__())
rows, _ = SSHKey.get_by_filter(filter_str=':'.join(['id', 'in', ','.join(ssh_keys_id)]))
for row in rows:
row['url'] = url_for('v_ssh_keys.show')
if row['id'].__str__() not in ssh_keys_id:
continue
guest.ssh_keys.append(row)
os_template_image = OSTemplateImage()
os_template_image.id = guest.os_template_image_id.__str__()
os_template_image.get()
os_template_profiles, _ = OSTemplateProfile.get_by_filter()
os_templates_profile_mapping_by_id = dict()
for os_template_profile in os_template_profiles:
os_templates_profile_mapping_by_id[os_template_profile['id']] = os_template_profile
disks_url = url_for('api_disks.r_get_by_filter', filter='guest_uuid:in:' + guest.uuid, _external=True)
disks_ret = requests.get(url=disks_url, cookies=request.cookies)
disks = json.loads(disks_ret.content)['data']
if not hosts_mapping_by_node_id[guest.node_id]['alive']:
guest.status = GuestState.no_state.value
config = Config()
config.id = 1
config.get()
ret = dict()
ret['state'] = ji.Common.exchange_state(20000)
ret['data'] = {
'uuid': uuid,
'guest': guest.__dict__,
'os_template_image': os_template_image.__dict__,
'os_templates_profile_mapping_by_id': os_templates_profile_mapping_by_id,
'hosts_mapping_by_node_id': hosts_mapping_by_node_id,
'disks': disks,
'config': config.__dict__
}
return ret
| gpl-3.0 | 499,616,185,876,565,700 | 29.027854 | 139 | 0.556847 | false | 3.439967 | true | false | false |
gwu-libraries/sfm-utils | sfmutils/warc_iter.py | 1 | 7870 | from warcio.archiveiterator import WARCIterator
import json
import argparse
import logging
import sys
import os
from collections import namedtuple
log = logging.getLogger(__name__)
IterItem = namedtuple('IterItem', ['type', 'id', 'date', 'url', 'item'])
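# An IterItem bundles the item type label, the item's id, the item's date (or None),
# the WARC record's target URI, and the parsed item payload.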
class BaseWarcIter:
"""
Base class for a warc iterator. A warc iterator iterates over the social media
items recorded in a WARC file.
This supports payloads which are json or line-oriented json.
    Subclasses should override _select_record(), _item_iter(), item_types, and
possibly line_oriented.
"""
def __init__(self, filepaths):
if isinstance(filepaths, str):
self.filepaths = (filepaths,)
else:
self.filepaths = filepaths
def __iter__(self):
return self.iter()
@staticmethod
def _debug_counts(filename, record_count, yield_count, by_record_count=True):
should_debug = False
if by_record_count and record_count <= 100 and record_count % 10 == 0:
should_debug = True
elif by_record_count and 100 < record_count and record_count % 100 == 0:
should_debug = True
elif not by_record_count and yield_count <= 1000 and yield_count % 100 == 0:
should_debug = True
elif not by_record_count and 1000 < yield_count and yield_count % 1000 == 0:
should_debug = True
if should_debug:
log.debug("File %s. Processed %s records. Yielded %s items.", filename, record_count, yield_count)
def iter(self, limit_item_types=None, dedupe=False, item_date_start=None, item_date_end=None):
"""
:return: Iterator returning IterItems.
"""
seen_ids = {}
for filepath in self.filepaths:
log.info("Iterating over %s", filepath)
filename = os.path.basename(filepath)
with open(filepath, 'rb') as f:
yield_count = 0
for record_count, record in enumerate((r for r in WARCIterator(f) if r.rec_type == 'response')):
self._debug_counts(filename, record_count, yield_count, by_record_count=True)
record_url = record.rec_headers.get_header('WARC-Target-URI')
record_id = record.rec_headers.get_header('WARC-Record-ID')
if self._select_record(record_url):
stream = record.content_stream()
line = stream.readline().decode('utf-8')
while line:
json_obj = None
try:
if line != "\r\n":
# A non-line-oriented payload only has one payload part.
json_obj = json.loads(line)
except ValueError:
log.warning("Bad json in record %s: %s", record_id, line)
if json_obj:
for item_type, item_id, item_date, item in self._item_iter(record_url, json_obj):
# None for item_type indicates that the type is not handled. OK to ignore.
if item_type is not None:
yield_item = True
if limit_item_types and item_type not in limit_item_types:
yield_item = False
if item_date_start and item_date and item_date < item_date_start:
yield_item = False
if item_date_end and item_date and item_date > item_date_end:
yield_item = False
if not self._select_item(item):
yield_item = False
if dedupe and yield_item:
if item_id in seen_ids:
yield_item = False
else:
seen_ids[item_id] = True
if yield_item:
if item is not None:
yield_count += 1
self._debug_counts(filename, record_count, yield_count,
by_record_count=False)
yield IterItem(item_type, item_id, item_date, record_url, item)
else:
log.warn("Bad response in record %s", record_id)
line = stream.readline().decode('utf-8')
def _select_record(self, url):
"""
Return True to process this record. This allows a WarcIter to only process
records for the type of social media content that it handles.
"""
pass
def _select_item(self, item):
"""
Return True to select this item. This allows a WarcIter to filter items.
"""
return True
def print_iter(self, pretty=False, fp=sys.stdout, limit_item_types=None, print_item_type=False, dedupe=False):
for item_type, _, _, _, item in self.iter(limit_item_types=limit_item_types, dedupe=dedupe):
if print_item_type:
fp.write("{}:".format(item_type))
json.dump(item, fp, indent=4 if pretty else None)
fp.write("\n")
def _item_iter(self, url, json_obj):
"""
Returns an iterator over the social media item types and items (as JSON objects).
:returns item_type, item_id, item_date, item iterator
"""
pass
@staticmethod
def item_types():
"""
Returns a list of item types that are handled by this WarcIter.
"""
pass
@property
def line_oriented(self):
"""
Indicates whether the payload should be handled as line-oriented.
Subclasses that support line-oriented payloads should return True.
"""
return False
@staticmethod
def main(cls):
# Logging
logging.basicConfig(format='%(asctime)s: %(name)s --> %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
item_types = cls.item_types()
if len(item_types) > 1:
parser.add_argument("--item-types",
help="A comma separated list of item types to limit the results. "
"Item types are {}".format(", ".join(item_types)))
parser.add_argument("--pretty", action="store_true", help="Format the json for viewing.")
parser.add_argument("--dedupe", action="store_true", help="Remove duplicate items.")
parser.add_argument("--print-item-type", action="store_true", help="Print the item type.")
parser.add_argument("--debug", type=lambda v: v.lower() in ("yes", "true", "t", "1"), nargs="?",
default="False", const="True")
parser.add_argument("filepaths", nargs="+", help="Filepath of the warc.")
args = parser.parse_args()
# Logging
logging.getLogger().setLevel(logging.DEBUG if args.debug else logging.INFO)
main_limit_item_types = args.item_types.split(",") if "item_types" in vars(args) else None
cls(args.filepaths).print_iter(limit_item_types=main_limit_item_types, pretty=args.pretty,
print_item_type=args.print_item_type, dedupe=args.dedupe)
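# --- Editorial example (not part of the original module): a minimal sketch of a
# BaseWarcIter subclass, assuming a hypothetical "example" API whose JSON payloads
# carry an "id" field. Real subclasses (e.g. for specific social media platforms)
# live in their harvester repositories and override the same three members.
class ExampleWarcIter(BaseWarcIter):
    def _select_record(self, url):
        # Only process records captured from the hypothetical example API.
        return url.startswith("https://api.example.com/")
    def _item_iter(self, url, json_obj):
        # Yield (item_type, item_id, item_date, item) tuples; the date is left None here.
        yield "example_post", json_obj.get("id"), None, json_obj
    @staticmethod
    def item_types():
        return ["example_post"]
# Usage sketch: BaseWarcIter.main(ExampleWarcIter) would expose this iterator as a CLI.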
| mit | 763,000,765,478,507,900 | 45.023392 | 114 | 0.509022 | false | 4.599649 | false | false | false |
espadrine/opera | chromium/src/third_party/protobuf/python/google/protobuf/internal/wire_format.py | 561 | 8431 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Constants and static functions to support protocol buffer wire format."""
__author__ = '[email protected] (Will Robinson)'
import struct
from google.protobuf import descriptor
from google.protobuf import message
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise message.EncodeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type
def UnpackTag(tag):
"""The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
"""
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
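# Worked example (editorial note, not in the original source): for field number 5
# and a length-delimited value, PackTag(5, WIRETYPE_LENGTH_DELIMITED) returns
# (5 << 3) | 2 == 42, and UnpackTag(42) returns (5, 2).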
def ZigZagEncode(value):
"""ZigZag Transform: Encodes signed integers so that they can be
effectively used with varint encoding. See wire_format.h for
more details.
"""
if value >= 0:
return value << 1
return (value << 1) ^ (~0)
def ZigZagDecode(value):
"""Inverse of ZigZagEncode()."""
if not value & 0x1:
return value >> 1
return (value >> 1) ^ (~0)
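# Worked example (editorial note, not in the original source): ZigZagEncode maps
# 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ..., so small negative values stay small
# when varint-encoded; e.g. ZigZagEncode(-3) == 5 and ZigZagDecode(5) == -3.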
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
# Have to convert to uint before calling UInt64ByteSize().
return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
return BytesByteSize(field_number, string.encode('utf-8'))
def BytesByteSize(field_number, b):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(len(b))
+ len(b))
def GroupByteSize(field_number, message):
return (2 * TagByteSize(field_number) # START and END group.
+ message.ByteSize())
def MessageByteSize(field_number, message):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(message.ByteSize())
+ message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
# First compute the sizes of the tags.
# There are 2 tags for the beginning and ending of the repeated group, that
# is field number 1, one with field number 2 (type_id) and one with field
# number 3 (message).
total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
# Add the number of bytes for type_id.
total_size += _VarUInt64ByteSizeNoTag(field_number)
message_size = msg.ByteSize()
# The number of bytes for encoding the length of the message.
total_size += _VarUInt64ByteSizeNoTag(message_size)
# The size of the message.
total_size += message_size
return total_size
def TagByteSize(field_number):
"""Returns the bytes required to serialize a tag with this field number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
NON_PACKABLE_TYPES = (
descriptor.FieldDescriptor.TYPE_STRING,
descriptor.FieldDescriptor.TYPE_GROUP,
descriptor.FieldDescriptor.TYPE_MESSAGE,
descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
"""Return true iff packable = true is valid for fields of this type.
Args:
field_type: a FieldDescriptor::Type value.
Returns:
True iff fields of this type are packable.
"""
return field_type not in NON_PACKABLE_TYPES
| bsd-3-clause | 3,566,976,627,402,225,700 | 30.458955 | 77 | 0.735619 | false | 3.618455 | false | false | false |
WellDone/pymomo | pymomo/mib/api.py | 2 | 1460 | #api.py
#Routines for dumping the MIB API region of a mib12 executive module and verifying
#the contents to make sure they have not been stomped on by some other process.
from pymomo.hex8.decode import *
from pymomo.utilities.paths import MomoPaths
from pymomo.utilities import build
from config12 import MIB12Processor
from pymomo.utilities import intelhex
class MIBAPI:
def __init__(self, hexfile, chip):
with open(hexfile, "r"):
self.hf = intelhex.IntelHex16bit(hexfile)
proc = MIB12Processor.FromChip(chip)
self.api_base = proc.api_range[0]
self.valid = self.verify_api()
def verify_api(self):
"""
Verify that all instructions in the MIB api region are either retlw 0
or goto.
"""
for i in xrange(0, 16):
try:
val = decode_retlw(self.hf, self.api_base + i)
if val == 0:
continue
return False
except:
pass
try:
decode_goto(self.hf, self.api_base + i)
continue
except:
pass
return False
return True
def print_api(self):
print "MIB API Block"
print "Valid:", self.valid
print "\nTable Contents Follow"
for i in xrange(0, 16):
try:
val = decode_retlw(self.hf, self.api_base + i)
print "%d: retlw 0x%x" % (i, val)
continue
except:
pass
try:
addr = decode_goto(self.hf, self.api_base + i)
print "%d: goto 0x%x" % (i, addr)
continue
except:
pass
print "%d: Invalid Instruction (0x%x)" % (i, self.hf[self.api_base + i])
| lgpl-3.0 | 7,744,014,180,232,141,000 | 21.121212 | 82 | 0.662329 | false | 2.857143 | false | false | false |
tomix86/hpcpm | api/hpcpm/api/resources/endpoints/nodes/computation_node/ComputationNode.py | 1 | 4792 | import json
import requests
from flask_restful import Resource, request, abort
from flask_restful_swagger import swagger
from hpcpm.api import log
from hpcpm.api.helpers.database import database
from hpcpm.api.helpers.utils import abort_when_port_invalid
from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_PARAM_ADDRESS, \
COMPUTATION_NODE_PARAM_PORT, COMPUTATION_NODE_ADDED_RESPONSE, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \
COMPUTATION_NODE_FETCHED_RESPONSE, COMPUTATION_NODE_PUT_NOT_FOUND_RESPONSE
from hpcpm.api.helpers.requests import get_node_information, delete_power_limit
from hpcpm.api.resources.endpoints.nodes.computation_node.StatisticsInterval import set_statistics_interval
class ComputationNode(Resource):
@swagger.operation(
notes='This endpoint is used for registering new computation node',
nickname='/nodes/computation_node/<string:name>',
parameters=[
COMPUTATION_NODE_PARAM_NAME,
COMPUTATION_NODE_PARAM_ADDRESS,
COMPUTATION_NODE_PARAM_PORT
],
responseMessages=[
COMPUTATION_NODE_ADDED_RESPONSE,
COMPUTATION_NODE_PUT_NOT_FOUND_RESPONSE
]
)
def put(self, name):
address = request.args.get('address')
port = request.args.get('port')
abort_when_port_invalid(port)
node_by_ip = database.get_computation_node_info_by_address(address, port)
if node_by_ip and node_by_ip.get('name') != name:
log.warning('Node with IP: %s:%s is present in database: %s', address, port, node_by_ip)
try:
response = get_node_information(address, port)
except requests.exceptions.ConnectionError:
log.error('Connection could not be established to %s:%s', address, port)
abort(406)
log.info('Response %s:', response.text)
backend_info = json.loads(response.text)
node_info = {
'name': name,
'address': address,
'port': port,
'backend_info': backend_info
}
upsert_result = database.replace_computation_node_info(name, node_info)
if upsert_result.modified_count:
log.info('Node %s was already present in a database', name)
log.info('Stored Node info %s', node_info)
else:
log.info('Stored Node info %s on id %s', node_info, upsert_result.upserted_id)
for device in backend_info['devices']:
set_statistics_interval(name, device['id'], 1)
return name, 201
@swagger.operation(
notes='This endpoint is used for getting computation node information from database',
nickname='/nodes/computation_node/<string:name>',
parameters=[
COMPUTATION_NODE_PARAM_NAME
],
responseMessages=[
COMPUTATION_NODE_FETCHED_RESPONSE,
COMPUTATION_NODE_NOT_FOUND_RESPONSE
]
)
def get(self, name):
result = database.get_computation_node_info(name)
if not result:
log.info('No such computation node %s', name)
abort(404)
log.info('Successfully get node %s info: %s', name, result)
return result, 200
@swagger.operation(
notes='This endpoint is used for removing computation node information from database',
nickname='/nodes/computation_node/<string:name>',
parameters=[
COMPUTATION_NODE_PARAM_NAME
],
responseMessages=[
COMPUTATION_NODE_FETCHED_RESPONSE,
COMPUTATION_NODE_NOT_FOUND_RESPONSE
]
)
def delete(self, name):
result_node_info = database.delete_computation_node_info(name)
result_power_limit_info = database.delete_power_limit_infos(name)
if not result_node_info:
log.info('No such computation node %s', name)
abort(404)
if not result_power_limit_info:
log.info('No such power limit info for node %s', name)
abort(404)
address = result_node_info.get('address')
port = result_node_info.get('port')
abort_when_port_invalid(port)
for device in result_node_info['backend_info']['devices']:
try:
response = delete_power_limit(address, port, device['id'], device['Type'])
log.info('Device %s deletion info: %s', device['id'], response)
except requests.exceptions.ConnectionError:
log.error('Connection could not be established to %s:%s', address, port)
abort(406)
log.info('Successfully deleted node %s info and its power limit: %s %s', name, result_node_info,
result_power_limit_info)
return 204
| mit | -1,720,521,261,261,079,300 | 37.95935 | 107 | 0.629382 | false | 3.993333 | false | false | false |
openconnectome/ndlib | restutil.py | 3 | 2484 | # Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import requests
from ndlib.ndtype import *
try:
from django.conf import settings
SECRET_TOKEN = settings.SECRET_TOKEN
except:
SECRET_TOKEN = None
def generateURLBlosc(server_name, token_name, channel_list, res_value, range_args, direct=False):
"""Generate a URL for blosc"""
try:
url = "https://{}/sd/{}/{}/blosc/{}/{},{}/{},{}/{},{}/".format(server_name, token_name, ','.join(channel_list), res_value, *range_args)
if direct:
url = url + DIRECT
except Exception as e:
return ""
return url
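# Example (editorial sketch, all values hypothetical): generateURLBlosc("nd.example.org",
# "demo", ["chan1"], 0, (0, 512, 0, 512, 0, 16)) builds
# "https://nd.example.org/sd/demo/chan1/blosc/0/0,512/0,512/0,16/".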
def generateURLBlaze(server_name, token_name, channel_list, res_value, range_args):
"""Generate a URL for blosc"""
try:
url = "https://{}/blaze/{}/{}/blosc/{}/{},{}/{},{}/{},{}/".format(server_name, token_name, ','.join(channel_list), res_value, *range_args)
except Exception as e:
return ""
return url
def postJson(url, data):
try:
response = requests.post(url, json=data, headers={'Authorization': 'Token {}'.format(SECRET_TOKEN)} if SECRET_TOKEN else None, verify=False)
return response
except requests.HTTPError as e:
return e
def getJson(url):
return getURL(url)
def deleteJson(url):
return deleteURL(url)
def getURL(url):
try:
response = requests.get(url, headers={'Authorization': 'Token {}'.format(SECRET_TOKEN)} if SECRET_TOKEN else None, verify=False)
return response
except requests.HTTPError as e:
return e
def deleteURL(url):
try:
response = requests.delete(url, headers={'Authorization': 'Token {}'.format(SECRET_TOKEN)} if SECRET_TOKEN else None, verify=False)
return response
except requests.HTTPError as e:
return e
def postURL(url, data):
try:
response = requests.post(url, data, headers={'Authorization': 'Token {}'.format(SECRET_TOKEN)} if SECRET_TOKEN else None, verify=False)
return response
except requests.HTTPError as e:
return e
| apache-2.0 | -3,062,821,905,153,752,000 | 29.666667 | 144 | 0.69525 | false | 3.584416 | false | false | false |
JohnGarbutt/taskflow-1 | taskflow/patterns/graph_flow.py | 1 | 5825 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import logging
from networkx import exception as g_exc
from networkx.algorithms import dag
from networkx.classes import digraph
from taskflow import exceptions as exc
from taskflow.patterns import ordered_flow
LOG = logging.getLogger(__name__)
class Flow(ordered_flow.Flow):
"""A flow which will analyze the attached tasks input requirements and
determine who provides said input and order the task so that said providing
task will be ran before."""
def __init__(self, name, parents=None, allow_same_inputs=True):
super(Flow, self).__init__(name, parents)
self._graph = digraph.DiGraph()
self._connected = False
self._allow_same_inputs = allow_same_inputs
def add(self, task):
# Do something with the task, either store it for later
# or add it to the graph right now...
#
# Only insert the node to start, connect all the edges
# together later after all nodes have been added.
self._graph.add_node(task)
self._connected = False
def _fetch_task_inputs(self, task):
def extract_inputs(place_where, would_like, is_optional=False):
for n in would_like:
for (them, there_result) in self.results:
if not n in set(getattr(them, 'provides', [])):
continue
if (not is_optional and
not self._graph.has_edge(them, task)):
continue
if there_result and n in there_result:
place_where[n].append(there_result[n])
if is_optional:
# Take the first task that provides this optional
# item.
break
elif not is_optional:
place_where[n].append(None)
required_inputs = set(getattr(task, 'requires', []))
optional_inputs = set(getattr(task, 'optional', []))
optional_inputs = optional_inputs - required_inputs
task_inputs = collections.defaultdict(list)
extract_inputs(task_inputs, required_inputs)
extract_inputs(task_inputs, optional_inputs, is_optional=True)
def collapse_functor(k_v):
(k, v) = k_v
if len(v) == 1:
v = v[0]
return (k, v)
return dict(map(collapse_functor, task_inputs.iteritems()))
def order(self):
self.connect()
try:
return dag.topological_sort(self._graph)
except g_exc.NetworkXUnfeasible:
raise exc.InvalidStateException("Unable to correctly determine "
"the path through the provided "
"flow which will satisfy the "
"tasks needed inputs and outputs.")
def connect(self):
"""Connects the nodes & edges of the graph together."""
if self._connected or len(self._graph) == 0:
return
# Figure out the provider of items and the requirers of items.
provides_what = collections.defaultdict(list)
requires_what = collections.defaultdict(list)
for t in self._graph.nodes_iter():
for r in getattr(t, 'requires', []):
requires_what[r].append(t)
for p in getattr(t, 'provides', []):
provides_what[p].append(t)
def get_providers(node, want_what):
providers = []
for (producer, me) in self._graph.in_edges_iter(node):
providing_what = self._graph.get_edge_data(producer, me)
if want_what in providing_what:
providers.append(producer)
return providers
# Link providers to consumers of items.
for (want_what, who_wants) in requires_what.iteritems():
who_provided = 0
for p in provides_what[want_what]:
                # P produces for N so that's why we link P->N and not N->P
for n in who_wants:
if p is n:
# No self-referencing allowed.
continue
if (len(get_providers(n, want_what)) and not
self._allow_same_inputs):
msg = "Multiple providers of %s not allowed."
raise exc.InvalidStateException(msg % (want_what))
self._graph.add_edge(p, n, attr_dict={
want_what: True,
})
who_provided += 1
if not who_provided:
who_wants = ", ".join([str(a) for a in who_wants])
raise exc.InvalidStateException("%s requires input %s "
"but no other task produces "
"said output." % (who_wants,
want_what))
self._connected = True
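# --- Editorial usage sketch (not part of the original module). The graph flow only
# assumes tasks are hashable and expose 'requires'/'provides' attributes, as read via
# getattr() above; a hypothetical stub task is enough to show the ordering:
#
#   class StubTask(object):
#       def __init__(self, name, requires=(), provides=()):
#           self.name, self.requires, self.provides = name, set(requires), set(provides)
#       def __repr__(self):
#           return self.name
#
#   flow = Flow("demo")
#   flow.add(StubTask("fetch", provides=["image"]))
#   flow.add(StubTask("resize", requires=["image"], provides=["thumbnail"]))
#   flow.order()  # -> [fetch, resize]: 'resize' consumes what 'fetch' provides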
| apache-2.0 | 4,437,600,558,059,972,600 | 39.734266 | 79 | 0.545751 | false | 4.586614 | false | false | false |
openprocurement/robot_tests.broker.publicportal | publicportal.py | 1 | 1378 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
def parse_date_publicportal(date_str):
return ' '.join([date_str.split(' ')[1], date_str.split(' ')[-1]])
def subtract_from_time(date_time, subtr_min, subtr_sec):
sub = (datetime.strptime(date_time, "%d.%m.%Y %H:%M") - timedelta(minutes=int(subtr_min), seconds=int(subtr_sec)))
return sub.isoformat()
def insert_tender_id_into_xpath(xpath_to_change, tender_id):
return xpath_to_change.format(tender_id)
def adapt_tender_data(tender_data):
sum_for_value, sum_for_minimal_step = 0, 0
for lot in tender_data['data'].get('lots', []):
lot['value']['amount'] = int(lot['value']['amount'])
sum_for_value += lot['value']['amount']
lot['minimalStep']['amount'] = int(lot['minimalStep']['amount'])
sum_for_minimal_step += lot['minimalStep']['amount']
tender_data['data']['minimalStep']['amount'] = sum_for_minimal_step if sum_for_minimal_step != 0 else int(
tender_data['data']['minimalStep']['amount'])
tender_data['data']['value']['amount'] = sum_for_value if sum_for_value != 0 else int(
tender_data['data']['value']['amount'])
return tender_data
def get_only_numbers(given_string):
numbers = int(u''.join([s for s in given_string.split() if s.isdigit()]))
return numbers
| apache-2.0 | 5,936,651,659,007,901,000 | 37.277778 | 118 | 0.613933 | false | 3.3125 | false | false | false |
etsrepo/currentcostgui | currentcostcomhistory.py | 9 | 4524 | # -*- coding: utf-8 -*-
#
# CurrentCost GUI
#
# Copyright (C) 2008 Dale Lane
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The author of this code can be contacted at [email protected]
# Any contact about this application is warmly welcomed.
#
import os
import serial
import time
import string
from currentcostparser import CurrentCostDataParser
from currentcostdb import CurrentCostDB
from tracer import CurrentCostTracer
# class for logging diagnostics
trc = CurrentCostTracer()
#
#
#
#
# Dale Lane (http://dalelane.co.uk/blog)
class CurrentCostSerialHistoryConnection():
guicallback = None
#
# Establish a connection to the CurrentCost meter
#
def EstablishConnection(self, comportobj, guihandle, dbfilelocation):
global trc
trc.FunctionEntry("EstablishConnection")
self.ser = comportobj
self.toCancel = False
self.guicallback = guihandle
myparser = CurrentCostDataParser()
#
# we create our own connection to the database here
#
# we need our own connection to the database because we are running
# in a background thread, and pysqlite (used to implement the database)
# cannot reuse a connection across multiple threads
# the connection is relatively low cost, so having two connections open
# - one for the GUI thread and one for this background worker thread -
# doesn't seem like a burdensome extravagance :-)
#
dbconnection = CurrentCostDB()
dbconnection.InitialiseDB(dbfilelocation)
#
# look for the current reading in the data
#
line = ""
receivedHistory = False
while self.toCancel == False:
try:
line = self.ser.readUpdate()
# try to parse the XML
currentcoststruct = myparser.parseCurrentCostXML(line)
if currentcoststruct != None:
if 'hist' in currentcoststruct['msg']:
# we have received history data - parse and store the CurrentCost
# data in the datastore
# the parser will return the number of updates still expected
# (0 if this was the last or only expected update)
myparser.storeTimedCurrentCostData(dbconnection)
receivedHistory = True
elif receivedHistory == True:
# we received live data only
# if we have received un-graphed history data, we refresh the
# graphs now
trc.Trace("finished receiving history data - need to redraw graphs")
self.guicallback.updateGraphs()
receivedHistory = False
except Exception, exception:
if self.toCancel == False:
self.guicallback.exitOnError('Error reading from COM port: ' + str(exception))
trc.Error("Error when closing COM port")
trc.Error(str(exception))
trc.FunctionExit("EstablishConnection")
return
# cleanup
dbconnection.CloseDB()
try:
self.ser.disconnect()
except Exception, exc:
self.guicallback.exitOnError('Error when closing COM port')
trc.Error("Error when closing COM port")
trc.Error(str(exc))
trc.FunctionExit("EstablishConnection")
#
# Disconnect from the serial port
#
def Disconnect(self):
self.toCancel = True
self.ser.disconnect()
| gpl-3.0 | -8,730,828,092,541,913,000 | 33.34375 | 98 | 0.587533 | false | 4.569697 | false | false | false |
vedgar/ip | Chomsky/TS.py | 1 | 2189 | from util import *
class TuringovStroj(types.SimpleNamespace):
@classmethod
def iz_komponenti(klasa, stanja, abeceda, radna_abeceda, praznina,
prijelaz, početno, prihvat):
assert abeceda
assert abeceda <= radna_abeceda
assert praznina in radna_abeceda - abeceda
assert {početno, prihvat} <= stanja
assert funkcija(prijelaz,
Kartezijev_produkt(stanja - {prihvat}, radna_abeceda),
Kartezijev_produkt(stanja, radna_abeceda, {-1, 1}))
return klasa(**vars())
@classmethod
def iz_tablice(klasa, tablica):
"""Parsiranje tabličnog zapisa Turingovog stroja.
Pogledati funkciju util.parsiraj_tablicu_TS za detalje."""
return klasa.iz_komponenti(*parsiraj_tablicu_TS(tablica))
@property
def komponente(stroj):
"""Relacijska definicija - rastav u sedmorku."""
return (stroj.stanja, stroj.abeceda, stroj.radna_abeceda,
stroj.praznina, stroj.prijelaz, stroj.početno, stroj.prihvat)
def prihvaća(T, riječ):
"""Prihvaća li Turingov stroj T zadanu riječ?
Poluodlučivo: može zapeti u beskonačnoj petlji ako ne prihvaća."""
return T.rezultat(riječ) is not None
def izračunavanje(T, riječ):
assert set(riječ) <= T.abeceda
stanje, pozicija, traka = T.početno, 0, list(riječ)
yield stanje, pozicija, traka
while stanje != T.prihvat:
if pozicija >= len(traka): traka.append(T.praznina)
stanje, traka[pozicija], pomak = T.prijelaz[stanje, traka[pozicija]]
pozicija = max(pozicija + pomak, 0)
yield stanje, pozicija, traka
def rezultat(T, riječ):
for stanje, pozicija, traka in T.izračunavanje(riječ):
if stanje == T.prihvat: break
if (T.prijelaz[stanje, T.praznina] == (stanje, T.praznina, 1) and
pozicija == len(traka)): return
while traka and traka[~0] == T.praznina: del traka[~0]
join_ok = all(type(znak) is str and len(znak) == 1 for znak in traka)
return ''.join(traka) if join_ok else traka
| unlicense | 8,859,561,109,812,507,000 | 40.692308 | 80 | 0.618542 | false | 2.587112 | false | false | false |
anastue/netforce | netforce_mfg/netforce_mfg/controllers/issue_rm.py | 4 | 1553 | from netforce.controller import Controller
from netforce import database
from netforce.model import get_model,clear_cache
from netforce import template
class IssueRM(Controller):
_path="/issue_rm"
def get(self):
db=database.get_connection()
try:
data={}
html=template.render("issue_rm",data)
self.write(html)
db.commit()
except Exception as e:
db.rollback()
import traceback
traceback.print_exc()
def post(self):
db=database.get_connection()
data={}
try:
data["barcode"]=self.get_argument("barcode",None)
data["qty"]=self.get_argument("qty",None)
barcode=data["barcode"]
if not barcode:
raise Exception("Missing barcode!")
barcode=int(barcode)
qty=data["qty"]
if not qty:
raise Exception("Missing qty!")
qty=int(qty)
res=get_model("production.component").search([["id","=",barcode]])
if not res:
raise Exception("Invalid barcode")
comp_id=res[0]
# TODO: create goods issue for that component
db.commit()
self.redirect("/issue_rm")
except Exception as e:
data["error"]="ERROR: %s"%e
html=template.render("issue_rm",data)
self.write(html)
db.rollback()
import traceback
traceback.print_exc()
IssueRM.register()
| mit | 2,009,952,782,927,295,200 | 30.06 | 78 | 0.537025 | false | 4.501449 | false | false | false |
oczkers/gdown | gdown/modules/rapidu.py | 1 | 1575 | # -*- coding: utf-8 -*-
"""
gdown.modules.rapidu
~~~~~~~~~~~~~~~~~~~
This module contains handlers for rapidu.
"""
import re
from datetime import datetime, timedelta
from ..module import browser, acc_info_template
from ..exceptions import ModuleError
def accInfo(username, passwd, proxy=False):
"""Returns account info."""
acc_info = acc_info_template()
r = browser(proxy)
r.verify = False
rc = r.post('https://rapidu.net/ajax.php?a=getUserLogin', {'login': username, 'pass': passwd, 'remember': 0, '_go': ''}).json()
if rc['message'] == 'error':
acc_info['status'] = 'deleted'
return acc_info
elif rc['message'] == 'success':
rc = r.get('https://rapidu.net').text
open('gdown.log', 'w').write(rc)
if 'Account: <b>Free</b>' in rc or 'Konto: <b>Free</b>' in rc:
acc_info['status'] = 'free'
return acc_info
else:
days = re.search('A?c?c?o?u?n?t?K?o?n?t?o?: <b>Premium \(([0-9]+) dz?i?e?ń?a?y?s?n?i?\)</b>', rc).group(1) # TODO: this is just wrong
acc_info['status'] = 'premium'
acc_info['expire_date'] = datetime.utcnow() + timedelta(days=int(days))
acc_info['transfer'] = re.search('class="tipsyS"><b>(.+?)</b>', rc).group(1)
return acc_info
elif rc['message'] == 'block':
acc_info['status'] = 'blocked'
return acc_info
else:
print(rc)
open('gdown.log', 'w').write(rc) # this won't work - json.dumps first
ModuleError('Unknown error, full log in gdown.log')
| gpl-3.0 | 7,319,476,821,231,289,000 | 33.977778 | 146 | 0.561626 | false | 3.148 | false | false | false |
stevegood/filesync-server | src/backends/filesync/data/downloadservices.py | 6 | 5228 | # Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""Services for handling downloads."""
from backends.filesync.data import model, errors
from backends.filesync.data.gateway import SystemGateway
from backends.filesync.data.dbmanager import (
get_shard_ids,
retryable_transaction,
fsync_commit,
fsync_readonly,
)
# states
UNKNOWN = "Unknown"
DOWNLOADED_NOT_PRESENT = "Downloaded But Not Present"
QUEUED = model.DOWNLOAD_STATUS_QUEUED
DOWNLOADING = model.DOWNLOAD_STATUS_DOWNLOADING
DOWNLOADED = model.DOWNLOAD_STATUS_COMPLETE
ERROR = model.DOWNLOAD_STATUS_ERROR
@retryable_transaction()
@fsync_commit
def download_start(user_id, download_id):
"""Start the download."""
SystemGateway().update_download(user_id, download_id,
status=model.DOWNLOAD_STATUS_DOWNLOADING)
@retryable_transaction()
@fsync_commit
def download_error(user_id, download_id, message):
"""Mark the download as in error."""
return SystemGateway().update_download(
user_id, download_id,
status=model.DOWNLOAD_STATUS_ERROR, error_message=message)
@retryable_transaction()
@fsync_commit
def download_complete(user_id, download_id, hash, crc32, size,
deflated_size, mimetype, storage_key):
"""Complete the download."""
gw = SystemGateway()
return gw.download_complete(user_id, download_id, hash, crc32, size,
deflated_size, mimetype, storage_key)
@retryable_transaction()
@fsync_commit
def get_or_make_download(user_id, volume_id, path, download_url, dl_key):
"""Get or make a download if it doesn't already exist."""
gw = SystemGateway()
try:
download = gw.get_download(
user_id, volume_id, path, download_url, dl_key)
except errors.DoesNotExist:
download = gw.make_download(
user_id, volume_id, path, download_url, dl_key)
return download
@retryable_transaction()
@fsync_commit
def download_update(user_id, download_id, status=None,
node_id=None, error_message=None):
"""Update a download directly.
Typically this isn't used.
"""
gw = SystemGateway()
return gw.update_download(user_id, download_id, status=status,
node_id=node_id, error_message=error_message)
@fsync_readonly
def get_status_from_download(user_id, download):
"""Gets the status from a download object."""
gw = SystemGateway()
if download.status == model.DOWNLOAD_STATUS_COMPLETE:
# check if the file is actually present
user = gw.get_user(user_id)
try:
gw.get_node(download.node_id, user.shard_id)
except errors.DoesNotExist:
return DOWNLOADED_NOT_PRESENT
return download.status
@fsync_readonly
def get_status(user_id, volume_id, path, download_url, dl_key):
"""Get the status of the download."""
gw = SystemGateway()
try:
download = gw.get_download(
user_id, volume_id, path, download_url, dl_key)
except errors.DoesNotExist:
return UNKNOWN
return get_status_from_download(user_id, download)
@fsync_readonly
def get_status_by_id(user_id, dl_id):
"""Get the status of the download."""
gw = SystemGateway()
try:
download = gw.get_download_by_id(user_id, dl_id)
except errors.DoesNotExist:
return UNKNOWN
return get_status_from_download(user_id, download)
@retryable_transaction()
@fsync_commit
def make_download(user_id, udf_id, file_path, download_url, download_key=None):
"""Create a new download object."""
gw = SystemGateway()
return gw.make_download(
user_id, udf_id, file_path, download_url, download_key)
@fsync_readonly
def get_download(user_id, udf_id, file_path, download_url, download_key=None):
"""Get a download by its UDF, file path and download URL and key."""
gw = SystemGateway()
return gw.get_download(
user_id, udf_id, file_path, download_url, download_key)
@fsync_readonly
def get_download_by_id(user_id, download_id):
"""Get a download by its ID."""
gw = SystemGateway()
return gw.get_download_by_id(user_id, download_id)
@fsync_readonly
def get_failed_downloads(start_date, end_date):
"""Get the failed downloads between start_date and end_date."""
gw = SystemGateway()
downloads = []
for shard_id in get_shard_ids():
downloads.extend(list(gw.get_failed_downloads(
shard_id, start_date, end_date)))
return downloads
| agpl-3.0 | -6,586,029,267,998,936,000 | 31.47205 | 79 | 0.68114 | false | 3.645746 | false | false | false |
dpazel/music_rep | midi/score_to_midi_converter.py | 1 | 16776 | """
File: score_to_midi_converter.py
Purpose: Provides a means to convert a score to a midi file.
"""
from mido import MidiFile, MidiTrack, Message
from fractions import Fraction
from timemodel.tempo_event import TempoEvent
from timemodel.time_signature_event import TimeSignatureEvent
from mido.midifiles import MetaMessage
from structure.dynamics import Dynamics
from structure.tempo import Tempo
from structure.time_signature import TimeSignature
from structure.score import Score
from timemodel.duration import Duration
from timemodel.position import Position
from timemodel.offset import Offset
from instruments.instrument_catalog import InstrumentCatalog
from structure.instrument_voice import InstrumentVoice
import logging
from timemodel.dynamics_event import DynamicsEvent
from timemodel.dynamics_function_event import DynamicsFunctionEvent
from misc.utility import convert_to_numeric
from timemodel.time_conversion import TimeConversion
from timemodel.tempo_function_event import TempoFunctionEvent
from timemodel.tempo_event_sequence import TempoEventSequence
class ScoreToMidiConverter(object):
"""
This class is used to convert a score to a midi file. The procedure is:
1) Create a converter: smc = ScoreToMidiConverter(score)
2) Create the output file: smc.create(filename)
Note:
All tempos messages are on channel 1 track 0
All note messages are on channel 1 for other tracks.
"""
# Number of MIDI ticks per quarter note.
TICKS_PER_BEAT = 480
DEFAULT_NOTE_CHANNEL = 1
DEFAULT_VELOCITY = 64
# number of ms between volume events for dynamic function events
VOLUME_EVENT_DURATION_MS = 5
TEMPO_EVENT_DURATION_MS = 50
DEFAUTLT_BEAT_DURATION = Duration(1, 4)
def __init__(self, score):
"""
Constructor. Set up the tick track map.
Args:
score: of Score class
"""
self.__score = score
self.__filename = ''
self.mid = None
self.inst_voice_channel = {}
self.channel_assignment = 1
self.fine_tempo_sequence = None
self.time_conversion = None
def create(self, filename):
"""
Create a midi file from the score, with midi filename provided.
Args:
filename - String filename. Can include path, should have filetype '.mid'.
"""
self.__filename = filename
self.mid = MidiFile(type=1)
self.mid.ticks_per_beat = ScoreToMidiConverter.TICKS_PER_BEAT
# assign each instrument voice to a channel
self.inst_voice_channel = {}
# used for assigning channels to each voice.
self.channel_assignment = 1
(self.fine_tempo_sequence, self.time_conversion) = self._build_time_conversion()
meta_track = MidiTrack()
self.mid.tracks.append(meta_track)
self._fill_meta_track(meta_track)
self._assign_voices_tracks()
self.mid.save(self.filename)
@property
def score(self):
return self.__score
@property
def filename(self):
return self.__filename
@staticmethod
def convert_score(score, filename):
"""
Static method to convert a Score to a midi file.
Args:
score: Class Score object
filename: The name of the midi file, should have filetype .mid
"""
smc = ScoreToMidiConverter(score)
smc.create(filename)
@staticmethod
def convert_line(line, filename, tempo=Tempo(60, Duration(1, 4)),
time_signature=TimeSignature(4, Duration(1, 4))):
"""
Static method to convert a Line to a midi file
Args:
line: Class Line object
filename: The name of the midi file, should have filetype .mid
tempo: Tempo for playback, default is 60 BPM tempo beat = quarter note
            time_signature: TimeSignature on playback, default is 4 quarter notes
"""
score = Score()
tempo_sequence = score.tempo_sequence
tempo_sequence.add(TempoEvent(tempo, Position(0)))
ts_sequence = score.time_signature_sequence
ts_sequence.add(TimeSignatureEvent(time_signature, Position(0)))
c = InstrumentCatalog.instance()
piano = c.get_instrument("piano")
piano_instrument_voice = InstrumentVoice(piano, 1)
piano_voice = piano_instrument_voice.voice(0)
piano_voice.pin(line, Offset(0))
score.add_instrument_voice(piano_instrument_voice)
ScoreToMidiConverter.convert_score(score, filename)
def _assign_voices_tracks(self):
# assign a channel to each instrument voice
for inst_voice in self.score.instrument_voices:
self.inst_voice_channel[inst_voice] = self._next_channel()
self._add_notes(inst_voice, self.inst_voice_channel[inst_voice])
def _next_channel(self):
"""
Allocates channels starting at 1 through 15. Raises exception beyond that.
"""
if self.channel_assignment == 15:
raise Exception('Ran out of channels.')
self.channel_assignment += 1
if self.channel_assignment == 9: # drums
return self._next_channel()
return self.channel_assignment
def _add_notes(self, inst_voice, channel):
voice_note_map = inst_voice.get_all_notes()
for voice, notes in voice_note_map.items():
track = MidiTrack()
track.name = inst_voice.instrument.name
self.mid.tracks.append(track)
# For each note
# build a note on and off message, compute the ticks of the message
# append both messages to out list msgs
velocity_msgs = self._gen_velocity_msgs(voice, channel)
msgs = []
for n in notes:
# We do not need to set velocity outside of the default
# Crescendo and decrescendo are taken care of by channel change messages only,
# which modify the constant velocity set per note.
# If the velocity was set here, the channel change would distort the setting.
# Otherwise, the velocity would be acquired as follows
ticks = self._wnt_to_ticks(n.get_absolute_position())
msg = NoteMessage('note_on', channel, n.diatonic_pitch.chromatic_distance + 12, ticks,
ScoreToMidiConverter.DEFAULT_VELOCITY)
msgs.append(msg)
end_ticks = self._wnt_to_ticks(n.get_absolute_position() + n.duration)
msg = NoteMessage('note_off', channel, n.diatonic_pitch.chromatic_distance + 12, end_ticks)
msgs.append(msg)
# Sort the msgs list by tick time, and respect to off before on if same time
msgs.extend(velocity_msgs)
from functools import cmp_to_key
msgs = sorted(msgs, key=cmp_to_key(lambda x, y: ScoreToMidiConverter.compare_note_msgs(x, y)))
prior_tick = 0
for m in msgs:
logging.info('{0}'.format(m))
ticks_value = int(m.abs_tick_time - prior_tick)
# Append the midi message to the track, with tics being incremental over succeeding messages.
# We default to channel 1 for all tracks.
track.append(m.to_midi_message(ticks_value))
prior_tick = m.abs_tick_time
def _gen_velocity_msgs(self, voice, channel):
"""
The method runs through the dynamic sequence events, and generates channel change events to set velocity.
In the case of a DynamicsEvent, the process is trivial.
In the case of a DynamicsFunctionEvent, we generate channel change events in small steps over the domain
of the event, providing a 'simulation' of velocity changes as dictated by the function behind the event.
"""
msgs = []
dyn_seq = voice.dynamics_sequence.sequence_list
voice_len = voice.length()
tc = self.time_conversion
for event in dyn_seq:
if event.time >= voice_len:
break
if isinstance(event, DynamicsEvent):
velocity = event.velocity()
ticks = self._wnt_to_ticks(event.time)
msgs.append(ExpressionVelocityMessage(channel, ticks, velocity))
elif isinstance(event, DynamicsFunctionEvent):
t1 = tc.position_to_actual_time(event.time)
next_event = voice.dynamics_sequence.successor(event)
                t2 = tc.position_to_actual_time(next_event.time if next_event is not None else Position(voice_len.duration))
while t1 < t2:
wnt = tc.actual_time_to_position(t1)
ticks = self._wnt_to_ticks(wnt)
velocity = int(event.velocity(wnt, next_event.time if next_event is not None else
Position(voice_len.duration)))
msgs.append(ExpressionVelocityMessage(channel, ticks, velocity))
t1 += ScoreToMidiConverter.VOLUME_EVENT_DURATION_MS
return msgs
def _fill_meta_track(self, meta_track):
event_list = self.score.tempo_sequence.sequence_list
score_len = self.score.length()
# Loop over list, for every change in tempo , the tempo should be reset.
# Note, that there may be tempo or ts changes that last for 0 duration - we skip those.
last_tick_time = 0
for tempo_event in event_list:
if tempo_event.time >= score_len:
break
if isinstance(tempo_event, TempoEvent):
current_tick_time = self._wnt_to_ticks(tempo_event.time)
# If there is a ts and tempo event, effect a midi tempo change
beat_ratio = Fraction(1, 4) / tempo_event.object.beat_duration.duration
# tempo_value = (60/BPM) * (ts_beat / tempo_beat)
tempo_value = int((60.0 / tempo_event.object.tempo) * beat_ratio * 1000000)
ticks = int(current_tick_time - last_tick_time)
msg = MetaMessage('set_tempo', tempo=tempo_value, time=ticks)
meta_track.append(msg)
last_tick_time = current_tick_time
elif isinstance(tempo_event, TempoFunctionEvent):
# Run over event range making a small step function effectively, and setting the tempo
# every TEMPO_EVENT_DURATION_MS.
t1 = tempo_event.time
                beat_duration = tempo_event.beat_duration if tempo_event.beat_duration is not None else \
ScoreToMidiConverter.DEFAUTLT_BEAT_DURATION
next_event = self.score.tempo_sequence.successor(tempo_event)
t2 = next_event.time if next_event is not None else Position(score_len.duration)
while t1 < t2:
tempo = int(tempo_event.tempo(t1, next_event.time if next_event is not None else
Position(score_len)))
delta_wnt = (tempo * ScoreToMidiConverter.TEMPO_EVENT_DURATION_MS * beat_duration.duration) / \
(60.0 * 1000.0)
current_tick_time = self._wnt_to_ticks(t1)
ticks = int(current_tick_time - last_tick_time)
# If there is a ts and tempo event, effect a midi tempo change
beat_ratio = Fraction(1, 4) / beat_duration.duration
                    # tempo_value = (60/BPM) * (ts_beat / tempo_beat)
tempo_value = int((60.0 / tempo) * beat_ratio * 1000000)
msg = MetaMessage('set_tempo', tempo=tempo_value, time=ticks)
meta_track.append(msg)
t1 += delta_wnt
last_tick_time = current_tick_time
def _build_time_conversion(self):
event_list = self.score.tempo_sequence.sequence_list
score_len = self.score.length()
fine_tempo_sequence = TempoEventSequence()
for event in event_list:
if isinstance(event, TempoEvent):
fine_tempo_sequence.add(TempoEvent(event.object, event.time))
elif isinstance(event, TempoFunctionEvent):
t1 = event.time
                beat_duration = event.beat_duration if event.beat_duration is not None else \
ScoreToMidiConverter.DEFAUTLT_BEAT_DURATION
next_event = self.score.tempo_sequence.successor(event)
t2 = next_event.time if next_event is not None else Position(score_len.duration)
while t1 < t2:
tempo = int(event.tempo(t1, next_event.time if next_event is not None else Position(score_len)))
delta_wnt = (tempo * ScoreToMidiConverter.TEMPO_EVENT_DURATION_MS * beat_duration.duration) / \
(60.0 * 1000.0)
fine_tempo_sequence.add(TempoEvent(Tempo(tempo, beat_duration), t1))
t1 += delta_wnt
tc = TimeConversion(fine_tempo_sequence, self.score.time_signature_sequence, Position(score_len))
return fine_tempo_sequence, tc
def _wnt_to_ticks(self, wnt):
# Convert whole note time to ticks.
offset = convert_to_numeric(wnt)
return int((offset / Fraction(1, 4)) * self.mid.ticks_per_beat)
@staticmethod
def compare_note_msgs(a, b):
a_ticks = a.abs_tick_time
b_ticks = b.abs_tick_time
comp_value = -1 if a_ticks < b_ticks else 1 if a_ticks > b_ticks else 0
if isinstance(a, ExpressionVelocityMessage) or isinstance(b, ExpressionVelocityMessage):
return comp_value
if comp_value != 0:
return comp_value
a_is_note_off = a.msg_type == 'note_off'
b_is_note_off = b.msg_type == 'note_off'
if a_is_note_off and not b_is_note_off:
return -1
if not a_is_note_off and b_is_note_off:
return 1
return 0
class MidiMessage(object):
def __init__(self, msg_type, channel, abs_tick_time):
self.__msg_type = msg_type
self.__channel = channel
self.__abs_tick_time = abs_tick_time
@property
def msg_type(self):
return self.__msg_type
@property
def channel(self):
return self.__channel
@property
def abs_tick_time(self):
return self.__abs_tick_time
def to_midi_message(self, prior_msg_ticks):
return None
class NoteMessage(MidiMessage):
def __init__(self, msg_type, channel, note_value, abs_tick_time, velocity=Dynamics.DEFAULT_DYNAMICS_VELOCITY):
MidiMessage.__init__(self, msg_type, channel, abs_tick_time)
self.__note_value = note_value
self.__velocity = velocity
@property
def note_value(self):
return self.__note_value
@property
def velocity(self):
return self.__velocity
def to_midi_message(self, ticks_from_prior_msg):
return Message(self.msg_type, note=self.note_value, velocity=self.velocity, time=ticks_from_prior_msg,
channel=self.channel)
def __str__(self):
return '{0} {1}/{2}({3}, {4})'.format(self.abs_tick_time, self.msg_type, self.channel, self.note_value,
self.velocity)
class ExpressionVelocityMessage(MidiMessage):
def __init__(self, channel, abs_tick_time, velocity=Dynamics.DEFAULT_DYNAMICS_VELOCITY):
MidiMessage.__init__(self, 'control_change', channel, abs_tick_time)
self.__velocity = velocity
@property
def velocity(self):
return self.__velocity
def to_midi_message(self, ticks_from_prior_msg):
return Message(self.msg_type, control=11, value=self.velocity, time=ticks_from_prior_msg,
channel=self.channel)
def __str__(self):
return '{0} {1}/{2}({3})'.format(self.abs_tick_time, self.msg_type, self.channel, self.velocity)
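# --- Editorial usage sketch (not part of the original module), following the
# procedure in the ScoreToMidiConverter docstring; 'score', 'line' and the output
# file name are placeholders supplied by the caller:
#
#   smc = ScoreToMidiConverter(score)   # 1) create a converter for a Score
#   smc.create('example.mid')           # 2) write the MIDI file
#
# or, for a single Line with the default tempo and time signature:
#
#   ScoreToMidiConverter.convert_line(line, 'example.mid')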
| mit | -3,183,113,065,999,324,000 | 39.917073 | 119 | 0.587685 | false | 4.052174 | false | false | false |
Azure/azure-storage-python | azure-storage-common/azure/storage/common/_constants.py | 1 | 2363 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import platform
import sys
__author__ = 'Microsoft Corp. <[email protected]>'
__version__ = '2.1.0'
# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)'
# First version(0.37.0) is the common package, and the second version(0.38.0) is the service package
USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__)
USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(),
platform.python_version(), platform.system(),
platform.release())
# default values for common package, in case it is used directly
DEFAULT_X_MS_VERSION = '2019-02-02'
DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX)
# Live ServiceClient URLs
SERVICE_HOST_BASE = 'core.windows.net'
DEFAULT_PROTOCOL = 'https'
# Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
DEV_QUEUE_HOST = '127.0.0.1:10001'
# Default credentials for Development Storage Service
DEV_ACCOUNT_NAME = 'devstoreaccount1'
DEV_ACCOUNT_SECONDARY_NAME = 'devstoreaccount1-secondary'
DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
# Socket timeout in seconds
DEFAULT_SOCKET_TIMEOUT = 20
# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned)
# The socket timeout is now the maximum total duration to send all data.
if sys.version_info >= (3, 5):
# the timeout to connect is 20 seconds, and the read timeout is 2000 seconds
# the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed)
DEFAULT_SOCKET_TIMEOUT = (20, 2000)
# Encryption constants
_ENCRYPTION_PROTOCOL_V1 = '1.0'
_AUTHORIZATION_HEADER_NAME = 'Authorization'
_COPY_SOURCE_HEADER_NAME = 'x-ms-copy-source'
_REDACTED_VALUE = 'REDACTED'
_CLIENT_REQUEST_ID_HEADER_NAME = 'x-ms-client-request-id'
| mit | 2,878,465,554,714,151,400 | 44.442308 | 119 | 0.662717 | false | 3.500741 | false | false | false |
luminnem/KittysHeart | arrowsLauncher.py | 1 | 1352 | '''
Created on 23/8/2014
@author: Alberto
'''
import pygame as py
from settings import SCREEN_WIDTH, SCREEN_HEIGHT
class ArrowsLauncher(object):
def __init__(self, arrowsManager, img, i):
self.am = arrowsManager
self.img = img
self.ConfigurePosition(i)
self.canShoot = True
self.timer = 0
self.delay = 1000
def ConfigurePosition(self, i):
floor = SCREEN_HEIGHT - 15 - self.img.get_height()
self.i = i
if i == 0:
x = 0
y = floor
elif i == 1:
x = 0
y = floor - self.img.get_height() * 3
if i == 2:
x = SCREEN_WIDTH - self.img.get_width()
y = floor
elif i == 3:
x = SCREEN_WIDTH - self.img.get_width()
y = floor - self.img.get_height() * 3
self.rect = py.Rect(x, y, self.img.get_width(), self.img.get_height())
def Update(self):
if not self.canShoot:
self.Timer()
def Render(self, screen):
screen.blit(self.img, self.rect)
def Timer(self):
if py.time.get_ticks() - self.timer > self.delay:
self.timer = py.time.get_ticks()
self.canShoot = True
def Shoot(self):
if self.canShoot:
self.am.AddArrow(self)
self.canShoot = False | gpl-3.0 | -5,816,272,487,389,536,000 | 22.327586 | 78 | 0.522929 | false | 3.371571 | false | false | false |
jepler/linuxcnc-mirror | lib/python/qtvcp/widgets/state_label.py | 3 | 5885 | #!/usr/bin/python2.7
# QTVcp Widget
#
# Copyright (c) 2017 Chris Morley
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
###############################################################################
import os
from PyQt5 import QtCore, QtWidgets
from qtvcp.widgets.simple_widgets import ScaledLabel
from qtvcp.widgets.widget_baseclass import _HalWidgetBase
from qtvcp.core import Status
from qtvcp import logger
# Instantiate the libraries with global reference
# STATUS gives us status messages from linuxcnc
# LOG is for running code logging
STATUS = Status()
LOG = logger.getLogger(__name__)
# Set the log level for this module
# LOG.setLevel(logger.INFO) # One of DEBUG, INFO, WARNING, ERROR, CRITICAL
class StateLabel(ScaledLabel, _HalWidgetBase):
def __init__(self, parent=None):
super(StateLabel, self).__init__(parent)
self._true_textTemplate = 'True'
self._false_textTemplate = 'False'
self.metric_mode = True
self.css_mode = False
self.fpr_mode = False
self.diameter_mode = False
def _hal_init(self):
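        # Only one of the mode flags is expected to be True at a time (see
        # _toggle_properties below), so connect the matching STATUS signal.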
def _f(data):
self._set_text(data)
if self.metric_mode:
STATUS.connect('metric-mode-changed', lambda w, data: _f(data))
elif self.css_mode:
STATUS.connect('css-mode', lambda w, data: _f(data))
elif self.fpr_mode:
STATUS.connect('fpr-mode', lambda w, data: _f(data))
elif self.diameter_mode:
STATUS.connect('diameter-mode', lambda w, data: _f(data))
def _set_text(self, data):
if data:
self.setText(self._true_textTemplate)
else:
self.setText(self._false_textTemplate)
#########################################################################
# This is how designer can interact with our widget properties.
# designer will show the pyqtProperty properties in the editor
# it will use the get set and reset calls to do those actions
#
# _toggle_properties makes it so we can only select one option
########################################################################
def _toggle_properties(self, picked):
data = ('metric_mode', 'css_mode', 'fpr_mode', 'diameter_mode')
for i in data:
if not i == picked:
self[i + '_status'] = False
# property getter/setters
def set_true_textTemplate(self, data):
self._true_textTemplate = data
try:
self._set_text(True)
except Exception as e:
            LOG.exception("textTemplate: {}, Data: {}".format(self._true_textTemplate, data), exc_info=e)
self.setText('Error')
def get_true_textTemplate(self):
return self._true_textTemplate
def reset_true_textTemplate(self):
self._true_textTemplate = '%s'
def set_false_textTemplate(self, data):
self._false_textTemplate = data
try:
self._set_text(False)
except:
self.setText('Error 2')
def get_false_textTemplate(self):
return self._false_textTemplate
def reset_false_textTemplate(self):
self._false_textTemplate = '%s'
# metric mode status
def set_metric_mode(self, data):
self.metric_mode = data
if data:
self._toggle_properties('metric_mode')
def get_metric_mode(self):
return self.metric_mode
def reset_metric_mode(self):
self.metric_mode = True
# css mode status
def set_css_mode(self, data):
self.css_mode = data
if data:
self._toggle_properties('css_mode')
def get_css_mode(self):
return self.css_mode
def reset_css_mode(self):
self.css_mode = True
# fpr mode status
def set_fpr_mode(self, data):
self.fpr_mode = data
if data:
            self._toggle_properties('fpr_mode')
def get_fpr_mode(self):
return self.fpr_mode
def reset_fpr_mode(self):
self.fpr_mode = True
# diameter mode status
def set_diameter_mode(self, data):
self.diameter_mode = data
if data:
self._toggle_properties('diameter_mode')
def get_diameter_mode(self):
return self.diameter_mode
def reset_diameter_mode(self):
self.diameter_mode = True
# designer will show these properties in this order:
# BOOL
metric_mode_status = QtCore.pyqtProperty(bool, get_metric_mode, set_metric_mode, reset_metric_mode)
css_mode_status = QtCore.pyqtProperty(bool, get_css_mode, set_css_mode, reset_css_mode)
fpr_mode_status = QtCore.pyqtProperty(bool, get_fpr_mode, set_fpr_mode, reset_fpr_mode)
diameter_mode_status = QtCore.pyqtProperty(bool, get_diameter_mode, set_diameter_mode, reset_diameter_mode)
# Non BOOL
true_textTemplate = QtCore.pyqtProperty(str, get_true_textTemplate,
set_true_textTemplate, reset_true_textTemplate)
false_textTemplate = QtCore.pyqtProperty(str, get_false_textTemplate,
set_false_textTemplate, reset_false_textTemplate)
    # boilerplate code
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, item, value):
return setattr(self, item, value)
if __name__ == "__main__":
import sys
    app = QtWidgets.QApplication(sys.argv)
    label = StateLabel()
label.show()
sys.exit(app.exec_())
| lgpl-2.1 | 8,506,048,133,853,998,000 | 33.415205 | 111 | 0.611555 | false | 3.853962 | false | false | false |
Bergiu/smarthomepi | packages/shp/Controller.py | 1 | 2815 | #
from abc import ABCMeta, abstractmethod
class Controller (metaclass=ABCMeta):
#public:
@abstractmethod
def __init__( self, process, database):
self.process = process
self.database = database
self.itemList = [] # Array
self.indexIdx = 0 # int
#self.initiateAllFromDB()
@abstractmethod
def createItem( self, **kwargs ):
"""
@**kwargs: all parameter used to create an item
"""
raise NotImplementedError
@abstractmethod
def delItem( self, id_item ):
"""
@id_item:int
"""
raise NotImplementedError
@abstractmethod
def initiateFromDB( self, id_item, validation ):
"""
@id_item:int
"""
raise NotImplementedError
@abstractmethod
def initiateAllFromDB( self, validation ):
"""
"""
raise NotImplementedError
def setDatabase( self, database ):
self.database=database
return True
def addItem( self, item ):
"""
@item:
"""
duplicate=self.isItem(item.getId())
if not duplicate:
self.itemList.append(item)
self.indexIdx+=1
return True
else:
print("Item is a duplicate! (%s: id=%s)"%(type(item).__name__,item.getId()))
return False
def removeItem( self, id_item ):
"""
@id_item:int
"""
		item=None
		for i in self.itemList:
if i.getId() == id_item:
item=i
if item:
self.itemList.remove(item)
self.indexIdx-=1
return True
else:
return False
#private:
def getItemById( self, id):
"""
@id:int
@item
"""
validation=False
"""
		isUpToDate returns True if the id is not in usr,
		and returns False if the id is in usr.
"""
upToDate=self.process.isUpToDate(self.table_name,id)
removed=not self.process.isUpToDate(self.table_name,-id)
if not upToDate:
self.initiateFromDB(id)
self.process.setUpToDate(self.table_name,id)
elif removed:
self.removeItem(id)
self.process.setUpToDate(self.table_name,-id)
return False
for item in self.itemList:
if item.getId() == id:
validation=item
if not validation:
if self.initiateFromDB(id):
for item in self.itemList:
if item.getId() == id:
validation=item
return validation
def isItem(self, id):
validation=False
for item in self.itemList:
if item.getId() == id:
validation=True
return validation
def getAllItems( self ):
"""
@itemList:Array
"""
updates=self.process.getUpdates(self.table_name)
for update in updates:
if int(update) < 0:
self.getItemById(-update)
else:
self.getItemById(update)
return self.itemList
#public:
def getId( self, idx_permission):
"""
@idx_permission:int
@id:int
"""
if self.indexIdx > idx_permission and idx_permission >= 0:
id=self.itemList[idx_permission].getId()
return id
else:
return False
def getTableName( self ):
"""
@table_name:str
"""
return self.table_name
| gpl-3.0 | 3,497,706,627,547,256,300 | 19.23741 | 79 | 0.658372 | false | 2.954832 | false | false | false |
fuhongxue/CoronaSDK-SublimeText | run_project.py | 1 | 2260 | #
# Sublime Text plugin to support Corona Editor
#
# Copyright (c) 2013 Corona Labs Inc. A mobile development software company. All rights reserved.
#
# MIT License - see https://raw.github.com/coronalabs/CoronaSDK-SublimeText/master/LICENSE
import sublime
import sublime_plugin
try:
from . import _corona_utils # P3
except:
import _corona_utils # P2
class ToggleBuildPanelCommand(sublime_plugin.WindowCommand):
def run(self):
output_panel = self.window.get_output_panel("exec")
if output_panel.window():
self.window.run_command("hide_panel", {"panel": "output.exec"})
else:
self.window.run_command("show_panel", {"panel": "output.exec"})
def description(self):
output_panel = self.window.get_output_panel("exec")
if output_panel.window():
return "Hide Build Panel"
else:
return "Show Build Panel"
class RunProjectCommand(sublime_plugin.WindowCommand):
# find a main.lua file to start the Simulator with or failing that, any open Lua
# file we can use as a place to start looking for a main.lua
def findLuaFile(self):
filename = self.window.active_view().file_name()
if filename is None or not filename.endswith(".lua"):
filename = None
# No current .lua file, see if we have one open
for view in self.window.views():
if view.file_name() and view.file_name().endswith(".lua"):
filename = view.file_name()
return filename
def is_enabled(self):
return self.findLuaFile() is not None
def run(self):
cmd = []
filename = self.findLuaFile()
if filename is None:
sublime.error_message("Can't find an open '.lua' file to determine the location of 'main.lua'")
return
mainlua = _corona_utils.ResolveMainLua(filename)
if mainlua is None:
sublime.error_message("Can't locate 'main.lua' for this project (try opening it in an editor tab)")
return
simulator_path, simulator_flags = _corona_utils.GetSimulatorCmd(mainlua)
cmd = [simulator_path]
cmd += simulator_flags
cmd.append(mainlua)
print(_corona_utils.PACKAGE_NAME + ": Running: " + str(cmd))
# Save our changes before we run
self.window.run_command("save")
self.window.run_command('exec', {'cmd': cmd})
| mit | 7,294,193,245,420,829,000 | 29.133333 | 105 | 0.680088 | false | 3.460949 | false | false | false |
SynTuner/WIXOSS | o8g/Scripts/Cost.py | 1 | 43132 | # -*- coding: utf-8 -*-
'''
Created on 2014-07-18
@author: SynTuner
'''
class CardCost: # OCTGN has trouble handling Chinese text with regular expressions, so all these per-card methods are needed until costs can be handled in a uniform way.
def __init__(self):
self.etype = -2
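    # Shared convention for the per-card methods below (inferred from their bodies):
    #   etype == -2 -> no usable effect here (the method returns False)
    #   etype == -1 -> no cost is required
    #   etype ==  0 -> cost to grow into this LRIG
    #   etype ==  1 -> cost of the on-play/Arrival effect
    #   etype >=  2 -> cost of an activated effect (may also set specialcost/echoice)
    # Results are passed back through the module-level globals `cost`, `specialcost`
    # and `echoice`.  Helpers such as uniformCost, askChoice, notify and mute are
    # assumed to be provided by the other OCTGN game scripts loaded with this one.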
def WX01_001(self, card, etype): #太阳之巫女 玉依姬
mute()
global cost
global echoice
global specialcost
notify("done")
growcost = ["白","白", "白"]
effectcost2 = ["白"]
if etype == -2:
return False
elif etype == -1:
            cost = []
elif etype == 0: #to grow
cost = growcost
for color in cost:
notify(color)
elif etype == 1: #to activate Arrival
pass
else:
cost = effectcost2
specialcost = {"Discard":{"color": "白", "ctype": "SIGNI", "qty": 1}}
def WX01_002(self, card, etype): #晓之巫女 玉依姬
mute()
global cost
global echoice
global specialcost
notify("done")
growcost = ["白","白", "红","红"]
effectcost2 = ["白","白","红"]
if etype == -2:
return False
elif etype == -1:
            cost = []
elif etype == 0: #to grow
cost = growcost
for color in cost:
notify(color)
elif etype == 1: #to activate Arrival
pass
else:
cost = effectcost2
specialcost = {"Down":{"target":"self"}}
def WX01_003(self, card, etype): #百火缭乱 花代•肆
mute()
global cost
global echoice
global specialcost
notify("done")
growcost = ["红", "红","红"]
effectcost2 = ["红"]
if etype == -2:
return False
elif etype == -1:
            cost = []
elif etype == 0: #to grow
cost = growcost
for color in cost:
notify(color)
elif etype == 1: #to activate Arrival
pass
else:
cost = effectcost2
specialcost = {"Discard":{"color": "红", "ctype": "SIGNI", "qty": 1}}
def WX01_004(self, card, etype): #轰炎 花代•贰改
mute()
global cost
global echoice
global specialcost
notify("done")
growcost = ["红", "红"]
effectcost2 = ["红", "红", "红"]
if etype == -2:
return False
elif etype == -1:
            cost = []
elif etype == 0: #to grow
cost = growcost
for color in cost:
notify(color)
elif etype == 1: #to activate Arrival
pass
else:
cost = effectcost2
def WX01_005(self, card, etype): #代号 皮璐璐可•Ω
mute()
uniformCost(etype, ["蓝","蓝","蓝"], [], [], [], {"Discard":{"color": "蓝", "ctype": "SIGNI", "qty": 1}}, 2)
def WX01_006(self, card, etype): #四式战帝女 绿姬
mute()
global cost
global echoice
global specialcost
notify("done")
growcost = ["绿","绿","绿"]
effectcost2 = []
if etype == -2:
return False
elif etype == -1:
            cost = []
elif etype == 0: #to grow
cost = growcost
for color in cost:
notify(color)
elif etype == 1: #to activate Arrival effect
pass
else:
cost = effectcost2
specialcost = {"Discard":{"color": "绿", "ctype": "SIGNI", "qty": 1}}
def WX01_007(self, card, etype): #月蚀之巫女 玉依姬
mute()
global cost
global echoice
global specialcost
notify("done")
growcost = ["白","白"]
effectcost1 = ["白"]
if etype == -2:
return False
elif etype == -1:
            cost = []
elif etype == 0: #to grow
cost = growcost
for color in cost:
notify(color)
elif etype == 1: #to activate Arrival effect
cost = effectcost1
else:
pass
def WX01_008(self, card, etype): #流星之巫女 玉依姬
mute()
uniformCost(etype, ["白"], [], [], [], {}, 0)
def WX01_009(self, card, etype): #新星之巫女 玉依姬
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}}, 1)
def WX01_010(self, card, etype): #杰诺之门
mute()
uniformCost(etype, [], [], [], ["白"], {}, 0)
def WX01_011(self, card, etype): #炽炎舞 花代•叁
mute()
uniformCost(etype, ["红", "红"], [], ["红"], [], {}, 0)
def WX01_012(self, card, etype): #刚炎 花代•贰
mute()
uniformCost(etype, ["红"], [], [], [], {}, 0)
def WX01_013(self, card, etype): #焰 花代•壹
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}}, 1)
def WX01_014(self, card, etype): #烈霸一络
mute()
uniformCost(etype, [], [], [], ["红", "红", "红"], {}, 0)
def WX01_015(self, card, etype): #代号 皮璐璐可•Γ
mute()
uniformCost(etype, ["蓝", "蓝"], [], ["蓝"], [], {}, 0)
def WX01_016(self, card, etype): #代号 皮璐璐可•Β
mute()
uniformCost(etype, ["蓝"], [], [], [], {}, 0)
def WX01_017(self, card, etype): #代号 皮璐璐可•Α
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}}, 1)
def WX01_018(self, card, etype): #魔法反制
mute()
uniformCost(etype, [], [], ["蓝"], ["蓝", "无"], {}, 0)
def WX01_019(self, card, etype): #四型皇艳娘 绿姬
mute()
uniformCost(etype, ["绿", "绿", "绿"], [], [], [], {}, 0)
def WX01_020(self, card, etype): #三型雌雌娘 绿姬
mute()
uniformCost(etype, ["绿", "绿"], [], [], [], {}, 0)
def WX01_021(self, card, etype): #二型斗婚娘 绿姬
mute()
uniformCost(etype, ["绿"], [], [], [], {}, 0)
def WX01_022(self, card, etype): #一型舞斗娘 绿姬
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}}, 1)
def WX01_023(self, card, etype): #大器晚成
mute()
uniformCost(etype, [], [], [], ["绿", "绿", "绿", "绿", "绿", "无", "无", "无", "无", "无", "无", "无"], {}, 0)
def WX01_024(self, card, etype): #奇奇怪怪
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_025(self, card, etype): #营救
mute()
uniformCost(etype, [], [], [], ["无"], {}, 0)
def WX01_026(self, card, etype): #充能
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_027(self, card, etype): #原枪 源能枪
mute()
uniformCost(etype, [], [], ["白"], ["白", "白"], {}, 0)
def WX01_028(self, card, etype): #弧光圣气
mute()
uniformCost(etype, [], [], [], ["白", "白", "白", "白", "白"], {}, 0)
def WX01_029(self, card, etype): #罗辉石 金刚珠玉
mute()
uniformCost(etype, [], [], ["红"], ["红", "红"], {}, 0)
def WX01_030(self, card, etype): #赎罪之对火
mute()
uniformCost(etype, [], [], [], ["红", "红", "红"], {}, 0)
def WX01_031(self, card, etype): #核心代号 V•A•C
mute()
uniformCost(etype, [], [], ["蓝"], ["蓝", "蓝"], {}, 0)
def WX01_032(self, card, etype): #抢夺
mute()
uniformCost(etype, [], [], [], ["蓝", "蓝", "无"], {}, 0)
def WX01_033(self, card, etype): #幻兽神 御先狐
mute()
uniformCost(etype, [], [], ["绿"], ["绿", "绿"], {}, 0)
def WX01_034(self, card, etype): #修复
mute()
uniformCost(etype, [], [], [], ["绿", "绿", "绿"], {}, 0)
def WX01_035(self, card, etype): #祝福女神 雅典娜
mute()
uniformCost(etype, [], [], [], ["白"], {"Down":{"target":"self"}}, 2)
def WX01_036(self, card, etype): #巨弓 抛射弓
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_037(self, card, etype): #无法忘却的幻想 瓦尔基里
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX01_038(self, card, etype): #获得但他林
mute()
uniformCost(etype, [], [], [], ["白", "红"], {}, 0)
def WX01_039(self, card, etype): #弩炮 加农炮
mute()
uniformCost(etype, [], [], [], ["红"], {"Down":{"target":"self"}}, 2)
def WX01_040(self, card, etype): #罗石 山铜
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_041(self, card, etype): #轰炮 法典炮
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX01_042(self, card, etype): #断罪之轹断
mute()
uniformCost(etype, [], [], [], ["红", "红", "红"], {}, 0)
def WX01_043(self, card, etype): #幻水 雅莱娅尔
mute()
uniformCost(etype, [], [], [], ["蓝"], {"Down":{"target":"self"}}, 2)
def WX01_044(self, card, etype): #技艺代号 P•Z•L
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_045(self, card, etype): #幻水 夏克兰丝
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX01_046(self, card, etype): #情况糟糕
mute()
uniformCost(etype, [], [], [], ["蓝"], {}, 0)
def WX01_047(self, card, etype): #罗植 曼茶罗花
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX01_048(self, card, etype): #幻兽 雪怪
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_049(self, card, etype): #罗植 植生羊
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX01_050(self, card, etype): #大化
mute()
uniformCost(etype, [], [], [], ["绿"], {}, 0)
def WX01_051(self, card, etype): #侍从Q
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_052(self, card, etype): #包括的知识
mute()
uniformCost(etype, [], [], [], ["无", "无"], {}, 0)
def WX01_053(self, card, etype): #极剑 噬神剑
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_054(self, card, etype): #极盾 埃奎斯盾
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_055(self, card, etype): #大盾 镇暴盾
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_056(self, card, etype): #中盾 方盾
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_057(self, card, etype): #出弓 炽天弓
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_058(self, card, etype): #重新开始的对话 米迦勒
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX01_059(self, card, etype): #出弓 普弓
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_060(self, card, etype): #小盾 圆盾
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_061(self, card, etype): #探求的思想 汉尼尔
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX01_062(self, card, etype): #将之开启
mute()
uniformCost(etype, [], [], [], ["白"], {}, 0)
def WX01_063(self, card, etype): #做好准备
mute()
uniformCost(etype, [], [], [], ["白"], {}, 0)
def WX01_064(self, card, etype): #罗石 金属
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_065(self, card, etype): #罗石 绿宝石
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_066(self, card, etype): #罗石 红宝石
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_067(self, card, etype): #罗石 磷矿石
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_068(self, card, etype): #罗石 琥珀
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_069(self, card, etype): #爆炮 远射炮
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX01_070(self, card, etype): #罗石 海人草
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_071(self, card, etype): #罗石 蓝宝石
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_072(self, card, etype): #小炮 德拉古诺夫枪
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX01_073(self, card, etype): #落星炎球
mute()
uniformCost(etype, [], [], [], ["红", "红", "红"], {}, 0)
def WX01_074(self, card, etype): #棱晶火柱
mute()
uniformCost(etype, [], [], [], ["白", "红"], {}, 0)
def WX01_075(self, card, etype): #技艺代号 A•S•M
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_076(self, card, etype): #技艺代号 I•D•O•L
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_077(self, card, etype): #技艺代号 A•D•B
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_078(self, card, etype): #技艺代号 S•T•G
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_079(self, card, etype): #技艺代号 W•T•C
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_080(self, card, etype): #幻水 夏可檀
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX01_081(self, card, etype): #技艺代号 T•V
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_082(self, card, etype): #技艺代号 F•A•N
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_083(self, card, etype): #幻水 克马诺明
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX01_084(self, card, etype): #事不过三
mute()
uniformCost(etype, [], [], [], ["蓝"], {}, 0)
def WX01_085(self, card, etype): #冰封
mute()
uniformCost(etype, [], [], [], ["蓝"], {}, 0)
def WX01_086(self, card, etype): #幻兽 飞鹰
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_087(self, card, etype): #幻兽 猫妖精
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_088(self, card, etype): #幻兽 猫头鹰
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_089(self, card, etype): #幻兽 黑猫
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_090(self, card, etype): #幻兽 麻雀
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_091(self, card, etype): #幻兽 树袋熊
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_092(self, card, etype): #幻兽 白猫
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_093(self, card, etype): #罗植 蒲公英
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX01_094(self, card, etype): #幻兽 燕子
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_095(self, card, etype): #幻兽 大熊猫
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_096(self, card, etype): #幻兽 三色猫
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_097(self, card, etype): #罗植 鼠尾草
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX01_098(self, card, etype): #芽生
mute()
uniformCost(etype, [], [], [], ["绿"], {}, 0)
def WX01_099(self, card, etype): #逆出
mute()
uniformCost(etype, [], [], [], ["绿"], {}, 0)
def WX01_100(self, card, etype): #侍从T
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_101(self, card, etype): #侍从D
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_102(self, card, etype): #侍从O
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX01_103(self, card, etype): #喷流的知识
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD01_001(self, card, etype): #满月之巫女 玉依姬
mute()
uniformCost(etype, ["白","白","白"], [], [], [], {}, 0)
def WD01_002(self, card, etype): #弦月之巫女 玉依姬
mute()
uniformCost(etype, ["白","白"], [], [], [], {}, 0)
def WD01_003(self, card, etype): #半月之巫女 玉依姬
mute()
uniformCost(etype, ["白"], [], [], [], {}, 0)
def WD01_004(self, card, etype): #三日月之巫女 玉依姬
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD01_005(self, card, etype): #新月之巫女 玉依姬
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD01_006(self, card, etype): #洛可可界线
mute()
uniformCost(etype, [], [], [], ["白", "白", "白", "无", "无"], {}, 0)
def WD01_007(self, card, etype): #艾本之书
mute()
uniformCost(etype, [], [], [], ["白", "白", "白"], {}, 0)
def WD01_008(self, card, etype): #巴洛克防御
mute()
uniformCost(etype, [], [], [], ["白", "白"], {}, 0)
def WD01_009(self, card, etype): #甲胄 皇家铠
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD01_010(self, card, etype): #大剑 石中剑
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD01_011(self, card, etype): #笼手 铁拳
mute()
uniformCost(etype, [], [], ["白"], [], {}, 0)
def WD01_012(self, card, etype): #中剑 焰形剑
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD01_013(self, card, etype): #小剑 库克力弯刀
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD01_014(self, card, etype): #小弓 箭矢
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD01_015(self, card, etype): #获得圣经
mute()
uniformCost(etype, [], [], [], ["白"], {}, 0)
def WD02_001(self, card, etype): #花代•肆
mute()
uniformCost(etype, ["红", "红", "红"], [], [], [], {}, 0)
def WD02_002(self, card, etype): #花代•叁
mute()
uniformCost(etype, ["红", "红"], [], [], [], {}, 0)
def WD02_003(self, card, etype): #花代•贰
mute()
uniformCost(etype, ["红"], [], [], [], {}, 0)
def WD02_004(self, card, etype): #花代•壹
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD02_005(self, card, etype): #花代•零
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD02_006(self, card, etype): #飞火夏虫
mute()
uniformCost(etype, [], [], [], ["红", "红", "红"], {}, 0)
def WD02_007(self, card, etype): #背炎之阵
mute()
uniformCost(etype, [], [], [], ["红", "红"], {"Discard":{"qty": 3}}, 2)
def WD02_008(self, card, etype): #烧石炎
mute()
uniformCost(etype, [], [], [], ["红", "无"], {}, 0)
def WD02_009(self, card, etype): #罗石 火山石
mute()
uniformCost(etype, [], [], ["红", "红", "红"], [], {}, 0)
def WD02_010(self, card, etype): #罗石 白银
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD02_011(self, card, etype): #罗石 石榴石
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD02_012(self, card, etype): #罗石 铜
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD02_013(self, card, etype): #罗石 铁
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD02_014(self, card, etype): #罗石 紫水晶
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD02_015(self, card, etype): #轰音火柱
mute()
uniformCost(etype, [], [], [], ["红"], {}, 0)
def WD03_001(self, card, etype): #代号•皮璐璐可•T
mute()
uniformCost(etype, ["蓝", "蓝", "蓝"], [], [], [], {}, 0)
def WD03_002(self, card, etype): #代号•皮璐璐可•G
mute()
uniformCost(etype, ["蓝", "蓝"], [], [], [], {}, 0)
def WD03_003(self, card, etype): #代号•皮璐璐可•M
mute()
uniformCost(etype, ["蓝"], [], [], [], {}, 0)
def WD03_004(self, card, etype): #代号•皮璐璐可•K
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD03_005(self, card, etype): #代号•皮璐璐可
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD03_006(self, card, etype): #窥视分析
mute()
uniformCost(etype, [], [], [], ["蓝", "蓝", "蓝"], {}, 0)
def WD03_007(self, card, etype): #不可行动
mute()
uniformCost(etype, [], [], [], ["蓝", "蓝", "蓝"], {}, 0)
def WD03_008(self, card, etype): #双重抽卡
mute()
uniformCost(etype, [], [], [], ["蓝", "无"], {}, 0)
def WD03_009(self, card, etype): #技艺代号 R•M•N
mute()
uniformCost(etype, [], [], ["蓝"], [], {}, 0)
def WD03_010(self, card, etype): #技艺代号 D•R•S
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD03_011(self, card, etype): #技艺代号 S•M•P
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD03_012(self, card, etype): #技艺代号 J•V
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD03_013(self, card, etype): #技艺代号 S•C
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD03_014(self, card, etype): #技艺代号 R•F•R
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD03_015(self, card, etype): #真可惜
mute()
uniformCost(etype, [], [], [], ["蓝"], {}, 0)
def PR_017(self, card, etype): #中枪 古罗马长矛
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def PR_018(self, card, etype): #罗石 秘银
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def PR_019(self, card, etype): #珍宝
mute()
uniformCost(etype, [], [], [], ["蓝"], {}, 0) #special case , we treat TREASURE's special cost as its effect.
def PR_020(self, card, etype): #增援
mute()
effectcost2 = ["绿"]
choiceList = ["「从你的卡组里探寻1张力量10000以上的SIGNI卡,将其公开并加入加入手牌。之后将卡组洗切。」", \
"「从你的卡组顶将2张卡放置到能量区」"]
colorsList = ['#FF0000', '#FF0000']
global cost
global echoice
cost = effectcost2
if etype == 2:
echoice = askChoice("选择一个效果发动:", choiceList, colorsList)
def PR_040(self, card, etype): #多重
mute()
effectcost2 = ["白","白","蓝","蓝"]
choiceList = ["「对战对手的1只LRIG在这个回合中不能攻击。」", \
"「将对战对手的所有SIGNI冻结。」", \
"「将对战对手的1只SIGNI返回手牌。」", \
"「抽2张卡。」"]
colorsList = ['#FF0000', '#FF0000', '#FF0000', '#FF0000']
global cost
global echoice
cost = effectcost2
if etype == 2:
echoice1 = askChoice("选择第一个效果:", choiceList, colorsList)
del choiceList[echoice1 - 1]
del colorsList[echoice1 - 1]
inter = askChoice("选择第二个效果:", choiceList, colorsList)
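            # The first pick was removed from choiceList above, so re-map the second
            # pick back onto the original 1-based option numbering.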
if echoice1 <= inter:
echoice2 = inter + 1
else:
echoice2 = inter
echoice = [echoice1, echoice2]
def WD04_001(self, card, etype): #四之娘 绿姬
mute()
uniformCost(etype, ["绿", "绿", "绿"], [], [], [], {}, 0)
def WD04_002(self, card, etype): #三之娘 绿姬
mute()
uniformCost(etype, ["绿", "绿"], [], [], [], {}, 0)
def WD04_003(self, card, etype): #二之娘 绿姬
mute()
uniformCost(etype, ["绿"], [], [], [], {}, 0)
def WD04_004(self, card, etype): #一之娘 绿姬
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD04_005(self, card, etype): #斗娘 绿姬
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD04_006(self, card, etype): #意气扬扬
mute()
uniformCost(etype, [], [], [], ["绿"], {}, 0)
def WD04_007(self, card, etype): #再三再四
mute()
uniformCost(etype, [], [], [], ["绿"], {}, 0)
def WD04_008(self, card, etype): #付和雷同
mute()
uniformCost(etype, [], [], [], ["绿", "绿", "绿"], {}, 0)
def WD04_009(self, card, etype): #幻兽 青龙
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD04_010(self, card, etype): #幻兽 朱雀小姐
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD04_013(self, card, etype): #幻兽 小玄武
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD04_015(self, card, etype): #幻兽 白虎
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD04_016(self, card, etype): #侍从 Q2
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD04_017(self, card, etype): #侍从 O2
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD04_018(self, card, etype): #堕络
mute()
uniformCost(etype, [], [], [], ["绿"], {}, 0)
def WD05_001(self, card, etype): #狱卒阎魔 乌莉丝
mute()
uniformCost(etype, ["黑", "黑", "黑"], [], [], [], {}, 0)
def WD05_002(self, card, etype): #阿鼻阎魔 乌莉丝
mute()
uniformCost(etype, ["黑", "黑"], [], [], [], {}, 0)
def WD05_003(self, card, etype): #众合阎魔 乌莉丝
mute()
uniformCost(etype, ["黑"], [], [], [], {}, 0)
def WD05_004(self, card, etype): #灼热阎魔 乌莉丝
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD05_005(self, card, etype): #阎魔 乌莉丝
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD05_006(self, card, etype): #处刑时刻
mute()
uniformCost(etype, [], [], [], ["黑", "黑"], {}, 0)
def WD05_007(self, card, etype): #永恒处刑
mute()
uniformCost(etype, [], [], [], ["黑", "黑", "黑"], {}, 0)
def WD05_008(self, card, etype): #出墓
mute()
uniformCost(etype, [], [], [], ["黑", "黑", "黑", "黑", "黑"], {}, 0)
def WD05_009(self, card, etype): #堕落炮女 缅茨姆
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD05_010(self, card, etype): #废恶象征 别西卜
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD05_011(self, card, etype): #堕落炮女 卡莉
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD05_012(self, card, etype): #背德象征 科思莫
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD05_013(self, card, etype): #小恶象征 小鬼
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD05_014(self, card, etype): #堕落炮女 魅魔
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WD05_017(self, card, etype): #完全漆黑
mute()
uniformCost(etype, [], [], [], ["黑"], {}, 0)
def WD05_018(self, card, etype): #回想的祝福
mute()
uniformCost(etype, [], [], [], ["无", "无"], {}, 0)
def WX02_001(self, card, etype): #金木犀之巫女 玉依姬
mute()
growcost = ["白","红", "绿"]
effectcost1 = ["白"]
effectcost2 = ["白","红"]
effectcost3 = ["白","绿","无"]
choiceList = ["【起】白1+红1:将对战对手的1只力量7000以下的SIGNI驱逐。", "【起】白1+绿1+无1:将对战对手的1只力量10000以上的SIGNI驱逐。"]
colorsList = ['#FF0000', '#FF0000']
global cost
global echoice
if etype == -2:
return False
elif etype == -1:
            cost = []
elif etype == 0: #to grow
cost = growcost
for color in cost:
notify(color)
elif etype == 1: #to activate Arrival
cost = effectcost1
else:
echoice = askChoice("选择一个效果发动:", choiceList, colorsList, customButtons = ["取消"])
if echoice == 1:
cost = effectcost2
elif echoice == 2:
cost = effectcost3
def WX02_002(self, card, etype): #火鸟风月 游月·肆
mute()
uniformCost(etype, ["红", "红", "绿", "绿"], [], [], [], {"Down":{"target":"self"}}, 2)
def WX02_003(self, card, etype): #艾尔德拉×Ⅳ式
mute()
uniformCost(etype, ["蓝", "蓝", "蓝"], [], [], [], {}, 0)
def WX02_004(self, card, etype): #无间阎魔 乌莉丝
mute()
uniformCost(etype, ["黑", "黑", "黑"], [], [], ["黑"], {"Discard":{"color": "黑", "ctype": "SIGNI", "qty": 1}}, 0)
def WX02_005(self, card, etype): #纯白希望
mute()
uniformCost(etype, [], [], [], ["白", "白", "红"], {}, 0)
def WX02_006(self, card, etype): #漆黑野望
mute()
uniformCost(etype, [], [], [], ["黑", "黑", "黑"], {}, 0)
def WX02_007(self, card, etype): #轰罪炎 游月·叁
mute()
growcost = ["红", "绿"]
effectcost2 = ["红"]
effectcost3 = ["绿"]
choiceList = ["【起】红1:将对战对手的1只力量5000以下的SIGNI驱逐。", "【起】绿1:直到回合结束时为止,你所有的SIGNI的力量+5000。"]
colorsList = ['#FF0000', '#FF0000']
global cost
global echoice
if etype == -2:
return False
elif etype == -1:
            cost = []
elif etype == 0: #to grow
cost = growcost
elif etype == 1: #to activate Arrival
cost = effectcost1
else:
echoice = askChoice("选择一个效果发动:", choiceList, colorsList, customButtons = ["取消"])
if echoice == 1:
cost = effectcost2
elif echoice == 2:
cost = effectcost3
def WX02_008(self, card, etype): #焰海 游月•贰
mute()
uniformCost(etype, ["红"], [], [], [], {}, 0)
def WX02_009(self, card, etype): #焰 游月•壹
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_010(self, card, etype): #艾尔德拉×Ⅲ式
mute()
uniformCost(etype, ["蓝", "蓝"], [], [], [], {}, 0)
def WX02_011(self, card, etype): #艾尔德拉×Ⅱ式
mute()
uniformCost(etype, ["蓝"], [], [], [], {}, 0)
def WX02_012(self, card, etype): #艾尔德拉×Ⅰ式
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_013(self, card, etype): #叫唤阎魔 乌莉丝
mute()
uniformCost(etype, ["黑", "黑"], [], [], [], {"Discard":{"qty": 1}}, 1)
def WX02_014(self, card, etype): #黑绳阎魔 乌莉丝
mute()
uniformCost(etype, ["黑"], [], [], [], {}, 0)
def WX02_015(self, card, etype): #等活阎魔 乌莉丝
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_016(self, card, etype): #哥特界限
mute()
uniformCost(etype, [], [], [], ["白", "无", "无"], {}, 0)
def WX02_017(self, card, etype): #气炎万丈
mute()
uniformCost(etype, [], [], [], ["红", "绿"], {}, 0)
def WX02_018(self, card, etype): #火红柳绿
mute()
uniformCost(etype, [], [], [], ["红"], {}, 0)
def WX02_019(self, card, etype): #交织生命护甲
mute()
uniformCost(etype, [], [], [], ["蓝"], {}, 0)
def WX02_020(self, card, etype): #鲜血斩击
mute()
uniformCost(etype, [], [], [], ["黑", "黑"], {}, 0)
def WX02_021(self, card, etype): #先驱的大天使 大天使该隐
mute()
global cost
cost = []
effectcost = ["白", "白"]
if etype == 1:
cost = effectcost
else:
cost =[]
def WX02_022(self, card, etype): #弩炮 狙击枪
mute()
uniformCost(etype, [], [], ["红"], [], {}, 0)
def WX02_023(self, card, etype): #幻水姬 丝派拉尔•卡米拉
mute()
uniformCost(etype, [], [], ["蓝"], [], {}, 0)
def WX02_024(self, card, etype): #罗植姬 戈休·雅格尼丝
mute()
uniformCost(etype, [], [], ["绿"], [], {"Down":{"target":"植物"}}, 0)
def WX02_025(self, card, etype): #恶魔姬 安娜•蜃影
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_026(self, card, etype): #愿望危机
mute()
uniformCost(etype, [], [], [], ["白"], {}, 0)
def WX02_027(self, card, etype): #焦土的代价
mute()
uniformCost(etype, [], [], [], ["红", "红"], {}, 0)
def WX02_028(self, card, etype): #谜言暗气
mute()
uniformCost(etype, [], [], [], ["黑", "黑"], {}, 0)
def WX02_029(self, card, etype): #宝具 御剑
mute()
global cost
global echoice
global specialcost
if etype == -2:
return False
elif etype == -1: pass
elif etype == 0: pass
elif etype == 1:
specialcost = {"Discard":{"ctype": "SIGNI", "signiclass": ["武装", "武器"], "qty": 1}}
else:
pass
def WX02_030(self, card, etype): #宝具 御镜
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX02_031(self, card, etype): #使其反跳
mute()
uniformCost(etype, [], [], [], ["白", "白"], {}, 0)
def WX02_032(self, card, etype): #罗石 蛋白石
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"ctype": "SIGNI", "signiclass": ["矿石", "宝石"], "qty": 1}}, 1)
def WX02_033(self, card, etype): #罗石 红玉髓
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX02_034(self, card, etype): #不希望的冲动
mute()
uniformCost(etype, [], [], [], ["红", "绿"], {}, 0)
def WX02_035(self, card, etype): #技艺代号 C·P·U
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"ctype": "SIGNI", "signiclass": ["电机"], "qty": 1}}, 1)
def WX02_036(self, card, etype): #技艺代号 G•A•B
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX02_037(self, card, etype): #飞溅
mute()
uniformCost(etype, [], [], [], ["蓝", "无"], {}, 0)
def WX02_038(self, card, etype): #幻兽 雉
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"ctype": "SIGNI", "signiclass": ["空兽", "地兽"], "qty": 1}}, 1)
def WX02_039(self, card, etype): #幻兽 八犬
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX02_040(self, card, etype): #着植
mute()
uniformCost(etype, [], [], [], ["绿", "绿", "无", "无", "无"], {}, 0)
def WX02_041(self, card, etype): #大损
mute()
uniformCost(etype, [], [], [], ["绿", "绿"], {}, 0)
def WX02_042(self, card, etype): #反制代号 巴勒贝克
mute()
uniformCost(etype, [], [], [], ["黑"], {"Down":{"target":"self"}}, 2)
def WX02_043(self, card, etype): #反制代号 基西拉
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX02_044(self, card, etype): #大罪缘由 巴力
mute()
uniformCost(etype, [], [], [], [], {"Down":{"target":"self"}}, 2)
def WX02_045(self, card, etype): #献祭斩击
mute()
uniformCost(etype, [], [], [], ["黑", "黑", "黑"], {}, 0)
def WX02_046(self, card, etype): #牺牲的微笑 丘雅耶尔
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_047(self, card, etype): #虚构的爱情 希耶尔
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_048(self, card, etype): #宝具 勾玉
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_049(self, card, etype): #博爱的聚集 萨尼耶尔
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_050(self, card, etype): #刀剑本领
mute()
uniformCost(etype, [], [], [], ["白", "白"], {}, 0)
def WX02_051(self, card, etype): #轰炮 远射装置
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_052(self, card, etype): #爆炮 MP5
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_053(self, card, etype): #罗石 翡翠
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_054(self, card, etype): #小炮 枪匠
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_055(self, card, etype): #光欲宝剑
mute()
uniformCost(etype, [], [], [], ["红"], {}, 0)
def WX02_056(self, card, etype): #幻水 奥科特
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_057(self, card, etype): #幻水 珍珠
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_058(self, card, etype): #技艺代号 M•M•R
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_059(self, card, etype): #幻水 科塞梅
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_060(self, card, etype): #探寻者
mute()
uniformCost(etype, [], [], [], ["无"], {}, 0)
def WX02_061(self, card, etype): #蓝色收获
mute()
uniformCost(etype, [], [], [], ["蓝", "无"], {}, 0)
def WX02_062(self, card, etype): #罗植 葵小姐
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_063(self, card, etype): #罗植 莲
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_064(self, card, etype): #幻兽 猴
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_065(self, card, etype): #罗植 虎尾兰
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_066(self, card, etype): #丰润
mute()
uniformCost(etype, [], [], [], ["绿"], {}, 0)
def WX02_067(self, card, etype): #恶魔续发 莉莉丝
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_068(self, card, etype): #恶魔勇武 摩莉甘
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"ctype": "SIGNI", "signiclass": ["恶魔"], "qty": 1}}, 1)
def WX02_069(self, card, etype): #反制代号 星云
mute()
uniformCost(etype, [], [], [], ["黑", "黑"], {}, 0)
def WX02_070(self, card, etype): #真实死神 阿尼玛
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_071(self, card, etype): #反制代号 德里
mute()
uniformCost(etype, [], [], [], ["黑", "无"], {}, 0)
def WX02_072(self, card, etype): #反制代号 马丘比
mute()
uniformCost(etype, [], [], [], [], {"Discard":{"qty": 1}, "Down":{"target":"self"}}, 2)
def WX02_073(self, card, etype): #反制代号 敌左反魔
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_074(self, card, etype): #小恶忧郁 格里姆
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_075(self, card, etype): #造墓者
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_077(self, card, etype): #侍从 T2
mute()
uniformCost(etype, [], [], [], [], {}, 0)
def WX02_078(self, card, etype): #侍从 D2
mute()
uniformCost(etype, [], [], [], [], {}, 0) | apache-2.0 | -7,812,407,356,915,212,000 | 30.585542 | 160 | 0.426468 | false | 2.698346 | false | false | false |
geobricks/pgeorest | pgeorest/rest/stats.py | 1 | 11799 | import json
from flask import Blueprint, Response
from flask.ext.cors import cross_origin
import copy
import StringIO
import uuid
from pgeo.error.custom_exceptions import PGeoException, errors
from pgeo.utils import log
from pgeorest.config.settings import settings
from pgeo.stats.raster import Stats
from pgeo.gis.raster_scatter import create_scatter
from pgeo.gis.raster_mapalgebra import filter_layers
from flask import request
from pgeo.manager.manager import Manager
app = Blueprint(__name__, __name__)
log = log.logger(__name__)
# TODO: Review the REST API so it also covers layers that are not published but exist on the filesystem
# default json_statistics
raster_statistics = {
"raster": {
"uid": None
},
"stats": {
"force": True
}
}
raster_histogram = {
"raster": {
"uid": None
},
"stats": {
"force": True,
"buckets": 256
}
}
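# These dicts are templates: each request handler deep-copies one, fills in the
# layer uid (and bucket/min/max values where relevant) and hands it to Stats.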
@app.route('/')
def index():
"""
Welcome message
@return: welcome message
"""
return 'Welcome to the stats module!'
@app.route('/raster/<layer>/', methods=['GET'])
@app.route('/raster/<layer>', methods=['GET'])
def get_stats(layer):
"""
Extracts all the statistics of a layer
@param layer: workspace:layername
@return: json with the raster statistics
"""
try:
if ":" not in layer:
return PGeoException("Please Specify a workspace for " + str(layer), status_code=500)
json_stats = copy.deepcopy(raster_statistics)
json_stats["raster"]["uid"] = layer
# Module to process statistics
stats = Stats(settings)
return Response(json.dumps(stats.get_stats(json_stats)), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/raster/<layer>/hist/', methods=['GET'])
@app.route('/raster/<layer>/hist', methods=['GET'])
@cross_origin()
def get_histogram(layer):
"""
Extracts histogram from a layer
@param layer: workspace:layername
@return: json with the raster statistics
"""
try:
if ":" not in layer:
return PGeoException("Please Specify a workspace for " + str(layer), status_code=500)
json_stats = copy.deepcopy(raster_histogram)
json_stats["raster"]["uid"] = layer
# Module to process statistics
stats = Stats(settings)
return Response(json.dumps(stats.get_histogram(json_stats)), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/raster/<layer>/hist/<buckets>/', methods=['GET'])
@app.route('/raster/<layer>/hist/<buckets>', methods=['GET'])
@cross_origin(origins='*')
def get_histogram_buckets(layer, buckets):
"""
Extracts histogram from a layer
TODO: add a boolean and buckets
default: boolean = True, buckets = 256
@param layer: workspace:layername
@param buckets: number of buckets i.e. 256
@return: json with the raster statistics
"""
try:
if ":" not in layer:
return PGeoException("Please Specify a workspace for " + str(layer), status_code=500)
json_stats = copy.deepcopy(raster_histogram)
json_stats["raster"]["uid"] = layer
json_stats["stats"]["buckets"] = int(buckets)
# Module to process statistics
stats = Stats(settings)
return Response(json.dumps(stats.get_histogram(json_stats)), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/raster/<layer>/hist/buckets/<buckets>/min/<min>/max/<max>/', methods=['GET'])
@app.route('/raster/<layer>/hist/buckets/<buckets>/min/<min>/max/<max>', methods=['GET'])
@cross_origin(origins='*')
def get_histogram_buckets_min_max(layer, buckets, min, max):
"""
Extracts histogram from a layer
TODO: add a boolean and buckets
default: boolean = True, buckets = 256
@param layer: workspace:layername
@param buckets: number of buckets i.e. 256
@return: json with the raster statistics
"""
try:
if ":" not in layer:
return PGeoException("Please Specify a workspace for " + str(layer), status_code=500)
json_stats = copy.deepcopy(raster_histogram)
json_stats["raster"]["uid"] = layer
json_stats["stats"]["buckets"] = int(buckets)
json_stats["stats"]["min"] = float(min)
json_stats["stats"]["max"] = float(max)
# Module to process statistics
stats = Stats(settings)
return Response(json.dumps(stats.get_histogram(json_stats)), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/rasters/<layers>/lat/<lat>/lon/<lon>/', methods=['GET'])
@app.route('/rasters/<layers>/lat/<lat>/lon/<lon>', methods=['GET'])
@cross_origin(origins='*')
def get_lat_lon(layers, lat, lon):
"""
Get the value of the layer at lat/lon position
@param layer: workspace:layername
@param lat: latitude
@param lon: longitude
@return: json with the raster statistics
"""
try:
if ":" not in layers:
return PGeoException("Please Specify a workspace for " + str(layers), status_code=500)
input_layers = layers.split(",")
# Module to process statistics
stats = Stats(settings)
s = stats.get_location_values(input_layers, lat, lon)
return Response(json.dumps(s), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/raster/spatial_query/', methods=['POST'])
@app.route('/raster/spatial_query', methods=['POST'])
@cross_origin(origins='*', headers=['Content-Type'])
def get_stats_by_layer():
"""
    TODO: is this useful, or should the one below be used instead? @Deprecated?
Get raster statistic filtered by a spatial query:
TODO add json definition of the spatial query and statistics that can be applied
:return: a json with the zonal statistics
"""
try:
user_json = request.get_json()
# Module to process statistics
stats = Stats(settings)
s = stats.zonal_stats(user_json)
return Response(json.dumps(s), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/rasters/spatial_query/', methods=['POST'])
@app.route('/rasters/spatial_query', methods=['POST'])
@cross_origin(origins='*', headers=['Content-Type'])
def get_stats_by_layers():
"""
Get raster statistic filtered by a spatial query:
TODO add json definition of the spatial query and statistics that can be applied
:return: a json with the zonal statistics
"""
try:
# Module to process statistics
stats = Stats(settings)
user_json = request.get_json()
response = []
for uid in user_json["raster"]["uids"]:
json_stat = copy.deepcopy(user_json)
json_stat["raster"]["uid"] = uid
s = {}
s[uid] = stats.zonal_stats(json_stat)
response.append(s)
return Response(json.dumps(response), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/rasters/scatter_analysis/', methods=['POST'])
@app.route('/rasters/scatter_analysis', methods=['POST'])
@cross_origin(origins='*', headers=['Content-Type'])
def get_scatter_analysis():
try:
# Module to process statistics
stats = Stats(settings)
user_json = request.get_json()
log.info(user_json)
response = []
for uid in user_json["raster"]["uids"]:
log.info(user_json)
json_stat = copy.deepcopy(user_json)
json_stat["raster"]["uid"] = uid
response.append(stats.zonal_stats(json_stat))
log.info(response[0])
log.info(response[1])
# io.BytesIO()
si = StringIO.StringIO()
result = stats.create_csv_merge(si, response[0], response[1])
log.info(result.getvalue())
return Response(result.getvalue())
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/rasters/scatter_plot/<layers>/', methods=['GET'])
@app.route('/rasters/scatter_plot/<layers>', methods=['GET'])
@cross_origin(origins='*', headers=['Content-Type'])
def get_scatter_plot(layers):
try:
"""
Create a scatter plot from two rasters of the same dimension
@param layers: workspace:layername1,workspace:layername2
@return: json with the scatter plot data
"""
if ":" not in layers:
return PGeoException("Please Specify a workspace for " + str(layers), status_code=500)
input_layers = layers.split(",")
stats = Stats(settings)
raster_path1 = stats.get_raster_path(input_layers[0])
raster_path2 = stats.get_raster_path(input_layers[1])
# creating scatter
response = create_scatter(raster_path1, raster_path2, 300)
return Response(json.dumps(response), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
@app.route('/rasters/mapalgebra/layers/<layers>/minmax/<minmax>', methods=['GET'])
@app.route('/rasters/mapalgebra/layers/<layers>/minmax/<minmax>', methods=['GET'])
@cross_origin(origins='*', headers=['Content-Type'])
def get_filter_layers(layers, minmax):
try:
"""
Create a temporary mask layer using min-max of the layers
@param layers: workspace:layername1,workspace:layername2
@param minmax: min1,max1,min2,max2
@return: json with the scatter plot data
"""
if ":" not in layers:
return PGeoException("Please Specify a workspace for " + str(layers), status_code=500)
input_layers = layers.split(",")
minmax_values = minmax.split(",")
stats = Stats(settings)
# getting raster information
raster_path1 = stats.get_raster_path(input_layers[0])
raster_path2 = stats.get_raster_path(input_layers[1])
# getting raster min max values
min1 = float(minmax_values[0])
max1 = float(minmax_values[1])
min2 = float(minmax_values[2])
max2 = float(minmax_values[3])
# create the layer
path = filter_layers(raster_path1, raster_path2, min1, max1, min2, max2)
        # creating metadata
uid = str(uuid.uuid4())
metadata_def = {}
metadata_def["uid"] = "tmp:" + uid
metadata_def["title"] = {}
metadata_def["title"]["EN"] = "masked_" + uid
metadata_def["meSpatialRepresentation"] = {}
# publish the new tmp layer
# TODO: metadata? style to be applied?
# TODO: how to handle a tmp workspace overhead?
s = copy.deepcopy(settings)
        # this copies the geoserver_tmp data to the "geoserver" settings to be passed to the manager
s["geoserver"] = s["geoserver_tmp"]
manager = Manager(s)
manager.publish_coverage(path, metadata_def, False, True, False)
# adding the tmp geoserver WMS URL
if "geoserver_wms" in s["geoserver"]:
metadata_def["url_wms"] = s["geoserver"]["geoserver_wms"]
return Response(json.dumps(metadata_def), content_type='application/json; charset=utf-8')
except PGeoException, e:
raise PGeoException(e.get_message(), e.get_status_code())
| gpl-2.0 | 3,271,741,245,350,275,000 | 34.220896 | 116 | 0.635223 | false | 3.795111 | false | false | false |
rmorshea/spectate | tests/test_events.py | 1 | 1896 | import pytest
from spectate import mvc
from .mock import model_events, Counter
def test_hold_events():
counter, events = model_events(Counter)
with mvc.hold(counter) as cache:
counter.increment(1)
assert cache == [{"old": 0, "new": 1}]
counter.increment(1)
assert cache == [{"old": 0, "new": 1}, {"old": 1, "new": 2}]
# Pop off one of the events so
# it isn't sent to notifiers.
cache.pop()
assert events == [{"old": 0, "new": 1}]
def test_hold_uses_events_from_reducer():
counter, events = model_events(Counter)
def reducer(model, events):
assert events == [{"old": 0, "new": 1}]
yield {"custom": "event-1"}
yield {"custom": "event-2"}
with mvc.hold(counter, reducer=reducer):
counter.increment(1)
assert events == [{"custom": "event-1"}, {"custom": "event-2"}]
def test_rollback_events():
counter, events = model_events(Counter)
with pytest.raises(ValueError):
with mvc.rollback(counter):
counter.increment(1)
raise ValueError()
assert not events
def test_rollback_calls_undo_without_side_effects():
calls = []
counter, events = model_events(Counter)
def undo(model, events, error):
calls.append(1)
assert error is error_from_rollback
assert events == ({"old": 0, "new": 1},)
# this decrement should not notify
model.decrement(1)
with pytest.raises(ValueError):
with mvc.rollback(counter, undo=undo):
counter.increment(1)
error_from_rollback = ValueError()
raise error_from_rollback
assert calls
assert counter.value == 0
def test_mute_events():
counter, events = model_events(Counter)
with mvc.mute(counter):
counter.increment(1)
counter.increment(1)
assert events == []
| mit | 2,735,460,593,841,551,000 | 23.307692 | 68 | 0.595464 | false | 3.761905 | true | false | false |
blacksector/python-url-shortner | app.py | 1 | 4829 | from flask import Flask, jsonify, abort, render_template, request, session, escape
import MySQLdb
from flask_limiter import Limiter
import time
import random
app = Flask(__name__)
#####################################
#### MAIN CONFIG ####
#####################################
# Set website limits
limiter = Limiter(app, global_limits=["2 per second"])
websiteTitle = "Python" # Website Title
websiteURL = "http://192.168.2.12" # Website address, no "/" needed
websitePort = 1313 # Website port number to use
MySQLHost = "localhost" # MySQL hostname
MySQLUser = "root" # MySQL username
MySQLPass = "" # MySQL pass
MySQLDB = "pythonurlshortner" # Database name
storeIP = True # Store IP Address of user?
urlLength = 6 # The length of your short URLS
enableHyphenAndUnderscore = True # Have a "-" and "_"
# (Hyphen/Dash and Underscore) in URLs?
enableNumbers = True # Have numbers in the short URL?
enableUppercase = True # Have upper case along with lowercase
enableRedirectTimeout = False # Have a redirect page time out
# To use this give it a seconds timeout
# To disable, set to "False"
##############################################################################################################################
#################################### DO NOT EDIT BELOW UNLESS YOU KNOW WHAT YOU ARE DOING ####################################
##############################################################################################################################
#####################################
#### TOOLS ####
#####################################
def genUrl():
l = list(letterChoices)
final = ""
for x in range(urlLength):
final += random.choice(l)
return final
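# Illustrative only -- the output is random: with urlLength = 6 and every
# character class enabled, genUrl() could return something like "aK3_x9".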
#####################################
#### SETUP URLS ####
#####################################
# Numbers and letters that look similar have been removed!
numbers = "123456789"
lowerCase = "abcdefghjkmnpqrstuvwxyz"
upperCase = lowerCase.upper() # Will take the lowercase variable
# and turn it into uppercase
letterChoices = lowerCase
if enableHyphenAndUnderscore:
letterChoices += "-_"
if enableUppercase:
letterChoices += upperCase
if enableNumbers:
letterChoices += numbers
#####################################
#### HOME PAGE ####
#####################################
# The main page
@app.route('/', methods=["GET"])
@app.route('/<path:url>', methods=["GET"])
@limiter.limit("200 per minute")
def home_page(url=None):
if (not url):
return render_template('index.html', websiteTitle=websiteTitle)
else:
db = MySQLdb.connect(MySQLHost, MySQLUser, MySQLPass, MySQLDB)
cursor = db.cursor()
cursor.execute("SELECT longLink FROM short WHERE shortLink='%s'" % (str(escape(url))))
if cursor.rowcount > 0:
foundURL = cursor.fetchone()[0]
db.close()
if (enableRedirectTimeout):
return render_template('redirect.html', redirectTimeout=enableRedirectTimeout, url=foundURL)
else:
return render_template('redirect.html', redirectTimeout=0, url=foundURL)
else:
return render_template('redirect.html', redirectTimeout=0, url="/")
#####################################
#### SAVE PAGE ####
#####################################
@app.route('/saveURL', methods=["GET", "POST"])
def save_URL():
if request.method == "POST":
url = str(escape(request.form["url"]))
cont = True
while cont:
custom = str(genUrl())
db = MySQLdb.connect(MySQLHost, MySQLUser, MySQLPass, MySQLDB)
cursor = db.cursor()
cursor.execute("SELECT shortLink FROM short WHERE shortLink='%s'" % (str(custom)))
if cursor.rowcount > 0:
cont = True
else:
cont = False
cursor = db.cursor()
cursor.execute("INSERT INTO short (id, shortLink, longLink, time, ipAddress) VALUES (DEFAULT, '%s', '%s', '%d','%s')" % (custom, url, time.time(), str(request.remote_addr)))
db.commit()
db.close()
return render_template('result.html', websiteTitle=websiteTitle, longURL=url, websiteURL=websiteURL+":"+str(websitePort), shortURL=custom)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=websitePort)
| gpl-2.0 | -3,498,428,097,392,850,000 | 31.409396 | 181 | 0.485608 | false | 4.68835 | false | false | false |
acutesoftware/worldbuild | worldbuild/quest_gen/quest.py | 1 | 6660 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# quest.py
import random
objects = [
{'name':'rusty key',
'desc':'an old rusty key',
'location':'woods',
'when_seen':'the key is old and rusty, but glitters slightly',
'on_success':'the key slides into the lock with a firm click'},
{'name':'chest',
'desc':'a battered chest',
'location':'ravine',
'when_seen':'the chest is covered in dirt',
'on_success':'the chest opens, revealing $TREASURE$'},
]
locations = [
{'name':'home'},
{'name':'woods'},
{'name':'forest'},
{'name':'hills'},
{'name':'ravine'},
]
characters = [
{'name':'Zentar',
}
]
def main():
"""
for loc in locations:
print('you search ' + loc['name'])
for obj in objects:
if obj['location'] == loc['name']:
print('you find ', obj['name'])
print(obj['when_seen'])
"""
# setup locations
hills = Location('hills', 'rolling hills', [4,8])
forest = Location('forest', 'Dark forest near the hills', [4,7])
woods = Location('woods', 'Woodland between mountains and river', [7,12])
beach = Location('beach', 'Nice sandy beach', [2,1])
wood = Item('wood', 'Wooden planks', [forest, hills, woods ])
string = Item('string', 'Used for fishing rods and bows', [hills ])
shells = Item('shells', 'Shells', [beach ])
jim = NPC('Jim', forest, 'Idle', [wood])
sandy = NPC('Sandy', hills, 'hunting', [string, shells])
# generate quest list
my_quests = []
my_quests.append(Quest().create_next_quest_via_npc_needs(jim))
my_quests.append(Quest().create_next_quest_via_npc_needs(sandy))
# Display game
print('NPCs in this land:')
print(jim)
print(sandy)
print('Your Quest List:')
for q in my_quests:
print(q)
class Location(object):
"""
map areas
"""
def __init__(self, name, desc, coords):
self.name = name
self.desc = desc
self.coords = coords
def __str__(self):
res = ''
res += self.name + ' - ' + self.desc
res += str(self.coords)
return res
class DataSet(object):
"""
handles a collection of Objects loaded from a reference file
"""
def __init__(self):
self.raw_data = []
self.object_list = []
def __str__(self):
return ''.join([d for d in self.raw_data])
def fill_from_csv(self, fname):
with open(fname, 'r') as fip:
for line in fip:
self.raw_data.append(line)
class Locations(DataSet):
"""
handles a collection of Locations loaded from a reference file
"""
def __init__(self):
DataSet.__init__(self)
def rebuild_list(self):
self.object_list = [] # clear the object list
for raw_line in self.raw_data:
cols = raw_line.split(',')
print('LOCATION RAW = ', cols)
cur_loc = Location(cols[0], cols[1], cols[2])
self.object_list.append(cur_loc)
class NPCs(DataSet):
"""
handles a collection of NPC Characters loaded from a reference file
"""
def __init__(self):
DataSet.__init__(self)
def rebuild_list(self):
self.object_list = [] # clear the object list
for raw_line in self.raw_data:
cols = raw_line.split(',')
cur_npc = NPC(cols[0], cols[1], cols[2], [cols[3]])
self.object_list.append(cur_npc)
class Items(DataSet):
"""
handles a collection of Items loaded from a reference file
"""
def __init__(self):
DataSet.__init__(self)
class Item(object):
"""
Items / objects that are in the world. Can be collected
or crafted
"""
def __init__(self, name, desc, spawns_at_locations):
self.name = name
self.desc = desc
self.spawns_at_locations = spawns_at_locations
def __str__(self):
res = ''
res += self.name + ' - ' + self.desc + ' (Spawns at locations:'
res += '|'.join([l.name for l in self.spawns_at_locations])
res += ')\n'
return res
class NPC(object):
"""
a Non-Player character
"""
def __init__(self, name, location, status, items_needed):
self.name = name
self.location = location
self.status = status
self.items_needed = items_needed
"""
print('init NPC!')
print('self.name = ', self.name )
print('self.location = ', self.location )
print('self.status = ', self.status )
print('self.items_needed = ', self.items_needed )
"""
def __str__(self):
res = ''
res += self.name + ' is at ' + str(self.location) + '. Status = ' + self.status
if len(self.items_needed) > 0:
            if isinstance(self.items_needed, list):
res += '\nThis NPC needs : '
#for i in self.items_needed:
# res += str(i.name)
res += ', '.join([i.name for i in self.items_needed])
return res
class Quest(object):
"""
handles a single quest
"""
def __init__(self):
pass
def __str__(self):
res = '+------------------------------------------------------------\n'
res += '| ***' + self.name + ' ***\n'
res += '| ' + self.desc + '\n'
res += '| Location = ' + str(self.location[0].name) + '\n'
res += '| Status = ' + self.status + '\n'
res += '| Reward = ' + self.reward + '\n'
res += '| Return to ' + self.quest_giver.name + ' with '
#res += ','.join([i.name for i in self.items_required])
res += str(self.quantity) + ' ' + self.items_required.name + '\n'
res += '+------------------------------------------------------------\n'
return res
def create_next_quest_via_npc_needs(self, npc):
"""
takes NPC as parameter and finds the next quest this person needs
"""
for needs in npc.items_needed: # just the first one
self.name = 'Collect ' + needs.name
self.quest_giver = npc
self.quantity = random.choice([4,8,10,25])
self.reward = random.choice(['fishing rod', 'hammer', '5 Gold', '10 Gold'])
self.items_required = needs
self.desc = npc.name + ' needs you to collect ' + needs.name #+ '. You can find these at ' + str(needs.spawns_at_locations)
self.status = 'Available'
self.location = needs.spawns_at_locations
return self
if __name__ == '__main__':
main()
| gpl-2.0 | 3,023,595,139,140,641,000 | 27.461538 | 136 | 0.517868 | false | 3.514512 | false | false | false |
xodus7/tensorflow | tensorflow/contrib/distribute/python/tpu_strategy.py | 2 | 13762 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Distribution Strategy.
This is experimental. It's not ready for general use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import one_device_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import device_util
from tensorflow.python.util import nest
def get_tpu_system_metadata(tpu_cluster_resolver):
"""Retrieves TPU system metadata given a TPUClusterResolver."""
master = tpu_cluster_resolver.master()
# pylint: disable=protected-access
cluster_spec = tpu_cluster_resolver.cluster_spec()
cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata(
master,
cluster_def=cluster_def,
query_topology=False))
return tpu_system_metadata
class TPUStrategy(one_device_strategy.OneDeviceStrategy):
"""Experimental TPU distribution strategy implementation."""
def __init__(self, tpu_cluster_resolver, steps_per_run, num_cores=None):
"""Initializes the TPUStrategy object.
Args:
tpu_cluster_resolver: A tf.contrib.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
steps_per_run: Number of steps to run on device before returning to the
host. Note that this can have side-effects on performance, hooks,
metrics, summaries etc.
This parameter is only used when Distribution Strategy is used with
estimator or keras.
num_cores: Number of cores to use on the TPU. If None specified, then
auto-detect the cores and topology of the TPU system.
"""
# TODO(sourabhbajaj): OneDeviceStrategy should be initialized with the
# master node fetched from the cluster resolver.
super(TPUStrategy, self).__init__('/device:CPU:0')
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)
# TODO(sourabhbajaj): Change this from num_cores to metadata_override
self._num_cores_override = num_cores
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
def _get_enqueue_op_per_host(self, host_id, iterator, input_shapes,
iterations):
"""Create an enqueue op for a single host identified using host_id.
The while_loop op returned will run `iterations` times and in each run
enqueue batches for each shard.
Args:
host_id: integer, id of the host to run the enqueue ops on.
iterator: `tf.data` iterator to read the input data.
input_shapes: shape of inputs to be enqueue on the queue. This is same as
the value of `nest.flatten(iterator.output_shapes)`.
iterations: integer, number of iterations to be run; determines the
number of batches to be enqueued.
Returns:
while_loop_op running `iterations` times; in each run we enqueue a batch
on the infeed queue from the host with id `host_id` for each device shard.
"""
host = self.get_host_cpu_device(host_id)
def _infeed_enqueue_ops_fn():
"""Enqueue ops for one iteration."""
control_deps = []
sharded_inputs = []
enqueue_ops = []
with ops.device(host):
for _ in range(self.num_towers_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
inputs = nest.flatten(iterator.get_next())
control_deps.extend(inputs)
sharded_inputs.append(inputs)
for core_id, shard_input in enumerate(sharded_inputs):
enqueue_ops.append(
tpu_ops.infeed_enqueue_tuple(
inputs=shard_input,
shapes=input_shapes,
device_ordinal=core_id))
return enqueue_ops
def enqueue_ops_loop_body(i):
"""Callable for the loop body of the while_loop instantiated below."""
with ops.control_dependencies(_infeed_enqueue_ops_fn()):
return i + 1
with ops.device(host):
enqueue_op_per_host = control_flow_ops.while_loop(
lambda i: i < iterations,
enqueue_ops_loop_body,
[constant_op.constant(0)],
parallel_iterations=1)
return enqueue_op_per_host
def distribute_dataset(self, dataset_fn):
# TODO(priyag): Perhaps distribute across cores here.
return self._call_dataset_fn(dataset_fn)
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
# TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
# a mechanism to infer the outputs of `fn`. Pending b/110550782.
def _run_steps_on_dataset(self, fn, iterator, iterations,
initial_loop_values=None):
shapes = nest.flatten(iterator.output_shapes)
if any([not s.is_fully_defined() for s in shapes]):
raise ValueError(
'TPU currently requires fully defined shapes. Either use '
'set_shape() on the input tensors or use '
'dataset.apply(map_and_batch(..., drop_remainder=True)).')
types = nest.flatten(iterator.output_types)
enqueue_ops = [
self._get_enqueue_op_per_host(host_id, iterator, shapes, iterations)
for host_id in range(self.num_hosts)]
def dequeue_fn():
dequeued = tpu_ops.infeed_dequeue_tuple(dtypes=types, shapes=shapes)
return nest.pack_sequence_as(iterator.output_shapes, dequeued)
# Wrap `fn` for repeat.
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = values.MultiStepContext()
def run_fn(*args, **kwargs):
"""Single step on the TPU device."""
del args, kwargs
fn_inputs = dequeue_fn()
if not isinstance(fn_inputs, tuple):
fn_inputs = (fn_inputs,)
fn_result = fn(ctx, *fn_inputs)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
if flat_last_step_outputs:
with ops.control_dependencies([fn_result]):
return [array_ops.identity(f) for f in flat_last_step_outputs]
else:
return fn_result
# TODO(sourabhbajaj): The input to while loop should be based on the output
# type of the step_fn
def iterate_on_tpu():
return training_loop.repeat(iterations, run_fn, initial_loop_values)
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop and TPU replicate context. This is useful in cases
# where we might need to exit these contexts and get back to the outer
# context to do some things, for e.g. create an op which should be
# evaluated only once at the end of the loop on the host. One such usage
# is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
replicate_inputs = [[]] * self.num_towers
replicate_outputs = tpu.replicate(iterate_on_tpu, replicate_inputs)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(replicate_outputs, enqueue_ops)
# Filter out any ops from the outputs, typically this would be the case
# when there were no tensor outputs.
last_step_tensor_outputs = [x for x in replicate_outputs
if not isinstance(x, ops.Operation)]
# Outputs are currently of the structure (grouped by device)
# [[output0_device0, output1_device0, output2_device0],
# [output0_device1, output1_device1, output2_device1]]
# Convert this to the following structure instead: (grouped by output)
# [[output0_device0, output0_device1],
# [output1_device0, output1_device1],
# [output2_device0, output2_device1]]
last_step_tensor_outputs = [list(x) for x in zip(*last_step_tensor_outputs)]
# Convert replicate_outputs to the original dict structure of
# last_step_outputs.
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for (name, aggregation) in ctx._last_step_outputs_aggregations.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that have already been aggregated, take the first value
# from the list as each value should be the same. Else return the full
# list of values.
if aggregation is not variables_lib.VariableAggregation.NONE:
# TODO(priyag): Should this return the element or a list with 1 element
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _call_for_each_tower(self, fn, *args, **kwargs):
kwargs.pop('run_concurrently', None)
with one_device_strategy._OneDeviceTowerContext(self): # pylint: disable=protected-access
return fn(*args, **kwargs)
def initialize(self):
if context.executing_eagerly():
      # TODO(priyag): Add appropriate call here when eager is supported for TPUs.
raise NotImplementedError('Eager mode not supported in TPUStrategy.')
else:
return [tpu.initialize_system()]
def finalize(self):
if context.executing_eagerly():
      # TODO(priyag): Add appropriate call here when eager is supported for TPUs.
raise NotImplementedError('Eager mode not supported in TPUStrategy.')
else:
return [tpu.shutdown_system()]
def _reduce(self, aggregation, value, destinations):
graph = ops.get_default_graph()
cf_context = graph._get_control_flow_context() # pylint: disable=protected-access
# If we're inside the ReplicateContext, reduction should be done using
# CrossReplicaSum while outside we can directly use an add_n op.
while cf_context:
if isinstance(cf_context, tpu.TPUReplicateContext):
if aggregation == vs.VariableAggregation.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self.num_towers)
elif aggregation != vs.VariableAggregation.SUM:
raise NotImplementedError(
'Currently only support sum & mean in TPUStrategy.')
return tpu_ops.cross_replica_sum(value)
cf_context = cf_context.outer_context
# Validate that the destination is same as the host device
# Note we don't do this when in replicate context as the reduction is
# performed on the TPU device itself.
devices = cross_tower_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
assert device_util.canonicalize(devices[0]) == device_util.canonicalize(
self.get_host_cpu_device(0))
else:
raise ValueError('Multiple devices are not supported for TPUStrategy')
if aggregation == vs.VariableAggregation.ONLY_FIRST_TOWER:
return value[0]
output = math_ops.add_n(value)
if aggregation == vs.VariableAggregation.MEAN:
return output * (1. / len(value))
return output
def _unwrap(self, value):
if isinstance(value, list):
return value
return [value]
@property
def num_towers(self):
return self._num_cores_override or self._tpu_metadata.num_cores
@property
def num_hosts(self):
return self._tpu_metadata.num_hosts
@property
def num_towers_per_host(self):
return self._tpu_metadata.num_of_cores_per_host
def get_host_cpu_device(self, host_id):
if self._tpu_cluster_resolver.get_master() in ('', 'local'):
return '/replica:0/task:0/device:CPU:0'
job_name = self._tpu_cluster_resolver.get_job_name() or 'tpu_worker'
return '/job:%s/task:%d/device:CPU:0' % (job_name, host_id)
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del cluster_spec, task_type, task_id
if session_config:
session_config.isolate_session_state = True
cluster_spec = self._tpu_cluster_resolver.cluster_spec()
if cluster_spec:
session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
| apache-2.0 | 777,925,020,914,767,700 | 41.085627 | 111 | 0.686165 | false | 3.858144 | false | false | false |
google/protocall | protocall/interpreter/parser_converter.py | 1 | 8657 | # Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyparsing import ParseResults
from protocall.proto import protocall_pb2
from grammar import expression, statement, assignment, call, return_, block, scope, define, while_expression, while_scope, if_expression, if_scope, elif_expression, elif_scope, elif_scopes, else_scope, conditional
from AST import Call, Assignment, ArrayAssignment, Integer, String, Boolean, Proto, Array, Identifier, Field, ArrayRef, While, ArithmeticOperator, ComparisonOperator, Conditional, Return, Define
def convert_field(field):
f = protocall_pb2.Field()
for component in field.components:
c = f.component.add()
c.name = component.identifier
return f
def convert_statement(statement):
s = protocall_pb2.Statement()
if isinstance(statement.statement, Call):
call = statement.statement
field, args = call.field, call.args
c = protocall_pb2.Call()
c.field.CopyFrom(convert_field(field))
for arg in args:
a = c.argument.add()
a.identifier.name = arg.identifier.identifier
a.expression.CopyFrom(convert_expression(arg.expression.expression))
s.call.CopyFrom(c)
elif isinstance(statement.statement, Assignment):
assignment = statement.statement
field, expression = assignment.field, assignment.expression
a = protocall_pb2.Assignment()
a.field.CopyFrom(convert_field(field))
a.expression.CopyFrom(convert_expression(expression.expression))
s.assignment.CopyFrom(a)
elif isinstance(statement.statement, ArrayAssignment):
array_assignment = statement.statement
array_ref, expression = array_assignment.array_ref, array_assignment.expression
a = protocall_pb2.ArrayAssignment()
a.array_ref.field.CopyFrom(convert_field(array_ref.field))
a.array_ref.index.value = array_ref.index
a.expression.CopyFrom(convert_expression(expression.expression))
s.array_assignment.CopyFrom(a)
elif isinstance(statement.statement, While):
while_expression = statement.statement
expression, scope = while_expression.expression, while_expression.scope
w = protocall_pb2.While()
w.expression_scope.expression.CopyFrom(convert_expression(expression.expression))
w.expression_scope.scope.CopyFrom(convert_scope(scope.scope))
s.while_.CopyFrom(w)
elif isinstance(statement.statement, Conditional):
conditional = statement.statement
if_scope = conditional.if_scope
elif_scopes = conditional.elif_scopes
c = protocall_pb2.Conditional()
c.if_scope.expression.CopyFrom(convert_expression(if_scope.expression.expression))
c.if_scope.scope.CopyFrom(convert_scope(if_scope.scope.scope))
for elif_scope in elif_scopes:
es = c.elif_scope.add()
es.expression.CopyFrom(convert_expression(elif_scope.expression.expression))
es.scope.CopyFrom(convert_scope(elif_scope.scope.scope))
else_scope = conditional.else_scope
if else_scope:
c.else_scope.CopyFrom(convert_scope(else_scope.scope.scope))
s.conditional.CopyFrom(c)
elif isinstance(statement.statement, Return):
return_ = statement.statement
expression = return_.expression
r = protocall_pb2.Return()
r.expression.CopyFrom(convert_expression(expression.expression))
s.return_.CopyFrom(r)
elif isinstance(statement.statement, Define):
define = statement.statement
field = define.field
scope = define.scope
d = protocall_pb2.Define()
d.field.CopyFrom(convert_field(field))
d.scope.CopyFrom(convert_scope(scope.scope))
s.define.CopyFrom(d)
else:
print statement.statement
raise RuntimeError
return s
def convert_block(block):
bl = protocall_pb2.Block()
for statement in block.block:
s = convert_statement(statement)
bl.statement.add().CopyFrom(s)
return bl
def convert_argument(argument):
ar = protocall_pb2.Argument()
ar.identifier.name = argument.identifier.identifier
e = convert_expression(argument.expression.expression)
ar.expression.CopyFrom(e)
return ar
def convert_scope(scope):
s_pb = protocall_pb2.Scope()
block = scope.block
for statement in block:
s_pb.block.statement.add().CopyFrom(convert_statement(statement))
return s_pb
def convert_arithmetic_operator(arithmetic_operator, e):
if arithmetic_operator.operator == '*':
op = protocall_pb2.ArithmeticOperator.Op.Value("MULTIPLY")
elif arithmetic_operator.operator == '/':
op = protocall_pb2.ArithmeticOperator.Op.Value("DIVIDE")
elif arithmetic_operator.operator == '+':
op = protocall_pb2.ArithmeticOperator.Op.Value("PLUS")
elif arithmetic_operator.operator == '-':
op = protocall_pb2.ArithmeticOperator.Op.Value("MINUS")
else:
print arithmetic_operator.operator
raise RuntimeError
e.arithmetic_operator.operator = op
left = convert_expression(arithmetic_operator.left)
if isinstance(left, protocall_pb2.Expression):
e.arithmetic_operator.left.CopyFrom(left)
elif isinstance(left, protocall_pb2.Identifier):
e.atom.identifier.CopyFrom(left)
else:
raise RuntimeError
e.arithmetic_operator.left.CopyFrom(left)
right = convert_expression(arithmetic_operator.right)
if isinstance(right, protocall_pb2.Expression):
e.arithmetic_operator.right.CopyFrom(right)
elif isinstance(right, protocall_pb2.Identifier):
e.atom.identifier.CopyFrom(right)
else:
raise RuntimeError
e.arithmetic_operator.right.CopyFrom(right)
def convert_comparison_operator(comparison_operator, e):
if comparison_operator.operator == '>':
op = protocall_pb2.ComparisonOperator.Op.Value("GREATER_THAN")
elif comparison_operator.operator == '<':
op = protocall_pb2.ComparisonOperator.Op.Value("LESS_THAN")
elif comparison_operator.operator == '==':
op = protocall_pb2.ComparisonOperator.Op.Value("EQUALS")
else:
print comparison_operator.operator
raise RuntimeError
e.comparison_operator.operator = op
left = convert_expression(comparison_operator.left)
if isinstance(left, protocall_pb2.Expression):
e.comparison_operator.left.CopyFrom(left)
elif isinstance(left, protocall_pb2.Identifier):
e.atom.identifier.CopyFrom(left)
else:
raise RuntimeError
e.comparison_operator.left.CopyFrom(left)
right = convert_expression(comparison_operator.right)
if isinstance(right, protocall_pb2.Expression):
e.comparison_operator.right.CopyFrom(right)
elif isinstance(right, protocall_pb2.Identifier):
e.atom.identifier.CopyFrom(right)
else:
raise RuntimeError
e.comparison_operator.right.CopyFrom(right)
def convert_expression(expression):
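  # Build a protobuf Expression, dispatching on literals (Integer, String,
  # Boolean, Proto, Array), fields, array refs, operators and calls.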
e = protocall_pb2.Expression()
if isinstance(expression, Integer):
e.atom.literal.integer.value = expression.value
elif isinstance(expression, String):
e.atom.literal.string.value = expression.value
elif isinstance(expression, Boolean):
e.atom.literal.boolean.value = expression.value
elif isinstance(expression, Proto):
e.atom.literal.proto.field.CopyFrom(convert_expression(expression.field).atom.field)
e.atom.literal.proto.value = str(expression.proto)
elif isinstance(expression, Field):
e.atom.field.CopyFrom(convert_field(expression))
elif isinstance(expression, Array):
array = e.atom.literal.array
for item in expression.elements:
element = array.element.add()
element.CopyFrom(convert_expression(item.expression))
elif isinstance(expression, ArrayRef):
e.atom.array_ref.field.CopyFrom(convert_field(expression.field))
e.atom.array_ref.index.value = expression.index
elif isinstance(expression, ArithmeticOperator):
convert_arithmetic_operator(expression, e)
elif isinstance(expression, ComparisonOperator):
convert_comparison_operator(expression, e)
elif isinstance(expression, Call):
e.call.field.CopyFrom(convert_field(expression.field))
for arg in expression.args:
a = e.call.argument.add()
a.CopyFrom(convert_argument(arg))
else:
print expression.__class__
raise RuntimeError
return e
| apache-2.0 | 367,662,007,759,672,700 | 40.028436 | 213 | 0.735359 | false | 3.89955 | false | false | false |
whaleygeek/mb_deathstar | game.py | 1 | 6760 | # game.py (c) 2017 D.J.Whale 22/01/2017
# Star-Wars 'Use the Force, Luke' game
# Using many moving parts provided by Martin O'Hanlon
#----- CONFIGURATION ----------------------------------------------------------
DEATHSTAR_CENTRE_POS = (100,100,10)
TARGET_POS = (100,100,10)
IN_RANGE = ((100,100,10), (100,100,10))
XWING_START_POS = (46,10,-61)
PLAY_TIME_SECS = 5 #(2*60)
NUMBER_OF_TRIES = 3
FRAMES_PER_SEC = 10
#TODO: Mart's code animates the trench separately from deathstar
#so do we need to switch over to that animation at the right position?
#also is there a visual clue to where the trench is, in the deathstar model?
#TODO: xwing can turn or shift
#might make it turn if you tilt it left or right a long way
#in which case we need l,L and r,R for two ranges of left and right tilt
#----- LOAD ALL THE DEPENDENT PARTS -------------------------------------------
import sys
if sys.version_info[0] != 2:
print("Please run this game with Python version 2")
sys.exit()
import time
import controller # auto-connects to the controller
import starwars # auto-connects to Minecraft
#----- GAME STATE -------------------------------------------------------------
deathstar = None
xwing = None
missile = None
xwing_crashed = False
missile_missed = False
missile_hit = False
game_stop_time = 0
#----- BUILD THE GAME WORLD ---------------------------------------------------
def clear_space():
print("will clear_space")
#TODO:
def build_deathstar():
print("will build_deathstar")
#TODO: build at DEATHSTAR_CENTRE_POS
def create_xwing():
global xwing
if xwing is not None:
# kill off old x-wing
xwing.clear()
xwing = None
xwing = starwars.MCObject(starwars.XWING_BLOCKS, XWING_START_POS)
xwing.draw()
def setup_game():
clear_space()
build_deathstar()
create_xwing()
clear_flags()
def wait_for_start():
print("will wait_for_start")
raw_input("press RETURN to start")
#TODO: wait for A button press on micro:bit
#loop, read from micro:bit, until see 'A'
#----- GAME ACTIONS -----------------------------------------------------------
def fly_xwing():
buttons = controller.get_command_flags()
if buttons is not None:
up = 'U' in buttons
down = 'D' in buttons
left = 'L' in buttons
right = 'R' in buttons
fire = 'A' in buttons
eject = 'B' in buttons
# change xwing position based on u/d/l/r
if left:
xwing.rotate_by(yaw=-10)
print("left")
if right:
xwing.rotate_by(yaw=+10)
print("right")
if up:
xwing.move_by(y=+1)
print("up")
if down:
xwing.move_by(y=-1)
print("down")
if fire: print("boom!!")
if eject: print("yeehar!!")
# always move xwing forward by one block
xwing.fly()
# if xwing crashes into any block
# set_xwing_crashed()
#if fire: start_missile()
#if eject: ejector_seat()
def start_missile():
print("will start_missile")
#TODO:
# create missile object in front of xwing
# note we need to know what direction the xwing is flying in
# we also need to know a range of positions to succeed from
def move_missile():
print("will move_missile")
#TODO:
# if missile now out of range:
# set_missile_missed()
# elif missile not yet hit target:
# move missile forward by 1
# else must have hit
# set_missile_hit()
def ejector_seat():
print("will ejector_seat")
animate_eject()
animate_xwing_crashed()
set_xwing_crashed()
#------ GAME CONDITIONS -------------------------------------------------------
#
# Set various game conditions in the game state.
# The main loop will detect and action these appropriately.
# This prevents passing lots of variables around,
# but contains the global variables a bit more into a controlled space (here)
def clear_flags():
global xwing_crashed, missile_missed, missile_hit
xwing_crashed = False
missile_missed = False
missile_hit = False
def set_xwing_crashed():
global xwing_crashed
xwing_crashed = True
def set_missile_missed():
global missile_missed
missile_missed = True
def set_missile_hit():
global missile_hit
missile_hit = True
#----- ANIMATIONS -------------------------------------------------------------
def animate_missile_missed():
print("will animate_missile_missed")
#TODO:
def animate_missile_hit():
print("will animate_missile_hit")
#TODO:
def animate_eject():
print("will animate_eject")
#TODO:
def animate_xwing_crashed():
print("will xwing_crashed")
#TODO:
def animate_blow_up_deathstar():
print("will blow_up_deathstar")
#TODO:
# auto pilot the ship to a safe location
# animate the deathstar blowing up
# return when deathstar gone
#----- SPLASH SCREENS ---------------------------------------------------------
def splash_screen():
print("will splash_screen")
#TODO:
def game_over_failed():
print("will game_over_failed")
#TODO:
def game_over_succeeded():
print("will game_over_succeeded")
#TODO:
#----- GAME LOOP --------------------------------------------------------------
def start_game():
global game_stop_time
print("will start_game")
#TODO: move player to position on start (hides splash screen)
game_stop_time = time.time() + PLAY_TIME_SECS
def run_out_of_time():
return time.time() >= game_stop_time
def play_game():
missiles_left = NUMBER_OF_TRIES
while not run_out_of_time() and not xwing_crashed and not missile_hit and missiles_left > 0:
time.sleep(1/float(FRAMES_PER_SEC))
fly_xwing()
if missile is not None:
move_missile()
if missile_missed:
animate_missile_missed()
missiles_left -= 1
elif missile_hit:
animate_missile_hit()
animate_blow_up_deathstar()
return missile_hit
def whereami():
import starwars.mcpi.minecraft as minecraft
mc = minecraft.Minecraft.create()
x,y,z = mc.player.getTilePos()
print(x,y,z)
#----- MAIN PROGRAM -----------------------------------------------------------
#if __name__ == "__main__":
# while True:
# setup_game()
# splash_screen()
# wait_for_start()
# start_game()
#
# success = play_game()
#
# if success:
# game_over_succeeded()
# else:
# game_over_failed()
#whereami()
create_xwing()
while True:
print("fly")
fly_xwing()
time.sleep(0.1)
# END
| mit | -1,244,756,361,887,171,600 | 23.316547 | 96 | 0.564053 | false | 3.300781 | false | false | false |
Benjamin-S/AccountManagement | MultiToolSettings.py | 1 | 6257 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\symonsbe\Downloads\WinPython-64bit-3.4.4.5Qt5\notebooks\MultiToolSettings.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(559, 296)
self.gridLayout = QtWidgets.QGridLayout(Dialog)
self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.gridLayout.setObjectName("gridLayout")
self.treeWidget = QtWidgets.QTreeWidget(Dialog)
self.treeWidget.setMinimumSize(QtCore.QSize(133, 0))
self.treeWidget.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.treeWidget.setFrameShadow(QtWidgets.QFrame.Sunken)
self.treeWidget.setMidLineWidth(0)
self.treeWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.treeWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.treeWidget.setAlternatingRowColors(False)
self.treeWidget.setObjectName("treeWidget")
item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
item_1 = QtWidgets.QTreeWidgetItem(item_0)
self.treeWidget.header().setVisible(False)
self.gridLayout.addWidget(self.treeWidget, 0, 0, 2, 1)
self.stackedWidget = QtWidgets.QStackedWidget(Dialog)
self.stackedWidget.setObjectName("stackedWidget")
self.page = QtWidgets.QWidget()
self.page.setObjectName("page")
self.gridLayout_2 = QtWidgets.QGridLayout(self.page)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.groupBox = QtWidgets.QGroupBox(self.page)
self.groupBox.setMinimumSize(QtCore.QSize(300, 0))
self.groupBox.setFlat(False)
self.groupBox.setObjectName("groupBox")
self.formLayout = QtWidgets.QFormLayout(self.groupBox)
self.formLayout.setObjectName("formLayout")
self._lblCSVDirectory = QtWidgets.QLabel(self.groupBox)
self._lblCSVDirectory.setObjectName("_lblCSVDirectory")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self._lblCSVDirectory)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self._lineCSVDirectory = QtWidgets.QLineEdit(self.groupBox)
self._lineCSVDirectory.setObjectName("_lineCSVDirectory")
self.horizontalLayout.addWidget(self._lineCSVDirectory)
self._btnCSVDirectory = QtWidgets.QToolButton(self.groupBox)
self._btnCSVDirectory.setObjectName("_btnCSVDirectory")
self.horizontalLayout.addWidget(self._btnCSVDirectory)
self.formLayout.setLayout(2, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout)
self._lblPDFDirectory = QtWidgets.QLabel(self.groupBox)
self._lblPDFDirectory.setObjectName("_lblPDFDirectory")
self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self._lblPDFDirectory)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self._linePDFDirectory = QtWidgets.QLineEdit(self.groupBox)
self._linePDFDirectory.setObjectName("_linePDFDirectory")
self.horizontalLayout_2.addWidget(self._linePDFDirectory)
self._btnPDFDirectory = QtWidgets.QToolButton(self.groupBox)
self._btnPDFDirectory.setObjectName("_btnPDFDirectory")
self.horizontalLayout_2.addWidget(self._btnPDFDirectory)
self.formLayout.setLayout(3, QtWidgets.QFormLayout.FieldRole, self.horizontalLayout_2)
self.gridLayout_2.addWidget(self.groupBox, 0, 0, 1, 1)
self.stackedWidget.addWidget(self.page)
self.page_2 = QtWidgets.QWidget()
self.page_2.setObjectName("page_2")
self.stackedWidget.addWidget(self.page_2)
self.gridLayout.addWidget(self.stackedWidget, 0, 1, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.buttonBox.setAutoFillBackground(False)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
self.buttonBox.setCenterButtons(False)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 3, 1, 1, 1)
self.gridLayout.setColumnStretch(0, 1)
self.gridLayout.setColumnStretch(1, 3)
self.retranslateUi(Dialog)
self.stackedWidget.setCurrentIndex(0)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Settings"))
self.treeWidget.headerItem().setText(0, _translate("Dialog", "Settings"))
__sortingEnabled = self.treeWidget.isSortingEnabled()
self.treeWidget.setSortingEnabled(False)
self.treeWidget.topLevelItem(0).setText(0, _translate("Dialog", "Centrepay"))
self.treeWidget.topLevelItem(0).child(0).setText(0, _translate("Dialog", "Directories"))
self.treeWidget.setSortingEnabled(__sortingEnabled)
self.groupBox.setTitle(_translate("Dialog", "Centrepay Directories"))
self._lblCSVDirectory.setText(_translate("Dialog", "CSV Directory"))
self._btnCSVDirectory.setText(_translate("Dialog", "..."))
self._lblPDFDirectory.setText(_translate("Dialog", "PDF Directory"))
self._btnPDFDirectory.setText(_translate("Dialog", "..."))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| mit | -6,289,644,542,471,036,000 | 51.939655 | 140 | 0.702253 | false | 4.031572 | false | false | false |
paanil/Mapot | 3Dmap/blender_script.py | 1 | 6184 |
# Copyright (C) 2014 Susanne Jauhiainen, Markku Kovanen, Ilari Paananen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bpy
from mathutils import Vector
import json
import sys
import os
# Three.js blender export module 'export_threejs.py'
# needs THREE_exportGeometry custom property to be defined like this:
bpy.types.Object.THREE_exportGeometry = bpy.props.BoolProperty(default = True)
# The module is assumed to be in the same folder with this file.
sys.path.append(os.path.dirname(__file__))
import export_threejs
def clear_scene(scene):
for object in scene.objects:
object.select = True
bpy.ops.object.delete()
for mesh in bpy.data.meshes:
bpy.data.meshes.remove(mesh)
def get_data(shp_path, python_path):
path = os.path.dirname(__file__)
path = os.path.join(path, "shp2json.py")
with os.popen(python_path + " " + path + " " + shp_path) as f:
return json.loads(f.read())
def separate_regions(regions):
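    # Classify each ring by the sign of its shoelace-style sum: rings with a
    # negative sum are split into regions_sub so the caller can extrude them
    # separately and boolean-subtract them from the main mesh.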
regions_sub = []
for region in regions:
last_point = len(region) - 1
pt1 = region[last_point]
pt2 = region[0]
sum = (pt2[0] - pt1[0]) * (pt2[1] + pt1[1])
for j in range(0, last_point): # we dont want to add last edge twice
pt1 = region[j]
pt2 = region[j + 1]
sum = sum + (pt2[0] - pt1[0]) * (pt2[1] + pt1[1])
if sum < 0:
regions_sub.append(region)
regions.remove(region)
return (regions, regions_sub)
def build_mesh(mesh, regions, height):
extrude_vec = Vector((0.0, 0.0, height))
verts = []
edges = []
for region in regions:
first = len(verts)
for pt in region:
index = len(verts)
verts.append((pt[0], pt[1], 0.0))
edges.append([index, index + 1])
last = len(edges) - 1
edges[last][1] = first
mesh.from_pydata(verts, edges,[])
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.extrude_edges_move(TRANSFORM_OT_translate={"value":extrude_vec})
bpy.ops.mesh.edge_face_add()
bpy.ops.mesh.select_all(action='SELECT')
if height > 1.0: # TODO: fix this
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.mesh.quads_convert_to_tris()
bpy.ops.object.mode_set(mode = 'OBJECT')
def boolean_substract(object, object_sub):
bpy.context.scene.objects.active = object
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].operation = 'DIFFERENCE'
bpy.context.object.modifiers["Boolean"].object = object_sub
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
id_name_dict = None
def create_scene(scene, data):
global id_name_dict
id_name_dict = {}
for country in data:
id = country[0]
name = country[1]
regions, regions_sub = separate_regions(country[2])
mesh = bpy.data.meshes.new(id)
object = bpy.data.objects.new(id, mesh)
scene.objects.link(object)
scene.objects.active = object
build_mesh(mesh, regions, 1.0)
if len(regions_sub) > 0:
mesh_sub = bpy.data.meshes.new(id + "_sub")
object_sub = bpy.data.objects.new(id + "_sub", mesh_sub)
scene.objects.link(object_sub)
scene.objects.active = object_sub
build_mesh(mesh_sub, regions_sub, 1.5)
boolean_substract(object, object_sub)
bpy.ops.object.select_all(action='DESELECT')
object_sub.select = True
bpy.ops.object.delete()
bpy.data.meshes.remove(mesh_sub)
id_name_dict[object.name] = name
def export_scene(scene, path):
global id_name_dict
data = []
for object in scene.objects:
id = object.name
name = id_name_dict[id]
file = id + ".js" # objects are not actually written in separate files
text = export_threejs.generate_mesh_string([object], scene,
True, # option_vertices
False, # option_vertices_truncate
True, # option_faces
False, # option_normals
False, # option_uv_coords
False, # option_materials
False, # option_colors
False, # option_bones
False, # option_skinning
"None", # align_model
True, # flipyz
1.0, # option_scale
True, # export_single_model
False, # option_copy_textures
file, # filepath
False, # option_animation_morph
False, # option_animation_skeletal
False, # option_frame_index_as_time
1)[0] # option_frame_step
data.append((id, name, json.loads(text)))
dir, _ = os.path.split(path)
if not os.path.isdir(dir):
os.makedirs(dir)
with open(path, "w") as f:
f.write(json.dumps(data, separators=(",", ":")))
def run(shp_file, out_file, python_path):
data = get_data(shp_file, python_path)
scene = bpy.context.scene
clear_scene(scene)
create_scene(scene, data)
export_scene(scene, out_file)
argv = sys.argv
argc = len(argv)
try:
argv = argv[argv.index("--"):]
argc = len(argv)
except ValueError:
pass
if argc < 2:
print("Give .shp file as 1st argument")
elif argc < 3:
print("Give output file as 2nd argument")
elif argc < 4:
print("Give path to python as 3rd argument")
else:
run(argv[1], argv[2], argv[3])
| gpl-3.0 | -341,470,612,410,070,500 | 29.463054 | 81 | 0.601876 | false | 3.443207 | false | false | false |
kingsj/music-player | SongEdit.py | 1 | 5917 | # -*- coding: utf-8 -*-
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
import utils
from utils import UserAttrib, Event, initBy
import Traits
# Note: I'm not too happy with all the complicated update handling here...
# In general, the design is ok. But it needs some more specification
# and then some drastic simplification. Most of it should be one-liners.
class SongEdit:
@initBy
def _updateEvent(self): return Event()
def __init__(self, ctx=None):
if not ctx:
import gui
ctx = gui.ctx()
assert ctx, "no gui context"
self.ctx = ctx
self._updateHandler = lambda: self._updateEvent.push()
ctx.curSelectedSong_updateEvent.register(self._updateHandler)
@UserAttrib(type=Traits.Object)
@property
def song(self):
return self.ctx.curSelectedSong
@UserAttrib(type=Traits.EditableText)
def artist(self, updateText=None):
if self.song:
if updateText:
self.song.artist = updateText
return self.song.artist
return ""
@UserAttrib(type=Traits.EditableText)
def title(self, updateText=None):
if self.song:
if updateText:
self.song.title = updateText
return self.song.title
return ""
@staticmethod
def _convertTagsToText(tags):
def txtForTag(tag):
value = tags[tag]
if value >= 1: return tag
return tag + ":" + str(value)
return " ".join(map(txtForTag, sorted(tags.keys())))
@staticmethod
def _convertTextToTags(txt):
pass
# todo...
#@UserAttrib(type=Traits.EditableText)
def tags(self, updateText=None):
if self.song:
return self._convertTagsToText(self.song.tags)
return ""
@staticmethod
def _formatGain(gain):
factor = 10.0 ** (gain / 20.0)
return "%f dB (factor %f)" % (gain, factor)
@UserAttrib(type=Traits.Table(keys=("key", "value")), variableHeight=True)
@property
def metadata(self):
d = dict(self.song.metadata)
for (key,func) in (
("artist",None),
("title",None),
("album",None),
("duration",utils.formatTime),
("url",None),
("rating",None),
("tags",self._convertTagsToText),
("gain",self._formatGain),
("completedCount",None),
("skipCount",None),
("lastPlayedDate",utils.formatDate),
("id",repr),
):
try: value = getattr(self.song, key)
except AttributeError: pass
else:
if func: value = func(value)
d[key] = unicode(value)
l = []
for key,value in sorted(d.items()):
l += [{"key": key, "value": value}]
return l
@metadata.setUpdateEvent
@property
def metadata_updateEvent(self): return self.song._updateEvent
def _queryAcoustId(self):
fingerprint = self.song.get("fingerprint_AcoustId", timeout=None)[0]
duration = self.song.get("duration", timeout=None, accuracy=0.5)[0]
import base64
fingerprint = base64.urlsafe_b64encode(fingerprint)
api_url = "http://api.acoustid.org/v2/lookup"
# "8XaBELgH" is the one from the web example from AcoustID.
# "cSpUJKpD" is from the example from pyacoustid
# get an own one here: http://acoustid.org/api-key
client_api_key = "cSpUJKpD"
params = {
'format': 'json',
'client': client_api_key,
'duration': int(duration),
'fingerprint': fingerprint,
'meta': 'recordings recordingids releasegroups releases tracks compress',
}
import urllib
body = urllib.urlencode(params)
import urllib2
req = urllib2.Request(api_url, body)
import contextlib
with contextlib.closing(urllib2.urlopen(req)) as f:
data = f.read()
headers = f.info()
import json
data = json.loads(data)
return data
def queryAcoustIdResults_selectionChangeHandler(self, selection):
self._queryAcoustId_selection = selection
@UserAttrib(type=Traits.Table(keys=("artist", "title", "album", "track", "score")),
selectionChangeHandler=queryAcoustIdResults_selectionChangeHandler)
@property
def queryAcoustIdResults(self):
if getattr(self, "_queryAcoustIdResults_songId", "") != getattr(self.song, "id", ""):
return []
return list(getattr(self, "_queryAcoustIdResults", []))
@queryAcoustIdResults.setUpdateEvent
@initBy
def queryAcoustIdResults_updateEvent(self): return Event()
@UserAttrib(type=Traits.Action, variableWidth=False)
def queryAcoustId(self):
data = self._queryAcoustId()
self._queryAcoustIdResults_songId = self.song.id
self._queryAcoustIdResults = []
for result in data.get("results", []):
for recording in result.get("recordings", []):
for resGroup in recording.get("releasegroups", []):
artist = resGroup["artists"][0]
release = resGroup["releases"][0]
medium = release["mediums"][0]
track = medium["tracks"][0]
if artist["name"] == "Various Artists":
artist = track["artists"][0]
entry = {
"id": result["id"],
"score": result["score"],
"recording-id": recording["id"],
"releasegroup-id": resGroup["id"],
"artist-id": artist["id"],
"artist": artist["name"],
"title": track["title"],
"album": resGroup["title"],
"track": "%i/%i" % (track["position"], medium["track_count"])
}
self._queryAcoustIdResults += [entry]
if not self._queryAcoustIdResults:
self._queryAcoustIdResults += [{"artist":"- None found -","title":"","album":"","track":""}]
self.queryAcoustIdResults_updateEvent.push()
@UserAttrib(type=Traits.Action, variableWidth=False, alignRight=True)
def apply(self):
if getattr(self, "_queryAcoustIdResults_songId", "") != getattr(self.song, "id", ""):
return
sel = getattr(self, "_queryAcoustId_selection", [])
if not sel: return
sel = sel[0]
for key in ("artist","title"):
if not sel[key]: return
for key in ("artist","title","album","track"):
setattr(self.song, key, sel[key])
self._updateEvent.push() # the song is updating itself - but the edit fields aren't atm...
| bsd-2-clause | 5,378,411,905,825,060,000 | 29.035533 | 101 | 0.674835 | false | 3.07217 | false | false | false |
kinoraw/kinoraw_repo | external/script_auto_complete/__init__.py | 1 | 2810 | '''
Copyright (C) 2014 Jacques Lucke
[email protected]
Created by Jacques Lucke
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import importlib, sys, os
from fnmatch import fnmatch
bl_info = {
"name": "Code Autocomplete",
"description": "Improve the scripting experience in Blenders text editor.",
"author": "Jacques Lucke",
"version": (1,0, 0),
"blender": (2, 7, 4),
"location": "Text Editor",
"category": "Development"
}
# import all modules in same/subdirectories
###########################################
currentPath = os.path.dirname(__file__)
module_name = "script_auto_complete"
sys.modules[module_name] = sys.modules[__name__]
def getAllImportFiles():
def get_path(base):
b, t = os.path.split(base)
if __name__ == t:
return [module_name]
else:
return get_path(b) + [t]
for root, dirs, files in os.walk(currentPath):
path = ".".join(get_path(root))
for f in filter(lambda f:f.endswith(".py"), files):
name = f[:-3]
if not name == "__init__":
yield path + "." + name
auto_complete_modules = []
for name in getAllImportFiles():
mod = importlib.import_module(name)
auto_complete_modules.append(mod)
reload_event = "bpy" in locals()
import bpy
# Reload
# makes F8 reload actually reload the code
if reload_event:
for module in auto_complete_modules:
importlib.reload(module)
class AddonPreferences(bpy.types.AddonPreferences):
bl_idname = module_name
line_amount = bpy.props.IntProperty(default = 8, min = 1, max = 20, name = "Lines")
def draw(self, context):
layout = self.layout
row = layout.row(align = False)
row.prop(self, "line_amount")
# register
##################################
def register():
try: bpy.utils.register_module(module_name)
except: pass
print("Loaded Script Auto Completion with {} modules".format(len(auto_complete_modules)))
def unregister():
try: bpy.utils.unregister_module(module_name)
except: pass
if __name__ == "__main__":
register()
| gpl-3.0 | 2,808,274,111,072,506,000 | 27.673469 | 93 | 0.617082 | false | 3.823129 | false | false | false |
ngageoint/six-library | six/modules/python/six.sicd/tests/test_six_sicd.py | 1 | 5272 | #!/usr/bin/env python
#
# =========================================================================
# This file is part of six.sicd-python
# =========================================================================
#
# (C) Copyright 2004 - 2015, MDA Information Systems LLC
#
# six.sicd-python is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; If not,
# see <http://www.gnu.org/licenses/>.
#
from pysix.six_sicd import *
from pysix.six_base import *
from coda.xml_lite import *
from coda.coda_io import *
from coda.coda_logging import *
from coda.coda_types import *
import os
import sys
import os.path
import filecmp
##############################################################
#
# roundTrip test
#
##############################################################
# This test loads up an XML file to a complex data object and
# writes it out again. Then it compares the two files to see if
# they match
def roundTrip(filename):
vs = VectorString()
schemapath = os.environ['SIX_SCHEMA_PATH']
vs.push_back(schemapath)
fis = FileInputStream(filename)
xmlparser = MinidomParser()
xmlparser.preserveCharacterData(True)
xmlparser.parse(fis)
doc = xmlparser.getDocument()
dt = DataType(DataType.COMPLEX)
xml_ctrl = ComplexXMLControl()
data = xml_ctrl.fromXML(doc, vs)
cmplx = asComplexData(data)
out_doc = xml_ctrl.toXML(cmplx, vs)
out_filename = 'round_trip_' + os.path.basename(filename)
fos = FileOutputStream(out_filename)
root = out_doc.getRootElement()
root.prettyPrint(fos)
round_tripped_cmplx = SixSicdUtilities_parseDataFromFile(out_filename,
vs, Logger())
if cmplx == round_tripped_cmplx:
print(filename + " passed")
else:
print("handling " + filename + " failed")
##############################################################
#
# loadSicd test
#
##############################################################
# This test loads a sicd file as a complex data object and writes
# it out as an xml file. Unfortunately there is no handy way to compare
# the resulting xml so it just prints out various information and assumes
# it worked. Also real sicds can be kind of big so there probably won't
# be any in the repository, so this is really more of an example.
def loadSicd(filename):
vs = VectorString()
vs.push_back( os.environ['SIX_SCHEMA_PATH'] )
cmplx = SixSicdUtilities.getComplexData(filename, vs)
showInfo(cmplx)
out_filename = 'from_sicd_' + os.path.basename(filename) + '.xml'
xml_ctrl = ComplexXMLControl()
fos = FileOutputStream(out_filename)
out_doc = xml_ctrl.toXML(cmplx, vs)
root = out_doc.getRootElement()
root.prettyPrint(fos)
print(filename + " probably passed")
return cmplx
##############################################################
#
# showInfo test
#
##############################################################
# This isn't a test per se but is useful to put some assorted
# ComplexData member information on the screen just to show
# that we can actually get to the data
def showInfo(cmplx):
print("file is " + str(cmplx.getNumRows()) + "x" + str(cmplx.getNumCols()))
print(str(cmplx.getNumBytesPerPixel()) + " bytes per pixel")
print( "tx polarization: " + cmplx.radarCollection.txPolarization.toString())
print("image formation algorithm: " + cmplx.imageFormation.imageFormationAlgorithm.toString())
print("graze angle: " + str(cmplx.scpcoa.grazeAngle))
print("slant range: " + str(cmplx.scpcoa.slantRange))
print("radar mode: " + cmplx.collectionInformation.radarMode.toString())
collectionParameters = cmplx.radarCollection.parameters
print("radar collection parameters: ")
if collectionParameters.empty():
print(' (none)')
else:
for idx in range(0,collectionParameters.size()):
print(" " + collectionParameters[idx].getName() + ": " + str(collectionParameters[idx]))
print('image data \'validData\'')
validData = cmplx.imageData.validData
if validData.empty():
print(' (none)')
else:
for idx in range(0,validData.size()):
print(' (' + str(validData[idx].row) + ',' + str(validData[idx].col) + ')')
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
if 1 == len(sys.argv):
print("please provide a sicd or XML file")
for arg in sys.argv:
if (arg.endswith(".ntf") or arg.endswith(".nitf")):
cmplx = loadSicd(arg)
elif arg.endswith(".xml"):
roundTrip(arg)
| lgpl-3.0 | 301,591,056,922,000,700 | 33.913907 | 97 | 0.588771 | false | 3.893648 | true | false | false |
bacovcin/corpus-tools | PTree.py | 1 | 2935 | class PTree:
def __init__(self, name, content):
if type(name) is str and ' ' not in name:
self.name = name
else:
print("Name is not a string")
self.content = content
self.height = 0
if type(content) is list:
for tree in content:
if tree.height >= self.height:
self.height = tree.height + 1
def __str__(self):
if (type(self.content) is str):
output = '\n(' + self.name + ' ' + self.content + ')'
else:
output = '\n(' + self.name
for y in self.content:
text = str(y).split('\n')
output = output + '\n '.join(text)
output = output + '\n)'
return output
def MatchParen(x):
output = []
outtext = ''
i = 0
while i < len(x):
c = x[i]
if c == '(':
if outtext not in [' ', '', '\t']:
output.append(outtext)
outtext = ''
y = MatchParen(x[i+1:])
output.append(y[0])
i = i+y[1]
elif c == ')':
if outtext not in [' ', '']:
output.append(outtext)
break
else:
outtext = outtext + c
i = i + 1
return (output, i+2)
def ParseTree(x):
if len(x) > 1 or type(x[0]) is list:
try:
name = x[0].rstrip()
start = 1
        except:
            # x[0] was not a string label (e.g. it is itself a subtree), so this
            # node gets no label of its own
            name = ''
            start = 0
content = []
for y in x[start:]:
if type(y) is list:
content.append(ParseTree(y))
else:
content.append(y)
else:
y = x[0].split(' ')
name = y[0]
content = y[1]
return PTree(name, content)
def ParseFiles(argvs,numtrees=1):
toklist = {}
for i in range(len(argvs)):
arg = argvs[i]
if arg[-3:] in ['ref', 'psd', 'out', 'cod']:
print(arg)
file = open(arg)
tokens = []
token = ''
storing = 1
for line in file:
if '/*' in line or '/~*' in line:
if token != '' and 'ID' in token:
tokens.append(ParseTree(MatchParen(token.lstrip().rstrip())[0][0]))
print('Tree found!')
token = ''
storing = 0
elif '*/' in line or '*~/' in line:
storing = 1
elif line == '\n' and 'ID' in token:
tokens.append(ParseTree(MatchParen(token.lstrip().rstrip())[0][0]))
print('Tree found!')
token = ''
elif line == '\n':
token = ''
elif storing == 1:
token = token + line.rstrip().lstrip()
toklist[arg[:-4]] = tokens
return toklist
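# Illustrative driver (not part of the original module): parse any .ref/.psd/.out/.cod
# files named on the command line and report how many trees were found in each.
if __name__ == '__main__':
    import sys
    for corpus_file, trees in ParseFiles(sys.argv[1:]).items():
        print(corpus_file + ': ' + str(len(trees)) + ' trees')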
| mit | -4,020,557,727,004,430,000 | 28.94898 | 91 | 0.405451 | false | 4.053867 | false | false | false |
Guilouz/repository.guilouz | skin.estuary.modv2/scripts/viewswitcher.py | 1 | 3271 | import time
import xbmc
import xbmcgui
import xbmcaddon
if __name__ == '__main__':
# init props
trans_title = xbmc.getLocalizedString(369)
monitor = xbmc.Monitor()
xbmc.log("service.skin.viewswitcher - Start service", level=xbmc.LOGNOTICE)
while not monitor.abortRequested():
# Sleep/wait for abort for 0.5 seconds
if monitor.waitForAbort(0.5):
# Abort was requested while waiting. We should exit
break
        # Check if forced view is enabled and, if so, do its magic
if not xbmc.getCondVisibility("!Skin.HasSetting(ForcedViews.Enabled)") == 1:
current_content = xbmc.getInfoLabel("Container.Content")
path = xbmc.getInfoLabel("Container.FolderName")
# Check if movie is part of a set
if current_content == "movies":
setname = xbmc.getInfoLabel("ListItem.Set")
if (str(trans_title) != str(path) and (str(trans_title)+'s' != str(path))):
#dlg = xbmcgui.Dialog()
#dlg.notification("Compare",str(path) + " - " + str(trans_title),xbmcgui.NOTIFICATION_INFO,1000)
current_content = "setmovies"
# Check if content is part of addon - if yes disable forced view and let addon select view
plugin = xbmc.getInfoLabel("Container.PluginName")
if plugin != "":
current_content = ""
            # Check if content type is one of the defined views
if current_content in "movies|sets|setmovies|tvshows|seasons|episodes|albums|artists|songs|musicvideos|pictures|videos|files" and not current_content == "":
                # Get labels and force ascii for comparison so it stays robust for languages with special chars
current_view_label = xbmc.getInfoLabel("Container.Viewmode").decode("utf-8").encode("ascii","ignore")
dest_view_id = xbmc.getInfoLabel("Skin.String(SkinHelper.ForcedViews.%s)" % current_content).decode("utf-8").encode("ascii","ignore")
dest_view_label = xbmc.getInfoLabel("Skin.String(SkinHelper.ForcedViews.%s.label)" % current_content).decode("utf-8").encode("ascii","ignore")
# Set current view to forced one
if (dest_view_id != ""):
if current_view_label != dest_view_label:
#dlg = xbmcgui.Dialog()
#dlg.notification("Set",str(path) + " - " + current_content,xbmcgui.NOTIFICATION_INFO,1000)
xbmc.executebuiltin("Container.SetViewMode(%s)" % dest_view_id)
xbmc.log("service.skin.viewswitcher - Cur label: " + current_view_label, level=xbmc.LOGNOTICE)
xbmc.log("service.skin.viewswitcher - Cur content: " + str(current_content), level=xbmc.LOGNOTICE)
xbmc.log("service.skin.viewswitcher - Switching to:", level=xbmc.LOGNOTICE)
xbmc.log("service.skin.viewswitcher - Dest label: " + str(dest_view_label), level=xbmc.LOGNOTICE)
xbmc.log("service.skin.viewswitcher - Dest id: " + str(dest_view_id), level=xbmc.LOGNOTICE)
# give kodi time to relax :-)
time.sleep(1) | gpl-2.0 | -4,197,966,663,311,798,300 | 65.77551 | 168 | 0.598594 | false | 4.058313 | false | false | false |
ndp-systemes/odoo-addons | purchase_delivery_tracking_colissimo/__openerp__.py | 1 | 1441 | # -*- coding: utf8 -*-
#
# Copyright (C) 2015 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
{
'name': 'Purchase delivery tracking (Colissimo)',
'version': '0.1',
'author': 'NDP Systèmes',
'maintainer': 'NDP Systèmes',
'category': 'Purchase',
'depends': ['purchase_delivery_tracking', 'base_delivery_tracking_colissimo'],
'description': """
Purchase delivery tracking (Colissimo)
======================================
This module allows to track the Colissimo deliveries
""",
'website': 'http://www.ndp-systemes.fr',
'data': ['purchase_delivery_tracking_colissimo.xml'],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
'license': 'AGPL-3',
'application': False,
}
| agpl-3.0 | 8,709,168,868,862,906,000 | 34.95 | 82 | 0.655076 | false | 3.640506 | false | false | false |
binoculars/osf.io | api/preprints/views.py | 2 | 12118 | import re
from rest_framework import generics
from rest_framework.exceptions import NotFound, PermissionDenied, NotAuthenticated
from rest_framework import permissions as drf_permissions
from framework.auth.oauth_scopes import CoreScopes
from osf.models import ReviewAction, PreprintService
from osf.utils.requests import check_select_for_update
from api.actions.permissions import ReviewActionPermission
from api.actions.serializers import ReviewActionSerializer
from api.actions.views import get_review_actions_queryset
from api.base.exceptions import Conflict
from api.base.views import JSONAPIBaseView, WaterButlerMixin
from api.base.filters import ListFilterMixin, PreprintFilterMixin
from api.base.parsers import (
JSONAPIMultipleRelationshipsParser,
JSONAPIMultipleRelationshipsParserForRegularJSON,
)
from api.base.utils import absolute_reverse, get_user_auth
from api.base import permissions as base_permissions
from api.citations.utils import render_citation, preprint_csl
from api.preprints.serializers import (
PreprintSerializer,
PreprintCreateSerializer,
PreprintCitationSerializer,
)
from api.nodes.serializers import (
NodeCitationStyleSerializer,
)
from api.identifiers.views import IdentifierList
from api.identifiers.serializers import PreprintIdentifierSerializer
from api.nodes.views import NodeMixin, NodeContributorsList
from api.nodes.permissions import ContributorOrPublic
from api.preprints.permissions import PreprintPublishedOrAdmin
class PreprintMixin(NodeMixin):
serializer_class = PreprintSerializer
preprint_lookup_url_kwarg = 'preprint_id'
def get_preprint(self, check_object_permissions=True):
qs = PreprintService.objects.filter(guids___id=self.kwargs[self.preprint_lookup_url_kwarg])
try:
preprint = qs.select_for_update().get() if check_select_for_update(self.request) else qs.select_related('node').get()
except PreprintService.DoesNotExist:
raise NotFound
if preprint.node.is_deleted:
raise NotFound
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, preprint)
return preprint
class PreprintList(JSONAPIBaseView, generics.ListCreateAPIView, PreprintFilterMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprints_list).
"""
# These permissions are not checked for the list of preprints, permissions handled by the query
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
)
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]
serializer_class = PreprintSerializer
ordering = ('-created')
ordering_fields = ('created', 'date_last_transitioned')
view_category = 'preprints'
view_name = 'preprint-list'
def get_serializer_class(self):
if self.request.method == 'POST':
return PreprintCreateSerializer
else:
return PreprintSerializer
def get_default_queryset(self):
auth = get_user_auth(self.request)
auth_user = getattr(auth, 'user', None)
# Permissions on the list objects are handled by the query
return self.preprints_queryset(PreprintService.objects.all(), auth_user)
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
class PreprintDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, PreprintMixin, WaterButlerMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprints_read).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ContributorOrPublic,
PreprintPublishedOrAdmin,
)
parser_classes = (
JSONAPIMultipleRelationshipsParser,
JSONAPIMultipleRelationshipsParserForRegularJSON,
)
required_read_scopes = [CoreScopes.NODE_PREPRINTS_READ]
required_write_scopes = [CoreScopes.NODE_PREPRINTS_WRITE]
serializer_class = PreprintSerializer
view_category = 'preprints'
view_name = 'preprint-detail'
def get_object(self):
return self.get_preprint()
def perform_destroy(self, instance):
if instance.is_published:
raise Conflict('Published preprints cannot be deleted.')
PreprintService.delete(instance)
def get_parser_context(self, http_request):
"""
Tells parser that type is required in request
"""
res = super(PreprintDetail, self).get_parser_context(http_request)
res['legacy_type_allowed'] = True
return res
class PreprintCitationDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprints_citation_list).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = PreprintCitationSerializer
view_category = 'preprints'
view_name = 'preprint-citation'
def get_object(self):
preprint = self.get_preprint()
auth = get_user_auth(self.request)
if preprint.node.is_public or preprint.node.can_view(auth) or preprint.is_published:
return preprint_csl(preprint, preprint.node)
raise PermissionDenied if auth.user else NotAuthenticated
class PreprintCitationStyleDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/preprints_citation_read).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_CITATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeCitationStyleSerializer
view_category = 'preprint'
view_name = 'preprint-citation'
def get_object(self):
preprint = self.get_preprint()
auth = get_user_auth(self.request)
style = self.kwargs.get('style_id')
if preprint.node.is_public or preprint.node.can_view(auth) or preprint.is_published:
try:
citation = render_citation(node=preprint, style=style)
except ValueError as err: # style requested could not be found
csl_name = re.findall('[a-zA-Z]+\.csl', err.message)[0]
raise NotFound('{} is not a known style.'.format(csl_name))
return {'citation': citation, 'id': style}
raise PermissionDenied if auth.user else NotAuthenticated
class PreprintIdentifierList(IdentifierList, PreprintMixin):
"""List of identifiers for a specified preprint. *Read-only*.
##Identifier Attributes
OSF Identifier entities have the "identifiers" `type`.
        name           type       description
        ----------------------------------------------------------------------------
        category       string     e.g. 'ark', 'doi'
        value          string     the identifier value itself
##Links
self: this identifier's detail page
##Relationships
###Referent
The identifier is refers to this preprint.
##Actions
*None*.
##Query Params
Identifiers may be filtered by their category.
#This Request/Response
"""
permission_classes = (
PreprintPublishedOrAdmin,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
serializer_class = PreprintIdentifierSerializer
required_read_scopes = [CoreScopes.IDENTIFIERS_READ]
required_write_scopes = [CoreScopes.NULL]
preprint_lookup_url_kwarg = 'preprint_id'
view_category = 'preprints'
view_name = 'identifier-list'
# overrides IdentifierList
def get_object(self, check_object_permissions=True):
return self.get_preprint(check_object_permissions=check_object_permissions)
class PreprintContributorsList(NodeContributorsList, PreprintMixin):
def create(self, request, *args, **kwargs):
self.kwargs['node_id'] = self.get_preprint(check_object_permissions=False).node._id
return super(PreprintContributorsList, self).create(request, *args, **kwargs)
class PreprintActionList(JSONAPIBaseView, generics.ListCreateAPIView, ListFilterMixin, PreprintMixin):
"""Action List *Read-only*
Actions represent state changes and/or comments on a reviewable object (e.g. a preprint)
##Action Attributes
        name                type                    description
        ====================================================================================
        date_created        iso8601 timestamp       timestamp that the action was created
        date_modified       iso8601 timestamp       timestamp that the action was last modified
        from_state          string                  state of the reviewable before this action was created
        to_state            string                  state of the reviewable after this action was created
        comment             string                  comment explaining the state change
        trigger             string                  name of the trigger for this action
##Relationships
###Target
Link to the object (e.g. preprint) this action acts on
###Provider
Link to detail for the target object's provider
###Creator
Link to the user that created this action
##Links
- `self` -- Detail page for the current action
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Actions may be filtered by their `id`, `from_state`, `to_state`, `date_created`, `date_modified`, `creator`, `provider`, `target`
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReviewActionPermission,
)
required_read_scopes = [CoreScopes.ACTIONS_READ]
required_write_scopes = [CoreScopes.ACTIONS_WRITE]
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
serializer_class = ReviewActionSerializer
model_class = ReviewAction
ordering = ('-created',)
view_category = 'preprints'
view_name = 'preprint-review-action-list'
# overrides ListCreateAPIView
def perform_create(self, serializer):
target = serializer.validated_data['target']
self.check_object_permissions(self.request, target)
if not target.provider.is_reviewed:
raise Conflict('{} is an unmoderated provider. If you are an admin, set up moderation by setting `reviews_workflow` at {}'.format(
target.provider.name,
absolute_reverse('preprint_providers:preprint_provider-detail', kwargs={
'provider_id': target.provider._id,
'version': self.request.parser_context['kwargs']['version']
})
))
serializer.save(user=self.request.user)
# overrides ListFilterMixin
def get_default_queryset(self):
return get_review_actions_queryset().filter(target_id=self.get_preprint().id)
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
| apache-2.0 | 5,948,116,108,433,933,000 | 35.281437 | 142 | 0.674204 | false | 4.262399 | false | false | false |
fxia22/ASM_xf | PythonD/site_python/twisted/test/test_words.py | 2 | 3573 |
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from twisted.trial import unittest
from twisted.spread import util
from twisted.words import service
from twisted.internet import app
from twisted.cred.authorizer import DefaultAuthorizer
class WordsTestCase(unittest.TestCase):
def testWords(self):
ap = app.Application("testwords")
au = DefaultAuthorizer()
s = service.Service('twisted.words', ap, au)
s.createParticipant("glyph")
s.createParticipant("sean")
# XXX OBSOLETE: should be async getPerspectiveRequest
glyph = s.getPerspectiveNamed("glyph")
sean = s.getPerspectiveNamed("sean")
glyph.addContact("sean")
t = glyph.transcribeConversationWith('sean')
glyph.attached(DummyWordsClient(), None)
sean.attached(DummyWordsClient(), None)
glyph.directMessage("sean", "ping")
sean.directMessage("glyph", "pong")
self.failUnlessEqual(len(t.chat), 2)
t.endTranscript()
glyph.directMessage("sean", "(DUP!)")
self.failUnlessEqual(len(t.chat), 2)
class DummyWordsClient(util.LocalAsRemote):
"""A client to a perspective on the twisted.words service.
I attach to that participant with Participant.attached(),
    and detach with Participant.detached().
"""
def async_receiveContactList(self, contactList):
"""Receive a list of contacts and their status.
The list is composed of 2-tuples, of the form
(contactName, contactStatus)
"""
def async_notifyStatusChanged(self, name, status):
"""Notify me of a change in status of one of my contacts.
"""
def async_receiveGroupMembers(self, names, group):
"""Receive a list of members in a group.
'names' is a list of participant names in the group named 'group'.
"""
def async_setGroupMetadata(self, metadata, name):
"""Some metadata on a group has been set.
        XXX: Should this be receiveGroupMetadata(name, metadata)?
"""
def async_receiveDirectMessage(self, sender, message, metadata=None):
"""Receive a message from someone named 'sender'.
'metadata' is a dict of special flags. So far 'style': 'emote'
is defined. Note that 'metadata' *must* be optional.
"""
def async_receiveGroupMessage(self, sender, group, message, metadata=None):
"""Receive a message from 'sender' directed to a group.
'metadata' is a dict of special flags. So far 'style': 'emote'
is defined. Note that 'metadata' *must* be optional.
"""
def async_memberJoined(self, member, group):
"""Tells me a member has joined a group.
"""
def async_memberLeft(self, member, group):
"""Tells me a member has left a group.
"""
testCases = [WordsTestCase]
| gpl-2.0 | -7,671,457,463,034,201,000 | 35.090909 | 79 | 0.673104 | false | 4.074116 | true | false | false |
UnrememberMe/pants | tests/python/pants_test/build_graph/test_target.py | 1 | 12283 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os.path
from hashlib import sha1
import mock
from pants.base.exceptions import TargetDefinitionException
from pants.base.fingerprint_strategy import DefaultFingerprintStrategy
from pants.base.payload import Payload
from pants.base.payload_field import SetOfPrimitivesField
from pants.build_graph.address import Address
from pants.build_graph.target import Target
from pants.build_graph.target_scopes import Scopes
from pants.source.wrapped_globs import Globs
from pants_test.base_test import BaseTest
from pants_test.subsystem.subsystem_util import init_subsystem
class ImplicitSourcesTestingTarget(Target):
default_sources_globs = '*.foo'
class ImplicitSourcesTestingTargetMulti(Target):
default_sources_globs = ('*.foo', '*.bar')
default_sources_exclude_globs = ('*.baz', '*.qux')
class SourcesTarget(Target):
def __init__(self, sources, address=None, exports=None, **kwargs):
payload = Payload()
payload.add_field('sources', self.create_sources_field(sources,
sources_rel_path=address.spec_path,
key_arg='sources'))
payload.add_field('exports', SetOfPrimitivesField(exports))
super(SourcesTarget, self).__init__(address=address, payload=payload, **kwargs)
@property
def export_specs(self):
return self.payload.exports
class TargetTest(BaseTest):
def test_derived_from_chain(self):
# add concrete target
concrete = self.make_target('y:concrete', Target)
# add synthetic targets
syn_one = self.make_target('y:syn_one', Target, derived_from=concrete)
syn_two = self.make_target('y:syn_two', Target, derived_from=syn_one)
# validate
self.assertEquals(list(syn_two.derived_from_chain), [syn_one, concrete])
self.assertEquals(list(syn_one.derived_from_chain), [concrete])
self.assertEquals(list(concrete.derived_from_chain), [])
def test_is_synthetic(self):
# add concrete target
concrete = self.make_target('y:concrete', Target)
# add synthetic targets
syn_one = self.make_target('y:syn_one', Target, derived_from=concrete)
syn_two = self.make_target('y:syn_two', Target, derived_from=syn_one)
syn_three = self.make_target('y:syn_three', Target, synthetic=True)
self.assertFalse(concrete.is_synthetic)
self.assertTrue(syn_one.is_synthetic)
self.assertTrue(syn_two.is_synthetic)
self.assertTrue(syn_three.is_synthetic)
def test_empty_traversable_properties(self):
target = self.make_target(':foo', Target)
self.assertSequenceEqual([], list(target.compute_dependency_specs(payload=target.payload)))
def test_validate_target_representation_args_invalid_exactly_one(self):
with self.assertRaises(AssertionError):
Target._validate_target_representation_args(None, None)
with self.assertRaises(AssertionError):
Target._validate_target_representation_args({}, Payload())
def test_validate_target_representation_args_invalid_type(self):
with self.assertRaises(AssertionError):
Target._validate_target_representation_args(kwargs=Payload(), payload=None)
with self.assertRaises(AssertionError):
Target._validate_target_representation_args(kwargs=None, payload={})
def test_validate_target_representation_args_valid(self):
Target._validate_target_representation_args(kwargs={}, payload=None)
Target._validate_target_representation_args(kwargs=None, payload=Payload())
def test_illegal_kwargs(self):
init_subsystem(Target.Arguments)
with self.assertRaises(Target.Arguments.UnknownArgumentError) as cm:
self.make_target('foo:bar', Target, foobar='barfoo')
self.assertTrue('foobar = barfoo' in str(cm.exception))
self.assertTrue('foo:bar' in str(cm.exception))
def test_unknown_kwargs(self):
options = {Target.Arguments.options_scope: {'ignored': {'Target': ['foobar']}}}
init_subsystem(Target.Arguments, options)
target = self.make_target('foo:bar', Target, foobar='barfoo')
self.assertFalse(hasattr(target, 'foobar'))
def test_target_id_long(self):
long_path = 'dummy'
for i in range(1,30):
long_path = os.path.join(long_path, 'dummy{}'.format(i))
long_target = self.make_target('{}:foo'.format(long_path), Target)
long_id = long_target.id
self.assertEqual(len(long_id), 200)
self.assertEqual(long_id,
'dummy.dummy1.dummy2.dummy3.dummy4.dummy5.dummy6.dummy7.dummy8.dummy9.dummy10.du.'
'c582ce0f60008b3dc8196ae9e6ff5e8c40096974.y20.dummy21.dummy22.dummy23.dummy24.dummy25.'
'dummy26.dummy27.dummy28.dummy29.foo')
def test_target_id_short(self):
short_path = 'dummy'
for i in range(1,10):
short_path = os.path.join(short_path, 'dummy{}'.format(i))
short_target = self.make_target('{}:foo'.format(short_path), Target)
short_id = short_target.id
self.assertEqual(short_id,
'dummy.dummy1.dummy2.dummy3.dummy4.dummy5.dummy6.dummy7.dummy8.dummy9.foo')
def test_implicit_sources(self):
options = {Target.Arguments.options_scope: {'implicit_sources': True}}
init_subsystem(Target.Arguments, options)
target = self.make_target(':a', ImplicitSourcesTestingTarget)
# Note explicit key_arg.
sources = target.create_sources_field(sources=None, sources_rel_path='src/foo/bar',
key_arg='sources')
self.assertEqual(sources.filespec, {'globs': ['src/foo/bar/*.foo']})
target = self.make_target(':b', ImplicitSourcesTestingTargetMulti)
# Note no explicit key_arg, which should behave just like key_arg='sources'.
sources = target.create_sources_field(sources=None, sources_rel_path='src/foo/bar')
self.assertEqual(sources.filespec, {
'globs': ['src/foo/bar/*.foo', 'src/foo/bar/*.bar'],
'exclude': [{'globs': ['src/foo/bar/*.baz', 'src/foo/bar/*.qux']}],
})
# Ensure that we don't use implicit sources when creating resources fields.
resources = target.create_sources_field(sources=None, sources_rel_path='src/foo/bar',
key_arg='resources')
self.assertEqual(resources.filespec, {'globs': []})
def test_create_sources_field_with_string_fails(self):
target = self.make_target(':a-target', Target)
# No key_arg.
with self.assertRaises(TargetDefinitionException) as cm:
target.create_sources_field(sources='a-string', sources_rel_path='')
self.assertIn("Expected a glob, an address or a list, but was <type \'unicode\'>",
str(cm.exception))
# With key_arg.
with self.assertRaises(TargetDefinitionException) as cm:
target.create_sources_field(sources='a-string', sources_rel_path='', key_arg='my_cool_field')
self.assertIn("Expected 'my_cool_field' to be a glob, an address or a list, but was <type \'unicode\'>",
str(cm.exception))
    # Could also test the address case, but it looks like nothing really uses it.
def test_max_recursion(self):
target_a = self.make_target('a', Target)
target_b = self.make_target('b', Target, dependencies=[target_a])
self.make_target('c', Target, dependencies=[target_b])
target_a.inject_dependency(Address.parse('c'))
with self.assertRaises(Target.RecursiveDepthError):
target_a.transitive_invalidation_hash()
def test_transitive_invalidation_hash(self):
target_a = self.make_target('a', Target)
target_b = self.make_target('b', Target, dependencies=[target_a])
target_c = self.make_target('c', Target, dependencies=[target_b])
hasher = sha1()
dep_hash = hasher.hexdigest()[:12]
target_hash = target_a.invalidation_hash()
hash_value = '{}.{}'.format(target_hash, dep_hash)
self.assertEqual(hash_value, target_a.transitive_invalidation_hash())
hasher = sha1()
hasher.update(hash_value)
dep_hash = hasher.hexdigest()[:12]
target_hash = target_b.invalidation_hash()
hash_value = '{}.{}'.format(target_hash, dep_hash)
self.assertEqual(hash_value, target_b.transitive_invalidation_hash())
hasher = sha1()
hasher.update(hash_value)
dep_hash = hasher.hexdigest()[:12]
target_hash = target_c.invalidation_hash()
hash_value = '{}.{}'.format(target_hash, dep_hash)
self.assertEqual(hash_value, target_c.transitive_invalidation_hash())
# Check direct invalidation.
class TestFingerprintStrategy(DefaultFingerprintStrategy):
def direct(self, target):
return True
fingerprint_strategy = TestFingerprintStrategy()
hasher = sha1()
hasher.update(target_b.invalidation_hash(fingerprint_strategy=fingerprint_strategy))
dep_hash = hasher.hexdigest()[:12]
target_hash = target_c.invalidation_hash(fingerprint_strategy=fingerprint_strategy)
hash_value = '{}.{}'.format(target_hash, dep_hash)
self.assertEqual(hash_value, target_c.transitive_invalidation_hash(fingerprint_strategy=fingerprint_strategy))
def test_has_sources(self):
def sources(rel_path, *args):
return Globs.create_fileset_with_spec(rel_path, *args)
self.create_file('foo/bar/a.txt', 'a_contents')
txt_sources = self.make_target('foo/bar:txt',
SourcesTarget,
sources=sources('foo/bar', '*.txt'))
self.assertTrue(txt_sources.has_sources())
self.assertTrue(txt_sources.has_sources('.txt'))
self.assertFalse(txt_sources.has_sources('.rs'))
no_sources = self.make_target('foo/bar:none',
SourcesTarget,
sources=sources('foo/bar', '*.rs'))
self.assertFalse(no_sources.has_sources())
self.assertFalse(no_sources.has_sources('.txt'))
self.assertFalse(no_sources.has_sources('.rs'))
def _generate_strict_dependencies(self):
init_subsystem(Target.Arguments)
self.lib_aa = self.make_target(
'com/foo:AA',
target_type=SourcesTarget,
sources=['com/foo/AA.scala'],
)
self.lib_a = self.make_target(
'com/foo:A',
target_type=SourcesTarget,
sources=['com/foo/A.scala'],
)
self.lib_b = self.make_target(
'com/foo:B',
target_type=SourcesTarget,
sources=['com/foo/B.scala'],
dependencies=[self.lib_a, self.lib_aa],
exports=[':A'],
)
self.lib_c = self.make_target(
'com/foo:C',
target_type=SourcesTarget,
sources=['com/foo/C.scala'],
dependencies=[self.lib_b],
exports=[':B'],
)
self.lib_c_alias = self.make_target(
'com/foo:C_alias',
dependencies=[self.lib_c],
)
self.lib_d = self.make_target(
'com/foo:D',
target_type=SourcesTarget,
sources=['com/foo/D.scala'],
dependencies=[self.lib_c_alias],
exports=[':C_alias'],
)
self.lib_f = self.make_target(
'com/foo:F',
target_type=SourcesTarget,
sources=['com/foo/E.scala'],
scope=Scopes.RUNTIME
)
self.lib_e = self.make_target(
'com/foo:E',
target_type=SourcesTarget,
sources=['com/foo/E.scala'],
dependencies=[self.lib_d, self.lib_f],
)
def test_strict_dependencies(self):
self._generate_strict_dependencies()
dep_context = mock.Mock()
dep_context.compiler_plugin_types = ()
dep_context.codegen_types = ()
dep_context.alias_types = (Target,)
dep_context.target_closure_kwargs = {'include_scopes': Scopes.JVM_COMPILE_SCOPES}
self.assertEqual(set(self.lib_b.strict_dependencies(dep_context)), {self.lib_a, self.lib_aa})
self.assertEqual(set(self.lib_c.strict_dependencies(dep_context)), {self.lib_b, self.lib_a})
self.assertEqual(set(self.lib_c_alias.strict_dependencies(dep_context)), {self.lib_c, self.lib_b, self.lib_a})
self.assertEqual(set(self.lib_d.strict_dependencies(dep_context)), {self.lib_c, self.lib_b, self.lib_a})
self.assertEqual(set(self.lib_e.strict_dependencies(dep_context)), {self.lib_d, self.lib_c, self.lib_b, self.lib_a})
| apache-2.0 | -8,117,164,829,789,744,000 | 39.272131 | 120 | 0.673288 | false | 3.544877 | true | false | false |
dburggie/py3D | bodies/ConcCircle.py | 1 | 1475 | import bounds
from py3D import Vector, Ray, Color, Body
from CheckPlane import CheckPlane
class ConcCircle(CheckPlane):
def set_orientation(self, orientation):
self.oX = (orientation - self._origin).norm()
self.oY = self._normal.cross(self.oX).norm()
return self
def __init__(self,
r = 1.0,
normal = Vector(0.0,1.0,0.0),
origin = Vector(0.0,0.0,0.0),
orientation = Vector(1.0,0.0,0.0),
c1 = Color(0.01,0.01,0.01),
c2 = Color(0.99,0.99,0.99)):
"""Initializes plane and plane colors."""
CheckPlane.__init__(self, normal, origin, orientation, c1, c2)
self.origin = origin
self.set_orientation(orientation)
self.r = r
self.R = r ** 2.0
def intersection(self, ray):
distance = CheckPlane.intersection(self, ray)
if distance < 0.0:
return distance
else:
point = ray.follow(distance).add(self.origin, -1.0)
dx = point.dot(self.oX)
dy = point.dot(self.oY)
if dx ** 2.0 + dy ** 2.0 > self.R:
return -1.0
else:
return distance
def get_color(self, point):
"""Returns color of plane at the point."""
d = point - self._origin
dist = int(d.dot(d) ** 0.5) % 2
if dist == 0:
return self.c1.dup()
else:
return self.c2.dup()
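# Example construction (values are illustrative): a disc of radius 2 in the y=0 plane
# with dark/light concentric rings, using the Vector/Color types imported above.
#
#     disc = ConcCircle(r=2.0,
#                       normal=Vector(0.0, 1.0, 0.0),
#                       origin=Vector(0.0, 0.0, 0.0),
#                       orientation=Vector(1.0, 0.0, 0.0),
#                       c1=Color(0.05, 0.05, 0.05),
#                       c2=Color(0.95, 0.95, 0.95))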
| mit | -2,887,579,285,533,850,600 | 29.102041 | 70 | 0.51661 | false | 3.503563 | false | false | false |
ethanyishchan/parksandrec | app.py | 1 | 2548 | from flask import Flask, request,render_template, jsonify, Response
import random
import glob
import os
import uuid
import urllib2
import time
import httplib, urllib
import requests
import json
# The next two imports are project-local helper modules referenced in classify_url()
# below but never imported in the original file; they are assumed to live next to app.py.
import parse_and_dump
import test_splice_text
app = Flask(__name__)
# print "hello"
@app.route("/upload_phrase", methods=['POST'])
def classify_url():
if request.method == 'POST':
phrase = request.form['phrase']
phrase = parse_and_dump.get_language_text(phrase)
counter = len(phrase)
test_splice_text.wrapper_main(phrase)
# time.sleep(counter / 2)
print phrase
return jsonify({'phrase':phrase})
else:
#get 10 most similar and return
return
@app.route("/")
def index_main():
print "rendering website"
return render_template('index.html', name = "hahahahahahah")
@app.route("/add_activity",methods=["POST"])
def add_activity(req = None):
print "rendering post activity"
# print req
# print request.form["description"]
# print request
name = request.form['activity_name']
print name, " activity_name"
description = request.form['description']
print description , " desc"
try:
capacity = int(request.form['capacity'])
except:
capacity = 12
print capacity, "capacity"
location = request.form['location']
print location , "location"
x = request.form['loclat']
print x, "loclat"
y = request.form['loclong']
print y, "locLong"
point = str(x) + "," + str(y)
print point, "point"
start_time = request.form['start_time']
end_time = request.form['end_time']
owner = 555
category = request.form['category']
data_r = {
"name" : name,
"desc" : description,
"cap" : capacity,
"loc" : location,
"point" : point,
"start" : start_time,
"end" : end_time,
"owner" : owner,
"categ" : category
}
data_r_json = json.dumps(data_r)
r = requests.post("http://10.10.200.66:8080/activity", data= data_r_json)
print(r.status_code, r.reason)
return render_template('submit_form.html', name = "add_activity_meh")
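# Illustrative client call for /add_activity (field names match the form fields read
# above; host and values are made up):
#
#     requests.post("http://localhost:5000/add_activity", data={
#         "activity_name": "Pickup soccer", "description": "Casual evening game",
#         "capacity": 10, "location": "Central Park", "loclat": 40.78, "loclong": -73.96,
#         "start_time": "18:00", "end_time": "20:00", "category": "sports"})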
def gen(camera):
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
    # VideoCamera is not defined or imported in this file; it is assumed to come from a
    # project-local module that is not shown here.
    return Response(gen(VideoCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == "__main__":
app.run(host = '0.0.0.0', debug = True)
| bsd-3-clause | -6,499,030,696,478,848,000 | 24.227723 | 77 | 0.600863 | false | 3.509642 | false | false | false |
cpodlesny/lisbon | src/news/models.py | 2 | 1864 | import datetime
from django.db import models
from tours.models import Category, Tour
from easy_thumbnails.fields import ThumbnailerImageField
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
class Article(models.Model):
title_PT = models.CharField(_('Title PT'), max_length=100, blank=True, null=False)
title_EN = models.CharField(_('Title EN'), max_length=100, blank=True, null=False)
title_DE = models.CharField(_('Title DE'), max_length=100, blank=True, null=False)
description_PT = models.TextField(_('Description PT'), max_length=5000, blank=True, null=False)
description_EN = models.TextField(_('Description EN'), max_length=5000, blank=True, null=False)
description_DE = models.TextField(_('Description DE'), max_length=5000, blank=True, null=False)
category = models.ForeignKey(Category, default=None, blank=True, null=True)
tour = models.ForeignKey(Tour, default=None, blank=True, null=True)
link = models.URLField(max_length=100, blank=True, null=False)
img = ThumbnailerImageField(_('Article thumbnail'), null=True, blank=True)
keywords_SEO = models.TextField(_('Article keywords for SEO'), max_length=5000, blank=True, null=False)
description_SEO = models.TextField(_('Article description for SEO'), max_length=5000, blank=True, null=False)
    # Pass the callable itself so the default is evaluated on each save, not once at import time.
    created_on = models.DateTimeField(default=datetime.datetime.now, blank=True)
def get_absolute_url(self):
return reverse('news:detail', args=[str(self.id)])
def get_edit_url(self):
return reverse('news:edit', args=[str(self.id)])
def get_delete_url(self):
return reverse('news:delete', args=[str(self.id)])
def __str__(self):
return self.title_EN
def __unicode__(self):
return self.title_EN
class Meta:
ordering = ['-created_on']
| mit | -3,807,208,999,162,011,600 | 44.463415 | 113 | 0.701717 | false | 3.683794 | false | false | false |
KE-works/pykechain | pykechain/models/representations/representation_base.py | 1 | 4247 | import warnings
from abc import abstractmethod
from typing import Dict, Any
from jsonschema import validate
from pykechain.models.validators.validator_schemas import representation_jsonschema_stub
class BaseRepresentation(object):
"""
Base class for all Representations.
:cvar jsonschema: jsonschema to validate the json of the representation
:type jsonschema: Dict
:cvar rtype: type of representation
:type rtype: Text
"""
jsonschema = representation_jsonschema_stub
rtype = None
_config_value_key = None
def __init__(self, obj=None, json=None, value=None, prop=None):
"""
Construct a base representation.
:param obj: the object to which the representation is applied, such as a property.
:type obj: Base
:param json: representation json (usually part of the original object json)
:type json: dict
:param value: value of the representation, its options vary per representation type
:type value: Any
:param prop: deprecated keyword for obj
:type prop: Property
"""
if prop is not None:
warnings.warn(
"Keyword `prop` is deprecated in favor of `obj`.",
PendingDeprecationWarning,
)
obj = prop
del prop
self._obj = obj
self._json: dict = json or dict(rtype=self.rtype, config=dict())
self._config: dict = self._json.get("config", dict())
self._json["config"] = self._config
if value is not None:
self.validate_representation(value)
self._config[self._config_value_key] = value
def __repr__(self):
return "{} ({})".format(self.__class__.__name__, self.value)
def as_json(self) -> Dict:
"""Parse the validator to a proper validator json."""
return self._json
def validate_json(self) -> Any:
"""Validate the json representation of the validator against the validator jsonschema."""
return validate(self._json, self.jsonschema)
@classmethod
def parse(cls, obj: Any, json: Dict) -> "BaseRepresentation":
"""Parse a json dict and return the correct subclass of :class:`BaseRepresentation`.
It uses the 'rtype' key to determine which :class:`BaseRepresentation` to instantiate.
:param obj: object to which the `BaseRepresentation` belongs.
:type: prop: Base
:param json: dictionary containing the specific keys to parse into a :class:`BaseRepresentation`
:type json: dict
:returns: the instantiated subclass of :class:`BaseRepresentation`
:rtype: :class:`BaseRepresentation` or subclass thereof
"""
try:
rtype = json["rtype"]
except KeyError:
raise ValueError(
"Representation unknown, incorrect json: '{}'".format(json)
)
try:
from pykechain.models.representations import rtype_class_map
repr_class: type(BaseRepresentation) = rtype_class_map[rtype]
except KeyError:
raise TypeError('Unknown rtype "{}" in json'.format(rtype))
return repr_class(obj=obj, json=json)
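    # Illustrative use (the dict below is hypothetical): given a property ``prop`` whose
    # options contain ``{"rtype": "<known rtype>", "config": {...}}``,
    # ``BaseRepresentation.parse(prop, that_dict)`` looks the rtype up in
    # ``rtype_class_map`` and returns an instance of the matching subclass.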
@property
def value(self):
"""
Retrieve current representation value.
:return: value
:rtype Any
"""
return self._config[self._config_value_key] if self._config_value_key else None
@value.setter
def value(self, value):
"""
Set a new representation value.
:param value: the new value to be set
:type value: Any
:return: the value
:rtype Any
"""
self.validate_representation(value)
self._config[self._config_value_key] = value
# Update the property in-place
if self._obj:
self._obj.representations = self._obj.representations
@abstractmethod
def validate_representation(self, value: Any) -> None:
"""
Validate whether the representation value can be set.
:param value: representation value to set.
:type value: Any
:raises IllegalArgumentError
:return: None
"""
pass # pragma: no cover
| apache-2.0 | 7,312,249,588,611,529,000 | 31.419847 | 104 | 0.616435 | false | 4.581446 | true | false | false |
treethought/flask-assistant | api_ai/schema_handlers.py | 1 | 14253 | import os
import inspect
import json
from ruamel import yaml
from .models import Intent, Entity
class SchemaHandler(object):
def __init__(self, assist, object_type=None):
self.assist = assist
self.intents = []
self.api = assist.api
self.object_type = object_type
# File set up
def get_or_create_dir(self, dir_name):
d = os.path.join(self.assist.app.root_path, dir_name)
if not os.path.isdir(d):
os.mkdir(d)
return d
@property
def schema_dir(self):
return self.get_or_create_dir('schema')
@property
def json_file(self):
file_name = '{}.json'.format(self.object_type)
f = os.path.join(self.schema_dir, file_name)
if not os.path.isfile(f):
open(f, 'w+').close()
return f
@property
def saved_schema(self):
with open(self.json_file, 'r') as f:
try:
return json.load(f)
except ValueError as e: # python2
return []
except json.decoder.JSONDecodeError: # python3
return []
@property
def registered(self):
if self.saved_schema:
return [i for i in self.saved_schema if i if i.get('id')]
def dump_schema(self, schema):
print('Writing schema json to file')
with open(self.json_file, 'w') as f:
json.dump(schema, f, indent=4)
# templates
@property
def template_dir(self):
return self.get_or_create_dir('templates')
def template_file(self, template_type):
file_name = '{}.yaml'.format(template_type)
f = os.path.join(self.template_dir, file_name)
if not os.path.isfile(f):
open(f, 'w+').close()
return f
@property
def user_says_template(self):
return self.template_file('user_says')
@property
def entity_template(self):
return self.template_file('entities')
def load_yaml(self, template_file):
with open(template_file) as f:
try:
return yaml.safe_load(f)
except yaml.YAMLError as e:
print(e)
return []
def user_says_yaml(self):
return self.load_yaml(self.user_says_template)
def entity_yaml(self):
return self.load_yaml(self.entity_template)
def grab_id(self, obj_name):
if self.registered:
for obj in self.registered:
if obj['name'] == obj_name:
return obj['id']
class IntentGenerator(SchemaHandler):
def __init__(self, assist):
super(IntentGenerator, self).__init__(assist, object_type='intents')
@property
def app_intents(self):
"""Returns a list of Intent objects created from the assistant's acion functions"""
from_app = []
for intent_name in self.assist._intent_action_funcs:
intent = self.build_intent(intent_name)
from_app.append(intent)
return from_app
def build_intent(self, intent_name):
"""Builds an Intent object of the given name"""
# TODO: contexts
is_fallback = self.assist._intent_fallbacks[intent_name]
contexts = self.assist._required_contexts[intent_name]
events = self.assist._intent_events[intent_name]
new_intent = Intent(intent_name, fallback_intent=is_fallback, contexts=contexts, events=events)
self.build_action(new_intent)
self.build_user_says(new_intent) # TODO
return new_intent
def build_action(self, intent):
action_name = self.assist._intent_action_funcs[intent.name][0].__name__
params = self.parse_params(intent.name)
intent.add_action(action_name, parameters=params)
def parse_params(self, intent_name):
"""Parses params from an intent's action decorator and view function.
Returns a list of parameter field dicts to be included in the intent object's response field.
"""
params = []
action_func = self.assist._intent_action_funcs[intent_name][0]
argspec = inspect.getargspec(action_func)
param_entity_map = self.assist._intent_mappings.get(intent_name)
args, defaults = argspec.args, argspec.defaults
default_map = {}
if defaults:
default_map = dict(zip(args[-len(defaults):], defaults))
# import ipdb; ipdb.set_trace()
for arg in args:
param_info = {}
param_entity = param_entity_map.get(arg, arg)
param_name = param_entity.replace('sys.', '')
# param_name = arg
param_info['name'] = param_name
param_info['value'] = '$' + param_name
param_info['dataType'] = '@' + param_entity
param_info['prompts'] = [] # TODO: fill in provided prompts
param_info['required'] = arg not in default_map
param_info['isList'] = isinstance(default_map.get(arg), list)
if param_info['isList']:
param_info['defaultValue'] = ''
else:
param_info['defaultValue'] = default_map.get(arg, '')
params.append(param_info)
return params
def get_synonyms(self, annotation, entity):
raw_temp = self.entity_yaml()
for temp_dict in [d for d in raw_temp if d == entity]:
for entry in raw_temp.get(temp_dict):
if isinstance(entry, dict):
for a, s in entry.items():
if a == annotation:
for synonym in s:
yield(synonym)
def build_user_says(self, intent):
raw = self.user_says_yaml()
intent_data = raw.get(intent.name)
if intent_data:
phrases = intent_data.get('UserSays', [])
annotations = intent_data.get('Annotations', [])
events = intent_data.get('Events', [])
mapping = {}
for a in [a for a in annotations if a]:
for annotation, entity in a.items():
mapping.update({str(annotation):str(entity)})
for synonym in self.get_synonyms(annotation, entity):
mapping.update({str(synonym):str(entity)})
for phrase in [p for p in phrases if p]:
if phrase != '':
intent.add_example(phrase, templ_entity_map=mapping)
for event in [e for e in events if e]:
intent.add_event(event)
def push_intent(self, intent):
"""Registers or updates an intent and returns the intent_json with an ID"""
if intent.id:
print('Updating {} intent'.format(intent.name))
self.update(intent)
else:
print('Registering {} intent'.format(intent.name))
intent = self.register(intent)
return intent
def register(self, intent):
"""Registers a new intent and returns the Intent object with an ID"""
response = self.api.post_intent(intent.serialize)
print(response)
print()
if response['status']['code'] == 200:
intent.id = response['id']
elif response['status']['code'] == 409: # intent already exists
intent.id = next(i.id for i in self.api.agent_intents if i.name == intent.name)
self.update(intent)
return intent
def update(self, intent):
response = self.api.put_intent(intent.id, intent.serialize)
print(response)
print()
if response['status']['code'] == 200:
return response
def generate(self):
print('Generating intent schema...')
schema = []
for intent in self.app_intents:
intent.id = self.grab_id(intent.name)
intent = self.push_intent(intent)
schema.append(intent.__dict__)
self.dump_schema(schema)
class EntityGenerator(SchemaHandler):
def __init__(self, assist):
super(EntityGenerator, self).__init__(assist, object_type='entities')
def build_entities(self):
raw_temp = self.entity_yaml()
for entity_name in raw_temp:
e = Entity(entity_name)
self.build_entries(e, raw_temp)
yield e
def build_entries(self, entity, temp_dict):
entries = temp_dict.get(entity.name, [])
for entry in entries:
if isinstance(entry, dict): # mapping
(value, synyms), = entry.items()
else: # enum/composite
entity.isEnum = True
value = entry
synyms = [entry]
entity.add_entry(value, synyms)
def register(self, entity):
"""Registers a new entity and returns the entity object with an ID"""
response = self.api.post_entity(entity.serialize)
print(response)
print()
if response['status']['code'] == 200:
entity.id = response['id']
if response['status']['code'] == 409: # entity already exists
entity.id = next(i.id for i in self.api.agent_entities if i.name == entity.name)
self.update(entity)
return entity
def update(self, entity):
response = self.api.put_entity(entity.id, entity.serialize)
print(response)
print()
if response['status']['code'] == 200:
return response
def push_entity(self, entity):
"""Registers or updates an entity and returns the entity_json with an ID"""
if entity.id:
print('Updating {} entity'.format(entity.name))
self.update(entity)
else:
print('Registering {} entity'.format(entity.name))
entity = self.register(entity)
return entity
def generate(self):
print('Generating entity schema...')
schema = []
for entity in self.build_entities():
entity.id = self.grab_id(entity.name)
entity = self.push_entity(entity)
schema.append(entity.__dict__)
self.dump_schema(schema)
class TemplateCreator(SchemaHandler):
def __init__(self, assist):
super(TemplateCreator, self).__init__(assist)
self.assist = assist
def generate(self):
if not self.user_says_yaml():
self.create_user_says_skeleton()
if not self.entity_yaml():
self.create_entity_skeleton()
def get_or_create_dir(self, dir_name):
try:
root = self.assist.app.root_path
except AttributeError: # for blueprints
root = self.assist.blueprint.root_path
d = os.path.join(root, dir_name)
if not os.path.isdir(d):
os.mkdir(d)
return d
@property
def template_dir(self):
return self.get_or_create_dir('templates')
@property
def user_says_exists(self):
return self._user_says_exists
def parse_annotations_from_action_mappings(self, intent_name):
annotations = []
entity_map = self.assist._intent_mappings.get(intent_name, {})
for param in entity_map:
annotations.append({param: entity_map[param]})
return annotations
def create(self, user_says=True, entities=True):
if user_says:
self.create_user_says_skeleton()
if entities:
self.create_entity_skeleton()
def create_user_says_skeleton(self):
template = os.path.join(self.template_dir, 'user_says.yaml')
skeleton = {}
for intent in self.assist._intent_action_funcs:
# print(type(intent))
entity_map_from_action = self.assist._intent_mappings.get(intent, {})
d = yaml.compat.ordereddict()
d['UserSays'] = [None, None]
d['Annotations'] = [None, None]
d['Events'] = [None]
# d['Annotations'] = self.parse_annotations_from_action_mappings(intent)
data = yaml.comments.CommentedMap(d) # to preserve order w/o tags
skeleton[intent] = data
with open(template, 'a') as f:
f.write('# Template for defining UserSays examples\n\n')
f.write('# give-color-intent:\n\n')
f.write('# UserSays:\n')
f.write('# - My color is blue\n')
f.write('# - red is my favorite color\n\n')
f.write('# Annotations:\n')
f.write('# - blue: sys.color # maps param value -> entity\n')
f.write('# - red: sys.color\n\n')
f.write('# Events:\n')
f.write('# - event1 # adds a triggerable event named \'event1\' to the intent\n\n\n\n')
# f.write(header)
yaml.dump(skeleton, f, default_flow_style=False, Dumper=yaml.RoundTripDumper)
def create_entity_skeleton(self):
print('Creating Template for Entities')
template = os.path.join(self.template_dir, 'entities.yaml')
message = """# Template file for entities\n\n"""
skeleton = {}
for intent in self.assist._intent_action_funcs:
entity_map = self.assist._intent_mappings.get(intent)
action_func = self.assist._intent_action_funcs[intent][0]
args = inspect.getargspec(action_func).args
# dont add API 'sys' entities to the template
if entity_map:
args = [a for a in args if 'sys.' not in entity_map.get(a, [])]
for param in [p for p in args if p not in skeleton]:
skeleton[param] = [None, None]
with open(template, 'w') as f:
f.write(message)
f.write('#Format as below\n\n')
f.write("# entity_name:\n")
f.write("# - entry1: list of synonyms \n")
f.write("# - entry2: list of synonyms \n\n")
f.write("#For example:\n\n")
f.write("# drink:\n")
f.write("# - water: ['aqua', 'h20'] \n")
f.write("# - coffee: ['joe', 'caffeine', 'espresso', 'late'] \n")
f.write("# - soda: ['pop', 'coke']\n\n\n\n")
yaml.dump(skeleton, f, default_flow_style=False, Dumper=yaml.RoundTripDumper)
| apache-2.0 | 2,774,965,515,386,810,000 | 32.774882 | 115 | 0.562618 | false | 3.907072 | false | false | false |
artefactual/archivematica-storage-service | storage_service/locations/api/resources.py | 1 | 73908 | # This file contains the base models that individual versioned models
# are based on. They shouldn't be directly used with Api objects.
# stdlib, alphabetical
import json
import logging
import os
import pprint
import re
import shutil
import urllib
# Core Django, alphabetical
from django.conf import settings
from django.conf.urls import url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.forms.models import model_to_dict
from django.utils.translation import ugettext as _
from django.utils import six
# Third party dependencies, alphabetical
import bagit
from tastypie.authentication import (
BasicAuthentication,
ApiKeyAuthentication,
MultiAuthentication,
SessionAuthentication,
)
from tastypie.authorization import DjangoAuthorization
import tastypie.exceptions
from tastypie import fields
from tastypie import http
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie.validation import CleanedDataFormValidation
from tastypie.utils import trailing_slash, dict_strip_unicode_keys
# This project, alphabetical
from administration.models import Settings
from common import utils
from locations.api.sword import views as sword_views
from ..models import (
Callback,
CallbackError,
Event,
File,
Package,
Location,
LocationPipeline,
Space,
Pipeline,
StorageException,
Async,
PosixMoveUnsupportedError,
)
from ..forms import SpaceForm
from ..constants import PROTOCOL
from locations import signals
from ..models.async_manager import AsyncManager
LOGGER = logging.getLogger(__name__)
# FIXME ModelResources with ForeignKeys to another model don't work with
# validation = CleanedDataFormValidation On creation, it errors with:
# "Select a valid choice. That choice is not one of the available choices."
# This is because the ModelResource accepts a URI, but does not convert it to a
# primary key (in our case, UUID) before passing it to Django.
# See https://github.com/toastdriven/django-tastypie/issues/152 for details
def _custom_endpoint(expected_methods=["get"], required_fields=[]):
"""
Decorator for custom endpoints that handles boilerplate code.
    Checks that the method is allowed and the request is authenticated, deserializes
    the request body, and can require specific fields to be present in it.
    The custom endpoint must accept request and bundle.
"""
def decorator(func):
""" The decorator applied to the endpoint """
def wrapper(resource, request, **kwargs):
""" Wrapper for custom endpoints with boilerplate code. """
# Tastypie API checks
resource.method_check(request, allowed=expected_methods)
resource.is_authenticated(request)
resource.throttle_check(request)
# Get object
try:
obj = resource._meta.queryset.get(uuid=kwargs["uuid"])
except ObjectDoesNotExist:
return http.HttpNotFound(
_("Resource with UUID %(uuid)s does not exist")
% {"uuid": kwargs["uuid"]}
)
except MultipleObjectsReturned:
return http.HttpMultipleChoices(
_("More than one resource is found at this URI.")
)
# Get body content
try:
deserialized = resource.deserialize(
request,
request.body,
format=request.META.get("CONTENT_TYPE", "application/json"),
)
deserialized = resource.alter_deserialized_detail_data(
request, deserialized
)
except Exception:
# Trouble decoding request body - may not actually exist
deserialized = []
# Check required fields, if any
if not all(k in deserialized for k in required_fields):
# Don't have enough information to make the request - return error
return http.HttpBadRequest(
_("All of these fields must be provided: %(fields)s")
% {"fields": ", ".join(required_fields)}
)
# Build bundle and return it
bundle = resource.build_bundle(obj=obj, data=deserialized, request=request)
bundle = resource.alter_detail_data_to_serialize(request, bundle)
# Call the decorated method
result = func(resource, request, bundle, **kwargs)
resource.log_throttled_access(request)
return result
return wrapper
return decorator
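# Usage sketch (the endpoint name and required field below are hypothetical; ``browse``
# further down shows a real use):
#
#     @_custom_endpoint(expected_methods=["post"], required_fields=["some_field"])
#     def my_endpoint(self, request, bundle, **kwargs):
#         ...  # bundle.obj is the object looked up by UUID, bundle.data the request body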
class PipelineResource(ModelResource):
# Attributes used for POST, exclude from GET
create_default_locations = fields.BooleanField(use_in=lambda x: False)
shared_path = fields.CharField(use_in=lambda x: False)
class Meta:
queryset = Pipeline.active.all()
authentication = MultiAuthentication(
BasicAuthentication(), ApiKeyAuthentication(), SessionAuthentication()
)
authorization = DjangoAuthorization()
# validation = CleanedDataFormValidation(form_class=PipelineForm)
resource_name = "pipeline"
fields = ["uuid", "description", "remote_name", "api_key", "api_username"]
list_allowed_methods = ["get", "post"]
detail_allowed_methods = ["get"]
detail_uri_name = "uuid"
always_return_data = True
filtering = {"description": ALL, "uuid": ALL}
def dehydrate(self, bundle):
# Don't return API username or key
del bundle.data["api_username"]
del bundle.data["api_key"]
return bundle
def obj_create(self, bundle, **kwargs):
bundle = super(PipelineResource, self).obj_create(bundle, **kwargs)
bundle.obj.enabled = not utils.get_setting("pipelines_disabled", False)
create_default_locations = bundle.data.get("create_default_locations", False)
# Try to guess Pipeline's IP if remote_name is undefined
if bundle.data.get("remote_name") is None:
ip = bundle.request.META.get("REMOTE_ADDR") or None
bundle.obj.remote_name = ip
shared_path = bundle.data.get("shared_path", None)
bundle.obj.save(create_default_locations, shared_path)
return bundle
class SpaceResource(ModelResource):
class Meta:
queryset = Space.objects.all()
authentication = MultiAuthentication(
BasicAuthentication(), ApiKeyAuthentication(), SessionAuthentication()
)
authorization = DjangoAuthorization()
validation = CleanedDataFormValidation(form_class=SpaceForm)
resource_name = "space"
fields = [
"access_protocol",
"last_verified",
"location_set",
"path",
"size",
"used",
"uuid",
"verified",
]
list_allowed_methods = ["get", "post"]
detail_allowed_methods = ["get"]
detail_uri_name = "uuid"
always_return_data = True
filtering = {
"access_protocol": ALL,
"path": ALL,
"size": ALL,
"used": ALL,
"uuid": ALL,
"verified": ALL,
}
def prepend_urls(self):
return [
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/browse%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("browse"),
name="browse",
)
]
# Is there a better place to add protocol-specific space info?
# alter_detail_data_to_serialize
# alter_deserialized_detail_data
def dehydrate(self, bundle):
""" Add protocol specific fields to an entry. """
bundle = super(SpaceResource, self).dehydrate(bundle)
access_protocol = bundle.obj.access_protocol
model = PROTOCOL[access_protocol]["model"]
try:
space = model.objects.get(space=bundle.obj.uuid)
except model.DoesNotExist:
LOGGER.error("Space matching UUID %s does not exist", bundle.obj.uuid)
# TODO this should assert later once creation/deletion stuff works
else:
keep_fields = PROTOCOL[access_protocol]["fields"]
added_fields = model_to_dict(space, keep_fields)
bundle.data.update(added_fields)
return bundle
def obj_create(self, bundle, **kwargs):
""" Creates protocol specific class when creating a Space. """
# TODO How to move this to the model?
# Make dict of fields in model and values from bundle.data
access_protocol = bundle.data["access_protocol"]
keep_fields = PROTOCOL[access_protocol]["fields"]
fields_dict = {key: bundle.data[key] for key in keep_fields}
bundle = super(SpaceResource, self).obj_create(bundle, **kwargs)
model = PROTOCOL[access_protocol]["model"]
obj = model.objects.create(space=bundle.obj, **fields_dict)
obj.save()
return bundle
def get_objects(self, space, path):
message = _("This method should be accessed via a versioned subclass")
raise NotImplementedError(message)
@_custom_endpoint(expected_methods=["get"])
def browse(self, request, bundle, **kwargs):
""" Returns all of the entries in a space, optionally at a subpath.
Returns a dict with
{'entries': [list of entries in the directory],
'directories': [list of directories in the directory]}
Directories is a subset of entries, all are just the name.
If a path=<path> parameter is provided, will look in that path inside
the Space. """
space = bundle.obj
path = request.GET.get("path", "")
if not path.startswith(space.path):
path = os.path.join(space.path, path)
objects = self.get_objects(space, path)
return self.create_response(request, objects)
class LocationResource(ModelResource):
space = fields.ForeignKey(SpaceResource, "space")
path = fields.CharField(attribute="full_path", readonly=True)
pipeline = fields.ToManyField(PipelineResource, "pipeline")
class Meta:
queryset = Location.active.all()
authentication = MultiAuthentication(
BasicAuthentication(), ApiKeyAuthentication(), SessionAuthentication()
)
authorization = DjangoAuthorization()
# validation = CleanedDataFormValidation(form_class=LocationForm)
resource_name = "location"
fields = [
"enabled",
"relative_path",
"purpose",
"quota",
"used",
"uuid",
"description",
]
list_allowed_methods = ["get", "post"]
detail_allowed_methods = ["get", "post"]
detail_uri_name = "uuid"
always_return_data = True
filtering = {
"relative_path": ALL,
"pipeline": ALL_WITH_RELATIONS,
"purpose": ALL,
"quota": ALL,
"space": ALL_WITH_RELATIONS,
"used": ALL,
"uuid": ALL,
"description": ALL,
}
def prepend_urls(self):
return [
url(
r"^(?P<resource_name>%s)/default/(?P<purpose>[A-Z]{2})%s$"
% (self._meta.resource_name, trailing_slash()),
self.wrap_view("default"),
name="default_location",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/browse%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("browse"),
name="browse",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/async%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("post_detail_async"),
name="post_detail_async",
),
# FEDORA/SWORD2 endpoints
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/sword/collection%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("sword_collection"),
name="sword_collection",
),
]
def decode_path(self, path):
return path
def get_objects(self, space, path):
message = _("This method should be accessed via a versioned subclass")
raise NotImplementedError(message)
def default(self, request, **kwargs):
"""Redirects to the default location for the given purpose.
This function is not using the `_custom_endpoint` decorator because it
is not bound to an object.
"""
# Tastypie API checks
self.method_check(request, allowed=["get", "post"])
self.is_authenticated(request)
self.throttle_check(request)
self.log_throttled_access(request)
try:
name = "default_{}_location".format(kwargs["purpose"])
uuid = Settings.objects.get(name=name).value
except (Settings.DoesNotExist, KeyError):
return http.HttpNotFound("Default location not defined for this purpose.")
return HttpResponseRedirect(
reverse(
"api_dispatch_detail",
kwargs={"api_name": "v2", "resource_name": "location", "uuid": uuid},
)
)
def save_m2m(self, bundle):
for field_name, field_object in self.fields.items():
if field_name != "pipeline":
continue
if not getattr(field_object, "is_m2m", False):
continue
if not field_object.attribute:
continue
pipelines = bundle.data["pipeline"]
for item in pipelines:
LocationPipeline.objects.get_or_create(
pipeline=item.obj, location=bundle.obj
)
def obj_create(self, bundle, **kwargs):
"""Create a new location and make it the default when requested."""
if "default" in bundle.data:
# This is going to result in calling the `default` attribute setter
# in the underlying model (Location).
kwargs["default"] = bundle.data["default"]
return super(LocationResource, self).obj_create(bundle, **kwargs)
@_custom_endpoint(expected_methods=["get"])
def browse(self, request, bundle, **kwargs):
""" Returns all of the entries in a location, optionally at a subpath.
Returns a dict with
{'entries': [list of entries in the directory],
'directories': [list of directories in the directory]}
Directories is a subset of entries, all are just the name.
If a path=<path> parameter is provided, will look in that path inside
the Location. """
location = bundle.obj
path = request.GET.get("path", "")
path = self.decode_path(path)
location_path = location.full_path
if isinstance(location_path, six.text_type):
location_path = location_path.encode("utf8")
if not path.startswith(location_path):
path = os.path.join(location_path, path)
objects = self.get_objects(location.space, path)
return self.create_response(request, objects)
def _move_files_between_locations(
self, files, origin_location, destination_location
):
"""
Synchronously move files from one location to another. May be called from
the request thread, or as an async task.
"""
# For each file in files, call move to/from
origin_space = origin_location.space
destination_space = destination_location.space
for sip_file in files:
source_path = sip_file.get("source", None)
destination_path = sip_file.get("destination", None)
# make path relative to the location
source_path = os.path.join(origin_location.relative_path, source_path)
destination_path = os.path.join(
destination_location.relative_path, destination_path
)
try:
if not origin_location.is_move_allowed():
LOGGER.debug("Moving files from this location is not allowed")
raise PosixMoveUnsupportedError
origin_space.posix_move(
source_path=source_path,
destination_path=destination_path,
destination_space=destination_space,
package=None,
)
except PosixMoveUnsupportedError:
origin_space.move_to_storage_service(
source_path=source_path,
destination_path=destination_path,
destination_space=destination_space,
)
origin_space.post_move_to_storage_service()
destination_space.move_from_storage_service(
source_path=destination_path,
destination_path=destination_path,
package=None,
)
destination_space.post_move_from_storage_service(
destination_path, destination_path
)
def _handle_location_file_move(self, move_files_fn, request, *args, **kwargs):
"""
Handle a request to move files to this Location.
Intended for use with creating Transfers, SIPs, etc and other cases
where files need to be moved but not tracked by the storage service.
POST body should contain a dict with elements:
origin_location: URI of the Location the files should be moved from
pipeline: URI of the Pipeline both Locations belong to
files: List of dicts containing 'source' and 'destination', paths
relative to their Location of the files to be moved.
The actual work of moving the files is delegated to move_files_fn, which
will be called with:
* The list of files to move
* The origin location
* The destination location
and should return a HttpResponse suitable for response to the
client. This is parameterised in this way to give the caller the choice
of copying synchronously (returning a HTTP 201 response) or
asynchronously (returning a HTTP 202 + redirect).
"""
data = self.deserialize(request, request.body)
data = self.alter_deserialized_detail_data(request, data)
# Get the object for this endpoint
try:
destination_location = Location.active.get(uuid=kwargs["uuid"])
except Location.DoesNotExist:
return http.HttpNotFound()
# Check for required fields
required_fields = ["origin_location", "pipeline", "files"]
if not all((k in data) for k in required_fields):
# Don't have enough information to make the request - return error
return http.HttpBadRequest()
# Get the origin Location
origin_uri = data["origin_location"]
try:
# splitting origin_uri on / results in:
# ['', 'api', 'v1', '<resource_name>', '<uuid>', '']
origin_uuid = origin_uri.split("/")[4]
origin_location = Location.active.get(uuid=origin_uuid)
except (IndexError, Location.DoesNotExist):
return http.HttpNotFound(
_("The URL provided '%(url)s' was not a link to a valid Location.")
% {"url": origin_uri}
)
# For each file in files, call move to/from
for sip_file in data["files"]:
source_path = sip_file.get("source", None)
destination_path = sip_file.get("destination", None)
if not all([source_path, destination_path]):
return http.HttpBadRequest()
return move_files_fn(data["files"], origin_location, destination_location)
@_custom_endpoint(expected_methods=["post"])
def post_detail_async(self, request, *args, **kwargs):
"""
Moves files to this Location. Return an async response (202 code) on
success.
See _handle_location_file_move for a description of the expected request
format.
"""
def move_files(files, origin_location, destination_location):
"""Move our list of files in a background task, returning a HTTP Accepted response."""
def task():
self._move_files_between_locations(
files, origin_location, destination_location
)
return _("Files moved successfully")
async_task = AsyncManager.run_task(task)
response = http.HttpAccepted()
response["Location"] = reverse(
"api_dispatch_detail",
kwargs={
"api_name": "v2",
"resource_name": "async",
"id": async_task.id,
},
)
return response
return self._handle_location_file_move(move_files, request, *args, **kwargs)
def post_detail(self, request, *args, **kwargs):
""" Moves files to this Location.
See _handle_location_file_move for a description of the expected request
format.
"""
def move_files(files, origin_location, destination_location):
"""Move our list of files synchronously, returning a HTTP Created response."""
self._move_files_between_locations(
files, origin_location, destination_location
)
response = {"error": None, "message": _("Files moved successfully")}
return self.create_response(request, response)
return self._handle_location_file_move(move_files, request, *args, **kwargs)
def sword_collection(self, request, **kwargs):
try:
location = Location.objects.get(uuid=kwargs["uuid"])
except Location.DoesNotExist:
location = None
if location and (
location.purpose != Location.SWORD_DEPOSIT
or location.space.access_protocol != Space.FEDORA
):
return http.HttpBadRequest(_("This is not a SWORD server space."))
self.log_throttled_access(request)
return sword_views.collection(request, location or kwargs["uuid"])
class PackageResource(ModelResource):
""" Resource for managing Packages.
List (api/v1/file/) supports:
GET: List of files
POST: Create new Package
Detail (api/v1/file/<uuid>/) supports:
GET: Get details on a specific file
Download package (/api/v1/file/<uuid>/download/) supports:
GET: Get package as download
Extract file (/api/v1/file/<uuid>/extract_file/) supports:
GET: Extract file from package (param "relative_path_to_file" specifies which file)
api/v1/file/<uuid>/delete_aip/ supports:
POST: Create a delete request for that AIP.
Validate fixity (api/v1/file/<uuid>/check_fixity/) supports:
GET: Scan package for fixity
Compress package (api/v1/file/<uuid>/compress/) supports:
PUT: Compress an existing Package
"""
origin_pipeline = fields.ForeignKey(PipelineResource, "origin_pipeline")
origin_location = fields.ForeignKey(LocationResource, None, use_in=lambda x: False)
origin_path = fields.CharField(use_in=lambda x: False)
current_location = fields.ForeignKey(LocationResource, "current_location")
current_full_path = fields.CharField(attribute="full_path", readonly=True)
related_packages = fields.ManyToManyField("self", "related_packages", null=True)
replicated_package = fields.ForeignKey(
"self", "replicated_package", null=True, blank=True, readonly=True
)
replicas = fields.ManyToManyField(
"self", "replicas", null=True, blank=True, readonly=True
)
default_location_regex = re.compile(
r"\/api\/v2\/location\/default\/(?P<purpose>[A-Z]{2})\/?"
)
class Meta:
queryset = Package.objects.all()
authentication = MultiAuthentication(
BasicAuthentication(), ApiKeyAuthentication(), SessionAuthentication()
)
authorization = DjangoAuthorization()
# validation = CleanedDataFormValidation(form_class=PackageForm)
#
# Note that this resource is exposed as 'file' to the API for
# compatibility because the resource itself was originally under
# that name.
resource_name = "file"
fields = [
"current_path",
"package_type",
"size",
"status",
"uuid",
"related_packages",
"misc_attributes",
"replicated_package",
"replicas",
]
list_allowed_methods = ["get", "post"]
detail_allowed_methods = ["get", "put", "patch"]
allowed_patch_fields = ["reingest"] # for customized update_in_place
detail_uri_name = "uuid"
always_return_data = True
filtering = {
"current_location": ALL_WITH_RELATIONS,
"package_type": ALL,
"path": ALL,
"uuid": ALL,
"status": ALL,
"related_packages": ALL_WITH_RELATIONS,
}
def prepend_urls(self):
return [
url(
r"^(?P<resource_name>%s)/async%s$"
% (self._meta.resource_name, trailing_slash()),
self.wrap_view("obj_create_async"),
name="obj_create_async",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/delete_aip%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("delete_aip_request"),
name="delete_aip_request",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/recover_aip%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("recover_aip_request"),
name="recover_aip_request",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/extract_file%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("extract_file_request"),
name="extract_file_request",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/download/(?P<chunk_number>\d+)%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("download_request"),
name="download_lockss",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/download%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("download_request"),
name="download_request",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/pointer_file%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("pointer_file_request"),
name="pointer_file_request",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/check_fixity%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("check_fixity_request"),
name="check_fixity_request",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/compress%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("compress_request"),
name="compress_request",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/send_callback/post_store%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("aip_store_callback_request"),
name="aip_store_callback_request",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/contents%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("manage_contents"),
name="manage_contents",
),
url(
r"^(?P<resource_name>%s)/metadata%s$"
% (self._meta.resource_name, trailing_slash()),
self.wrap_view("file_data"),
name="file_data",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/reindex%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("reindex_request"),
name="reindex_request",
),
# Reingest
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/reingest%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("reingest_request"),
name="reingest_request",
),
# Move
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/move%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("move_request"),
name="move_request",
),
# FEDORA/SWORD2 endpoints
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/sword%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("sword_deposit"),
name="sword_deposit",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/sword/media%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("sword_deposit_media"),
name="sword_deposit_media",
),
url(
r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)/sword/state%s$"
% (
self._meta.resource_name,
self._meta.detail_uri_name,
trailing_slash(),
),
self.wrap_view("sword_deposit_state"),
name="sword_deposit_state",
),
]
def dehydrate_misc_attributes(self, bundle):
"""Customize serialization of misc_attributes."""
# Serialize JSONField as dict, not as repr of a dict
return bundle.obj.misc_attributes
def dehydrate(self, bundle):
"""Add an encrypted boolean key to the returned package indicating
whether it is encrypted.
"""
encrypted = False
space = bundle.obj.current_location.space
if space.access_protocol == Space.GPG:
encrypted = True
bundle.data["encrypted"] = encrypted
return bundle
def hydrate_current_location(self, bundle):
"""Customize unserialization of current_location.
If current_location uses the default location form (i.e. if it matches the
regular expression ``default_location_regex``), this method augments
its value by converting it into the absolute path of the location being
referenced, which is the expected form internally.
This method is invoked in Tastypie's hydrate cycle.
E.g.: ``/api/v2/location/default/DS/`` becomes:
``/api/v2/location/363f42ea-905d-40f5-a2e8-1b6b9c122629/`` or similar.
"""
try:
current_location = bundle.data["current_location"]
except KeyError:
return bundle
matches = self.default_location_regex.match(current_location)
try:
purpose = matches.group("purpose")
except AttributeError:
LOGGER.debug(
"`current_location` was not matched by `default_location_regex`"
)
return bundle
try:
name = "default_{}_location".format(purpose)
uuid = Settings.objects.get(name=name).value
except (Settings.DoesNotExist, KeyError):
LOGGER.debug(
"`current_location` had the form of a default location (purpose %s) but the setting `%s` was not found",
purpose,
name,
)
return bundle
location_path = reverse(
"api_dispatch_detail",
kwargs={"api_name": "v2", "resource_name": "location", "uuid": uuid},
)
LOGGER.info("`current_location` was augmented: `%s`", location_path)
bundle.data["current_location"] = location_path
return bundle
def _store_bundle(self, bundle):
"""
Synchronously store a bundle. May be called from the request thread, or as
an async task.
"""
related_package_uuid = bundle.data.get("related_package_uuid")
# IDEA add custom endpoints, instead of storing all AIPS that come in?
origin_location_uri = bundle.data.get("origin_location")
origin_location = self.origin_location.build_related_resource(
origin_location_uri, bundle.request
).obj
origin_path = bundle.data.get("origin_path")
if bundle.obj.package_type in (
Package.AIP,
Package.AIC,
Package.DIP,
) and bundle.obj.current_location.purpose in (
Location.AIP_STORAGE,
Location.DIP_STORAGE,
):
# Store AIP/AIC
events = bundle.data.get("events", [])
agents = bundle.data.get("agents", [])
aip_subtype = bundle.data.get("aip_subtype", None)
bundle.obj.store_aip(
origin_location,
origin_path,
related_package_uuid,
premis_events=events,
premis_agents=agents,
aip_subtype=aip_subtype,
)
elif bundle.obj.package_type in (
Package.TRANSFER,
) and bundle.obj.current_location.purpose in (Location.BACKLOG,):
# Move transfer to backlog
bundle.obj.backlog_transfer(origin_location, origin_path)
def obj_create_async(self, request, **kwargs):
"""
Create a new Package model instance. Called when a POST request is made
to api/v2/file/async/.
Returns a HTTP 202 response immediately, along with a redirect to a URL
for polling for job completion.
"""
try:
self.method_check(request, allowed=["post"])
self.is_authenticated(request)
self.throttle_check(request)
self.log_throttled_access(request)
deserialized = self.deserialize(
request,
request.body,
format=request.META.get("CONTENT_TYPE", "application/json"),
)
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(
data=dict_strip_unicode_keys(deserialized), request=request
)
bundle = super(PackageResource, self).obj_create(bundle, **kwargs)
def task():
self._store_bundle(bundle)
new_bundle = self.full_dehydrate(bundle)
new_bundle = self.alter_detail_data_to_serialize(request, new_bundle)
return new_bundle.data
async_task = AsyncManager.run_task(task)
response = http.HttpAccepted()
response["Location"] = reverse(
"api_dispatch_detail",
kwargs={
"api_name": "v2",
"resource_name": "async",
"id": async_task.id,
},
)
return response
except Exception as e:
LOGGER.warning("Failure in obj_create_async: %s" % e)
raise e
def obj_create(self, bundle, **kwargs):
"""
Create a new Package model instance. Called when a POST request is
made to api/v2/file/.
"""
bundle = super(PackageResource, self).obj_create(bundle, **kwargs)
self._store_bundle(bundle)
return bundle
def obj_update(self, bundle, skip_errors=False, **kwargs):
"""
Modified version of the Django ORM implementation of obj_update.
Identical to original function except obj_update_hook added between hydrating the data and saving the object.
"""
if not bundle.obj or not self.get_bundle_detail_data(bundle):
try:
lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
except Exception:
# if there is trouble hydrating the data, fall back to just
# using kwargs by itself (usually it only contains a "pk" key
# and this will work fine.
lookup_kwargs = kwargs
try:
bundle.obj = self.obj_get(bundle=bundle, **lookup_kwargs)
except ObjectDoesNotExist:
raise tastypie.exceptions.NotFound(
_(
"A model instance matching the provided arguments could not be found."
)
)
bundle = self.full_hydrate(bundle)
bundle = self.obj_update_hook(bundle, **kwargs)
return self.save(bundle, skip_errors=skip_errors)
def obj_update_hook(self, bundle, **kwargs):
"""
Hook to update Package and move files around before package is saved.
bundle.obj has been updated, but not yet saved.
"""
# PATCH should be only for updating metadata, not actually moving files.
# Don't do any additional processing.
if bundle.request.method == "PATCH":
# Update reingest - should only be notifications of done/failed
if "reingest" in bundle.data:
bundle.obj.misc_attributes.update({"reingest_pipeline": None})
return bundle
origin_location_uri = bundle.data.get("origin_location")
origin_path = bundle.data.get("origin_path")
events = bundle.data.get("events", [])
agents = bundle.data.get("agents", [])
aip_subtype = bundle.data.get("aip_subtype", None)
if origin_location_uri and origin_path:
# Sending origin information implies that the package should be copied from there
origin_location = self.origin_location.build_related_resource(
origin_location_uri, bundle.request
).obj
if (
bundle.obj.package_type in (Package.AIP, Package.AIC)
and bundle.obj.current_location.purpose in (Location.AIP_STORAGE,)
and "reingest" in bundle.data
):
# AIP Reingest
# Reset the current Location & path to original values
# Package.finish_reingest will update them if successful
original_package = self._meta.queryset.get(uuid=bundle.obj.uuid)
bundle.obj.current_path = original_package.current_path
bundle.obj.current_location = original_package.current_location
reingest_location = self.origin_location.build_related_resource(
bundle.data["current_location"], bundle.request
).obj
reingest_path = bundle.data["current_path"]
bundle.obj.finish_reingest(
origin_location,
origin_path,
reingest_location,
reingest_path,
premis_events=events,
premis_agents=agents,
aip_subtype=aip_subtype,
)
return bundle
def update_in_place(self, request, original_bundle, new_data):
"""
Update the object in original_bundle in-place using new_data.
Overridden to restrict what fields can be updated to only
`allowed_patch_fields`.
"""
# From http://stackoverflow.com/questions/13704344/tastypie-where-to-restrict-fields-that-may-be-updated-by-patch
if set(new_data.keys()) - set(self._meta.allowed_patch_fields):
raise tastypie.exceptions.BadRequest(
_("PATCH only allowed on %(fields)s")
% {"fields": ", ".join(self._meta.allowed_patch_fields)}
)
return super(PackageResource, self).update_in_place(
request, original_bundle, new_data
)
@_custom_endpoint(
expected_methods=["post"],
required_fields=("event_reason", "pipeline", "user_id", "user_email"),
)
def delete_aip_request(self, request, bundle, **kwargs):
"""Create a delete request for an AIP. Does not perform the deletion."""
request_info = bundle.data
package = bundle.obj
if package.package_type not in Package.PACKAGE_TYPE_CAN_DELETE:
# Can only request deletion on AIPs
response = {"message": _("Deletes not allowed on this package type.")}
response_json = json.dumps(response)
return http.HttpMethodNotAllowed(
response_json, content_type="application/json"
)
(status_code, response) = self._attempt_package_request_event(
package, request_info, Event.DELETE, Package.DEL_REQ
)
if status_code == 202:
# This isn't configured by default
site_url = getattr(settings, "SITE_BASE_URL", None)
signals.deletion_request.send(
sender=self,
url=site_url,
uuid=package.uuid,
location=package.full_path,
pipeline=request_info["pipeline"],
)
else:
response = {"message": _("A deletion request already exists for this AIP.")}
self.log_throttled_access(request)
response_json = json.dumps(response)
return http.HttpResponse(
status=status_code, content=response_json, content_type="application/json"
)
@_custom_endpoint(
expected_methods=["post"],
required_fields=("event_reason", "pipeline", "user_id", "user_email"),
)
def recover_aip_request(self, request, bundle, **kwargs):
request_info = bundle.data
package = bundle.obj
if package.package_type not in Package.PACKAGE_TYPE_CAN_RECOVER:
# Can only request recovery of AIPs
response = {"message": _("Recovery not allowed on this package type.")}
response_json = json.dumps(response)
return http.HttpMethodNotAllowed(
response_json, content_type="application/json"
)
(status_code, response) = self._attempt_package_request_event(
package, request_info, Event.RECOVER, Package.RECOVER_REQ
)
self.log_throttled_access(request)
response_json = json.dumps(response)
return http.HttpResponse(
status=status_code, content=response_json, content_type="application/json"
)
@_custom_endpoint(expected_methods=["get", "head"])
def extract_file_request(self, request, bundle, **kwargs):
"""Return a single file from the Package, extracting if necessary."""
# NOTE this responds to HEAD because AtoM uses HEAD to check for the existence of a file.
# The storage service has no way to check if a file exists except by downloading and extracting this AIP
# TODO this needs to be fixed so that HEAD is not identical to GET
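# Illustrative request (hypothetical paths): extracting a single file from a
# stored AIP could look like
#   GET /api/v2/file/<uuid>/extract_file/?relative_path_to_file=<aip-name>/data/objects/photo.jpg
# The response streams that file; for uncompressed packages a leading AIP
# directory name in the parameter is tolerated and stripped below.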
relative_path_to_file = request.GET.get("relative_path_to_file")
if not relative_path_to_file:
return http.HttpBadRequest(
_("All of these fields must be provided: relative_path_to_file")
)
relative_path_to_file = urllib.unquote(relative_path_to_file)
temp_dir = extracted_file_path = ""
# Get Package details
package = bundle.obj
# Handle package name duplication in path for compressed packages
if not package.is_compressed:
full_path = package.fetch_local_path()
# The basename of the AIP may be included with the request, because
# all AIPs contain a base directory. That directory may already be
# inside the full path though, so remove the basename only if the
# relative path begins with it.
basename = os.path.join(os.path.basename(full_path), "")
if relative_path_to_file.startswith(basename):
relative_path_to_file = relative_path_to_file.replace(basename, "", 1)
# Check if the package is in Arkivum and not actually there
if package.current_location.space.access_protocol == Space.ARKIVUM:
is_local = package.current_location.space.get_child_space().is_file_local(
package,
path=relative_path_to_file,
email_nonlocal=request.method == "GET",
)
if is_local is False:
# Need to fetch from tape, return 202
return http.HttpAccepted(
json.dumps(
{
"error": False,
"message": _(
"File is not locally available. Contact your storage administrator to fetch it."
),
}
)
)
if is_local is None:
# Arkivum error, return 502
return http.HttpResponse(
json.dumps(
{
"error": True,
"message": _(
"Error checking if file in Arkivum in locally available."
),
}
),
content_type="application/json",
status=502,
)
# If local file exists - return that
if not package.is_compressed:
extracted_file_path = os.path.join(full_path, relative_path_to_file)
if not os.path.exists(extracted_file_path):
return http.HttpResponse(
status=404,
content=_("Requested file, %(filename)s, not found in AIP")
% {"filename": relative_path_to_file},
)
elif package.package_type in Package.PACKAGE_TYPE_CAN_EXTRACT:
# If file doesn't exist, try to extract it
(extracted_file_path, temp_dir) = package.extract_file(
relative_path_to_file
)
else:
# If the package is compressed and we can't extract it,
return http.HttpResponse(
status=501,
content=_("Unable to extract package of type: %(typename)s")
% {"typename": package.package_type},
)
response = utils.download_file_stream(extracted_file_path, temp_dir)
return response
@_custom_endpoint(expected_methods=["get", "head"])
def download_request(self, request, bundle, **kwargs):
"""Return the entire Package to be downloaded."""
# NOTE this responds to HEAD because AtoM uses HEAD to check for the existence of a package.
# The storage service has no way to check if the package still exists except by downloading it
# TODO this needs to be fixed so that HEAD is not identical to GET
# Get AIP details
package = bundle.obj
# Check if the package is in Arkivum and not actually there
if package.current_location.space.access_protocol == Space.ARKIVUM:
is_local = package.current_location.space.get_child_space().is_file_local(
package, email_nonlocal=request.method == "GET"
)
if is_local is False:
# Need to fetch from tape, return 202
return http.HttpAccepted(
json.dumps(
{
"error": False,
"message": _(
"File is not locally available. Contact your storage administrator to fetch it."
),
}
)
)
if is_local is None:
# Arkivum error, return 502
return http.HttpResponse(
json.dumps(
{
"error": True,
"message": _(
"Error checking if file in Arkivum in locally available."
),
}
),
content_type="application/json",
status=502,
)
lockss_au_number = kwargs.get("chunk_number")
try:
temp_dir = None
full_path = package.get_download_path(lockss_au_number)
except StorageException:
full_path, temp_dir = package.compress_package(utils.COMPRESSION_TAR)
response = utils.download_file_stream(full_path, temp_dir)
return response
@_custom_endpoint(expected_methods=["get"])
def pointer_file_request(self, request, bundle, **kwargs):
"""Return AIP pointer file."""
# Get AIP details
pointer_path = bundle.obj.full_pointer_file_path
if not pointer_path:
response = http.HttpNotFound(
_("Resource with UUID %(uuid)s does not have a pointer file")
% {"uuid": bundle.obj.uuid}
)
else:
response = utils.download_file_stream(pointer_path)
return response
@_custom_endpoint(expected_methods=["get"])
def check_fixity_request(self, request, bundle, **kwargs):
"""
Check a package's bagit/fixity.
:param force_local: GET parameter. If True, will ignore any space-specific bagit checks and run it locally.
"""
force_local = False
if request.GET.get("force_local") in ("True", "true", "1"):
force_local = True
report_json, report_dict = bundle.obj.get_fixity_check_report_send_signals(
force_local=force_local
)
return http.HttpResponse(report_json, content_type="application/json")
@_custom_endpoint(expected_methods=["put"])
def compress_request(self, request, bundle, **kwargs):
"""Compress an existing package.
PUT /api/v1/file/<uuid>/compress/
"""
return http.HttpResponse(
{"response": "You want to compress package {}".format(bundle.obj.uuid)},
content_type="application/json",
)
@_custom_endpoint(expected_methods=["get"])
def aip_store_callback_request(self, request, bundle, **kwargs):
package = bundle.obj
callbacks = Callback.objects.filter(event="post_store", enabled=True)
if len(callbacks) == 0:
return http.HttpNoContent()
fail = 0
if package.is_compressed:
# Don't extract the entire AIP, which could take forever;
# instead, just extract bagit.txt and manifest-sha512.txt,
# which is enough to get bag.entries with the
# precalculated sha512 checksums
try:
basedir = package.get_base_directory()
# Currently we only support this for local packages.
except NotImplementedError:
return http.HttpNoContent()
__, tmpdir = package.extract_file(os.path.join(basedir, "bagit.txt"))
package.extract_file(
os.path.join(basedir, "manifest-sha512.txt"), extract_path=tmpdir
)
package_dir = os.path.join(tmpdir, basedir)
else:
package_dir = package.full_path
tmpdir = None
safe_files = ("bag-info.txt", "manifest-sha512.txt", "bagit.txt")
bag = bagit.Bag(package_dir)
for f, checksums in bag.entries.items():
try:
cksum = checksums["sha512"]
except KeyError:
# These files do not typically have an sha512 hash, so it's
# fine for these to be missing that key; every other file should.
if f not in safe_files:
LOGGER.warning("Post-store callback: sha512 missing for file %s", f)
continue
files = File.objects.filter(checksum=cksum, stored=False)
if len(files) > 1:
LOGGER.warning("Multiple File entries found for sha512 %s", cksum)
for file_ in files:
for callback in callbacks:
uri = callback.uri.replace("<source_id>", file_.source_id)
body = callback.body.replace("<source_id>", file_.source_id)
try:
callback.execute(uri, body)
file_.stored = True
file_.save()
except CallbackError:
fail += 1
if tmpdir is not None:
shutil.rmtree(tmpdir)
if fail > 0:
response = {
"message": _("Failed to POST %(count)d responses to callback URI")
% {"count": fail},
"failure_count": fail,
"callback_uris": [c.uri for c in callbacks],
}
return http.HttpApplicationError(
json.dumps(response), content_type="application/json"
)
else:
return http.HttpNoContent()
@_custom_endpoint(expected_methods=["post"])
def reindex_request(self, request, bundle, **kwargs):
"""Index file data from the Package transfer METS file."""
package = bundle.obj
if package.package_type != Package.TRANSFER:
return http.HttpBadRequest(
json.dumps(
{"error": True, "message": _("This package is not a transfer.")}
),
content_type="application/json",
)
if package.current_location.purpose != Location.BACKLOG:
return http.HttpBadRequest(
json.dumps(
{
"error": True,
"message": _("This package is not in transfer backlog."),
}
),
content_type="application/json",
)
try:
package.index_file_data_from_transfer_mets() # Create File entries for every file in the transfer
except Exception as e:
LOGGER.warning(
"An error occurred while reindexing the Transfer: %s",
str(e),
exc_info=True,
)
return http.HttpApplicationError(
json.dumps(
{
"error": True,
"message": _(
"An error occurred while reindexing the Transfer."
),
}
),
content_type="application/json",
)
count = File.objects.filter(package=package).count()
response = {
"error": False,
"message": _("Files indexed: %(count)d") % {"count": count},
}
return http.HttpResponse(
content=json.dumps(response), content_type="application/json"
)
@_custom_endpoint(
expected_methods=["post"], required_fields=("pipeline", "reingest_type")
)
def reingest_request(self, request, bundle, **kwargs):
"""Request to reingest an AIP."""
try:
pipeline = Pipeline.objects.get(uuid=bundle.data["pipeline"])
except (Pipeline.DoesNotExist, Pipeline.MultipleObjectsReturned):
response = {
"error": True,
"message": _("Pipeline UUID %(uuid)s failed to return a pipeline")
% {"uuid": bundle.data["pipeline"]},
}
return self.create_response(
request, response, response_class=http.HttpBadRequest
)
reingest_type = bundle.data["reingest_type"]
processing_config = bundle.data.get("processing_config", "default")
response = bundle.obj.start_reingest(pipeline, reingest_type, processing_config)
status_code = response.get("status_code", 500)
return self.create_response(request, response, status=status_code)
@_custom_endpoint(expected_methods=["post"])
def move_request(self, request, bundle, **kwargs):
"""Request to move a stored AIP.
Called when a POST request is made to api/v2/file/UUID/move/ with a location_uuid
parameter with the UUID of the location that the AIP should be moved to.
"""
package = bundle.obj
if package.status != Package.UPLOADED:
response = {
"error": True,
"message": _(
"The file must be in an %s state to be moved. "
"Current state: %s" % (Package.UPLOADED, package.status)
),
}
return self.create_response(
request, response, response_class=http.HttpBadRequest
)
location_uuid = request.POST.get("location_uuid")
if not location_uuid:
return http.HttpBadRequest(
_("All of these fields must be provided: " "location_uuid")
)
try:
location = Location.objects.get(uuid=location_uuid)
except (Location.DoesNotExist, Location.MultipleObjectsReturned):
response = {
"error": True,
"message": _(
"Location UUID %(uuid)s \
failed to return a location"
)
% {"uuid": location_uuid},
}
return self.create_response(
request, response, response_class=http.HttpBadRequest
)
if location == package.current_location:
response = {
"error": True,
"message": _(
"New location must be different " "to the current location"
),
}
return self.create_response(
request, response, response_class=http.HttpBadRequest
)
if location.purpose != package.current_location.purpose:
response = {
"error": True,
"message": _(
"New location must have the same purpose as "
"the current location - %s" % package.current_location.purpose
),
}
return self.create_response(
request, response, response_class=http.HttpBadRequest
)
number_matched = Package.objects.filter(
id=package.id, status=Package.UPLOADED
).update(status=Package.MOVING)
if number_matched == 1:
package.refresh_from_db()
else:
response = {
"error": True,
"message": _(
"The package must be in an %s state to be moved. "
"Current state: %s" % (Package.UPLOADED, package.status)
),
}
return self.create_response(
request, response, response_class=http.HttpBadRequest
)
def task():
package.move(location)
package.status = Package.UPLOADED
package.save()
return _("Package moved successfully")
async_task = AsyncManager.run_task(task)
response = http.HttpAccepted()
response["Location"] = reverse(
"api_dispatch_detail",
kwargs={"api_name": "v2", "resource_name": "async", "id": async_task.id},
)
return response
def sword_deposit(self, request, **kwargs):
try:
package = Package.objects.get(uuid=kwargs["uuid"])
except Package.DoesNotExist:
package = None
if package and package.package_type != Package.DEPOSIT:
return http.HttpBadRequest(_("This is not a SWORD deposit location."))
self.log_throttled_access(request)
return sword_views.deposit_edit(request, package or kwargs["uuid"])
def sword_deposit_media(self, request, **kwargs):
try:
package = Package.objects.get(uuid=kwargs["uuid"])
except Package.DoesNotExist:
package = None
if package and package.package_type != Package.DEPOSIT:
return http.HttpBadRequest(_("This is not a SWORD deposit location."))
self.log_throttled_access(request)
return sword_views.deposit_media(request, package or kwargs["uuid"])
def sword_deposit_state(self, request, **kwargs):
try:
package = Package.objects.get(uuid=kwargs["uuid"])
except Package.DoesNotExist:
package = None
if package and package.package_type != Package.DEPOSIT:
return http.HttpBadRequest(_("This is not a SWORD deposit location."))
self.log_throttled_access(request)
return sword_views.deposit_state(request, package or kwargs["uuid"])
def _attempt_package_request_event(
self, package, request_info, event_type, event_status
):
"""Generic package request handler, e.g. package recovery: RECOVER_REQ,
or package deletion: DEL_REQ.
"""
LOGGER.info(
"Package event: '{}' requested, with package status: '{}'".format(
event_type, event_status
)
)
LOGGER.debug(pprint.pformat(request_info))
pipeline = Pipeline.objects.get(uuid=request_info["pipeline"])
request_description = event_type.replace("_", " ").lower()
# See if an event already exists
existing_requests = Event.objects.filter(
package=package, event_type=event_type, status=Event.SUBMITTED
).count()
if existing_requests < 1:
request_event = Event(
package=package,
event_type=event_type,
status=Event.SUBMITTED,
event_reason=request_info["event_reason"],
pipeline=pipeline,
user_id=request_info["user_id"],
user_email=request_info["user_email"],
store_data=package.status,
)
request_event.save()
response = {
"message": _("%(event_type)s request created successfully.")
% {"event_type": request_description.title()},
"id": request_event.id,
}
status_code = 202
else:
response = {
"error_message": _(
"A %(event_type)s request already exists for this AIP."
)
% {"event_type": request_description}
}
status_code = 200
return (status_code, response)
@_custom_endpoint(expected_methods=["get", "put", "delete"])
def manage_contents(self, request, bundle, **kwargs):
if request.method == "PUT":
return self._add_files_to_package(request, bundle, **kwargs)
elif request.method == "DELETE":
return self._remove_files_from_package(request, bundle, **kwargs)
elif request.method == "GET":
return self._package_contents(request, bundle, **kwargs)
def _remove_files_from_package(self, request, bundle, **kwargs):
"""
Removes all file records associated with this package.
"""
bundle.obj.file_set.all().delete()
return http.HttpNoContent()
def _add_files_to_package(self, request, bundle, **kwargs):
"""
Adds a set of files to a package.
The PUT body must be a list of zero or more JSON objects in the following format:
{
"relative_path": "string",
"fileuuid": "string",
"accessionid", "string",
"sipuuid": "string",
"origin": "string"
}
"""
try:
files_list = json.load(request)
except ValueError:
response = {
"success": False,
"error": _("No JSON object could be decoded from POST body."),
}
return http.HttpBadRequest(
json.dumps(response), content_type="application/json"
)
if not isinstance(files_list, list):
response = {
"success": False,
"error": _("JSON request must contain a list of objects."),
}
return http.HttpBadRequest(
json.dumps(response), content_type="application/json"
)
property_map = {
"relative_path": "name",
"fileuuid": "source_id",
"accessionid": "accessionid",
"sipuuid": "source_package",
"origin": "origin",
}
if len(files_list) == 0:
return http.HttpResponse()
created_files = []
for f in files_list:
kwargs = {"package": bundle.obj}
for source, dest in property_map.items():
try:
kwargs[dest] = f[source]
except KeyError:
response = {
"success": False,
"error": _("File object was missing key: %(key)s")
% {"key": source},
}
return http.HttpBadRequest(
json.dumps(response), content_type="application_json"
)
created_files.append(File(**kwargs))
for f in created_files:
f.save()
response = {
"success": True,
"message": _("%(count)d files created in package %(uuid)s")
% {"count": len(created_files), "uuid": bundle.obj.uuid},
}
return http.HttpCreated(json.dumps(response), content_type="application/json")
def _package_contents(self, request, bundle, **kwargs):
"""
Returns metadata about every file within a specified Storage Service
package, specified via Storage Service UUID.
The file properties provided are the properties of the :class:`~locations.models.event.File` class; see the class definition for more information.
:returns: a JSON object in the following format:
{
"success": True,
"package": "UUID (as string)",
"files": [
# array containing zero or more objects containing
# all of the file's properties, in the format:
{
"source_id": "",
# ...
}
]
}
"""
response = {"success": True, "package": bundle.obj.uuid, "files": []}
for f in bundle.obj.file_set.all():
response["files"].append(
{
attr: getattr(f, attr)
for attr in (
"source_id",
"name",
"source_package",
"checksum",
"accessionid",
"origin",
)
}
)
return http.HttpResponse(
status=200, content=json.dumps(response), content_type="application/json"
)
def file_data(self, request, **kwargs):
"""
Returns file metadata as a JSON array of objects.
This maps properties of the File class to the names of the
Elasticsearch indices' Transferfile index, allowing this to directly
substitute for Elasticsearch when looking up metadata on specific files.
Acceptable parameters are:
* relative_path (searches the `name` field)
* fileuuid (searches the `source_id` field)
* accessionid (searches the `accessionid` field)
* sipuuid (searches the `source_package` field)
:returns: an array of one or more objects. See the transferfile
index for information on the return format.
If no results are found for the specified query, returns 404.
If no acceptable query parameters are found, returns 400.
"""
# Tastypie API checks
self.method_check(request, allowed=["get", "post"])
self.is_authenticated(request)
self.throttle_check(request)
self.log_throttled_access(request)
property_map = {
"relative_path": "name",
"fileuuid": "source_id",
"accessionid": "accessionid",
"sipuuid": "source_package",
}
query = {}
for source, dest in property_map.items():
try:
query[dest] = request.GET[source]
except KeyError:
pass
if not query:
response = {
"success": False,
"error": _("No supported query properties found!"),
}
return http.HttpBadRequest(
content=json.dumps(response), content_type="application/json"
)
files = File.objects.filter(**query)
if not files.exists():
return http.HttpNotFound()
response = []
for f in files:
response.append(
{
"accessionid": f.accessionid,
"file_extension": os.path.splitext(f.name)[1],
"filename": os.path.basename(f.name),
"relative_path": f.name,
"fileuuid": f.source_id,
"origin": f.origin,
"sipuuid": f.source_package,
}
)
return http.HttpResponse(
content=json.dumps(response), content_type="application/json"
)
class AsyncResource(ModelResource):
"""
Represents an async task that may or may not still be running.
"""
class Meta:
queryset = Async.objects.all()
resource_name = "async"
authentication = MultiAuthentication(
BasicAuthentication(), ApiKeyAuthentication(), SessionAuthentication()
)
authorization = DjangoAuthorization()
fields = [
"id",
"completed",
"was_error",
"created_time",
"updated_time",
"completed_time",
]
always_return_data = True
detail_allowed_methods = ["get"]
detail_uri_name = "id"
def dehydrate(self, bundle):
"""Pull out errors and results using our accessors so they get unpickled."""
if bundle.obj.completed:
if bundle.obj.was_error:
bundle.data["error"] = bundle.obj.error
else:
bundle.data["result"] = bundle.obj.result
return bundle
| agpl-3.0 | 889,811,449,057,808,400 | 36.998972 | 200 | 0.543595 | false | 4.56645 | false | false | false |
ZeitOnline/zeit.content.cp | src/zeit/content/cp/browser/workflow.py | 1 | 3104 | from zeit.cms.i18n import MessageFactory as _
import gocept.form.grouped
import zeit.cms.browser.interfaces
import zeit.cms.content.interfaces
import zeit.cms.workflow.interfaces
import zeit.content.cp.interfaces
import zeit.objectlog.interfaces
import zeit.workflow.browser.form
import zope.component
import zope.dublincore.interfaces
import zope.formlib.form
def is_published_and_has_permission(form, action):
return (zeit.workflow.browser.form.is_published(form, action) and
form.request.interaction.checkPermission(
'zeit.content.cp.Retract', form.context))
class CenterPageWorkflowForm(zeit.workflow.browser.form.WorkflowForm):
# same as zeit.workflow.browser.form.ContentWorkflow, except for the
# fields: we use ITimeBasedPublishing instead of IContentWorkflow
zope.component.adapts(
zeit.content.cp.interfaces.ICenterPage,
zeit.cms.browser.interfaces.ICMSLayer)
field_groups = (
gocept.form.grouped.Fields(
_("Status"),
zeit.workflow.browser.form.WorkflowForm.modified_fields,
css_class='column-left'),
gocept.form.grouped.RemainingFields(
_("Settings"), css_class='column-right'),
gocept.form.grouped.Fields(
_("Log"), fields=('logs', ),
css_class='full-width')
)
form_fields = (
zope.formlib.form.FormFields(
zeit.workflow.interfaces.ITimeBasedPublishing,
zeit.objectlog.interfaces.ILog,
zeit.cms.workflow.interfaces.IModified,
zeit.cms.content.interfaces.ISemanticChange).omit(
*zeit.workflow.browser.form.WorkflowForm.omit_fields) +
zope.formlib.form.FormFields(
zope.dublincore.interfaces.IDCTimes, for_display=True).select(
'created'))
@zope.formlib.form.action(_('Save state only'), name='save')
def handle_save_state(self, action, data):
"""Duplicate action from base class, since we overwrite handle_retract.
"""
super(CenterPageWorkflowForm, self).handle_save_state.success(data)
@zope.formlib.form.action(_('Save state and publish now'), name='publish')
def handle_publish(self, action, data):
"""Duplicate action from base class, since we overwrite handle_retract.
"""
super(CenterPageWorkflowForm, self).handle_publish.success(data)
@gocept.form.action.confirm(
_('Save state and retract now'),
name='retract',
confirm_message=_('Really retract? This will remove the object from '
'all channels it is syndicated in and make it '
'unavailable to the public!'),
condition=is_published_and_has_permission)
def handle_retract(self, action, data):
"""Overwrite action to additionally test Retract permission."""
super(CenterPageWorkflowForm, self).handle_retract.success(data)
def get_error_message(self, mapping):
return _('Could not publish ${id} since it has validation errors.',
mapping=mapping)
| bsd-3-clause | -1,244,946,245,865,622,800 | 39.842105 | 79 | 0.667848 | false | 3.949109 | false | false | false |
ACS-Community/ACS | LGPL/CommonSoftware/nsStatisticsService/test/TConsumer.py | 3 | 3170 | #!/usr/bin/env python
# @(#) $Id: TConsumer.py,v 1.4 2015/01/23 16:51:58 pcolomer Exp $
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
#------------------------------------------------------------------------------
from Acspy.Nc.Consumer import Consumer
from time import sleep
import TEST_NS_STATISTICS_SERVICE
import sys
def dataHandler1(params):
print "Consumer1 - ", params.strVal
sleep(1)
return
def dataHandler2(params):
print "Consumer2 - ", params.strVal
sleep(1)
return
def dataHandler3(params):
print "Consumer3 - ", params.strVal
sleep(1)
return
def dataHandler4(params):
print "Consumer4 - ", params.strVal
sleep(1)
return
def main(argv):
consumers = []
wait_sec = int(argv[0])
for ch in argv[1:]:
ch = int(ch)
print "Creating channel %d" % (ch)
consumer = None
if ch == 1:
consumer = Consumer(TEST_NS_STATISTICS_SERVICE.CHANNEL_1)
consumer.addSubscription(TEST_NS_STATISTICS_SERVICE.Test1EventData,handler_function=dataHandler1)
elif ch == 2:
consumer = Consumer(TEST_NS_STATISTICS_SERVICE.CHANNEL_2)
consumer.addSubscription(TEST_NS_STATISTICS_SERVICE.Test1EventData,handler_function=dataHandler2)
elif ch == 3:
consumer = Consumer(TEST_NS_STATISTICS_SERVICE.CHANNEL_3)
consumer.addSubscription(TEST_NS_STATISTICS_SERVICE.Test1EventData,handler_function=dataHandler3)
elif ch == 4:
consumer = Consumer(TEST_NS_STATISTICS_SERVICE.CHANNEL_4)
consumer.addSubscription(TEST_NS_STATISTICS_SERVICE.Test1EventData,handler_function=dataHandler4)
if consumer is None:
raise ValueError("Unknown channel. Allowed values are from 1 to 4: %d" % (ch))
else:
consumers.append(consumer)
print "Enable %d consumers"%(len(consumers))
for consumer in consumers:
consumer.consumerReady()
if wait_sec > 0:
print "Wait %d seconds"%(wait_sec)
sleep(wait_sec)
# Disconnect consumers
print "Disconnect %d consumers"%(len(consumers))
for consumer in consumers:
consumer.disconnect()
if __name__ == "__main__":
# > TConsumer.py wait_sec ch_id_1 ch_id_2 ... ch_id_N
# Where ch_id can be 1, 2, 3, 4
main(sys.argv[1:])
| lgpl-2.1 | -7,423,310,020,961,331,000 | 32.368421 | 103 | 0.680442 | false | 3.383138 | true | false | false |
minrk/dask | dask/dataframe/io.py | 1 | 11154 | import pandas as pd
import numpy as np
from functools import wraps
import re
import struct
import os
from glob import glob
from math import ceil
from toolz import curry, merge, partial
from itertools import count
import bcolz
from operator import getitem
from ..compatibility import StringIO
from ..utils import textblock
from .core import names, DataFrame, compute, concat, categorize_block
from .shuffle import set_partition
def _StringIO(data):
if isinstance(data, bytes):
data = data.decode()
return StringIO(data)
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
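# The final four bytes of a gzip stream (the ISIZE field) store the
# uncompressed size modulo 2**32, so this shortcut is only exact for
# files under 4 GiB.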
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
else:
result = os.stat(fn).st_size
return result
@wraps(pd.read_csv)
def read_csv(fn, *args, **kwargs):
if '*' in fn:
return concat([read_csv(f, *args, **kwargs) for f in sorted(glob(fn))])
categorize = kwargs.pop('categorize', None)
index = kwargs.pop('index', None)
if index and categorize is None:
categorize = True
compression = kwargs.pop('compression', None)
# Chunk sizes and numbers
chunkbytes = kwargs.pop('chunkbytes', 2**28)  # 256 MB
total_bytes = file_size(fn, compression)
nchunks = int(ceil(float(total_bytes) / chunkbytes))
divisions = [None] * (nchunks - 1)
# Let pandas infer on the first 100 rows
head = pd.read_csv(fn, *args, nrows=100, compression=compression, **kwargs)
if 'names' not in kwargs:
kwargs['names'] = csv_names(fn, compression=compression, **kwargs)
if 'header' not in kwargs:
header = infer_header(fn, compression=compression, **kwargs)
if header is True:
header = 0
else:
header = kwargs.pop('header')
if 'parse_dates' not in kwargs:
parse_dates = [col for col in head.dtypes.index
if np.issubdtype(head.dtypes[col], np.datetime64)]
if parse_dates:
kwargs['parse_dates'] = parse_dates
else:
parse_dates = kwargs.get('parse_dates')
if 'dtype' in kwargs:
dtype = kwargs['dtype']
else:
dtype = dict(head.dtypes)
if parse_dates:
for col in parse_dates:
del dtype[col]
kwargs['dtype'] = dtype
first_read_csv = curry(pd.read_csv, *args, header=header, **kwargs)
rest_read_csv = curry(pd.read_csv, *args, header=None, **kwargs)
# Create dask graph
name = next(names)
dsk = dict(((name, i), (rest_read_csv, (_StringIO,
(textblock, fn,
i*chunkbytes, (i+1) * chunkbytes,
compression))))
for i in range(1, nchunks))
dsk[(name, 0)] = (first_read_csv, (_StringIO,
(textblock, fn, 0, chunkbytes, compression)))
result = DataFrame(dsk, name, head.columns, divisions)
if categorize or index:
categories, quantiles = categories_and_quantiles(fn, args, kwargs,
index, categorize,
chunkbytes=chunkbytes)
if categorize:
func = partial(categorize_block, categories=categories)
result = result.map_blocks(func, columns=result.columns)
if index:
result = set_partition(result, index, quantiles)
return result
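# Illustrative usage sketch (file names and column names are hypothetical):
#     df = read_csv('accounts.*.csv')                        # concatenate a glob of files
#     df = read_csv('accounts.csv', index='id', chunkbytes=2**27)
# Passing index= triggers the categorize/quantile pass below and repartitions the
# result on that column via set_partition.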
def infer_header(fn, encoding='utf-8', compression=None, **kwargs):
""" Guess if csv file has a header or not
This uses Pandas to read a sample of the file, then looks at the column
names to see if they are all word-like.
Returns True or False
"""
# See read_csv docs for header for reasoning
try:
        df = pd.read_csv(fn, encoding=encoding, compression=compression, nrows=5)
except StopIteration:
df = pd.read_csv(fn, encoding=encoding, compression=compression)
return (len(df) > 0 and
            all(re.match(r'^\s*\D\w*\s*$', n) for n in df.columns) and
not all(dt == 'O' for dt in df.dtypes))
def csv_names(fn, encoding='utf-8', compression=None, names=None,
parse_dates=None, **kwargs):
try:
df = pd.read_csv(fn, encoding=encoding, compression=compression,
names=names, parse_dates=parse_dates, nrows=5)
except StopIteration:
df = pd.read_csv(fn, encoding=encoding, compression=compression,
names=names, parse_dates=parse_dates)
return list(df.columns)
def categories_and_quantiles(fn, args, kwargs, index=None, categorize=None,
chunkbytes=2**28):
"""
Categories of Object columns and quantiles of index column for CSV
Computes both of the following in a single pass
1. The categories for all object dtype columns
2. The quantiles of the index column
Parameters
----------
fn: string
Filename of csv file
args: tuple
arguments to be passed in to read_csv function
kwargs: dict
keyword arguments to pass in to read_csv function
index: string or None
Name of column on which to compute quantiles
categorize: bool
Whether or not to compute categories of Object dtype columns
"""
kwargs = kwargs.copy()
compression = kwargs.get('compression', None)
total_bytes = file_size(fn, compression)
nchunks = int(ceil(float(total_bytes) / chunkbytes))
if infer_header(fn, **kwargs):
kwargs['header'] = 0
one_chunk = pd.read_csv(fn, *args, nrows=100, **kwargs)
if categorize is not False:
category_columns = [c for c in one_chunk.dtypes.index
if one_chunk.dtypes[c] == 'O'
and c not in kwargs.get('parse_dates', ())]
else:
category_columns = []
cols = category_columns + ([index] if index else [])
dtypes = dict((c, one_chunk.dtypes[c]) for c in cols)
d = read_csv(fn, *args, **merge(kwargs,
dict(usecols=cols,
parse_dates=None,
dtype=dtypes)))
categories = [d[c].drop_duplicates() for c in category_columns]
import dask
if index:
quantiles = d[index].quantiles(np.linspace(0, 100, nchunks + 1)[1:-1])
result = compute(quantiles, *categories)
quantiles, categories = result[0], result[1:]
else:
categories = compute(*categories)
quantiles = None
categories = dict(zip(category_columns, categories))
return categories, quantiles
from_array_names = ('from-array-%d' % i for i in count(1))
def from_array(x, chunksize=50000):
""" Read dask Dataframe from any slicable array with record dtype
Uses getitem syntax to pull slices out of the array. The array need not be
a NumPy array but must support slicing syntax
x[50000:100000]
and have a record dtype
x.dtype == [('name', 'O'), ('balance', 'i8')]
"""
columns = tuple(x.dtype.names)
divisions = tuple(range(chunksize, len(x), chunksize))
name = next(from_array_names)
dsk = dict(((name, i), (pd.DataFrame,
(getitem, x,
(slice(i * chunksize, (i + 1) * chunksize),))))
for i in range(0, int(ceil(float(len(x)) / chunksize))))
return DataFrame(dsk, name, columns, divisions)
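# Illustrative usage sketch (assumes a NumPy structured array; names are hypothetical):
#     x = np.empty(100000, dtype=[('name', 'O'), ('balance', 'i8')])
#     df = from_array(x, chunksize=50000)   # two blocks of 50000 rows each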
from pframe.categories import reapply_categories
def from_bcolz(x, chunksize=None, categorize=True, index=None, **kwargs):
""" Read dask Dataframe from bcolz.ctable
Parameters
----------
x : bcolz.ctable
Input data
chunksize : int (optional)
The size of blocks to pull out from ctable. Ideally as large as can
comfortably fit in memory
categorize : bool (defaults to True)
Automatically categorize all string dtypes
index : string (optional)
Column to make the index
See Also
--------
from_array: more generic function not optimized for bcolz
"""
import dask.array as da
bc_chunklen = max(x[name].chunklen for name in x.names)
if chunksize is None and bc_chunklen > 10000:
chunksize = bc_chunklen
categories = dict()
if categorize:
for name in x.names:
if (np.issubdtype(x.dtype[name], np.string_) or
np.issubdtype(x.dtype[name], np.unicode_) or
np.issubdtype(x.dtype[name], np.object_)):
a = da.from_array(x[name], chunks=(chunksize*len(x.names),))
categories[name] = da.unique(a)
columns = tuple(x.dtype.names)
divisions = tuple(range(chunksize, len(x), chunksize))
new_name = next(from_array_names)
dsk = dict(((new_name, i),
(dataframe_from_ctable,
x,
(slice(i * chunksize, (i + 1) * chunksize),),
None, categories))
for i in range(0, int(ceil(float(len(x)) / chunksize))))
result = DataFrame(dsk, new_name, columns, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize*len(x.names),))
q = np.linspace(1, 100, len(x) / chunksize + 2)[1:-1]
divisions = da.percentile(a, q).compute()
return set_partition(result, index, divisions, **kwargs)
else:
return result
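# Illustrative usage sketch (a small in-memory ctable; real data would normally be on disk):
#     t = bcolz.ctable([[1, 2, 3, 4], [10., 20., 30., 40.]], names=['a', 'b'])
#     df = from_bcolz(t, chunksize=2)               # two partitions of two rows
#     df = from_bcolz(t, chunksize=2, index='a')    # additionally partition by column 'a'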
def dataframe_from_ctable(x, slc, columns=None, categories=None):
""" Get DataFrame from bcolz.ctable
Parameters
----------
x: bcolz.ctable
slc: slice
columns: list of column names or None
>>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
>>> dataframe_from_ctable(x, slice(1, 3))
a b
0 2 20
1 3 30
>>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
b
0 20
1 30
>>> dataframe_from_ctable(x, slice(1, 3), columns='b')
0 20
1 30
Name: b, dtype: int64
"""
if columns is not None:
if isinstance(columns, tuple):
columns = list(columns)
x = x[columns]
name = next(names)
if isinstance(x, bcolz.ctable):
chunks = [x[name][slc] for name in x.names]
if categories is not None:
chunks = [pd.Categorical.from_codes(np.searchsorted(categories[name],
chunk),
categories[name], True)
if name in categories else chunk
for name, chunk in zip(x.names, chunks)]
return pd.DataFrame(dict(zip(x.names, chunks)))
elif isinstance(x, bcolz.carray):
chunk = x[slc]
if categories is not None and columns and columns in categories:
chunk = pd.Categorical.from_codes(
np.searchsorted(categories[columns], chunk),
categories[columns], True)
return pd.Series(chunk, name=columns)
| bsd-3-clause | -4,378,829,436,264,550,000 | 31.236994 | 81 | 0.584095 | false | 3.860852 | false | false | false |
kentwills/pgctl | pgctl/daemontools.py | 1 | 4975 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from subprocess import CalledProcessError
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from .debug import debug
from .errors import Unsupervised
def svc(args):
"""Wrapper for daemontools svc cmd"""
# svc never writes to stdout.
cmd = ('s6-svc',) + tuple(args)
debug('CMD: %s', cmd)
process = Popen(cmd, stderr=PIPE)
_, error = process.communicate()
if error.startswith('s6-svc: fatal: unable to control '):
raise Unsupervised(cmd, error)
if process.returncode: # pragma: no cover: there's no known way to hit this.
import sys
sys.stderr.write(error)
raise CalledProcessError(process.returncode, cmd)
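# Illustrative usage sketch (service path is hypothetical; '-u' and '-d' are the
# standard s6-svc flags for "bring the service up" / "bring it down"):
#     svc(('-u', 'playground/sweet'))
#     svc(('-d', 'playground/sweet'))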
class SvStat(
namedtuple('SvStat', ['state', 'pid', 'exitcode', 'seconds', 'process'])
):
__slots__ = ()
UNSUPERVISED = 'could not get status, supervisor is down'
INVALID = 'no such service'
def __repr__(self):
format = '{0.state}'
if self.pid is not None:
format += ' (pid {0.pid})'
if self.exitcode is not None:
format += ' (exitcode {0.exitcode})'
if self.seconds is not None:
format += ' {0.seconds} seconds'
if self.process is not None:
format += ', {0.process}'
return format.format(self)
def svstat_string(service_path):
"""Wrapper for daemontools svstat cmd"""
# svstat *always* exits with code zero...
cmd = ('s6-svok', service_path)
process = Popen(cmd, stdout=PIPE, stderr=STDOUT)
status, _ = process.communicate()
assert status == ''
if process.returncode != 0:
return SvStat.UNSUPERVISED
cmd = ('s6-svstat', service_path)
process = Popen(cmd, stdout=PIPE, stderr=STDOUT)
status, _ = process.communicate()
#status is listed per line for each argument
return status
def svstat_parse(svstat_string):
    r'''Parse one line of s6-svstat output into an SvStat. Raw input looks like:
        up (pid 2557675) 172858 seconds, ready 172856 seconds\n
>>> svstat_parse('up (pid 1202562) 100 seconds, ready 10 seconds\n')
ready (pid 1202562) 10 seconds
>>> svstat_parse('up (pid 1202562) 100 seconds\n')
up (pid 1202562) 100 seconds
>>> svstat_parse('down 4334 seconds, normally up, want up')
down 4334 seconds, starting
>>> svstat_parse('down (exitcode 0) 0 seconds, normally up, want up, ready 0 seconds')
down (exitcode 0) 0 seconds, starting
>>> svstat_parse('down 0 seconds, normally up')
down 0 seconds
>>> svstat_parse('up (pid 1202) 1 seconds, want down\n')
up (pid 1202) 1 seconds, stopping
>>> svstat_parse('down 0 seconds, normally up')
down 0 seconds
>>> svstat_parse('down 0 seconds, normally up')
down 0 seconds
>>> svstat_parse('s6-svstat: fatal: unable to read status for wat: No such file or directory')
could not get status, supervisor is down
>>> svstat_parse("s6-svstat: fatal: unable to read status for sweet: Broken pipe\n")
could not get status, supervisor is down
>>> svstat_parse('unable to chdir: file does not exist')
no such service
>>> svstat_parse('totally unpredictable error message')
totally unpredictable error message
'''
status = svstat_string.strip()
debug('RAW : %s', status)
state, status = __get_state(status)
if status.startswith('(pid '):
pid, status = status[5:].rsplit(') ', 1)
pid = int(pid)
else:
pid = None
if status.startswith('(exitcode '):
exitcode, status = status[10:].rsplit(') ', 1)
exitcode = int(exitcode)
else:
exitcode = None
try:
seconds, status = status.split(' seconds', 1)
seconds = int(seconds)
except ValueError:
seconds = None
if ', want up' in status:
process = 'starting'
elif ', want down' in status:
process = 'stopping'
else:
process = None
if status.startswith(', ready '):
state = 'ready'
status = status[8:]
seconds, status = status.split(' seconds', 1)
seconds = int(seconds)
process = None
return SvStat(state, pid, exitcode, seconds, process)
def __get_state(status):
first, rest = status.split(None, 1)
if first in ('up', 'down'):
return first, rest
elif status.startswith('unable to chdir:'):
return SvStat.INVALID, rest
elif (
status.startswith('s6-svstat: fatal: unable to read status for ') and status.endswith((
': No such file or directory',
': Broken pipe',
))
):
# the service has never been started before; it's down.
return SvStat.UNSUPERVISED, ''
else: # unknown errors
return status, ''
def svstat(path):
return svstat_parse(svstat_string(path))
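# Illustrative usage sketch (service path is hypothetical):
#     svstat('playground/sweet')   # -> e.g. "ready (pid 1234) 10 seconds"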
| mit | -713,848,870,300,144,900 | 28.613095 | 99 | 0.61809 | false | 3.823982 | false | false | false |
unixxxx/simplecms | admin/controllers/passwordcontroller.py | 1 | 1097 | import base64
from bottle import jinja2_template as template, request
from models.cmsmodels import Users
import admin.session as withsession
@withsession.app.app.route('/password', method=['GET', 'POST'])
@withsession.issessionactive()
def password():
try:
if request.method == 'GET':
return template('admin/views/password.jinja2')
else:
user = Users.objects.get()
if user and user.password == base64.b64encode(bytes(request.forms.get('oldPassword'), 'UTF8')):
user.password = base64.b64encode(bytes(request.forms.get('newPassword'), 'UTF8'))
user.save()
return template('admin/views/password.jinja2', {"saved": True})
else:
return template('admin/views/password.jinja2',
{"saved": False, "errorMessage": "incorrect password"})
except:
return template('admin/views/password.jinja2',
{"saved": False, "errorMessage": "DB error"})
def initialize():
print('password controller initialized') | mit | -8,460,016,379,660,171,000 | 35.6 | 107 | 0.611668 | false | 4.268482 | false | false | false |
igemsoftware/SYSU-Software2013 | project/Python27/Lib/site-packages/pypm/external/zclockfile.py | 2 | 2619 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import os
import errno
import logging
logger = logging.getLogger("zc.lockfile")
class LockError(Exception):
"""Couldn't get a lock
"""
try:
import fcntl
except ImportError:
try:
import msvcrt
except ImportError:
def _lock_file(file):
raise TypeError('No file-locking support on this platform')
def _unlock_file(file):
raise TypeError('No file-locking support on this platform')
else:
# Windows
def _lock_file(file):
# Lock just the first byte
try:
msvcrt.locking(file.fileno(), msvcrt.LK_NBLCK, 1)
except IOError:
raise LockError("Couldn't lock %r" % file.name)
def _unlock_file(file):
try:
file.seek(0)
msvcrt.locking(file.fileno(), msvcrt.LK_UNLCK, 1)
except IOError:
raise LockError("Couldn't unlock %r" % file.name)
else:
# Unix
_flags = fcntl.LOCK_EX | fcntl.LOCK_NB
def _lock_file(file):
try:
fcntl.flock(file.fileno(), _flags)
except IOError:
raise LockError("Couldn't lock %r" % file.name)
def _unlock_file(file):
# File is automatically unlocked on close
pass
class LockFile:
_fp = None
def __init__(self, path):
self._path = path
fp = open(path, 'w+')
try:
_lock_file(fp)
except:
fp.seek(1)
pid = fp.read().strip()[:20]
fp.close()
if not pid:
pid = 'UNKNOWN'
logger.exception("Error locking file %s; pid=%s", path, pid)
raise
self._fp = fp
fp.write(" %s\n" % os.getpid())
fp.truncate()
fp.flush()
def close(self):
if self._fp is not None:
_unlock_file(self._fp)
self._fp.close()
self._fp = None
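# Illustrative usage sketch (lock file name is hypothetical):
#     lock = LockFile('instance.lock')   # raises LockError if another process holds it
#     try:
#         pass  # ... critical section ...
#     finally:
#         lock.close()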
| mit | -2,048,824,077,317,107,500 | 26.568421 | 78 | 0.524246 | false | 4.272431 | false | false | false |
skython/eXe | rpm-setup.py | 2 | 2490 | #!/usr/bin/python
# setup.py
import glob
import os.path
from distutils.command.install import install
from distutils.core import setup
from exe.engine import version
g_files = { '/usr/share/exe': ["README",
"COPYING",
"NEWS",
"ChangeLog",
"exe/webui/mr_x.gif"]}
g_oldBase = "exe/webui"
g_newBase = "/usr/share/exe"
def dataFiles(dirs, excludes=[]):
"""Recursively get all the files in these directories"""
import os.path
import glob
global dataFiles, g_oldBase, g_newBase, g_files
for file in dirs:
        if not os.path.basename(file).startswith("."):
            if os.path.isfile(file) and os.path.basename(file) not in excludes:
path = file[len(g_oldBase)+1:]
dir = g_newBase + "/" + os.path.dirname(path)
if dir in g_files:
g_files[dir].append(file)
else:
g_files[dir] = [file]
elif os.path.isdir(file):
                dataFiles(glob.glob(file+"/*"), excludes)
dataFiles(["exe/webui/style",
"exe/webui/css",
"exe/webui/docs",
"exe/webui/images",
"exe/webui/schemas",
"exe/webui/scripts",
"exe/webui/templates",
"exe/webui/linux-profile",
"exe/webui/firefox"],
excludes = ["mimetex.64.cgi", "mimetex-darwin.cgi", "mimetex.exe"])
g_oldBase = "exe"
g_newBase = "/usr/share/exe"
dataFiles(["exe/locale"])
g_oldBase = "exe/xului"
g_newBase = "/usr/share/exe"
dataFiles(["exe/xului/scripts",
"exe/xului/templates"])
opts = {
"bdist_rpm": {
"requires": ["python-imaging",]
}
}
setup(name = version.project,
version = version.release,
description = "eLearning XHTML editor",
long_description = """\
The eXe project is an authoring environment to enable teachers to publish
web content without the need to become proficient in HTML or XML markup.
Content generated using eXe can be used by any Learning Management System.
""",
url = "http://exelearning.org",
author = "eXe Project",
author_email = "[email protected]",
license = "GPL",
scripts = ["exe/exe"],
packages = ["exe", "exe.webui", "exe.xului",
"exe.engine", "exe.export"],
data_files = g_files.items(),
options = opts
)
| gpl-2.0 | -2,697,922,290,048,538,600 | 31.763158 | 76 | 0.544578 | false | 3.562232 | false | false | false |
milankl/swm | calc/misc/tempautocorr.py | 1 | 2286 | ## COMPUTE AND PRODUCE TIMESCALE PLOTS
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
def acf(x,l):
""" autocorrelation function of vector x up to lag l."""
return np.array([1]+[np.corrcoef(x[:-i],x[i:])[0,1] for i in range(1,l)])
def findc(x,a):
""" find crossing of vector x with value a."""
return np.argmin(abs(x-a))
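# Quick illustration (not part of the original script): acf() always starts at 1 at
# lag zero, e.g. acf(np.random.randn(1000), 3) is roughly [1., 0., 0.] for white
# noise, and findc(time, 5.0) returns the index of the entry of `time` closest to 5.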
runfolders = [7,8,9,10]
p1 = np.array([300,1920])*1e3
p2 = np.array([2880,1920])*1e3
pi = np.zeros((3,2,2),dtype=np.int) # (u,v,T) x (p1,p2) x (i,j)
## read data
for r,i in zip(runfolders,range(len(runfolders))):
runpath = path+'data/run%04i' % r
param = np.load(runpath+'/param.npy').all()
# find p1 and p2 indices
for ig,g in enumerate(['u','v','T']):
for ip,p in enumerate([p1,p2]):
for ij,xy in enumerate(['x','y']):
pi[ig,ip,ij] = findc(param[xy+'_'+g],p[ij])
ncu = Dataset(runpath+'/u.nc')
ncv = Dataset(runpath+'/v.nc')
nch = Dataset(runpath+'/h.nc')
istart = 0
if i == 0:
u = ncu.variables['u'][istart:,pi[0,:,0],pi[0,:,1]][:,[0,1],[0,1]]
v = ncv.variables['v'][istart:,pi[1,:,0],pi[1,:,1]][:,[0,1],[0,1]]
h = nch.variables['h'][istart:,pi[2,:,0],pi[2,:,1]][:,[0,1],[0,1]]
t = nch.variables['t'][istart:]
else:
u = np.concatenate((u,ncu.variables['u'][1:,pi[0,:,0],pi[0,:,1]][:,[0,1],[0,1]]))
v = np.concatenate((v,ncv.variables['v'][1:,pi[1,:,0],pi[1,:,1]][:,[0,1],[0,1]]))
h = np.concatenate((h,nch.variables['h'][1:,pi[2,:,0],pi[2,:,1]][:,[0,1],[0,1]]))
t = np.hstack((t,nch.variables['t'][1:]))
ncu.close()
ncv.close()
nch.close()
print('run %i read.' % r)
## computation
l = 200 # in 1/4 days
acfs = np.zeros((l,3,2))
for iv,var in enumerate([u,v,h]):
for ip in range(2):
acfs[:,iv,ip] = acf(var[:,ip],l)
dt = t[1]-t[0]
time = np.arange(l)*dt/24/3600
## STORING
dic = dict()
all_var2export = ['time','acfs','p1','p2']
for var in all_var2export:
exec('dic[var] ='+var)
np.save(runpath+'/analysis/acfs.npy',dic)
print('Everything stored.')
| gpl-3.0 | 5,510,185,352,351,144,000 | 28.688312 | 89 | 0.545494 | false | 2.548495 | false | false | false |
sxjscience/mxnet | python/mxnet/optimizer/rmsprop.py | 9 | 7538 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RMSProp optimizer."""
from __future__ import absolute_import
from ..ndarray import (zeros, clip, sqrt, square)
from ..ndarray import (rmsprop_update, rmspropalex_update)
from .optimizer import Optimizer, register
__all__ = ['RMSProp']
@register
class RMSProp(Optimizer):
"""The RMSProp optimizer.
Two versions of RMSProp are implemented:
If ``centered=False``, we follow
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
Tieleman & Hinton, 2012.
For details of the update algorithm see :class:`~mxnet.ndarray.rmsprop_update`.
If ``centered=True``, we follow http://arxiv.org/pdf/1308.0850v5.pdf (38)-(45)
by Alex Graves, 2013.
For details of the update algorithm see :class:`~mxnet.ndarray.rmspropalex_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
learning_rate : float, default 0.001
The initial learning rate. If None, the optimization will use the
learning rate from ``lr_scheduler``. If not None, it will overwrite
the learning rate in ``lr_scheduler``. If None and ``lr_scheduler``
is also None, then it will be set to 0.01 by default.
rho: float, default 0.9
A decay factor of moving average over past squared gradient.
momentum: float, default 0.9
Heavy ball momentum factor. Only used if `centered`=``True``.
epsilon : float, default 1e-8
Small value to avoid division by 0.
centered : bool, default False
Flag to control which version of RMSProp to use.::
True: will use Graves's version of `RMSProp`,
False: will use Tieleman & Hinton's version of `RMSProp`.
clip_weights : float, optional
Clips weights into range ``[-clip_weights, clip_weights]``.
use_fused_step : bool, default True
Whether or not to use fused kernels for optimizer.
When use_fused_step=False, step is called,
otherwise, fused_step is called.
"""
def __init__(self, learning_rate=0.001, rho=0.9, momentum=0.9,
epsilon=1e-8, centered=False, clip_weights=None,
use_fused_step=True, **kwargs):
super(RMSProp, self).__init__(learning_rate=learning_rate,
use_fused_step=use_fused_step,
**kwargs)
self.rho = rho
self.momentum = momentum
self.centered = centered
self.epsilon = epsilon
self.clip_weights = clip_weights
def create_state(self, index, weight):
if self.centered:
return (
zeros(weight.shape, weight.context, stype=weight.stype), # mean
zeros(weight.shape, weight.context, stype=weight.stype), # var
zeros(weight.shape, weight.context, stype=weight.stype)) # mom
else:
return zeros(weight.shape, weight.context, stype=weight.stype) # var
def step(self, indices, weights, grads, states):
"""Perform an optimization step using gradients and states.
Parameters
----------
indices : list of int
List of unique indices of the parameters into the individual learning rates
and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
and `set_wd_mult()`, respectively.
weights : list of NDArray
List of parameters to be updated.
grads : list of NDArray
List of gradients of the objective with respect to this parameter.
states : List of any obj
List of state returned by `create_state()`.
"""
for index, weight, grad, state in zip(indices, weights, grads, states):
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
# preprocess grad
grad *= self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, - self.clip_gradient, self.clip_gradient)
grad += wd * weight
if not self.centered:
# update var
var = state
var[:] *= self.rho
var[:] += (1 - self.rho) * square(grad)
# update weight
d = grad / (sqrt(var) + self.epsilon)
weight[:] -= lr * d
else:
# update mean, var, mom
mean, var, mom = state
mean[:] *= self.rho
mean[:] += (1 - self.rho) * grad
var[:] *= self.rho
var[:] += (1 - self.rho) * square(grad)
mom[:] *= self.momentum
mom[:] -= lr * grad / sqrt(var - square(mean) + self.epsilon)
# update weight
weight[:] += mom
if self.clip_weights:
clip(weight, -self.clip_weights, self.clip_weights, out=weight)
def fused_step(self, indices, weights, grads, states):
"""Perform a fused optimization step using gradients and states.
Fused kernel is used for update.
Parameters
----------
indices : list of int
List of unique indices of the parameters into the individual learning rates
and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
and `set_wd_mult()`, respectively.
weights : list of NDArray
List of parameters to be updated.
grads : list of NDArray
List of gradients of the objective with respect to this parameter.
states : List of any obj
List of state returned by `create_state()`.
"""
for index, weight, grad, state in zip(indices, weights, grads, states):
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'rho': self.rho, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.centered:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if self.clip_weights:
kwargs['clip_weights'] = self.clip_weights
# update weight with fused kernel
if not self.centered:
var = state
rmsprop_update(weight, grad, var, out=weight, lr=lr, wd=wd, **kwargs)
else:
mean, var, mom = state
rmspropalex_update(weight, grad, mean, var, mom, out=weight,
lr=lr, wd=wd, **kwargs)
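# Illustrative usage sketch (assumes the Gluon Trainer API; `mx` and `net` are not
# defined in this module):
#     opt = RMSProp(learning_rate=0.001, rho=0.9, centered=True)
#     trainer = mx.gluon.Trainer(net.collect_params(), opt)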
| apache-2.0 | -7,770,516,226,256,501,000 | 40.646409 | 93 | 0.591536 | false | 4.287827 | false | false | false |
dgoldman916/nyu-python | flask_blog/app.py | 1 | 3964 | from flask import Flask, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import secure_filename
from datetime import datetime
from random import choice
from functools import wraps
import os
app = Flask(__name__)
UPLOAD_FOLDER = 'static/'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Blogpost(db.Model):
userid = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(50))
author = db.Column(db.String(20))
date_posted = db.Column(db.DateTime)
content = db.Column(db.Text)
image = db.Column(db.Text)
class User:
def __init__(self, username, password):
self.session = {}
self.username = username
self.password = password
dean = User('dean', 'bloggy')
@app.route('/')
def index():
bgs = [ 'blog-bg1.jpg',
'blog-bg2.jpg',
'blog-bg3.jpg',
'blog-bg4.jpg',
'blog-bg5.jpg',]
bg = choice(bgs)
posts = Blogpost.query.order_by(Blogpost.date_posted.desc()).all()
return render_template('index.html', posts=posts, filename=bg)
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in dean.session:
return test(*args, **kwargs)
else:
return redirect(url_for('login'))
return wrap
@app.route("/signin", methods = ['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
        if request.form['username'] == dean.username and request.form['password'] == dean.password:
dean.session['logged_in'] = True
return redirect(url_for('add'))
else:
error = "Wrong username or password"
return render_template("login.html", error=error)
@app.route('/add')
@login_required
def add():
bgs = ['add-bg1.jpg','add-bg2.jpg','add-bg3.jpg']
bg = choice(bgs)
return render_template('add.html',filename=bg)
@app.route('/post/<int:post_id>')
def post(post_id):
post = Blogpost.query.filter_by(userid=post_id).one()
print('{} \n{} \n{} \n {}\n'.format(post.author, post.title, post.content, post.image))
if post.image:
filename= post.image
else:
filename='home-bg.jpg'
return render_template('post.html', post=post, filename=filename)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/addpost', methods=['POST'])
def addpost():
title = request.form['title']
author = request.form['author']
content = request.form['content']
if request.method == 'POST':
image = request.files['image']
img_filename = secure_filename(image.filename)
image.save(os.path.join(app.config['UPLOAD_FOLDER'], img_filename))
else:
img_filename=None
post = Blogpost(title=title, author=author, date_posted=datetime.now(), content=content, image=img_filename)
db.session.add(post)
db.session.commit()
return redirect(url_for('index'))
@app.route('/addimage', methods=['POST'])
def addimage():
if request.method == 'POST' and 'image' in request.files:
image = request.files['image']
if image and allowed_file(image.filename):
img_filename = secure_filename(image.filename)
image.save(os.path.join(app.config['UPLOAD_FOLDER'], img_filename))
else:
img_filename=None
    return redirect(url_for('index'))
@app.route('/about')
def about():
bg = 'about-bg.jpg'
return render_template('about.html', filename=bg)
if __name__ == '__main__':
db.create_all()
app.run(debug=True)
| mit | -8,502,223,185,065,904,000 | 29.967742 | 114 | 0.610999 | false | 3.477193 | false | false | false |