| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5 to 92 | stringlengths 4 to 232 | stringclasses 19 values | stringlengths 4 to 7 | stringlengths 721 to 1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51 to 99.9 | int64 15 to 997 | float64 0.25 to 0.97 | bool (1 class) |
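Each data row below follows this column layout: the leading cells (repo_name, path, copies, size) run into the file content, and the trailing cells (license, hash, line_mean, line_max, alpha_frac, autogenerated) close the row. A minimal sketch of one row as a Python structure, for reference only; the class name and the example values are illustrative assumptions taken from the header and the first row, not part of any published loader:

from dataclasses import dataclass

@dataclass
class CodeRow:
    repo_name: str       # e.g. "dlowder-salesforce/advent2016"
    path: str            # file path inside the repository, e.g. "advent06.py"
    copies: str          # near-duplicate count, stored as a string class
    size: str            # file size in bytes, stored as a string
    content: str         # full source text of the file
    license: str         # license identifier, e.g. "gpl-3.0"
    hash: int            # 64-bit content hash
    line_mean: float     # mean line length of the file
    line_max: int        # length of the longest line
    alpha_frac: float    # fraction of alphabetic characters
    autogenerated: bool  # whether the file looks auto-generated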
dlowder-salesforce/advent2016 | advent06.py | 1 | 1068 | import sys, os
import re
pathname = os.path.dirname(sys.argv[0])
f = open(pathname + '/input06.txt','r')
input = f.read().strip().split('\n')
letters = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
def frequencyMap(s):
m = {}
for i in range(len(letters)):
m[letters[i]] = s.count(letters[i])
output = ""
for i in range(len(s),0,-1):
for j in range(len(letters)):
if m[letters[j]] == i:
output += letters[j]
return output
def mostFrequentChar(s):
return frequencyMap(s)[0]
def leastFrequentChar(s):
return frequencyMap(s)[-1]
messageLength = len(input[0])
messageChars = [""]*messageLength
for i in range(len(input)):
for j in range(messageLength):
messageChars[j] = messageChars[j] + input[i][j]
output = ""
for i in range(messageLength):
output += mostFrequentChar(messageChars[i])
print(output)
output = ""
for i in range(messageLength):
output += leastFrequentChar(messageChars[i])
print(output)
| gpl-3.0 | 7,397,833,635,566,431,000 | 20.36 | 115 | 0.586142 | false |
lichong012245/django-lfs-0.7.8 | lfs/manage/views/carts.py | 1 | 10423 | # python imports
from datetime import datetime
from datetime import timedelta
# django imports
from django.contrib.auth.decorators import permission_required
from django.core.paginator import Paginator
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from django.template import RequestContext
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
# lfs imports
import lfs.core.utils
from lfs.caching.utils import lfs_get_object_or_404
from lfs.cart.models import Cart
from lfs.core.utils import LazyEncoder
from lfs.customer.models import Customer
# Views
@permission_required("core.manage_shop", login_url="/login/")
def carts_view(request, template_name="manage/cart/carts.html"):
"""Displays the carts overview.
"""
return render_to_response(template_name, RequestContext(request, {
"carts_filters_inline": carts_filters_inline(request),
"carts_inline": carts_inline(request),
}))
@permission_required("core.manage_shop", login_url="/login/")
def cart_view(request, cart_id, template_name="manage/cart/cart.html"):
"""Displays the cart with the passed cart id.
"""
return render_to_response(template_name, RequestContext(request, {
"cart_filters_inline": cart_filters_inline(request, cart_id),
"selectable_carts_inline": selectable_carts_inline(request, cart_id),
"cart_inline": cart_inline(request, cart_id),
}))
# Parts
def cart_filters_inline(request, cart_id, template_name="manage/cart/cart_filters_inline.html"):
"""Renders the filters section of the cart view.
"""
cart = lfs_get_object_or_404(Cart, pk=cart_id)
cart_filters = request.session.get("cart-filters", {})
return render_to_string(template_name, RequestContext(request, {
"cart": cart,
"start": cart_filters.get("start", ""),
"end": cart_filters.get("end", ""),
}))
def carts_filters_inline(request, template_name="manage/cart/carts_filters_inline.html"):
"""Displays the filters part of the carts overview.
"""
cart_filters = request.session.get("cart-filters", {})
temp = _get_filtered_carts(cart_filters)
paginator = Paginator(temp, 30)
page = request.REQUEST.get("page", 1)
page = paginator.page(page)
return render_to_string(template_name, RequestContext(request, {
"page": page,
"paginator": paginator,
"start": cart_filters.get("start", ""),
"end": cart_filters.get("end", ""),
}))
@permission_required("core.manage_shop", login_url="/login/")
def carts_inline(request, template_name="manage/cart/carts_inline.html"):
"""Displays carts overview.
"""
cart_filters = request.session.get("cart-filters", {})
temp = _get_filtered_carts(cart_filters)
paginator = Paginator(temp, 30)
page = request.REQUEST.get("page", 1)
page = paginator.page(page)
carts = []
for cart in page.object_list:
products = []
total = 0
for item in cart.get_items():
total += item.get_price_gross(request)
products.append(item.product.get_name())
try:
if cart.user:
customer = Customer.objects.get(user=cart.user)
else:
customer = Customer.objects.get(session=cart.session)
except Customer.DoesNotExist:
customer = None
carts.append({
"id": cart.id,
"amount_of_items": cart.get_amount_of_items(),
"session": cart.session,
"user": cart.user,
"total": total,
"products": ", ".join(products),
"creation_date": cart.creation_date,
"modification_date": cart.modification_date,
"customer": customer,
})
return render_to_string(template_name, RequestContext(request, {
"carts": carts,
"page": page,
"paginator": paginator,
"start": cart_filters.get("start", ""),
"end": cart_filters.get("end", ""),
}))
@permission_required("core.manage_shop", login_url="/login/")
def cart_inline(request, cart_id, template_name="manage/cart/cart_inline.html"):
"""Displays cart with provided cart id.
"""
cart = lfs_get_object_or_404(Cart, pk=cart_id)
total = 0
for item in cart.get_items():
total += item.get_price_gross(request)
try:
if cart.user:
customer = Customer.objects.get(user=cart.user)
else:
customer = Customer.objects.get(session=cart.session)
except Customer.DoesNotExist:
customer = None
cart_filters = request.session.get("cart-filters", {})
return render_to_string(template_name, RequestContext(request, {
"cart": cart,
"customer": customer,
"total": total,
"start": cart_filters.get("start", ""),
"end": cart_filters.get("end", ""),
}))
@permission_required("core.manage_shop", login_url="/login/")
def selectable_carts_inline(request, cart_id, template_name="manage/cart/selectable_carts_inline.html"):
"""Displays selectable carts section within cart view.
"""
cart_filters = request.session.get("cart-filters", {})
carts = _get_filtered_carts(cart_filters)
paginator = Paginator(carts, 30)
try:
page = int(request.REQUEST.get("page", 1))
except TypeError:
page = 1
page = paginator.page(page)
return render_to_string(template_name, RequestContext(request, {
"paginator": paginator,
"page": page,
"cart_id": int(cart_id),
}))
# Actions
@permission_required("core.manage_shop", login_url="/login/")
def set_carts_page(request):
"""Sets the page of the displayed carts.
"""
result = simplejson.dumps({
"html": (
("#carts-inline", carts_inline(request)),
("#carts-filters-inline", carts_filters_inline(request)),
),
}, cls=LazyEncoder)
return HttpResponse(result)
@permission_required("core.manage_shop", login_url="/login/")
def set_cart_page(request):
"""Sets the page of the selectable carts within cart view.
"""
cart_id = request.GET.get("cart-id")
result = simplejson.dumps({
"html": (
("#cart-inline", cart_inline(request, cart_id)),
("#cart-filters-inline", cart_filters_inline(request, cart_id)),
("#selectable-carts-inline", selectable_carts_inline(request, cart_id)),
),
}, cls=LazyEncoder)
return HttpResponse(result)
@permission_required("core.manage_shop", login_url="/login/")
def set_cart_filters(request):
"""Sets cart filters given by passed request.
"""
cart_filters = request.session.get("cart-filters", {})
if request.POST.get("start", "") != "":
cart_filters["start"] = request.POST.get("start")
else:
if cart_filters.get("start"):
del cart_filters["start"]
if request.POST.get("end", "") != "":
cart_filters["end"] = request.POST.get("end")
else:
if cart_filters.get("end"):
del cart_filters["end"]
request.session["cart-filters"] = cart_filters
if request.REQUEST.get("came-from") == "cart":
cart_id = request.REQUEST.get("cart-id")
html = (
("#selectable-carts-inline", selectable_carts_inline(request, cart_id)),
("#cart-filters-inline", cart_filters_inline(request, cart_id)),
("#cart-inline", cart_inline(request, cart_id)),
)
else:
html = (
("#carts-filters-inline", carts_filters_inline(request)),
("#carts-inline", carts_inline(request)),
)
msg = _(u"Cart filters has been set.")
result = simplejson.dumps({
"html": html,
"message": msg,
}, cls=LazyEncoder)
return HttpResponse(result)
@permission_required("core.manage_shop", login_url="/login/")
def set_cart_filters_date(request):
"""Sets the date filter by given short cut link
"""
cart_filters = request.session.get("cart-filters", {})
start = datetime.now() - timedelta(int(request.REQUEST.get("start")))
end = datetime.now() - timedelta(int(request.REQUEST.get("end")))
cart_filters["start"] = start.strftime("%Y-%m-%d")
cart_filters["end"] = end.strftime("%Y-%m-%d")
request.session["cart-filters"] = cart_filters
if request.REQUEST.get("came-from") == "cart":
cart_id = request.REQUEST.get("cart-id")
html = (
("#selectable-carts-inline", selectable_carts_inline(request, cart_id)),
("#cart-filters-inline", cart_filters_inline(request, cart_id)),
("#cart-inline", cart_inline(request, cart_id)),
)
else:
html = (
("#carts-filters-inline", carts_filters_inline(request)),
("#carts-inline", carts_inline(request)),
)
msg = _(u"Cart filters has been set")
result = simplejson.dumps({
"html": html,
"message": msg,
}, cls=LazyEncoder)
return HttpResponse(result)
@permission_required("core.manage_shop", login_url="/login/")
def reset_cart_filters(request):
"""Resets all cart filters.
"""
if "cart-filters" in request.session:
del request.session["cart-filters"]
if request.REQUEST.get("came-from") == "cart":
cart_id = request.REQUEST.get("cart-id")
html = (
("#selectable-carts-inline", selectable_carts_inline(request, cart_id)),
("#cart-inline", cart_inline(request, cart_id)),
)
else:
html = (("#carts-inline", carts_inline(request)),)
msg = _(u"Cart filters has been reset")
result = simplejson.dumps({
"html": html,
"message": msg,
}, cls=LazyEncoder)
return HttpResponse(result)
# Private methods
def _get_filtered_carts(cart_filters):
"""
"""
carts = Cart.objects.all().order_by("-modification_date")
# start
start = cart_filters.get("start", "")
if start != "":
s = lfs.core.utils.get_start_day(start)
else:
s = datetime.min
# end
end = cart_filters.get("end", "")
if end != "":
e = lfs.core.utils.get_end_day(end)
else:
e = datetime.max
carts = carts.filter(modification_date__range=(s, e))
return carts
| bsd-3-clause | -5,679,406,835,815,099,000 | 30.020833 | 104 | 0.614506 | false |
boriel/zxbasic | src/arch/zx48k/peephole/pattern.py | 1 | 4181 | # -*- coding: utf-8 -*-
import re
import itertools
from typing import Dict
from typing import List
from typing import Optional
RE_SVAR = re.compile(r'(\$(?:\$|[0-9]+))')
RE_PARSE = re.compile(r'(\s+|"(?:[^"]|"")*")')
class BasicLinePattern:
""" Defines a pattern for a line, like 'push $1' being
$1 a pattern variable
"""
__slots__ = 'line', 'vars', 're_pattern', 're', 'output'
@staticmethod
def sanitize(pattern):
""" Returns a sanitized pattern version of a string to be later
compiled into a reg exp
"""
meta = r'.^$*+?{}[]\|()'
return ''.join(r'\%s' % x if x in meta else x for x in pattern)
def __init__(self, line):
self.line = ''.join(x.strip() or ' ' for x in RE_PARSE.split(line) if x).strip()
self.vars = []
self.re_pattern = ''
self.output = []
for token in RE_PARSE.split(self.line):
if token == ' ':
self.re_pattern += r'\s+'
self.output.append(' ')
continue
subtokens = [x for x in RE_SVAR.split(token) if x]
for tok in subtokens:
if tok == '$$':
self.re_pattern += r'\$'
self.output.append('$')
elif RE_SVAR.match(tok):
self.output.append(tok)
mvar = '_%s' % tok[1:]
if mvar not in self.vars:
self.vars.append(mvar)
self.re_pattern += '(?P<%s>.*)' % mvar
else:
self.re_pattern += r'\%i' % (self.vars.index(mvar) + 1)
else:
self.output.append(tok)
self.re_pattern += BasicLinePattern.sanitize(tok)
self.re = re.compile(self.re_pattern)
self.vars = set(x.replace('_', '$') for x in self.vars)
class LinePattern(BasicLinePattern):
""" Defines a pattern to match against a source assembler.
Given an assembler instruction with substitution variables
($1, $2, ...) creates an instance that matches against a list
of real assembler instructions. e.g.
push $1
matches against
push af
and bounds $1 to 'af'
Note that $$ matches against the $ sign
Returns whether the pattern matched (True) or not.
If it matched, the vars_ dictionary will be updated with unified vars.
"""
__slots__ = 'line', 'vars', 're_pattern', 're', 'output'
def match(self, line: str, vars_: Dict[str, str]) -> bool:
match = self.re.match(line)
if match is None:
return False
mdict = match.groupdict()
if any(mdict.get(k, v) != v for k, v in vars_.items()):
return False
vars_.update(mdict)
return True
def __repr__(self):
return repr(self.re)
class BlockPattern:
""" Given a list asm instructions, tries to match them
"""
__slots__ = 'lines', 'patterns', 'vars'
def __init__(self, lines: List[str]):
lines = [x.strip() for x in lines]
self.patterns = [LinePattern(x) for x in lines if x]
self.lines = [pattern.line for pattern in self.patterns]
self.vars = set(itertools.chain(*[p.vars for p in self.patterns]))
def __len__(self):
return len(self.lines)
def match(self, instructions: List[str], start: int = 0) -> Optional[Dict[str, str]]:
""" Given a list of instructions and a starting point,
returns whether this pattern matches or not from such point
onwards.
E.g. given the pattern:
push $1
pop $1
and the list
ld a, 5
push af
pop af
this pattern will match at position 1
"""
lines = instructions[start:]
if len(self) > len(lines):
return None
univars: Dict[str, str] = {}
if not all(patt.match(line, vars_=univars) for patt, line in zip(self.patterns, lines)):
return None
return {'$' + k[1:]: v for k, v in univars.items()}
def __repr__(self):
return str([repr(x) for x in self.patterns])
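# --- Illustrative usage sketch (not part of the original module); it exercises
# the docstring example above, and the variable names here are assumptions.
if __name__ == '__main__':
    block = BlockPattern(['push $1', 'pop $1'])
    code = ['ld a, 5', 'push af', 'pop af']
    print(block.match(code, start=1))  # {'$1': 'af'}: both lines matched, $1 unified
    print(block.match(code, start=0))  # None: 'ld a, 5' does not match 'push $1'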
| gpl-3.0 | -789,134,962,122,211,800 | 30.674242 | 96 | 0.532169 | false |
dhongu/l10n-romania | l10n_ro_account_bank_statement_import_mt940_ing/tests/test_import_bank_statement.py | 1 | 1411 | # Copyright (C) 2016 Forest and Biomass Romania
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import base64
from odoo.tests.common import TransactionCase
from odoo.modules.module import get_module_resource
class TestINGImport(TransactionCase):
"""Run test to import MT940 BRD import."""
def setUp(self):
super(TestINGImport, self).setUp()
self.statement_import_model = self.env['account.bank.statement.import']
self.bank_statement_model = self.env['account.bank.statement']
def test_statement_import(self):
"""Test correct creation of single statement."""
brd_file_path = get_module_resource(
'l10n_ro_account_bank_statement_import_mt940_ing',
'test_files', 'test_ing_940.txt')
brd_file = open(brd_file_path, 'rb').read()
brd_data_file = base64.b64encode(brd_file)
bank_statement = self.statement_import_model.create(
dict(data_file=brd_data_file))
bank_statement.import_file()
bank_st_record = self.bank_statement_model.search( [('name', '=', '00138/1')])[0]
self.assertEquals(bank_st_record.balance_start, 3885.24)
self.assertEquals(bank_st_record.balance_end_real, 3671.88)
line = bank_st_record.line_ids[-1]
self.assertEquals(line.name, 'PLATA FACT 4603309')
self.assertEquals(line.amount, -210.60)
| agpl-3.0 | 5,554,827,728,480,217,000 | 41.757576 | 100 | 0.659816 | false |
yuyakanemoto/neural-style-loss | neural_style_loss_multi.py | 1 | 3697 | import os
import scipy.misc
import pandas as pd
from argparse import ArgumentParser
import time
from neural_style_loss import styleloss, imread
# default arguments
OUTPUT = 'output.csv'
LAYER_WEIGHT_EXP = 1
VGG_PATH = 'imagenet-vgg-verydeep-19.mat'
POOLING = 'max'
NORMALIZE = 1
VERBOSE = 1
TIMEIT = 1
def build_parser():
parser = ArgumentParser()
parser.add_argument('--path',
dest='path', help='path to image folder',
metavar='PATH', required=True)
parser.add_argument('--output',
dest='output', help='output path (default %(default)s)',
metavar='OUTPUT', default=OUTPUT)
parser.add_argument('--network',
dest='network', help='path to network parameters (default %(default)s)',
metavar='VGG_PATH', default=VGG_PATH)
parser.add_argument('--style-layer-weight-exp', type=float,
dest='layer_weight_exp', help='style layer weight exponentional increase - weight(layer<n+1>) = weight_exp*weight(layer<n>) (default %(default)s)',
metavar='LAYER_WEIGHT_EXP', default=LAYER_WEIGHT_EXP)
parser.add_argument('--pooling',
dest='pooling', help='pooling layer configuration: max or avg (default %(default)s)',
metavar='POOLING', default=POOLING)
parser.add_argument('--normalize',
dest='normalize', help='normalize output values (default %(default)s)',
metavar='NORMALIZE', default=NORMALIZE)
parser.add_argument('--verbose',
dest='verbose', help='print raw style loss value in each iteration (default %(default)s)',
metavar='VERBOSE', default=VERBOSE)
parser.add_argument('--timeit',
dest='timeit', help='calculate and print the calculation time (default %(default)s)',
metavar='TIMEIT', default=TIMEIT)
return parser
def main():
start_time = time.time()
parser = build_parser()
options = parser.parse_args()
if not os.path.isfile(options.network):
parser.error("Network %s does not exist. (Did you forget to download it?)" % options.network)
# take only JPEG or PNG files
path = options.path
images = [f for f in os.listdir(path) if (f.endswith(".jpg") or f.endswith(".png"))]
df = pd.DataFrame(0, index=images, columns=images)
for i, impath1 in enumerate(images):
image1 = imread(os.path.join(path,impath1))
for j, impath2 in enumerate(images):
if i<j:
image2 = imread(os.path.join(path,impath2))
if image1.shape[1] < image2.shape[1]:
image2 = scipy.misc.imresize(image2, image1.shape[1] / image2.shape[1])
else:
image1 = scipy.misc.imresize(image1, image2.shape[1] / image1.shape[1])
style_loss = styleloss(
network=options.network,
image1=image1,
image2=image2,
layer_weight_exp=options.layer_weight_exp,
pooling=options.pooling
)
df.iloc[i,j] = style_loss
if options.verbose == 1:
print('style_loss between '+str(impath1)+' and '+str(impath2)+': '+str(style_loss))
elif i>j:
df.iloc[i,j] = df.iloc[j,i]
else:
df.iloc[i,j] = 0
# normalize data array
if options.normalize == 1:
maxval = df.values.max()
df = df/maxval
output = options.output
df.to_csv(output)
if options.timeit == 1:
print("calculation time: %s seconds" % (time.time() - start_time))
if __name__ == '__main__':
main()
| mit | -4,748,993,838,807,096,000 | 36.343434 | 159 | 0.58561 | false |
iagcl/data_pipeline | data_pipeline/db/db_query_results.py | 1 | 2506 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: db_query_results
# Purpose: Represents the query result object returned from a DB API query
# execution.
#
# Notes:
###############################################################################
from .query_results import QueryResults
COL_NAME_INDEX = 0
class DbQueryResults(QueryResults):
def __init__(self, cursor):
super(DbQueryResults, self).__init__()
self._cursor = cursor
self._col_map = {}
self._col_names = []
self._build_col_map()
def __iter__(self):
return self
def __str__(self):
return str(self._col_map)
def next(self):
record = self._cursor.fetchone()
if not record:
raise StopIteration
else:
return record
def fetchone(self):
record = None
try:
record = self.next()
except StopIteration, e:
pass
return record
def fetchall(self):
return self._cursor.fetchall()
def fetchmany(self, arraysize=None):
if arraysize is None:
return self._cursor.fetchmany()
return self._cursor.fetchmany(arraysize)
def get_col_index(self, col):
return self._col_map[col]
def get_col_names(self):
return self._col_names
def _build_col_map(self):
i = 0
for description in self._cursor.description:
self._col_map[description[COL_NAME_INDEX]] = i
self._col_names.append(description[COL_NAME_INDEX])
i += 1
def get(self, record, column_name):
return record[self.get_col_index(column_name)]
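# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Any DB API 2.0 cursor works; sqlite3 is used only as an example backend.
if __name__ == '__main__':
    import sqlite3
    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute("CREATE TABLE t (id INTEGER, name TEXT)")
    cur.execute("INSERT INTO t VALUES (1, 'alpha')")
    cur.execute("SELECT id, name FROM t")
    results = DbQueryResults(cur)
    row = results.fetchone()
    print(results.get_col_names())   # ['id', 'name']
    print(results.get(row, 'name'))  # 'alpha'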
| apache-2.0 | 3,803,351,040,096,441,300 | 29.560976 | 79 | 0.598962 | false |
Sberned/djaio | djaio/core/urlconf.py | 1 | 1777 | #!-*- coding: utf-8 -*-
import inspect
from collections import namedtuple
from aiohttp.web import UrlDispatcher
from aiohttp import hdrs
from aiohttp.test_utils import make_mocked_request
VERBS = [
hdrs.METH_GET.lower(),
hdrs.METH_POST.lower(),
hdrs.METH_PUT.lower(),
hdrs.METH_DELETE.lower()
]
class DjaioUrlDispatcher(UrlDispatcher):
def add_route(self, methods, path, handler, *, name=None, expect_handler=None):
"""
        Replace the base add_route with our own so that class-based views can
        register several HTTP methods on a single resource.
"""
resource = self.add_resource(path, name=name)
if isinstance(methods, str):
methods = [methods]
for m in methods:
resource.add_route(m, handler, expect_handler=expect_handler)
urls = []
_url_type = namedtuple('url_item', ['method', 'path', 'handler', 'name'])
def url(method, path, handler, name=None):
urls.append(_url_type(method=method, path=path, handler=handler, name=name))
class handle_url: # noqa
def __init__(self, path, name=None):
self.path = path
self.name = name
def get_name(self, cls):
if not self.name:
return cls.__name__
return self.name
def __call__(self, cls):
if inspect.isclass(cls):
http_meths = []
_view = cls(make_mocked_request(hdrs.METH_GET, '/'))
for verb in VERBS:
if getattr(_view, '{}_method'.format(verb)):
http_meths.append(verb)
url(method=http_meths, path=self.path, handler=cls, name=self.get_name(cls))
return cls
def setup(app):
app.urls = {}
for url in urls:
app.urls[url.handler.__name__] = url.name
app.router.add_route(url.method, url.path, url.handler, name=url.name)
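# --- Illustrative usage sketch (added for clarity; not part of djaio). A real
# view class would come from djaio's view base classes; the stand-in below only
# shows what ``handle_url`` inspects: an ``__init__(request)`` signature and the
# ``get_method``/``post_method``/``put_method``/``delete_method`` attributes.
#
#     @handle_url('/ping', name='ping')
#     class PingView(object):
#         get_method = True                            # GET is registered for '/ping'
#         post_method = put_method = delete_method = None
#
#         def __init__(self, request):
#             self.request = request
#
#     setup(app)  # copies the collected ``urls`` into app.urls and the router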
| apache-2.0 | 8,866,033,871,370,243,000 | 28.131148 | 88 | 0.604389 | false |
stplaydog/OPTKIT | scripts/data_analysis.py | 1 | 12608 | #
# Copy right YMSys, 2015,2016 Zhaoming Yin
#
# @brief This script performs data analytics
# 1) it can list:
# name, numV, numE, numCC, avgDiam, varDiam, avgCluCoeff, varCluCoeff
# 2) it can draw distribution of
# clique, truss
#
#
# MODIFIED (MM/DD/YY)
# stplaydog 10/21/16 - Clustering coefficient analysis
# stplaydog 08/27/16 - Add data plot functions
# stplaydog 08/20/16 - Implementation
# stplaydog 08/07/16 - Creation
#
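# Example invocations (illustrative; based on the argparse options defined in
# main() below, with placeholder file names):
#   python data_analysis.py -f stats.json   - summarize and plot a single JSON file
#   python data_analysis.py -d results/     - process every *.json file in a directory
#   python data_analysis.py -p config.txt   - read the directory and prefix from a config file
#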
import sys
import json
import numpy
import datetime
import time
import argparse
from enum import Enum
import glob, os
import re
from os.path import basename
import math
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy.interpolate import spline
from pandas import DataFrame
import pandas as pd
import rpy2.robjects as robj
import rpy2.robjects.pandas2ri # for dataframe conversion
from rpy2.robjects.packages import importr
from rpy2.robjects.lib import ggplot2
from subprocess import call
import os
class JsonStats:
def __init__(self, file):
with open(file) as data_file:
data = json.load(data_file)
#name_items = basename(file).replace(".json", "").split("_")
#self.name = "long_" if name_items[2] == "200" else "short_"
self.name = ""
self.numV = data["content"]["graph property"]["numV"]
self.numE = data["content"]["graph property"]["numE"]
self.numCC = data["content"]["graph property"]["numCC"]
numDiam = data["content"]["graph property"]["diameter"].split(",")
LDiam = [float(n) for n in numDiam if n]
        self.avgDiam = numpy.average(LDiam)
        self.varDiam = numpy.var(LDiam)
numClu = data["content"]["graph property"]["clusterCoeff"].split(",")
LClu = [float(n) for n in numClu if n]
        self.avgCluCoeff = numpy.average(LClu)
        self.varCluCoeff = numpy.var(LClu)
self.clique = self.reduce(data["content"]["graph property"]["clique"], True)
self.truss = self.reduce(data["content"]["graph property"]["truss"], True)
self.core = self.reduce(data["content"]["graph property"]["dbscan"], True)
self.dbscan = self.reduce(data["content"]["graph property"]["core"], True)
self.cliqueSize = self.reduce(data["content"]["graph property"]["clique"], False)
self.trussSize = self.reduce(data["content"]["graph property"]["truss"], False)
self.coreSize = self.reduce(data["content"]["graph property"]["dbscan"], False)
self.dbscanSize = self.reduce(data["content"]["graph property"]["core"], False)
self.cliqueSize = self.getSizeMean(self.clique, self.cliqueSize)
self.trussSize = self.getSizeMean(self.truss, self.trussSize)
self.coreSize = self.getSizeMean(self.core, self.coreSize)
self.dbscanSize = self.getSizeMean(self.dbscan, self.dbscanSize)
self.trussCoe = self.reduce(data["content"]["graph property"]["truss_coe"], False)
self.coreCoe = self.reduce(data["content"]["graph property"]["dbscan_coe"], False)
self.dbscanCoe = self.reduce(data["content"]["graph property"]["core_coe"], False)
self.trussCoe = self.getSizeMean(self.truss, self.trussCoe)
self.coreCoe = self.getSizeMean(self.core, self.coreCoe)
self.dbscanCoe = self.getSizeMean(self.dbscan, self.dbscanCoe)
def reduce(self, stats_str, if_freq):
stats_item = {}
items = stats_str.split("\n")
for item in items:
if item == "":
continue
pair = item.split(",")
if int(pair[0]) in stats_item:
if if_freq:
stats_item[int(pair[0])] += 1
else:
stats_item[int(pair[0])] += float(pair[1])
else:
if if_freq:
stats_item[int(pair[0])] = 1
else:
stats_item[int(pair[0])] = float(pair[1])
X = [0] * len(stats_item)
Y = [0] * len(stats_item)
i=0
for key in stats_item:
X[i] = int(key)
Y[i] = stats_item[key]
i+=1
return {'x':X,'y':Y}
def getSizeMean(self, freq, size):
for i in range(0, len(freq['y'])):
size['y'][i] = float(size['y'][i]) / float(freq['y'][i])
return size
def smooth_plot(self, item, plt, c, ls, mar, la):
if len(item['x']) == 0:
return
arr = numpy.array(item['x'])
xnew = numpy.linspace(arr.min(),arr.max(),300)
smooth = spline(item['x'], item['y'], xnew)
plt.plot(xnew, smooth, color=c, linestyle=ls, marker=mar, label = la)
def plot(self, ofname):
plt.plot(self.clique['x'], self.clique['y'], color='k', linestyle='-', marker=',', label = 'k-clique')
plt.plot(self.truss['x'], self.truss['y'], color='k', linestyle='-', marker='.', label = 'k-truss')
        plt.plot(self.dbscan['x'], self.dbscan['y'], color='k', linestyle='-', marker='v', label = 'dbscan')
plt.plot(self.core['x'], self.core['y'], color='k', linestyle='-', marker='o', label = 'k-core')
plt.legend( loc='lower right', numpoints = 1, prop={'size':15} )
plt.tick_params(labelsize=15)
plt.xlabel("K", fontsize=20)
plt.ylabel("number of cohesive subgraphs", fontsize=20)
plt.tight_layout()
plt.savefig(ofname)
plt.close()
def summary(self):
list = [self.name, str(self.numV), str(self.numE), \
str(self.numCC), str(round(self.avgDiam,2)), str(round(self.varDiam,2)), \
str(round(self.avgCluCoeff,2)), str(round(self.varCluCoeff,2)) ]
return ",".join(list)
class JsonStatsCollections:
def __init__(self, dir, prefix):
os.chdir(dir)
self.coll = {}
for file in glob.glob("*.json"):
try:
if file.find(prefix) != -1:
stats = JsonStats(file)
self.coll[file] = stats
except Exception, e:
print e
print "Data Corruption in " + file
def plot(self, ofname, is_freq):
colors = ['k', 'b', 'r', 'g']
i = 0
for c in self.coll:
if is_freq == False:
self.coll[c].smooth_plot(self.coll[c].cliqueSize, plt, colors[i], '--', ',', self.coll[c].name+'-clique')
self.coll[c].smooth_plot(self.coll[c].trussSize, plt, colors[i], '--', '.', self.coll[c].name+'-truss')
self.coll[c].smooth_plot(self.coll[c].coreSize, plt, colors[i], '-', 'v', self.coll[c].name+'-core')
self.coll[c].smooth_plot(self.coll[c].dbscanSize, plt, colors[i], '-', 'o', self.coll[c].name+'-dbscan')
elif is_freq == True:
plt.plot(self.coll[c].clique['x'], self.coll[c].clique['y'], color=colors[i], linestyle='--', marker=',', label = self.coll[c].name+'-clique')
plt.plot(self.coll[c].truss['x'], self.coll[c].truss['y'], color=colors[i], linestyle='--', marker='.', label = self.coll[c].name+'-truss')
plt.plot(self.coll[c].core['x'], self.coll[c].core['y'], color=colors[i], linestyle='-', marker='v', label = self.coll[c].name+'-core')
plt.plot(self.coll[c].dbscan['x'], self.coll[c].dbscan['y'], color=colors[i], linestyle='-', marker='o', label = self.coll[c].name+'-dbscan')
i += 1
plt.legend( loc=0, numpoints = 1, prop={'size':15} )
plt.tick_params(labelsize=15)
plt.xlabel("K", fontsize=20)
plt.ylabel("number of cohesive subgraphs", fontsize=20)
plt.tight_layout()
plt.savefig(ofname)
plt.close()
def gplot(self, ofname, is_freq):
i = 0
d = []
for c in self.coll:
if is_freq == 1:
d = self.transformDataGgPlot(c, d)
elif is_freq == 2:
d = self.transformDataGgPlotSize(c, d)
elif is_freq == 3:
d = self.transformDataGgPlotCoe(c, d)
f = DataFrame(d)
print ofname
f.to_csv(ofname.replace("png", "csv"), sep=',')
call(["Rscript", "../../../scripts/data_analysis.R", ofname.replace("png", "csv"), ofname ])
def transformDataGgPlotSize(self, c, ret):
item = self.coll[c].trussSize
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'truss', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
item = self.coll[c].cliqueSize
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'clique', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
item = self.coll[c].coreSize
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'core', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
item = self.coll[c].dbscanSize
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'dbscan', 'x': item['x'][i], 'y' : item['y'][i]}
            ret.append(trip)
        return ret
def transformDataGgPlotCoe(self, c, ret):
item = self.coll[c].trussCoe
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'truss_coe', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
item = self.coll[c].coreCoe
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'core_coe', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
item = self.coll[c].dbscanCoe
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'dbscan_coe', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
return ret
def transformDataGgPlot(self, c, ret):
item = self.coll[c].truss
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'truss', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
item = self.coll[c].clique
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'clique', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
item = self.coll[c].core
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'core', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
item = self.coll[c].dbscan
for i in range(0, len(item['x'])):
trip = {'data': self.coll[c].name+'dbscan', 'x': item['x'][i], 'y' : item['y'][i]}
ret.append(trip)
return ret
def main(argv):
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-f", "--file", action="store_true")
group.add_argument("-d", "--directory", action="store_true")
group.add_argument("-p", "--prefix", action="store_true")
parser.add_argument("fname", help="file/directory name")
args = parser.parse_args()
if args.file:
stats = JsonStats(args.fname)
print stats.summary()
ofname = args.fname.replace('json', '') + 'png'
stats.plot(ofname)
elif args.directory:
os.chdir(args.fname)
for file in glob.glob("*.json"):
try:
stats = JsonStats(file)
print stats.summary()
ofname = file.replace("json", "") + "png"
stats.plot(ofname)
except:
print "Data Corruption in " + file
elif args.prefix:
config = open(args.fname)
lines = config.readlines()
for line in lines:
if line.find("directory") != -1:
dir = line.strip().split(" ")[1]
if line.find("prefix") != -1:
pfx = line.strip().split(" ")[1]
coll = JsonStatsCollections(dir, pfx)
oname1 = dir + pfx + '.png'
oname2 = dir + pfx + '_size.png'
oname3 = dir + pfx + '_coe.png'
#coll.plot(oname2, False)
#coll.plot(oname1, True)
coll.gplot(oname1, 1)
coll.gplot(oname2, 2)
coll.gplot(oname3, 3)
if __name__ == "__main__":
main(sys.argv)
| gpl-3.0 | -1,100,012,202,485,038,300 | 38.898734 | 158 | 0.531726 | false |
luizfelippesr/galmag | galmag/halo_profiles.py | 1 | 5922 | # Copyright (C) 2017,2018,2019,2020 Luiz Felippe S. Rodrigues <[email protected]>
#
# This file is part of GalMag.
#
# GalMag is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalMag is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalMag. If not, see <http://www.gnu.org/licenses/>.
#
"""
GalMag
Contains the definitions of the halo rotation curve and alpha profile.
"""
import numpy as np
def simple_V(rho, theta, phi, r_h=1.0, Vh=220, fraction=3./15., normalize=True,
fraction_z=None, legacy=False):
r"""
Simple form of the rotation curve to be used for the halo
.. math::
V(r,\theta,\phi) \propto [1-\exp(-r \sin(\theta) / s_v) ]
Note
----
This simple form has no z dependence
Parameters
----------
rho : array
Spherical radial coordinate, :math:`r`
theta : array
Polar coordinate, :math:`\theta`
phi : array
Azimuthal coordinate, :math:`\phi`
fraction :
fraction of the halo radius corresponding to the turnover
of the rotation curve.
r_h :
halo radius in the same units as rho. Default: 1.0
Vh :
Value of the rotation curve at rho=r_h. Default: 220 km/s
normalize : bool, optional
if True, the rotation curve will be normalized to one at rho=r_h
Returns
-------
list
List containing three `numpy` arrays corresponding to:
:math:`V_r`, :math:`V_\theta`, :math:`V_\phi`
"""
Vr, Vt = [np.zeros_like(rho) for i in range(2)]
Vp = (1.0-np.exp(-np.abs(rho*np.sin(theta))/(fraction*r_h)))
if not legacy:
Vp /= (1.0-np.exp(-1./fraction))
if not normalize:
Vp *= Vh
return Vr, Vt, Vp
def simple_V_legacy(rho, theta, phi, r_h=1.0, Vh=220, fraction=0.5,
fraction_z=None, normalize=True):
"""
Rotation curve employed in version 0.1 and in the MMath Final Report of
James Hollins. Same as simple_V but with a slight change in the way it
is normalized.
"""
return simple_V(rho, theta, phi, r_h, Vh, fraction, normalize, legacy=True)
def simple_V_exp(rho, theta, phi, r_h=1.0, Vh=220, fraction=3./15.,
fraction_z=11./15., normalize=True,
legacy=False):
r"""
Variation on simple_V which decays exponentially with z
.. math::
V(r,\theta,\phi) \propto (1-\exp(-r \sin(\theta) / s_v)) \exp(-r \cos(\theta)/z_v)
Parameters
----------
rho : array
Spherical radial coordinate, :math:`r`
theta : array
Polar coordinate, :math:`\theta`
phi : array
Azimuthal coordinate, :math:`\phi`
fraction :
fraction of the halo radius corresponding to the turnover of the
rotation curve.
fraction_z :
fraction of the halo radius corresponding to the characteristic
vertical decay length of the rotation
r_h :
halo radius in the same units as rho. Default: 1.0
Vh :
Value of the rotation curve at rho=r_h. Default: 220 km/s
normalize : bool, optional
if True, the rotation curve will be normalized to one at rho=r_h
Returns
-------
list
List containing three `numpy` arrays corresponding to:
:math:`V_r`, :math:`V_\theta`, :math:`V_\phi`
"""
Vr, Vt, Vp = simple_V(rho, theta, phi, r_h, Vh, fraction, normalize)
z = np.abs(rho/r_h * np.cos(theta))
decay_factor = np.exp(-z/fraction_z)
return Vr, Vt, Vp*decay_factor
def simple_V_linear(rho, theta, phi, r_h=1.0, Vh=220, fraction=3./15.,
fraction_z=11./15, normalize=True,
legacy=False):
r"""
Variation on simple_V which decays linearly with z, reaching 0 at
z=(halo radius), and V_h at z=0
.. math::
V(r,\theta,\phi) \propto [1-\exp(-r \sin(\theta) / s_v)] (1-z/z_v)
Parameters
----------
rho : array
Spherical radial coordinate, :math:`r`
theta : array
Polar coordinate, :math:`\theta`
phi : array
Azimuthal coordinate, :math:`\phi`
fraction :
fraction of the halo radius corresponding to the turnover of the
rotation curve. (s_v = fraction*r_h)
fraction_z :
fraction of the halo radius controling the "lag" of the rotation curve.
(z_v = fraction_z*r_h)
r_h :
halo radius in the same units as rho. Default: 1.0
Vh :
Value of the rotation curve at rho=r_h. Default: 220 km/s
normalize : bool, optional
if True, the rotation curve will be normalized to one at rho=r_h
    Returns
    -------
    list
        List containing three `numpy` arrays corresponding to:
        :math:`V_r`, :math:`V_\theta`, :math:`V_\phi`
"""
Vr, Vt, Vp = simple_V(rho, theta, phi, r_h, Vh, fraction, normalize)
z = np.abs(rho/r_h * np.cos(theta)) # Dimensionless z
decay_factor = (1-z/fraction_z)
Vp[decay_factor<0.] = 0.
return Vr, Vt, Vp*decay_factor
def simple_alpha(rho, theta, phi, alpha0=1.0):
r"""
Simple profile for alpha
.. math::
\alpha(\mathbf{r}) = \alpha_0\cos(\theta)
Parameters
----------
rho : array
Spherical radial coordinate, :math:`r`
theta : array
Polar coordinate, :math:`\theta`
phi : array
Azimuthal coordinate, :math:`\phi`
alpha0 : float, optional
Normalization. Default: 1.0
"""
alpha = np.cos(theta)
alpha[rho>1.] = 0.
return alpha*alpha0
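# --- Illustrative usage sketch (added for clarity; not part of GalMag). The
# sample coordinates below are arbitrary assumptions.
if __name__ == '__main__':
    rho = np.array([0.25, 0.5, 1.0])          # dimensionless spherical radius
    theta = np.full_like(rho, np.pi / 2.0)    # galactic mid-plane
    phi = np.zeros_like(rho)
    Vr, Vt, Vp = simple_V(rho, theta, phi, r_h=1.0, Vh=220.0, normalize=False)
    alpha = simple_alpha(rho, theta, phi, alpha0=1.0)
    print(Vp)     # azimuthal rotation speed (km/s) at each sample point
    print(alpha)  # ~0 in the mid-plane; also zeroed wherever rho > 1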
| gpl-3.0 | 2,771,070,054,194,636,300 | 29.525773 | 91 | 0.60689 | false |
emschorsch/easy-thumbnails | easy_thumbnails/processors.py | 1 | 8519 | import re
import six
try:
from PIL import Image, ImageChops, ImageFilter
except ImportError:
import Image
import ImageChops
import ImageFilter
from easy_thumbnails import utils
def _compare_entropy(start_slice, end_slice, slice, difference):
"""
Calculate the entropy of two slices (from the start and end of an axis),
returning a tuple containing the amount that should be added to the start
and removed from the end of the axis.
"""
start_entropy = utils.image_entropy(start_slice)
end_entropy = utils.image_entropy(end_slice)
if end_entropy and abs(start_entropy / end_entropy - 1) < 0.01:
# Less than 1% difference, remove from both sides.
if difference >= slice * 2:
return slice, slice
half_slice = slice // 2
return half_slice, slice - half_slice
if start_entropy > end_entropy:
return 0, slice
else:
return slice, 0
def colorspace(im, bw=False, replace_alpha=False, **kwargs):
"""
Convert images to the correct color space.
A passive option (i.e. always processed) of this method is that all images
(unless grayscale) are converted to RGB colorspace.
This processor should be listed before :func:`scale_and_crop` so palette is
changed before the image is resized.
bw
Make the thumbnail grayscale (not really just black & white).
replace_alpha
Replace any transparency layer with a solid color. For example,
``replace_alpha='#fff'`` would replace the transparency layer with
white.
"""
is_transparent = utils.is_transparent(im)
if bw:
if im.mode in ('L', 'LA'):
return im
if is_transparent:
return im.convert('LA')
else:
return im.convert('L')
if im.mode in ('L', 'RGB'):
return im
if is_transparent:
if im.mode != 'RGBA':
im = im.convert('RGBA')
if not replace_alpha:
return im
base = Image.new('RGBA', im.size, replace_alpha)
base.paste(im)
im = base
return im.convert('RGB')
def autocrop(im, autocrop=False, **kwargs):
"""
Remove any unnecessary whitespace from the edges of the source image.
This processor should be listed before :func:`scale_and_crop` so the
whitespace is removed from the source image before it is resized.
autocrop
Activates the autocrop method for this image.
"""
if autocrop:
bw = im.convert('1')
bw = bw.filter(ImageFilter.MedianFilter)
# White background.
bg = Image.new('1', im.size, 255)
diff = ImageChops.difference(bw, bg)
bbox = diff.getbbox()
if bbox:
im = im.crop(bbox)
return im
def scale_and_crop(im, size, crop=False, upscale=False, **kwargs):
"""
Handle scaling and cropping the source image.
Images can be scaled / cropped against a single dimension by using zero
as the placeholder in the size. For example, ``size=(100, 0)`` will cause
the image to be resized to 100 pixels wide, keeping the aspect ratio of
the source image.
crop
Crop the source image height or width to exactly match the requested
thumbnail size (the default is to proportionally resize the source
image to fit within the requested thumbnail size).
By default, the image is centered before being cropped. To crop from
the edges, pass a comma separated string containing the ``x`` and ``y``
percentage offsets (negative values go from the right/bottom). Some
examples follow:
* ``crop="0,0"`` will crop from the left and top edges.
* ``crop="-10,-0"`` will crop from the right edge (with a 10% offset)
and the bottom edge.
* ``crop=",0"`` will keep the default behavior for the x axis
(horizontally centering the image) and crop from the top edge.
The image can also be "smart cropped" by using ``crop="smart"``. The
image is incrementally cropped down to the requested size by removing
slices from edges with the least entropy.
Finally, you can use ``crop="scale"`` to simply scale the image so that
at least one dimension fits within the size dimensions given (you may
want to use the upscale option too).
upscale
Allow upscaling of the source image during scaling.
"""
source_x, source_y = [float(v) for v in im.size]
target_x, target_y = [float(v) for v in size]
if crop or not target_x or not target_y:
scale = max(target_x / source_x, target_y / source_y)
else:
scale = min(target_x / source_x, target_y / source_y)
# Handle one-dimensional targets.
if not target_x:
target_x = source_x * scale
elif not target_y:
target_y = source_y * scale
if scale < 1.0 or (scale > 1.0 and upscale):
# Resize the image to the target size boundary. Round the scaled
# boundary sizes to avoid floating point errors.
im = im.resize((int(round(source_x * scale)),
int(round(source_y * scale))),
resample=Image.ANTIALIAS)
if crop:
# Use integer values now.
source_x, source_y = im.size
# Difference between new image size and requested size.
diff_x = int(source_x - min(source_x, target_x))
diff_y = int(source_y - min(source_y, target_y))
if diff_x or diff_y:
# Center cropping (default).
halfdiff_x, halfdiff_y = diff_x // 2, diff_y // 2
box = [halfdiff_x, halfdiff_y,
min(source_x, int(target_x) + halfdiff_x),
min(source_y, int(target_y) + halfdiff_y)]
# See if an edge cropping argument was provided.
edge_crop = (isinstance(crop, six.string_types) and
re.match(r'(?:(-?)(\d+))?,(?:(-?)(\d+))?$', crop))
if edge_crop and filter(None, edge_crop.groups()):
x_right, x_crop, y_bottom, y_crop = edge_crop.groups()
if x_crop:
offset = min(int(target_x) * int(x_crop) // 100, diff_x)
if x_right:
box[0] = diff_x - offset
box[2] = source_x - offset
else:
box[0] = offset
box[2] = source_x - (diff_x - offset)
if y_crop:
offset = min(int(target_y) * int(y_crop) // 100, diff_y)
if y_bottom:
box[1] = diff_y - offset
box[3] = source_y - offset
else:
box[1] = offset
box[3] = source_y - (diff_y - offset)
# See if the image should be "smart cropped".
elif crop == 'smart':
left = top = 0
right, bottom = source_x, source_y
while diff_x:
slice = min(diff_x, max(diff_x // 5, 10))
start = im.crop((left, 0, left + slice, source_y))
end = im.crop((right - slice, 0, right, source_y))
add, remove = _compare_entropy(start, end, slice, diff_x)
left += add
right -= remove
diff_x = diff_x - add - remove
while diff_y:
slice = min(diff_y, max(diff_y // 5, 10))
start = im.crop((0, top, source_x, top + slice))
end = im.crop((0, bottom - slice, source_x, bottom))
add, remove = _compare_entropy(start, end, slice, diff_y)
top += add
bottom -= remove
diff_y = diff_y - add - remove
box = (left, top, right, bottom)
# Finally, crop the image!
if crop != 'scale':
im = im.crop(box)
return im
def filters(im, detail=False, sharpen=False, **kwargs):
"""
Pass the source image through post-processing filters.
sharpen
Sharpen the thumbnail image (using the PIL sharpen filter)
detail
Add detail to the image, like a mild *sharpen* (using the PIL
``detail`` filter).
"""
if detail:
im = im.filter(ImageFilter.DETAIL)
if sharpen:
im = im.filter(ImageFilter.SHARPEN)
return im
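# --- Illustrative usage sketch (added for clarity; not part of easy-thumbnails).
# The test image, target size and options below are arbitrary assumptions.
if __name__ == '__main__':
    source = Image.new('RGB', (400, 300), 'white')
    im = colorspace(source)
    im = autocrop(im, autocrop=False)
    im = scale_and_crop(im, (100, 100), crop='smart', upscale=True)
    im = filters(im, sharpen=True)
    print(im.size)  # (100, 100)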
| bsd-3-clause | -4,515,509,988,609,493,500 | 35.097458 | 79 | 0.562742 | false |
RamonGuiuGou/l10n-spain | l10n_es_aeat/models/l10n_es_aeat_map_tax.py | 1 | 2460 | # -*- coding: utf-8 -*-
# Copyright 2016 Antonio Espinosa <[email protected]>
# Copyright 2014-2017 Tecnativa - Pedro M. Baeza <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, exceptions, _
class L10nEsAeatMapTax(models.Model):
_name = 'l10n.es.aeat.map.tax'
date_from = fields.Date(string="From Date")
date_to = fields.Date(string="To Date")
map_line_ids = fields.One2many(
comodel_name='l10n.es.aeat.map.tax.line',
inverse_name='map_parent_id', string="Map lines", required=True)
model = fields.Integer(string="AEAT Model", required=True)
@api.multi
@api.constrains('date_from', 'date_to')
def _unique_date_range(self):
for map in self:
domain = [('id', '!=', map.id)]
if map.date_from and map.date_to:
domain += ['|', '&',
('date_from', '<=', map.date_to),
('date_from', '>=', map.date_from),
'|', '&',
('date_to', '<=', map.date_to),
('date_to', '>=', map.date_from),
'|', '&',
('date_from', '=', False),
('date_to', '>=', map.date_from),
'|', '&',
('date_to', '=', False),
('date_from', '<=', map.date_to),
]
elif map.date_from:
domain += [('date_to', '>=', map.date_from)]
elif map.date_to:
domain += [('date_from', '<=', map.date_to)]
date_lst = map.search(domain)
if date_lst:
raise exceptions.Warning(
_("Error! The dates of the record overlap with an "
"existing record.")
)
@api.multi
def name_get(self):
vals = []
for record in self:
name = "%s" % record.model
if record.date_from or record.date_to:
name += " (%s-%s)" % (
record.date_from and
fields.Date.from_string(record.date_from) or '',
record.date_to and
fields.Date.from_string(record.date_to) or '')
vals.append(tuple([record.id, name]))
return vals
| agpl-3.0 | -2,142,182,223,258,955,300 | 39.327869 | 76 | 0.455285 | false |
markovmodel/msmtools | tests/estimation/tests/test_mle_trev_given_pi.py | 1 | 7671 |
# This file is part of MSMTools.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group
#
# MSMTools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
from tests.numeric import assert_allclose
import scipy
import scipy.sparse
import warnings
import msmtools.util.exceptions
from os.path import abspath, join
from os import pardir
from msmtools.estimation.dense.mle.mle_trev_given_pi import mle_trev_given_pi as impl_dense
from msmtools.estimation.sparse.mle.mle_trev_given_pi import mle_trev_given_pi as impl_sparse
from msmtools.estimation import tmatrix as apicall
from msmtools.analysis import statdist, is_transition_matrix
testpath = abspath(join(abspath(__file__), pardir)) + '/testfiles/'
def transition_matrix_reversible_fixpi(Z, mu, maxerr=1e-10, maxiter=10000, return_iterations=False,
warn_not_converged=True):
r"""
maximum likelihood transition matrix with fixed stationary distribution
developed by Fabian Paul and Frank Noe
Parameters
----------
Z: ndarray, shape (n,n)
count matrix
mu: ndarray, shape (n)
stationary distribution
maxerr: float
        Will exit (as converged) when the 2-norm of the Lagrange multiplier vector changes less than maxerr
in one iteration
maxiter: int
Will exit when reaching maxiter iterations without reaching convergence.
return_iterations: bool (False)
set true in order to return (T, it), where T is the transition matrix and it is the number of iterations needed
warn_not_converged : bool, default=True
Prints a warning if not converged.
Returns
-------
T, the transition matrix. When return_iterations=True, (T,it) is returned with it the number of iterations needed
"""
it = 0
n = len(mu)
# constants
B = Z + Z.transpose()
# variables
csum = np.sum(Z, axis=1)
if (np.min(csum) <= 0):
raise ValueError('Count matrix has rowsum(s) of zero. Require a count matrix with positive rowsums.')
if (np.min(mu) <= 0):
raise ValueError('Stationary distribution has zero elements. Require a positive stationary distribution.')
if (np.min(np.diag(Z)) == 0):
raise ValueError(
'Count matrix has diagonals with 0. Cannot guarantee convergence of algorithm. Suggestion: add a small prior (e.g. 1e-10) to the diagonal')
l = 1.0 * csum
lnew = 1.0 * csum
q = np.zeros((n))
A = np.zeros((n, n))
D = np.zeros((n, n))
# iterate lambda
converged = False
while (not converged) and (it < maxiter):
# q_i = mu_i / l_i
np.divide(mu, l, q)
# d_ij = (mu_i / mu_j) * (l_j/l_i) + 1
D[:] = q[:, np.newaxis]
D /= q
D += 1
# a_ij = b_ij / d_ij
np.divide(B, D, A)
# new l_i = rowsum_i(A)
np.sum(A, axis=1, out=lnew)
# evaluate change
err = np.linalg.norm(l - lnew, 2)
# is it converged?
converged = (err <= maxerr)
# copy new to old l-vector
l[:] = lnew[:]
it += 1
if warn_not_converged and (not converged) and (it >= maxiter):
        warnings.warn('NOT CONVERGED: 2-norm of Lagrange multiplier vector is still ' +
str(err) + ' > ' + str(maxerr) + ' after ' + str(it) +
' iterations. Increase maxiter or decrease maxerr',
msmtools.util.exceptions.NotConvergedWarning)
    # compute T from Lagrangian multipliers
T = np.divide(A, l[:, np.newaxis])
# return
if return_iterations:
return T, it
else:
return T
impl_dense_Frank = transition_matrix_reversible_fixpi
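# Illustrative call of the fixed-pi estimator above (added for clarity; the
# numbers are arbitrary assumptions, not original test data):
#     Z = np.array([[10., 2.],
#                   [3., 8.]])
#     mu = np.array([0.6, 0.4])
#     T = transition_matrix_reversible_fixpi(Z, mu)
#     # T is row-stochastic and its stationary distribution agrees with mu up to maxerr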
class Test_mle_trev_given_pi(unittest.TestCase):
def test_mle_trev_given_pi(self):
C = np.loadtxt(testpath + 'C_1_lag.dat')
pi = np.loadtxt(testpath + 'pi.dat')
T_impl_algo_dense_type_dense = impl_dense(C, pi)
T_impl_algo_sparse_type_sparse = impl_sparse(scipy.sparse.csr_matrix(C), pi).toarray()
T_Frank = impl_dense_Frank(C, pi)
T_api_algo_dense_type_dense = apicall(C, reversible=True, mu=pi, method='dense')
T_api_algo_sparse_type_dense = apicall(C, reversible=True, mu=pi, method='sparse')
T_api_algo_dense_type_sparse = apicall(scipy.sparse.csr_matrix(C), reversible=True, mu=pi, method='dense').toarray()
T_api_algo_sparse_type_sparse = apicall(scipy.sparse.csr_matrix(C), reversible=True, mu=pi, method='sparse').toarray()
T_api_algo_auto_type_dense = apicall(C, reversible=True, mu=pi, method='auto')
T_api_algo_auto_type_sparse = apicall(scipy.sparse.csr_matrix(C), reversible=True, mu=pi, method='auto').toarray()
assert_allclose(T_impl_algo_dense_type_dense, T_Frank)
assert_allclose(T_impl_algo_sparse_type_sparse, T_Frank)
assert_allclose(T_api_algo_dense_type_dense, T_Frank)
assert_allclose(T_api_algo_sparse_type_dense, T_Frank)
assert_allclose(T_api_algo_dense_type_sparse, T_Frank)
assert_allclose(T_api_algo_sparse_type_sparse, T_Frank)
assert_allclose(T_api_algo_auto_type_dense, T_Frank)
assert_allclose(T_api_algo_auto_type_sparse, T_Frank)
assert is_transition_matrix(T_Frank)
assert is_transition_matrix(T_impl_algo_dense_type_dense)
assert is_transition_matrix(T_impl_algo_sparse_type_sparse)
assert is_transition_matrix(T_api_algo_dense_type_dense)
assert is_transition_matrix(T_api_algo_sparse_type_dense)
assert is_transition_matrix(T_api_algo_dense_type_sparse)
assert is_transition_matrix(T_api_algo_sparse_type_sparse)
assert is_transition_matrix(T_api_algo_auto_type_dense)
assert is_transition_matrix(T_api_algo_auto_type_sparse)
assert_allclose(statdist(T_Frank), pi)
assert_allclose(statdist(T_impl_algo_dense_type_dense), pi)
assert_allclose(statdist(T_impl_algo_sparse_type_sparse), pi)
assert_allclose(statdist(T_api_algo_dense_type_dense), pi)
assert_allclose(statdist(T_api_algo_sparse_type_dense), pi)
assert_allclose(statdist(T_api_algo_dense_type_sparse), pi)
assert_allclose(statdist(T_api_algo_sparse_type_sparse), pi)
assert_allclose(statdist(T_api_algo_auto_type_dense), pi)
assert_allclose(statdist(T_api_algo_auto_type_sparse), pi)
def test_warnings(self):
C = np.loadtxt(testpath + 'C_1_lag.dat')
pi = np.loadtxt(testpath + 'pi.dat')
ncw = msmtools.util.exceptions.NotConvergedWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('ignore')
warnings.simplefilter('always', category=ncw)
impl_sparse(scipy.sparse.csr_matrix(C), pi, maxiter=1)
assert len(w) == 1
assert issubclass(w[-1].category, ncw)
impl_dense(C, pi, maxiter=1)
assert len(w) == 2
assert issubclass(w[-1].category, ncw)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -7,695,240,549,958,906,000 | 40.918033 | 151 | 0.652066 | false |
Agnishom/ascii-art-007 | main.py | 1 | 5842 | #!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A barebones AppEngine application that uses Facebook for login.
1. Make sure you add a copy of facebook.py (from python-sdk/src/)
into this directory so it can be imported.
2. Don't forget to tick Login With Facebook on your facebook app's
dashboard and place the app's url wherever it is hosted
3. Place a random, unguessable string as a session secret below in
config dict.
4. Fill app id and app secret.
5. Change the application name in app.yaml.
"""
FACEBOOK_APP_ID = "419102868181159"
FACEBOOK_APP_SECRET = "2e68f2262f57651fb9ae90e824ea7967"
import facebook
import webapp2
import os
import jinja2
import urllib2
from google.appengine.ext import db
from webapp2_extras import sessions
config = {}
config['webapp2_extras.sessions'] = dict(secret_key='vdfsbvbhjdhjabhjdfsvhj12e3r4')
class User(db.Model):
id = db.StringProperty(required=True)
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
name = db.StringProperty(required=True)
profile_url = db.StringProperty(required=True)
access_token = db.StringProperty(required=True)
class BaseHandler(webapp2.RequestHandler):
"""Provides access to the active Facebook user in self.current_user
The property is lazy-loaded on first access, using the cookie saved
by the Facebook JavaScript SDK to determine the user ID of the active
user. See http://developers.facebook.com/docs/authentication/ for
more information.
"""
@property
def current_user(self):
if self.session.get("user"):
# User is logged in
return self.session.get("user")
else:
            # Either user just logged in or just saw the first page
# We'll see here
cookie = facebook.get_user_from_cookie(self.request.cookies,
FACEBOOK_APP_ID,
FACEBOOK_APP_SECRET)
if cookie:
# Okay so user logged in.
# Now, check to see if existing user
user = User.get_by_key_name(cookie["uid"])
if not user:
# Not an existing user so get user info
graph = facebook.GraphAPI(cookie["access_token"])
profile = graph.get_object("me")
user = User(
key_name=str(profile["id"]),
id=str(profile["id"]),
name=profile["name"],
profile_url=profile["link"],
access_token=cookie["access_token"]
)
user.put()
elif user.access_token != cookie["access_token"]:
user.access_token = cookie["access_token"]
user.put()
# User is now logged in
self.session["user"] = dict(
name=user.name,
profile_url=user.profile_url,
id=user.id,
access_token=user.access_token
)
return self.session.get("user")
return None
def dispatch(self):
"""
This snippet of code is taken from the webapp2 framework documentation.
See more at
http://webapp-improved.appspot.com/api/webapp2_extras/sessions.html
"""
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
"""
This snippet of code is taken from the webapp2 framework documentation.
See more at
http://webapp-improved.appspot.com/api/webapp2_extras/sessions.html
"""
return self.session_store.get_session()
class HomeHandler(BaseHandler):
def get(self):
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(dict(
facebook_app_id=FACEBOOK_APP_ID,
current_user=self.current_user
)))
def post(self):
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(dict(
facebook_app_id=FACEBOOK_APP_ID,
current_user=self.current_user
)))
class Upload(BaseHandler):
def post(self):
pic = urllib2.urlopen('http://ascii-art-007.appspot.com/Img?id='+ self.current_user['id'])
graph = facebook.GraphAPI(self.current_user['access_token'])
graph.put_photo(pic, message = "Try yours at http://apps.facebook.com/asciidp")
self.redirect('/')
class LogoutHandler(BaseHandler):
def get(self):
if self.current_user is not None:
self.session['user'] = None
self.redirect('/')
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__))
)
app = webapp2.WSGIApplication(
[('/', HomeHandler), ('/logout', LogoutHandler),('/Upload',Upload)],
debug=True,
config=config
)
| mit | -5,926,090,419,844,823,000 | 34.061728 | 98 | 0.610236 | false |
sdbonin/SOQresearch | SOQplot.py | 1 | 2527 | """
SOQplot.py - sdbonin (work in progress)
read the _plot arrays from their txt files and plot them
"""
import numpy as np
import matplotlib.pyplot as plt
S_plot = np.loadtxt('S_plot.txt',delimiter=',')
q_plot = np.loadtxt('q_plot.txt',delimiter=',')
p_plot = np.loadtxt('p_plot.txt',delimiter=',')
time = np.loadtxt('time.txt',delimiter=',')
S_1r = S_plot[:,0] #= S_1r
S_1x = S_plot[:,1] #= S_1x
S_1y = S_plot[:,2] #= S_1y
S_1z = S_plot[:,3] #= S_1z
S_2r = S_plot[:,4] #= S_2r
S_2x = S_plot[:,5] #= S_2x
S_2y = S_plot[:,6] #= S_2y
S_2z = S_plot[:,7] #= S_2z
q_1x = q_plot[:,1] #= q_1x
q_1y = q_plot[:,2] #= q_1y
q_1z = q_plot[:,3] #= q_1z
q_2x = q_plot[:,5] #= q_2x
q_2y = q_plot[:,6] #= q_2y
q_2z = q_plot[:,7] #= q_2z
q_1r = q_plot[:,0] #= q_1r
q_2r = q_plot[:,4] #= q_2r
p_1x = p_plot[:,1] #= p_1x
p_1y = p_plot[:,2] #= p_1y
p_1z = p_plot[:,3] #= p_1z
p_2x = p_plot[:,5] #= p_2x
p_2y = p_plot[:,6] #= p_2y
p_2z = p_plot[:,7] #= p_2z
p_1r = p_plot[:,0] #= p_1r
p_2r = p_plot[:,4] #= p_2r
plt.figure()
plt.subplot(221)
#plt.semilogy(time,np.abs(S_1r),label='S_1r',color='purple')
plt.plot(time,np.abs(S_1r),label='S_1r',color='purple')
plt.plot(time,S_1x,label='S_1i',color='red')
plt.plot(time,S_1y,label='S_1j',color='blue')
plt.plot(time,S_1z,label='S_1k',color='green')
plt.xlabel('time')
plt.ylabel('S_1')
plt.legend(loc='best')
axes = plt.gca()
axes.set_ylim([-1,1])
plt.subplot(222)
#plt.semilogy(time,np.abs(S_2r),label='S_2r',color='purple')
plt.plot(time,S_2r,label='S_2r',color='purple')
plt.plot(time,S_2x,label='S_2i',color='red')
plt.plot(time,S_2y,label='S_2j',color='blue')
plt.plot(time,S_2z,label='S_2k',color='green')
plt.xlabel('time')
plt.ylabel('S_2')
plt.legend(loc='best')
axes = plt.gca()
axes.set_ylim([-1,1])
plt.subplot(223)
#plt.semilogy(time,np.abs(S_2r),label='S_2r',color='purple')
plt.plot(time,S_1r,label='S_1r',color='purple')
plt.plot(time,S_1x,label='S_1i',color='red')
plt.plot(time,S_1y,label='S_1j',color='blue')
plt.plot(time,S_1z,label='S_1k',color='green')
plt.xlabel('time')
plt.ylabel('S_1')
plt.legend(loc='best')
axes = plt.gca()
axes.set_ylim([-1,1])
axes.set_xlim([500,512.35])
plt.subplot(224)
#plt.semilogy(time,np.abs(S_2r),label='S_2r',color='purple')
plt.plot(time,S_2r,label='S_2r',color='purple')
plt.plot(time,S_2x,label='S_2i',color='red')
plt.plot(time,S_2y,label='S_2j',color='blue')
plt.plot(time,S_2z,label='S_2k',color='green')
plt.xlabel('time')
plt.ylabel('S_2')
plt.legend(loc='best')
axes = plt.gca()
axes.set_ylim([-1,1])
axes.set_xlim([0,12.35])
plt.show()
| mit | -2,696,191,542,874,873,000 | 25.051546 | 60 | 0.611397 | false |
taigaio/taiga-back | taiga/userstorage/migrations/0001_initial.py | 1 | 1491 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taiga.base.db.models.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='StorageEntry',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='created date')),
('modified_date', models.DateTimeField(verbose_name='modified date', auto_now=True)),
('key', models.CharField(max_length=255, verbose_name='key')),
('value', taiga.base.db.models.fields.JSONField(verbose_name='value', blank=True, default=None, null=True)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL, verbose_name='owner', related_name='storage_entries', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'storages entries',
'verbose_name': 'storage entry',
'ordering': ['owner', 'key'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='storageentry',
unique_together=set([('owner', 'key')]),
),
]
| agpl-3.0 | -7,703,106,756,495,621,000 | 39.297297 | 154 | 0.589537 | false |
dper/pumptweet | pumptweet/PumpLogin.py | 1 | 3481 | import os.path
import sys
import twitter
from dateutil.parser import parse
from configparser import ConfigParser
from pypump import PyPump
from pypump import Client
from pypump.exceptions import ClientException
from requests.exceptions import ConnectionError
def simple_verifier(url):
print('Please follow the instructions at the following URL:')
print(url)
	return input("Verifier: ") # the verifier is a string
class PumpTweetParser:
"""Parses the ini file and provides the results on demand."""
# This file must exist in the current directory.
filename = 'PumpTweet.ini'
# Parses the ini file.
def parse_ini(self):
print('Reading the config file...')
# This verifies that the ini file exists.
if not os.path.isfile(self.filename):
message = self.filename + ' not found.'
raise Exception(message)
parser = ConfigParser()
parser.read(self.filename)
self._parser = parser
self._history = True
#self._recent = parser.get('history', 'recent')
if 'recent' in parser['history']:
self._recent = parser['history']['recent']
else:
self._history = False
# Converts the date to a usable form.
if 'published' in parser['history']:
date = parser['history']['published']
try:
self._published = parse(date)
except ValueError:
pass
else:
self._history = False
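	# Illustrative PumpTweet.ini layout (reconstructed from the option names read
	# here and in the login methods below; all values are placeholders):
	#   [pump]
	#   username = alice@example.pump
	#   [twitter]
	#   key = <consumer key>
	#   secret = <consumer secret>
	#   token = <access token>
	#   token_secret = <access token secret>
	#   [history]
	#   recent = <id of the last crossposted note>
	#   published = <date of the last crossposted note>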
# Logs in to the Pump server.
def pump_login(self):
print('Logging into the Pump server...')
username = self._parser.get('pump', 'username')
client = Client(
webfinger = username,
name = "Pump.io",
type = "native")
try:
pump = PyPump(
client = client,
verifier_callback = simple_verifier)
except ConnectionError as e:
domain = username.split('@')[1]
print('Error: Unable to connect to ' + domain + '.')
print(e)
sys.exit()
except ClientException:
domain = username.split('@')[1]
print('Error: Pump server not found at ' + domain + '.')
sys.exit()
me = pump.Person(username)
self._username = username
self._pump = pump
self._me = me
# Logs in to Twitter.
def twitter_login(self):
print('Logging into Twitter...')
key = self._parser.get('twitter', 'key')
secret = self._parser.get('twitter', 'secret')
token = self._parser.get('twitter', 'token')
token_secret = self._parser.get('twitter', 'token_secret')
api = twitter.Api(
consumer_key=key,
consumer_secret=secret,
access_token_key=token,
access_token_secret=token_secret
)
self._api = api
def __init__(self):
self.parse_ini()
self.pump_login()
self.twitter_login()
# Writes the latest update Pump ID in the ini file.
# Be careful when changing this. It rewrites the ini file.
def update_recent(self, latest, published):
self._parser.set('history', 'recent', str(latest))
self._parser.set('history', 'published', str(published))
with open(self.filename, 'w') as inifile:
self._parser.write(inifile)
# Returns the ID for the last update (from the ini file).
def get_recent(self):
return self._recent
# Returns the datetime of the last update (from the ini file).
def get_published(self):
return self._published
# Returns True iff there is valid history (from the ini file).
def get_history(self):
return self._history
# Returns the Pump user object.
def get_pump_me(self):
return self._me
# Returns the Twitter user object.
def get_twitter_api(self):
return self._api
# Returns the pump username.
def get_pump_username(self):
return self._username
| mit | -6,776,578,844,620,818,000 | 24.595588 | 63 | 0.687159 | false |
ArcherSys/ArcherSys | Scripts/rstpep2html.py | 1 | 2234 | #!K:\ArcherVMPeridot\htdocs\Scripts\python.exe
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
               + default_description)
publish_cmdline(reader_name='pep', writer_name='pep_html',
                description=description)
| mit | -6,067,688,109,114,015,000 | 26.580247 | 76 | 0.713518 | false |
berkmancenter/mediacloud | apps/crawler-fetcher/tests/python/test_create_child_download_for_story_content_delay.py | 1 | 2238 | # noinspection PyProtectedMember
from crawler_fetcher.new_story import _create_child_download_for_story
from .setup_test_stories import TestStories
class TestCreateChildDownloadForStoryContentDelay(TestStories):
def test_create_child_download_for_story_content_delay(self):
"""Test create_child_download_for_story() with media.content_delay set."""
downloads = self.db.query('SELECT * FROM downloads').hashes()
assert len(downloads) == 1
content_delay_hours = 3
self.db.query("""
UPDATE media
SET content_delay = %(content_delay)s -- Hours
WHERE media_id = %(media_id)s
""", {
'content_delay': content_delay_hours,
'media_id': self.test_medium['media_id'],
})
_create_child_download_for_story(
db=self.db,
story=self.test_story,
parent_download=self.test_download,
)
parent_download = self.db.query("""
SELECT EXTRACT(EPOCH FROM download_time)::int AS download_timestamp
FROM downloads
WHERE downloads_id = %(downloads_id)s
""", {'downloads_id': self.test_download['downloads_id']}).hash()
assert parent_download
child_download = self.db.query("""
SELECT EXTRACT(EPOCH FROM download_time)::int AS download_timestamp
FROM downloads
WHERE parent = %(parent_downloads_id)s
""", {'parent_downloads_id': self.test_download['downloads_id']}).hash()
assert child_download
time_difference = abs(parent_download['download_timestamp'] - child_download['download_timestamp'])
# 1. I have no idea in which timezone are downloads being stored (it's definitely not UTC, maybe
# America/New_York)
# 2. It appears that "downloads.download_time" has dual-use: "download not earlier than" for pending downloads,
# and "downloaded at" for completed downloads, which makes things more confusing
#
# So, in a test, let's just be happy if the download times differ (which might not even be the case depending on
# server's / database's timezone).
assert time_difference > 10
| agpl-3.0 | 4,825,333,151,816,424,000 | 40.444444 | 120 | 0.628686 | false |
ktheis/pqtutor | calculator.py | 1 | 21622 | # coding=utf-8
"""
Functions to run an interpreter for expressions containing physical quantities.
Based on the Q class in the quantities module, this provides functions to input and interpret expressions,
calculating their values and showing the steps, and finally storing the quantities so that they can be
used for further calculations.
This is an arithmetic calculator rather than an algebra system (i.e. unknowns are not allowed and will lead to
error messages). Output is either in plain text or in HTML/MathML via LaTeX.
Call hierarchy:
calc(memory, commands, mob):
class State(OrderedDict):
classify_input(a, state):
extract_identifier()
check_name(sym, state):
interpret(t, state): -> mathparser.py
register_result(result0, sym, state):
show_work(result, sym, flags, error=False, addon="", skipsteps=False):
comments(line):
convert_units(input_type, command, quant, units, state):
create_comment(a, state):
markup_comment(): -> comments.py
create_unknown()
change_flag(flags, name, expression):
deal_with_errors(err, a, state):
calc2:
see calc
"""
from form import exdict
from mathparser import CalcError, interpret, scan_it
from mathparser import check_name, extract_identifier
import quantities
from quantities import QuantError, Q, X, latex_name, unitquant, Units
from comments import create_comment, markup_comment
from chemistry import typicalunits, pronounce
#from mpmath import mpf
from fractions import Fraction
#X, Units, Fraction are all for an eval statement
def calc(memory, commands, browser=None):
'''
:param memory: quantities already defined, given as repr()s line by line in a string
:param commands: string of user input specifying math operations to define new quantities
    :param browser: device the output will be sent to (determines format)
:return: everything needed to show the result in a browser and keep state for the next calculation
'''
state = State(memory)
#rint('commands', commands)
command_list = commands.replace('\r', '').split("\n")
try:
for command in command_list:
if not command:
continue
#rint ("command: %s" % command)
input_type, name, expression = classify_input(command, state)
if input_type == Calculation:
name = check_name(name, state)
quantity = interpret(expression, state)
state.printwork(show_work(quantity, name, state.flags))
register_result(quantity, name, state)
elif input_type == Comment:
create_comment(name, state)
elif input_type == Unknown:
create_unknown(name, state)
elif input_type in [ConversionUsing, ConversionIn]:
convert_units(input_type, command, name, expression, state)
elif input_type == Flags:
change_flag(state.flags, name, expression)
# else: pass because command is empty
state.log_input(command)
except (CalcError, OverflowError, QuantError) as err:
deal_with_errors(err, command, state)
return state.export()
# return output, logput, memory, known, mob, linespace
from collections import OrderedDict
class State(OrderedDict):
def __init__(self, memory=None, mob=None):
"""
Loads quantities from previous calculations by evaluating their repr()s
:param memory: String of repr()s
:stores OrderedDict of symbols, output, logput
"""
OrderedDict.__init__(self)
self.flags = set()
self.output = []
self.logput = []
self.good_input = []
self.mob = mob
self.snuck = 0
if mob == 'ipud':
self.flags.add('plain math')
#self['R'] = Q(8.3144598, 'R', Units(kg=1,m=2,s=-2,mol=-1,K=-1), 4.8e-06, {'mol', 'J', 'K'})
if memory:
old = memory.replace('\r', '').split('\n')
for a in old:
if a.startswith('__') or a == 'plain math':
self.flags.add(a)
else:
q = eval(a)
self[q.name] = q
def addsnuck(self, q):
name = 'Snuck%s' % self.snuck
self.snuck += 1
self[name] = q
def popsnuck(self):
if self.snuck > 0:
self.snuck -= 1
name = 'Snuck%s' % self.snuck
del(self[name])
else:
pass #raise ZeroDivisionError
def printit(self, str):
self.output.append(str)
def logit(self, str):
self.logput.append(str)
def printnlog(self, str):
self.printit(str)
self.logput.append(str)
def printwork(self, outlog):
#rint()
#rint('-2', self.output[-2][-80:])
cond1 = len(self.output) >= 2
cond2 = cond1 and not self.output[-2].endswith('>')
if cond1 and cond2:
self.printnlog('<br>')
#rint('-1', self.output[-1][-80:])
#rint(' 0', outlog[0][0][:80])
#rint(' 1', outlog[0][1][:80])
self.output.extend(outlog[0])
self.logput.extend(outlog[1])
def log_input(self, inp):
self.good_input.append(inp)
def export(self):
if self.output and not self.output[-1].endswith("<hr>"):
self.output = ["<hr>"] + self.output
memory = [q.__repr__() for q in self.values()]
flags = [f for f in self.flags]
memory.extend(flags)
m = '\n'.join(memory)
known = [s + " = " + self[s].__str__() for s in self]
if "__latex__" in self.flags:
self.output = ["<pre>"] + self.output + ["</pre>"]
input_log = '\n'.join(self.good_input)
if not input_log.endswith('\n'):
input_log = input_log + '\n'
verbose_work = '\n'.join(self.output)
brief_work = '\n'.join(self.logput)
linespace = '40%' if '__scrunch__' in self.flags else '120%'
return (verbose_work, brief_work, m, known, linespace), input_log
def classify_input(a, state):
'''
:param a: the user input string containing a calculation, unit conversion or comment
:param state: contains known quantities as ordered dict, along with flags and output
:return: a tuple (type of input, symbol name, expression/units)
Empty, Calculation, ConversionIn, ConversionUsing, Comment, Flags, Unknown = range(7)
>>> classify_input(' ', State())
(0, None, None)
>>> classify_input('K[A<=>B] = 13', State())
(1, u'K[A<=>B]', u' 13')
>>> classify_input('R using J', State())
(3, u'R', u'J')
>>> classify_input('K[A<=>B] in mM', State())
(2, u'K[A<=>B]', u'mM')
>>> classify_input('5 + 6', State())
(1, u'result', u'5 + 6')
>>> classify_input('#comment', State())
(4, None, None)
>>> classify_input('!H2O', State())
(4, None, None)
'''
#rint('a', a)
if not a or not a.strip():
return Empty, None, None
#rint('wahh', '\n'.join(state.output[-3:]))
if '__newby__' in state.flags and not a.startswith('__'):
        state.printit('<pre style="color:maroon"><b>>>>> %s</b></pre>' % a.replace('<','&lt;').replace('>','&gt;'))
elif not a.startswith('__'):
state.printit("<br>" if not '/h' in ''.join(state.output[-1:]) else '')
state.logit("<br>" if not '</h' in ''.join(state.logput[-1:]) else '')
if a[0] in "!#@":
return Comment, a, None
a = a.strip()
if a.startswith('__'):
if not '=' in a:
return Comment, a, None
name, onoff = a.split('=', 1)
return Flags, name.strip(), onoff
m = extract_identifier(a)
start = m.end() if m else 0
rest = a[start:].strip()
if m and rest.startswith('='):
r2 = rest[1:].strip()
if not r2:
return Empty, None, None
if r2[0] == '?':
return Unknown, a[:start], None
scanned, remainder = scan_it(rest[1:])
if remainder and remainder.startswith('='):
return Comment, a, None
return Calculation, a[:start], rest[1:] # Calculation #
elif m and a[start:].startswith(' using') and a[:start].strip() in state:
return ConversionUsing, a[:start], rest[len('using'):].strip()
elif m and a[start:].startswith(' in ') and a[:start].strip() in state:
return ConversionIn, a[:start], rest[len('in'):].strip()
if '__newby__' in state.flags:
raise CalcError("Your tutor says: Please come up with a name for the quantity you are calculating")
try:
interpret(a, state, warning=False)
return Calculation, "result", a
except:
return Comment, a, None
Empty, Calculation, ConversionIn, ConversionUsing, Comment, Flags, Unknown = range(7)
def register_result(result0, sym, state, keep_provenance=False):
"""
Enters the quantity that was just calculated into the database
:param result0: quantity Q()
:param sym: name of the quantity
:param state: contains known quantities as ordered dict, along with flags and output
"""
if not keep_provenance:
result0.provenance = []
result0.name = sym[:]
if hasattr(result0,'depth'):
del result0.depth
if sym in state and '__allowupdate__' not in state.flags:
state.printit('<div style="color: green;">Warning: Updated value of \\(%s\\)</div><br>' % (latex_name(sym)))
state[sym] = result0
if '__fracunits__' not in state.flags:
for u in result0.units:
            if u.denominator != 1:  # oddly, ints have a denominator attribute and floats don't
state.printit('<div style="color: green;">Warning: Units have non-integer exponents %s</div><br>' % u)
if "__checkunits__" in state.flags:
if len(sym) == 1 or sym[1] in "_0123456789[" or sym[0] == "[":
if sym[0] in typicalunits and result0.units != typicalunits[sym[0]][0]:
state.printit(
'<div style="color: green;">Warning: \\(%s\\) looks like a %s, but units are strange</div><br>' % (
latex_name(sym), typicalunits[sym[0]][1]))
latex_subs = {"%s / %s": "\\dfrac{%s}{%s}",
              "%s * %s": "%s \\cdot %s",
              "%s ^ %s": "{%s}^{%s}",
              "exp0(%s)": "e^{%s}",
              "exp(%s)": "\\mathrm{exp}【%s】",
              "log(%s)": "\\mathrm{log}【%s】",
              "ln(%s)": "\\mathrm{ln}【%s】",
              "sin(%s)": "\\mathrm{sin}【%s】",
              "cos(%s)": "\\mathrm{cos}【%s】",
              "tan(%s)": "\\mathrm{tan}【%s】",
              "sqrt(%s)": "\\sqrt{%s}",
              "quadn(%s": "\\mathrm{quadn}(%s",
              "quadp(%s": "\\mathrm{quadp}(%s",
              "avg(%s": "\\mathrm{avg}(%s",
              "min(%s": "\\mathrm{min}(%s",
              "sum(%s": "\sum (%s",
              "max(%s": "\\mathrm{max}(%s",
              "abs(%s)": "\\mathrm{abs}【%s】",
              "moredigits(%s)": "\\mathrm{moredigits}(%s)",
              "uncertainty(%s)": "\\mathrm{uncertainty}(%s)",
              }
def show_work(result, sym, flags, error=False, addon="", skipsteps=False):
"""
Shows the steps in getting from formula to calculated value. This function is called not only by calc(),
but also by convert_units() and deal_with_errors() to show steps in calculations.
:param result: value and provenance of the quantity Q()
:param sym: name of the quantity
:param flags: Switches that determine how much detail is shown
:param error: True if used to show an error in a step of a calculation
:param addon: Used when called from convert_units(ConversionIn)
:param skipsteps: True when intermediate steps are to be skipped
:return: tuple containing detailed output, brief output
"""
output = []
logput = []
math = not 'plain math' in flags
if math:
writer = quantities.latex_writer
if not "__latex__" in flags:
logput.append('''<span title='%s' style="color:navy; cursor:pointer; font-size:12pt;" onclick="insertAtCaret('commands','%s ', 0)">''' % (pronounce(sym, result.units), sym))
output.append('''<span title='%s' style="color:navy; cursor:pointer; font-size:12pt;" onclick="insertAtCaret('commands','%s ', 0)">''' % (pronounce(sym, result.units), sym))
else:
writer = quantities.ascii_writer
subs = latex_subs if math else None
d = result.setdepth()
if math:
template1 = "\(%s = %s%s\)%s<br>"
template2 = "<br>\(\\ \\ \\ =%s%s\)<br>"
else:
template1 = "%s = %s%s%s" if d <= 0 else "%s = %s\n = %s%s"
template2 = " = %s%s"
task = result.steps(-1, writer, subs, ()) # task
if '__hidenumbers__' in flags:
task = result.steps(-1, quantities.latex_writer, subs)
name = latex_name(sym) if math else sym
output.append(template1 % (name, task, addon, markup_comment(result.comment)))
logput.append(template1 % (name, task, addon, markup_comment(result.comment)))
if not skipsteps:
for dd in range(1, d + 1):
if dd == 1:
first = result.steps(dd, writer, subs, flags)
if '__hidenumbers__' in flags:
first = result.steps(dd, quantities.latex_writer, subs, {'__hidenumbers__'})
if first != task:
output.append(template2 % (first, addon))
else:
output.append(template2 % (result.steps(dd, writer, subs, flags), addon)) # intermediate steps
result_str = result.steps(0, writer, subs, flags) # result
if '__hideunits__' in flags:
task = result.steps(-1, writer, subs, flags)
if result_str != task and not error and not ('__hidenumbers__' in flags and d == 0):
logput.append(template2 % (result_str, addon))
output.append(template2 % (result_str, addon))
if math and not '__latex__' in flags:
logput.append('<br></span>')
output.append('<br></span>')
return output, logput
def convert_units(input_type, command, quant, units, state):
"""
Shows the quantity in different units, either once only ('in') or from now on ('using')
:param input_type: Whether conversion is with "using" or "in"
:param command: user input
:param quant: Q() to be converted
:param units: requested units
:param state: contains known quantities as ordered dict, along with flags and output
:raise CalcError: if requested units are unknown
"""
flags2 = set(i for i in state.flags if i != '__hideunits__')
if input_type == ConversionUsing:
print(repr(state[quant.strip()]))
if units in ['°ΔC','°aC']:
prefu = [units]
q = state[quant.strip()] + Q(0.0)
if units == '°aC' and unitquant['K'].units != q.units:
raise CalcError("Only quantities in kelvin may be converted to celsius")
else:
prefu = units.split()
for p in prefu:
if p not in unitquant:
raise CalcError(
"PQCalc does not recognize the unit '%s', so 'using' does not work. Try 'in' instead." % p)
try:
q = state[quant.strip()] + Q(0.0)
except KeyError:
raise CalcError("The quantity '%s' is not defined yet. Check for typos." % quant.strip())
q.name = ''
q.provenance = None
q.comment = ''
outp, _ = show_work(q, quant, flags2)
output = (outp[:-1])
state[quant.strip()].prefu = set(prefu)
q_old = state[quant.strip()]
if q_old.provenance: # when called by calc2
q_old.name = ''
q_old.provenance = None
q = q_old + Q(0.0)
q.comment = ''
outp, _ = show_work(q, quant, flags2)
output.extend(outp[-2 if not 'plain math' in state.flags else -1:])
q = state[quant.strip()] + Q(0.0)
q.name = ""
q.provenance = None
_, logp = show_work(q, quant, flags2)
state.printit('\n'.join(output))
state.logit('\n'.join(logp))
print(repr(state[quant.strip()]))
else:
tmp = interpret(units, state, warning=False)
try:
qq = state[quant.strip()] / tmp
except KeyError:
raise CalcError("The quantity '%s' is not defined yet. Check for typos." % quant.strip())
addon = ("\mathrm{\ %s}" % quantities.latex_name(units)) if state.mob != "ipud" else units
work = show_work(qq, quant, flags2, addon=addon)
state.printwork(work)
def create_unknown(sym, state):
state.printnlog('''<span title="%s" style="color:navy; font-size:12pt; cursor:pointer" onclick="insertAtCaret('commands','%s = ', 0)">''' % (pronounce(sym), sym))
state.printnlog("\(%s =\\ ?\)<br>" % latex_name(sym))
state.printnlog('<br></span>')
def change_flag(flags, name, expression):
if expression.strip() != '0':
flags.add(name)
else:
flags.discard(name)
def deal_with_errors(err, a, state):
"""
:param err: the exception that was raised
:param a: the imput that led to the exception
:param state: contains known quantities as ordered dict, along with flags and output
:raise err: For exceptions not raised explicitly "unexpected errors"
"""
if type(err) is QuantError:
# QuantError((complaint, Q(0, name, provenance=provenance)))
problem = err.args[0]
state.printit('<div style="color: red;"><pre>' + a + "</pre>")
state.printit('Calculation failed: %s<br><br>' % problem[0])
if problem[1]:
output, _ = show_work(problem[1], "problem", state.flags, error=True)
state.printit('\n'.join(output))
state.printit("</div>")
elif type(err) is ZeroDivisionError:
state.printit("Overflow error, sorry: %s" % a)
elif type(err) is CalcError:
if "__newby__" not in state.flags:
state.printit("<pre>\n%s</pre>" % a)
state.printit(err.args[0])
else:
raise err
def calc2(command, state=State(), keep_provenance=True):
'''
:param command:
:param state:
:return: type, name, q, expression = calc2(line, state)
'''
quantity = None
name = None
type_text = 'Calc'
try:
input_type, name, expression = classify_input(command, state)
if input_type == Calculation:
name = check_name(name, state)
quantity = interpret(expression, state, keep_onthefly=True)
if quantity.provenance and any(name == q.name for q in quantity.provenance):
print('uhoh')
if not quantity.provenance:
if quantity.name and not name.startswith('M['):
quantity = Q(number = quantity.number, name='is %s', units = quantity.units, uncert = quantity.uncert, provenance=(quantity,))
else:
if quantity.units != Units():
state.popsnuck()
type_text = 'Known'
if quantity.name == '-%s' and not quantity.provenance[0].provenance:
type_text = 'Known'
register_result(quantity.copy(), name, state, keep_provenance=keep_provenance)
elif input_type in [ConversionUsing, ConversionIn]:
convert_units(input_type, command, name, expression, state)
return 'Conversion', None, None, None
elif input_type == Unknown:
return 'Unknown', name, None, None
else:
return 'Comment', None, None, None
except (CalcError, OverflowError, QuantError) as err:
return 'Error', None, None, None
return type_text, name, quantity, expression
def test_examples(math, minex, maxex):
a = 0
for ex, commands in exdict.items():
a += 1
if False and a in [2, 3, 12, 14, 19 , 20, 21, 22, 24, 25, 26, 27, 42]:
#print(a, ex)
continue
if a < minex:
continue
if a > maxex:
break
print(a)
#print('########################################################################')
print(a, '# %-60s#' % ex)
#print('########################################################################')
#print (commands)
try:
if a == 45:
gg = 5
results, input_log = calc("", commands, math)
except:
raise
verbose_work, brief_work, m, known, linespace = results
for line in verbose_work[1:]:
pass#print(line)
def profile_program():
import cProfile
memory = ""
cProfile.run(task)
if __name__ == "__main__":
print('hello')
#profile_program()
#import sys; sys.stdout = open('C:/Users/Karsten/Desktop/tmp.txt', 'w');
#calc('', 'a = 10^10^10', 'ipud')
#test_examples('iphone')
test_examples('ipud', 0, 400)
print('did the examples')
test_examples('iphone', 0, 400)
print('did the examples again')
'''commands = input(">>> ")
memory = ""
while True:
output, _, memory, _, _, _, _ = calc(memory, commands, 'ipud')
#output, logput, memory, known, mob, oneline, linespace
for line in output[1:]:
print(line)
memory = '\n'.join(memory)
commands = input(">>> ")
'''
"""
Test input that should fail gracefully:
sadf = 56 * (67 )()
asdf = 5 + t + &#@
gg = sin(45)
vv = 10^10^10
omg = 1 / 10^-1000
"""
| gpl-3.0 | -7,214,875,545,875,896,000 | 36.877193 | 185 | 0.560398 | false |
freaxmind/miage-m1 | csi/FreeMAP/producteurs/views.py | 1 | 1521 | # -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render_to_response, redirect
from django.template.context import RequestContext
from producteurs.forms import ProducteurPropositionForm
from gestion.models import Proposition, Producteur
@login_required()
def propositions(request):
if not 'producteurs' in [g['name'] for g in request.user.groups.values()]:
raise Exception("Vous n'avez pas la permission de proposer des produits")
producteur = Producteur.objects.get(login=request.user.username)
encours, historique = Proposition.par_producteur(producteur)
var = {'encours': encours, 'historique': historique,}
return render_to_response('proposition.html', var,
context_instance=RequestContext(request))
@csrf_exempt
@login_required()
def nouvelle(request):
if request.method == 'POST':
form = ProducteurPropositionForm(request.POST)
if form.is_valid():
proposition = form.save(commit=False)
proposition.quantite_commandee = 0
proposition.producteur = Producteur.objects.get(login=request.user.username)
proposition.save()
return redirect(propositions)
else:
form = ProducteurPropositionForm()
var = {'form': form}
return render_to_response('nouvelle.html', var,
context_instance=RequestContext(request))
| gpl-3.0 | -8,363,585,596,895,973,000 | 35.214286 | 88 | 0.692308 | false |
openstack/ironic | ironic/drivers/modules/ibmc/utils.py | 1 | 6956 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iBMC Driver common utils
"""
import functools
import os
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import strutils
import tenacity
from ironic.common import exception
from ironic.common.i18n import _
from ironic.conductor import task_manager
from ironic.conf import CONF
ibmc_client = importutils.try_import('ibmcclient')
ibmc_error = importutils.try_import('ibmc_client.exceptions')
LOG = log.getLogger(__name__)
REQUIRED_PROPERTIES = {
'ibmc_address': _('The URL address to the iBMC controller. It must '
'include the authority portion of the URL. '
'If the scheme is missing, https is assumed. '
'For example: https://mgmt.vendor.com. Required.'),
'ibmc_username': _('User account with admin/server-profile access '
'privilege. Required.'),
'ibmc_password': _('User account password. Required.'),
}
OPTIONAL_PROPERTIES = {
'ibmc_verify_ca': _('Either a Boolean value, a path to a CA_BUNDLE '
'file or directory with certificates of trusted '
'CAs. If set to True the driver will verify the '
'host certificates; if False the driver will '
'ignore verifying the SSL certificate. If it\'s '
'a path the driver will use the specified '
'certificate or one of the certificates in the '
'directory. Defaults to True. Optional.'),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
def parse_driver_info(node):
"""Parse the information required for Ironic to connect to iBMC.
:param node: an Ironic node object
:returns: dictionary of parameters
:raises: InvalidParameterValue on malformed parameter(s)
:raises: MissingParameterValue on missing parameter(s)
"""
driver_info = node.driver_info or {}
missing_info = [key for key in REQUIRED_PROPERTIES
if not driver_info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
'Missing the following iBMC properties in node '
'%(node)s driver_info: %(info)s') % {'node': node.uuid,
'info': missing_info})
# Validate the iBMC address
address = driver_info['ibmc_address']
if '://' not in address:
address = 'https://%s' % address
parsed = netutils.urlsplit(address)
if not parsed.netloc:
raise exception.InvalidParameterValue(
_('Invalid iBMC address %(address)s set in '
'driver_info/ibmc_address on node %(node)s') %
{'address': address, 'node': node.uuid})
# Check if verify_ca is a Boolean or a file/directory in the file-system
verify_ca = driver_info.get('ibmc_verify_ca', True)
if isinstance(verify_ca, str):
if not os.path.exists(verify_ca):
try:
verify_ca = strutils.bool_from_string(verify_ca, strict=True)
except ValueError:
raise exception.InvalidParameterValue(
_('Invalid value type set in driver_info/'
'ibmc_verify_ca on node %(node)s. '
'The value should be a Boolean or the path '
'to a file/directory, not "%(value)s"'
) % {'value': verify_ca, 'node': node.uuid})
elif not isinstance(verify_ca, bool):
raise exception.InvalidParameterValue(
_('Invalid value type set in driver_info/ibmc_verify_ca '
'on node %(node)s. The value should be a Boolean or the path '
'to a file/directory, not "%(value)s"') % {'value': verify_ca,
'node': node.uuid})
return {'address': address,
'username': driver_info.get('ibmc_username'),
'password': driver_info.get('ibmc_password'),
'verify_ca': verify_ca}
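# Illustrative sketch (values are placeholders, not from any real node): a minimal
# node.driver_info accepted by parse_driver_info() above would be
#   {'ibmc_address': 'https://192.0.2.10',
#    'ibmc_username': 'admin',
#    'ibmc_password': 'password',
#    'ibmc_verify_ca': False}
# which parses to
#   {'address': 'https://192.0.2.10', 'username': 'admin',
#    'password': 'password', 'verify_ca': False}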
def revert_dictionary(d):
return {v: k for k, v in d.items()}
def handle_ibmc_exception(action):
"""Decorator to handle iBMC client exception.
Decorated functions must take a :class:`TaskManager` as the first
parameter.
"""
def decorator(f):
def should_retry(e):
connect_error = isinstance(e, exception.IBMCConnectionError)
if connect_error:
LOG.info(_('Failed to connect to iBMC, will retry now. '
'Max retry times is %(retry_times)d.'),
{'retry_times': CONF.ibmc.connection_attempts})
return connect_error
@tenacity.retry(
retry=tenacity.retry_if_exception(should_retry),
stop=tenacity.stop_after_attempt(CONF.ibmc.connection_attempts),
wait=tenacity.wait_fixed(CONF.ibmc.connection_retry_interval),
reraise=True)
@functools.wraps(f)
def wrapper(*args, **kwargs):
# NOTE(dtantsur): this code could be written simpler, but then unit
# testing decorated functions is pretty hard, as we usually pass a
# Mock object instead of TaskManager there.
if len(args) > 1:
is_task_mgr = isinstance(args[1], task_manager.TaskManager)
task = args[1] if is_task_mgr else args[0]
else:
task = args[0]
node = task.node
try:
return f(*args, **kwargs)
except ibmc_error.IBMCConnectionError as e:
error = (_('Failed to connect to iBMC for node %(node)s, '
'Error: %(error)s')
% {'node': node.uuid, 'error': e})
LOG.error(error)
raise exception.IBMCConnectionError(node=node.uuid,
error=error)
except ibmc_error.IBMCClientError as e:
error = (_('Failed to %(action)s for node %(node)s, '
'Error %(error)s')
% {'node': node.uuid, 'action': action, 'error': e})
LOG.error(error)
raise exception.IBMCError(node=node.uuid, error=error)
return wrapper
return decorator
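# Illustrative use of the decorator above (assumed method shape; mirrors how
# task-based driver calls are typically wrapped):
#   @handle_ibmc_exception('set boot device')
#   def set_boot_device(self, task, device, persistent=False):
#       ...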
| apache-2.0 | 7,757,055,855,073,051,000 | 39.44186 | 79 | 0.585825 | false |
schollz/trello-changelog | trello.py | 1 | 2403 | import json
import datetime
import sys
import random
a =json.load(open(sys.argv[1],'rb'))
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
# BOARDS
boards = {}
for board in a['lists']:
boards[board['id']] = board['name']
# CARDS
cards = []
for i in a['cards']:
name = i['name']
labels = i['labels']
ls = []
for j in labels:
ls.append(j['name'])
boardID = i['idList']
board = boards[boardID]
card = {}
card['name']=name
card['labels']=ls
card['list']=board
card['date']=datetime.datetime.strptime(i['dateLastActivity'].split('.')[0], '%Y-%m-%dT%H:%M:%S').strftime('%Y-%m-%d')
cards.append(card)
availableColors = []
with open('colors.txt','r') as f:
for line in f:
availableColors.append(line.decode('utf-8').strip())
random.shuffle(availableColors)
changelog = {}
dates = {}
colors = {}
latestDate = None
for card in cards:
if 'Done' in card['list']:
version = card['list']
if version not in changelog:
changelog[version] = {}
changelog[version]['date'] = card['date']
changelog[version]['features'] = []
dates[changelog[version]['date']] = version
dat = {'feature':card['name'], 'categories': card['labels']}
changelog[version]['features'].append(dat)
for category in card['labels']:
if category not in colors:
colors[category] = availableColors.pop()
versionOrdering = []
for k in sorted(dates.keys(),reverse=True):
versionOrdering.append(dates[k])
changelist = {}
for version in versionOrdering:
heading = changelog[version]['date'] + ' ' + version.replace('Done ','').replace('(','').replace(')','')
changelist[heading] = []
for feature in changelog[version]['features']:
line = '- '
for category in feature['categories']:
line += '<span style="color:' + colors[category] + '; font-family: Courier New;">[' + category + '] </span>'
line += '<span style="font-family: Courier New;">' + feature['feature'] + '</span>'
changelist[heading].append(line)
print "\n\n\n\n\n\n\n# Changelog \n"
for version in versionOrdering:
heading = changelog[version]['date'] + ' ' + version.replace('Done ','').replace('(','').replace(')','')
print "\n\n## " + heading
for line in sorted(changelist[heading]):
print line
print "\n" | mit | 6,869,297,867,386,190,000 | 26.318182 | 120 | 0.61881 | false |
petercable/mi-instrument | mi/instrument/seabird/driver.py | 1 | 22403 | """
@package mi.instrument.seabird.driver
@file mi/instrument/seabird/driver.py
@author Roger Unwin
@brief Base class for seabird instruments
Release notes:
None.
"""
import datetime
import re
from mi.core.log import get_logger, get_logging_metaclass
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol, InitializationType
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.exceptions import InstrumentProtocolException
from mi.core.exceptions import InstrumentParameterException
from mi.core.exceptions import NotImplementedException
from mi.core.exceptions import SampleException
from mi.core.time_tools import get_timestamp_delayed
__author__ = 'Roger Unwin'
__license__ = 'Apache 2.0'
log = get_logger()
NEWLINE = '\r\n'
ESCAPE = "\x1b"
SBE_EPOCH = (datetime.date(2000, 1, 1) - datetime.date(1970, 1, 1)).total_seconds()
# default timeout.
TIMEOUT = 20
DEFAULT_ENCODER_KEY = '__default__'
###############################################################################
# Particles
###############################################################################
class SeaBirdParticle(DataParticle):
"""
Overload the base particle to add in some common parsing logic for SBE
instruments. Add regex methods to help identify and parse multiline
strings.
"""
@staticmethod
def regex():
"""
Return a regex string to use in matching functions. This can be used
for parsing too if more complex parsing isn't needed.
Static methods because it is called outside this class.
@return: uncompiled regex string
"""
        raise NotImplementedException()
@staticmethod
def regex_compiled():
"""
Return a regex compiled regex of the regex
Static methods because it is called outside this class.
@return: compiled regex
"""
        raise NotImplementedException()
def regex_multiline(self):
"""
return a dictionary containing uncompiled regex used to match patterns
in SBE multiline results. includes an encoder method.
@return: dictionary of uncompiled regexs
"""
        raise NotImplementedException()
def regex_multiline_compiled(self):
"""
return a dictionary containing compiled regex used to match patterns
in SBE multiline results.
@return: dictionary of compiled regexs
"""
result = {}
for (key, regex) in self.regex_multiline().iteritems():
result[key] = re.compile(regex, re.DOTALL)
return result
def encoders(self):
"""
return a dictionary containing encoder methods for parameters
a special key 'default' can be used to name the default mechanism
@return: dictionary containing encoder callbacks
"""
        raise NotImplementedException()
def _get_multiline_values(self, split_fun=None):
"""
return a dictionary containing keys and found values from a
multiline sample using the multiline regex
@param: split_fun - function to which splits sample into lines
@return: dictionary of compiled regexs
"""
result = []
if split_fun is None:
split_fun = self._split_on_newline
matchers = self.regex_multiline_compiled()
regexs = self.regex_multiline()
for line in split_fun(self.raw_data):
log.trace("Line: %s" % line)
for key in matchers.keys():
log.trace("match: %s" % regexs.get(key))
match = matchers[key].search(line)
if match:
encoder = self._get_encoder(key)
if encoder:
log.debug("encoding value %s (%s)" % (key, match.group(1)))
value = encoder(match.group(1))
else:
value = match.group(1)
log.trace("multiline match %s = %s (%s)" % (key, match.group(1), value))
result.append({
DataParticleKey.VALUE_ID: key,
DataParticleKey.VALUE: value
})
return result
def _split_on_newline(self, value):
"""
default split method for multiline regex matches
@param: value string to split
@return: list of line split on NEWLINE
"""
return value.split(NEWLINE)
def _get_encoder(self, key):
"""
Get an encoder for a key, if one isn't specified look for a default.
Can return None for no encoder
@param: key encoder we are looking for
@return: dictionary of encoders.
"""
encoder = self.encoders().get(key)
if not encoder:
encoder = self.encoders().get(DEFAULT_ENCODER_KEY)
return encoder
def _map_param_to_xml_tag(self, parameter_name):
"""
@return: a string containing the xml tag name for a parameter
"""
        raise NotImplementedException()
def _extract_xml_elements(self, node, tag, raise_exception_if_none_found=True):
"""
extract elements with tag from an XML node
@param: node - XML node to look in
@param: tag - tag of elements to look for
@param: raise_exception_if_none_found - raise an exception if no element is found
@return: return list of elements found; empty list if none found
"""
elements = node.getElementsByTagName(tag)
if raise_exception_if_none_found and len(elements) == 0:
raise SampleException("_extract_xml_elements: No %s in input data: [%s]" % (tag, self.raw_data))
return elements
def _extract_xml_element_value(self, node, tag, raise_exception_if_none_found=True):
"""
extract element value that has tag from an XML node
@param: node - XML node to look in
@param: tag - tag of elements to look for
@param: raise_exception_if_none_found - raise an exception if no value is found
@return: return value of element
"""
elements = self._extract_xml_elements(node, tag, raise_exception_if_none_found)
children = elements[0].childNodes
if raise_exception_if_none_found and len(children) == 0:
raise SampleException("_extract_xml_element_value: No value for %s in input data: [%s]" % (tag, self.raw_data))
return children[0].nodeValue
def _get_xml_parameter(self, xml_element, parameter_name, data_type=float):
return {DataParticleKey.VALUE_ID: parameter_name,
DataParticleKey.VALUE: data_type(self._extract_xml_element_value(xml_element,
self._map_param_to_xml_tag(parameter_name)))}
########################################################################
# Static helpers.
########################################################################
@staticmethod
def hex2value(hex_value, divisor=None):
"""
Convert a SBE hex value to a value. Some hex values are converted
from raw counts to volts using a divisor. If passed the value
will be calculated, otherwise return an int.
@param hex_value: string to convert
@param divisor: conversion value
@return: int or float of the converted value
"""
if not isinstance(hex_value, basestring):
raise InstrumentParameterException("hex value not a string")
if divisor is not None and divisor == 0:
raise InstrumentParameterException("divisor can not be 0")
value = int(hex_value, 16)
if divisor is not None:
return float(value) / divisor
else:
return value
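    # Illustrative values (assumed divisor, not from any instrument manual):
    #   SeaBirdParticle.hex2value('1A2B')                  -> 6699
    #   SeaBirdParticle.hex2value('1A2B', divisor=13107.0) -> ~0.5111 (scaled float)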
@staticmethod
def yesno2bool(value):
"""
convert a yes no response to a bool
@param value: string to convert
@return: bool
"""
if not isinstance(value, basestring):
raise InstrumentParameterException("value (%r) not a string" % value)
if value.lower() == 'no':
return 0
if value.lower() == 'yes':
return 1
raise InstrumentParameterException("Could not convert '%s' to bool" % value)
@staticmethod
def disabled2bool(value):
"""
convert a disabled/enabled to bool
@param value: string to convert
@return: bool
"""
if not isinstance(value, str):
raise InstrumentParameterException("value not a string")
if value.lower() == 'disabled':
return False
if value.lower() == 'enabled':
return True
raise InstrumentParameterException("Could not convert '%s' to bool" % value)
@staticmethod
def sbetime2unixtime(value):
"""
Convert an SBE integer time (epoch 1-1-2000) to unix time
@param value: sbe integer time
@return: unix time
"""
if not isinstance(value, int):
raise InstrumentParameterException("value (%r) not a int" % value)
return SBE_EPOCH + value
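    # Quick check of the epoch offset defined at module level (SBE_EPOCH = 946684800.0):
    #   sbetime2unixtime(0)     -> 946684800.0  (2000-01-01T00:00:00Z as unix time)
    #   sbetime2unixtime(86400) -> 946771200.0  (one day into the SBE epoch)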
###############################################################################
# Driver
###############################################################################
class SeaBirdInstrumentDriver(SingleConnectionInstrumentDriver):
"""
Base class for all seabird instrument drivers.
"""
###############################################################################
# Protocol
###############################################################################
class SeaBirdProtocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class for seabird driver.
Subclasses CommandResponseInstrumentProtocol
"""
__metaclass__ = get_logging_metaclass(log_level='trace')
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The sbe26plus newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
########################################################################
# Common handlers
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentProtocolException if the update commands and not recognized.
"""
# Command device to initialize parameters and send a config change event.
self._protocol_fsm.on_event(DriverEvent.INIT_PARAMS)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample state.
"""
self._protocol_fsm.on_event(DriverEvent.INIT_PARAMS)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_init_params(self, *args, **kwargs):
"""
initialize parameters
"""
next_state = None
result = []
self._init_params()
return next_state, (next_state, result)
def _handler_autosample_init_params(self, *args, **kwargs):
"""
initialize parameters. For this instrument we need to
put the instrument into command mode, apply the changes
then put it back.
"""
next_state = None
result = []
if self._init_type != InitializationType.NONE:
try:
self._stop_logging()
self._init_params()
finally:
# Switch back to streaming
if not self._is_logging():
log.debug("SBE is logging again")
self._start_logging()
return next_state, (next_state, result)
def _handler_command_get(self, *args, **kwargs):
"""
Get device parameters from the parameter dict. First we set a baseline timestamp
that all data expiration will be calculated against. Then we try to get parameter
value. If we catch an expired parameter then we will update all parameters and get
values using the original baseline time that we set at the beginning of this method.
Assuming our _update_params is updating all parameter values properly then we can
ensure that all data will be fresh. Nobody likes stale data!
@param args[0] list of parameters to retrieve, or DriverParameter.ALL.
@raise InstrumentParameterException if missing or invalid parameter.
@raise InstrumentParameterExpirationException If we fail to update a parameter
on the second pass this exception will be raised on expired data
"""
next_state, result = self._handler_get(*args, **kwargs)
# TODO - match all other return signatures - return next_state, (next_state, result)
return next_state, result
def _handler_command_set(self, *args, **kwargs):
"""
Perform a set command.
@param args[0] parameter : value dict.
@param args[1] parameter : startup parameters?
@retval (next_state, result) tuple, (None, None).
@throws InstrumentParameterException if missing set parameters, if set parameters not ALL and
not a dict, or if parameter can't be properly formatted.
@throws InstrumentTimeoutException if device cannot be woken for set command.
@throws InstrumentProtocolException if set command could not be built or misunderstood.
"""
next_state = None
result = None
startup = False
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('_handler_command_set Set command requires a parameter dict.')
try:
startup = args[1]
except IndexError:
pass
if not isinstance(params, dict):
raise InstrumentParameterException('Set parameters not a dict.')
# For each key, val in the dict, issue set command to device.
# Raise if the command not understood.
else:
self._set_params(params, startup)
return next_state, result
########################################################################
# Private helpers.
########################################################################
def _discover(self, *args, **kwargs):
"""
Discover current state; can be COMMAND or AUTOSAMPLE.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentStateException if the device response does not correspond to
an expected state.
"""
next_state = DriverProtocolState.COMMAND
result = []
try:
logging = self._is_logging()
log.debug("are we logging? %s" % logging)
if logging is None:
next_state = DriverProtocolState.UNKNOWN
elif logging:
next_state = DriverProtocolState.AUTOSAMPLE
else:
next_state = DriverProtocolState.COMMAND
        except NotImplementedException:
log.warning('logging not implemented, defaulting to command state')
pass
return next_state, (next_state, result)
def _sync_clock(self, command, date_time_param, timeout=TIMEOUT, delay=1, time_format="%d %b %Y %H:%M:%S"):
"""
Send the command to the instrument to synchronize the clock
        @param command: command to set date and time
@param date_time_param: date time parameter that we want to set
@param timeout: command timeout
@param delay: wakeup delay
@param time_format: time format string for set command
@raise: InstrumentProtocolException if command fails
"""
# lets clear out any past data so it doesnt confuse the command
self._linebuf = ''
self._promptbuf = ''
log.debug("Set time format(%s) '%s''", time_format, date_time_param)
str_val = get_timestamp_delayed(time_format)
log.debug("Set time value == '%s'", str_val)
self._do_cmd_resp(command, date_time_param, str_val)
########################################################################
# Startup parameter handlers
########################################################################
def apply_startup_params(self):
"""
Apply all startup parameters. First we check the instrument to see
if we need to set the parameters. If they are they are set
correctly then we don't do anything.
If we need to set parameters then we might need to transition to
command first. Then we will transition back when complete.
@todo: This feels odd. It feels like some of this logic should
be handled by the state machine. It's a pattern that we
may want to review. I say this because this command
needs to be run from autosample or command mode.
@raise: InstrumentProtocolException if not in command or streaming
"""
# Let's give it a try in unknown state
log.debug("CURRENT STATE: %s", self.get_current_state())
if (self.get_current_state() != DriverProtocolState.COMMAND and
self.get_current_state() != DriverProtocolState.AUTOSAMPLE):
raise InstrumentProtocolException("Not in command or autosample state. Unable to apply startup params")
log.debug("sbe apply_startup_params, logging?")
logging = self._is_logging()
log.debug("sbe apply_startup_params, logging == %s", logging)
# If we are in streaming mode and our configuration on the
# instrument matches what we think it should be then we
# don't need to do anything.
if not self._instrument_config_dirty():
log.debug("configuration not dirty. Nothing to do here")
return True
try:
if logging:
# Switch to command mode,
log.debug("stop logging")
self._stop_logging()
log.debug("sbe apply_startup_params now")
self._apply_params()
finally:
# Switch back to streaming
if logging:
log.debug("sbe apply_startup_params start logging again")
self._start_logging()
def _start_logging(self):
"""
Issue the instrument command to start logging data
"""
raise NotImplementedException()
def _stop_logging(self):
"""
Issue the instrument command to stop logging data
"""
raise NotImplementedException()
def _is_logging(self):
"""
Is the instrument in logging or command mode.
@return: True if streaming, False if in command, None if we don't know
"""
raise NotImplementedException()
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters
"""
startup = False
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('Set command requires a parameter dict.')
try:
startup = args[1]
except IndexError:
pass
# Only check for readonly parameters if we are not setting them from startup
if not startup:
readonly = self._param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY)
log.debug("set param, but check visibility first")
log.debug("Read only keys: %s", readonly)
for (key, val) in params.iteritems():
if key in readonly:
raise InstrumentParameterException("Attempt to set read only parameter (%s)" % key)
def _update_params(self):
"""
Send instrument commands to get data to refresh the param_dict cache
"""
raise NotImplementedException()
def _apply_params(self):
"""
apply startup parameters to the instrument.
@raise: InstrumentProtocolException if in wrong mode.
"""
config = self.get_startup_config()
log.debug("_apply_params startup config: %s", config)
# Pass true to _set_params so we know these are startup values
self._set_params(config, True)
def _instrument_config_dirty(self):
"""
Read the startup config and compare that to what the instrument
is configured too. If they differ then return True
@return: True if the startup config doesn't match the instrument
@raise: InstrumentParameterException
"""
# Refresh the param dict cache
self._update_params()
startup_params = self._param_dict.get_startup_list()
log.debug("Startup Parameters: %s", startup_params)
for param in startup_params:
if self._param_dict.get(param) != self._param_dict.get_config_value(param):
log.debug("DIRTY: %s %s != %s", param, self._param_dict.get(param), self._param_dict.get_config_value(param))
return True
return False
def _send_wakeup(self):
"""
Send a newline to attempt to wake the sbe26plus device.
"""
self._connection.send(ESCAPE)
self._connection.send(NEWLINE)
| bsd-2-clause | -2,113,255,483,924,937,700 | 36.463211 | 125 | 0.590501 | false |
wger-project/wger | wger/gym/views/admin_notes.py | 1 | 5958 | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import logging
# Django
from django.contrib.auth.mixins import (
LoginRequiredMixin,
PermissionRequiredMixin,
)
from django.contrib.auth.models import User
from django.http.response import HttpResponseForbidden
from django.urls import reverse
from django.utils.translation import (
gettext as _,
gettext_lazy,
)
from django.views.generic import (
CreateView,
DeleteView,
ListView,
UpdateView,
)
# wger
from wger.gym.models import AdminUserNote
from wger.utils.generic_views import (
WgerDeleteMixin,
WgerFormMixin,
)
logger = logging.getLogger(__name__)
class ListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
"""
Overview of all available admin notes
"""
model = AdminUserNote
permission_required = 'gym.add_adminusernote'
template_name = 'admin_notes/list.html'
member = None
def get_queryset(self):
"""
Only notes for current user
"""
return AdminUserNote.objects.filter(member_id=self.kwargs['user_pk'])
def dispatch(self, request, *args, **kwargs):
"""
Can only add notes to users in own gym
"""
if not request.user.is_authenticated:
return HttpResponseForbidden()
user = User.objects.get(pk=self.kwargs['user_pk'])
self.member = user
if user.userprofile.gym_id != request.user.userprofile.gym_id:
return HttpResponseForbidden()
return super(ListView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(ListView, self).get_context_data(**kwargs)
context['member'] = self.member
return context
class AddView(WgerFormMixin, LoginRequiredMixin, PermissionRequiredMixin, CreateView):
"""
View to add a new admin note
"""
model = AdminUserNote
fields = ['note']
title = gettext_lazy('Add note')
permission_required = 'gym.add_adminusernote'
member = None
def get_success_url(self):
"""
Redirect back to user page
"""
return reverse('gym:admin_note:list', kwargs={'user_pk': self.member.pk})
def dispatch(self, request, *args, **kwargs):
"""
Can only add notes to users in own gym
"""
if not request.user.is_authenticated:
return HttpResponseForbidden()
user = User.objects.get(pk=self.kwargs['user_pk'])
self.member = user
gym_id = user.userprofile.gym_id
if gym_id != request.user.userprofile.gym_id:
return HttpResponseForbidden()
return super(AddView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
"""
Set user instances
"""
form.instance.member = self.member
form.instance.user = self.request.user
return super(AddView, self).form_valid(form)
class UpdateView(WgerFormMixin, LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
"""
View to update an existing admin note
"""
model = AdminUserNote
fields = ['note']
permission_required = 'gym.change_adminusernote'
def get_success_url(self):
"""
Redirect back to user page
"""
return reverse('gym:admin_note:list', kwargs={'user_pk': self.object.member.pk})
def dispatch(self, request, *args, **kwargs):
"""
Only trainers for this gym can edit user notes
"""
if not request.user.is_authenticated:
return HttpResponseForbidden()
note = self.get_object()
if note.member.userprofile.gym_id != request.user.userprofile.gym_id:
return HttpResponseForbidden()
return super(UpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(UpdateView, self).get_context_data(**kwargs)
context['title'] = _('Edit {0}').format(self.object)
return context
class DeleteView(WgerDeleteMixin, LoginRequiredMixin, PermissionRequiredMixin, DeleteView):
"""
View to delete an existing admin note
"""
model = AdminUserNote
fields = ('note', )
permission_required = 'gym.delete_adminusernote'
def get_success_url(self):
"""
Redirect back to user page
"""
return reverse('gym:admin_note:list', kwargs={'user_pk': self.object.member.pk})
def dispatch(self, request, *args, **kwargs):
"""
Only trainers for this gym can delete user notes
"""
if not request.user.is_authenticated:
return HttpResponseForbidden()
note = self.get_object()
if note.member.userprofile.gym_id != request.user.userprofile.gym_id:
return HttpResponseForbidden()
return super(DeleteView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(DeleteView, self).get_context_data(**kwargs)
context['title'] = _('Delete {0}?').format(self.object)
return context
| agpl-3.0 | 4,160,226,836,343,754,000 | 29.243655 | 91 | 0.645015 | false |
imk1/IMKTFBindingCode | makeCallIndividualPeaksScript.py | 1 | 1581 | def makeCallIndividualPeaksScript(datasetPrefix, ofprefixListFileName, pooledTAFileName, pooledTAFileCore, peakOutputDir, fraglenListFileName, blacklistFileName, codePath, scriptFileName):
# ASSUMES THAT THE FRAGMENT LENGTHS ARE IN THE SAME ORDER AS THE ofPrefix's
ofprefixListFile = open(ofprefixListFileName)
fraglenListFile = open(fraglenListFileName)
scriptFile = open(scriptFileName, 'w+')
for line in ofprefixListFile:
# Iterate through the mapped reads file prefixes and make a line in the script for each
fraglen = int(fraglenListFile.readline().strip())
codeName = codePath + "/callIndividualPeaks.sh"
scriptFile.write("sh " + codeName + " " + datasetPrefix + "/ " + line.strip() + " " + pooledTAFileName + " " + pooledTAFileCore + " " + peakOutputDir + "/ " + str(fraglen) + " " + blacklistFileName + " " + codePath + "\n")
ofprefixListFile.close()
fraglenListFile.close()
scriptFile.close()
if __name__=="__main__":
import sys
datasetPrefix = sys.argv[1] # SHOULD NOT HAVE A / AT THE END
ofprefixListFileName = sys.argv[2]
pooledTAFileName = sys.argv[3] # SHOULD NOT HAVE A / AT THE END
pooledTAFileCore = sys.argv[4]
peakOutputDir = sys.argv[5] # SHOULD NOT HAVE A / AT THE END
fraglenListFileName = sys.argv[6]
blacklistFileName = sys.argv[7]
codePath = sys.argv[8] # SHOULD NOT HAVE A / AT THE END
scriptFileName = sys.argv[9]
makeCallIndividualPeaksScript(datasetPrefix, ofprefixListFileName, pooledTAFileName, pooledTAFileCore, peakOutputDir, fraglenListFileName, blacklistFileName, codePath, scriptFileName)
| mit | 115,318,794,299,012,770 | 56.62963 | 224 | 0.739405 | false |
zardoru/vrxcl | util/extract_balance_data.py | 1 | 5816 | # -*- coding: utf-8 -*-
from glob import glob
from re import compile
from json import dumps
class Damage(object):
def __init__(self):
self.damage_start = 0
self.damage_addon = 0
self.speed_start = 0
self.speed_addon = 0
self.dmg_min_start = None
self.dmg_min_addon = 0
self.dmg_max_start = None
self.dmg_max_addon = 0
self.cap = None
self.speed_cap = None
self.pull_start = 0
self.pull_addon = 0
self.count_start = 0
self.count_addon = 0
@property
def is_damage_range(self):
return self.dmg_min_start is not None and\
self.dmg_max_start is not None
@property
def is_damage_capped(self):
return self.cap is not None
def is_speed_capped(self):
return self.speed_cap is not None
class Ent(object):
def __init__(self):
self.health_start = 0
self.health_addon = 0
self.power_start = 0
self.power_addon = 0
# #define xxx number
re_define = compile(r'\#define\s+([a-zA-Z0-9_]+)\s+(-?\d+(\.\d+)?)')
# // xxx.yyy: aaa,bbb,ccc
re_dmgclass = compile(r'//\s*([a-zA-Z.]+)\s*:\s*([a-zA-Z_]+((,[a-zA-Z_]+)+)?)')
# VAR = num
re_luadefine = compile(r'([a-zA-Z0-9_]+)\s*=\s*(\d+(\.\d+)?)')
# (num|def) + (num|def) * whatever;
re_val = compile(r'=\s*((-?(\d+(\.\d+)?)|-?[a-zA-Z_]+)(\s*\+\s*)?)?(((\d+(\.\d+)?)|[a-zA-Z_]+)\s*\*\s*[->.a-zA-Z\[\]]+)?;')
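# Illustrative (hypothetical) source lines that each pattern above would match:
#   re_define:    #define DMG_BASE 10
#   re_dmgclass:  // dmg: blaster,rocket
#   re_luadefine: BLASTER_DAMAGE = 15
#   re_val:       = 10 + 2 * self->level;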
def load_define(defs, line):
match = re_define.search(line)
if match:
# print(match.group(0))
defs[match.group(1)] = float(match.group(2))
def load_luavar(defs, line):
match = re_luadefine.search(line)
if match:
# print(match.group(0))
defs[match.group(1)] = float(match.group(2))
def load_dmgclass_value(defs, dmgs, ents, ltype, names, line, filename):
match = re_val.search(line)
if match is None:
print("invalid match in file {}, line \"{}\"".format(filename, line.strip()))
return
# print(match.group(0), match.group(1), match.group(5))
# group 1: base
# group 2: scaling
names = names.split(',')
for ident in names:
try:
base = float(match.group(2) or 0)
except ValueError: # not a number
try:
base = defs[match.group(2)]
except KeyError:
print(line, "errored while trying to find key", match.group(1))
return
try:
scale = float(match.group(7) or 0)
except ValueError:
try:
scale = defs[match.group(7)]
except KeyError:
print(line, "errored while trying to find key", match.group(5))
return
if ltype in ["dmg", "spd", "cap", "dmgcap", "spdcap", "pull", "dmg.min", "dmg.max", "count"]:
if not ident in dmgs:
dmgs[ident] = Damage()
dmg = dmgs[ident]
if ltype in ["hlt", "pow"]:
if not ident in ents:
ents[ident] = Ent()
ent = ents[ident]
if ltype == "dmg":
dmg.damage_start = base
dmg.damage_addon = scale
if ltype == "spd":
dmg.speed_start = base
dmg.speed_addon = scale
if ltype == "cap":
dmg.cap = base
if ltype == "dmgcap":
dmg.cap = base
if ltype == "spdcap":
dmg.speed_cap = base
if ltype == "pull":
dmg.pull_start = base
dmg.pull_addon = scale
if ltype == "dmg.min":
dmg.dmg_min_start = base
dmg.dmg_min_addon = scale
if ltype == "dmg.max":
dmg.dmg_max_start = base
dmg.dmg_max_addon = scale
if ltype == "dmg.count":
dmg.count_start = base
dmg.count_addon = scale
if ltype == "hlt":
ent.health_start = base
ent.health_addon = scale
if ltype == "pow":
ent.power_start = base
ent.power_addon = scale
def load_dmgclass(defs, dmgs, ents, line, filename):
match = re_dmgclass.search(line)
if match:
if match.group(1) in ["hlt", "dmg", "spd", "cap", "dmgcap", "spdcap", "pow", "pull", "dmg.min", "dmg.max", "count"]:
load_dmgclass_value(defs, dmgs, ents, match.group(1), match.group(2), line, filename)
def read_all_c(defs, dmgs, ents):
srcs = glob("src/**/*.c", recursive=True) + glob("src/**/*.h", recursive=True)
# exclude library files
srcs = [x for x in srcs if 'libraries' not in x]
# read defines first
print("Reading C defines...")
for filename in srcs:
with open(filename, 'r', encoding='utf-8') as file:
print("CURRENT FILE:", filename, " " * 12)
for line in file:
load_define(defs, line)
print()
print("Reading tagged balance data...")
# read damage classes
for filename in srcs:
with open(filename, 'r', encoding='utf-8') as file:
print("CURRENT FILE:", filename, " " * 12)
for line in file:
load_dmgclass(defs, dmgs, ents, line, filename)
def read_all_lua(defs, dmgs):
lua_srcs = glob("lua/variables.lua")
for filename in lua_srcs:
with open(filename, 'r') as file:
for line in file:
load_luavar(defs, line)
if __name__ == '__main__':
defs = {}
dmgs = {}
ents = {}
read_all_lua(defs, dmgs)
read_all_c(defs, dmgs, ents)
dmgstr = dumps(dmgs, indent=4, default=lambda o: o.__dict__)
entstr = dumps(ents, indent=4, default=lambda o: o.__dict__)
with open("dmgdata.json", "w") as f:
f.write(dmgstr)
with open("entstrdata.json", "w") as f:
f.write(entstr) | gpl-2.0 | 3,028,734,910,971,316,000 | 29.615789 | 124 | 0.523728 | false |
GoogleCloudPlatform/cloud-data-quality | clouddq/classes/dq_entity.py | 1 | 8031 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""todo: add classes docstring."""
from __future__ import annotations
from dataclasses import dataclass
import sys
from clouddq.classes.dq_entity_column import DqEntityColumn
from clouddq.utils import assert_not_none_or_empty
from clouddq.utils import get_format_string_arguments
from clouddq.utils import get_from_dict_and_assert
ENTITY_CUSTOM_CONFIG_MAPPING = {
"BIGQUERY": {
"table_name": "{table_name}",
"database_name": "{dataset_name}",
"instance_name": "{project_name}",
},
}
def get_custom_entity_configs(
entity_id: str, configs_map: dict, source_database: str, config_key: str
) -> str:
entity_configs = ENTITY_CUSTOM_CONFIG_MAPPING.get(source_database)
if not entity_configs:
raise NotImplementedError(
f"Entity Config ID '{entity_id}' has unsupported source_database "
f"'{source_database}'."
)
entity_config_template = entity_configs.get(config_key)
if not entity_config_template:
raise NotImplementedError(
f"Entity Config ID '{entity_id}' with source_database "
f"'{source_database}' has unsupported config value '{config_key}'."
)
entity_config_template_arguments = get_format_string_arguments(
entity_config_template
)
entity_config_arguments = dict()
for argument in entity_config_template_arguments:
argument_value = configs_map.get(argument)
if argument_value:
entity_config_arguments[argument] = argument_value
try:
config_value = entity_config_template.format(**entity_config_arguments)
except KeyError:
if config_key in configs_map:
print(
f"Entity Config ID '{entity_id}' with source_database "
f"'{source_database}' is using deprecated "
f"config value '{config_key}'.\n"
f"This will be removed in version 1.0.\n"
f"Migrate to use the config values "
f"'{entity_config_template_arguments}' instead.",
file=sys.stderr,
)
config_value = configs_map.get(config_key)
else:
raise ValueError(
f"Entity Config ID '{entity_id}' with source_database "
f"'{source_database}' has incomplete config values.\n"
f"Configs required: '{entity_config_template_arguments}'.\n"
f"Configs supplied: '{configs_map}'."
)
return config_value
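# Worked example of the resolution above (entity id and values are purely
# illustrative): get_custom_entity_configs("e1", {"table_name": "t1"},
# "BIGQUERY", "table_name") formats the "{table_name}" template from the
# supplied configs map and returns "t1".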
@dataclass
class DqEntity:
""" """
entity_id: str
source_database: str
table_name: str
database_name: str
instance_name: str
columns: dict[str, DqEntityColumn]
environment_override: dict
def resolve_column_config(self: DqEntity, column_id: str) -> DqEntityColumn:
"""
Args:
            self: DqEntity:
column_id: str:
Returns:
"""
dq_column_config = self.columns.get(column_id, None)
assert_not_none_or_empty(
dq_column_config,
f"Column ID: {column_id} not found in Entity Config: {self.entity_id}.",
)
return dq_column_config
@classmethod
def from_dict(cls: DqEntity, entity_id: str, kwargs: dict) -> DqEntity:
"""
Args:
cls: DqEntity:
entity_id: str:
kwargs: typing.Dict:
Returns:
"""
source_database = get_from_dict_and_assert(
config_id=entity_id, kwargs=kwargs, key="source_database"
)
table_name = get_custom_entity_configs(
entity_id=entity_id,
configs_map=kwargs,
source_database=source_database,
config_key="table_name",
)
database_name = get_custom_entity_configs(
entity_id=entity_id,
configs_map=kwargs,
source_database=source_database,
config_key="database_name",
)
instance_name = get_custom_entity_configs(
entity_id=entity_id,
configs_map=kwargs,
source_database=source_database,
config_key="instance_name",
)
columns_dict = get_from_dict_and_assert(
config_id=entity_id, kwargs=kwargs, key="columns"
)
columns: dict[str, DqEntityColumn] = dict()
for column_id, column_config in columns_dict.items():
column = DqEntityColumn.from_dict(
entity_column_id=column_id,
kwargs=column_config,
entity_source_database=source_database,
)
columns[column_id] = column
# validate environment override
environment_override = dict()
for key, value in kwargs.get("environment_override", dict()).items():
target_env = get_from_dict_and_assert(
config_id=entity_id,
kwargs=value,
key="environment",
assertion=lambda v: v.lower() == key.lower(),
error_msg=f"Environment target key {key} must match value.",
)
override_configs = value["override"]
instance_name_override = get_custom_entity_configs(
entity_id=entity_id,
configs_map=override_configs,
source_database=source_database,
config_key="instance_name",
)
database_name_override = get_custom_entity_configs(
entity_id=entity_id,
configs_map=override_configs,
source_database=source_database,
config_key="database_name",
)
try:
table_name_override = get_custom_entity_configs(
entity_id=entity_id,
configs_map=override_configs,
source_database=source_database,
config_key="table_name",
)
except ValueError:
table_name_override = table_name
environment_override[target_env] = {
"instance_name": instance_name_override,
"database_name": database_name_override,
"table_name": table_name_override,
}
return DqEntity(
entity_id=str(entity_id),
source_database=source_database,
table_name=table_name,
database_name=database_name,
instance_name=instance_name,
columns=columns,
environment_override=environment_override,
)
def to_dict(self: DqEntity) -> dict:
"""
Args:
self: DqEntity:
Returns:
"""
columns = {
column_id: column_config.dict_values()
for column_id, column_config in self.columns.items()
}
output = {
"source_database": self.source_database,
"table_name": self.table_name,
"database_name": self.database_name,
"instance_name": self.instance_name,
"columns": columns,
}
if self.environment_override:
output["environment_override"] = self.environment_override
return dict({f"{self.entity_id}": output})
def dict_values(self: DqEntity) -> dict:
"""
Args:
self: DqEntity:
Returns:
"""
return dict(self.to_dict().get(self.entity_id))
| apache-2.0 | 8,395,775,331,405,945,000 | 32.60251 | 84 | 0.576018 | false |
avikivity/scylla | test/alternator/test_query.py | 1 | 21311 | # -*- coding: utf-8 -*-
# Copyright 2019 ScyllaDB
#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
# Tests for the Query operation
# Some of the Query features are tested in separate files:
# * test_key_conditions.py: the KeyConditions paramter.
# * test_key_condition_expression.py: the KeyConditionExpression parameter.
# * test_filter_expression.py: the FilterExpression parameter.
# * test_query_filter.py: the QueryFilter parameter.
import random
import pytest
from botocore.exceptions import ClientError, ParamValidationError
from decimal import Decimal
from util import random_string, random_bytes, full_query, multiset
from boto3.dynamodb.conditions import Key, Attr
def test_query_nonexistent_table(dynamodb):
client = dynamodb.meta.client
with pytest.raises(ClientError, match="ResourceNotFoundException"):
client.query(TableName="i_do_not_exist", KeyConditions={
'p' : {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'},
'c' : {'AttributeValueList': ['11'], 'ComparisonOperator': 'BEGINS_WITH'}
})
# Items returned by Query should be sorted by the sort key. The following
# tests verify that this is indeed the case, for the three allowed key types:
# strings, binary, and numbers. These tests test not just the Query operation,
# but inherently that the sort-key sorting works.
def test_query_sort_order_string(test_table):
# Insert a lot of random items in one new partition:
# str(i) has a non-obvious sort order (e.g., "100" comes before "2") so is a nice test.
p = random_string()
items = [{'p': p, 'c': str(i)} for i in range(128)]
with test_table.batch_writer() as batch:
for item in items:
batch.put_item(item)
got_items = full_query(test_table, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}})
assert len(items) == len(got_items)
# Extract just the sort key ("c") from the items
sort_keys = [x['c'] for x in items]
got_sort_keys = [x['c'] for x in got_items]
# Verify that got_sort_keys are already sorted (in string order)
assert sorted(got_sort_keys) == got_sort_keys
# Verify that got_sort_keys are a sorted version of the expected sort_keys
assert sorted(sort_keys) == got_sort_keys
def test_query_sort_order_bytes(test_table_sb):
# Insert a lot of random items in one new partition:
# We arbitrarily use random_bytes with a random length.
p = random_string()
items = [{'p': p, 'c': random_bytes(10)} for i in range(128)]
with test_table_sb.batch_writer() as batch:
for item in items:
batch.put_item(item)
got_items = full_query(test_table_sb, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}})
assert len(items) == len(got_items)
sort_keys = [x['c'] for x in items]
got_sort_keys = [x['c'] for x in got_items]
# Boto3's "Binary" objects are sorted as if bytes are signed integers.
# This isn't the order that DynamoDB itself uses (byte 0 should be first,
# not byte -128). Sorting the byte array ".value" works.
assert sorted(got_sort_keys, key=lambda x: x.value) == got_sort_keys
assert sorted(sort_keys) == got_sort_keys
def test_query_sort_order_number(test_table_sn):
# This is a list of numbers, sorted in correct order, and each suitable
# for accurate representation by Alternator's number type.
numbers = [
Decimal("-2e10"),
Decimal("-7.1e2"),
Decimal("-4.1"),
Decimal("-0.1"),
Decimal("-1e-5"),
Decimal("0"),
Decimal("2e-5"),
Decimal("0.15"),
Decimal("1"),
Decimal("1.00000000000000000000000001"),
Decimal("3.14159"),
Decimal("3.1415926535897932384626433832795028841"),
Decimal("31.4"),
Decimal("1.4e10"),
]
# Insert these numbers, in random order, into one partition:
p = random_string()
items = [{'p': p, 'c': num} for num in random.sample(numbers, len(numbers))]
with test_table_sn.batch_writer() as batch:
for item in items:
batch.put_item(item)
# Finally, verify that we get back exactly the same numbers (with identical
# precision), and in their original sorted order.
got_items = full_query(test_table_sn, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}})
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == numbers
# Note: this is a very partial check for the QueryFilter feature. See
# test_query_filter.py for much more exhaustive tests for this feature.
def test_query_filtering_attributes_equality(filled_test_table):
test_table, items = filled_test_table
query_filter = {
"attribute" : {
"AttributeValueList" : [ "xxxx" ],
"ComparisonOperator": "EQ"
}
}
got_items = full_query(test_table, KeyConditions={'p': {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'}}, QueryFilter=query_filter)
print(got_items)
assert multiset([item for item in items if item['p'] == 'long' and item['attribute'] == 'xxxx']) == multiset(got_items)
query_filter = {
"attribute" : {
"AttributeValueList" : [ "xxxx" ],
"ComparisonOperator": "EQ"
},
"another" : {
"AttributeValueList" : [ "yy" ],
"ComparisonOperator": "EQ"
}
}
got_items = full_query(test_table, KeyConditions={'p': {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'}}, QueryFilter=query_filter)
print(got_items)
assert multiset([item for item in items if item['p'] == 'long' and item['attribute'] == 'xxxx' and item['another'] == 'yy']) == multiset(got_items)
# Test that FilterExpression works as expected
def test_query_filter_expression(filled_test_table):
test_table, items = filled_test_table
got_items = full_query(test_table, KeyConditions={'p': {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'}}, FilterExpression=Attr("attribute").eq("xxxx"))
print(got_items)
assert multiset([item for item in items if item['p'] == 'long' and item['attribute'] == 'xxxx']) == multiset(got_items)
got_items = full_query(test_table, KeyConditions={'p': {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'}}, FilterExpression=Attr("attribute").eq("xxxx") & Attr("another").eq("yy"))
print(got_items)
assert multiset([item for item in items if item['p'] == 'long' and item['attribute'] == 'xxxx' and item['another'] == 'yy']) == multiset(got_items)
# Test Query with the AttributesToGet parameter. Result should include the
# selected attributes only - if one wants the key attributes as well, one
# needs to select them explicitly. When no key attributes are selected,
# some items may have *none* of the selected attributes. Those items are
# returned too, as empty items - they are not outright missing.
def test_query_attributes_to_get(dynamodb, test_table):
p = random_string()
items = [{'p': p, 'c': str(i), 'a': str(i*10), 'b': str(i*100) } for i in range(10)]
with test_table.batch_writer() as batch:
for item in items:
batch.put_item(item)
for wanted in [ ['a'], # only non-key attributes
['c', 'a'], # a key attribute (sort key) and non-key
['p', 'c'], # entire key
['nonexistent'] # none of the items have this attribute!
]:
got_items = full_query(test_table, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, AttributesToGet=wanted)
expected_items = [{k: x[k] for k in wanted if k in x} for x in items]
assert multiset(expected_items) == multiset(got_items)
# Test that in a table with both hash key and sort key, which keys we can
# Query by: We can Query by the hash key, by a combination of both hash and
# sort keys, but *cannot* query by just the sort key, and obviously not
# by any non-key column.
def test_query_which_key(test_table):
p = random_string()
c = random_string()
p2 = random_string()
c2 = random_string()
item1 = {'p': p, 'c': c}
item2 = {'p': p, 'c': c2}
item3 = {'p': p2, 'c': c}
for i in [item1, item2, item3]:
test_table.put_item(Item=i)
# Query by hash key only:
got_items = full_query(test_table, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}})
expected_items = [item1, item2]
assert multiset(expected_items) == multiset(got_items)
# Query by hash key *and* sort key (this is basically a GetItem):
got_items = full_query(test_table, KeyConditions={
'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'},
'c': {'AttributeValueList': [c], 'ComparisonOperator': 'EQ'}
})
expected_items = [item1]
assert multiset(expected_items) == multiset(got_items)
# Query by sort key alone is not allowed. DynamoDB reports:
# "Query condition missed key schema element: p".
with pytest.raises(ClientError, match='ValidationException'):
full_query(test_table, KeyConditions={
'c': {'AttributeValueList': [c], 'ComparisonOperator': 'EQ'}
})
# Query by a non-key isn't allowed, for the same reason - that the
# actual hash key (p) is missing in the query:
with pytest.raises(ClientError, match='ValidationException'):
full_query(test_table, KeyConditions={
'z': {'AttributeValueList': [c], 'ComparisonOperator': 'EQ'}
})
# If we try both p and a non-key we get a complaint that the sort
# key is missing: "Query condition missed key schema element: c"
with pytest.raises(ClientError, match='ValidationException'):
full_query(test_table, KeyConditions={
'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'},
'z': {'AttributeValueList': [c], 'ComparisonOperator': 'EQ'}
})
# If we try p, c and another key, we get an error that
# "Conditions can be of length 1 or 2 only".
with pytest.raises(ClientError, match='ValidationException'):
full_query(test_table, KeyConditions={
'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'},
'c': {'AttributeValueList': [c], 'ComparisonOperator': 'EQ'},
'z': {'AttributeValueList': [c], 'ComparisonOperator': 'EQ'}
})
# Test the "Select" parameter of Query. The default Select mode,
# ALL_ATTRIBUTES, returns items with all their attributes. Other modes
# allow returning just specific attributes or just counting the results
# without returning items at all.
@pytest.mark.xfail(reason="Select not supported yet")
def test_query_select(test_table_sn):
numbers = [Decimal(i) for i in range(10)]
# Insert these numbers, in random order, into one partition:
p = random_string()
items = [{'p': p, 'c': num, 'x': num} for num in random.sample(numbers, len(numbers))]
with test_table_sn.batch_writer() as batch:
for item in items:
batch.put_item(item)
# Verify that we get back the numbers in their sorted order. By default,
# query returns all attributes:
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}})['Items']
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == numbers
got_x_attributes = [x['x'] for x in got_items]
assert got_x_attributes == numbers
# Select=ALL_ATTRIBUTES does exactly the same as the default - return
# all attributes:
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Select='ALL_ATTRIBUTES')['Items']
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == numbers
got_x_attributes = [x['x'] for x in got_items]
assert got_x_attributes == numbers
# Select=ALL_PROJECTED_ATTRIBUTES is not allowed on a base table (it
# is just for indexes, when IndexName is specified)
with pytest.raises(ClientError, match='ValidationException'):
test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Select='ALL_PROJECTED_ATTRIBUTES')
# Select=SPECIFIC_ATTRIBUTES requires that either a AttributesToGet
# or ProjectionExpression appears, but then really does nothing:
with pytest.raises(ClientError, match='ValidationException'):
test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Select='SPECIFIC_ATTRIBUTES')
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Select='SPECIFIC_ATTRIBUTES', AttributesToGet=['x'])['Items']
expected_items = [{'x': i} for i in numbers]
assert got_items == expected_items
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Select='SPECIFIC_ATTRIBUTES', ProjectionExpression='x')['Items']
assert got_items == expected_items
# Select=COUNT just returns a count - not any items
got = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Select='COUNT')
assert got['Count'] == len(numbers)
assert not 'Items' in got
# Check again that we also get a count - not just with Select=COUNT,
# but without Select=COUNT we also get the items:
got = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}})
assert got['Count'] == len(numbers)
assert 'Items' in got
# Select with some unknown string generates a validation exception:
with pytest.raises(ClientError, match='ValidationException'):
test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Select='UNKNOWN')
# Test that the "Limit" parameter can be used to return only some of the
# items in a single partition. The items returned are the first in the
# sorted order.
def test_query_limit(test_table_sn):
numbers = [Decimal(i) for i in range(10)]
# Insert these numbers, in random order, into one partition:
p = random_string()
items = [{'p': p, 'c': num} for num in random.sample(numbers, len(numbers))]
with test_table_sn.batch_writer() as batch:
for item in items:
batch.put_item(item)
# Verify that we get back the numbers in their sorted order.
# First, no Limit so we should get all numbers (we have few of them, so
# it all fits in the default 1MB limitation)
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}})['Items']
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == numbers
# Now try a few different Limit values, and verify that the query
# returns exactly the first Limit sorted numbers.
for limit in [1, 2, 3, 7, 10, 17, 100, 10000]:
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Limit=limit)['Items']
assert len(got_items) == min(limit, len(numbers))
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == numbers[0:limit]
# Unfortunately, the boto3 library forbids a Limit of 0 on its own,
# before even sending a request, so we can't test how the server responds.
with pytest.raises(ParamValidationError):
test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Limit=0)
# In test_query_limit we tested just that Limit allows to stop the result
# after right right number of items. Here we test that such a stopped result
# can be resumed, via the LastEvaluatedKey/ExclusiveStartKey paging mechanism.
def test_query_limit_paging(test_table_sn):
numbers = [Decimal(i) for i in range(20)]
# Insert these numbers, in random order, into one partition:
p = random_string()
items = [{'p': p, 'c': num} for num in random.sample(numbers, len(numbers))]
with test_table_sn.batch_writer() as batch:
for item in items:
batch.put_item(item)
# Verify that full_query() returns all these numbers, in sorted order.
# full_query() will do a query with the given limit, and resume it again
# and again until the last page.
for limit in [1, 2, 3, 7, 10, 17, 100, 10000]:
got_items = full_query(test_table_sn, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Limit=limit)
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == numbers
# Test that the ScanIndexForward parameter works, and can be used to
# return items sorted in reverse order. Combining this with Limit can
# be used to return the last items instead of the first items of the
# partition.
def test_query_reverse(test_table_sn):
numbers = [Decimal(i) for i in range(20)]
# Insert these numbers, in random order, into one partition:
p = random_string()
items = [{'p': p, 'c': num} for num in random.sample(numbers, len(numbers))]
with test_table_sn.batch_writer() as batch:
for item in items:
batch.put_item(item)
# Verify that we get back the numbers in their sorted order or reverse
# order, depending on the ScanIndexForward parameter being True or False.
# First, no Limit so we should get all numbers (we have few of them, so
# it all fits in the default 1MB limitation)
reversed_numbers = list(reversed(numbers))
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, ScanIndexForward=True)['Items']
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == numbers
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, ScanIndexForward=False)['Items']
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == reversed_numbers
# Now try a few different Limit values, and verify that the query
# returns exactly the first Limit sorted numbers - in regular or
# reverse order, depending on ScanIndexForward.
for limit in [1, 2, 3, 7, 10, 17, 100, 10000]:
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Limit=limit, ScanIndexForward=True)['Items']
assert len(got_items) == min(limit, len(numbers))
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == numbers[0:limit]
got_items = test_table_sn.query(ConsistentRead=True, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, Limit=limit, ScanIndexForward=False)['Items']
assert len(got_items) == min(limit, len(numbers))
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == reversed_numbers[0:limit]
# Test that paging also works properly with reverse order
# (ScanIndexForward=false), i.e., reverse-order queries can be resumed
def test_query_reverse_paging(test_table_sn):
numbers = [Decimal(i) for i in range(20)]
# Insert these numbers, in random order, into one partition:
p = random_string()
items = [{'p': p, 'c': num} for num in random.sample(numbers, len(numbers))]
with test_table_sn.batch_writer() as batch:
for item in items:
batch.put_item(item)
reversed_numbers = list(reversed(numbers))
# Verify that with ScanIndexForward=False, full_query() returns all
# these numbers in reversed sorted order - getting pages of Limit items
# at a time and resuming the query.
for limit in [1, 2, 3, 7, 10, 17, 100, 10000]:
got_items = full_query(test_table_sn, KeyConditions={'p': {'AttributeValueList': [p], 'ComparisonOperator': 'EQ'}}, ScanIndexForward=False, Limit=limit)
got_sort_keys = [x['c'] for x in got_items]
assert got_sort_keys == reversed_numbers
# A query without a KeyConditions or KeyConditionExpression, or an empty
# one, is obviously not allowed:
def test_query_missing_key(test_table):
with pytest.raises(ClientError, match='ValidationException'):
full_query(test_table, KeyConditions={})
with pytest.raises(ClientError, match='ValidationException'):
full_query(test_table)
| agpl-3.0 | -8,097,653,111,144,787,000 | 53.925258 | 199 | 0.666933 | false |
chemelnucfin/tensorflow | tensorflow/contrib/stat_summarizer/python/stat_summarizer_test.py | 1 | 2856 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StatSummarizer Python wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import _pywrap_stat_summarizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class StatSummarizerTest(test.TestCase):
def testStatSummarizer(self):
with ops.Graph().as_default() as graph:
matrix1 = constant_op.constant([[3., 3.]], name=r"m1")
matrix2 = constant_op.constant([[2.], [2.]], name=r"m2")
product = math_ops.matmul(matrix1, matrix2, name=r"product")
graph_def = graph.as_graph_def()
ss = _pywrap_stat_summarizer.StatSummarizer(
graph_def.SerializeToString())
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(20):
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run(product, options=run_options, run_metadata=run_metadata)
ss.ProcessStepStatsStr(run_metadata.step_stats.SerializeToString())
output_string = ss.GetOutputString()
print(output_string)
# Test it recorded running the expected number of times.
self.assertRegexpMatches(output_string, r"count=20")
# Test that a header line got printed.
self.assertRegexpMatches(output_string, r"====== .* ======")
# Test that the nodes we added were analyzed.
# The line for the op should contain both the op type (MatMul)
# and the name of the node (product)
self.assertRegexpMatches(output_string, r"MatMul.*product")
self.assertRegexpMatches(output_string, r"Const.*m1")
self.assertRegexpMatches(output_string, r"Const.*m2")
# Test that a CDF summed to 100%
self.assertRegexpMatches(output_string, r"100\.")
if __name__ == "__main__":
test.main()
| apache-2.0 | 500,819,497,056,256,900 | 37.08 | 80 | 0.687675 | false |
giubil/trackit | api/files/api/app/views/user_management.py | 1 | 14957 | from app import app
from app.authentication import with_login
from flask import Blueprint, jsonify, request, redirect
from marshmallow import Schema, fields
from app.request_schema import with_request_schema
from app.models import db, User, UserSessionToken, AWSKey
import app.models as models
from app.intercom import add_intercom_user
from app.tasks import send_email, send_email_alternative
from app.onboarding_email import onboarding_email
from app.g_recaptcha import with_g_recaptcha
import uuid
import json
import oauth2client
import jinja2
import apiclient
import httplib2
import config
import traceback
user_management_bp = Blueprint('user_management_bp', __name__)
class UserLoginSchema(Schema):
email = fields.Str(required=True)
password = fields.Str(required=True)
class BaseUserSchema(Schema):
# TODO: remove IDs from API?
email = fields.Email(required=True)
firstname = fields.Str(required=True)
lastname = fields.Str(required=True)
password = fields.Str(required=True, load_only=True)
class UserSchema(BaseUserSchema):
id = fields.Integer(dump_only=True)
class UserSignupSchema(BaseUserSchema):
pass
class UserEditSchema(Schema):
email = fields.Email(required=False)
firstname = fields.Str(required=False)
lastname = fields.Str(required=False)
password = fields.Str(required=False, load_only=True)
class PostProspectSchema(Schema):
#g_recaptcha_response = fields.Str(required=True)
name = fields.Str(required=True)
email = fields.Email(required=True)
phone_number = fields.Str(required=False)
company_name = fields.Str(required=False)
address = fields.Str(required=False)
which_cloud = fields.List(
fields.Str(),
required=False,
)
employees = fields.Str(required=False)
annual_revenue = fields.Str(required=False)
cloud_concerns = fields.List(
fields.Str(),
required=False,
)
class ProspectSchema(Schema):
name = fields.Str(required=True)
email = fields.Email(required=True)
phone_number = fields.Str(required=False)
company_name = fields.Str(required=False)
address = fields.Str(required=False)
which_cloud = fields.List(
fields.Str(),
required=False,
)
employees = fields.Str(required=False)
annual_revenue = fields.Integer(required=False)
cloud_concerns = fields.List(
fields.Str(),
required=False,
)
user_login_schema = UserLoginSchema()
user_schema = UserSchema()
user_signup_schema = UserSignupSchema()
user_edit_schema = UserEditSchema()
prospect_schema = ProspectSchema()
post_prospect_schema = PostProspectSchema()
@app.route('/prospect/<string:type>', methods=['POST'])
@with_request_schema(post_prospect_schema)
#@with_g_recaptcha()
def prospect(data, type):
"""---
post:
tags:
- user
produces:
- application/json
consumes:
- application/json
description: &desc Create a new prospect
summary: *desc
parameters:
- in: body
              name: body
schema:
$ref: "#/definitions/PostProspectSchema"
responses:
200:
                description: Successful creation
schema:
$ref: "#/definitions/ProspectSchema"
422:
description: Invalid body
404:
description: Prospect type not found
403:
description: Captcha validation failed
schema:
properties:
error:
type: string
"""
data, error = prospect_schema.load(data)
if error:
return jsonify(
error="Validation error",
fields=error,
), 422
if type != 'trial' and type != 'demo':
return jsonify(error="Prospect type not found"), 404
if 'cloud_concerns' in data:
concerns = [
models.CloudConcern(concern=c)
for c in data['cloud_concerns']
]
del data['cloud_concerns']
else:
concerns = []
if 'which_cloud' in data:
clouds = [
models.WhichCloud(cloud=c)
for c in data['which_cloud']
]
del data['which_cloud']
else:
clouds = []
data['type'] = type
try:
prospect = models.Prospect(**data)
db.session.add(prospect)
for c in concerns:
c.prospect = prospect
db.session.add(c)
for c in clouds:
c.prospect = prospect
db.session.add(c)
db.session.commit()
except:
traceback.print_exc()
return jsonify(error="Internal database access error"), 500
try:
email_txt, email_html = render_prospect_email(prospect)
send_email_alternative.delay(
app.config['NEW_USER_EMAIL'],
"New {} prospect".format(type),
email_txt,
email_html,
bypass_debug=True,
)
except:
traceback.print_exc()
return jsonify(prospect.to_dict()), 200
@app.route('/send', methods=['POST'])
def contact():
"""---
post:
tags:
- user
produces:
- application/json
consumes:
- application/json
description: &desc Send message
summary: *desc
responses:
200:
description: Successfully sent
422:
description: Invalid body
404:
description: Prospect type not found
"""
data = request.values
email_txt = 'Name: {}\nEmail: {}\nPhone : {}\nMessage :\n{}\n'.format(data['name'], data['email'], data['phone'], data['message'])
email_html = email_txt.replace('\n', '<br>')
try:
send_email_alternative.delay(
app.config['CONTACT_USER_EMAIL'],
"New message from contact form",
email_txt,
email_html,
bypass_debug=True,
)
except:
return "error", 502
return "sent", 200
_template_dir = app.config['EMAIL_TEMPLATE_DIR']
_template_loader = jinja2.FileSystemLoader(_template_dir)
_template_env = jinja2.Environment(loader=_template_loader)
_template_prospect_html = _template_env.get_template('emailNewProspect.html')
_template_prospect_txt = _template_env.get_template('emailNewProspect.txt')
def render_prospect_email(prospect):
content_data = prospect.to_dict()
content_data_pruned = {}
for k, v in content_data.iteritems():
if v is not None: # Jinja2 does not consider None as un undefined value.
content_data_pruned[k] = v
assert 'name' in content_data_pruned
assert 'email' in content_data_pruned
content_html = _template_prospect_html.render(**content_data_pruned)
content_txt = _template_prospect_txt.render(**content_data_pruned)
return content_txt, content_html
@app.route('/login', methods=['POST'])
@with_request_schema(user_login_schema)
def login(data):
"""---
post:
tags:
- user
produces:
- application/json
consumes:
- application/json
description: &desc Log user in
summary: *desc
parameters:
- in: body
name: body
schema:
$ref: "#/definitions/UserLogin"
responses:
200:
description: Successful login
schema:
properties:
token:
type: string
401:
description: Invalid credentials
schema:
properties:
error:
type: string
"""
user = User.query.filter_by(email=data['email']).first()
if user and user.password_matches(data['password']):
token = UserSessionToken.for_user(user)
db.session.add(token)
db.session.commit()
return jsonify(token=token.id)
return jsonify(error="Wrong user or password"), 401
google_auth_flow = oauth2client.client.OAuth2WebServerFlow(scope='https://www.googleapis.com/auth/plus.me '
'https://www.googleapis.com/auth/plus.login '
'https://www.googleapis.com/auth/userinfo.email '
'https://www.googleapis.com/auth/userinfo.profile',
redirect_uri='https://' + config.HOST_DOMAIN + config.OAUTH_URIS['auth_google_callback'],
**(config.GOOGLE_OAUTH))
@app.route('/lostpassword', methods=['POST'])
def lostpassword():
email = request.json.get("email")
user = User.query.filter_by(email=email).first()
if user:
token = uuid.uuid4().hex
user.set_lost_password(token)
db.session.add(user)
db.session.commit()
message = "Hello,\n\nYou requested to reset your password, please follow this link: https://trackit.io/#/lostpassword/" + token
send_email.delay(email, "Forgotten password", message, False, bypass_debug=True)
return jsonify(message="A request was sent")
return jsonify(error="Wrong email"), 400
@app.route('/changelostpassword', methods=['POST'])
def changelostpassword():
lost_password = request.json.get("lost_password")
password = request.json.get("password")
if lost_password and password:
user = User.query.filter_by(lost_password=request.json.get("lost_password")).first()
if user:
token = ""
user.set_lost_password(token)
user.set_password(password)
db.session.add(user)
db.session.commit()
return jsonify(message="Password was changed")
return jsonify(error="Wrong token or password"), 400
@app.route('/auth/google/initiate', methods=['GET'])
def initiate_auth_google():
auth_uri = google_auth_flow.step1_get_authorize_url()
return redirect(auth_uri, code=302)
@app.route('/auth/google/callback', methods=['GET'])
def auth_google():
access_token_url = 'https://accounts.google.com/o/oauth2/token'
people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'
credentials = google_auth_flow.step2_exchange(request.args.get('code'))
http = httplib2.Http()
http = credentials.authorize(http)
google_oauth2_service = apiclient.discovery.build('oauth2', 'v2', http=http)
profile = google_oauth2_service.userinfo().get().execute()
user_google_id = profile['id']
user_email = profile['email']
criteria = ((User.auth_google == user_google_id) |
((User.auth_google == None) & (User.email == user_email)))
user = User.query.filter(criteria).first()
if user:
token = UserSessionToken.for_user(user)
db.session.add(token)
db.session.commit()
else:
user_given_name = profile['given_name']
user_family_name = profile['family_name']
user = User(auth_google=user_google_id,
firstname=user_given_name,
lastname=user_family_name,
email=user_email)
db.session.add(user)
db.session.flush()
db.session.refresh(user)
token = UserSessionToken.for_user(user)
db.session.add(token)
key = AWSKey()
key.id_user = user.id
for data_key, data_value in config.ACCOUNT_CREATION_DEFAULT_AWS_KEY.iteritems():
setattr(key, data_key, data_value)
key.import_s3 = True
key.is_valid_key = True
db.session.add(key)
db.session.commit()
return redirect(('https://%s/#/authenticated?token=%s&token_expires=%s' % (config.WEB_UI_HOST, token.id, token.expires.isoformat())), code=302)
@app.route('/logout', methods=['GET'])
@with_login(load_user=True, load_token=True)
def logout(user, token):
"""---
get:
tags:
- user
produces:
- application/json
consumes:
- application/json
description: &desc Log user out
summary: *desc
responses:
200:
description: Successful user logout
schema:
properties:
message:
type: string
"""
db.session.delete(token)
db.session.commit()
return jsonify(message="Logout")
@app.route('/tokens', methods=['GET'])
@with_login(load_user=True)
def get_tokens(user):
query = UserSessionToken.query.filter_by(id_user=user.id)
tokens = filter(lambda t: not t.has_expired(), query.all())
stripped_tokens = [{ 'id': t.partial_token(), 'expires': t.expires.isoformat() + 'Z' } for t in tokens]
return jsonify(tokens=stripped_tokens), 200
@app.route('/signup', methods=['POST'])
@with_request_schema(user_signup_schema)
def signup(data):
"""---
post:
tags:
- user
produces:
- application/json
consumes:
- application/json
description: &desc Create user account
summary: *desc
parameters:
- in: body
name: body
schema:
$ref: "#/definitions/UserSignup"
responses:
201:
description: Successful signup
schema:
$ref: "#/definitions/User"
409:
description: Email already taken
422:
description: Invalid parameters
"""
if User.query.filter_by(email=data['email']).count():
return jsonify(error="Email already taken", fields={'email': ["Email already taken"]}), 409
user = User()
for key, value in data.iteritems():
if key == 'password':
user.set_password(value)
elif key in ['email', 'firstname', 'lastname']:
setattr(user, key, value)
db.session.add(user)
db.session.flush()
db.session.refresh(user)
db.session.commit()
add_intercom_user(user)
onboarding_email(user.email, user.firstname)
return jsonify(user_schema.dump(user)[0]), 201
@app.route('/user', methods=['GET'])
@with_login(True)
def user(user):
return jsonify(user_schema.dump(user)[0])
@app.route('/user', methods=['PUT'])
@with_login(True)
@with_request_schema(user_edit_schema)
def edit_user(user, data):
for key, value in data.iteritems():
if key == 'password':
user.set_password(value)
elif key in ['email', 'firstname', 'lastname']:
setattr(user, key, value)
db.session.commit()
return jsonify(user_schema.dump(user)[0]), 200
| apache-2.0 | 6,391,227,986,741,426,000 | 32.386161 | 148 | 0.586147 | false |
rbcollins123/eos_garp_all_vlans | garp_all_vlans.py | 1 | 4023 | # coding=utf-8
# Copyright (C) 2016 Intelligent Visbility, Inc. - Richard Collins
# <[email protected]>
import logging
import netifaces
import os
import signal
import subprocess
import sys
import time
"""
This script can be run on an Arista EOS switch to cause all SVI interfaces to
generate gratuitous ARP messages out all VLAN interfaces at a desired
interval. It can be used in a variety of situations such as migration of
layer 3 services from other devices to an Arista switch where you need to
notify all endpoints to use the new Arista default gateway for example.
"""
# set the frequency of pings in seconds to send (CTRL-C allows for early
# termination when desired) (ex: can use .5 for 500ms between garps)
ARPING_INTERVAL = 1
# flag to control output of ARPING.
# True = stdout/stderr, False = send to /dev/null
ARPING_OUTPUT = False
# set logging level to desired output
logger = logging.basicConfig(level=logging.DEBUG)
RUN_FLAG = True # used to by interrupt handlers to signal exit
def kill_popen_list(popen_list):
"""
Terminate all PIDs in a list
:param popen_list: list() of subprocess PIDs
"""
for process in popen_list:
process.kill()
sys.exit(0)
def signal_handler(signal, frame):
"""
register a signal handler to catch SIGINT and kill all the child arping
processes
"""
print('CTRL-C was pressed!\n\nKilling child processes...')
global RUN_FLAG
RUN_FLAG = False
kill_popen_list(process_list)
signal.signal(signal.SIGINT, signal_handler)
""" This is just a safety net. If arping hangs or doesn't exit cleanly,
we do not want to create an infinite # of arping processes on the switch and
impact production while running in our infinite loop. So, we will track each
PID in a list, and verify the PID has terminated for all vlans before running
another batch of arping processes. If it hangs, we do not do the next round
of pings. This ensures the process fails closed rather than open."""
process_list = []
# main run loop; send at requested intervals and wait for CTRL-C to interrupt
while RUN_FLAG:
start_time = time.time()
logging.debug("Starting new ARPING for all VLAN interfaces")
# pull a list of all interfaces on the switch
interface_list = netifaces.interfaces()
# build a list of tuples as (interface, ipaddress) to be used for calling
# arping on
# all vlan interfaces
target_list = [(interface, netifaces.ifaddresses(interface)[2][0]['addr'])
for interface in interface_list if
str(interface)[:4].lower() == 'vlan']
# kick off a ping on each interface and store the list of processes
process_count = 0
if not ARPING_OUTPUT:
with open(os.devnull, 'w') as dev_null:
for target_network in target_list:
process_list.append(subprocess.Popen(
['/sbin/arping', '-A', '-c', '1', '-I',
str(target_network[0]), str(target_network[1])],
stdout=dev_null, stderr=subprocess.STDOUT))
process_count += 1
else:
for target_network in target_list:
process_list.append(subprocess.Popen(
['/sbin/arping', '-A', '-c', '1', '-I', str(target_network[0]),
str(target_network[1])]))
process_count += 1
logging.debug("Started {} arping processes for "
"{} interfaces.".format(process_count, len(target_list)))
# ensure that all the processes have exited before continuing
while len(process_list):
for process in process_list:
if process.poll() is not None:
process_list.remove(process)
logging.info("Execution time for all {} processes: "
"{} seconds".format(process_count, time.time() - start_time))
# sleep for requested interval before sending the next ping
logging.info("Sleeping for {} seconds".format(ARPING_INTERVAL))
time.sleep(ARPING_INTERVAL)
| lgpl-3.0 | -1,709,095,333,053,565,700 | 36.95283 | 79 | 0.669898 | false |
sai-prasanna/simple-django-blog | blog/migrations/0001_initial.py | 1 | 1576 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=50)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('title', models.CharField(max_length=300)),
('content', models.TextField()),
('slug', autoslug.fields.AutoSlugField(editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(related_name='posts', to=settings.AUTH_USER_MODEL)),
('categories', models.ManyToManyField(to='blog.Category', related_name='posts')),
],
options={
},
bases=(models.Model,),
),
]
| mit | -3,379,956,058,826,820,600 | 35.651163 | 114 | 0.556472 | false |
quantmind/lux | lux/utils/files.py | 1 | 4997 | '''
Some code is taken from django:
Copyright (c) Django Software Foundation and individual contributors.
All rights reserved.
'''
import os
import re
import itertools
__all__ = ['Filehandler']
def skipfile(name):
return name.startswith('.') or name.startswith('_')
def directory(dir):
bd, fname = os.path.split(dir)
return dir if fname else bd
def get_rel_dir(dir, base, res=''):
    '''Return the path of ``dir`` relative to ``base``
'''
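    # Illustrative example (not part of the original docstring):
    #   get_rel_dir('/srv/media/photos/2015/a.jpg', '/srv/media')
    #   returns 'photos/2015/a.jpg'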
dir = directory(dir)
base = directory(base)
if len(base) > len(dir):
raise RuntimeError('Base directory not in path')
if dir == base:
return res
dir, fname = os.path.split(dir)
if res:
fname = os.path.join(fname, res)
return get_rel_dir(dir, base, fname)
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = s.strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
class Filehandler:
def open(self, name, mode='rb'):
"""Retrieves the specified file from storage, using the optional mixin
class to customize what features are available on the File returned.
"""
raise NotImplementedError()
def save(self, file):
        '''Save an instance of :class:`~.File` into the backend storage.'''
name = file.name
name = self.get_available_name(name)
name = self._save(name, file)
# Store filenames with forward slashes, even on Windows
return name.replace('\\', '/')
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a number
# (before the file extension, if one exists) to the filename until
# the generated filename doesn't exist.
count = itertools.count(1)
while self.exists(name):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" %
(file_root, next(count), file_ext))
return name
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError(
"This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError()
def exists(self, name):
"""
        Returns True if a file referenced by the given name already exists
in the storage system, or False if the name is available for a
new file.
"""
raise NotImplementedError()
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError()
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError()
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError()
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
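# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of lux): a minimal local-filesystem
# handler built on the abstract interface above. The ``root`` argument and the
# behaviour of ``_save`` (reading the whole file into memory) are assumptions
# made for this example; ``save()`` above only requires that a
# ``_save(name, file)`` method exists and returns the stored name.
class ExampleLocalFilehandler(Filehandler):
    def __init__(self, root):
        self.root = root
    def path(self, name):
        # Local filesystem path for the stored file
        return os.path.join(self.root, name)
    def exists(self, name):
        return os.path.exists(self.path(name))
    def open(self, name, mode='rb'):
        return open(self.path(name), mode)
    def _save(self, name, file):
        # Write the uploaded file's bytes to disk and return the name used
        with open(self.path(name), 'wb') as fh:
            fh.write(file.read())
        return name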
| bsd-3-clause | -187,988,342,244,856,160 | 30.23125 | 79 | 0.613568 | false |
mrafieee/django-base | apps/congress/views.py | 1 | 4962 | from django.shortcuts import render_to_response,get_object_or_404
from django.template import RequestContext
from apps.views import paginate
from datetime import date
from forms import *
from models import *
def list(request, language):
    congress_records = Congress.objects.all().order_by('priority', '-opening_date')
congress_records = paginate(request, congress_records, 20)
return render_to_response('congress/list.html', {'congresses': congress_records}, context_instance=RequestContext(request))
def detail(request, language, slug):
congress = get_object_or_404(Congress, slug=slug)
return render_to_response('congress/detail.html', {'congress': congress}, context_instance=RequestContext(request))
def up_coming(request, language):
    congress_records = Congress.objects.filter(is_open=1).order_by('-pk')
    if congress_records.exists():
        congress_records = paginate(request, congress_records, 20)
        return render_to_response('congress/list.html', {'congresses': congress_records}, context_instance=RequestContext(request))
    msg = 'There is no Upcoming Congress Available'
    return render_to_response('congress/list.html', {'msg': msg}, context_instance=RequestContext(request))
def board_directory(request, language, slug):
board = BoardDirectory.objects.filter(congress__slug=slug)
return render_to_response('congress/board.html', {'board': board}, context_instance=RequestContext(request))
def time_table(request, language, slug):
fee = get_object_or_404(Fee, congress__slug=slug)
return render_to_response('congress/time_table.html', {'fee': fee}, context_instance=RequestContext(request))
# def members_confirmation(request,slug):
# congress = get_object_or_404(Congress, is_open=True,slug=slug)
# if request.method == 'POST':
# form = ConfirmForm(request.POST)
# if form.is_valid():
# code = form.cleaned_data["code"]
# member = Member.objects.get(code=code,congress__slug=slug)
# return render_to_response('congress/member_confirm_form.html', {'member': member,'congress': congress}, context_instance=RequestContext(request))
# else:
# return render_to_response('congress/member_confirm_form.html', {'form': form,'congress': congress}, context_instance=RequestContext(request))
# else:
# form = ConfirmForm()
# return render_to_response('congress/member_confirm_form.html', {'form': form,'congress': congress}, context_instance=RequestContext(request))
# def register(request, slug):
# congress = get_object_or_404(Congress, slug=slug)
# if request.method == 'POST':
# form = RegisterForm(request.POST, prefix = 'congress_form')
# if form.is_valid():
# member = form.save(commit=False)
# member.congress = congress
# member.save()
# member.code = str(date.today().year)+str(member.id)
# member.save()
# return render_to_response('congress/form.html', {'congress': congress,'code': member.code}, context_instance=RequestContext(request))
# else:
# return render_to_response('congress/form.html', {'congress': congress,'form': form}, context_instance=RequestContext(request))
# else:
# form = RegisterForm(prefix = 'congress_form')
# return render_to_response('congress/form.html', {'congress': congress,'form': form}, context_instance=RequestContext(request))
#
# def articles_confirmation(request,slug):
# congress = get_object_or_404(Congress, is_open=True,slug=slug)
# if request.method == 'POST':
# form = ArticleConfirmForm(request.POST)
# if form.is_valid():
# code = form.cleaned_data["code"]
# article = Article.objects.get(code=code,congress__slug=slug)
# return render_to_response('congress/article_confirm_form.html', {'article': article,'congress': congress}, context_instance=RequestContext(request))
# else:
# return render_to_response('congress/article_confirm_form.html', {'form': form,'congress': congress}, context_instance=RequestContext(request))
# else:
# form = ArticleConfirmForm()
# return render_to_response('congress/article_confirm_form.html', {'form': form,'congress': congress}, context_instance=RequestContext(request))
#
# def articles_submission(request, slug):
# congress = get_object_or_404(Congress, slug=slug)
# if request.method == 'POST':
# form = ArticleForm(request.POST, prefix = 'congress_form')
# if form.is_valid():
# article = form.save(commit=False)
# article.congress = congress
# article.save()
# article.code = str(date.today().year)+str(date.today().month)+str(article.id)
# article.save()
# return render_to_response('congress/submission_form.html', {'congress': congress,'code': article.code}, context_instance=RequestContext(request))
# else:
# return render_to_response('congress/submission_form.html', {'congress': congress,'form': form}, context_instance=RequestContext(request))
# else:
# form = ArticleForm(prefix = 'congress_form')
# return render_to_response('congress/submission_form.html', {'congress': congress,'form': form}, context_instance=RequestContext(request)) | gpl-3.0 | -727,504,110,402,674,000 | 51.797872 | 153 | 0.729948 | false |
gizas/CSS_Extractor | ExternalCSSFinder.py | 1 | 8785 | #The main script for extracting Extrnal CSS links from list of sites. Uses Selenium webdriver
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
import logging
#Include Excel functions for saving data in excel
from openpyxl import Workbook,load_workbook
from openpyxl.style import Color, Fill
col1='A' #For Excel Manipulation of Cells -------------
col2='B' #For Excel Manipulation of Cells -------------
class test1(unittest.TestCase):
def setUp(self):
FORMAT = "%(message)-s"
logging.basicConfig(format=FORMAT,filename='reportExternalCSS.txt', filemode='a', level=logging.INFO)
self.logger2 = logging.getLogger("f2")#Create second logger
self.handler2 = logging.FileHandler('reportExternalCSS.txt',mode='a')#Create Filehandler
self.handler2.setLevel(logging.INFO)#Set verbosity of filehandler
self.handler_format2 = logging.Formatter("%(message)-s")#Set Formatter
self.handler2.setFormatter(self.handler_format2)#Add formatter to handler
self.logger2.addHandler(self.handler2)#Add handler to logger2
self.driver = webdriver.Chrome()
self.driver.implicitly_wait(2)
self.base_url = "http://"
self.verificationErrors = []
self.accept_next_alert = True
def test_2(self):
driver = self.driver
def Close_popUps(current_handle):
#Close any popup windows and return to main window
for handle in self.driver.window_handles:
if handle!=current_handle:
self.driver.switch_to_window(handle)
self.driver.close()
self.driver.switch_to_window(current_handle)
#row=0 #For Excel Manipulation of Cells -------------
total_counter=0
fail_counter=0
L=list()# The list of urls that need revalidation after testing failure
NoSitesLoaded=list()
#Printing Blog to Excel-------------
#row = row+1
#_cell1 = ws.cell('%s%s'%(col1, row))
#_cell2 = ws.cell('%s%s'%(col2, row))
#_cell1.style.fill.fill_type = Fill.FILL_SOLID
#_cell1.style.fill.start_color.index = 'FF9999'
#_cell1.style.font.color.index = Color.WHITE
#_cell2.style.font.color.index = Color.WHITE
#_cell2.style.fill.fill_type = Fill.FILL_SOLID
#_cell2.style.fill.start_color.index = 'FF9999'
#_cell1.value = 'A/A'
#_cell2.value = 'FIREFOX AUTOMATION TESTING'
for url in lines:
try:
self.driver.get(url) #Go to the specified url
main_window_ID=self.driver.current_window_handle#Save ID of main window
except:
fail_counter +=1#increase fail counter by1
print('Error with url parser! Bypassing URL: %s')% ( self.driver.current_url)
#Printing Blog to Excel-------------
NoSitesLoaded.append(self.driver.current_url)#Add URL to revalidation list
continue#return to for loop and run test for the next URL
total_counter=total_counter+1
self.logger2.propagate = False
self.logger2.info("===========================================================================")
logging.info("======== %d Test STARTED for url:%s =========",total_counter,self.driver.current_url)
#Printing Blog to Excel-------------
#row = row+1
#ws.cell('%s%s'%(col1, row)).value = '***********************************************************'
#ws.cell('%s%s'%(col2, row)).value = '*********************#%s Test STARTED**************************************' % (total_counter)
#End of Printing Blog to Excel-------------
Close_popUps(main_window_ID)
try:
self.assertEqual("",driver.find_element_by_xpath("//link[contains(@href,'.css')]").text)#Check if js is inserted in html page source
css_links=driver.find_elements(By.XPATH,"//link[contains(@href,'.css')]")
#PRINT BLOCK
#row= row+1
#ws.cell('%s%s'%(col1, row)).value = '%d' % (total_counter)
#ws.cell('%s%s'%(col2, row)).value = ' *****************#URL: %s*****************' % (self.driver.current_url)
#END OF PRINT BLOCK
for links in css_links:
linkcss_text=links.get_attribute('href')
#print(linkcss_text)
#PRINT BLOCK
#row= row+1
#_cell = ws.cell('%s%s'%(col2, row))
#_cell.style.fill.fill_type = Fill.FILL_SOLID
#_cell.style.fill.start_color.index = '99CCFF'
#ws.cell('%s%s'%(col1, row)).value = '%s%s' % (" ", " ")
#_cell.value = '#%s : CSS file'% (linkcss_text)
#print("Found jQuery")
#END OF PRINT BLOCK
logging.info("%s",linkcss_text)
continue
except:
###
#row= row+1
#ws.cell('%s%s'%(col1, row)).value = '%d' % (total_counter)
#ws.cell('%s%s'%(col2, row)).value = ' *****************#URL: %s*****************' % (self.driver.current_url)
#row= row+1
#_cell = ws.cell('%s%s'%(col2, row))
#_cell.style.fill.fill_type = Fill.FILL_SOLID
#_cell.style.fill.start_color.index = 'B2FF66'
#ws.cell('%s%s'%(col1, row)).value = '%s%s' % (" ", " ")
#_cell.value = '#Not Found any css link'
###
print("Not Found External css in url:"+self.driver.current_url)
L.append(self.driver.current_url)#Add URL to revalidation list
fail_counter +=1
#logging.info("Not Found any css link")
#logging.info("#%d Failed to display.",fail_counter)
continue#return to for loop and run test for the next URL
self.logger2.info("===========================================================================")
self.logger2.info("===========================================================================")
        percentage_fail = (float(fail_counter) / total_counter) * 100  # calculate percentage failure
logging.warning("Total sites visited: %d",total_counter )#print total sites tested
logging.warning("Sites CSS not appeared: %d, percentage: %d %%", fail_counter, percentage_fail)
logging.warning("URLS needing revalidation: %s", L)#Print revalidation list
logging.warning("URLS that do not work: %s", NoSitesLoaded)#Print revalidation list
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
logging.shutdown()#Close all loggers and handlers
self.driver.quit()#Close browser
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
log_file = "reportExternalCSS.txt"
f = open(log_file, "a")
#Read testing urls from file urls.txt
lines = open("urls.txt").read().splitlines()
#Settings for Printing to Excel-------------
"""try:
wb = load_workbook(filename = r'allcss.xlsx')
ws = wb.create_sheet()
ws.title = "Firefox JS"
except:
wb = Workbook()
ws = wb.create_sheet()
ws.title = "Firefox JS"
dest_filename = r'allcss.xlsx'
"""
try:
unittest.main()
#unittest.main()
except Exception:
pass
finally:
f.close()
#wb.save(filename = dest_filename)#Save Finally to Excel-------------
| mit | 5,159,504,552,590,513,000 | 44.05641 | 148 | 0.529994 | false |
programa-stic/barf-project | barf/arch/translator.py | 1 | 6462 | # Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import codecs
import logging
from barf.core.reil import ReilImmediateOperand
from barf.core.reil import ReilLabel
from barf.core.reil import ReilMnemonic
from barf.core.reil import ReilRegisterOperand
from barf.core.reil.builder import ReilBuilder
from barf.core.reil.helpers import to_reil_address
from barf.utils.utils import VariableNamer
logger = logging.getLogger(__name__)
class TranslationError(Exception):
pass
class FlagTranslator(object):
def __init__(self, arch):
self.__arch = arch
self.__flags = {}
def __getattr__(self, flag):
return self.__get_flag(flag)
def __getitem__(self, flag):
return self.__get_flag(flag)
# Auxiliary functions
# ======================================================================== #
def __get_flag(self, flag):
flag = flag.lower()
if flag not in self.__arch.registers_flags:
raise TranslationError("Invalid flag")
if flag not in self.__flags:
self.__flags[flag] = ReilRegisterOperand(flag, self.__arch.registers_size[flag])
return self.__flags[flag]
class RegisterTranslator(object):
def __init__(self, arch):
self.__arch = arch
self.__registers = {}
def __getattr__(self, register):
return self.__get_register(register)
def __getitem__(self, register):
return self.__get_register(register)
# Auxiliary functions
# ======================================================================== #
def __get_register(self, register):
register = register.lower()
if register not in self.__arch.registers_gp_all:
raise TranslationError("Invalid register")
if register not in self.__registers:
self.__registers[register] = ReilRegisterOperand(register, self.__arch.registers_size[register])
return self.__registers[register]
class InstructionTranslator(object):
def __init__(self):
# An instance of a *VariableNamer*. This is used so all the
# temporary REIL registers are unique.
self._ir_name_generator = VariableNamer("t", separator="")
def translate(self, instruction):
"""Return REIL representation of an instruction.
"""
try:
trans_instrs = self._translate(instruction)
except Exception:
self._log_translation_exception(instruction)
raise TranslationError("Unknown error")
return trans_instrs
def reset(self):
"""Restart REIL register name generator.
"""
self._ir_name_generator.reset()
def _translate(self, instruction):
raise NotImplementedError()
# Auxiliary functions
# ======================================================================== #
def _log_not_supported_instruction(self, instruction):
logger.warning("Instruction not supported: %s (%s [%s])",
instruction.mnemonic, instruction,
codecs.encode(instruction.bytes, 'hex'), exc_info=True)
def _log_translation_exception(self, instruction):
logger.error("Error translating instruction: %s (%s [%s])",
instruction.mnemonic, instruction,
codecs.encode(instruction.bytes, 'hex'), exc_info=True)
class TranslationBuilder(object):
def __init__(self, ir_name_generator, architecture_information):
self._ir_name_generator = ir_name_generator
self._instructions = []
self._builder = ReilBuilder()
self._arch_info = architecture_information
def add(self, instruction):
self._instructions.append(instruction)
def temporal(self, size):
return ReilRegisterOperand(self._ir_name_generator.get_next(), size)
def immediate(self, value, size):
return ReilImmediateOperand(value, size)
def label(self, name):
return ReilLabel(name)
def instanciate(self, address):
return self.__resolve_loops(address, self._instructions)
# Auxiliary functions
# ======================================================================== #
def __resolve_loops(self, address, instrs):
# Collect labels.
idx_by_labels = {}
instrs_no_labels = []
curr = 0
for i in instrs:
if isinstance(i, ReilLabel):
idx_by_labels[i.name] = curr
else:
instrs_no_labels.append(i)
curr += 1
instrs[:] = instrs_no_labels
# Resolve instruction addresses and JCC targets.
for index, instr in enumerate(instrs):
instr.address = to_reil_address(address, index)
if instr.mnemonic == ReilMnemonic.JCC:
target = instr.operands[2]
if isinstance(target, ReilLabel):
addr = to_reil_address(address, idx_by_labels[target.name])
size = self._arch_info.address_size + 8
instr.operands[2] = ReilImmediateOperand(addr, size)
return instrs
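if __name__ == '__main__':
    # Minimal self-contained demonstration, not part of BARF itself. The toy
    # architecture-information object below only provides the attributes that
    # FlagTranslator and RegisterTranslator actually read; real BARF
    # architecture classes supply these (and much more).
    class _ToyArchInfo(object):
        registers_flags = ['zf', 'cf']
        registers_gp_all = ['eax', 'ebx']
        registers_size = {'zf': 1, 'cf': 1, 'eax': 32, 'ebx': 32}
    arch = _ToyArchInfo()
    flags = FlagTranslator(arch)
    regs = RegisterTranslator(arch)
    print(flags.zf)      # ReilRegisterOperand for the zero flag (1 bit)
    print(regs['eax'])   # ReilRegisterOperand for eax (32 bits)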
| bsd-2-clause | 4,689,553,398,422,547,000 | 32.481865 | 108 | 0.625503 | false |
AzureAD/microsoft-authentication-library-for-dotnet | tests/CacheCompat/CommonCache.Test.MsalPython/TestMsalPython.py | 1 | 4062 | #-----------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for license information.
#-----------------------------------------------------------------------------------------
import sys
import os
import atexit
import json
import msal
class TestInputData:
def __init__(self, labUserDatas, resultsFilePath, storageType):
self.labUserDatas = labUserDatas
self.resultsFilePath = resultsFilePath
self.storageType = storageType
class CacheExecutorAccountResult:
def __init__(self, labUserUpn, authResultUpn, isAuthResultFromCache):
self.LabUserUpn = labUserUpn
self.AuthResultUpn = authResultUpn
self.IsAuthResultFromCache = isAuthResultFromCache
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
class ExecutionContext:
def __init__(self, isError, errorMessage, results):
self.IsError = isError
self.ErrorMessage = errorMessage
self.Results = results
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
# To run locally uncomment the next 2 lines and use a file containing the following json (update the password!)
#
# {"Scope":"https://graph.microsoft.com/user.read","CacheFilePath":"C:\\Temp\\adalcachecompattestdata\\msalCacheV3.bin","LabUserDatas":[{"Upn":"[email protected]","Password":"password","ClientId":"4b0db8c2-9f26-4417-8bde-3f0e3656f8e0","TenantId":"f645ad92-e38d-4d1a-b510-d1b09a74a8ca","Authority":"https://login.microsoftonline.com/f645ad92-e38d-4d1a-b510-d1b09a74a8ca/"}],"ResultsFilePath":"C:Temp\\adalcachecompattestdata\\msal_python_results.json","StorageType":4}
#
#sys.argv.append("--input")
#sys.argv.append("c:\\temp\\tmp2B95.tmp")
cmdlineargs = {}
cmdlineargs["inputFilePath"] = sys.argv[2]
print(os.path.dirname(os.path.realpath(__file__)))
print(sys.argv[2])
with open(sys.argv[2], 'r') as fp:
testInput = json.load(fp)
the_scopes = [ testInput['Scope'] ]
cache = msal.SerializableTokenCache()
cacheFilePath = testInput['CacheFilePath']
print('CacheFilePath: ' + cacheFilePath)
resultsFilePath = testInput['ResultsFilePath']
print('ResultsFilePath: ' + resultsFilePath)
atexit.register(lambda:
open(cacheFilePath, 'w').write(cache.serialize())
)
if os.path.exists(cacheFilePath):
cache.deserialize(open(cacheFilePath, 'r').read())
results = []
for labUserData in testInput['LabUserDatas']:
app = msal.PublicClientApplication(labUserData['ClientId'], authority=labUserData['Authority'], token_cache=cache)
upn = labUserData['Upn']
print('Handling labUserData.Upn = ' + upn)
accounts = app.get_accounts(username=upn)
result = None
if accounts:
result = app.acquire_token_silent(the_scopes, account=accounts[0])
if result:
print("got token for '" + upn + "' from the cache")
results.append(CacheExecutorAccountResult(upn, accounts[0]["username"] if accounts else "n/a", True))
else:
result = app.acquire_token_by_username_password(upn, labUserData['Password'], scopes=the_scopes)
        if result and "access_token" in result:
print("got token for '" + upn + "' by signing in with credentials")
print(result)
results.append(CacheExecutorAccountResult(upn, result.get("id_token_claims", {}).get("preferred_username"), False))
else:
print("** ACQUIRE TOKEN FAILURE **")
print(result.get("error"))
print(result.get("error_description"))
print(result.get("correlation_id"))
results.append(CacheExecutorAccountResult(upn, '', False))
executionContext = ExecutionContext(False, '', results)
result_json = executionContext.toJSON()
with open(resultsFilePath, 'w') as outfile:
    outfile.write(result_json)
| mit | -3,695,913,073,884,818,400 | 37.417476 | 480 | 0.649507 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/middleware/clickjacking.py | 1 | 2046 | """
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
class XFrameOptionsMiddleware(object):
"""
Middleware that sets the X-Frame-Options HTTP header in HTTP responses.
Does not set the header if it's already set or if the response contains
a xframe_options_exempt value set to True.
By default, sets the X-Frame-Options header to 'SAMEORIGIN', meaning the
response can only be loaded on a frame within the same site. To prevent the
response from being loaded in a frame in any site, set X_FRAME_OPTIONS in
your project's Django settings to 'DENY'.
Note: older browsers will quietly ignore this header, thus other
clickjacking protection techniques should be used if protection in those
browsers is required.
http://en.wikipedia.org/wiki/Clickjacking#Server_and_client
"""
def process_response(self, request, response):
# Don't set it if it's already in the response
if response.get('X-Frame-Options', None) is not None:
return response
# Don't set it if they used @xframe_options_exempt
if getattr(response, 'xframe_options_exempt', False):
return response
response['X-Frame-Options'] = self.get_xframe_options_value(request,
response)
return response
def get_xframe_options_value(self, request, response):
"""
Gets the value to set for the X_FRAME_OPTIONS header.
By default this uses the value from the X_FRAME_OPTIONS Django
settings. If not found in settings, defaults to 'SAMEORIGIN'.
This method can be overridden if needed, allowing it to vary based on
the request or response.
"""
return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
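# Illustrative example (not part of Django): because get_xframe_options_value
# is an overridable hook, a project can vary the header value per request.
# The URL prefix below is an assumption made for this sketch.
#
#     class AdminDenyXFrameOptionsMiddleware(XFrameOptionsMiddleware):
#         def get_xframe_options_value(self, request, response):
#             if request.path.startswith('/admin/'):
#                 return 'DENY'
#             return super(AdminDenyXFrameOptionsMiddleware, self
#                          ).get_xframe_options_value(request, response)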
| mit | -8,255,345,403,861,911,000 | 37.346154 | 79 | 0.660313 | false |
recipy/recipy | integration_test/version_control.py | 1 | 2655 | """
Functions to provide information about a Git repository.
"""
# Copyright (c) 2015-2016 University of Edinburgh and
# University of Southampton.
from __future__ import (nested_scopes, generators, division,
absolute_import, with_statement,
print_function, unicode_literals)
import hashlib
import os
from git import Repo
from git.exc import InvalidGitRepositoryError
BLOCKSIZE = 65536
def hash_file(path):
"""
Get hash of file, where:
:param path: file path
:type path: str or unicode
:return: hash or None if the file does not exist
:rtype: str or unicode
"""
try:
hasher = hashlib.sha1()
with open(path, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
except Exception:
return None
def get_repository(file_path):
"""
Get repository object for repository within which given file
is located.
:param file_path: File path
:type file_path: str or unicode
:return: repository or None if no repository can be found
:rtype: git.Repo
:raises git.exc.NoSuchPathError: if the path does not exist
"""
path = os.path.realpath(file_path)
repository = None
try:
repository = Repo(path, search_parent_directories=True)
except InvalidGitRepositoryError:
pass
return repository
def get_repository_path(repository):
"""
Get local repository path.
:param repository: repository
:type repository: git.Repo
:return: repository path
:rtype: str or unicode
"""
return repository.working_dir
def get_commit(repository):
"""
Get current commit ID.
:param repository: repository
:type repository: git.Repo
:return: commit ID
:rtype: str or unicode
"""
return repository.head.commit.hexsha
def get_origin(repository):
"""
Get current repository origin.
:param repository: repository
:type repository: git.Repo
:return: origin URL
:rtype: str or unicode
"""
return repository.remotes.origin.url
def get_remote(repository, remote):
"""
Get current repository remote.
:param repository: repository
:type repository: git.Repo
:param remote: remote name
:type remote: str or unicode
:return: remote URL or None if no such remote
:rtype: str or unicode
"""
remote_url = None
try:
remote_url = repository.remotes[remote].url
except IndexError:
pass
return remote_url
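if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): inspect the repository that
    # contains this file, if there is one. Output values depend on the clone.
    repo = get_repository(__file__)
    if repo is None:
        print("Not inside a git repository")
    else:
        print("path:   {}".format(get_repository_path(repo)))
        print("commit: {}".format(get_commit(repo)))
        origin = get_remote(repo, 'origin')
        if origin is not None:
            print("origin: {}".format(origin))
    print("sha1 of this file: {}".format(hash_file(__file__)))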
| apache-2.0 | -7,703,090,747,470,164,000 | 22.289474 | 64 | 0.644444 | false |
lovelysystems/pyjamas | library/pyjamas/ui/horizsplitpanel.py | 1 | 12764 | """
Horizontal Split Panel: Left and Right layouts with a movable splitter.
/*
* Copyright 2008 Google Inc.
* Copyright 2009 Luke Kenneth Casson Leighton <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License") you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http:#www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
"""
from __pyjamas__ import JS
from pyjamas.ui.splitpanel import SplitPanel
from pyjamas import DOM
from pyjamas.DeferredCommand import DeferredCommand
class ImplHorizontalSplitPanel:
""" The standard implementation for horizontal split panels.
"""
def __init__(self, panel):
self.panel = panel
DOM.setStyleAttribute(panel.getElement(), "position", "relative")
self.expandToFitParentHorizontally(panel.getWidgetElement(0))
self.expandToFitParentHorizontally(panel.getWidgetElement(1))
self.expandToFitParentHorizontally(panel.getSplitElement())
self.panel.expandToFitParentUsingCssOffsets(panel.container)
# Right now, both panes are stacked on top of each other
# on either the left side or the right side of the containing
# panel. This happens because both panes have position:absolute
# and no left/top values. The panes will be on the left side
# if the directionality is LTR, and on the right side if the
# directionality is RTL. In the LTR case, we need to snap the
# right pane to the right of the container, and in the RTL case,
# we need to snap the left pane to the left of the container.
if True: # TODO: (LocaleInfo.getCurrentLocale().isRTL()):
self.panel.setLeft(self.panel.getWidgetElement(0), "0px")
else:
self.panel.setRight(self.panel.getWidgetElement(1), "0px")
def expandToFitParentHorizontally(self, elem):
self.panel.addAbsolutePositoning(elem)
zeroSize = "0px"
self.panel.setTop(elem, zeroSize)
self.panel.setBottom(elem, zeroSize)
def onAttach(self):
pass
def onDetach(self):
pass
def onTimer(self, sender):
pass
def execute(self):
pass
def addResizeListener(self, container):
pass
def onResize(self):
pass
def onSplitterResize(self, px):
self.setSplitPositionUsingPixels(px)
def setSplitPosition(self, pos):
leftElem = self.panel.getWidgetElement(0)
self.panel.setElemWidth(leftElem, pos)
self.setSplitPositionUsingPixels(self.panel.getOffsetWidth(leftElem))
def setSplitPositionUsingPixels(self, px):
self._setSplitPositionUsingPixels(px)
def _setSplitPositionUsingPixels(self, px):
""" Set the splitter's position in units of pixels.
px represents the splitter's position as a distance
of px pixels from the left edge of the container. This is
true even in a bidi environment. Callers of this method
must be aware of this constraint.
"""
splitElem = self.panel.getSplitElement()
rootElemWidth = self.panel.getOffsetWidth(self.panel.container)
splitElemWidth = self.panel.getOffsetWidth(splitElem)
# This represents an invalid state where layout is incomplete. This
# typically happens before DOM attachment, but I leave it here as a
# precaution because negative width/height style attributes produce
# errors on IE.
if (rootElemWidth < splitElemWidth):
return
# Compute the new right side width.
newRightWidth = rootElemWidth - px - splitElemWidth
# Constrain the dragging to the physical size of the panel.
if (px < 0):
px = 0
newRightWidth = rootElemWidth - splitElemWidth
elif (newRightWidth < 0):
px = rootElemWidth - splitElemWidth
newRightWidth = 0
rightElem = self.panel.getWidgetElement(1)
# Set the width of the left side.
self.panel.setElemWidth(self.panel.getWidgetElement(0), "%dpx" % px)
# Move the splitter to the right edge of the left element.
self.panel.setLeft(splitElem, "%dpx" % px)
# Move the right element to the right of the splitter.
self.panel.setLeft(rightElem, "%dpx" % (px + splitElemWidth))
self.updateRightWidth(rightElem, newRightWidth)
def updateRightWidth(self, rightElem, newRightWidth):
# No need to update the width of the right side this will be
# recomputed automatically by CSS. This is helpful, as we do not
# have to worry about watching for resize events and adjusting the
# right-side width.
pass
class HorizontalSplitPanel(SplitPanel):
""" A panel that arranges two widgets in a single horizontal row
and allows the user to interactively change the proportion
of the width dedicated to each of the two widgets. Widgets
contained within a <code>HorizontalSplitPanel</code> will
be automatically decorated with scrollbars when necessary.
Default layout behaviour of HorizontalSplitPanels is to 100% fill
its parent vertically and horizontally [this is NOT normal!]
"""
def __init__(self, **kwargs):
""" Creates an empty horizontal split panel.
"""
if not kwargs.has_key('StyleName'): kwargs['StyleName']="gwt-HorizontalSplitPanel"
SplitPanel.__init__(self, DOM.createDiv(),
DOM.createDiv(),
self.preventBoxStyles(DOM.createDiv()),
self.preventBoxStyles(DOM.createDiv()),
**kwargs)
self.container = self.preventBoxStyles(DOM.createDiv())
self.buildDOM()
self.impl = ImplHorizontalSplitPanel(self)
# By default the panel will fill its parent vertically and horizontally.
# The horizontal case is covered by the fact that the top level div is
# block display.
self.setHeight("100%")
self.lastSplitPosition = "50%"
self.initialLeftWidth = 0
self.initialThumbPos = 0
def add(self, w):
"""
* Adds a widget to a pane in the HorizontalSplitPanel. The method
* will first attempt to add the widget to the left pane. If a
* widget is already in that position, it will attempt to add the
* widget to the right pane. If a widget is already in that position,
* an exception will be thrown, as a HorizontalSplitPanel can
* contain at most two widgets.
*
* Note that this method is bidi-sensitive. In an RTL environment,
* this method will first attempt to add the widget to the right pane,
* and if a widget is already in that position, it will attempt to add
* the widget to the left pane.
*
* @param w the widget to be added
* @throws IllegalStateException
"""
if self.getStartOfLineWidget() is None:
self.setStartOfLineWidget(w)
elif self.getEndOfLineWidget() is None:
self.setEndOfLineWidget(w)
else:
return
# TODO throw new IllegalStateException(
# "A Splitter can only contain two Widgets.")
def getEndOfLineWidget(self):
"""
* Gets the widget in the pane that is at the end of the line
* direction for the layout. That is, in an RTL layout, gets
* the widget in the left pane, and in an LTR layout, gets
* the widget in the right pane.
*
* @return the widget, <code>null</code> if there is not one.
"""
return self.getWidget(self.getEndOfLinePos())
def getLeftWidget(self):
"""
* Gets the widget in the left side of the panel.
*
* @return the widget, <code>null</code> if there is not one.
"""
return self.getWidget(0)
def getRightWidget(self):
"""
* Gets the widget in the right side of the panel.
*
* @return the widget, <code>null</code> if there is not one.
"""
return self.getWidget(1)
def getStartOfLineWidget(self):
"""
* Gets the widget in the pane that is at the start of the line
* direction for the layout. That is, in an RTL environment, gets
* the widget in the right pane, and in an LTR environment, gets
* the widget in the left pane.
*
* @return the widget, <code>null</code> if there is not one.
"""
return self.getWidget(self.getStartOfLinePos())
def setEndOfLineWidget(self, w):
"""
* Sets the widget in the pane that is at the end of the line direction
* for the layout. That is, in an RTL layout, sets the widget in
* the left pane, and in and RTL layout, sets the widget in the
* right pane.
*
* @param w the widget
"""
self.setWidget(self.getEndOfLinePos(), w)
def setLeftWidget(self, w):
"""
* Sets the widget in the left side of the panel.
*
* @param w the widget
"""
self.setWidget(0, w)
def setRightWidget(self, w):
"""
* Sets the widget in the right side of the panel.
*
* @param w the widget
"""
self.setWidget(1, w)
def setSplitPosition(self, pos):
"""
* Moves the position of the splitter.
*
* This method is not bidi-sensitive. The size specified is always
* the size of the left region, regardless of directionality.
*
* @param pos the new size of the left region in CSS units (e.g. "10px",
* "1em")
"""
self.lastSplitPosition = pos
self.impl.setSplitPosition(pos)
def setStartOfLineWidget(self, w):
"""
* Sets the widget in the pane that is at the start of the line direction
* for the layout. That is, in an RTL layout, sets the widget in
* the right pane, and in and RTL layout, sets the widget in the
* left pane.
*
* @param w the widget
"""
self.setWidget(self.getStartOfLinePos(), w)
def onLoad(self):
self.impl.onAttach()
# Set the position realizing it might not work until
# after layout runs. This first call is simply to try
# to avoid a jitter effect if possible.
self.setSplitPosition(self.lastSplitPosition)
DeferredCommand().add(self)
def execute(self):
self.setSplitPosition(self.lastSplitPosition)
def onUnload(self):
self.impl.onDetach()
def onSplitterResize(self, x, y):
self.impl.onSplitterResize(self.initialLeftWidth + x -
self.initialThumbPos)
def onSplitterResizeStarted(self, x, y):
self.initialThumbPos = x
self.initialLeftWidth = self.getOffsetWidth(self.getWidgetElement(0))
def buildDOM(self):
leftDiv = self.getWidgetElement(0)
rightDiv = self.getWidgetElement(1)
splitDiv = self.getSplitElement()
DOM.appendChild(self.getElement(), self.container)
DOM.appendChild(self.container, leftDiv)
DOM.appendChild(self.container, splitDiv)
DOM.appendChild(self.container, rightDiv)
# Sadly, this is the only way I've found to get vertical
# centering in this case. The usually CSS hacks (display:
# table-cell, vertical-align: middle) don't work in an
# absolute positioned DIV.
thumb_html = '<img src="splitPanelThumb.png" />'
DOM.setInnerHTML(splitDiv,
"<table class='hsplitter' height='100%' cellpadding='0' " +
"cellspacing='0'><tr><td align='center' valign='middle'>" +
thumb_html +
"</td></tr></table>")
self.addScrolling(leftDiv)
self.addScrolling(rightDiv)
def getEndOfLinePos(self):
return 0
# TODO: return (LocaleInfo.getCurrentLocale().isRTL() ? 0 : 1)
def getStartOfLinePos(self):
return 1
# TODO: return (LocaleInfo.getCurrentLocale().isRTL() ? 1 : 0)
| apache-2.0 | -4,385,116,286,140,427,000 | 35.783862 | 90 | 0.623551 | false |
AdL1398/PiCasso | source/modules/DecisionEngine/piresources.py | 1 | 18280 | """
title : piresources.py
description : includes
a) functions to manipulate a dictionary that representes
the consumption of a Raspberry Pi resources
b) functions for creating a json file from the dictionary and
reading it from the file and converting it back to the original
dictionary
source :
author : Carlos Molina Jimenez
date : 15 Feb 2017
version : 1.0
usage :
notes :
compile and run : % python piresources.py
python_version : Python 2.7.12
====================================================
"""
"""
pi_stat: describes the resource configuration of an individual Pi
            and its current consumption.
hardResources: hardware configuration of the Pi
cpu: cpu description
mem: memory size of the Pi in Gbytes
     disk: disk size of the Pi in Gbytes
softResources: software configuration of the Pi
OS: operating system of the Pi
 resourceUsage: current status of resource consumption of the Pi
cpuUsage: current cpu usage of the Pi in percentage
     cpuLoad: current cpu load of the Pi (a number between 1 and 4)
containers: a dynamically growing/shrinking list of the containers currently running in the Pi.
id: identification number of the container
cpuUsage: current cpu usage of the container identified by id
     memUsage: current mem usage of the container identified by id
pi_stat= {
'PiID': '192.0.0.1',
'hardResources': {'cpu': 'A 1.2GHz 64-bit quad-core ARMv8 CPU', 'mem': '1', 'disk': '32'},
'softResources': {'OS': 'Linux'},
'resourceUsage': {'cpuUsage': '30', 'cpuLoad': '70'},
'containers': [{'id': 'Cont0', 'cpuUsage': '23', 'memUsage': '3636'}
]
}
"""
import json
"""
Expects a dictionary that represents the resources of a Pi.
Returns the id of the Pi
"""
def get_PiID(pi_stat):
piID=pi_stat['PiID']
return piID
"""
Expects a dictionary that represents the resources of a Pi and
the id of the Pi. Records the id in the dictionary.
"""
def put_PiID(pi_stat,piID):
pi_stat['PiID']=piID
return
"""
Expects a dictionary that represents the resources of a Pi.
Returns a sub-dictionary that represents the hardware resources of the Pi.
"""
def get_hardResources(pi_stat):
hardRes=pi_stat['hardResources']
return hardRes
"""
Expects a dictionary that represents the resources of a Pi.
Returns the type of the cpu of the Pi.
"""
def get_hardResources_cpu(pi_stat):
cpu=pi_stat['hardResources']['cpu']
return cpu
"""
Expects a dictionary that represents the resources of a Pi and
the type of the cpu of the Pi.
Records the cpu in dicionary.
"""
def put_hardResources_cpu(pi_stat,cpu):
pi_stat['hardResources']['cpu']=cpu
return
"""
Expects a dictionary that represents the resources of a Pi.
Returns the size of the memory of the Pi
"""
def get_hardResources_mem(pi_stat):
mem=pi_stat['hardResources']['mem']
return mem
"""
Expects a dictionary that represents the resources of a Pi
and the size of the memory of the Pi, and records it in the
dictionary.
"""
def put_hardResources_mem(pi_stat,mem):
pi_stat['hardResources']['mem']=mem
return
"""
Expects a dictionary that represents the resources of a Pi.
Returns the size of the disk of the Pi
"""
def get_hardResources_disk(pi_stat):
disk=pi_stat['hardResources']['disk']
return disk
"""
Expects a dictionary that represents the resources of a Pi and
the size of the disk of the Pi.
Records the size of the disk in dictionary.
"""
def put_hardResources_disk(pi_stat,disk):
pi_stat['hardResources']['disk']=disk
return
"""
Expects a dictionary that represents the resources of a Pi.
Returns a list of dictionaries where each dictionary represents
a container currently running in the Pi.
"""
def get_containers(pi_stat):
containersLst=pi_stat['containers']
return containersLst
"""
Expects a dictionary that represents the resources of a Pi.
Returns the number of containers currently running in the Pi
"""
def get_numContainers(pi_stat):
containerLst=pi_stat['containers']
return len(containerLst)
"""
Expects a dictionary that represents the resources of a Pi,
the id of a container and the resource of interest (cpu, mem or disk).
Returns the current status of the given resource
"""
def get_containerResource(pi_stat, containerID,resource):
containersLst=pi_stat['containers']
l= len(containersLst)
if l==0:
return "No containers"
else:
        for i in range(l):
            if containersLst[i]['id']==containerID:
                return containersLst[i][resource]
        return "containerID not found"
"""
Expects a dictionary that represents the resources of a Pi and
the id of a container.
Returns a tuple of the form (containerID, cpuUsage, memUsage) which
represents the current status of the container identified as containerID.
Returns ("0", "0", "0") if no container is found with containerID
"""
def get_containerResources(pi_stat, containerID):
containersLst=pi_stat['containers']
l= len(containersLst)
    for i in range(l):
        if containersLst[i]['id']==containerID:
            return (containersLst[i]['id'], containersLst[i]['cpuUsage'], containersLst[i]['memUsage'])
    return ("0", "0", "0")
"""
Expects a dictionary that represents the resources of a Pi and
a tuple of the form (containerID, cpuUsage, memUsage) which
represents the current status of the container identified as containerID,
produces a dictionary out of the tuple and appends it to tail of
the list of containers running in the Pi
"""
def put_container(pi_stat, containerID, cpuUsage, memUsage):
containersList=pi_stat['containers']
containersList.append({'id': containerID, 'cpuUsage': cpuUsage, 'memUsage': memUsage})
return
"""
Expects a dictionary that represents the resources of a Pi.
Returns a list of tuples. Each tuple has the form (containerID, cpuUsage, memUsage) which
represents the current status of each container
"""
def get_allContainerResources(pi_stat):
containersLst=pi_stat['containers']
l= len(containersLst)
lst_of_tuples=[]
for i in range(l):
lst_of_tuples.append( (containersLst[i]['id'], containersLst[i]['cpuUsage'], containersLst[i]['memUsage']) )
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the containers with exhausted cpuUsage, that is:
cpuUsage > cpuUsageThreshold.
The return is a list of tuples:
[(containerID, cpuUsage), (containerID, cpuUsage), ...]
"""
def old_get_cpuUsageExhaustedConta_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) > cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage']) )
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the containers with exhausted cpuUsage, that is:
cpuUsage > cpuUsageThreshold.
The return is a list of tuples:
[(containerID, cpuUsage, class), (containerID, cpuUsage, class), ...]
Updated 5 Jul to include the class keyword
"""
def get_cpuUsageExhaustedConta_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) > cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage'], c['class']) )
return lst_of_tuples
"""
Expects a dictionary that representes the resources of a Pi
and a cpuUsageThreshold.
Returns the containers with vigorous cpuUsage, that is:
cpuUsage <= cpuUsageThreshold.
The return is a list of tuples:
[(containerID, cpuUsage), (containerID, cpuUsage), ...]
"""
def old_get_cpuUsageVigorousConta_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) <= cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage']) )
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the containers with vigorous cpuUsage, that is:
cpuUsage <= cpuUsageThreshold.
The return is a list of tuples:
[(containerID, cpuUsage, class), (containerID, cpuUsage, class), ...]
Updated 5 Jul to include the class keyword
"""
def get_cpuUsageVigorousConta_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) <= cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage'], c['class']) )
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the containers with exhausted cpuUsage, that is:
cpuUsage > cpuUsageThreshold.
The return is a list of tuples:
[(containerID, cpuUsage, image, port_host), (containerID, cpuUsage, image, port_host), ...]
"""
def old_get_cpuUsageExhaustedContainers_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) > cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage'], c['image'], c['port_host']) )
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the containers with exhausted cpuUsage, that is:
cpuUsage > cpuUsageThreshold.
The return is a list of tuples:
[(containerID, cpuUsage, image, port_host, class), (containerID, cpuUsage, image, port_host, class), ...]
Updated 5 Jul to include the class keyword
"""
def get_cpuUsageExhaustedContainers_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) > cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage'], c['image'], c['port_host'], c['class']) )
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the containers with NO exhausted cpuUsage, that is:
cpuUsage <= cpuUsageThreshold.
The return is a list of tuples:
[(containerID, cpuUsage), (containerID, cpuUsage), ...]
"""
def old_get_cpuUsageVigorousContainers_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) <= cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage'], c['image'], c['port_host']) )
# lst_of_tuples.append( (c['id'], c['cpuUsage']) )
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the containers with NO exhausted cpuUsage, that is:
cpuUsage <= cpuUsageThreshold.
The return is a list of tuples:
[(containerID, cpuUsage, class), (containerID, cpuUsage, class), ...]
Updated 5 Jul to include the class keyword
"""
def get_cpuUsageVigorousContainers_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) <= cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage'], c['image'], c['port_host'], c['class']) )
# lst_of_tuples.append( (c['id'], c['cpuUsage']) )
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the list of all the containers running in the Pi ONLY if all
of them are vigorous:
cpuUsage <= cpuUsageThreshold
[(containerID, cpuUsage, image, port_host), (containerID, cpuUsage, image, port_host), ...]
Returns an empty list if at least one container is cpuUsage exhausted.
"""
def old_get_cpuUsageVigorousContainersOnly_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) <= cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage'], c['image'], c['port_host']) )
else:
lst_of_tuples=[]
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi
and a cpuUsageThreshold.
Returns the list of all the containers running in the Pi ONLY if all
of them are vigorous:
cpuUsage <= cpuUsageThreshold
[(containerID, cpuUsage, image, port_host), (containerID, cpuUsage, image, port_host), ...]
Returns an empty list if at least one container is cpuUsage exhausted.
Updated 5 Jul to include the class keyword
"""
def get_cpuUsageVigorousContainersOnly_of_Pi(pi_stat,cpuUsageThreshold):
contaLst=pi_stat['containers']
lst_of_tuples=[]
for c in contaLst:
if (int(c['cpuUsage']) <= cpuUsageThreshold):
lst_of_tuples.append( (c['id'], c['cpuUsage'], c['image'], c['port_host'], c['class']) )
else:
lst_of_tuples=[]
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi.
Returns a list of tuples. Each tuple has the form (containerID, cpuUsage, memUsage, name,
status, image) which represents the current status of each container as
designed by Adisorn (22 Feb 2017).
"""
def dele__get_allResources_of_allContainers_of_Pi(pi_status):
containersLst=pi_status['containers']
l= len(containersLst)
lst_of_tuples=[]
for i in range(l):
lst_of_tuples.append( (containersLst[i]['id'], containersLst[i]['cpuUsage'],
containersLst[i]['memUsage'], containersLst[i]['name'],
containersLst[i]['status'], containersLst[i]['image'],
containersLst[i]['port_host'], containersLst[i]['port_host'],
containersLst[i]['port_container'], containersLst[i]['port_container']))
return lst_of_tuples
"""
On 29 Jun I altered this function to see if it can extract the new
key class that I have included in the dictionaries of triggerde.py.
This is just a test. Other function might need upgrading
"""
def get_allResources_of_allContainers_of_Pi(pi_status):
containersLst=pi_status['containers']
l= len(containersLst)
lst_of_tuples=[]
for i in range(l):
lst_of_tuples.append( (containersLst[i]['id'], containersLst[i]['cpuUsage'],
containersLst[i]['memUsage'], containersLst[i]['name'],
containersLst[i]['status'], containersLst[i]['image'],
containersLst[i]['port_host'], containersLst[i]['port_host'],
containersLst[i]['port_container'], containersLst[i]['port_container'],
containersLst[i]['class']))
return lst_of_tuples
"""
Expects a dictionary that represents the resources of a Pi.
Prints all the resources of the Pi and returns None.
"""
def prt_allResources_of_a_pi(pi_status):
print("\n\nPiID: " + pi_status['PiID'])
print(" PiIP: " + pi_status['PiIP'])
print(" hardResources: " + " cpu: " + pi_status['hardResources']['cpu'] +
" mem: " + pi_status['hardResources']['mem'] +
" disk: " + pi_status['hardResources']['disk'])
print(" sotfResources: " + " OS: " + pi_status['softResources']['OS'])
print(" resourceUsage: " + " cpuUsage: " + pi_status['resourceUsage']['cpuUsage'] +
" cpuLoad: " + pi_status['resourceUsage']['cpuLoad'] +
" memUsage: " + pi_status['resourceUsage']['memUsage'])
containersLst=get_allResources_of_allContainers_of_Pi(pi_status)
for lst in containersLst:
print(" containerID: " + lst[0])
print(" cpuUsage: " + lst[1])
print(" memUsage: " + lst[2])
print(" name: " + lst[3])
print(" status: " + lst[4])
print(" port_host " + lst[5])
print(" port_container " + lst[6])
print(" class: " + lst[7])
return
"""
Functions written on 21 Feb 2017
Expects a dictionary that represents the resources of a Pi.
Returns a tuple of the form (PiID, cpuUsage, cpuLoad, memUsage) which
represents the current status of the Pi identified as PiID.
"""
def get_resourceUsage(pi_status):
return(pi_status['PiID'], pi_status['resourceUsage']['cpuUsage'],
pi_status['resourceUsage']['cpuLoad'],
pi_status['resourceUsage']['memUsage'])
"""
Expects a dictionary that represents the resources of a Pi
and a fname.
1) Deletes fname if it already exists then
2) Creates a json file named fname.
"""
def create_jsonFile(pi_stat, fname):
import os
try:
os.remove(fname)
except OSError:
pass
json_pi_stat = json.dumps(pi_stat)
with open(fname, 'w') as json_outfile:
json.dump(pi_stat, json_outfile, ensure_ascii=False)
return
"""
Expects a file name that stores a json record representing
the resources of a Pi.
Reads the json file from disk and converts it back into the original
dictionary that represents the resources of the Pi.
"""
def read_jsonFile(fname):
    print('filename from piresource: ' + fname)
with open(fname) as json_infile:
pi_stat_loaded = json.load(json_infile)
return pi_stat_loaded
"""
Expects a dictionary before and after being loaded
from a file where it was stored as a json object.
Compares the two versions and returns "true" if they are
equal, "false" otherwise.
"""
def test_json_retrieval(pi_stat, pi_stat_loaded):
if (pi_stat == pi_stat_loaded):
return "true"
else:
return "false"
| mit | 8,001,806,750,052,898,000 | 34.086372 | 116 | 0.652735 | false |
jakubplichta/grafana-dashboard-builder | setup.py | 1 | 3660 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015-2020 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
__author__ = 'Jakub Plichta <[email protected]>'
class Tox(TestCommand):
user_options = [('tox-args=', 'a', "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args
if args:
args = shlex.split(self.tox_args)
errno = tox.cmdline(args=args)
sys.exit(errno)
params = {
'name': 'grafana-dashboard-builder',
'version': '0.7.0a3',
'packages': [
'grafana_dashboards',
'grafana_dashboards.client',
'grafana_dashboards.components'
],
'scripts': [
'bin/grafana_dashboard_builder.py'
],
'url': 'https://github.com/jakubplichta/grafana-dashboard-builder',
'license': 'Apache License, Version 2.0',
'author': 'Jakub Plichta',
'author_email': '[email protected]',
'description': 'Generate Grafana dashboards with YAML',
'classifiers': [
'Topic :: Utilities',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
'keywords': 'grafana yaml graphite prometheus influxdb',
'cmdclass': {'test': Tox},
'tests_require': ['tox', 'mock'],
'install_requires': ['PyYAML>=5.3', 'argparse', 'requests-kerberos', 'requests'],
'python_requires': '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
'entry_points': {
'console_scripts': [
'grafana-dashboard-builder = grafana_dashboards.cli:main',
],
},
'long_description':
"""grafana-dashboard-builder is an open-source tool for easier creation of Grafana dashboards.
It is written in Python and uses YAML descriptors for dashboard
templates.
This project has been inspired by Jenkins Job Builder that
allows users to describe Jenkins jobs with human-readable format. grafana-dashboard-builder
aims to provide similar simplicity to Grafana dashboard creation and to give users easy way how they can create
dashboard templates filled with different configuration."""
}
setup(**params)
| apache-2.0 | -107,974,675,848,229,150 | 34.533981 | 111 | 0.655738 | false |
jtoppins/beaker | Server/bkr/server/tools/usage_reminder.py | 1 | 7054 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# pkg_resources.requires() does not work if multiple versions are installed in
# parallel. This semi-supported hack using __requires__ is the workaround.
# http://bugs.python.org/setuptools/issue139
# (Fedora/EPEL has python-cherrypy2 = 2.3 and python-cherrypy = 3)
__requires__ = ['CherryPy < 3.0']
import sys
from datetime import datetime, timedelta
from sqlalchemy.sql import and_, or_, func
from bkr.common import __version__
from bkr.log import log_to_stream
from bkr.server.model import User, System, Reservation, Recipe, \
RecipeSet, Job, Watchdog, RecipeTask, Task, TaskStatus, RecipeResource
from bkr.server.util import load_config_or_exit
from turbogears import config
from turbomail.control import interface
from bkr.server import mail
from sqlalchemy.orm import joinedload
from optparse import OptionParser
from bkr.server.util import absolute_url
import logging
log = logging.getLogger(__name__)
__description__ = 'Beaker usage reminder system'
def get_parser():
usage = "usage: %prog [options]"
parser = OptionParser(usage, description=__description__,version=__version__)
parser.add_option("-c", "--config-file", dest="configfile", default=None)
parser.add_option('--reservation-expiry', type=int, metavar='HOURS', default=24,
help='Warn about reservations expiring less than HOURS in the future [default: %default]')
parser.add_option('--reservation-length', type=int, metavar='DAYS', default=3,
help='Report systems which have been reserved for longer than DAYS [default: %default]')
parser.add_option('--waiting-recipe-age', type=int, metavar='HOURS', default=1,
help='Warn about recipes which have been waiting for longer than HOURS [default: %default]')
parser.add_option('--delayed-job-age', type=int, metavar='DAYS', default=14,
help='Warn about jobs which have been queued for longer than DAYS [default: %default]')
parser.add_option("-d", "--dry-run", action="store_true", dest="testing")
return parser
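# A hypothetical dry-run invocation sketch (paths and values are examples only):
#
#   python usage_reminder.py -c /etc/beaker/server.cfg \
#       --reservation-expiry 12 --delayed-job-age 7 --dry-run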
class BeakerUsage(object):
def __init__(self, user, reservation_expiry, reservation_length,
waiting_recipe_age, delayed_job_age):
self.user = user
self.reservation_expiry = reservation_expiry
self.reservation_length = reservation_length
self.waiting_recipe_age = waiting_recipe_age
self.delayed_job_age = delayed_job_age
def expiring_reservations(self):
"""
Get expiring reservations
"""
tasks = Task.by_name(u'/distribution/reservesys')
query = Recipe.query\
.join(Recipe.recipeset).join(RecipeSet.job).filter(Job.owner == self.user)\
.join(Recipe.watchdog).join(Watchdog.recipetask)\
.join(Recipe.resource)\
.filter(or_(RecipeTask.task == tasks, Recipe.status == TaskStatus.reserved))\
.filter(Watchdog.kill_time <= (datetime.utcnow() + timedelta(hours=self.reservation_expiry)))\
.values(Watchdog.kill_time, RecipeResource.fqdn)
return list(query)
def open_in_demand_systems(self):
"""
Get Open Loans & Reservations for In Demand Systems
"""
# reservations for in demand systems
waiting_recipes = System.query.join(System.queued_recipes)\
.filter(Recipe.status == TaskStatus.queued)\
.join(Recipe.recipeset)\
.filter(RecipeSet.queue_time <= (datetime.utcnow() - timedelta(hours=self.waiting_recipe_age)))\
.join(RecipeSet.job).filter(Job.deleted == None)\
.with_entities(System.id, func.count(System.id).label('waiting_recipes_count'))\
.group_by(System.id).subquery()
# For sqlalchemy < 0.7, query.join() takes an onclause as in the following:
# query.join((target, onclause), (target2, onclause2), ...)
query = Reservation.query.filter(Reservation.user == self.user)\
.join(Reservation.system)\
.join((waiting_recipes, Reservation.system_id == waiting_recipes.c.id))\
.filter(Reservation.start_time <= (datetime.utcnow() - timedelta(days=self.reservation_length)))\
.filter(Reservation.finish_time == None)\
.values(Reservation.start_time, waiting_recipes.c.waiting_recipes_count, System.fqdn)
reservations = []
for start_time, count, fqdn in query:
duration = (datetime.utcnow() - start_time).days
reservations.append((duration, count, fqdn))
# TODO: Open Loans
return reservations
def delayed_jobs(self):
"""
Get Delayed Jobs
"""
query = Job.query.filter(Job.owner == self.user)\
.join(Job.recipesets)\
.filter(and_(RecipeSet.queue_time <= (datetime.utcnow() - timedelta(days=self.delayed_job_age)),
RecipeSet.status == TaskStatus.queued))\
.group_by(Job.id)\
.values(func.min(RecipeSet.queue_time), Job.id)
return [(queue_time, absolute_url('/jobs/%s' % job_id))
for queue_time, job_id in query]
def main(*args):
parser = get_parser()
(options, args) = parser.parse_args(*args)
load_config_or_exit(options.configfile)
log_to_stream(sys.stderr)
interface.start(config)
reservation_expiry = options.reservation_expiry
reservation_length = options.reservation_length
waiting_recipe_age = options.waiting_recipe_age
delayed_job_age = options.delayed_job_age
testing = options.testing
if testing:
print 'Dry run only, nothing will be sent\n'
for user in User.query:
beaker_usage = BeakerUsage(user, reservation_expiry, reservation_length,
waiting_recipe_age, delayed_job_age)
expiring_reservations = beaker_usage.expiring_reservations()
open_in_demand_systems = beaker_usage.open_in_demand_systems()
delayed_jobs = beaker_usage.delayed_jobs()
if (expiring_reservations or open_in_demand_systems or delayed_jobs):
data = {
'user_name': user.user_name,
'current_date': datetime.utcnow().strftime("%Y-%m-%d"),
'beaker_fqdn': absolute_url('/'),
'reservation_expiry': reservation_expiry,
'reservation_length': reservation_length,
'waiting_recipe_age': waiting_recipe_age,
'delayed_job_age': delayed_job_age,
'expiring_reservations': expiring_reservations,
'open_reservations': open_in_demand_systems,
'delayed_jobs': delayed_jobs
}
mail.send_usage_reminder(user, data, testing)
return
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 | 4,701,580,294,912,177,000 | 44.805195 | 114 | 0.643748 | false |
ruanima/flasky-test | config.py | 1 | 1361 | #coding=utf-8
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
MAIL_SERVER = 'smtp-mail.outlook.com'
MAIL_PORT = 25
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <[email protected]>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
FLASKY_POSTS_PER_PAGE = 23
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
} | gpl-2.0 | -2,306,616,173,938,902,000 | 28.977273 | 71 | 0.645114 | false |
alexandre/cashflow | cashflow/tests/test_cashflow.py | 1 | 7833 | # -*- coding: utf-8 -*-
import datetime
import decimal
import math
import unittest
from hypothesis import assume
from hypothesis import given
from hypothesis import strategies as st
from hypothesis.extra.datetime import datetimes
import hypothesis.strategies as s
import pytest
from cashflow import cashflow
from cashflow.utils import serialize as serialize_utils
TransactionStrategy = s.builds(
cashflow.Transaction,
st.decimals(),
datetimes(),
st.dictionaries(keys=st.text(), values=st.uuids())
)
class TransactionTestCase(unittest.TestCase):
@given(st.decimals(), datetimes(),
st.dictionaries(keys=st.text(), values=st.uuids()))
def test_get_transaction_value(self, value, timestamp, metadata):
transaction = cashflow.Transaction(value, timestamp, metadata)
if math.isnan(value):
assert math.isnan(transaction.value)
else:
assert transaction.value == value
@given(st.decimals(), datetimes(),
st.dictionaries(keys=st.text(), values=st.uuids()))
def test_get_transaction_timestamp(self, value, timestamp, metadata):
transaction = cashflow.Transaction(value, timestamp, metadata)
assert transaction.timestamp == timestamp
@given(st.decimals(), datetimes(),
st.dictionaries(keys=st.text(), values=st.uuids()))
def test_get_transaction_metadata(self, value, timestamp, metadata):
transaction = cashflow.Transaction(value, timestamp, metadata)
assert transaction.metadata == metadata
@given(st.decimals(), datetimes(),
st.dictionaries(keys=st.text(), values=st.uuids()))
def test_transaction_serialization(self, value, timestamp, metadata):
transaction = cashflow.Transaction(value, timestamp, metadata)
serialized_data = transaction.serialize()
expected_data = {
'value': transaction.value,
'timestamp': transaction.timestamp,
'metadata': transaction.metadata
}
assert expected_data == serialized_data
@given(st.decimals(), st.dictionaries(keys=st.text(), values=st.uuids()))
def test_compare_two_transactions(self, value, metadata):
# The difference is the timestamp...
transaction1 = cashflow.Transaction(
value, datetime.datetime.now(), metadata
)
transaction2 = cashflow.Transaction(
value, datetime.datetime.now(), metadata
)
assert transaction1 == transaction1
assert transaction1 != transaction2
# Using date instead datetime to ensure different objects with the same
# data will be considered as equal.
transaction1 = cashflow.Transaction(
value, datetime.date.today(), metadata
)
transaction2 = cashflow.Transaction(
value, datetime.date.today(), metadata
)
assert transaction1 == transaction1
assert transaction1 == transaction2
@given(st.decimals(), datetimes(),
st.dictionaries(keys=st.text(), values=st.uuids()))
def test_compare_transaction_to_other_object(self, value, timestamp, metadata): # noqa
transaction = cashflow.Transaction(value, timestamp, metadata)
with pytest.raises(RuntimeError) as cm:
transaction == 'foo bar baz'
expected_message = '{!r} is not an instance of Transaction'.format(
'foo bar baz'
)
assert str(cm.value) == expected_message
class CashFlowTestCase(unittest.TestCase):
@given(st.tuples(TransactionStrategy))
def test_dunder_len(self, transactions):
cf = cashflow.CashFlow(transactions=transactions)
assert len(cf) == len(transactions)
@given(st.tuples(TransactionStrategy))
def test_dunder_iter(self, transactions):
cf = cashflow.CashFlow(transactions=transactions)
for transaction in cf:
assert transaction in transactions
@given(st.tuples(TransactionStrategy))
def test_dunder_contains(self, transactions):
cf = cashflow.CashFlow(transactions=transactions)
self.assertIn(transactions[0], cf)
@given(st.tuples(TransactionStrategy))
def test_dunder_getitem(self, transactions):
cf = cashflow.CashFlow()
with pytest.raises(IndexError):
cf[0]
assert len(cf[0:100]) == 0
for transaction in transactions:
cf.append(transaction)
assert tuple(cf[0].transactions)[0] == transactions[0]
assert tuple(cf[0:5].transactions) == transactions[0:5]
assert len(tuple(cf[0:5].transactions)) == len(transactions[0:5])
assert tuple(cf[::-1].transactions) == tuple(reversed(transactions))
assert len(tuple(cf[::-1].transactions)) == len(transactions[::-1])
@given(st.tuples(TransactionStrategy))
def test_net_value(self, transactions):
assume(all(not math.isnan(t.value) for t in transactions))
cf = cashflow.CashFlow(transactions=transactions)
self.assertAlmostEqual(
cf.net_value, sum(t.value for t in transactions)
)
@given(TransactionStrategy)
def test_append_duplicate_transaction(self, transaction):
cf = cashflow.CashFlow()
cf.append(transaction)
with pytest.raises(ValueError) as cm:
cf.append(transaction)
expected_message = (
'{} already added to the cash flow'.format(repr(transaction))
)
assert str(cm.value) == expected_message
def test_create_an_empty_cashflow(self):
cf = cashflow.CashFlow()
assert len(cf) == 0
assert list(cf.transactions) == []
assert cf.net_value == decimal.Decimal(0)
@given(TransactionStrategy)
def test_cash_flow_with_one_transaction(self, transaction):
assume(not math.isnan(transaction.value))
cf = cashflow.CashFlow()
cf.append(transaction)
assert len(cf) == 1
assert tuple(cf.transactions) == (transaction, )
self.assertAlmostEqual(
cf.net_value, sum(t.value for t in (transaction,))
)
@given(st.tuples(TransactionStrategy))
def test_cash_flow_filter_example_simple_predicate(self, transactions):
assume(all(not math.isnan(t.value) for t in transactions))
assume(all(transaction.value > 0 for transaction in transactions))
cf = cashflow.CashFlow(transactions=transactions)
assert len(cf.filter(lambda t: t.value > 0)) == len(transactions)
@given(st.tuples(TransactionStrategy))
def test_cash_flow_filter_composition(self, transactions):
assume(all(not math.isnan(t.value) for t in transactions))
assume(all(0 < t.value <= 100 for t in transactions))
cf = cashflow.CashFlow(transactions=transactions).filter(
lambda t: t.value > 0
)
assert len(cf) == len(transactions)
# Duplicate the filter to ensure the value keeps the same
cf = cf.filter(lambda t: t.value > 0).filter(lambda t: t.value > 0)
assert len(cf) == len(transactions)
cf = cf.filter(lambda t: t.value == 100)
        transactions_with_value_100 = tuple(
            t for t in transactions if t.value == 100
        )
        assert tuple(cf.transactions) == transactions_with_value_100
@given(st.tuples(TransactionStrategy))
def test_seriaize_cash_flow(self, transactions):
cf = cashflow.CashFlow(transactions=transactions)
expected_data = {'cashflow': [t.serialize() for t in transactions]}
self.assertDictEqual(cf.serialize(), expected_data)
self.assertEqual(
serialize_utils.json_dumps(cf.serialize()),
serialize_utils.json_dumps(expected_data)
)
| unlicense | 5,424,427,961,964,647,000 | 29.243243 | 91 | 0.64956 | false |
ella/mypage | tests/unit_project/helpers.py | 1 | 3140 | from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.conf import settings
from mypage.pages.models import Page
from mypage.pages.layout import WidgetInLayout
from mypage.widgets.models import Widget
from unit_project.testwidgets.models import TestWidget
import anyjson as json
def create_default_page(case):
"""
    Creates a default page and sticks it to the given test case
"""
p = Page(template='page.html', skin='', layout_json='{"static_containers": [], "containers": []}', id=settings.DEFAULT_PAGE_ID, layout_migrated=True)
p.save()
case.page = p
def create_filled_default_page(case):
"""
Creaates a default page filled with three test widgets and stick it to given test case
"""
create_default_page(case)
case.widget_a = w_a = TestWidget.objects.create(title='Widget A', slug='widget-a')
case.widget_b = w_b = TestWidget.objects.create(title='Widget B', slug='widget-b')
case.widget_c = w_c = TestWidget.objects.create(title='Widget C', slug='widget-c')
# widget_unassigned (to page or whatever)
case.widget_u = w_u = TestWidget.objects.create(title='Widget U', slug='widget-u')
stored_layout = {
'static_containers': [],
'containers': [[
{
'widget_ct_id': w_a.content_type_id,
'widget_id': w_a.pk,
'config': {},
'state': WidgetInLayout.STATE_NEW,
},
{
'widget_ct_id': w_b.content_type_id,
'widget_id': w_b.pk,
'config': {},
'state': WidgetInLayout.STATE_NEW,
},
],[
{
'widget_ct_id': w_c.content_type_id,
'widget_id': w_c.pk,
'config': {},
'state': WidgetInLayout.STATE_NEW,
},
]]
}
case.page.layout_json = json.serialize(stored_layout)
case.page.save()
def build_layout_containers(*widgets_in_containers):
containers = []
for container in widgets_in_containers:
containers.append([[unicode(widget.content_type_id), unicode(widget.pk)] for widget in container])
return containers
class SessionWrapper(dict):
pass
def build_request(get_query=None, post_query=None, session={}, cookies={}, ip_address=None):
"""
Returns request object with useful attributes
"""
from django.http import HttpRequest, QueryDict
from django.contrib.auth.middleware import LazyUser
request = HttpRequest()
# GET and POST
if get_query:
request.GET = QueryDict(get_query)
if post_query:
request.POST = QueryDict(post_query)
# Session and cookies
request.session = SessionWrapper(session)
request.session.session_key = 'XXX'
request.COOKIES = cookies
# User
request.__class__.user = LazyUser()
# Meta
request.META['REMOTE_ADDR'] = ip_address or '0.0.0.0'
return request
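# A small usage sketch (hypothetical values) for tests; not part of the
# original helpers:
def build_example_request():
    return build_request(get_query='page=2', session={'visited': True},
                         ip_address='127.0.0.1')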
def get_widget(pk):
return Widget.objects.get(pk=pk).get_child()
| bsd-3-clause | -7,238,823,112,482,304,000 | 33.888889 | 153 | 0.603185 | false |
senior-zero/metanet | metanet/networks/groups/group.py | 1 | 3102 | # @file group.py
# Node implementation
## @package groups
# @author Evtushenko Georgy
# @date 05/03/2015 17:19:00
# @version 1.1
## @mainpage Metanet documentation
# @section intro_sec Introduction
# Short script to demonstrate the use of doxygen.
#
# @section license_sec License
#\verbatim This file is part of MetaNet.
#
# MetaNet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MetaNet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MetaNet. If not, see <http://www.gnu.org/licenses/>.
#
# (Этот файл — часть MetaNet.
#
# MetaNet - свободная программа: вы можете перераспространять ее и/или
# изменять ее на условиях Стандартной общественной лицензии GNU в том виде,
# в каком она была опубликована Фондом свободного программного обеспечения;
# либо версии 3 лицензии, либо (по вашему выбору) любой более поздней
# версии.
#
# MetaNet распространяется в надежде, что она будет полезной,
# но БЕЗО ВСЯКИХ ГАРАНТИЙ; даже без неявной гарантии ТОВАРНОГО ВИДА
# или ПРИГОДНОСТИ ДЛЯ ОПРЕДЕЛЕННЫХ ЦЕЛЕЙ. Подробнее см. в Стандартной
# общественной лицензии GNU.
#
# Вы должны были получить копию Стандартной общественной лицензии GNU
# вместе с этой программой. Если это не так, см.
# <http://www.gnu.org/licenses/>.)
#\endverbatim
__author__ = 'Evtushenko Georgy'
import numpy as np
## Group class.
#<p>A class that makes it easier to add groups of neurons/networks
# and to operate on them.</p>
class Group(object):
    ## Constructor for a group of networks/neurons
    # @param self Pointer to the object.
    # @param size Number of elements to generate
    # @param create_func Generator function that creates the nodes
def __init__(self, size: int, create_func):
self.node_list = np.array([create_func(i) for i in range(size)])
def get_node(self, i):
return self.node_list[i]
def get_nodes(self):
return self.node_list
def get_size(self):
return self.node_list.size
def __len__(self):
return self.node_list.size | gpl-3.0 | 1,175,809,931,672,178,700 | 31.824324 | 76 | 0.720758 | false |
gunesacar/tbb-fp-tests | selenium-tests/test_fp_screen_coords.py | 2 | 1032 | #!/usr/bin/python
import tbbtest
class Test(tbbtest.TBBTest):
def test_screen_coords(self):
# https://gitweb.torproject.org/torbrowser.git/blob/HEAD:/src/current-patches/firefox/0021-Do-not-expose-physical-screen-info.-via-window-and-w.patch
driver = self.driver
js = driver.execute_script
# check that screenX, screenY are 0
self.assertEqual(True, js("return screenX === 0"))
self.assertEqual(True, js("return screenY === 0"))
# check that mozInnerScreenX, mozInnerScreenY are 0
self.assertEqual(True, js("return mozInnerScreenX === 0"))
self.assertEqual(True, js("return mozInnerScreenY === 0"))
# check that screenLeft, screenTop are 0
self.assertEqual(True, js("return screen.left === 0"))
self.assertEqual(True, js("return screen.top === 0"))
# check that availLeft, availTop are 0
self.assertEqual(True, js("return screen.availLeft === 0"))
self.assertEqual(True, js("return screen.availTop === 0"))
| agpl-3.0 | 8,252,620,859,575,495,000 | 48.142857 | 157 | 0.655039 | false |
skob/alerta | alerta/app/utils.py | 1 | 9559 |
import datetime
import pytz
import re
try:
import simplejson as json
except ImportError:
import json
from functools import wraps
from flask import request, g, current_app
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from alerta.app import app, db
from alerta.app.exceptions import RejectException, RateLimit, BlackoutPeriod
from alerta.app.metrics import Counter, Timer
from alerta.plugins import Plugins
LOG = app.logger
plugins = Plugins()
reject_counter = Counter('alerts', 'rejected', 'Rejected alerts', 'Number of rejected alerts')
error_counter = Counter('alerts', 'errored', 'Errored alerts', 'Number of errored alerts')
duplicate_timer = Timer('alerts', 'duplicate', 'Duplicate alerts', 'Total time to process number of duplicate alerts')
correlate_timer = Timer('alerts', 'correlate', 'Correlated alerts', 'Total time to process number of correlated alerts')
create_timer = Timer('alerts', 'create', 'Newly created alerts', 'Total time to process number of new alerts')
pre_plugin_timer = Timer('plugins', 'prereceive', 'Pre-receive plugins', 'Total number of pre-receive plugins')
post_plugin_timer = Timer('plugins', 'postreceive', 'Post-receive plugins', 'Total number of post-receive plugins')
def jsonp(func):
"""Wraps JSONified output for JSONP requests."""
@wraps(func)
def decorated(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
data = str(func(*args, **kwargs).data)
content = str(callback) + '(' + data + ')'
mimetype = 'application/javascript'
return current_app.response_class(content, mimetype=mimetype)
else:
return func(*args, **kwargs)
return decorated
def absolute_url(path=''):
return urljoin(request.base_url.rstrip('/'), app.config.get('BASE_URL', '') + path)
def add_remote_ip(request, alert):
if request.headers.getlist("X-Forwarded-For"):
alert.attributes.update(ip=request.headers.getlist("X-Forwarded-For")[0])
else:
alert.attributes.update(ip=request.remote_addr)
PARAMS_EXCLUDE = [
'_',
'callback',
'token',
'api-key'
]
def parse_fields(p):
params = p.copy()
query_time = datetime.datetime.utcnow()
for s in PARAMS_EXCLUDE:
if s in params:
del params[s]
if params.get('q', None):
query = json.loads(params['q'])
del params['q']
else:
query = dict()
if g.get('customer', None):
query['customer'] = g.get('customer')
page = params.get('page', 1)
if 'page' in params:
del params['page']
page = int(page)
if params.get('from-date', None):
try:
from_date = datetime.datetime.strptime(params['from-date'], '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError as e:
LOG.warning('Could not parse from-date query parameter: %s', e)
raise
from_date = from_date.replace(tzinfo=pytz.utc)
del params['from-date']
else:
from_date = None
if params.get('to-date', None):
try:
to_date = datetime.datetime.strptime(params['to-date'], '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError as e:
LOG.warning('Could not parse to-date query parameter: %s', e)
raise
to_date = to_date.replace(tzinfo=pytz.utc)
del params['to-date']
else:
to_date = query_time
to_date = to_date.replace(tzinfo=pytz.utc)
if from_date and to_date:
query['lastReceiveTime'] = {'$gt': from_date, '$lte': to_date}
elif to_date:
query['lastReceiveTime'] = {'$lte': to_date}
if params.get('duplicateCount', None):
query['duplicateCount'] = int(params.get('duplicateCount'))
del params['duplicateCount']
if params.get('repeat', None):
query['repeat'] = True if params.get('repeat', 'true') == 'true' else False
del params['repeat']
sort = list()
direction = 1
if params.get('reverse', None):
direction = -1
del params['reverse']
if params.get('sort-by', None):
for sort_by in params.getlist('sort-by'):
if sort_by in ['createTime', 'receiveTime', 'lastReceiveTime']:
sort.append((sort_by, -direction)) # reverse chronological
else:
sort.append((sort_by, direction))
del params['sort-by']
else:
sort.append(('lastReceiveTime', -direction))
group = list()
if 'group-by' in params:
group = params.get('group-by')
del params['group-by']
if 'limit' in params:
limit = params.get('limit')
del params['limit']
else:
limit = app.config['QUERY_LIMIT']
limit = int(limit)
ids = params.getlist('id')
if len(ids) == 1:
query['$or'] = [{'_id': {'$regex': '^' + ids[0]}}, {'lastReceiveId': {'$regex': '^' + ids[0]}}]
del params['id']
elif ids:
query['$or'] = [{'_id': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}, {'lastReceiveId': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}]
del params['id']
if 'fields' in params:
fields = dict([(field, True) for field in params.get('fields').split(',')])
fields.update({'resource': True, 'event': True, 'environment': True, 'createTime': True, 'receiveTime': True, 'lastReceiveTime': True})
del params['fields']
elif 'fields!' in params:
fields = dict([(field, False) for field in params.get('fields!').split(',')])
del params['fields!']
else:
fields = dict()
for field in params:
value = params.getlist(field)
if len(value) == 1:
value = value[0]
if field.endswith('!'):
if value.startswith('~'):
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$ne'] = value
else:
if value.startswith('~'):
query[field] = dict()
query[field]['$regex'] = re.compile(value[1:], re.IGNORECASE)
else:
query[field] = value
else:
if field.endswith('!'):
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field[:-1]] = dict()
query[field[:-1]]['$not'] = re.compile(value, re.IGNORECASE)
else:
query[field[:-1]] = dict()
query[field[:-1]]['$nin'] = value
else:
if '~' in [v[0] for v in value]:
value = '|'.join([v.lstrip('~') for v in value])
query[field] = dict()
query[field]['$regex'] = re.compile(value, re.IGNORECASE)
else:
query[field] = dict()
query[field]['$in'] = value
return query, fields, sort, group, page, limit, query_time
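# A usage sketch (hypothetical handler code): parse_fields() is normally fed
# Flask's request.args inside a request context, and the parsed pieces are
# handed on to the database layer, e.g.:
#
#   query, fields, sort, group, page, limit, query_time = parse_fields(request.args)
#   found = db.get_alerts(query=query, fields=fields, sort=sort,
#                         page=page, limit=limit)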
def process_alert(alert):
for plugin in plugins.routing(alert):
started = pre_plugin_timer.start_timer()
try:
alert = plugin.pre_receive(alert)
except (RejectException, RateLimit):
reject_counter.inc()
pre_plugin_timer.stop_timer(started)
raise
except Exception as e:
error_counter.inc()
pre_plugin_timer.stop_timer(started)
raise RuntimeError("Error while running pre-receive plug-in '%s': %s" % (plugin.name, str(e)))
if not alert:
error_counter.inc()
pre_plugin_timer.stop_timer(started)
raise SyntaxError("Plug-in '%s' pre-receive hook did not return modified alert" % plugin.name)
pre_plugin_timer.stop_timer(started)
if db.is_blackout_period(alert):
raise BlackoutPeriod("Suppressed alert during blackout period")
try:
if db.is_duplicate(alert):
started = duplicate_timer.start_timer()
alert = db.save_duplicate(alert)
duplicate_timer.stop_timer(started)
elif db.is_correlated(alert):
started = correlate_timer.start_timer()
alert = db.save_correlated(alert)
correlate_timer.stop_timer(started)
else:
started = create_timer.start_timer()
alert = db.create_alert(alert)
create_timer.stop_timer(started)
except Exception as e:
error_counter.inc()
raise RuntimeError(e)
for plugin in plugins.routing(alert):
started = post_plugin_timer.start_timer()
try:
plugin.post_receive(alert)
except Exception as e:
error_counter.inc()
post_plugin_timer.stop_timer(started)
raise RuntimeError("Error while running post-receive plug-in '%s': %s" % (plugin.name, str(e)))
post_plugin_timer.stop_timer(started)
return alert
def process_status(alert, status, text):
for plugin in plugins.routing(alert):
try:
plugin.status_change(alert, status, text)
except RejectException:
reject_counter.inc()
raise
except Exception as e:
error_counter.inc()
raise RuntimeError("Error while running status plug-in '%s': %s" % (plugin.name, str(e)))
| apache-2.0 | -4,402,401,651,658,987,000 | 33.634058 | 167 | 0.568888 | false |
RulersOfAsgard/ALAMO-worker | alamo_worker/alerter/tests/test_preprocessor.py | 1 | 1518 | import mock
from unittest import TestCase
from alamo_worker.alerter.plugins.image_renderer import ImageRendererPlugin
from alamo_worker.alerter.preprocessor import AlertPreprocessor
from alamo_common.test.utils import override_settings
from tests.base import run_async, AsyncMock
class AlertPreprocessorTestcase(TestCase):
@override_settings(ALERTER_PLUGINS=['image_renderer'])
@mock.patch.object(ImageRendererPlugin, '__call__', new_callable=AsyncMock)
def test_preprocess_calls_image_rendering_plugin(self, plugin):
pre = AlertPreprocessor()
pre.load()
check, trigger, event_type = mock.Mock(), mock.Mock(), mock.Mock()
run_async(pre.preprocess(check, trigger, event_type))
plugin.assert_called_once_with(check, trigger, event_type)
@override_settings(ALERTER_PLUGINS=['image_renderer'])
@mock.patch.object(ImageRendererPlugin, '__call__', side_effect=RuntimeError) # noqa
def test_doesnt_fail_hard_if_plugin_raises_exception(self, mock_plugin):
pre = AlertPreprocessor()
pre.load()
run_async(pre.preprocess(mock.Mock(), mock.Mock(), mock.Mock()))
self.assertTrue(mock_plugin.called)
@override_settings(ALERTER_PLUGINS=[])
@mock.patch.object(ImageRendererPlugin, '__call__', new_callable=AsyncMock)
def test_no_plugins(self, plugin):
pre = AlertPreprocessor()
pre.load()
run_async(pre.preprocess(mock.Mock(), mock.Mock(), mock.Mock()))
self.assertFalse(plugin.called)
| apache-2.0 | 5,802,002,914,782,058,000 | 42.371429 | 90 | 0.70751 | false |
jschrewe/django-mongodbforms | mongodbforms/documents.py | 1 | 39225 | import os
import itertools
from collections import Callable, OrderedDict
from functools import reduce
from django.forms.forms import (BaseForm, DeclarativeFieldsMetaclass,
NON_FIELD_ERRORS, pretty_name)
from django.forms.widgets import media_property
from django.core.exceptions import FieldError
from django.core.validators import EMPTY_VALUES
from django.forms.util import ErrorList
from django.forms.formsets import BaseFormSet, formset_factory
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.text import capfirst, get_valid_filename
from mongoengine.fields import (ObjectIdField, ListField, ReferenceField,
FileField, MapField, EmbeddedDocumentField)
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from mongoengine.queryset import OperationError, Q
from mongoengine.queryset.base import BaseQuerySet
from mongoengine.connection import get_db, DEFAULT_CONNECTION_NAME
from mongoengine.base import NON_FIELD_ERRORS as MONGO_NON_FIELD_ERRORS
from gridfs import GridFS
from mongodbforms.documentoptions import DocumentMetaWrapper
from mongodbforms.util import with_metaclass, load_field_generator
_fieldgenerator = load_field_generator()
def _get_unique_filename(name, db_alias=DEFAULT_CONNECTION_NAME,
collection_name='fs'):
fs = GridFS(get_db(db_alias), collection_name)
file_root, file_ext = os.path.splitext(get_valid_filename(name))
count = itertools.count(1)
while fs.exists(filename=name):
# file_ext includes the dot.
name = os.path.join("%s_%s%s" % (file_root, next(count), file_ext))
return name
def _save_iterator_file(field, instance, uploaded_file, file_data=None):
"""
Takes care of saving a file for a list field. Returns a Mongoengine
fileproxy object or the file field.
"""
# for a new file we need a new proxy object
if file_data is None:
file_data = field.field.get_proxy_obj(key=field.name,
instance=instance)
if file_data.instance is None:
file_data.instance = instance
if file_data.key is None:
file_data.key = field.name
if file_data.grid_id:
file_data.delete()
uploaded_file.seek(0)
filename = _get_unique_filename(uploaded_file.name, field.field.db_alias,
field.field.collection_name)
file_data.put(uploaded_file, content_type=uploaded_file.content_type,
filename=filename)
file_data.close()
return file_data
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a document instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
cleaned_data = form.cleaned_data
file_field_list = []
    # check whether the object is instantiated
if isinstance(instance, type):
instance = instance()
for f in instance._fields.values():
if isinstance(f, ObjectIdField):
continue
if f.name not in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, FileField) or \
(isinstance(f, (MapField, ListField)) and
isinstance(f.field, FileField)):
file_field_list.append(f)
else:
setattr(instance, f.name, cleaned_data.get(f.name))
for f in file_field_list:
if isinstance(f, MapField):
map_field = getattr(instance, f.name)
uploads = cleaned_data[f.name]
for key, uploaded_file in uploads.items():
if uploaded_file is None:
continue
file_data = map_field.get(key, None)
map_field[key] = _save_iterator_file(f, instance,
uploaded_file, file_data)
setattr(instance, f.name, map_field)
elif isinstance(f, ListField):
list_field = getattr(instance, f.name)
uploads = cleaned_data[f.name]
for i, uploaded_file in enumerate(uploads):
if uploaded_file is None:
continue
try:
file_data = list_field[i]
except IndexError:
file_data = None
file_obj = _save_iterator_file(f, instance,
uploaded_file, file_data)
try:
list_field[i] = file_obj
except IndexError:
list_field.append(file_obj)
setattr(instance, f.name, list_field)
else:
field = getattr(instance, f.name)
upload = cleaned_data[f.name]
if upload is None:
continue
try:
upload.file.seek(0)
# delete first to get the names right
if field.grid_id:
field.delete()
filename = _get_unique_filename(upload.name, f.db_alias,
f.collection_name)
field.put(upload, content_type=upload.content_type,
filename=filename)
setattr(instance, f.name, field)
except AttributeError:
# file was already uploaded and not changed during edit.
# upload is already the gridfsproxy object we need.
upload.get()
setattr(instance, f.name, upload)
return instance
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into document ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (instance.__class__.__name__,
fail_message))
if commit and hasattr(instance, 'save'):
# see BaseDocumentForm._post_clean for an explanation
# if len(form._meta._dont_save) > 0:
# data = instance._data
# new_data = dict([(n, f) for n, f in data.items() if not n \
# in form._meta._dont_save])
# instance._data = new_data
# instance.save()
# instance._data = data
# else:
instance.save()
return instance
def document_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
data = {}
for f in instance._fields.values():
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
data[f.name] = getattr(instance, f.name, '')
return data
def fields_for_document(document, fields=None, exclude=None, widgets=None,
formfield_callback=None,
field_generator=_fieldgenerator):
"""
    Returns an ``OrderedDict`` containing form fields for the given document.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
"""
field_list = []
if isinstance(field_generator, type):
field_generator = field_generator()
if formfield_callback and not isinstance(formfield_callback, Callable):
raise TypeError('formfield_callback must be a function or callable')
for name in document._fields_ordered:
f = document._fields.get(name)
if isinstance(f, ObjectIdField):
continue
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if widgets and f.name in widgets:
kwargs = {'widget': widgets[f.name]}
else:
kwargs = {}
if formfield_callback:
formfield = formfield_callback(f, **kwargs)
else:
formfield = field_generator.generate(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude))]
)
return field_dict
class ModelFormOptions(object):
def __init__(self, options=None):
# document class can be declared with 'document =' or 'model ='
self.document = getattr(options, 'document', None)
if self.document is None:
self.document = getattr(options, 'model', None)
self.model = self.document
meta = getattr(self.document, '_meta', {})
# set up the document meta wrapper if document meta is a dict
if self.document is not None and \
not isinstance(meta, DocumentMetaWrapper):
self.document._meta = DocumentMetaWrapper(self.document)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.embedded_field = getattr(options, 'embedded_field_name', None)
self.formfield_generator = getattr(options, 'formfield_generator',
_fieldgenerator)
self._dont_save = []
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
class DocumentFormMetaclass(DeclarativeFieldsMetaclass):
def __new__(cls, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
try:
parents = [
b for b in bases
if issubclass(b, DocumentForm) or
issubclass(b, EmbeddedDocumentForm)
]
except NameError:
# We are defining DocumentForm itself.
parents = None
new_class = super(DocumentFormMetaclass, cls).__new__(cls, name,
bases, attrs)
if not parents:
return new_class
if 'media' not in attrs:
new_class.media = media_property(new_class)
opts = new_class._meta = ModelFormOptions(
getattr(new_class, 'Meta', None)
)
if opts.document:
formfield_generator = getattr(opts,
'formfield_generator',
_fieldgenerator)
# If a model is defined, extract form fields from it.
fields = fields_for_document(opts.document, opts.fields,
opts.exclude, opts.widgets,
formfield_callback,
formfield_generator)
# make sure opts.fields doesn't specify an invalid field
none_document_fields = [k for k, v in fields.items() if not v]
missing_fields = (set(none_document_fields) -
set(new_class.declared_fields.keys()))
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(new_class.declared_fields)
else:
fields = new_class.declared_fields
new_class.base_fields = fields
return new_class
class BaseDocumentForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False, instance=None):
opts = self._meta
if instance is None:
if opts.document is None:
raise ValueError('A document class must be provided.')
# if we didn't get an instance, instantiate a new one
self.instance = opts.document
object_data = {}
else:
self.instance = instance
object_data = document_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
        # self._validate_unique will be set to True by BaseDocumentForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseDocumentForm, self).__init__(data, files, auto_id, prefix,
object_data, error_class,
label_suffix, empty_permitted)
def _update_errors(self, message_dict):
for k, v in list(message_dict.items()):
if k != NON_FIELD_ERRORS:
self._errors.setdefault(k, self.error_class()).extend(v)
# Remove the invalid data from the cleaned_data dict
if k in self.cleaned_data:
del self.cleaned_data[k]
if NON_FIELD_ERRORS in message_dict:
messages = message_dict[NON_FIELD_ERRORS]
self._errors.setdefault(NON_FIELD_ERRORS,
self.error_class()).extend(messages)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._fields.values():
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if f.name not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and f.name not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and f.name in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif f.name in list(self._errors.keys()):
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
            # validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
field_value = self.cleaned_data.get(f.name, None)
if not f.required and field_value in EMPTY_VALUES:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _post_clean(self):
opts = self._meta
# Update the model instance with self.cleaned_data.
self.instance = construct_instance(self, self.instance, opts.fields,
opts.exclude)
changed_fields = getattr(self.instance, '_changed_fields', [])
exclude = self._get_validation_exclusions()
try:
for f in self.instance._fields.values():
value = getattr(self.instance, f.name)
if f.name not in exclude:
f.validate(value)
elif value in EMPTY_VALUES and f.name not in changed_fields:
# mongoengine chokes on empty strings for fields
# that are not required. Clean them up here, though
# this is maybe not the right place :-)
setattr(self.instance, f.name, None)
# opts._dont_save.append(f.name)
except ValidationError as e:
err = {f.name: [e.message]}
self._update_errors(err)
# Call validate() on the document. Since mongoengine
# does not provide an argument to specify which fields
# should be excluded during validation, we replace
# instance._fields_ordered with a version that does
# not include excluded fields. The attribute gets
# restored after validation.
original_fields = self.instance._fields_ordered
self.instance._fields_ordered = tuple(
[f for f in original_fields if f not in exclude]
)
try:
self.instance.validate()
except ValidationError as e:
if MONGO_NON_FIELD_ERRORS in e.errors:
error = e.errors.get(MONGO_NON_FIELD_ERRORS)
else:
error = e.message
self._update_errors({NON_FIELD_ERRORS: [error, ]})
finally:
self.instance._fields_ordered = original_fields
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
        Validates unique constraints on the document.
unique_with is supported now.
"""
errors = []
exclude = self._get_validation_exclusions()
for f in self.instance._fields.values():
if f.unique and f.name not in exclude:
filter_kwargs = {
f.name: getattr(self.instance, f.name),
'q_obj': None,
}
if f.unique_with:
for u_with in f.unique_with:
u_with_field = self.instance._fields[u_with]
u_with_attr = getattr(self.instance, u_with)
# handling ListField(ReferenceField()) sucks big time
# What we need to do is construct a Q object that
# queries for the pk of every list entry and only
# accepts lists with the same length as our list
if isinstance(u_with_field, ListField) and \
isinstance(u_with_field.field, ReferenceField):
q_list = [Q(**{u_with: k.pk}) for k in u_with_attr]
q = reduce(lambda x, y: x & y, q_list)
size_key = '%s__size' % u_with
q = q & Q(**{size_key: len(u_with_attr)})
filter_kwargs['q_obj'] = q & filter_kwargs['q_obj']
else:
filter_kwargs[u_with] = u_with_attr
qs = self.instance.__class__.objects.clone()
qs = qs.no_dereference().filter(**filter_kwargs)
# Exclude the current object from the query if we are editing
# an instance (as opposed to creating a new one)
if self.instance.pk is not None:
qs = qs.filter(pk__ne=self.instance.pk)
if qs.count() > 0:
message = _("%s with this %s already exists.") % (
str(capfirst(self.instance._meta.verbose_name)),
str(pretty_name(f.name))
)
err_dict = {f.name: [message]}
self._update_errors(err_dict)
errors.append(err_dict)
return errors
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
try:
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
except (KeyError, AttributeError):
fail_message = 'embedded document saved'
obj = save_instance(self, self.instance, self._meta.fields,
fail_message, commit, construct=False)
return obj
save.alters_data = True
class DocumentForm(with_metaclass(DocumentFormMetaclass, BaseDocumentForm)):
pass
def documentform_factory(document, form=DocumentForm, fields=None,
exclude=None, formfield_callback=None):
# Build up a list of attributes that the Meta object will have.
attrs = {'document': document, 'model': document}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type('Meta', parent, attrs)
# Give this new form class a reasonable name.
if isinstance(document, type):
doc_inst = document()
else:
doc_inst = document
class_name = doc_inst.__class__.__name__ + 'Form'
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
return DocumentFormMetaclass(class_name, (form,), form_class_attrs)
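# A minimal sketch (hypothetical document class, assuming
# ``from mongoengine import Document, StringField``); not part of this module:
#
#   class BlogPost(Document):
#       title = StringField(required=True)
#       body = StringField()
#
#   BlogPostForm = documentform_factory(BlogPost, fields=['title', 'body'])
#   form = BlogPostForm(data={'title': 'Hello', 'body': '...'})
#   if form.is_valid():
#       post = form.save()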
class EmbeddedDocumentForm(with_metaclass(DocumentFormMetaclass,
BaseDocumentForm)):
def __init__(self, parent_document, data=None, files=None, position=None,
*args, **kwargs):
if self._meta.embedded_field is not None and \
self._meta.embedded_field not in parent_document._fields:
raise FieldError("Parent document must have field %s" %
self._meta.embedded_field)
instance = kwargs.pop('instance', None)
if isinstance(parent_document._fields.get(self._meta.embedded_field),
ListField):
# if we received a list position of the instance and no instance
# load the instance from the parent document and proceed as normal
if instance is None and position is not None:
instance = getattr(parent_document,
self._meta.embedded_field)[position]
# same as above only the other way around. Note: Mongoengine
# defines equality as having the same data, so if you have 2
# objects with the same data the first one will be edited. That
# may or may not be the right one.
if instance is not None and position is None:
emb_list = getattr(parent_document, self._meta.embedded_field)
position = next(
(i for i, obj in enumerate(emb_list) if obj == instance),
None
)
super(EmbeddedDocumentForm, self).__init__(data=data, files=files,
instance=instance, *args,
**kwargs)
self.parent_document = parent_document
self.position = position
def save(self, commit=True):
"""If commit is True the embedded document is added to the parent
document. Otherwise the parent_document is left untouched and the
        embedded document is returned as usual.
"""
if self.errors:
raise ValueError("The %s could not be saved because the data"
"didn't validate." %
self.instance.__class__.__name__)
if commit:
field = self.parent_document._fields.get(self._meta.embedded_field)
if isinstance(field, ListField) and self.position is None:
# no position given, simply appending to ListField
try:
self.parent_document.update(**{
"push__" + self._meta.embedded_field: self.instance
})
except:
raise OperationError("The %s could not be appended." %
self.instance.__class__.__name__)
elif isinstance(field, ListField) and self.position is not None:
# updating ListField at given position
try:
self.parent_document.update(**{
"__".join(("set", self._meta.embedded_field,
str(self.position))): self.instance
})
except:
raise OperationError("The %s could not be updated at "
"position %d." %
(self.instance.__class__.__name__,
self.position))
else:
# not a listfield on parent, treat as an embedded field
setattr(self.parent_document, self._meta.embedded_field,
self.instance)
self.parent_document.save()
return self.instance
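# A usage sketch (hypothetical documents): ``embedded_field_name`` in Meta
# names the field on the parent document that holds the embedded documents.
#
#   class CommentForm(EmbeddedDocumentForm):
#       class Meta:
#           document = Comment                 # an EmbeddedDocument subclass
#           embedded_field_name = 'comments'   # ListField on the parent
#           fields = ['author', 'text']
#
#   form = CommentForm(parent_document=post, data=request.POST)
#   if form.is_valid():
#       form.save()  # appends the new comment to post.comments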
class BaseDocumentFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=[], **kwargs):
if not isinstance(queryset, (list, BaseQuerySet)):
queryset = [queryset]
self.queryset = queryset
self.initial = self.construct_initial()
defaults = {'data': data, 'files': files, 'auto_id': auto_id,
'prefix': prefix, 'initial': self.initial}
defaults.update(kwargs)
super(BaseDocumentFormSet, self).__init__(**defaults)
def construct_initial(self):
initial = []
try:
for d in self.get_queryset():
initial.append(document_to_dict(d))
except TypeError:
pass
return initial
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseDocumentFormSet, self).initial_form_count()
def get_queryset(self):
qs = self.queryset or []
return qs
def save_object(self, form):
obj = form.save(commit=False)
return obj
def save(self, commit=True):
"""
Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
saved = []
for form in self.forms:
if not form.has_changed() and form not in self.initial_forms:
continue
obj = self.save_object(form)
if form.cleaned_data.get("DELETE", False):
try:
obj.delete()
except AttributeError:
# if it has no delete method it is an embedded object. We
# just don't add to the list and it's gone. Cool huh?
continue
if commit:
obj.save()
saved.append(obj)
return saved
def clean(self):
self.validate_unique()
def validate_unique(self):
errors = []
for form in self.forms:
if not hasattr(form, 'cleaned_data'):
continue
errors += form.validate_unique()
if errors:
raise ValidationError(errors)
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s "
"in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': str(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def documentformset_factory(document, form=DocumentForm,
formfield_callback=None,
formset=BaseDocumentFormSet,
extra=1, can_delete=False, can_order=False,
max_num=None, fields=None, exclude=None):
"""
Returns a FormSet class for the given Django model class.
"""
form = documentform_factory(document, form=form, fields=fields,
exclude=exclude,
formfield_callback=formfield_callback)
FormSet = formset_factory(form, formset, extra=extra, max_num=max_num,
can_order=can_order, can_delete=can_delete)
FormSet.model = document
FormSet.document = document
return FormSet
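# Editor's sketch of typical factory usage, assuming a hypothetical ``Post``
# Document class (not part of the original source):
#
#     PostFormSet = documentformset_factory(Post, extra=2, can_delete=True)
#     formset = PostFormSet(queryset=Post.objects.all())
#     if formset.is_valid():
#         formset.save()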
class BaseInlineDocumentFormSet(BaseDocumentFormSet):
"""
A formset for child objects related to a parent.
self.instance -> the document containing the inline objects
"""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=[], **kwargs):
self.instance = instance
self.save_as_new = save_as_new
super(BaseInlineDocumentFormSet, self).__init__(data, files,
prefix=prefix,
queryset=queryset,
**kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineDocumentFormSet, self).initial_form_count()
# @classmethod
def get_default_prefix(cls):
return cls.document.__name__.lower()
get_default_prefix = classmethod(get_default_prefix)
def add_fields(self, form, index):
super(BaseInlineDocumentFormSet, self).add_fields(form, index)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
# form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [
field for field in unique_check if field != self.fk.name
]
return super(BaseInlineDocumentFormSet, self).get_unique_error_message(
unique_check
)
def inlineformset_factory(document, form=DocumentForm,
formset=BaseInlineDocumentFormSet,
fields=None, exclude=None,
extra=1, can_order=False, can_delete=True,
max_num=None, formfield_callback=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'max_num': max_num,
}
FormSet = documentformset_factory(document, **kwargs)
return FormSet
class EmbeddedDocumentFormSet(BaseDocumentFormSet):
def __init__(self, data=None, files=None, save_as_new=False,
prefix=None, queryset=[], parent_document=None, **kwargs):
if parent_document is not None:
self.parent_document = parent_document
if 'instance' in kwargs:
instance = kwargs.pop('instance')
if parent_document is None:
self.parent_document = instance
queryset = getattr(self.parent_document, self.form._meta.embedded_field)
if not isinstance(queryset, list) and queryset is None:
queryset = []
elif not isinstance(queryset, list):
queryset = [queryset, ]
        # Keep save_as_new on the formset itself; passing it positionally
        # would land in BaseDocumentFormSet's auto_id argument.
        self.save_as_new = save_as_new
        super(EmbeddedDocumentFormSet, self).__init__(data, files,
                                                      prefix=prefix,
                                                      queryset=queryset,
                                                      **kwargs)
def _construct_form(self, i, **kwargs):
defaults = {'parent_document': self.parent_document}
# add position argument to the form. Otherwise we will spend
# a huge amount of time iterating over the list field on form __init__
emb_list = getattr(self.parent_document,
self.form._meta.embedded_field)
if emb_list is not None and len(emb_list) > i:
defaults['position'] = i
defaults.update(kwargs)
form = super(EmbeddedDocumentFormSet, self)._construct_form(
i, **defaults)
return form
@classmethod
def get_default_prefix(cls):
return cls.document.__name__.lower()
@property
def empty_form(self):
form = self.form(
self.parent_document,
auto_id=self.auto_id,
prefix=self.add_prefix('__prefix__'),
empty_permitted=True,
)
self.add_fields(form, None)
return form
def save(self, commit=True):
# Don't try to save the new documents. Embedded objects don't have
# a save method anyway.
objs = super(EmbeddedDocumentFormSet, self).save(commit=False)
objs = objs or []
if commit and self.parent_document is not None:
field = self.parent_document._fields.get(
self.form._meta.embedded_field, None)
if isinstance(field, EmbeddedDocumentField):
try:
obj = objs[0]
except IndexError:
obj = None
setattr(
self.parent_document, self.form._meta.embedded_field, obj)
else:
setattr(
self.parent_document, self.form._meta.embedded_field, objs)
self.parent_document.save()
return objs
def _get_embedded_field(parent_doc, document, emb_name=None, can_fail=False):
if emb_name:
emb_fields = [
f for f in parent_doc._fields.values() if f.name == emb_name]
if len(emb_fields) == 1:
field = emb_fields[0]
if not isinstance(field, (EmbeddedDocumentField, ListField)) or \
(isinstance(field, EmbeddedDocumentField) and
field.document_type != document) or \
(isinstance(field, ListField) and
isinstance(field.field, EmbeddedDocumentField) and
field.field.document_type != document):
raise Exception(
"emb_name '%s' is not a EmbeddedDocumentField or not a ListField to %s" % (
emb_name, document
)
)
elif len(emb_fields) == 0:
raise Exception("%s has no field named '%s'" %
(parent_doc, emb_name))
else:
emb_fields = [
f for f in parent_doc._fields.values()
            if (isinstance(f, EmbeddedDocumentField) and
                f.document_type == document) or
               (isinstance(f, ListField) and
                isinstance(f.field, EmbeddedDocumentField) and
                f.field.document_type == document)
]
if len(emb_fields) == 1:
field = emb_fields[0]
elif len(emb_fields) == 0:
if can_fail:
return
raise Exception(
"%s has no EmbeddedDocumentField or ListField to %s" % (parent_doc, document))
else:
raise Exception(
"%s has more than 1 EmbeddedDocumentField to %s" % (parent_doc, document))
return field
def embeddedformset_factory(document, parent_document,
form=EmbeddedDocumentForm,
formset=EmbeddedDocumentFormSet,
embedded_name=None,
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None, formfield_callback=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
emb_field = _get_embedded_field(parent_document, document, emb_name=embedded_name)
if isinstance(emb_field, EmbeddedDocumentField):
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'max_num': max_num,
}
FormSet = documentformset_factory(document, **kwargs)
FormSet.form._meta.embedded_field = emb_field.name
return FormSet
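# Editor's sketch wiring the embedded formset factory, assuming the same
# hypothetical ``Blog``/``Comment`` documents used in the sketches above:
#
#     CommentFormSet = embeddedformset_factory(Comment, Blog, extra=1)
#     formset = CommentFormSet(data=request.POST or None, parent_document=blog)
#     if formset.is_valid():
#         formset.save()  # writes the embedded comments back onto ``blog``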
| bsd-3-clause | 6,904,557,605,757,164,000 | 38.621212 | 95 | 0.558572 | false |
dmych/gn | db.py | 1 | 6976 | # This file is part of Geeky Notes
#
# Geeky Notes is a CLI Simplenote client
# <https://github.com/dmych/gn>
#
# Copyright (c) Dmitri Brechalov, 2010-2011
#
# Geeky Notes is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Geeky Notes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
'''Geeky Notes - Simplenote CLI client
db.py: local database implementation
'''
import shelve
import os
import time
DATAFILE = os.path.expanduser('~/.gn.db')
## Note format according to the API:
## {
## key: (string, note identifier, created by server),
## deleted: (bool, whether or not note is in trash),
## modifydate: (last modified date, in seconds since epoch),
## createdate: (note created date, in seconds since epoch),
## syncnum: (integer, number set by server, track note changes),
## version: (integer, number set by server, track note content changes),
## minversion: (integer, number set by server, minimum version available for note),
## sharekey: (string, shared note identifier),
## publishkey: (string, published note identifier),
## systemtags: [ (Array of strings, some set by server) ],
## tags: [ (Array of strings) ],
## content: (string, data content)
## }
## Our own fields:
## CHANGED: (bool, did the note changed locally?)
KEY_PREFIX = 'GEEKYNOTES_'
class Database(object):
'''Local simplenote database based on shelve storage
New notes which are not synced have no key field.
Instead a temporary key is used to identify them in the index.
'''
def __init__(self, fname=None):
if fname is None:
fname = DATAFILE
self.db = shelve.open(fname)
def _getKeys(self, deleted=False):
if deleted:
return self.db.keys()
else:
return [k for k in self.db.keys() if self.db[k]['deleted'] == 0]
def values(self):
return self.db.values()
def index(self, sort=None, reverse=True, deleted=False):
'''Return full index
sort may be True (use default sorting by modification time), or
it may be arbitrary compare function `func(note1, note2)`
reverse - list notes in asc or desc order
deleted - show or hide deleted notes
'''
def srt(n1, n2):
n1m = float(n1['modifydate'])
n2m = float(n2['modifydate'])
if n1m < n2m: return -1
if n1m > n2m: return 1
return 0
result = list()
keys = self._getKeys(deleted)
if sort == True:
sort = srt
for k in keys:
rec = self.get(k)
del rec['content']
result.append(rec)
result.sort(cmp=sort, reverse=reverse)
return result
def tags(self, reverse=False):
'''Return ordered list of tags
'''
print 'TAGS'
result = list()
for rec in self.values():
if rec.has_key('tags'):
for tag in rec['tags']:
if not tag in result:
result.append(tag)
result.sort(reverse=reverse)
return result
def keys(self, sort=None, reverse=True, deleted=False):
result = [ item['key'] for item in self.index(sort, reverse, deleted) ]
return result
def get(self, key):
return self.db[key]
def update(self, data):
if not data.has_key('CHANGED'):
data['CHANGED'] = False
self.db[data['key']] = data
self.db.sync()
def replace(self, oldkey, data):
newkey = data['key']
self.db[newkey] = data
self.remove(oldkey)
def remove(self, key):
del self.db[key]
self.db.sync()
class Note(object):
'''Note implementetion
'''
def __init__(self, db, data=None):
self._db = db
if isinstance(data, dict):
self._data = data.copy()
self._upgradeNote()
elif data is not None: # assume data is a key
self.load(data)
else:
self._data = dict()
self._upgradeNote()
def _upgradeNote(self):
if not self._data.has_key('key'):
self._genKey()
if not self._data.has_key('deleted'):
self._data['deleted'] = 0
if not self._data.has_key('createdate'):
self._data['createdate'] = time.time()
if not self._data.has_key('modifydate'):
self._data['modifydate'] = time.time()
if not self._data.has_key('content'):
self._data['content'] = ''
if not self._data.has_key('CHANGED'):
self._data['CHANGED'] = True
def getContent(self):
return self._data['content'].decode('utf-8')
def getTitle(self, length=20):
content = self.getContent()
eoln = content.find('\n')
elipsis = ''
if eoln >= length:
elipsis = '...'
eoln = length -3
elif eoln < 0:
eoln = length
return content[:eoln].replace('\r', ' ').replace('\t', ' ') + elipsis
def setContent(self, text):
self._data['content'] = text.rstrip()
self._markModified()
def tagList(self):
return [ tag.encode('utf-8') for tag in self._data['tags']]
def getTags(self):
if self._data.has_key('tags'):
return ' '.join(self.tagList())
else:
return None
def setTags(self, tags):
'''tags should be list/tuple of strings, or space separated string
'''
from types import StringTypes
if type(tags) in StringTypes:
tags = [ item.strip().decode('utf-8') for item in tags.split(' ') ]
self._data['tags'] = list(tags)
self._markModified()
def _genKey(self):
self._data['key'] = KEY_PREFIX + str(time.time())
def _markModified(self):
self._data['modifydate'] = time.time()
self._data['CHANGED'] = True
def _isModified(self):
return self._data['CHANGED']
def load(self, key):
self._data = self._db.get(key)
def save(self):
self._db.update(self._data)
def deleted(self):
return self._data['deleted']
def markDeleted(self):
self._data['deleted'] = 1
self._markModified()
def getModifydate(self):
return float(self._data['modifydate'])
def getModifiedFormatted(self):
sec = self.getModifydate()
tsec = time.gmtime(sec)
tnow = time.gmtime(time.time())
if tsec[:3] == tnow[:3]:
# today - return time only
fmt = '%H:%M'
elif tsec[:2] == tnow[:2]:
# this month - return Month, Day
fmt = '%b %d'
else:
fmt = '%Y-%m-%d'
return time.strftime(fmt, time.localtime(sec))
def getKey(self):
return self._data['key']
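# Editor's sketch of a typical round trip (assumes the default ~/.gn.db file):
#
#     db = Database()
#     note = Note(db)
#     note.setContent('Shopping list\nmilk, eggs')
#     note.setTags('todo home')
#     note.save()                      # stored under a temporary GEEKYNOTES_ key
#     same = Note(db, note.getKey())   # reload it by key
#     print same.getTitle()            # -> 'Shopping list'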
if __name__ == '__main__':
db = Database()
for note in db.values():
print note
print '-' * 40
| gpl-3.0 | -4,818,709,503,768,851,000 | 27.707819 | 86 | 0.612099 | false |
aakashsinha19/Aspectus | Image Classification/models/differential_privacy/privacy_accountant/tf/accountant.py | 1 | 17530 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines Accountant class for keeping track of privacy spending.
A privacy accountant keeps track of privacy spendings. It has methods
accumulate_privacy_spending and get_privacy_spent. Here we only define
AmortizedAccountant which tracks the privacy spending in the amortized
way. It uses privacy amplication via sampling to compute the privacy
spending for each batch and strong composition (specialized for Gaussian
noise) for accumulate the privacy spending.
"""
from __future__ import division
import abc
import collections
import math
import sys
import numpy
import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import utils
EpsDelta = collections.namedtuple("EpsDelta", ["spent_eps", "spent_delta"])
# TODO(liqzhang) To ensure the same API for AmortizedAccountant and
# MomentsAccountant, we pass the union of arguments to both, so we
# have unused_sigma for AmortizedAccountant and unused_eps_delta for
# MomentsAccountant. Consider to revise the API to avoid the unused
# arguments. It would be good to use @abc.abstractmethod, etc, to
# define the common interface as a base class.
class AmortizedAccountant(object):
"""Keep track of privacy spending in an amortized way.
AmortizedAccountant accumulates the privacy spending by assuming
all the examples are processed uniformly at random so the spending is
amortized among all the examples. And we assume that we use Gaussian noise
so the accumulation is on eps^2 and delta, using advanced composition.
"""
def __init__(self, total_examples):
"""Initialization. Currently only support amortized tracking.
Args:
total_examples: total number of examples.
"""
assert total_examples > 0
self._total_examples = total_examples
self._eps_squared_sum = tf.Variable(tf.zeros([1]), trainable=False,
name="eps_squared_sum")
self._delta_sum = tf.Variable(tf.zeros([1]), trainable=False,
name="delta_sum")
def accumulate_privacy_spending(self, eps_delta, unused_sigma,
num_examples):
"""Accumulate the privacy spending.
Currently only support approximate privacy. Here we assume we use Gaussian
noise on randomly sampled batch so we get better composition: 1. the per
    batch privacy is computed using privacy amplification via sampling bound;
2. the composition is done using the composition with Gaussian noise.
TODO(liqzhang) Add a link to a document that describes the bounds used.
Args:
eps_delta: EpsDelta pair which can be tensors.
unused_sigma: the noise sigma. Unused for this accountant.
num_examples: the number of examples involved.
Returns:
a TensorFlow operation for updating the privacy spending.
"""
eps, delta = eps_delta
with tf.control_dependencies(
[tf.Assert(tf.greater(delta, 0),
["delta needs to be greater than 0"])]):
amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
self._total_examples)
# Use privacy amplification via sampling bound.
# See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
# TODO(liqzhang) Add a link to a document with formal statement
# and proof.
amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
tf.exp(eps) - 1.0)), [1])
amortize_delta = tf.reshape(amortize_ratio * delta, [1])
return tf.group(*[tf.assign_add(self._eps_squared_sum,
tf.square(amortize_eps)),
tf.assign_add(self._delta_sum, amortize_delta)])
def get_privacy_spent(self, sess, target_eps=None):
"""Report the spending so far.
Args:
sess: the session to run the tensor.
target_eps: the target epsilon. Unused.
Returns:
the list containing a single EpsDelta, with values as Python floats (as
opposed to numpy.float64). This is to be consistent with
MomentAccountant which can return a list of (eps, delta) pair.
"""
# pylint: disable=unused-argument
unused_target_eps = target_eps
eps_squared_sum, delta_sum = sess.run([self._eps_squared_sum,
self._delta_sum])
return [EpsDelta(math.sqrt(eps_squared_sum), float(delta_sum))]
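# Editor's sketch of how the accountant is typically driven; the session,
# batch size and (eps, delta) budget below are illustrative assumptions:
#
#   accountant = AmortizedAccountant(total_examples=60000)
#   spend_op = accountant.accumulate_privacy_spending(
#       EpsDelta(1.0, 1e-5), unused_sigma=None, num_examples=batch_size)
#   ...
#   sess.run(spend_op)                           # once per training step
#   spent = accountant.get_privacy_spent(sess)   # -> [EpsDelta(eps, delta)]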
class MomentsAccountant(object):
"""Privacy accountant which keeps track of moments of privacy loss.
Note: The constructor of this class creates tf.Variables that must
be initialized with tf.global_variables_initializer() or similar calls.
MomentsAccountant accumulates the high moments of the privacy loss. It
requires a method for computing differenital moments of the noise (See
below for the definition). So every specific accountant should subclass
this class by implementing _differential_moments method.
Denote by X_i the random variable of privacy loss at the i-th step.
Consider two databases D, D' which differ by one item. X_i takes value
log Pr[M(D')==x]/Pr[M(D)==x] with probability Pr[M(D)==x].
In MomentsAccountant, we keep track of y_i(L) = log E[exp(L X_i)] for some
large enough L. To compute the final privacy spending, we apply Chernoff
bound (assuming the random noise added at each step is independent) to
bound the total privacy loss Z = sum X_i as follows:
Pr[Z > e] = Pr[exp(L Z) > exp(L e)]
< E[exp(L Z)] / exp(L e)
= Prod_i E[exp(L X_i)] / exp(L e)
= exp(sum_i log E[exp(L X_i)]) / exp(L e)
= exp(sum_i y_i(L) - L e)
Hence the mechanism is (e, d)-differentially private for
d = exp(sum_i y_i(L) - L e).
We require d < 1, i.e. e > sum_i y_i(L) / L. We maintain y_i(L) for several
  L to compute the best d for any given e (normally it should be the lowest L
  such that 2 * sum_i y_i(L) / L < e).
We further assume that at each step, the mechanism operates on a random
sample with sampling probability q = batch_size / total_examples. Then
  E[exp(L X)] = E[(Pr[M(D)==x] / Pr[M(D')==x])^L]
  By distinguishing two cases of whether D < D' or D' < D, we have
that
E[exp(L X)] <= max (I1, I2)
where
I1 = (1-q) E ((1-q) + q P(X+1) / P(X))^L + q E ((1-q) + q P(X) / P(X-1))^L
I2 = E (P(X) / ((1-q) + q P(X+1)))^L
In order to compute I1 and I2, one can consider to
1. use an asymptotic bound, which recovers the advance composition theorem;
2. use the closed formula (like GaussianMomentsAccountant);
3. use numerical integration or random sample estimation.
Dependent on the distribution, we can often obtain a tigher estimation on
the moments and hence a more accurate estimation of the privacy loss than
obtained using generic composition theorems.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, total_examples, moment_orders=32):
"""Initialize a MomentsAccountant.
Args:
total_examples: total number of examples.
moment_orders: the order of moments to keep.
"""
assert total_examples > 0
self._total_examples = total_examples
self._moment_orders = (moment_orders
if isinstance(moment_orders, (list, tuple))
else range(1, moment_orders + 1))
self._max_moment_order = max(self._moment_orders)
assert self._max_moment_order < 100, "The moment order is too large."
self._log_moments = [tf.Variable(numpy.float64(0.0),
trainable=False,
name=("log_moments-%d" % moment_order))
for moment_order in self._moment_orders]
@abc.abstractmethod
def _compute_log_moment(self, sigma, q, moment_order):
"""Compute high moment of privacy loss.
Args:
sigma: the noise sigma, in the multiples of the sensitivity.
q: the sampling ratio.
moment_order: the order of moment.
Returns:
log E[exp(moment_order * X)]
"""
pass
def accumulate_privacy_spending(self, unused_eps_delta,
sigma, num_examples):
"""Accumulate privacy spending.
In particular, accounts for privacy spending when we assume there
are num_examples, and we are releasing the vector
(sum_{i=1}^{num_examples} x_i) + Normal(0, stddev=l2norm_bound*sigma)
where l2norm_bound is the maximum l2_norm of each example x_i, and
the num_examples have been randomly selected out of a pool of
self.total_examples.
Args:
unused_eps_delta: EpsDelta pair which can be tensors. Unused
in this accountant.
sigma: the noise sigma, in the multiples of the sensitivity (that is,
if the l2norm sensitivity is k, then the caller must have added
Gaussian noise with stddev=k*sigma to the result of the query).
num_examples: the number of examples involved.
Returns:
a TensorFlow operation for updating the privacy spending.
"""
q = tf.cast(num_examples, tf.float64) * 1.0 / self._total_examples
moments_accum_ops = []
for i in range(len(self._log_moments)):
moment = self._compute_log_moment(sigma, q, self._moment_orders[i])
moments_accum_ops.append(tf.assign_add(self._log_moments[i], moment))
return tf.group(*moments_accum_ops)
def _compute_delta(self, log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
def _compute_eps(self, log_moments, delta):
min_eps = float("inf")
for moment_order, log_moment in log_moments:
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
return min_eps
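  # Editor's worked illustration of the two conversions above, with made-up
  # numbers: for a single tracked order L=2 and log moment y=0.02,
  #   delta at eps=0.5  is exp(0.02 - 2*0.5)        ~= 0.375
  #   eps at delta=1e-5 is (0.02 - log(1e-5)) / 2   ~= 5.77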
def get_privacy_spent(self, sess, target_eps=None, target_deltas=None):
"""Compute privacy spending in (e, d)-DP form for a single or list of eps.
Args:
sess: the session to run the tensor.
target_eps: a list of target epsilon's for which we would like to
compute corresponding delta value.
target_deltas: a list of target deltas for which we would like to
compute the corresponding eps value. Caller must specify
either target_eps or target_delta.
Returns:
A list of EpsDelta pairs.
"""
assert (target_eps is None) ^ (target_deltas is None)
eps_deltas = []
log_moments = sess.run(self._log_moments)
log_moments_with_order = zip(self._moment_orders, log_moments)
if target_eps is not None:
for eps in target_eps:
eps_deltas.append(
EpsDelta(eps, self._compute_delta(log_moments_with_order, eps)))
else:
assert target_deltas
for delta in target_deltas:
eps_deltas.append(
EpsDelta(self._compute_eps(log_moments_with_order, delta), delta))
return eps_deltas
class GaussianMomentsAccountant(MomentsAccountant):
"""MomentsAccountant which assumes Gaussian noise.
GaussianMomentsAccountant assumes the noise added is centered Gaussian
noise N(0, sigma^2 I). In this case, we can compute the differential moments
accurately using a formula.
For asymptotic bound, for Gaussian noise with variance sigma^2, we can show
for L < sigma^2, q L < sigma,
log E[exp(L X)] = O(q^2 L^2 / sigma^2).
  Using this we derive that for training T epochs, with batch ratio q,
the Gaussian mechanism with variance sigma^2 (with q < 1/sigma) is (e, d)
private for d = exp(T/q q^2 L^2 / sigma^2 - L e). Setting L = sigma^2,
Tq = e/2, the mechanism is (e, exp(-e sigma^2/2))-DP. Equivalently, the
mechanism is (e, d)-DP if sigma = sqrt{2 log(1/d)}/e, q < 1/sigma,
and T < e/(2q). This bound is better than the bound obtained using general
composition theorems, by an Omega(sqrt{log k}) factor on epsilon, if we run
k steps. Since we use direct estimate, the obtained privacy bound has tight
constant.
  For GaussianMomentsAccountant, it suffices to compute I1, as I1 >= I2,
  which reduces to computing E(P(x+s)/P(x+s-1) - 1)^i for s = 0 and 1. In the
  companion gaussian_moments.py file, we supply a procedure for computing both
I1 and I2 (the computation of I2 is through multi-precision integration
package). It can be verified that indeed I1 >= I2 for wide range of parameters
we have tried, though at the moment we are unable to prove this claim.
We recommend that when using this accountant, users independently verify
using gaussian_moments.py that for their parameters, I1 is indeed larger
than I2. This can be done by following the instructions in
gaussian_moments.py.
"""
def __init__(self, total_examples, moment_orders=32):
"""Initialization.
Args:
total_examples: total number of examples.
moment_orders: the order of moments to keep.
"""
super(self.__class__, self).__init__(total_examples, moment_orders)
self._binomial_table = utils.GenerateBinomialTable(self._max_moment_order)
def _differential_moments(self, sigma, s, t):
"""Compute 0 to t-th differential moments for Gaussian variable.
E[(P(x+s)/P(x+s-1)-1)^t]
= sum_{i=0}^t (t choose i) (-1)^{t-i} E[(P(x+s)/P(x+s-1))^i]
= sum_{i=0}^t (t choose i) (-1)^{t-i} E[exp(-i*(2*x+2*s-1)/(2*sigma^2))]
= sum_{i=0}^t (t choose i) (-1)^{t-i} exp(i(i+1-2*s)/(2 sigma^2))
Args:
sigma: the noise sigma, in the multiples of the sensitivity.
s: the shift.
t: 0 to t-th moment.
Returns:
0 to t-th moment as a tensor of shape [t+1].
"""
assert t <= self._max_moment_order, ("The order of %d is out "
"of the upper bound %d."
% (t, self._max_moment_order))
binomial = tf.slice(self._binomial_table, [0, 0],
[t + 1, t + 1])
signs = numpy.zeros((t + 1, t + 1), dtype=numpy.float64)
for i in range(t + 1):
for j in range(t + 1):
signs[i, j] = 1.0 - 2 * ((i - j) % 2)
exponents = tf.constant([j * (j + 1.0 - 2.0 * s) / (2.0 * sigma * sigma)
for j in range(t + 1)], dtype=tf.float64)
# x[i, j] = binomial[i, j] * signs[i, j] = (i choose j) * (-1)^{i-j}
x = tf.mul(binomial, signs)
# y[i, j] = x[i, j] * exp(exponents[j])
# = (i choose j) * (-1)^{i-j} * exp(j(j-1)/(2 sigma^2))
# Note: this computation is done by broadcasting pointwise multiplication
# between [t+1, t+1] tensor and [t+1] tensor.
y = tf.mul(x, tf.exp(exponents))
# z[i] = sum_j y[i, j]
# = sum_j (i choose j) * (-1)^{i-j} * exp(j(j-1)/(2 sigma^2))
z = tf.reduce_sum(y, 1)
return z
def _compute_log_moment(self, sigma, q, moment_order):
"""Compute high moment of privacy loss.
Args:
sigma: the noise sigma, in the multiples of the sensitivity.
q: the sampling ratio.
moment_order: the order of moment.
Returns:
log E[exp(moment_order * X)]
"""
assert moment_order <= self._max_moment_order, ("The order of %d is out "
"of the upper bound %d."
% (moment_order,
self._max_moment_order))
binomial_table = tf.slice(self._binomial_table, [moment_order, 0],
[1, moment_order + 1])
# qs = [1 q q^2 ... q^L] = exp([0 1 2 ... L] * log(q))
qs = tf.exp(tf.constant([i * 1.0 for i in range(moment_order + 1)],
dtype=tf.float64) * tf.cast(
tf.log(q), dtype=tf.float64))
moments0 = self._differential_moments(sigma, 0.0, moment_order)
term0 = tf.reduce_sum(binomial_table * qs * moments0)
moments1 = self._differential_moments(sigma, 1.0, moment_order)
term1 = tf.reduce_sum(binomial_table * qs * moments1)
return tf.squeeze(tf.log(tf.cast(q * term0 + (1.0 - q) * term1,
tf.float64)))
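# Editor's sketch of the usual DP-SGD wiring; the batch size, sigma and
# target delta are made-up values, not part of the original module:
#
#   accountant = GaussianMomentsAccountant(total_examples=60000)
#   spend_op = accountant.accumulate_privacy_spending(
#       None, sigma=4.0, num_examples=256)
#   ...
#   sess.run(tf.global_variables_initializer())
#   sess.run(spend_op)                                       # once per step
#   eps_delta = accountant.get_privacy_spent(sess, target_deltas=[1e-5])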
class DummyAccountant(object):
"""An accountant that does no accounting."""
def accumulate_privacy_spending(self, *unused_args):
return tf.no_op()
def get_privacy_spent(self, unused_sess, **unused_kwargs):
return [EpsDelta(numpy.inf, 1.0)]
| apache-2.0 | -5,226,359,574,101,129,000 | 41.756098 | 80 | 0.638049 | false |
lukemans/Hello-world | ex7.py | 1 | 1439 | # Coursera Online Machine Learning Course
# Exercise 7 -- Principal Component Analysis and K-Means Clustering
import pandas as pd
import numpy as np
from scipy.optimize import minimize
from ex7_utils import *
import scipy.io
import matplotlib.pyplot as plt
# Part 1 -- Find Closest Centroids
raw_mat = scipy.io.loadmat("ex7data2.mat")
X = raw_mat.get("X")
# Select an initial set of centroids
K = 3
initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])
idx = findClosestCentroids(X, initial_centroids)
# Part 2 -- Compute Means
centroids = computeCentroids(X, idx, K)
# Part 3 -- K-means Clustering
max_iters = 10
initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])
centroids, idx = runKMeans(X, initial_centroids, max_iters, plot_progress=True)
# Part 4 -- K-means Clustering on Pixels
A = plt.imread("bird_small.png")
plt.imshow(A)
plt.show()
original_shape = np.shape(A)
# Reshape A to get R, G, B values for each pixel
X = A.reshape((np.size(A, 0)*np.size(A, 1), 3))
K = 16
max_iters = 10
# Initialize centroids
initial_centroids = kMeansInitCentroids(X, K)
# Run K-means
centroids, idx = runKMeans(X, initial_centroids, max_iters, plot_progress=False)
# Part 5 -- Image Compression
idx = findClosestCentroids(X, centroids)
X_recovered = centroids[idx,:]
X_recovered = X_recovered.reshape(original_shape)
# Display Images
f, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
ax1.imshow(A)
ax2.imshow(X_recovered)
| apache-2.0 | 851,453,940,738,845,200 | 25.648148 | 80 | 0.717165 | false |
lipis/electron-crash-reporter | main/auth/github.py | 1 | 1477 | # coding: utf-8
import flask
import auth
import config
import model
import util
from main import app
github_config = dict(
access_token_method='POST',
access_token_url='https://github.com/login/oauth/access_token',
authorize_url='https://github.com/login/oauth/authorize',
base_url='https://api.github.com/',
consumer_key=config.CONFIG_DB.github_client_id,
consumer_secret=config.CONFIG_DB.github_client_secret,
request_token_params={'scope': 'user:email'},
)
github = auth.create_oauth_app(github_config, 'github')
@app.route('/api/auth/callback/github/')
def github_authorized():
response = github.authorized_response()
if response is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
flask.session['oauth_token'] = (response['access_token'], '')
me = github.get('user')
user_db = retrieve_user_from_github(me.data)
return auth.signin_user_db(user_db)
@github.tokengetter
def get_github_oauth_token():
return flask.session.get('oauth_token')
@app.route('/signin/github/')
def signin_github():
return auth.signin_oauth(github)
def retrieve_user_from_github(response):
auth_id = 'github_%s' % str(response['id'])
user_db = model.User.get_by('auth_ids', auth_id)
return user_db or auth.create_user_db(
auth_id,
response.get('name', response.get('login')),
response.get('login'),
response.get('email', ''),
verified=bool(response.get('email', '')),
)
| mit | 8,695,855,415,469,171,000 | 25.375 | 65 | 0.69736 | false |
gc-i/wntOS | controller/DamageForm.py | 1 | 5612 | from PyQt4.QtGui import *
from PyQt4.QtCore import *
from ..view.Ui_DamageForm import *
from ..model.DatabaseHelper import *
from ..model.core_classes import CoDamage
from ..utils.PluginUtils import *
from ..utils.LayerUtils import *
from DamageDialog import *
class DamageForm(QWidget, Ui_DamageForm, DatabaseHelper):
def __init__(self, damaged_asset_type, damaged_asset_id, parent=None):
super(DamageForm, self).__init__(parent)
DatabaseHelper.__init__(self)
self.setupUi(self)
self.__damaged_asset_id = damaged_asset_id
self.__damaged_asset_type = damaged_asset_type
try:
PluginUtils.run_query(self.__populate_damage_twidget)
except (WntException, SQLAlchemyError) as e:
PluginUtils.show_error(self, self.tr('Database Error'), e.args[0])
self.edit_damage_button.setEnabled(False)
self.view_maxcom_button.setEnabled(False)
def __populate_damage_twidget(self):
self.damage_twidget.clearContents()
self.damage_twidget.setRowCount(0)
count = self.session.query(CoDamage).filter(CoDamage.asset_type == self.__damaged_asset_type).\
filter(CoDamage.asset_id == self.__damaged_asset_id).count()
self.damage_twidget.setRowCount(count)
i = 0
for co_damage in self.session.query(CoDamage).filter(CoDamage.asset_type == self.__damaged_asset_type).\
filter(CoDamage.asset_id == self.__damaged_asset_id). \
order_by(CoDamage.occurrence_timestamp):
control_no = '' if co_damage.control_no is None else co_damage.control_no
item = QTableWidgetItem('{}'.format(control_no))
item.setData(Qt.UserRole, co_damage.id)
self.damage_twidget.setItem(i, 0, item)
if co_damage.received_from is not None:
item = QTableWidgetItem(co_damage.received_from)
self.damage_twidget.setItem(i, 1, item)
if co_damage.occurrence_timestamp is not None:
item = QTableWidgetItem(co_damage.occurrence_timestamp.date().isoformat())
self.damage_twidget.setItem(i, 2, item)
if co_damage.registration_timestamp is not None:
item = QTableWidgetItem(co_damage.registration_timestamp.date().isoformat())
self.damage_twidget.setItem(i, 3, item)
if co_damage.repair_timestamp is not None:
item = QTableWidgetItem(co_damage.repair_timestamp.date().isoformat())
self.damage_twidget.setItem(i, 4, item)
if co_damage.repaired_by is not None:
item = QTableWidgetItem(co_damage.repaired_by)
self.damage_twidget.setItem(i, 5, item)
if co_damage.repair_task is not None:
item = QTableWidgetItem(co_damage.repair_task)
self.damage_twidget.setItem(i, 6, item)
if co_damage.cl_damage_type is not None:
description = co_damage.cl_damage_type.description
item = QTableWidgetItem(description)
self.damage_twidget.setItem(i, 7, item)
if co_damage.cl_damage_cause is not None:
description = co_damage.cl_damage_cause.description
item = QTableWidgetItem(description)
self.damage_twidget.setItem(i, 8, item)
if co_damage.cl_damage_status is not None:
description = co_damage.cl_damage_status.description
item = QTableWidgetItem(description)
self.damage_twidget.setItem(i, 9, item)
item = QTableWidgetItem(co_damage.note)
self.damage_twidget.setItem(i, 10, item)
i += 1
self.damage_twidget.resizeColumnsToContents()
self.damage_twidget.horizontalHeader().setStretchLastSection(True)
@pyqtSlot()
def on_view_maxcom_button_clicked(self):
if len(self.damage_twidget.selectionModel().selectedRows()) == 0:
return
for index in self.damage_twidget.selectionModel().selectedRows():
row = index.row()
control_no = self.damage_twidget.item(row, 0).text()
if len(control_no) > 0:
PluginUtils.open_maxcom(int(control_no), 'COMPLAINT_DAMAGE')
@pyqtSlot()
def on_edit_damage_button_clicked(self):
if len(self.damage_twidget.selectionModel().selectedRows()) == 0:
return
for index in self.damage_twidget.selectionModel().selectedRows():
damage_id = self.damage_twidget.item(index.row(), 0).data(Qt.UserRole)
layer = LayerUtils.layer_by_data_source('core', 'co_damage')
dlg = DamageDialog(layer, damage_id, True, self)
if dlg.exec_() == QDialog.Accepted:
self.__reload_and_select_damage(damage_id)
def __reload_and_select_damage(self, damage_id):
try:
PluginUtils.run_query(self.__populate_damage_twidget)
except (WntException, SQLAlchemyError) as e:
PluginUtils.show_error(self, self.tr('Database Error'), e.args[0])
return
self.__select_damage(damage_id)
self.damage_twidget.setFocus()
def __select_damage(self, damage_id):
for row in range(self.damage_twidget.rowCount()):
damage_id_2 = self.damage_twidget.item(row, 0).data(Qt.UserRole)
if damage_id_2 == damage_id:
self.damage_twidget.selectRow(row)
def keyPressEvent(self, e):
if e.key() == Qt.Key_F1:
PluginUtils.show_help("Add_Edit_Damage.htm")
| gpl-3.0 | 2,911,628,629,951,182,300 | 38.801418 | 112 | 0.619387 | false |
hzlf/openbroadcast | website/legacy/obp_legacy/management/commands/__archive/migrate_legacy.py | 1 | 12138 | #-*- coding: utf-8 -*-
from django.core.files import File as DjangoFile
from django.core.management.base import BaseCommand, NoArgsCommand
from optparse import make_option
import os
import sys
import time
import re
from tagging.models import Tag
from alibrary.models import Artist, Release, Media, Label, Relation, License
from filer.models.filemodels import File
from filer.models.audiomodels import Audio
from filer.models.imagemodels import Image
from obp_legacy.models import *
from django.template.defaultfilters import slugify
from datetime import datetime
from lib.util import filer_extra
from audiotools import AudioFile, MP3Audio, M4AAudio, FlacAudio, WaveAudio, MetaData
import audiotools
def id_to_location(id):
l = "%012d" % id
return '%d/%d/%d' % (int(l[0:4]), int(l[4:8]), int(l[8:12]))
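# For example, id_to_location(12345) pads the id to '000000012345' and yields
# '0/1/2345'; this mirrors the directory layout of the legacy artwork URLs
# used further below.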
class LegacyImporter(object):
def __init__(self, * args, **kwargs):
self.object_type = kwargs.get('object_type')
self.verbosity = int(kwargs.get('verbosity', 1))
def import_release(self, lr):
print 'trying to get related data'
lms = lr.mediasreleases_set.all()
las = lr.artistsreleases_set.all()
lls = lr.labelsreleases_set.all()
print 'legacy_id: %s' % lr.id
r, created = Release.objects.get_or_create(legacy_id=lr.id)
if created:
print 'Not here yet -> created'
else:
print 'found by legacy_id -> use'
"""
Release creation/update & mapping
"""
r.slug = slugify(lr.name)
r.legacy_id = lr.id
"""
Mapping new <> legacy
"""
r.name = lr.name
print u'%s' % r.id
if lr.catalognumber:
r.catalognumber = lr.catalognumber
if lr.releasetype:
r.releasetype = lr.releasetype
if lr.releasestatus:
r.releasestatus = lr.releasestatus
if lr.published:
r.publish_date = lr.published
if lr.notes:
r.excerpt = lr.notes
if lr.totaltracks:
r.totaltracks = lr.totaltracks
print 'totaltracks: %s' % r.totaltracks
if lr.releasecountry and len(lr.releasecountry) == 2:
r.release_country = lr.releasecountry
# "relation" mapping
if lr.discogs_releaseid and lr.discogs_releaseid != 'nf':
url = 'http://www.discogs.com/release/%s' % lr.discogs_releaseid
print 'discogs_url: %s' % url
rel = Relation(content_object=r, url=url)
rel.save()
if lr.myspace_url:
print 'myspace_url: %s' % lr.myspace_url
rel = Relation(content_object=r, url=lr.myspace_url)
rel.save()
if lr.wikipedia_url:
print 'wikipedia_url: %s' % lr.wikipedia_url
rel = Relation(content_object=r, url=lr.wikipedia_url)
rel.save()
if lr.releasedate:
print 'legacy-date: %s' % lr.releasedate
seg = lr.releasedate.split('-')
print seg
# year only
if len(seg) == 1:
r.releasedate = '%s-%s-%s' % (seg[0], '01', '01')
# year & month only
if len(seg) == 2:
if seg[1] in ('00', '0'):
seg[1] = '01'
r.releasedate = '%s-%s-%s' % (seg[0], seg[1], '01')
# full date
if len(seg) == 3 and seg[0] != '0000':
if seg[1] in ('00', '0'):
seg[1] = '01'
if seg[2] in ('00', '0'):
seg[2] = '01'
r.releasedate = '%s-%s-%s' % (seg[0], seg[1], seg[2] )
print 'new-date: %s' % r.releasedate
#time.sleep(2)
r.save()
# id:
try:
img_url = 'http://openbroadcast.ch/static/images/release/%s/original.jpg' % id_to_location(r.legacy_id)
print img_url
img = filer_extra.url_to_file(img_url, r.folder)
r.main_image = img
r.save()
except:
pass
"""
Tag Mapping
"""
ntrs = NtagsReleases.objects.using('legacy').filter(release_id=lr.id)
# r.tags.clear()
for ntr in ntrs:
print 'Tag ID: %s' % ntr.ntag_id
try:
nt = Ntags.objects.using('legacy').get(id=ntr.ntag_id)
print 'Tag Name: %s' % nt.name
Tag.objects.add_tag(r, u'"%s"' % nt.name)
except Exception, e:
print e
pass
#r.tags.add_tag(nt.name)
#r.tags.add(nt.name)
"""
Label mapping
"""
for ll in lls:
l, created = Label.objects.get_or_create(legacy_id=ll.label.id)
l.slug = slugify(ll.label.name)
"""
Mapping new <> legacy
"""
l.name = ll.label.name
# save (& send to process queue...) :)
l.save()
# assign release
r.label = l
r.save()
"""
Loop tracks and track-related artists
"""
""""""
for lm in lms:
m, created = Media.objects.get_or_create(legacy_id=lm.media.id)
m.slug = slugify(lm.media.name)
# Mapping new <> legacy
m.name = lm.media.name
try:
m.tracknumber = int(lm.media.tracknumber)
except Exception, e:
m.tracknumber = 0
# assign release
m.release = r
# license mapping
if lm.media.license_id:
lic, created = License.objects.get_or_create(legacy_id=lm.media.license_id)
lic.name = lm.media.license_id
lic.save()
m.license = lic
# save (& send to process queue...) :)
m.save()
# get master file / audio
print "-----------------------------------------------------------"
print "Import Audiofile:"
if lm.media.sourcepath:
print "source path: %s" % lm.media.sourcepath
full_path = '/my_file/%s' % lm.media.sourcepath
full_path = 'tmp/dummy.mp3'
print "full path: %s" % full_path
m.duration = lm.media.length
if not m.master:
audiofile = audiotools.open(full_path)
metadata = audiofile.get_metadata()
print metadata
""""""
artist_name = metadata.artist_name
release_name = metadata.album_name
track_name = metadata.track_name
tracknumber = metadata.track_number
print 'artist: %s' % artist_name
print 'release: %s' % release_name
print 'track: %s' % track_name
print 'tracknumber: %s' % tracknumber
dj_file = DjangoFile(open(full_path), name=lm.media.filename)
master = self.import_file(file=dj_file, folder=r.get_folder('tracks'))
master.bits_per_sample = audiofile.bits_per_sample()
master.sample_rate = audiofile.sample_rate()
master.total_frames = audiofile.total_frames()
#master.seconds_length = audiofile.seconds_length()
try:
iext = os.path.splitext(full_path)[1].lower()[1:]
except:
iext = None
print 'IEXT %s' % iext
master.filetype = iext
master.save()
m.master = master
m.save()
# get track artist
tlas = lm.media.artistsmedias_set.all()
for tla in tlas:
print "** TLA **"
#print tla.artist.name
a, created = Artist.objects.get_or_create(legacy_id=tla.artist.id)
a.slug = slugify(tla.artist.name)
a.name = tla.artist.name
a.save()
m.artist = a
m.save()
"""
Update migration timestamp on legacy database
"""
lr.migrated = datetime.now()
lr.save()
return
def import_file(self, file, folder):
print "#########################"
print folder.name
print "#########################"
"""
Create a Audio or an Image into the given folder
"""
try:
iext = os.path.splitext(file.name)[1].lower()
except:
iext = ''
print 'iext:'
print iext
if iext in ['.jpg', '.jpeg', '.png', '.gif']:
obj, created = Image.objects.get_or_create(
original_filename=file.name,
file=file,
folder=folder,
is_public=True)
print 'obj:',
print obj
        elif iext in ['.mp3','.flac','.m4a','.mp4','.wav','.aiff','.ogg']:
obj, created = Audio.objects.get_or_create(
original_filename=file.name,
file=file,
folder=folder,
is_public=False)
else:
obj = None
if obj:
print 'have object'
return obj
else:
return None
def walker(self):
if(self.object_type == 'releases'):
lrs = Releases.objects.using('legacy').filter(migrated=None).exclude(name=u'').all()[0:100000]
for lr in lrs:
print
print '----------------------------------------'
print 'got release:',
print u'%s' % lr.name.encode('ascii', 'replace')
try:
self.import_release(lr)
except Exception, e:
print e
pass
#print lr.id
return
class Command(NoArgsCommand):
"""
Import directory structure into alibrary:
manage.py import_folder --path=/tmp/assets/images
"""
option_list = BaseCommand.option_list + (
make_option('--type',
action='store',
dest='object_type',
default=False,
help='Import files located in the path into django-filer'),
)
def handle_noargs(self, **options):
legacy_importer = LegacyImporter(**options)
legacy_importer.walker()
| gpl-3.0 | -8,743,974,167,014,346,000 | 27.359813 | 115 | 0.424699 | false |
lowRISC/edalize | edalize/verilator.py | 1 | 7503 | import logging
import multiprocessing
import os
import logging
from edalize.edatool import Edatool
logger = logging.getLogger(__name__)
CONFIG_MK_TEMPLATE = """#Auto generated by Edalize
TOP_MODULE := {top_module}
VC_FILE := {vc_file}
VERILATOR_OPTIONS := {verilator_options}
MAKE_OPTIONS := {make_options}
"""
MAKEFILE_TEMPLATE = """#Auto generated by Edalize
include config.mk
#Assume a local installation if VERILATOR_ROOT is set
ifeq ($(VERILATOR_ROOT),)
VERILATOR ?= verilator
else
VERILATOR ?= $(VERILATOR_ROOT)/bin/verilator
endif
V$(TOP_MODULE): V$(TOP_MODULE).mk
$(MAKE) $(MAKE_OPTIONS) -f $<
V$(TOP_MODULE).mk:
$(VERILATOR) -f $(VC_FILE) $(VERILATOR_OPTIONS)
"""
class Verilator(Edatool):
argtypes = ['cmdlinearg', 'plusarg', 'vlogdefine', 'vlogparam']
@classmethod
def get_doc(cls, api_ver):
if api_ver == 0:
return {'description' : "Verilator is the fastest free Verilog HDL simulator, and outperforms most commercial simulators",
'members' : [
{'name' : 'mode',
'type' : 'String',
'desc' : 'Select compilation mode. Legal values are *cc* for C++ testbenches, *sc* for SystemC testbenches or *lint-only* to only perform linting on the Verilog code'},
{'name' : 'cli_parser',
'type' : 'String',
'desc' : '**Deprecated: Use run_options instead** : Select whether FuseSoC should handle command-line arguments (*managed*) or if they should be passed directly to the verilated model (*raw*). Default is *managed*'}],
'lists' : [
{'name' : 'libs',
'type' : 'String',
'desc' : 'Extra libraries for the verilated model to link against'},
{'name' : 'verilator_options',
'type' : 'String',
'desc' : 'Additional options for verilator'},
{'name' : 'make_options',
'type' : 'String',
'desc' : 'Additional arguments passed to make when compiling the simulation. This is commonly used to set OPT/OPT_FAST/OPT_SLOW.'},
{'name' : 'run_options',
'type' : 'String',
'desc' : 'Additional arguments directly passed to the verilated model'},
]}
def check_managed_parser(self):
managed = 'cli_parser' not in self.tool_options or self.tool_options['cli_parser'] == 'managed'
if not managed:
logger.warning("The cli_parser argument is deprecated. Use run_options to pass raw arguments to verilated models")
def configure_main(self):
self.check_managed_parser()
if not self.toplevel:
raise RuntimeError("'" + self.name + "' miss a mandatory parameter 'top_module'")
self._write_config_files()
def _write_config_files(self):
#Future improvement: Separate include directories of c and verilog files
incdirs = set()
src_files = []
(src_files, incdirs) = self._get_fileset_files(force_slash=True)
self.verilator_file = self.name + '.vc'
with open(os.path.join(self.work_root,self.verilator_file),'w') as f:
f.write('--Mdir .\n')
modes = ['sc', 'cc', 'lint-only']
#Default to cc mode if not specified
if not 'mode' in self.tool_options:
self.tool_options['mode'] = 'cc'
if self.tool_options['mode'] in modes:
f.write('--'+self.tool_options['mode']+'\n')
else:
_s = "Illegal verilator mode {}. Allowed values are {}"
raise RuntimeError(_s.format(self.tool_options['mode'],
', '.join(modes)))
if 'libs' in self.tool_options:
for lib in self.tool_options['libs']:
f.write('-LDFLAGS {}\n'.format(lib))
for include_dir in incdirs:
f.write("+incdir+" + include_dir + '\n')
f.write("-CFLAGS -I{}\n".format(include_dir))
vlt_files = []
vlog_files = []
opt_c_files = []
for src_file in src_files:
if src_file.file_type.startswith("systemVerilogSource") or src_file.file_type.startswith("verilogSource"):
vlog_files.append(src_file.name)
elif src_file.file_type in ['cppSource', 'systemCSource', 'cSource']:
opt_c_files.append(src_file.name)
elif src_file.file_type == 'vlt':
vlt_files.append(src_file.name)
elif src_file.file_type == 'user':
pass
if vlt_files:
f.write('\n'.join(vlt_files) + '\n')
f.write('\n'.join(vlog_files) + '\n')
f.write('--top-module {}\n'.format(self.toplevel))
f.write('--exe\n')
f.write('\n'.join(opt_c_files))
f.write('\n')
f.write(''.join(['-G{}={}\n'.format(key, self._param_value_str(value)) for key, value in self.vlogparam.items()]))
f.write(''.join(['-D{}={}\n'.format(key, self._param_value_str(value)) for key, value in self.vlogdefine.items()]))
with open(os.path.join(self.work_root, 'Makefile'), 'w') as makefile:
makefile.write(MAKEFILE_TEMPLATE)
if 'verilator_options' in self.tool_options:
verilator_options = ' '.join(self.tool_options['verilator_options'])
else:
verilator_options = ''
if 'make_options' in self.tool_options:
make_options = ' '.join(self.tool_options['make_options'])
else:
make_options = ''
with open(os.path.join(self.work_root, 'config.mk'), 'w') as config_mk:
config_mk.write(CONFIG_MK_TEMPLATE.format(
top_module = self.toplevel,
vc_file = self.verilator_file,
verilator_options = verilator_options,
make_options = make_options))
def build_main(self):
logger.info("Building simulation model")
if not 'mode' in self.tool_options:
self.tool_options['mode'] = 'cc'
# Do parallel builds with <number of cpus> * 2 jobs.
make_job_count = multiprocessing.cpu_count() * 2
args = ['-j', str(make_job_count)]
if self.tool_options['mode'] == 'lint-only':
args.append('V'+self.toplevel+'.mk')
_s = os.path.join(self.work_root, 'verilator.{}.log')
self._run_tool('make', args)
def run_main(self):
self.check_managed_parser()
self.args = []
for key, value in self.plusarg.items():
self.args += ['+{}={}'.format(key, self._param_value_str(value))]
for key, value in self.cmdlinearg.items():
self.args += ['--{}={}'.format(key, self._param_value_str(value))]
self.args += self.tool_options.get('run_options', [])
#Default to cc mode if not specified
if not 'mode' in self.tool_options:
self.tool_options['mode'] = 'cc'
if self.tool_options['mode'] == 'lint-only':
return
logger.info("Running simulation")
self._run_tool('./V' + self.toplevel, self.args)
| bsd-2-clause | -3,890,970,101,035,016,700 | 40.683333 | 242 | 0.539651 | false |
andreoliwa/scrapy-tegenaria | tegenaria/settings.py | 1 | 4679 | # -*- coding: utf-8 -*-
"""
Scrapy settings for tegenaria project.
For simplicity, this file contains only settings considered important or
commonly used. You can find more settings consulting the documentation:
http://doc.scrapy.org/en/latest/topics/settings.html
http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
"""
import os
from typing import List
from prettyconf import config
class Config(object):
"""App configuration."""
SECRET_KEY = os.environ.get("TEGENARIA_SECRET", "secret-key") # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = True
DEBUG_TB_INTERCEPT_REDIRECTS = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProdConfig(Config):
"""Production configuration."""
ENV = "prod"
DEBUG = False
SQLALCHEMY_DATABASE_URI = "postgresql://tegenaria:tegenaria@localhost:5440/tegenaria"
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = "dev"
DEBUG = True
DB_NAME = "dev.db"
# Put the db file in project root
SQLALCHEMY_DATABASE_URI = "postgresql://tegenaria_dev:tegenaria_dev@localhost:5440/tegenaria_dev"
ASSETS_DEBUG = True # Don't bundle/minify static assets
class TestConfig(Config):
"""Testing configuration."""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = "sqlite://"
WTF_CSRF_ENABLED = False # Allows form testing
GOOGLE_MATRIX_API_KEYS = config("GOOGLE_MATRIX_API_KEYS", cast=config.list, default=[]) # type: List[str]
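# Editor's note: with prettyconf's list cast the keys come from a
# comma-separated value (illustrative), e.g. GOOGLE_MATRIX_API_KEYS="key-one,key-two"
# in the environment or a .env file.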
BOT_NAME = "tegenaria"
SPIDER_MODULES = ["tegenaria.spiders"]
NEWSPIDER_MODULE = "tegenaria.spiders"
# FEED_URI = 'file:///tmp/tegenaria/%(name)s.json'
# FEED_FORMAT = 'jsonlines'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'tegenaria (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 1
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = config("DOWNLOAD_DELAY", cast=float, default=0.5)
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN=16
# CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
# COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED=False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'tegenaria.middlewares.MyCustomSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'tegenaria.middlewares.MyCustomDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
"tegenaria.pipelines.ApartmentPipeline": 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
# AUTOTHROTTLE_ENABLED=True
# The initial download delay
# AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
# https://doc.scrapy.org/en/latest/topics/extensions.html#closespider-errorcount
CLOSESPIDER_ERRORCOUNT = 1
| agpl-3.0 | -7,509,483,160,594,380,000 | 32.184397 | 109 | 0.7461 | false |
IMIO/django-fixmystreet | django_fixmystreet/fixmystreet/migrations/0035_create_ods_views.py | 1 | 42163 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
import os
current_dir = os.path.dirname(__file__)
class Migration(DataMigration):
def forwards(self, orm):
db.execute_many(open("{0}/ods-views.sql".format(current_dir)).read())
def backwards(self, orm):
db.execute_many(open("{0}/ods-views-drop.sql".format(current_dir)).read())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "'!'", 'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fixmystreet.faqentry': {
'Meta': {'ordering': "['order']", 'object_name': 'FaqEntry'},
'a_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'a_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'q_fr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'q_nl': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.fmsuser': {
'Meta': {'object_name': 'FMSUser', '_ormbases': [u'auth.User']},
'agent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'type'", 'blank': 'True', 'to': u"orm['fixmystreet.ReportCategory']"}),
'contractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fmsuser_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'last_used_language': ('django.db.models.fields.CharField', [], {'default': "'FR'", 'max_length': '10', 'null': 'True'}),
'leader': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fmsuser_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'work_for': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'workers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['fixmystreet.OrganisationEntity']"})
},
u'fixmystreet.historicalfmsuser': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalFMSUser'},
'agent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_used_language': ('django.db.models.fields.CharField', [], {'default': "'FR'", 'max_length': '10', 'null': 'True'}),
'leader': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'organisation_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "'!'", 'max_length': '128'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'user_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '75', 'db_index': 'True'})
},
u'fixmystreet.historicalorganisationentity': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalOrganisationEntity'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'commune': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'department': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'dependency_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'feature_id': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'region': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subcontractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1'})
},
u'fixmystreet.historicalreport': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalReport'},
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'address_fr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_nl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_number_as_int': ('django.db.models.fields.IntegerField', [], {'max_length': '255'}),
'address_regional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'category_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'citizen_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'close_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'contractor_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_planned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'false_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fixed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gravity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hash_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'mark_as_done_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'mark_as_done_user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'merged_with_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'photo': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'planned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '31370', 'null': 'True', 'blank': 'True'}),
'postalcode': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'probability': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'refusal_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'responsible_department_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'responsible_entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'responsible_manager_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'responsible_manager_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'secondary_category_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'terms_of_use_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thumbnail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.listitem': {
'Meta': {'object_name': 'ListItem'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'label_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'model_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'model_field': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fixmystreet.mailnotificationtemplate': {
'Meta': {'object_name': 'MailNotificationTemplate'},
'content_fr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'title_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.organisationentity': {
'Meta': {'object_name': 'OrganisationEntity'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'commune': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organisationentity_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'department': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dependency': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'associates'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'dispatch_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assigned_to_department'", 'blank': 'True', 'to': u"orm['fixmystreet.ReportCategory']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'feature_id': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organisationentity_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'region': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subcontractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1'})
},
u'fixmystreet.page': {
'Meta': {'object_name': 'Page'},
'content_fr': ('ckeditor.fields.RichTextField', [], {}),
'content_nl': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.report': {
'Meta': {'object_name': 'Report'},
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'address_fr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_nl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_number_as_int': ('django.db.models.fields.IntegerField', [], {'max_length': '255'}),
'address_regional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportMainCategoryClass']", 'null': 'True', 'blank': 'True'}),
'citizen': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'citizen_reports'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'close_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'contractor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_reports'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'report_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'date_planned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'false_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fixed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gravity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hash_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mark_as_done_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mark_as_done_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_solved'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'merged_with': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'merged_reports'", 'null': 'True', 'to': u"orm['fixmystreet.Report']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'report_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'photo': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'planned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '31370', 'null': 'True', 'blank': 'True'}),
'postalcode': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'previous_managers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'previous_reports'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['fixmystreet.FMSUser']"}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'probability': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'refusal_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'responsible_department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports_in_department'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'responsible_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_in_charge'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'responsible_manager': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_in_charge'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'responsible_manager_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'secondary_category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportCategory']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'terms_of_use_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thumbnail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.reportattachment': {
'Meta': {'object_name': 'ReportAttachment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportattachment_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportattachment_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': u"orm['fixmystreet.Report']"}),
'security_level': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
u'fixmystreet.reportcategory': {
'Meta': {'object_name': 'ReportCategory'},
'category_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': u"orm['fixmystreet.ReportMainCategoryClass']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportcategory_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportcategory_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'secondary_category_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': u"orm['fixmystreet.ReportSecondaryCategoryClass']"}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportcategoryhint': {
'Meta': {'object_name': 'ReportCategoryHint'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_fr': ('django.db.models.fields.TextField', [], {}),
'label_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportcomment': {
'Meta': {'object_name': 'ReportComment', '_ormbases': [u'fixmystreet.ReportAttachment']},
u'reportattachment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fixmystreet.ReportAttachment']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'fixmystreet.reporteventlog': {
'Meta': {'ordering': "['event_at']", 'object_name': 'ReportEventLog'},
'event_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_with_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'related_new_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'related_old_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'to': u"orm['fixmystreet.Report']"}),
'status_new': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'status_old': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'null': 'True', 'to': u"orm['auth.User']"}),
'value_old': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
u'fixmystreet.reportfile': {
'Meta': {'object_name': 'ReportFile', '_ormbases': [u'fixmystreet.ReportAttachment']},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'file_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'file_type': ('django.db.models.fields.IntegerField', [], {}),
'image': ('django_fixmystreet.fixmystreet.utils.FixStdImageField', [], {'max_length': '100', 'name': "'image'", 'blank': 'True'}),
u'reportattachment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fixmystreet.ReportAttachment']", 'unique': 'True', 'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportmaincategoryclass': {
'Meta': {'object_name': 'ReportMainCategoryClass'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportmaincategoryclass_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'hint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportCategoryHint']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportmaincategoryclass_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportnotification': {
'Meta': {'object_name': 'ReportNotification'},
'content_template': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'error_msg': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notifications'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'recipient_mail': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'related_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'reply_to': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.reportsecondarycategoryclass': {
'Meta': {'object_name': 'ReportSecondaryCategoryClass'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportsecondarycategoryclass_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportsecondarycategoryclass_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportsubscription': {
'Meta': {'unique_together': "(('report', 'subscriber'),)", 'object_name': 'ReportSubscription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'to': u"orm['fixmystreet.Report']"}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.FMSUser']"})
},
u'fixmystreet.streetsurface': {
'Meta': {'object_name': 'StreetSurface'},
'administrator': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': '31370'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pw_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ssft': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'sslv': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'urbis_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'version_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'fixmystreet.userorganisationmembership': {
'Meta': {'unique_together': "(('user', 'organisation'),)", 'object_name': 'UserOrganisationMembership'},
'contact_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userorganisationmembership_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userorganisationmembership_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'memberships'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'memberships'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"})
},
u'fixmystreet.zipcode': {
'Meta': {'object_name': 'ZipCode'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'commune': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'zipcode'", 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['fixmystreet']
| agpl-3.0 | -2,173,867,843,870,946,600 | 96.825986 | 223 | 0.566777 | false |
OpenNews/opennews-source | source/people/migrations/0003_organizationadmin.py | 1 | 1149 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-01-17 19:04
from __future__ import unicode_literals
import caching.base
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('people', '0002_person_photo'),
]
operations = [
migrations.CreateModel(
name='OrganizationAdmin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('email', models.EmailField(max_length=254, unique=True, verbose_name='Email address')),
('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.Organization')),
],
options={
'ordering': ('organization', 'email'),
'verbose_name': 'Organization Admin',
},
bases=(caching.base.CachingMixin, models.Model),
),
]
| mit | -1,334,810,765,854,700,300 | 34.90625 | 123 | 0.591819 | false |
newvem/anyjson | anyjson/__init__.py | 1 | 4577 | """Wraps the best available JSON implementation in a common
interface"""
import sys
VERSION = (0, 3, 1)
__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
__author__ = "Rune Halvorsen"
__contact__ = "[email protected]"
__homepage__ = "http://bitbucket.org/runeh/anyjson/"
__docformat__ = "restructuredtext"
# -eof meta-
#: The json implementation object. This is probably not useful to you,
#: except to get the name of the implementation in use. The name is
#: available through ``implementation.name``.
implementation = None
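# For example, once the module has been imported successfully (assuming at
# least one of the backends listed below is installed):
#     >>> import anyjson
#     >>> anyjson.implementation.name   # e.g. 'simplejson' or 'json'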
#: List of known json modules, and the names of their loads/dumps
#: methods, as well as the exceptions they throw. Exception can be either
#: an exception class or a string.
_modules = [("yajl", "dumps", TypeError, "loads", ValueError),
("jsonlib2", "write", "WriteError", "read", "ReadError"),
("jsonlib", "write", "WriteError", "read", "ReadError"),
("simplejson", "dumps", TypeError, "loads", ValueError),
("json", "dumps", TypeError, "loads", ValueError),
("django.utils.simplejson", "dumps", TypeError, "loads",ValueError),
("cjson", "encode", "EncodeError", "decode", "DecodeError")
]
_fields = ("modname", "encoder", "encerror", "decoder", "decerror")
class _JsonImplementation(object):
"""Incapsulates a JSON implementation"""
def __init__(self, modspec):
modinfo = dict(zip(_fields, modspec))
if modinfo["modname"] == "cjson":
import warnings
warnings.warn("cjson is deprecated! See http://pypi.python.org/pypi/python-cjson/1.0.5", DeprecationWarning)
# No try block. We want importerror to end up at caller
module = self._attempt_load(modinfo["modname"])
self.implementation = modinfo["modname"]
self._encode = getattr(module, modinfo["encoder"])
self._decode = getattr(module, modinfo["decoder"])
self._encode_error = modinfo["encerror"]
self._decode_error = modinfo["decerror"]
if isinstance(modinfo["encerror"], basestring):
self._encode_error = getattr(module, modinfo["encerror"])
if isinstance(modinfo["decerror"], basestring):
self._decode_error = getattr(module, modinfo["decerror"])
self.name = modinfo["modname"]
def __repr__(self):
return "<_JsonImplementation instance using %s>" % self.name
def _attempt_load(self, modname):
"""Attempt to load module name modname, returning it on success,
throwing ImportError if module couldn't be imported"""
__import__(modname)
return sys.modules[modname]
def dumps(self, data):
"""Serialize the datastructure to json. Returns a string. Raises
TypeError if the object could not be serialized."""
try:
return self._encode(data)
except self._encode_error, exc:
raise TypeError(*exc.args)
serialize = dumps
def loads(self, s):
"""deserialize the string to python data types. Raises
ValueError if the string vould not be parsed."""
try:
return self._decode(s)
except self._decode_error, exc:
raise ValueError(*exc.args)
deserialize = loads
def force_implementation(modname):
"""Forces anyjson to use a specific json module if it's available"""
global implementation
for name, spec in [(e[0], e) for e in _modules]:
if name == modname:
implementation = _JsonImplementation(spec)
return
raise ImportError("No module named: %s" % modname)
if __name__ == "__main__":
# If run as a script, we do nothing but print an error message.
# We do NOT try to load a compatible module because that may throw an
# exception, which renders the package uninstallable with easy_install
    # (It tries to execfile the script when installing, to make sure it works)
print "Running anyjson as a stand alone script is not supported"
sys.exit(1)
else:
for modspec in _modules:
try:
implementation = _JsonImplementation(modspec)
break
except ImportError:
pass
else:
raise ImportError("No supported JSON module found")
def loads(value):
"""Serialize the object to JSON."""
return implementation.loads(value)
deserialize = loads # compat
def dumps(value):
"""Deserialize JSON-encoded object to a Python object."""
return implementation.dumps(value)
serialize = dumps
| bsd-3-clause | 8,633,620,444,541,087,000 | 35.325397 | 120 | 0.63251 | false |
wavefrontHQ/python-client | wavefront_api_client/models/access_control_list_write_dto.py | 1 | 5443 | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AccessControlListWriteDTO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'entity_id': 'str',
'modify_acl': 'list[str]',
'view_acl': 'list[str]'
}
attribute_map = {
'entity_id': 'entityId',
'modify_acl': 'modifyAcl',
'view_acl': 'viewAcl'
}
def __init__(self, entity_id=None, modify_acl=None, view_acl=None): # noqa: E501
"""AccessControlListWriteDTO - a model defined in Swagger""" # noqa: E501
self._entity_id = None
self._modify_acl = None
self._view_acl = None
self.discriminator = None
if entity_id is not None:
self.entity_id = entity_id
if modify_acl is not None:
self.modify_acl = modify_acl
if view_acl is not None:
self.view_acl = view_acl
@property
def entity_id(self):
"""Gets the entity_id of this AccessControlListWriteDTO. # noqa: E501
The entity Id # noqa: E501
:return: The entity_id of this AccessControlListWriteDTO. # noqa: E501
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""Sets the entity_id of this AccessControlListWriteDTO.
The entity Id # noqa: E501
:param entity_id: The entity_id of this AccessControlListWriteDTO. # noqa: E501
:type: str
"""
self._entity_id = entity_id
@property
def modify_acl(self):
"""Gets the modify_acl of this AccessControlListWriteDTO. # noqa: E501
List of users and user groups ids that have modify permission # noqa: E501
:return: The modify_acl of this AccessControlListWriteDTO. # noqa: E501
:rtype: list[str]
"""
return self._modify_acl
@modify_acl.setter
def modify_acl(self, modify_acl):
"""Sets the modify_acl of this AccessControlListWriteDTO.
List of users and user groups ids that have modify permission # noqa: E501
:param modify_acl: The modify_acl of this AccessControlListWriteDTO. # noqa: E501
:type: list[str]
"""
self._modify_acl = modify_acl
@property
def view_acl(self):
"""Gets the view_acl of this AccessControlListWriteDTO. # noqa: E501
List of users and user group ids that have view permission # noqa: E501
:return: The view_acl of this AccessControlListWriteDTO. # noqa: E501
:rtype: list[str]
"""
return self._view_acl
@view_acl.setter
def view_acl(self, view_acl):
"""Sets the view_acl of this AccessControlListWriteDTO.
List of users and user group ids that have view permission # noqa: E501
:param view_acl: The view_acl of this AccessControlListWriteDTO. # noqa: E501
:type: list[str]
"""
self._view_acl = view_acl
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AccessControlListWriteDTO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AccessControlListWriteDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
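# Example construction (a minimal sketch; the id strings below are purely
# illustrative and not real Wavefront entity or account ids):
#     acl = AccessControlListWriteDTO(
#         entity_id="dashboard-1234",
#         modify_acl=["user-or-group-id"],
#         view_acl=["another-user-or-group-id"],
#     )
#     acl.to_dict()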
| apache-2.0 | 3,311,155,771,667,584,000 | 30.462428 | 409 | 0.58589 | false |
pirate/bookmark-archiver | tests/test_list.py | 1 | 4194 | import json
from .fixtures import *
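# Most tests below follow the same pattern: add a page served by the local
# test server (with all extractors disabled through the environment), then
# run `archivebox list` with one output flag and assert on what it prints.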
def test_list_json(process, disable_extractors_dict):
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
list_process = subprocess.run(["archivebox", "list", "--json"], capture_output=True)
output_json = json.loads(list_process.stdout.decode("utf-8"))
assert output_json[0]["url"] == "http://127.0.0.1:8080/static/example.com.html"
def test_list_json_headers(process, disable_extractors_dict):
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
list_process = subprocess.run(["archivebox", "list", "--json", "--with-headers"], capture_output=True)
output_json = json.loads(list_process.stdout.decode("utf-8"))
assert output_json["links"][0]["url"] == "http://127.0.0.1:8080/static/example.com.html"
def test_list_html(process, disable_extractors_dict):
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
list_process = subprocess.run(["archivebox", "list", "--html"], capture_output=True)
output_html = list_process.stdout.decode("utf-8")
assert "<footer>" not in output_html
assert "http://127.0.0.1:8080/static/example.com.html" in output_html
def test_list_html_headers(process, disable_extractors_dict):
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
list_process = subprocess.run(["archivebox", "list", "--html", "--with-headers"], capture_output=True)
output_html = list_process.stdout.decode("utf-8")
assert "<footer>" in output_html
assert "http://127.0.0.1:8080/static/example.com.html" in output_html
def test_list_csv(process, disable_extractors_dict):
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
list_process = subprocess.run(["archivebox", "list", "--csv", "url"], capture_output=True)
output_csv = list_process.stdout.decode("utf-8")
assert "http://127.0.0.1:8080/static/example.com.html" in output_csv
def test_list_csv_headers(process, disable_extractors_dict):
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
list_process = subprocess.run(["archivebox", "list", "--csv", "url", "--with-headers"], capture_output=True)
output_csv = list_process.stdout.decode("utf-8")
assert "http://127.0.0.1:8080/static/example.com.html" in output_csv
assert "url" in output_csv
def test_list_index_with_wrong_flags(process):
list_process = subprocess.run(["archivebox", "list", "--with-headers"], capture_output=True)
assert "--with-headers can only be used with --json, --html or --csv options" in list_process.stderr.decode("utf-8")
def test_link_sort_by_url(process, disable_extractors_dict):
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/iana.org.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
subprocess.run(["archivebox", "add", "http://127.0.0.1:8080/static/example.com.html", "--depth=0"],
capture_output=True, env=disable_extractors_dict)
list_process = subprocess.run(["archivebox", "list"], capture_output=True)
link_list = list_process.stdout.decode("utf-8").split("\n")
assert "http://127.0.0.1:8080/static/iana.org.html" in link_list[0]
list_process = subprocess.run(["archivebox", "list", "--sort=url"], capture_output=True)
link_list = list_process.stdout.decode("utf-8").split("\n")
assert "http://127.0.0.1:8080/static/example.com.html" in link_list[0]
| mit | -2,798,180,752,393,534,000 | 61.597015 | 120 | 0.645923 | false |
mikemcfarlane/Code_sprints | Concurrency_1/concurrency_1_general_problem.py | 1 | 2273 | """ Looking at concurrency. Moving two head motors (pitch and yaw),
and logging data simultaneously.
"""
# MEMORY_VALUE_NAMES is the list of ALMemory values names you want to save.
ALMEMORY_KEY_NAMES = [
"Device/SubDeviceList/HeadYaw/Position/Sensor/Value",
"Device/SubDeviceList/HeadYaw/Position/Actuator/Value",
"Device/SubDeviceList/HeadYaw/ElectricCurrent/Sensor/Value",
"Device/SubDeviceList/HeadYaw/Temperature/Sensor/Value",
"Device/SubDeviceList/HeadYaw/Hardness/Actuator/Value",
"Device/SubDeviceList/HeadYaw/Temperature/Sensor/Status",
"Device/SubDeviceList/HeadPitch/Position/Actuator/Value",
"Device/SubDeviceList/HeadPitch/Position/Sensor/Value",
"Device/SubDeviceList/HeadPitch/ElectricCurrent/Sensor/Value",
"Device/SubDeviceList/HeadPitch/Temperature/Sensor/Value",
"Device/SubDeviceList/HeadPitch/Hardness/Actuator/Value",
"Device/SubDeviceList/HeadPitch/Temperature/Sensor/Status"
]
NAO_IP = "mistcalf.local"
STEPS = 5
from naoqi import ALProxy
def main():
""" Some simple robot processes.
"""
motion = ALProxy("ALMotion", NAO_IP, 9559)
posture = ALProxy("ALRobotPosture", NAO_IP, 9559)
memory = ALProxy("ALMemory", NAO_IP, 9559)
data = list()
# Set stiffness on for Head motors and go to start pose.
print "Starting move...."
motion.setStiffnesses("Head", 1.0)
posture.goToPosture("Crouch", 2.0)
# Core processes. Do some moves and record data.
for i in range(STEPS):
positiveAngleStep = 1.0 / STEPS
negativeAngleStep = -1 * positiveAngleStep
timeStep = 20 / STEPS
motion.angleInterpolation(
["HeadYaw"],
[positiveAngleStep],
[timeStep],
False
)
motion.angleInterpolation(
["HeadPitch"],
[negativeAngleStep],
[timeStep],
False
)
line = list()
for key in ALMEMORY_KEY_NAMES:
value = memory.getData(key)
line.append(value)
data.append(line)
# Gently set stiff off for Head motors and relax.
print "...Going to stop now!"
motion.setStiffnesses("Head", 0.0)
motion.rest()
print data
if __name__ == "__main__":
main() | gpl-2.0 | 5,231,303,321,345,811,000 | 29.32 | 75 | 0.652882 | false |
architest/pymeeus | pymeeus/Neptune.py | 1 | 113473 | # -*- coding: utf-8 -*-
# PyMeeus: Python module implementing astronomical algorithms.
# Copyright (C) 2018 Dagoberto Salazar
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from math import sin, cos, tan, acos, atan2, sqrt, radians, log10
from pymeeus.Angle import Angle
from pymeeus.Epoch import Epoch, JDE2000
from pymeeus.Coordinates import (
geometric_vsop_pos, apparent_vsop_pos, orbital_elements,
nutation_longitude, true_obliquity, ecliptical2equatorial
)
from pymeeus.Earth import Earth
from pymeeus.Sun import Sun
"""
.. module:: Neptune
:synopsis: Class to model Neptune planet
:license: GNU Lesser General Public License v3 (LGPLv3)
.. moduleauthor:: Dagoberto Salazar
"""
VSOP87_L = [
# L0
[
[531188633.047, 0.00000000000, 0.00000000000],
[1798475.509, 2.90101273050, 38.13303563780],
[1019727.662, 0.48580923660, 1.48447270830],
[124531.845, 4.83008090682, 36.64856292950],
[42064.450, 5.41054991607, 2.96894541660],
[37714.589, 6.09221834946, 35.16409022120],
[33784.734, 1.24488865578, 76.26607127560],
[16482.741, 0.00007729261, 491.55792945680],
[9198.582, 4.93747059924, 39.61750834610],
[8994.249, 0.27462142569, 175.16605980020],
[4216.235, 1.98711914364, 73.29712585900],
[3364.818, 1.03590121818, 33.67961751290],
[2284.800, 4.20606932559, 4.45341812490],
[1433.512, 2.78340432711, 74.78159856730],
[900.240, 2.07606702418, 109.94568878850],
[744.996, 3.19032530145, 71.81265315070],
[506.206, 5.74785370252, 114.39910691340],
[399.552, 0.34972342569, 1021.24889455140],
[345.195, 3.46186210169, 41.10198105440],
[306.338, 0.49684039897, 0.52126486180],
[287.322, 4.50523446022, 0.04818410980],
[323.004, 2.24815188609, 32.19514480460],
[340.323, 3.30369900416, 77.75054398390],
[266.605, 4.88932609483, 0.96320784650],
[227.079, 1.79713054538, 453.42489381900],
[244.722, 1.24693337933, 9.56122755560],
[232.887, 2.50459795017, 137.03302416240],
[282.170, 2.24565579693, 146.59425171800],
[251.941, 5.78166597292, 388.46515523820],
[150.180, 2.99706110414, 5.93789083320],
[170.404, 3.32390630650, 108.46121608020],
[151.401, 2.19153094280, 33.94024994380],
[148.295, 0.85948986145, 111.43016149680],
[118.672, 3.67706204305, 2.44768055480],
[101.821, 5.70539236951, 0.11187458460],
[97.873, 2.80518260528, 8.07675484730],
[103.054, 4.40441222000, 70.32818044240],
[103.305, 0.04078966679, 0.26063243090],
[109.300, 2.41599378049, 183.24281464750],
[73.938, 1.32805041516, 529.69096509460],
[77.725, 4.16446516424, 4.19278569400],
[86.379, 4.22834506045, 490.07345674850],
[81.536, 5.19908046216, 493.04240216510],
[71.503, 5.29530386579, 350.33211960040],
[64.418, 3.54541016050, 168.05251279940],
[62.570, 0.15028731465, 182.27960680100],
[58.488, 3.50106873945, 145.10977900970],
[48.276, 1.11259925628, 112.91463420510],
[47.229, 4.57373229818, 46.20979048510],
[39.124, 1.66569356050, 213.29909543800],
[47.728, 0.12906212461, 484.44438245600],
[46.858, 3.01699530327, 498.67147645760],
[38.659, 2.38685706479, 2.92076130680],
[47.046, 4.49844660400, 173.68158709190],
[47.565, 2.58404814824, 219.89137757700],
[44.714, 5.47302733614, 176.65053250850],
[32.279, 3.45759151220, 30.71067209630],
[28.249, 4.13282446716, 6.59228213900],
[24.433, 4.55736848232, 106.97674337190],
[24.661, 3.67822620786, 181.75834193920],
[24.505, 1.55095867965, 7.11354700080],
[21.848, 1.04366818343, 39.09624348430],
[16.936, 6.10896452834, 44.72531777680],
[22.169, 2.74932970271, 256.53994050650],
[16.614, 4.98188930613, 37.61177077600],
[17.728, 3.55049134167, 1.37259812370],
[17.347, 2.14069234880, 42.58645376270],
[14.953, 3.36405649131, 98.89998852460],
[14.566, 0.69857991985, 1550.93985964600],
[15.676, 6.22010212025, 454.90936652730],
[13.243, 5.61712542227, 68.84370773410],
[14.837, 3.52557245517, 25.60286266560],
[12.757, 0.04509743861, 11.04570026390],
[11.988, 4.81687553351, 24.11838995730],
[11.060, 1.78958277553, 7.42236354150],
[12.108, 1.87022663714, 79.23501669220],
[11.698, 0.49005698002, 1.59634729290],
[10.459, 2.38743199893, 381.35160823740],
[11.681, 3.85151357766, 218.40690486870],
[8.744, 0.14168568610, 148.07872442630],
[9.196, 1.00274090619, 72.07328558160],
[11.343, 0.81432278263, 525.49817940060],
[10.097, 5.03383557061, 601.76425067620],
[8.035, 1.77685723010, 0.21244832110],
[8.382, 3.07534786987, 1.27202438720],
[10.803, 2.92081211459, 293.18850343600],
[7.666, 1.52223325105, 115.88357962170],
[7.531, 5.37537256533, 5.10780943070],
[8.691, 4.74352784364, 143.62530630140],
[10.183, 1.15395455831, 6244.94281435360],
[8.283, 0.35956716764, 138.51749687070],
[9.544, 4.02452832984, 152.53214255120],
[7.274, 4.10937535938, 251.43213107580],
[7.465, 1.72131945843, 31.01948863700],
[6.902, 4.62452068308, 2.70831298570],
[7.094, 5.11528393609, 312.19908396260],
[7.929, 2.10765101655, 27.08733537390],
[6.156, 3.50746507109, 28.57180808220],
[7.134, 2.05292376023, 278.25883401880],
[8.193, 2.58588219154, 141.22580985640],
[5.499, 2.09250039025, 1.69692102940],
[5.279, 4.09390686798, 983.11585891360],
[6.947, 3.48041784595, 415.29185818120],
[5.916, 0.68957324226, 62.25142559510],
[5.925, 4.02504592620, 255.05546779820],
[4.606, 1.17779101436, 43.24084506850],
[5.357, 3.63061058987, 5.41662597140],
[5.918, 2.57693824084, 10175.15251057320],
[5.482, 3.07979737280, 329.83706636550],
[3.956, 5.00418696742, 184.72728735580],
[5.408, 3.31313295602, 528.20649238630],
[4.767, 4.91981150665, 456.39383923560],
[3.770, 1.57277409442, 32.71640966640],
[3.924, 4.92763242635, 180.27386923090],
[3.707, 4.82965453201, 221.37585028530],
[3.802, 4.96279204998, 594.65070367540],
[4.014, 1.63905164030, 40.58071619260],
[3.061, 0.39713858313, 1.43628859850],
[3.261, 4.65478978469, 29.22619938800],
[3.474, 5.65891305944, 395.57870223900],
[2.918, 5.91079083895, 1.22384027740],
[3.649, 3.88114678609, 494.52687487340],
[3.225, 5.57423738665, 1014.13534755060],
[2.845, 0.56009386585, 144.14657116320],
[2.848, 0.55423029727, 567.82400073240],
[3.440, 1.70887250883, 12.53017297220],
[3.267, 5.63287799820, 488.58898404020],
[3.107, 5.79335949207, 105.49227066360],
[2.712, 2.43726364359, 60.76695288680],
[3.202, 2.21483496593, 41.05379694460],
[3.134, 4.69665220513, 82.85835341460],
[3.590, 5.69939670162, 1124.34166877000],
[2.967, 0.54448940101, 135.54855145410],
[3.211, 4.19927605853, 291.70403072770],
[2.899, 5.99669788291, 22.63391724900],
[3.143, 2.93495725805, 31.23193695810],
[2.729, 4.62707721219, 5.62907429250],
[2.513, 5.60391563025, 19.12245511120],
[2.690, 5.32070128202, 2.00573757010],
[2.630, 6.00855841124, 37.16982779130],
[2.296, 6.06934502789, 451.94042111070],
[2.858, 4.88677262419, 258.02441321480],
[2.879, 5.12239168488, 38.65430049960],
[2.270, 2.08634524182, 30.05628079050],
[2.301, 3.35951602914, 1028.36244155220],
[3.001, 3.59143817947, 211.81462272970],
[2.237, 0.38455553470, 3.62333672240],
[2.901, 3.24755614136, 366.48562929500],
[2.592, 1.36262641469, 35.42472265210],
[2.418, 4.93467056526, 47.69426319340],
[2.089, 5.79838063413, 4.66586644600],
[2.586, 2.69392971321, 38.18121974760],
[1.913, 5.53560681085, 149.56319713460],
[1.971, 6.00790964671, 34.20088237470],
[2.586, 6.24984047544, 38.08485152800],
[2.098, 4.57819744766, 1019.76442184310],
[1.869, 3.85907708723, 911.04257333200],
[2.486, 5.21235809332, 140.00196957900],
[1.795, 1.68012868451, 1059.38193018920],
[2.326, 2.82664069146, 807.94979911340],
[1.984, 5.54763522932, 1022.73336725970],
[1.919, 5.10717766499, 216.92243216040],
[2.004, 5.47811228948, 63.73589830340],
[2.021, 4.15631916516, 178.13500521680],
[1.760, 6.00927149342, 172.19711438360],
[2.140, 2.65037925793, 700.66423920080],
[1.988, 3.35850272780, 186.21176006410],
[1.956, 5.01527508588, 294.67297614430],
[1.966, 4.07957525462, 20.60692781950],
[1.637, 0.53823942149, 67.35923502580],
[1.540, 2.62327849119, 41.75637236020],
[1.810, 5.81430038477, 129.91947716160],
[1.776, 4.37047808449, 328.35259365720],
[1.460, 2.63664516309, 2.85707083200],
[1.388, 2.10598045632, 3.93215326310],
[1.352, 0.55618245459, 0.65439130580],
[1.668, 2.77543377384, 16.15350969460],
[1.338, 0.37643611305, 14.01464568050],
[1.218, 0.73456434750, 426.59819087600],
[1.531, 4.54891769768, 526.72201967800],
[1.610, 3.40993944436, 403.13419222450],
[1.361, 4.48227243414, 17.63798240290],
[1.589, 5.59323020112, 3302.47939106200],
[1.132, 5.64520725360, 151.04766984290],
[1.357, 4.06399031430, 26.82670294300],
[1.494, 4.98692049495, 666.72398925700],
[1.077, 4.30911470250, 0.63313944640],
[1.042, 6.02756893581, 106.01353552540],
[1.060, 0.74679491358, 487.36514376280],
[1.310, 3.78526380930, 386.98068252990],
[1.342, 4.52685061062, 563.63121503840],
[0.986, 0.00600924269, 81.37388070630],
[1.232, 5.17443930901, 331.32153907380],
[0.929, 4.51267465978, 38.39366806870],
[0.956, 3.50447791020, 64.95973858080],
[0.929, 4.43109514438, 37.87240320690],
[0.926, 6.09803297747, 4.14460158420],
[0.972, 0.59038366513, 8.90683624980],
[1.246, 4.69840351226, 389.94962794650],
[1.009, 5.98451242784, 142.14083359310],
[1.020, 0.83233892300, 39.35687591520],
[1.013, 0.37845630298, 36.90919536040],
[0.940, 2.42688145966, 343.21857259960],
[0.974, 5.23958752786, 253.57099508990],
[0.964, 5.09748190218, 357.44566660120],
[0.835, 1.45568626670, 35.21227433100],
[1.077, 0.71409061316, 44.07092647100],
[1.083, 2.27578897621, 6.90109867970],
[0.938, 5.03471583911, 69.36497259590],
[1.078, 1.20253141912, 35.68535508300],
[1.027, 0.18243183397, 84.34282612290],
[0.764, 4.62720907712, 0.83008140250],
[1.013, 0.42234855022, 32.45577723550],
[0.939, 4.50445799766, 365.00115658670],
[0.756, 0.82872484717, 17.52610781830],
[0.916, 3.89409205418, 38.24491022240],
[0.736, 4.78125743795, 5.36844186160],
[0.762, 0.01897337130, 189.39315380180],
[0.738, 2.31770478416, 42.32582133180],
[0.860, 4.82440483506, 210.33015002140],
[0.888, 3.20360339895, 348.84764689210],
[0.916, 5.04967792934, 38.02116105320],
[0.638, 0.63267396269, 244.31858407500],
[0.636, 1.02615137352, 2080.63082474060],
[0.774, 5.44432678139, 367.97010200330],
[0.644, 1.94044989547, 446.31134681820],
[0.631, 4.82928491724, 460.53844081980],
[0.855, 3.57592750113, 439.78275515400],
[0.678, 4.48687912809, 351.81659230870],
[0.724, 4.89141609280, 119.50691634410],
[0.594, 0.59315717529, 491.03666459500],
[0.655, 1.99014093000, 19.01058052660],
[0.580, 2.57189536188, 492.07919431860],
[0.694, 0.08328521209, 5.67725840230],
[0.733, 5.81485239057, 29.74746424980],
[0.666, 3.42196897591, 179.09821306330],
[0.678, 0.29428615814, 171.23390653710],
[0.635, 2.13805182663, 164.12035953630],
[0.623, 5.61454940380, 285.37238101960],
[0.529, 1.88063108785, 416.77633088950],
[0.529, 5.13250788030, 697.74347789400],
[0.500, 1.49548514415, 704.85702489480],
[0.487, 4.97772067947, 274.06604832480],
[0.666, 6.26456825266, 1474.67378837040],
[0.532, 0.25784352716, 477.33083545520],
[0.557, 0.71378452161, 80.71948940050],
[0.556, 2.60791360513, 418.26080359780],
[0.584, 4.29064541383, 16.67477455640],
[0.524, 5.42759392280, 290.21955801940],
[0.524, 0.29054995359, 247.23934538180],
[0.541, 4.36400580938, 815.06334611420],
[0.526, 1.66512720297, 97.41551581630],
[0.497, 4.72640318293, 401.64971951620],
[0.432, 2.98481475894, 100.38446123290],
[0.382, 0.28067758468, 8.38557138800],
[0.424, 6.16774845481, 178.78939652260],
[0.484, 0.01535318279, 738.79727483860],
[0.518, 4.48916591410, 875.83029900100],
[0.506, 5.38611121207, 404.61866493280],
[0.396, 4.62747640832, 6.15033915430],
[0.466, 0.23340415764, 120.99138905240],
[0.409, 3.08849480895, 59.28248017850],
[0.470, 5.01853200224, 313.68355667090],
[0.442, 3.68919475089, 457.87831194390],
[0.384, 3.69499925394, 160.93896579860],
[0.364, 0.76192181046, 104.00779795530],
[0.416, 0.26652109651, 103.09277421860],
[0.401, 4.06530055968, 14.66903698630],
[0.454, 3.72767803715, 476.43131808350],
[0.434, 0.33533802200, 984.60033162190],
[0.340, 0.99915726716, 31.54075349880],
[0.420, 3.65147769268, 20.49505323490],
[0.334, 0.35121412008, 1227.43444298860],
[0.323, 5.45836731979, 918.15612033280],
[0.407, 4.19457842203, 309.79958751760],
[0.381, 0.01364856960, 495.49008271990],
[0.334, 4.05924071124, 8.33738727820],
[0.380, 3.17063415023, 487.62577619370],
[0.309, 0.48352303405, 118.02244363580],
[0.380, 2.70238752925, 134.11226285560],
[0.362, 4.88985810610, 438.29828244570],
[0.327, 2.91090790412, 505.78502345840],
[0.308, 0.96082817124, 21.14944454070],
[0.288, 1.48123872077, 220.41264243880],
[0.293, 2.56582281789, 662.53120356300],
[0.331, 4.37715965811, 180.79513409270],
[0.326, 2.46104924164, 169.53698550770],
[0.289, 2.63591886391, 55.77101804070],
[0.288, 5.02487283285, 1440.73353842660],
[0.344, 1.48930997270, 166.56804009110],
[0.266, 0.63672427386, 79.18683258240],
[0.268, 5.02354540478, 377.41945497430],
[0.308, 1.50185265748, 77.22927912210],
[0.324, 5.30240189273, 457.61767951300],
[0.265, 1.08736632800, 450.45594840240],
[0.264, 0.83337660655, 488.37653571910],
[0.290, 1.80003152563, 101.86893394120],
[0.262, 2.30390003360, 494.73932319450],
[0.325, 5.52669889053, 441.26722786230],
[0.254, 0.02963623277, 117.36805233000],
[0.300, 0.17435705540, 252.91660378410],
[0.315, 5.34885013040, 183.76407950930],
[0.313, 5.45945846595, 13.49338081870],
[0.306, 5.23085809622, 45.24658263860],
[0.237, 0.32676889138, 208.84567731310],
[0.263, 2.66670785888, 464.73122651380],
[0.234, 1.82700149824, 52175.80628314840],
[0.275, 5.04385701142, 156.15547927360],
[0.265, 5.64967127743, 326.86812094890],
[0.247, 1.74540930625, 65.87476231750],
[0.269, 6.09827783249, 1654.03263386460],
[0.229, 2.25832077914, 190.66517818900],
[0.294, 5.45249564193, 206.18554843720],
[0.238, 1.55647021369, 79.88940799800],
[0.230, 6.13158632762, 178.34745353790],
[0.274, 4.10829870815, 518.38463239980],
[0.225, 3.86300359251, 171.98466606250],
[0.228, 2.48511565618, 12566.15169998280],
[0.272, 5.61149862463, 148.33935685720],
[0.214, 1.45987216039, 522.57741809380],
[0.211, 4.04791980901, 6205.32530600750],
[0.266, 0.99036038827, 209.10630974400],
[0.230, 0.54049951530, 532.61172640140],
[0.226, 3.84152961620, 283.62727588040],
[0.243, 5.32730346969, 485.92885516430],
[0.209, 4.35051470487, 536.80451209540],
[0.232, 3.01948719112, 10.93382567930],
[0.264, 5.70536379124, 490.33408917940],
[0.280, 3.99993658196, 674.80074410430],
[0.246, 0.37698964335, 157.63995198190],
[0.219, 5.67679857772, 52099.54021187280],
[0.251, 1.52353965506, 6.85291456990],
[0.203, 5.44328656642, 145.63104387150],
[0.238, 0.96169723853, 497.18700374930],
[0.219, 4.52300776062, 1615.89959822680],
[0.275, 2.37619210741, 2118.76386037840],
[0.258, 5.12448148780, 608.87779767700],
[0.260, 3.88543008475, 513.07988101300],
[0.191, 3.72574595369, 65.22037101170],
[0.211, 0.06484535455, 215.43795945210],
[0.236, 3.95835282821, 141.48644228730],
[0.189, 5.28135043909, 377.15882254340],
[0.243, 4.35559878377, 482.95990974770],
[0.243, 6.06808644973, 154.01661525950],
[0.249, 1.57215637373, 14.22709400160],
[0.238, 1.93340192445, 500.15594916590],
[0.209, 5.02893682321, 364.55921360200],
[0.227, 5.72984298540, 1543.82631264520],
[0.217, 2.45036922991, 187.17496791060],
[0.181, 1.65699502247, 1627.20593092160],
[0.214, 1.60213179145, 11.30633269480],
[0.203, 0.74638490279, 14.55716240170],
[0.192, 3.17719161639, 343.47920503050],
[0.177, 1.50027795761, 9.44935297100],
[0.177, 0.03038098292, 165.60483224460],
[0.176, 4.64462444674, 315.16802937920],
[0.208, 2.65835778368, 496.01134758170],
[0.174, 2.76155855705, 49.17873590170],
[0.196, 1.95549714182, 335.77495719870],
[0.200, 4.16839394758, 285.11174858870],
[0.199, 0.06168021293, 73.55775828990],
[0.188, 6.17288913873, 535.32003938710],
[0.215, 1.92414563346, 552.69738935910],
[0.166, 5.49038139690, 10135.53500222710],
[0.192, 0.96973434120, 304.23420369990],
[0.209, 5.34065233845, 13.64213866500],
[0.203, 5.11234865419, 324.72925693480],
[0.177, 3.50680841790, 207.36120460480],
[0.174, 1.95010708561, 319.31263096340],
[0.187, 5.57685931698, 266.10116806210],
[0.181, 1.43525075751, 279.74330672710],
[0.165, 4.00537112057, 493.56366702690],
[0.191, 1.68313683465, 563.37058260750],
[0.173, 3.93200456456, 238.90195810360],
[0.161, 5.96143146317, 36.12729806770],
[0.194, 2.37664231450, 944.98282327580],
[0.165, 0.97421918976, 556.51766803760],
[0.189, 1.11279570541, 1127.26243007680],
[0.172, 0.75085513952, 267.58564077040],
[0.193, 2.12636756833, 20350.30502114640],
[0.181, 2.10814562080, 113.87784205160],
[0.194, 1.13504964219, 57.25549074900],
[0.181, 6.23699820519, 355.96119389290],
[0.198, 5.68125942959, 6280.10690457480],
[0.173, 5.15083799917, 474.94684537520],
[0.151, 1.66981962338, 116.53797092750],
[0.150, 5.42593657173, 526.98265210890],
[0.205, 4.16096717573, 711.44930703380],
[0.177, 3.49360697678, 421.22974901440],
[0.168, 0.52839230204, 487.10451133190],
[0.160, 4.77712663799, 524.01370669230],
[0.145, 2.81448128781, 1512.80682400820],
[0.146, 4.99570112660, 142.66209845490],
[0.188, 0.82104161550, 10210.31660079440],
[0.145, 4.96888131586, 1189.30140735080],
[0.181, 2.99704790590, 75.74480641380],
[0.176, 0.41626373842, 222.86032299360],
[0.137, 2.96534226337, 6206.80977871580],
[0.138, 1.22260849471, 187.69623277240],
[0.128, 2.53394068407, 276.77436131050],
[0.130, 3.04810765699, 310.71461125430],
[0.122, 3.01323006886, 70.84944530420],
[0.111, 0.77449448649, 179.35884549420],
[0.141, 0.18423889807, 131.40394986990],
[0.126, 5.77648809669, 525.23754696970],
[0.124, 2.93225731024, 179.61947792510],
[0.111, 6.18471578216, 981.63138620530],
[0.141, 2.63342951123, 381.61224066830],
[0.110, 5.25053027081, 986.08480433020],
[0.096, 3.86591534559, 240.12579838100],
[0.120, 3.78755085035, 1057.89745748090],
[0.093, 4.54014016637, 36.69674703930],
[0.109, 1.53327585900, 419.74527630610],
[0.094, 4.21870300178, 1024.21783996800],
[0.109, 2.15905156247, 289.56516671360],
[0.104, 0.20665642552, 564.85505531580],
[0.081, 1.89134135215, 36.60037881970],
[0.080, 4.38832594589, 10137.01947493540],
[0.080, 1.73940577376, 39.50563376150],
[0.084, 0.81316746605, 170.71264167530],
[0.090, 0.60145818457, 36.76043751410],
[0.074, 4.92511651321, 1549.45538693770],
[0.072, 5.06852406179, 249.94765836750],
],
# L1
[
[3837687716.731, 0.00000000000, 0.00000000000],
[16604.187, 4.86319129565, 1.48447270830],
[15807.148, 2.27923488532, 38.13303563780],
[3334.701, 3.68199676020, 76.26607127560],
[1305.840, 3.67320813491, 2.96894541660],
[604.832, 1.50477747549, 35.16409022120],
[178.623, 3.45318524147, 39.61750834610],
[106.537, 2.45126138334, 4.45341812490],
[105.747, 2.75479326550, 33.67961751290],
[72.684, 5.48724732699, 36.64856292950],
[57.069, 5.21649804970, 0.52126486180],
[57.355, 1.85767603384, 114.39910691340],
[35.368, 4.51676827545, 74.78159856730],
[32.216, 5.90411489680, 77.75054398390],
[29.871, 3.67043294114, 388.46515523820],
[28.866, 5.16877529164, 9.56122755560],
[28.742, 5.16732589024, 2.44768055480],
[25.507, 5.24526281928, 168.05251279940],
[24.869, 4.73193067810, 182.27960680100],
[20.205, 5.78945415677, 1021.24889455140],
[19.022, 1.82981144269, 484.44438245600],
[18.661, 1.31606255521, 498.67147645760],
[15.063, 4.95003893760, 137.03302416240],
[15.094, 3.98705254940, 32.19514480460],
[10.720, 2.44148149225, 4.19278569400],
[11.725, 4.89255650674, 71.81265315070],
[9.581, 1.23188039594, 5.93789083320],
[9.606, 1.88534821556, 41.10198105440],
[8.968, 0.01758559103, 8.07675484730],
[9.882, 6.08165628679, 7.11354700080],
[7.632, 5.51307048241, 73.29712585900],
[6.992, 0.61688864282, 2.92076130680],
[5.543, 2.24141557794, 46.20979048510],
[4.845, 3.71055823750, 112.91463420510],
[3.700, 5.25713252333, 111.43016149680],
[3.233, 6.10303038418, 70.32818044240],
[2.939, 4.86520586648, 98.89998852460],
[2.403, 2.90637675099, 601.76425067620],
[2.398, 1.04343654629, 6.59228213900],
[2.784, 4.95821114677, 108.46121608020],
[2.894, 4.20148844767, 381.35160823740],
[2.111, 5.93089610785, 25.60286266560],
[2.075, 5.20632201951, 30.71067209630],
[2.126, 0.54976393136, 41.05379694460],
[2.235, 2.38045158073, 453.42489381900],
[1.859, 0.89409373259, 24.11838995730],
[2.018, 3.42245274178, 31.01948863700],
[1.700, 3.91715254287, 11.04570026390],
[1.776, 3.86571077241, 395.57870223900],
[1.644, 0.15855999051, 152.53214255120],
[1.646, 3.34591387314, 44.72531777680],
[1.876, 2.59784179105, 33.94024994380],
[1.614, 0.42137145545, 175.16605980020],
[1.468, 6.12983933526, 1550.93985964600],
[1.408, 6.13722948564, 490.07345674850],
[1.207, 0.59525736062, 312.19908396260],
[1.336, 3.28611928206, 493.04240216510],
[1.176, 5.87266726996, 5.41662597140],
[1.517, 3.12967210501, 491.55792945680],
[1.053, 4.60375516830, 79.23501669220],
[1.037, 4.89007314395, 1.27202438720],
[1.034, 5.93741289103, 32.71640966640],
[1.038, 1.13470380744, 1014.13534755060],
[1.002, 1.85850922283, 5.10780943070],
[0.983, 0.05345050384, 7.42236354150],
[0.998, 1.73689827444, 1028.36244155220],
[1.193, 4.63176675581, 60.76695288680],
[0.940, 3.09103721222, 62.25142559510],
[0.994, 4.11489180313, 4.66586644600],
[0.890, 0.87049255398, 31.23193695810],
[0.852, 5.35508394316, 144.14657116320],
[0.922, 5.12373360511, 145.10977900970],
[0.789, 0.37496785039, 26.82670294300],
[0.828, 4.06035194600, 115.88357962170],
[0.711, 3.14189997439, 278.25883401880],
[0.727, 1.39718382835, 213.29909543800],
[0.781, 0.10946327923, 173.68158709190],
[0.793, 6.13086312116, 567.82400073240],
[0.669, 4.50554989443, 27.08733537390],
[0.825, 1.35568908148, 129.91947716160],
[0.738, 3.56766018960, 176.65053250850],
[0.714, 6.24797992301, 106.97674337190],
[0.654, 1.13177751192, 68.84370773410],
[0.624, 0.01567750666, 28.57180808220],
[0.608, 4.60180625368, 189.39315380180],
[0.595, 0.00857468445, 42.58645376270],
[0.530, 5.61201247153, 12.53017297220],
[0.521, 1.02371768017, 415.29185818120],
[0.639, 0.68930265745, 529.69096509460],
[0.526, 3.02138731705, 5.62907429250],
[0.456, 4.44331571392, 43.24084506850],
[0.524, 3.43316448349, 38.65430049960],
[0.436, 2.41630174435, 82.85835341460],
[0.424, 1.95736011325, 477.33083545520],
[0.443, 3.39350946329, 357.44566660120],
[0.383, 1.90232196422, 22.63391724900],
[0.479, 5.55141744216, 37.61177077600],
[0.462, 3.80436154644, 343.21857259960],
[0.384, 5.60377408953, 594.65070367540],
[0.369, 4.45577410338, 6.90109867970],
[0.358, 3.69126616347, 3.93215326310],
[0.352, 3.10952926034, 135.54855145410],
[0.368, 3.53577440355, 40.58071619260],
[0.424, 5.27159202779, 181.75834193920],
[0.361, 0.29018303419, 72.07328558160],
[0.390, 5.49512204296, 350.33211960040],
[0.378, 2.74122401337, 488.37653571910],
[0.372, 0.39980033572, 494.73932319450],
[0.353, 1.10614174053, 20.60692781950],
[0.296, 0.86351261285, 149.56319713460],
[0.307, 5.39420288683, 160.93896579860],
[0.395, 1.93577214824, 10137.01947493540],
[0.288, 2.28755739359, 47.69426319340],
[0.295, 2.48737537240, 19.12245511120],
[0.290, 0.18636083306, 143.62530630140],
[0.266, 3.09977370364, 69.36497259590],
[0.266, 1.21002824826, 505.78502345840],
[0.252, 3.12745026026, 460.53844081980],
[0.328, 0.50849285663, 6206.80977871580],
[0.257, 3.64119914774, 446.31134681820],
[0.239, 5.54080102299, 911.04257333200],
[0.265, 0.62702473701, 253.57099508990],
[0.287, 2.44403568436, 16.67477455640],
[0.231, 2.47026250085, 454.90936652730],
[0.230, 3.24571542922, 1066.49547719000],
[0.282, 1.48595620175, 983.11585891360],
[0.212, 5.41931177641, 64.95973858080],
[0.213, 1.64175339637, 1089.12939443900],
[0.238, 2.69801319489, 882.94384600180],
[0.210, 4.53976756699, 1093.32218013300],
[0.220, 2.30038816175, 1052.26838318840],
[0.256, 0.42073598460, 23.90594163620],
[0.216, 5.44225918870, 39.09624348430],
[0.201, 2.58746514605, 119.50691634410],
[0.224, 4.43751392203, 639.89728631400],
[0.186, 2.50651218075, 487.36514376280],
[0.189, 4.05785534221, 120.99138905240],
[0.184, 2.24245977278, 815.06334611420],
[0.202, 3.43517732411, 45.24658263860],
[0.175, 4.49165234532, 171.23390653710],
[0.171, 5.50633466316, 179.09821306330],
[0.200, 6.12663205401, 14.22709400160],
[0.173, 2.61090344107, 389.94962794650],
[0.167, 3.94754384833, 77.22927912210],
[0.166, 3.41009128748, 81.37388070630],
[0.163, 3.88198848446, 556.51766803760],
[0.164, 1.49614763046, 63.73589830340],
[0.176, 3.86129425367, 148.33935685720],
[0.161, 2.22215642318, 574.93754773320],
[0.171, 0.66899426684, 179.31066138440],
[0.161, 1.21480182441, 1024.43028828910],
[0.155, 3.25842414799, 10251.41858184880],
[0.183, 5.45168150656, 218.40690486870],
[0.152, 3.35145509017, 285.37238101960],
[0.152, 0.42398786475, 274.06604832480],
[0.146, 5.70714579127, 419.48464387520],
[0.156, 0.64321524870, 1029.84691426050],
[0.147, 4.30958930740, 157.63995198190],
[0.147, 1.80689177510, 377.41945497430],
[0.140, 1.49826604627, 386.98068252990],
[0.137, 2.14480243915, 563.63121503840],
[0.127, 3.98726599710, 84.34282612290],
[0.134, 4.16039455079, 169.53698550770],
[0.121, 0.29300927469, 206.18554843720],
[0.129, 2.67625057010, 180.79513409270],
[0.134, 3.18868986487, 166.56804009110],
[0.135, 5.07517561780, 426.59819087600],
[0.136, 1.81672451740, 151.04766984290],
[0.129, 3.64795525602, 183.76407950930],
[0.116, 6.06435563172, 220.41264243880],
[0.123, 4.46641157829, 1022.73336725970],
[0.112, 4.34485256988, 138.51749687070],
[0.116, 5.58946529961, 35.68535508300],
[0.108, 1.03796693383, 488.58898404020],
[0.108, 2.10378485880, 494.52687487340],
[0.106, 0.87068583107, 1059.38193018920],
[0.097, 0.74486741478, 485.92885516430],
[0.095, 5.54259914856, 497.18700374930],
[0.085, 3.16062141266, 522.57741809380],
[0.097, 6.05634803604, 482.95990974770],
[0.095, 0.23111852730, 500.15594916590],
[0.084, 2.64687252518, 536.80451209540],
[0.074, 3.90678924318, 1019.76442184310],
],
# L2
[
[53892.649, 0.00000000000, 0.00000000000],
[281.251, 1.19084538887, 38.13303563780],
[295.693, 1.85520292248, 1.48447270830],
[270.190, 5.72143228148, 76.26607127560],
[23.023, 1.21035596452, 2.96894541660],
[7.333, 0.54033306830, 2.44768055480],
[9.057, 4.42544992035, 35.16409022120],
[5.223, 0.67427930044, 168.05251279940],
[5.201, 3.02338671812, 182.27960680100],
[4.288, 3.84351844003, 114.39910691340],
[3.925, 3.53214557374, 484.44438245600],
[3.741, 5.90238217874, 498.67147645760],
[2.966, 0.31002477611, 4.45341812490],
[3.415, 0.55971639038, 74.78159856730],
[3.255, 1.84921884906, 175.16605980020],
[2.157, 1.89135758747, 388.46515523820],
[2.211, 4.37997092240, 7.11354700080],
[1.847, 3.48574435762, 9.56122755560],
[2.451, 4.68586840176, 491.55792945680],
[1.844, 5.12281562096, 33.67961751290],
[2.204, 1.69321574906, 77.75054398390],
[1.652, 2.55859494053, 36.64856292950],
[1.309, 4.52400192922, 1021.24889455140],
[1.124, 0.38710602242, 137.03302416240],
[0.664, 0.88101734307, 4.19278569400],
[0.497, 2.24615784762, 395.57870223900],
[0.512, 6.22609200672, 381.35160823740],
[0.582, 5.25716719826, 31.01948863700],
[0.446, 0.36647221351, 98.89998852460],
[0.383, 5.48585528762, 5.93789083320],
[0.375, 4.61250246774, 8.07675484730],
[0.354, 1.30783918287, 601.76425067620],
[0.259, 5.66033623678, 112.91463420510],
[0.247, 2.89695614593, 189.39315380180],
[0.245, 4.26572913391, 220.41264243880],
[0.200, 0.52604535784, 64.95973858080],
[0.191, 4.88786653062, 39.61750834610],
[0.233, 3.16423779113, 41.10198105440],
[0.248, 5.85877831382, 1059.38193018920],
[0.194, 2.37949641473, 73.29712585900],
[0.227, 0.20028518978, 60.76695288680],
[0.184, 3.01962045713, 1014.13534755060],
[0.190, 5.57500985081, 343.21857259960],
[0.172, 3.66036463613, 477.33083545520],
[0.172, 0.59550457102, 46.20979048510],
[0.182, 1.92429384025, 183.76407950930],
[0.171, 1.61368476689, 357.44566660120],
[0.173, 6.23717119485, 493.04240216510],
[0.217, 1.46218158211, 71.81265315070],
[0.178, 0.34928799031, 1028.36244155220],
[0.169, 4.91086673212, 166.56804009110],
[0.157, 5.89200571154, 169.53698550770],
[0.182, 2.33457064554, 152.53214255120],
[0.151, 3.81621340568, 146.59425171800],
[0.136, 2.75150881988, 144.14657116320],
[0.104, 6.03262825314, 529.69096509460],
[0.076, 0.20932812381, 453.42489381900],
],
# L3
[
[31.254, 0.00000000000, 0.00000000000],
[12.461, 6.04431418812, 1.48447270830],
[14.541, 1.35337075856, 76.26607127560],
[11.547, 6.11257808366, 38.13303563780],
[1.351, 4.93951495175, 2.96894541660],
[0.741, 2.35936954597, 168.05251279940],
[0.715, 1.27409542804, 182.27960680100],
[0.537, 5.23632185196, 484.44438245600],
[0.523, 4.16769839601, 498.67147645760],
[0.664, 0.55871435877, 31.01948863700],
[0.301, 2.69253200796, 7.11354700080],
[0.194, 2.05904114139, 137.03302416240],
[0.206, 2.51012178002, 74.78159856730],
[0.160, 5.63111039032, 114.39910691340],
[0.149, 3.09327713923, 35.16409022120],
],
# L4
[
[113.998, 3.14159265359, 0.00000000000],
[0.605, 3.18211885677, 76.26607127560]
],
# L5
[
[0.874, 3.14159265359, 0.00000000000]
],
]
"""This table contains Neptune's periodic terms (all of them) from the
planetary theory VSOP87 for the heliocentric longitude at the equinox of date
(taken from the 'D' solution). In Meeus' book a shortened version can be found
in pages 452-453."""
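from math import cos


# A minimal evaluation sketch, not part of the original module: it shows how
# a VSOP87 table like the one above is typically used. Each row is [A, B, C];
# each sub-list Ln contributes the sum of A*cos(B + C*tau), and the sub-lists
# are combined as a polynomial in tau (Julian millennia from J2000.0). The
# helper name 'vsop87_series' is an illustrative assumption, not an API
# defined elsewhere in this module.
def vsop87_series(table, tau):
    """Sum the periodic terms of one VSOP87 coordinate for time 'tau'
    (Julian millennia since J2000.0), returning the raw series value in the
    table's own coefficient scaling (1e-8 of the final unit in Meeus'
    presentation of VSOP87)."""
    total = 0.0
    tau_power = 1.0
    for series in table:                       # L0, L1, L2, ... (or B/R)
        periodic_sum = 0.0
        for a, b, c in series:                 # amplitude, phase, frequency
            periodic_sum += a * cos(b + c * tau)
        total += periodic_sum * tau_power      # weight the Ln series by tau**n
        tau_power *= tau
    return total

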
VSOP87_B = [
# B0
[
[3088622.933, 1.44104372626, 38.13303563780],
[27780.087, 5.91271882843, 76.26607127560],
[27623.609, 0.00000000000, 0.00000000000],
[15355.490, 2.52123799481, 36.64856292950],
[15448.133, 3.50877080888, 39.61750834610],
[1999.919, 1.50998669505, 74.78159856730],
[1967.540, 4.37778195768, 1.48447270830],
[1015.137, 3.21561035875, 35.16409022120],
[605.767, 2.80246601405, 73.29712585900],
[594.878, 2.12892708114, 41.10198105440],
[588.805, 3.18655882497, 2.96894541660],
[401.830, 4.16883287237, 114.39910691340],
[254.333, 3.27120499438, 453.42489381900],
[261.647, 3.76722704749, 213.29909543800],
[279.964, 1.68165309699, 77.75054398390],
[205.590, 4.25652348864, 529.69096509460],
[140.455, 3.52969556376, 137.03302416240],
[98.530, 4.16774829927, 33.67961751290],
[51.257, 1.95121181203, 4.45341812490],
[67.971, 4.66970781659, 71.81265315070],
[41.931, 5.41783694467, 111.43016149680],
[41.822, 5.94832001477, 112.91463420510],
[30.637, 0.93620571932, 42.58645376270],
[11.084, 5.88898793049, 108.46121608020],
[9.620, 0.03944255108, 70.32818044240],
[9.664, 0.22455797403, 79.23501669220],
[9.728, 5.30069593532, 32.19514480460],
[7.386, 3.00684933642, 426.59819087600],
[7.087, 0.12535040656, 109.94568878850],
[6.021, 6.20514068152, 115.88357962170],
[6.169, 3.62098109648, 983.11585891360],
[4.777, 0.75210194972, 5.93789083320],
[6.391, 5.84646101060, 148.07872442630],
[6.251, 2.41678769385, 152.53214255120],
[4.539, 5.58182098700, 175.16605980020],
[5.006, 4.60815664851, 1059.38193018920],
[4.289, 4.19647392821, 47.69426319340],
[5.795, 5.07516716087, 415.29185818120],
[4.749, 2.51605725604, 37.61177077600],
[4.119, 1.72779509865, 28.57180808220],
[4.076, 6.00252170354, 145.10977900970],
[4.429, 5.65995321659, 98.89998852460],
[3.950, 2.74104636753, 350.33211960040],
[4.091, 1.61787956945, 39.09624348430],
[4.131, 4.40682554313, 37.16982779130],
[4.710, 3.50929350767, 38.65430049960],
[4.440, 4.78977105547, 38.08485152800],
[4.433, 1.23386935925, 38.18121974760],
[3.762, 4.83940791709, 491.55792945680],
[2.606, 1.20956732792, 451.94042111070],
[2.537, 2.18628045751, 454.90936652730],
[2.328, 5.19779918719, 72.07328558160],
[2.502, 0.85987904350, 106.97674337190],
[2.342, 0.81387240947, 4.19278569400],
[1.981, 0.46617960831, 184.72728735580],
[1.963, 6.01909114576, 44.07092647100],
[2.180, 0.70099749844, 206.18554843720],
[1.811, 0.40456996647, 40.58071619260],
[1.814, 3.64699555185, 220.41264243880],
[1.705, 6.13551142362, 181.75834193920],
[1.855, 5.61635630213, 35.68535508300],
[1.595, 2.97147156093, 37.87240320690],
[1.785, 2.42154818096, 388.46515523820],
[1.595, 3.05266110075, 38.39366806870],
[1.437, 1.48678704605, 135.54855145410],
[1.387, 2.46149266117, 138.51749687070],
[1.366, 1.52026779665, 68.84370773410],
[1.575, 3.58964541604, 38.02116105320],
[1.297, 5.06156596196, 33.94024994380],
[1.487, 0.20211121607, 30.05628079050],
[1.504, 5.80298577327, 46.20979048510],
[1.192, 0.87275514483, 42.32582133180],
[1.569, 2.43405967107, 38.24491022240],
[1.207, 1.84658687853, 251.43213107580],
[1.015, 0.53439848924, 129.91947716160],
[0.999, 2.47463873948, 312.19908396260],
[0.990, 3.41514319052, 144.14657116320],
[0.963, 4.31733242907, 151.04766984290],
[1.020, 0.98226686775, 143.62530630140],
[0.941, 1.02993053785, 221.37585028530],
[0.938, 2.43648356625, 567.82400073240],
[1.111, 0.65175024456, 146.59425171800],
[0.777, 0.00175975222, 218.40690486870],
[0.895, 0.25123869620, 30.71067209630],
[0.795, 5.80519741659, 149.56319713460],
[0.737, 3.40060492866, 446.31134681820],
[0.719, 1.43795191278, 8.07675484730],
[0.720, 0.00651007550, 460.53844081980],
[0.766, 4.03399506246, 522.57741809380],
[0.666, 1.39457824982, 84.34282612290],
[0.584, 1.01405548136, 536.80451209540],
[0.596, 0.62390100715, 35.21227433100],
[0.598, 5.39946724188, 41.05379694460],
[0.475, 5.80072248338, 7.42236354150],
[0.510, 1.34478579740, 258.02441321480],
[0.458, 5.25325523118, 80.71948940050],
[0.421, 3.24496387889, 416.77633088950],
[0.446, 1.19167306357, 180.27386923090],
[0.471, 0.92632922375, 44.72531777680],
[0.387, 1.68488418788, 183.24281464750],
[0.375, 0.15223869165, 255.05546779820],
[0.354, 4.21526988674, 0.96320784650],
[0.379, 2.16947487177, 105.49227066360],
[0.341, 4.79194051680, 110.20632121940],
[0.427, 5.15774894584, 31.54075349880],
[0.302, 3.45706306280, 100.38446123290],
[0.298, 2.26790695187, 639.89728631400],
[0.279, 0.25689162963, 39.50563376150],
[0.320, 3.58085653166, 45.24658263860],
[0.269, 5.72024180826, 36.76043751410],
[0.247, 0.61040148804, 186.21176006410],
[0.245, 0.64173616273, 419.48464387520],
[0.235, 0.73189197665, 10213.28554621100],
[0.232, 0.37399822852, 490.07345674850],
[0.230, 5.76570492457, 12.53017297220],
[0.240, 4.13447692727, 0.52126486180],
[0.279, 1.62614865256, 294.67297614430],
[0.238, 2.18528916550, 219.89137757700],
[0.262, 3.08384135298, 6.59228213900],
[0.217, 2.93214905312, 27.08733537390],
[0.217, 4.69210602828, 406.10313764110],
[0.219, 1.35212712560, 216.92243216040],
[0.200, 2.35215465744, 605.95703637020],
[0.232, 3.92583619589, 1512.80682400820],
[0.223, 5.52392277606, 187.69623277240],
[0.190, 0.29169556516, 291.70403072770],
[0.236, 3.12464145036, 563.63121503840],
[0.193, 0.53675942386, 60.76695288680],
[0.215, 3.78391259001, 103.09277421860],
[0.172, 5.63262770743, 7.11354700080],
[0.164, 4.14700645532, 77.22927912210],
[0.162, 0.72021213236, 11.04570026390],
[0.160, 4.23490438166, 487.36514376280],
[0.191, 0.37651439206, 31.01948863700],
[0.157, 1.02419759383, 6283.07584999140],
[0.157, 4.42530429545, 6206.80977871580],
[0.178, 6.24797160202, 316.39186965660],
[0.161, 5.65988283675, 343.21857259960],
[0.153, 5.58405022784, 252.08652238160],
[0.189, 4.80791039970, 641.12112659140],
[0.166, 5.50438043692, 662.53120356300],
[0.146, 5.08949604858, 286.59622129700],
[0.145, 2.13015521881, 2042.49778910280],
[0.156, 2.19452173251, 274.06604832480],
[0.148, 4.85696640135, 442.75170057060],
[0.187, 4.96121139073, 1589.07289528380],
[0.155, 2.28260574227, 142.14083359310],
[0.134, 1.29277093566, 456.39383923560],
[0.126, 5.59769497652, 179.35884549420],
[0.146, 2.53359213478, 256.53994050650],
[0.140, 1.57962199954, 75.74480641380],
[0.123, 0.05442220184, 944.98282327580],
[0.122, 1.90676379802, 418.26080359780],
[0.154, 1.86865302773, 331.32153907380],
[0.144, 5.52229258454, 14.01464568050],
[0.138, 2.80728175526, 82.85835341460],
[0.107, 0.66995358132, 190.66517818900],
[0.114, 1.48894980280, 253.57099508990],
[0.110, 5.32587573069, 240.12579838100],
[0.105, 0.65548440578, 173.68158709190],
[0.102, 2.58735617801, 450.45594840240],
[0.098, 0.44044795266, 328.35259365720],
[0.101, 4.71267656829, 117.36805233000],
[0.094, 0.54938580474, 293.18850343600],
[0.095, 2.17636214523, 101.86893394120],
[0.093, 0.63687810471, 377.15882254340],
[0.091, 5.84828809934, 10137.01947493540],
[0.089, 1.02830167997, 1021.24889455140],
[0.094, 1.79320597168, 493.04240216510],
[0.080, 1.58140274465, 69.15252427480],
[0.075, 0.23453373368, 63.73589830340],
[0.071, 1.51961989690, 488.58898404020],
],
# B1
[
[227279.214, 3.80793089870, 38.13303563780],
[1803.120, 1.97576485377, 76.26607127560],
[1385.733, 4.82555548018, 36.64856292950],
[1433.300, 3.14159265359, 0.00000000000],
[1073.298, 6.08054240712, 39.61750834610],
[147.903, 3.85766231348, 74.78159856730],
[136.448, 0.47764957338, 1.48447270830],
[70.285, 6.18782052139, 35.16409022120],
[51.899, 5.05221791891, 73.29712585900],
[37.273, 4.89476629246, 41.10198105440],
[42.568, 0.30721737205, 114.39910691340],
[37.104, 5.75999349109, 2.96894541660],
[26.399, 5.21566335936, 213.29909543800],
[16.949, 4.26463671859, 77.75054398390],
[18.747, 0.90426522185, 453.42489381900],
[12.951, 6.17709713139, 529.69096509460],
[10.502, 1.20336443465, 137.03302416240],
[4.416, 1.25478204684, 111.43016149680],
[4.383, 6.14147099615, 71.81265315070],
[3.694, 0.94837702528, 33.67961751290],
[2.957, 4.77532871210, 4.45341812490],
[2.698, 1.92435531119, 112.91463420510],
[1.989, 3.96637567224, 42.58645376270],
[1.150, 4.30568700024, 37.61177077600],
[0.871, 4.81775882249, 152.53214255120],
[0.944, 2.21777772050, 109.94568878850],
[0.936, 1.17054983940, 148.07872442630],
[0.925, 2.40329074000, 206.18554843720],
[0.690, 1.57381082857, 38.65430049960],
[0.624, 2.79466003645, 79.23501669220],
[0.726, 4.13829519132, 28.57180808220],
[0.640, 2.46161252327, 115.88357962170],
[0.531, 2.96991530500, 98.89998852460],
[0.537, 1.95986772922, 220.41264243880],
[0.539, 2.06690307827, 40.58071619260],
[0.716, 0.55781847010, 350.33211960040],
[0.563, 1.84072805158, 983.11585891360],
[0.533, 1.34787677940, 47.69426319340],
[0.566, 1.80111775954, 175.16605980020],
[0.449, 1.62191691011, 144.14657116320],
[0.371, 2.74239666472, 415.29185818120],
[0.381, 6.11910193382, 426.59819087600],
[0.366, 2.39752585360, 129.91947716160],
[0.456, 3.19611413854, 108.46121608020],
[0.327, 3.62341506247, 38.18121974760],
[0.328, 0.89613145346, 38.08485152800],
[0.341, 3.87265469070, 35.68535508300],
[0.331, 4.48858774501, 460.53844081980],
[0.414, 1.03543720726, 70.32818044240],
[0.310, 0.51297445145, 37.16982779130],
[0.287, 2.18351651800, 491.55792945680],
[0.274, 6.11504724934, 522.57741809380],
[0.281, 3.81657117512, 5.93789083320],
[0.298, 4.00532631258, 39.09624348430],
[0.265, 5.26569823181, 446.31134681820],
[0.319, 1.34097217817, 184.72728735580],
[0.203, 6.02944475303, 149.56319713460],
[0.205, 5.53935732020, 536.80451209540],
[0.226, 6.17710997862, 454.90936652730],
[0.186, 3.24302117645, 4.19278569400],
[0.179, 4.91458426239, 451.94042111070],
[0.198, 2.30775852880, 146.59425171800],
[0.166, 1.16793600058, 72.07328558160],
[0.147, 2.10574339673, 44.07092647100],
[0.123, 1.98250467171, 46.20979048510],
[0.159, 3.46955908364, 145.10977900970],
[0.116, 5.88971113590, 38.02116105320],
[0.115, 4.73412534395, 38.24491022240],
[0.125, 3.42713474801, 251.43213107580],
[0.128, 1.51108932026, 221.37585028530],
[0.127, 0.17176461812, 138.51749687070],
[0.124, 5.85160407534, 1059.38193018920],
[0.091, 2.38273591235, 30.05628079050],
[0.118, 5.27114846878, 37.87240320690],
[0.117, 5.35267669439, 38.39366806870],
[0.099, 5.19920708255, 135.54855145410],
[0.114, 4.37452353441, 388.46515523820],
[0.093, 4.64183693718, 106.97674337190],
[0.084, 1.35269684746, 33.94024994380],
[0.111, 3.56226463770, 181.75834193920],
[0.082, 3.18401661435, 42.32582133180],
[0.084, 5.51669920239, 8.07675484730],
],
# B2
[
[9690.766, 5.57123750291, 38.13303563780],
[78.815, 3.62705474219, 76.26607127560],
[71.523, 0.45476688580, 36.64856292950],
[58.646, 3.14159265359, 0.00000000000],
[29.915, 1.60671721861, 39.61750834610],
[6.472, 5.60736756575, 74.78159856730],
[5.800, 2.25341847151, 1.48447270830],
[4.309, 1.68126737666, 35.16409022120],
[3.502, 2.39142672984, 114.39910691340],
[2.649, 0.65061457644, 73.29712585900],
[1.518, 0.37600329684, 213.29909543800],
[1.223, 1.23116043030, 2.96894541660],
[0.766, 5.45279753249, 453.42489381900],
[0.779, 2.07081431472, 529.69096509460],
[0.496, 0.26552533921, 41.10198105440],
[0.469, 5.87866293959, 77.75054398390],
[0.482, 5.63056237954, 137.03302416240],
[0.345, 1.80085651594, 71.81265315070],
[0.274, 2.86650141006, 33.67961751290],
[0.158, 4.63868656467, 206.18554843720],
[0.166, 1.24877330835, 220.41264243880],
[0.153, 2.87376446497, 111.43016149680],
[0.116, 3.63838544843, 112.91463420510],
[0.085, 0.43712705655, 4.45341812490],
[0.104, 6.12597614674, 144.14657116320],
],
# B3
[
[273.423, 1.01688979072, 38.13303563780],
[2.274, 2.36805657126, 36.64856292950],
[2.029, 5.33364321342, 76.26607127560],
[2.393, 0.00000000000, 0.00000000000],
[0.538, 3.21934211365, 39.61750834610],
[0.242, 4.52650721578, 114.39910691340],
[0.185, 1.04913770083, 74.78159856730],
[0.155, 3.62376309338, 35.16409022120],
[0.157, 3.94195369610, 1.48447270830],
],
# B4
[[5.728, 2.66872693322, 38.13303563780]],
# B5
[[0.113, 4.70646877989, 38.13303563780]],
]
"""This table contains Neptune's periodic terms (all of them) from the
planetary theory VSOP87 for the heliocentric latitude at the equinox of date
(taken from the 'D' solution). In Meeus' book a shortened version can be found
in page 453."""
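
# A hypothetical usage sketch building on the 'vsop87_series' helper above.
# The function name, the 'jde' argument (Julian Ephemeris Day) and the 1e-8
# scaling follow Meeus' presentation of VSOP87 and are assumptions here, not
# part of this module's own API.
def neptune_heliocentric_latitude(jde):
    """Return Neptune's heliocentric latitude, in radians, for the given
    Julian Ephemeris Day, assuming Meeus' 1e-8 coefficient scaling (the
    leading B0 amplitude then corresponds to about 1.77 degrees, consistent
    with Neptune's orbital inclination)."""
    tau = (jde - 2451545.0) / 365250.0         # Julian millennia from J2000.0
    return vsop87_series(VSOP87_B, tau) * 1e-8

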
VSOP87_R = [
# R0
[
[3007013206.102, 0.00000000000, 0.00000000000],
[27062259.490, 1.32999458930, 38.13303563780],
[1691764.281, 3.25186138896, 36.64856292950],
[807830.737, 5.18592836167, 1.48447270830],
[537760.613, 4.52113902845, 35.16409022120],
[495725.642, 1.57105654815, 491.55792945680],
[274571.970, 1.84552256801, 175.16605980020],
[135134.095, 3.37220607384, 39.61750834610],
[121801.825, 5.79754444303, 76.26607127560],
[100895.397, 0.37702748681, 73.29712585900],
[69791.722, 3.79617226928, 2.96894541660],
[46687.838, 5.74937810094, 33.67961751290],
[24593.778, 0.50801728204, 109.94568878850],
[16939.242, 1.59422166991, 71.81265315070],
[14229.686, 1.07786112902, 74.78159856730],
[12011.825, 1.92062131635, 1021.24889455140],
[8394.731, 0.67816895547, 146.59425171800],
[7571.800, 1.07149263431, 388.46515523820],
[5720.852, 2.59059512267, 4.45341812490],
[4839.672, 1.90685991070, 41.10198105440],
[4483.492, 2.90573457534, 529.69096509460],
[4270.202, 3.41343865825, 453.42489381900],
[4353.790, 0.67985662370, 32.19514480460],
[4420.804, 1.74993796503, 108.46121608020],
[2881.063, 1.98600105123, 137.03302416240],
[2635.535, 3.09755943422, 213.29909543800],
[3380.930, 0.84810683275, 183.24281464750],
[2878.942, 3.67415901855, 350.33211960040],
[2306.293, 2.80962935724, 70.32818044240],
[2530.149, 5.79839567009, 490.07345674850],
[2523.132, 0.48630800015, 493.04240216510],
[2087.303, 0.61858378281, 33.94024994380],
[1976.522, 5.11703044560, 168.05251279940],
[1905.254, 1.72186472126, 182.27960680100],
[1654.039, 1.92782545887, 145.10977900970],
[1435.072, 1.70005157785, 484.44438245600],
[1403.029, 4.58914203187, 498.67147645760],
[1499.193, 1.01623299513, 219.89137757700],
[1398.860, 0.76220317620, 176.65053250850],
[1403.377, 6.07659416908, 173.68158709190],
[1128.560, 5.96661179805, 9.56122755560],
[1228.304, 1.59881465324, 77.75054398390],
[835.414, 3.97066884218, 114.39910691340],
[811.186, 3.00258880870, 46.20979048510],
[731.925, 2.10447054189, 181.75834193920],
[615.781, 2.97874625677, 106.97674337190],
[704.778, 1.18738210880, 256.53994050650],
[502.040, 1.38657803368, 5.93789083320],
[530.357, 4.24059166485, 111.43016149680],
[437.096, 2.27029212923, 1550.93985964600],
[400.250, 1.25609325435, 8.07675484730],
[421.011, 1.89084929506, 30.71067209630],
[382.457, 3.29965259685, 983.11585891360],
[422.485, 5.53186169605, 525.49817940060],
[355.389, 2.27847846648, 218.40690486870],
[280.062, 1.54129714238, 98.89998852460],
[314.499, 3.95932948594, 381.35160823740],
[280.556, 4.54238271682, 44.72531777680],
[267.738, 5.13323364247, 112.91463420510],
[333.311, 5.75067616021, 39.09624348430],
[291.625, 4.02398326341, 68.84370773410],
[321.429, 1.50625025822, 454.90936652730],
[309.196, 2.85452752153, 72.07328558160],
[345.094, 1.35905860594, 293.18850343600],
[307.439, 0.31964571332, 601.76425067620],
[251.356, 3.53992782846, 312.19908396260],
[248.152, 3.41078346726, 37.61177077600],
[306.000, 2.72475094464, 6244.94281435360],
[293.532, 4.89079857814, 528.20649238630],
[234.479, 0.59231043427, 42.58645376270],
[239.628, 3.16441455173, 143.62530630140],
[214.523, 3.62480283040, 278.25883401880],
[246.198, 1.01506302015, 141.22580985640],
[174.089, 5.55011789988, 567.82400073240],
[163.934, 2.10166491786, 2.44768055480],
[162.897, 2.48946521653, 4.19278569400],
[193.455, 1.58425287580, 138.51749687070],
[155.323, 3.28425127954, 31.01948863700],
[182.469, 2.45244890571, 255.05546779820],
[177.846, 4.14773474853, 10175.15251057320],
[174.413, 1.53042999914, 329.83706636550],
[137.649, 3.34900537767, 0.96320784650],
[161.011, 5.16655038482, 211.81462272970],
[113.473, 4.96286007991, 148.07872442630],
[128.823, 3.25521535448, 24.11838995730],
[107.363, 3.26457701792, 1059.38193018920],
[122.732, 5.39399536941, 62.25142559510],
[120.529, 3.08050145518, 184.72728735580],
[99.356, 1.92888554099, 28.57180808220],
[97.713, 2.59474415429, 6.59228213900],
[124.095, 3.11516750340, 221.37585028530],
[124.693, 2.97042405451, 251.43213107580],
[114.252, 0.25039919123, 594.65070367540],
[111.006, 3.34276426767, 180.27386923090],
[120.939, 1.92914010593, 25.60286266560],
[104.667, 0.94883561775, 395.57870223900],
[109.779, 5.43147520571, 494.52687487340],
[96.919, 0.86184760695, 1014.13534755060],
[98.685, 0.89577952710, 488.58898404020],
[88.968, 4.78109764779, 144.14657116320],
[107.888, 0.98700578434, 1124.34166877000],
[97.067, 2.62667400276, 291.70403072770],
[75.131, 5.88936524779, 43.24084506850],
[93.718, 6.09873565184, 526.72201967800],
[94.822, 0.20662943940, 456.39383923560],
[70.036, 2.39683345663, 426.59819087600],
[77.187, 4.21076753240, 105.49227066360],
[89.874, 3.25100749923, 258.02441321480],
[69.133, 4.93031154435, 1028.36244155220],
[90.657, 1.69466970587, 366.48562929500],
[74.242, 3.14479101276, 82.85835341460],
[57.995, 0.86159785905, 60.76695288680],
[78.695, 1.09307575550, 700.66423920080],
[57.230, 0.81331949225, 2.92076130680],
[63.443, 4.39590123005, 149.56319713460],
[55.698, 3.89047249911, 47.69426319340],
[56.430, 5.15003563302, 0.52126486180],
[56.174, 5.42986960794, 911.04257333200],
[61.746, 6.16453667559, 1019.76442184310],
[70.503, 0.08077330612, 40.58071619260],
[74.677, 4.85904499980, 186.21176006410],
[61.861, 4.78702599861, 11.04570026390],
[61.135, 0.83712253227, 1022.73336725970],
[61.268, 5.70228826765, 178.13500521680],
[52.887, 0.37458943972, 27.08733537390],
[56.722, 3.52318112447, 216.92243216040],
[48.819, 5.10789123481, 64.95973858080],
[63.290, 4.39424910030, 807.94979911340],
[64.062, 6.28297531806, 7.11354700080],
[46.356, 1.34735469284, 451.94042111070],
[60.540, 3.40316162416, 294.67297614430],
[46.900, 0.17048203552, 7.42236354150],
[56.766, 0.45048868231, 140.00196957900],
[55.887, 1.06815733757, 172.19711438360],
[53.761, 2.79644687008, 328.35259365720],
[43.828, 6.04655696644, 135.54855145410],
[49.549, 0.64106656292, 41.05379694460],
[53.960, 2.91774494436, 563.63121503840],
[42.961, 5.40175361431, 487.36514376280],
[51.508, 0.09105540708, 210.33015002140],
[41.889, 3.12343223889, 29.22619938800],
[47.655, 3.90701760087, 63.73589830340],
[41.639, 6.26847783513, 32.71640966640],
[41.429, 4.45464156759, 37.16982779130],
[40.745, 0.16043648294, 79.23501669220],
[48.205, 1.84198373010, 403.13419222450],
[36.912, 0.44771386183, 30.05628079050],
[47.762, 0.88083849566, 3302.47939106200],
[39.465, 3.50565484069, 357.44566660120],
[42.139, 0.63375113663, 343.21857259960],
[41.275, 1.36370496322, 31.23193695810],
[42.612, 3.55270845713, 38.65430049960],
[38.931, 5.26691753270, 415.29185818120],
[38.967, 5.25866056502, 386.98068252990],
[33.734, 5.24400184426, 67.35923502580],
[40.879, 3.55292279438, 331.32153907380],
[38.768, 1.12288359393, 38.18121974760],
[37.500, 6.08687972441, 35.42472265210],
[38.831, 4.67876780698, 38.08485152800],
[38.231, 6.26491054328, 389.94962794650],
[29.976, 4.45759985804, 22.63391724900],
[31.356, 0.07746010366, 12.53017297220],
[26.341, 4.59559782754, 106.01353552540],
[27.465, 5.99541587890, 206.18554843720],
[25.152, 4.49867760320, 34.20088237470],
[24.122, 5.17089441917, 129.91947716160],
[28.997, 3.64927210210, 253.57099508990],
[27.173, 4.37944546475, 142.14083359310],
[30.634, 1.59348806560, 348.84764689210],
[31.464, 1.05065113524, 100.38446123290],
[24.056, 1.02801635413, 41.75637236020],
[22.632, 4.72511111292, 81.37388070630],
[21.942, 3.48416607882, 69.15252427480],
[26.333, 3.01556008632, 365.00115658670],
[22.355, 3.92220883921, 5.10780943070],
[22.498, 4.03487494425, 19.12245511120],
[22.885, 1.58977064672, 189.39315380180],
[26.520, 3.61427038042, 367.97010200330],
[25.496, 2.43810518614, 351.81659230870],
[19.111, 2.59694457001, 2080.63082474060],
[19.640, 6.15701741238, 35.21227433100],
[25.688, 2.00512719767, 439.78275515400],
[21.613, 3.32354204724, 119.50691634410],
[25.389, 4.74025836522, 1474.67378837040],
[18.107, 5.35129342595, 244.31858407500],
[23.295, 5.93767742799, 316.39186965660],
[22.087, 4.81594755148, 84.34282612290],
[16.972, 3.05105149940, 220.41264243880],
[20.022, 4.99276451168, 179.09821306330],
[20.370, 1.86508317889, 171.23390653710],
[19.426, 2.04829970231, 5.41662597140],
[22.628, 0.27205783433, 666.72398925700],
[19.072, 3.70882976684, 164.12035953630],
[17.969, 3.40425338171, 69.36497259590],
[18.716, 0.90215956591, 285.37238101960],
[15.889, 0.42011285882, 697.74347789400],
[14.988, 3.08544843665, 704.85702489480],
[14.774, 3.36129613309, 274.06604832480],
[15.972, 1.82864185268, 477.33083545520],
[13.892, 2.94161501165, 38.39366806870],
[13.922, 2.85574364078, 37.87240320690],
[15.481, 4.94982954853, 101.86893394120],
[17.571, 5.82317632469, 35.68535508300],
[15.856, 5.04973561582, 36.90919536040],
[16.414, 3.63049397028, 45.24658263860],
[17.158, 2.51251149482, 20.60692781950],
[12.941, 3.03041555329, 522.57741809380],
[15.752, 5.00292909214, 247.23934538180],
[12.679, 0.20331109568, 460.53844081980],
[16.260, 5.93480347217, 815.06334611420],
[12.903, 3.51141502996, 446.31134681820],
[13.891, 5.51064697670, 31.54075349880],
[13.668, 5.45576135320, 39.35687591520],
[13.418, 3.95805150079, 290.21955801940],
[15.368, 2.45783892707, 26.82670294300],
[14.246, 3.18588280921, 401.64971951620],
[12.222, 4.94370170146, 14.01464568050],
[15.484, 3.79703715637, 404.61866493280],
[13.427, 3.79527836573, 151.04766984290],
[14.450, 4.93940408761, 120.99138905240],
[14.331, 4.71117327722, 738.79727483860],
[11.566, 5.91003539239, 536.80451209540],
[15.578, 2.91836788254, 875.83029900100],
[13.124, 2.16056013419, 152.53214255120],
[11.744, 2.94770244071, 2.70831298570],
[12.793, 1.97868575679, 1.37259812370],
[12.969, 0.00535826017, 97.41551581630],
[13.891, 4.76435441820, 0.26063243090],
[13.729, 2.32306473850, 38.24491022240],
[10.714, 6.18129683877, 115.88357962170],
[11.610, 4.61712859898, 178.78939652260],
[11.257, 0.79300245838, 42.32582133180],
[14.500, 5.44690193314, 44.07092647100],
[11.534, 5.26580538005, 160.93896579860],
[13.355, 5.20849186729, 32.45577723550],
[13.658, 2.15687632802, 476.43131808350],
[13.782, 3.47865209163, 38.02116105320],
[12.714, 2.09462988855, 20.49505323490],
[13.257, 5.15138524813, 103.09277421860],
[9.715, 0.74597883480, 918.15612033280],
[10.340, 5.38977407079, 222.86032299360],
[13.357, 5.89635739027, 748.09786996330],
[12.632, 1.20306997433, 16.15350969460],
[11.437, 1.58444114292, 495.49008271990],
[11.424, 4.74142930795, 487.62577619370],
[9.098, 5.19932138822, 118.02244363580],
[9.336, 0.97313630925, 662.53120356300],
[9.827, 4.48170250645, 505.78502345840],
[8.585, 0.20375451897, 944.98282327580],
[8.875, 5.53111742265, 17.52610781830],
[9.957, 4.03258125243, 169.53698550770],
[11.506, 3.11649121817, 17.63798240290],
[9.818, 5.20376439002, 1.59634729290],
[10.160, 3.74441320429, 457.61767951300],
[8.661, 0.31247523804, 1440.73353842660],
[8.496, 1.06445636872, 55.77101804070],
[11.162, 1.92907800408, 564.85505531580],
[8.057, 0.31116345866, 377.41945497430],
[9.851, 4.23328578127, 418.26080359780],
[7.938, 2.40417397694, 488.37653571910],
[9.894, 0.63707319139, 183.76407950930],
[9.913, 3.94049519088, 441.26722786230],
[7.867, 3.87469522964, 494.73932319450],
[7.589, 3.15909316566, 416.77633088950],
[8.496, 5.38968698704, 104.00779795530],
[9.716, 3.06038536864, 166.56804009110],
[9.377, 0.56416645296, 673.31627139600],
[8.771, 5.24534141981, 1057.89745748090],
[7.990, 1.55726966638, 59.28248017850],
[9.090, 4.32953439022, 29.74746424980],
[9.667, 5.89033222679, 358.40887444770],
[7.209, 2.29464803358, 79.18683258240],
[8.062, 0.44458003524, 19.01058052660],
[8.254, 3.47304582051, 156.15547927360],
[9.804, 6.06393995615, 784.74643289280],
[8.516, 5.99060386955, 180.79513409270],
[8.090, 1.38588221442, 1654.03263386460],
[9.074, 4.03971490460, 1017.05610885740],
[6.908, 1.41919832926, 178.34745353790],
[8.230, 2.53750470473, 518.38463239980],
[8.594, 5.29104206063, 457.87831194390],
[6.769, 5.43380191356, 171.98466606250],
[8.571, 0.35876828441, 636.66770846650],
[8.995, 1.36992508507, 6209.77872413240],
[6.641, 2.92327140872, 0.04818410980],
[9.278, 3.80308677009, 25558.21217647960],
[6.567, 4.01934954352, 0.11187458460],
[6.441, 4.28250687347, 36.12729806770],
[7.257, 4.09776235307, 326.86812094890],
[8.384, 5.49363770202, 532.61172640140],
[7.471, 4.62144262894, 526.98265210890],
[7.500, 0.61545750834, 485.92885516430],
[7.716, 1.04880632264, 525.23754696970],
[8.504, 2.79350586429, 10139.98842035200],
[7.466, 5.07942174095, 157.63995198190],
[7.186, 6.22833818429, 77.22927912210],
[7.784, 1.89308880453, 984.60033162190],
[6.513, 0.07498932215, 79.88940799800],
[6.077, 2.96673519667, 36.69674703930],
[7.706, 5.70632580790, 209.10630974400],
[7.265, 4.94483532589, 131.40394986990],
[6.984, 2.53239305821, 497.18700374930],
[7.824, 2.31462643851, 513.07988101300],
[7.175, 3.69203633127, 524.01370669230],
[6.855, 0.14076801572, 283.62727588040],
[6.922, 3.36515011915, 438.29828244570],
[7.349, 3.50406958122, 500.15594916590],
[6.301, 0.14776691217, 608.87779767700],
[5.892, 4.24403528888, 4.66586644600],
[7.613, 5.14905171677, 259.50888592310],
[7.128, 5.92696788834, 482.95990974770],
[6.829, 1.01745137848, 1543.82631264520],
[5.981, 4.79954091087, 215.43795945210],
[5.526, 2.34003154732, 65.22037101170],
[6.817, 6.12162829690, 395.05743737720],
[5.369, 3.76855960849, 52099.54021187280],
[5.776, 5.61434462641, 987.56927703850],
[7.523, 5.60432148128, 2810.92146160520],
[7.329, 3.76815551582, 1512.80682400820],
[5.616, 2.13872867116, 145.63104387150],
[5.258, 0.30850836910, 36.60037881970],
[5.688, 1.82274388581, 1227.43444298860],
[5.658, 2.35049199704, 5.62907429250],
[6.135, 4.23390561816, 496.01134758170],
[5.128, 2.89050864873, 313.68355667090],
[6.472, 3.49494191669, 552.69738935910],
[4.983, 3.91958511552, 10135.53500222710],
[5.217, 0.40052635702, 319.31263096340],
[4.952, 1.42482088612, 49.17873590170],
[5.964, 5.70758449643, 309.79958751760],
[5.091, 6.00974510144, 1409.71404978960],
[5.205, 5.50271334510, 238.90195810360],
[4.800, 1.13450310670, 134.06407874580],
[4.943, 1.43051344597, 422.40540518200],
[5.604, 2.05669305961, 207.36120460480],
[6.310, 5.22966882627, 139.74133714810],
[4.772, 3.06668713747, 464.73122651380],
[4.919, 3.57280542629, 52175.80628314840],
[4.762, 5.90654311203, 838.96928775040],
[4.848, 0.77467099227, 1.69692102940],
[5.694, 0.77313415569, 709.96483432550],
[5.455, 0.90289242792, 208.84567731310],
[4.901, 3.79986913631, 15.49911838880],
[4.772, 0.15755140037, 39.50563376150],
[5.673, 2.68359159067, 1127.26243007680],
[5.477, 0.53123497431, 113.87784205160],
[5.077, 1.59268428609, 1547.97091422940],
[4.981, 1.44584050478, 1.27202438720],
[5.813, 5.85024085408, 57.25549074900],
[5.520, 5.06396698257, 421.22974901440],
[5.938, 0.96886308551, 6280.10690457480],
[5.206, 3.58003819370, 474.94684537520],
[5.256, 0.61005270999, 95.97922721780],
[5.531, 5.28764137194, 36.76043751410],
[6.158, 5.73176703797, 711.44930703380],
[5.003, 2.19048397989, 501.64042187420],
[5.150, 5.58407480282, 26049.77010593640],
[5.138, 4.55234158942, 670.91677495100],
[5.609, 4.37272759780, 52.80207262410],
[5.636, 2.39183054397, 10210.31660079440],
[4.512, 2.59978208967, 1234.54798998940],
[5.412, 4.58813638089, 179.61947792510],
[4.314, 3.38846714337, 142.66209845490],
[4.708, 5.23537414423, 3.62333672240],
[4.471, 3.94378336812, 12566.15169998280],
[5.296, 1.12249063176, 134.11226285560],
[4.188, 2.52490407427, 6205.32530600750],
[4.645, 1.90644271528, 13324.31667116140],
[4.502, 2.01956920977, 315.16802937920],
[5.346, 2.94804816223, 353.04043258610],
[4.177, 2.09489065926, 803.75701341940],
[5.296, 3.88249567974, 2118.76386037840],
[5.325, 4.28221258353, 477.91579079180],
[5.519, 0.09960891963, 600.01914553700],
[5.169, 0.59948596687, 6.90109867970],
[4.179, 0.14619703083, 6644.57629047010],
[4.490, 1.07042724999, 52139.15772021889],
[3.970, 6.13227798578, 1553.90880506260],
[3.970, 4.69887237362, 91.78644152380],
[4.234, 0.14478458924, 65.87476231750],
[5.183, 3.52837189306, 110.20632121940],
[5.259, 6.20809827528, 142.71028256470],
[3.869, 5.25125030487, 1558.05340664680],
[4.457, 2.10248126544, 487.10451133190],
[4.890, 1.83606790269, 46.51860702580],
[3.875, 5.60269278935, 385.49620982160],
[3.826, 1.30946706974, 2176.61005195840],
[4.591, 4.84657580441, 1337.64076420800],
[5.111, 1.18808079775, 981.63138620530],
[4.709, 1.40878215308, 52213.93931878620],
[3.891, 5.43661875415, 154.67100656530],
[4.145, 4.32505910718, 363.51668387840],
[4.441, 3.50158424570, 187.69623277240],
[3.703, 2.48768949613, 67.88049988760],
[4.094, 1.42347047260, 310.71461125430],
[3.681, 5.70552661143, 491.66980404140],
[4.787, 3.65822147476, 589.34595228860],
[4.020, 5.45643059988, 6641.60734505350],
[3.656, 0.57790726599, 491.44605487220],
[4.288, 3.35265955957, 203.21660302060],
[3.843, 4.61508898119, 1025.70231267630],
[3.767, 0.05292047125, 320.27583880990],
[4.632, 0.82011276589, 3265.83082813250],
[4.609, 5.25443775917, 296.15744885260],
[4.555, 5.30391170376, 26013.12154300690],
[3.556, 4.80267245336, 224.34479570190],
[4.859, 5.52756242256, 487.41332787260],
[3.626, 1.44624342082, 70.84944530420],
[4.302, 1.60914544159, 12529.50313705330],
[3.493, 4.75315651083, 12489.88562870720],
[3.722, 0.27433061822, 949.43624140070],
[4.234, 5.25112033465, 194.28851491140],
[3.451, 2.97409317928, 499.63468430410],
[4.796, 6.21059766333, 491.81856188770],
[3.639, 1.25605018211, 2603.20824283440],
[4.646, 5.71392540144, 321.76031151820],
[3.702, 2.08952561657, 491.03666459500],
[3.672, 2.87489628704, 497.49582029000],
[3.965, 1.05484988240, 75.74480641380],
[3.416, 0.68584132933, 305.08553696180],
[4.513, 4.38927002490, 425.11371816770],
[3.853, 0.61321572401, 12526.53419163670],
[3.788, 3.32221995840, 3140.01275492980],
[3.781, 5.58125317044, 1652.54816115630],
[3.903, 5.31609723466, 408.17831118040],
[3.945, 3.60558877407, 1589.07289528380],
[4.084, 0.83813879869, 52.36012963940],
[4.084, 3.50290269471, 23.90594163620],
[3.694, 1.03218855688, 481.47543703940],
[3.636, 5.31068934607, 141.48644228730],
[3.345, 3.94392179077, 20389.92252949249],
[4.639, 6.24618220184, 821.39499582230],
[3.934, 0.26992234338, 1655.51710657290],
[4.431, 2.48647437800, 549.72844394250],
[4.168, 5.39993754642, 236.50246165860],
[4.020, 0.07393243012, 52136.18877480229],
[4.055, 1.34004288978, 1054.92851206430],
[3.275, 0.98533127454, 1344.75431120880],
[3.213, 2.97105590703, 20386.95358407589],
[4.428, 0.06728869735, 491.29729702590],
[4.063, 0.06192838570, 6168.67674307800],
[3.804, 5.34897033476, 523.75307426140],
[3.917, 5.67905809516, 1131.19458333990],
[3.833, 0.87811168267, 52.69019803950],
[4.020, 2.69209723289, 1439.46151403940],
[4.373, 1.86209663434, 73.55775828990],
[3.159, 1.04693380342, 703.37255218650],
[3.116, 5.20159166840, 449.23210812500],
[3.258, 4.65131076542, 696.25900518570],
[3.427, 0.27003884843, 2389.90914739640],
[4.349, 0.07531141761, 20426.57109242200],
[3.383, 5.61838426864, 699.22795060230],
[3.305, 1.41666877290, 562.14674233010],
[3.297, 5.46677712589, 1442.21801113490],
[3.277, 2.71815883511, 980.14691349700],
[3.171, 4.49510885866, 1439.24906571830],
[4.175, 4.24327707038, 381.61224066830],
[3.155, 3.40776789576, 39.72938293070],
[4.112, 0.90309319273, 1087.69310584050],
[3.350, 5.27474671017, 80.71948940050],
[3.725, 1.52448613082, 1058.10990580200],
[3.650, 3.59798316565, 192.80404220310],
[3.837, 1.48519528444, 10098.88643929760],
[2.959, 1.23012121982, 2500.11546861580],
[3.330, 6.12470287875, 10172.18356515660],
[3.361, 4.31837298696, 492.07919431860],
[3.288, 3.14692435376, 347.36317418380],
[2.992, 5.01304660316, 175.21424391000],
[3.294, 2.52694043155, 1692.16566950240],
[2.984, 1.81780659890, 175.11787569040],
[3.013, 0.92957285991, 1515.77576942480],
[3.863, 5.46044928570, 332.80601178210],
[3.403, 1.10932483984, 987.30864460760],
[3.312, 0.67710158807, 977.48678462110],
[3.030, 1.77996261146, 156489.28581380739],
[3.605, 4.89955108152, 1043.88281180040],
[2.937, 0.60469671230, 990.22940591440],
[3.276, 4.26765608367, 1189.30140735080],
[2.966, 5.29808076929, 31.98269648350],
[2.994, 2.58599359402, 178.08682110700],
[3.905, 1.87748122254, 1158.28191871380],
[3.110, 3.09203517638, 235.93301268700],
[3.313, 2.70308129756, 604.47256366190],
[3.276, 1.24440460327, 874.65464283340],
[3.276, 5.58544609667, 950.92071410900],
[3.746, 0.33859914037, 913.96333463880],
[3.552, 3.07180917863, 240.38643081190],
[2.885, 6.01130634957, 1097.51496582700],
[3.643, 5.11977873355, 452.20105354160],
[2.768, 4.38396269009, 391.43410065480],
[2.776, 5.01821594830, 8.90683624980],
[2.990, 5.62911695857, 140.65636088480],
[2.761, 4.05534163807, 6283.07584999140],
[3.226, 4.76711354367, 6241.97386893700],
[3.748, 4.84009347869, 341.73409989130],
[2.752, 4.53621078796, 6206.80977871580],
[3.847, 2.40982343643, 26086.41866886590],
[2.727, 3.28234198801, 483.48117460950],
[2.884, 4.05452029151, 1.22384027740],
[2.702, 3.72061244391, 946.46729598410],
[2.723, 4.37517047024, 15.19030184810],
[2.847, 5.22951186538, 661.04673085470],
[2.680, 4.19379121323, 13.18456427800],
[3.269, 0.43119778520, 496.97455542820],
[3.489, 3.82213189319, 625.99451521810],
[3.757, 3.88223872147, 495.70253104100],
[2.872, 5.00345974886, 252.08652238160],
[3.742, 2.03372773652, 8.59801970910],
[3.172, 1.11135762382, 260.99335863140],
[3.341, 2.91360557418, 304.23420369990],
[2.915, 2.63627684599, 6681.22485339960],
[2.915, 1.43773625890, 6604.95878212400],
[2.629, 2.09824407450, 2713.41456405380],
[2.901, 3.33924800230, 515.46387109300],
[2.803, 1.23584865903, 6643.09181776180],
[3.045, 3.33515866438, 921.07688163960],
[2.699, 5.42597794650, 925.26966733360],
[2.808, 5.77870303237, 1024.21783996800],
[3.028, 3.75501312393, 511.59540830470],
[3.090, 2.49453093252, 14.66903698630],
[2.913, 4.83296711477, 515.93695184500],
[3.139, 5.99134254710, 570.74476203920],
[2.752, 3.08268180744, 853.19638175200],
[2.779, 3.74527347899, 494.00561001160],
[2.643, 1.99093797444, 470.21728845440],
[2.763, 4.01095972177, 448.97147569410],
[2.643, 5.24970673655, 249.94765836750],
[3.426, 4.73955481174, 1050.99635880120],
[2.573, 2.01267457287, 1514.29129671650],
[2.633, 1.63640090603, 170.71264167530],
[3.034, 4.48979734509, 560.71045373160],
[3.025, 5.51446170055, 369.45457471160],
[3.095, 4.01459691667, 1615.89959822680],
[2.490, 0.15301603966, 78187.44335344699],
[2.589, 0.79196093766, 1228.91891569690],
[3.143, 5.33170343283, 1542.34183993690],
[3.138, 4.50785484172, 461.76228109720],
[2.812, 3.74246594120, 2.00573757010],
[3.062, 4.88018345098, 227.96813242430],
[2.553, 4.85437812287, 488.84961647110],
[2.971, 1.27359129352, 530.91480537200],
[2.646, 3.64828423565, 335.77495719870],
[3.329, 2.71693827722, 171.02145821600],
[2.648, 0.60243117586, 70.58881287330],
[3.061, 5.05044834864, 378.64329525170],
[2.738, 4.75405645015, 151.26011816400],
[2.728, 5.89052930055, 213.95348674380],
[3.411, 2.24137878065, 734.45573129830],
[2.623, 0.54340876464, 1586.10394986720],
[3.169, 5.84871429991, 1049.51188609290],
[2.430, 2.34595493263, 450.45594840240],
[2.907, 5.58085498481, 597.57146498220],
[3.300, 0.94221473935, 58.17051448570],
[2.543, 5.30426930256, 419.48464387520],
[3.175, 2.32600231924, 339.28641933650],
[2.858, 2.36621678719, 32.50396134530],
[2.712, 5.79983621237, 1587.58842257550],
[3.340, 1.36950315448, 384.27236954420],
[3.301, 5.83023910521, 51.77517430280],
[2.415, 0.69446923670, 489.55219188670],
[2.736, 5.74320864965, 1167.84314626940],
[2.956, 5.22962139507, 199.85389872910],
[3.262, 0.01501002027, 1545.31078535350],
[2.506, 4.84043333582, 943.49835056750],
[3.240, 2.46676155925, 1016.79547642650],
[3.148, 4.62079057738, 233.53351624200],
[2.327, 4.10421417326, 70.11573212130],
[2.371, 4.79963943424, 271.14528701800],
[3.006, 3.66877796077, 1476.15826107870],
[2.537, 5.66681769885, 21.14944454070],
[3.006, 0.93048909480, 21.97952594320],
[3.033, 0.67157488690, 292.48592802040],
[2.344, 1.83547256266, 492.30868898220],
[3.117, 2.76268894894, 1473.18931566210],
[2.323, 2.88799980853, 533.62311835770],
[2.340, 4.44862573253, 490.80716993140],
[2.511, 0.99467349084, 266.10116806210],
[2.919, 4.75889516601, 1511.32235129990],
[2.493, 6.10541658597, 1225.94997028030],
[2.798, 3.06162629894, 419.74527630610],
[2.691, 3.20679023131, 463.50738623640],
[2.291, 5.81534758547, 246.97871295090],
[2.319, 6.05514281470, 525.75881183150],
[3.112, 0.89712836583, 314.90739694830],
[3.085, 5.84605938859, 1192.22216865760],
[2.897, 0.54747024257, 20350.30502114640],
[3.067, 2.22206306288, 248.46318565920],
[2.252, 0.87483094907, 61.02758531770],
[2.392, 3.62837597194, 439.19779981740],
[2.817, 2.73562306571, 16.67477455640],
[2.379, 6.17876088396, 467.65198782060],
[2.598, 4.82643304253, 384.58118608490],
[2.718, 1.01823841209, 215.95922431390],
[2.998, 1.09755715300, 1964.74724511890],
[2.884, 2.97813466834, 383.09671337660],
[2.231, 4.48841493844, 4.14460158420],
[2.203, 2.23336308907, 481.26298871830],
[2.260, 2.35404913660, 659.61044225620],
[2.491, 1.70236357070, 445.34813897170],
[3.041, 5.55577674116, 674.80074410430],
[2.289, 1.18497528002, 1552.42433235430],
[2.975, 0.48272389481, 1052.48083150950],
[2.339, 0.75318738767, 478.81530816350],
[3.011, 0.16359500858, 54.28654533240],
[2.820, 6.18522693724, 556.51766803760],
[2.266, 5.91286000054, 3.49021027840],
[2.231, 1.45038594906, 196.50670080260],
],
# R1
[
[236338.502, 0.70498011235, 38.13303563780],
[13220.279, 3.32015499895, 1.48447270830],
[8621.863, 6.21628951630, 35.16409022120],
[2701.740, 1.88140666779, 39.61750834610],
[2153.150, 5.16873840979, 76.26607127560],
[2154.735, 2.09431198086, 2.96894541660],
[1463.924, 1.18417031047, 33.67961751290],
[1603.165, 0.00000000000, 0.00000000000],
[1135.773, 3.91891199655, 36.64856292950],
[897.650, 5.24122933533, 388.46515523820],
[789.908, 0.53315484580, 168.05251279940],
[760.030, 0.02051033644, 182.27960680100],
[607.183, 1.07706500350, 1021.24889455140],
[571.622, 3.40060785432, 484.44438245600],
[560.790, 2.88685815667, 498.67147645760],
[490.190, 3.46830928696, 137.03302416240],
[264.093, 0.86220057976, 4.45341812490],
[270.526, 3.27355867939, 71.81265315070],
[203.524, 2.41820674409, 32.19514480460],
[155.438, 0.36537064534, 41.10198105440],
[132.766, 3.60157672619, 9.56122755560],
[93.626, 0.66670888163, 46.20979048510],
[83.317, 3.25992461673, 98.89998852460],
[72.205, 4.47717435693, 601.76425067620],
[68.983, 1.46326969479, 74.78159856730],
[86.953, 5.77228651853, 381.35160823740],
[68.717, 4.52563942435, 70.32818044240],
[64.724, 3.85477388838, 73.29712585900],
[68.377, 3.39509945953, 108.46121608020],
[53.375, 5.43650770516, 395.57870223900],
[44.453, 3.61409723545, 2.44768055480],
[41.243, 4.73866592865, 8.07675484730],
[48.331, 1.98568593981, 175.16605980020],
[41.744, 4.94257598763, 31.01948863700],
[44.102, 1.41744904844, 1550.93985964600],
[41.170, 1.41999374753, 490.07345674850],
[41.099, 4.86312637841, 493.04240216510],
[36.267, 5.30764043577, 312.19908396260],
[36.284, 0.38187812797, 77.75054398390],
[40.619, 2.27237172464, 529.69096509460],
[32.360, 5.91123007786, 5.93789083320],
[31.197, 2.70549944134, 1014.13534755060],
[32.730, 5.22147683115, 41.05379694460],
[36.079, 4.87817494829, 491.55792945680],
[30.181, 3.63273193845, 30.71067209630],
[29.991, 3.30769367603, 1028.36244155220],
[27.048, 1.77647060739, 44.72531777680],
[27.756, 4.55583165091, 7.11354700080],
[27.475, 0.97228280623, 33.94024994380],
[24.944, 3.10083391185, 144.14657116320],
[25.958, 2.99724758632, 60.76695288680],
[21.369, 4.71270048898, 278.25883401880],
[21.283, 0.68957829113, 251.43213107580],
[23.727, 5.12044184469, 176.65053250850],
[21.392, 0.86286397645, 4.19278569400],
[23.373, 1.64955088447, 173.68158709190],
[24.163, 3.56602004577, 145.10977900970],
[20.238, 5.61479765982, 24.11838995730],
[26.958, 4.14294870704, 453.42489381900],
[24.048, 1.00718363213, 213.29909543800],
[18.322, 1.98028683488, 72.07328558160],
[18.266, 6.17260374467, 189.39315380180],
[19.201, 4.65162168927, 106.97674337190],
[17.606, 1.60307551767, 62.25142559510],
[16.545, 1.69931816587, 357.44566660120],
[20.132, 3.29520553529, 114.39910691340],
[15.425, 4.38812302799, 25.60286266560],
[19.173, 2.20014267311, 343.21857259960],
[15.077, 3.66802659382, 0.52126486180],
[14.029, 0.55336333290, 129.91947716160],
[13.361, 5.85751083720, 68.84370773410],
[15.357, 4.20731277007, 567.82400073240],
[12.746, 3.52815836608, 477.33083545520],
[11.724, 5.57647263460, 31.23193695810],
[11.533, 0.89138506506, 594.65070367540],
[10.508, 4.35552732772, 32.71640966640],
[10.826, 5.21826226871, 26.82670294300],
[10.085, 1.98102855874, 40.58071619260],
[10.518, 5.27281360238, 2.92076130680],
[9.207, 0.50092534158, 64.95973858080],
[9.231, 0.68180977710, 160.93896579860],
[8.735, 5.80657503476, 6.59228213900],
[10.114, 4.51164596694, 28.57180808220],
[10.392, 5.18877536013, 42.58645376270],
[9.873, 3.76512158080, 181.75834193920],
[8.350, 2.82449631025, 43.24084506850],
[9.838, 1.49438763600, 47.69426319340],
[7.645, 4.07503370297, 389.94962794650],
[8.004, 2.78082277326, 505.78502345840],
[7.440, 2.35731983047, 11.04570026390],
[7.342, 1.62279119952, 135.54855145410],
[9.450, 0.27241261915, 426.59819087600],
[7.192, 0.82841201068, 911.04257333200],
[6.979, 1.86753914872, 206.18554843720],
[6.874, 0.83802906828, 82.85835341460],
[7.897, 1.86554246391, 38.65430049960],
[6.729, 3.98338053636, 12.53017297220],
[6.357, 0.90093123522, 487.36514376280],
[6.720, 1.33936040700, 220.41264243880],
[7.695, 5.13312500855, 23.90594163620],
[7.059, 5.99832463494, 639.89728631400],
[8.302, 3.85960902325, 37.61177077600],
[6.412, 2.41743702679, 1059.38193018920],
[6.751, 1.96860894470, 45.24658263860],
[6.431, 4.07813226506, 35.68535508300],
[5.517, 3.81325790890, 815.06334611420],
[5.562, 0.41619602150, 563.63121503840],
[6.115, 2.10934525342, 697.74347789400],
[6.216, 4.79301628209, 143.62530630140],
[5.346, 3.13071964722, 386.98068252990],
[5.245, 6.06245070403, 171.23390653710],
[5.129, 0.79394555531, 179.09821306330],
[5.168, 4.73765992885, 522.57741809380],
[6.422, 0.64684316894, 350.33211960040],
[5.006, 2.37645082899, 77.22927912210],
[5.005, 4.70632786971, 460.53844081980],
[5.167, 5.20246616570, 446.31134681820],
[5.119, 2.17338058771, 494.73932319450],
[5.025, 4.21265519856, 536.80451209540],
[4.722, 6.22814313946, 63.73589830340],
[5.125, 5.38138329172, 179.31066138440],
[4.918, 4.09031782903, 488.37653571910],
[4.652, 5.10765073368, 274.06604832480],
[4.711, 5.56542374115, 42.32582133180],
[4.459, 1.30784829830, 69.36497259590],
[5.485, 3.88088464259, 218.40690486870],
[4.416, 3.05353893868, 27.08733537390],
[4.559, 4.92224120952, 285.37238101960],
[4.393, 4.18047835584, 5.41662597140],
[4.687, 2.21401153210, 1029.84691426050],
[4.644, 1.87902594973, 1433.61999142580],
[5.639, 3.05596737234, 983.11585891360],
[6.045, 5.68817982786, 351.81659230870],
[4.430, 3.37768805833, 377.41945497430],
[4.683, 2.14346624864, 97.41551581630],
[5.845, 4.62301099402, 1024.21783996800],
[4.536, 2.45860473853, 496.01134758170],
[4.398, 5.65312496227, 3.93215326310],
[4.287, 0.66340266603, 1012.65087484230],
[4.086, 0.14551174994, 385.28376150050],
[4.029, 5.98399329775, 178.34745353790],
[4.276, 3.68205082970, 348.84764689210],
[5.257, 3.75263242432, 379.86713552910],
[4.012, 0.42559540783, 104313.47953065898],
[4.025, 2.40645188238, 84.34282612290],
[3.957, 0.86846121055, 171.98466606250],
[3.961, 3.04953080906, 1017.31674128830],
[5.559, 0.77714806229, 1447.84708542740],
[5.071, 2.61075526868, 1536.71276564440],
[4.052, 5.00014006312, 391.64654897590],
[5.182, 4.73444634983, 382.83608094570],
[3.763, 4.29449373755, 313.68355667090],
[4.038, 2.82857942788, 1661.14618086540],
[4.067, 5.73169928960, 169.53698550770],
[3.841, 1.62580928420, 0.96320784650],
[3.901, 2.70874386576, 14.01464568050],
[3.721, 1.20062375429, 1026.87796884390],
[3.911, 3.01809123569, 100.38446123290],
[3.489, 4.28865448963, 1025.18104781450],
[3.714, 5.05021268365, 292.48592802040],
[3.816, 3.93084933114, 39.09624348430],
[3.988, 2.82832650224, 134.11226285560],
[3.745, 4.24728135115, 180.79513409270],
[3.836, 1.02685786071, 1018.27994913480],
[3.941, 5.21895739331, 183.76407950930],
[4.669, 4.38080962573, 1066.49547719000],
[3.780, 6.03723468132, 1022.73336725970],
[3.647, 3.98130320367, 608.87779767700],
[3.456, 5.54052355058, 846.08283475120],
[4.047, 3.71041480907, 1018.06750081370],
[3.865, 4.76002199091, 166.56804009110],
[3.629, 3.29053233846, 447.79581952650],
[3.564, 4.36703678321, 397.06317494730],
[3.304, 1.49289552229, 1505.69327700740],
[3.976, 2.42476188945, 106.01353552540],
[4.217, 4.21677652639, 1052.26838318840],
[3.294, 0.42088065654, 22.63391724900],
[3.615, 3.68096122231, 494.52687487340],
[3.230, 5.10786091356, 69.15252427480],
[3.280, 3.62226152032, 531.17543780290],
[3.337, 2.72502876320, 481.47543703940],
[3.187, 0.08677634706, 399.51085550210],
[3.389, 1.79454271219, 1519.92037100900],
[3.179, 3.40418030121, 423.62924545940],
[3.154, 3.69356460843, 470.21728845440],
[3.706, 2.79048710497, 462.02291352810],
[3.136, 4.38015969606, 385.49620982160],
[3.122, 0.48346644637, 79.18683258240],
[3.392, 0.48037804731, 521.09294538550],
[3.465, 0.93152295589, 2183.72359895920],
[3.735, 0.98809808606, 487.41332787260],
[3.998, 3.38773325131, 6283.07584999140],
[2.998, 2.61728063127, 487.62577619370],
[3.295, 2.53821501556, 4.66586644600],
[2.964, 3.66274645375, 495.49008271990],
[3.901, 1.65463523144, 210.33015002140],
[2.950, 1.99904237956, 872.90953769420],
[2.948, 2.90769224206, 391.43410065480],
[2.971, 0.31626092637, 5.10780943070],
[3.085, 0.95725590904, 109.94568878850],
[2.995, 3.34433305798, 394.09422953070],
[3.126, 5.89472116854, 105.49227066360],
[3.904, 3.01022809543, 556.51766803760],
[3.388, 6.24936444215, 535.32003938710],
[2.930, 6.15005257333, 164.12035953630],
[3.267, 4.19718045293, 518.38463239980],
[3.946, 2.88842759670, 151.26011816400],
[3.076, 6.04134449219, 142.14083359310],
[2.823, 0.60712626756, 214.78356814630],
[2.917, 2.74502617182, 138.51749687070],
[3.347, 6.09373507569, 6246.42728706190],
[3.659, 5.12211619716, 79.23501669220],
[3.010, 0.24656411754, 91.78644152380],
[2.861, 6.17465663902, 422.40540518200],
[2.989, 2.31620917965, 485.92885516430],
[3.088, 2.29186342974, 110.20632121940],
[3.030, 3.69866149100, 532.61172640140],
[3.020, 2.36422658177, 290.21955801940],
[3.170, 1.23078934548, 10176.63698328150],
[2.652, 3.35836234807, 148.07872442630],
[2.673, 6.03366372927, 196.50670080260],
[2.630, 0.46957619348, 1970.42450352120],
[2.599, 4.86022081674, 439.19779981740],
[2.878, 2.61946597178, 488.58898404020],
[2.720, 1.71836225398, 364.55921360200],
[3.333, 3.25126857354, 30.05628079050],
[3.053, 2.49346960035, 6243.45834164530],
[3.062, 6.23776299963, 419.48464387520],
[2.786, 0.83078219939, 497.18700374930],
[2.834, 3.52926079424, 457.87831194390],
[2.932, 1.80245810977, 500.15594916590],
[3.030, 5.10152500393, 367.97010200330],
[2.956, 5.76230870725, 986.08480433020],
[3.116, 2.20042242739, 495.70253104100],
[2.554, 0.65945973992, 67.35923502580],
[2.901, 3.91891656185, 10173.66803786490],
[2.840, 1.34453183591, 482.95990974770],
[2.458, 1.20012815574, 489.11024890200],
[2.556, 3.86921927085, 487.10451133190],
[2.614, 1.51881085312, 463.50738623640],
[2.386, 4.58400538443, 615.99134467780],
[2.438, 5.19827220476, 501.11915701240],
[2.537, 1.64802783144, 519.60847267720],
[2.444, 3.87859489652, 185.24855221760],
[2.795, 4.04265752580, 255.05546779820],
[2.895, 3.26202698812, 1646.91908686380],
[2.225, 5.75197574692, 605.95703637020],
[2.324, 3.99503920129, 481.26298871830],
[2.962, 1.74151265966, 2080.63082474060],
[2.621, 1.74442251671, 35.21227433100],
],
# R2
[
[4247.412, 5.89910679117, 38.13303563780],
[217.570, 0.34581829080, 1.48447270830],
[163.025, 2.23872947130, 168.05251279940],
[156.285, 4.59414467342, 182.27960680100],
[117.940, 5.10295026024, 484.44438245600],
[112.429, 1.19000583596, 498.67147645760],
[127.141, 2.84786298079, 35.16409022120],
[99.467, 3.41578558739, 175.16605980020],
[64.814, 3.46214064840, 388.46515523820],
[77.286, 0.01659281785, 491.55792945680],
[49.509, 4.06995509133, 76.26607127560],
[39.330, 6.09521855958, 1021.24889455140],
[36.450, 5.17130059988, 137.03302416240],
[37.080, 5.97288967681, 2.96894541660],
[30.484, 3.58259801313, 33.67961751290],
[21.099, 0.76843555176, 36.64856292950],
[13.886, 3.59248623971, 395.57870223900],
[13.117, 5.09263515697, 98.89998852460],
[11.379, 1.18060018898, 381.35160823740],
[9.132, 2.34787658568, 601.76425067620],
[8.527, 5.25134685897, 2.44768055480],
[8.136, 4.96270726986, 4.45341812490],
[7.417, 4.46775409796, 189.39315380180],
[7.225, 1.92287508629, 9.56122755560],
[7.289, 1.65519525780, 1028.36244155220],
[8.076, 5.84268048311, 220.41264243880],
[9.654, 0.00000000000, 0.00000000000],
[6.554, 0.69397520733, 144.14657116320],
[7.782, 1.14341656235, 1059.38193018920],
[5.665, 6.25378258571, 74.78159856730],
[5.628, 5.23383764266, 46.20979048510],
[5.523, 4.59041448911, 1014.13534755060],
[5.177, 5.23116646157, 477.33083545520],
[5.503, 3.49522319102, 183.76407950930],
[4.878, 3.52934357721, 39.61750834610],
[4.787, 2.08260524745, 41.10198105440],
[5.055, 0.19949888617, 166.56804009110],
[4.751, 1.18054948270, 169.53698550770],
[4.747, 1.50608965076, 73.29712585900],
[6.113, 6.18326155595, 71.81265315070],
[4.606, 3.91970908886, 587.53715667460],
[5.756, 2.23667359233, 176.65053250850],
[4.536, 2.84337336954, 7.11354700080],
[4.338, 0.51553847388, 446.31134681820],
[3.891, 0.26338839265, 1550.93985964600],
[4.465, 3.01487041298, 129.91947716160],
[3.727, 2.37977930658, 160.93896579860],
[3.840, 3.79290381880, 111.43016149680],
[4.142, 1.70293820961, 983.11585891360],
[3.296, 1.07748822909, 505.78502345840],
[4.008, 0.30663868827, 494.73932319450],
[3.974, 5.97351783840, 488.37653571910],
[3.925, 4.85736421123, 60.76695288680],
[2.966, 2.01608546009, 822.17689311500],
[3.972, 1.07780371834, 374.23806123660],
[3.843, 5.23002047199, 350.33211960040],
[2.848, 6.17799253802, 704.85702489480],
[3.527, 0.79317138165, 274.06604832480],
[2.828, 1.32275775835, 386.98068252990],
[2.773, 5.37132330836, 251.43213107580],
[3.113, 5.12622288690, 426.59819087600],
[3.344, 5.61433537548, 1124.34166877000],
[2.597, 0.67759426519, 312.19908396260],
[2.581, 3.55847612121, 567.82400073240],
[2.578, 1.45603792456, 1035.47598855300],
[2.541, 5.19427579702, 1227.43444298860],
[2.510, 4.12148891512, 171.23390653710],
[2.511, 2.71606957319, 179.09821306330],
[2.342, 0.96469916587, 1019.76442184310],
[2.500, 0.70282276030, 707.77778620160],
[2.480, 4.59623030219, 693.55069220000],
[2.253, 0.74334306011, 976.00231191280],
],
# R3
[
[166.297, 4.55243893489, 38.13303563780],
[22.380, 3.94830879358, 168.05251279940],
[21.348, 2.86296778794, 182.27960680100],
[16.233, 0.54226725872, 484.44438245600],
[15.623, 5.75702251906, 498.67147645760],
[11.867, 4.40280192710, 1.48447270830],
[6.448, 5.19003066847, 31.01948863700],
[3.655, 5.91335292846, 1007.02180054980],
[3.681, 1.62865545676, 388.46515523820],
[3.198, 0.70197118575, 1558.05340664680],
[3.243, 1.88035665980, 522.57741809380],
[3.269, 2.94301808574, 76.26607127560],
[2.688, 1.87062743473, 402.69224923980],
[3.246, 0.79381356193, 536.80451209540],
[2.650, 5.76858449026, 343.21857259960],
[2.644, 4.64542905401, 500.15594916590],
[2.541, 4.79217120822, 482.95990974770],
[2.523, 1.72869889780, 395.57870223900],
[2.690, 2.21096415618, 446.31134681820],
[2.355, 5.77381398401, 485.92885516430],
[2.874, 6.19643340540, 815.06334611420],
[2.278, 3.66579603119, 497.18700374930],
],
# R4
[
[4.227, 2.40375758563, 477.33083545520],
[4.333, 0.10459484545, 395.57870223900],
[3.545, 4.78431259422, 1028.36244155220],
[3.154, 3.88192942366, 505.78502345840],
[3.016, 1.03609346831, 189.39315380180],
[2.294, 1.10879658603, 182.27960680100],
[2.295, 5.67776133184, 168.05251279940],
],
]
"""This table contains Neptune's periodic terms (all of them) from the
planetary theory VSOP87 for the radius vector at the equinox of date (taken
from the 'D' solution). In Meeus' book a shortened version can be found on
page 454."""
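# A sketch of how these rows are used: each entry is a triplet (A, B, C) for one
# periodic term A*cos(B + C*t), with t the time in Julian millennia from
# J2000.0. The sub-tables R0..R4 are weighted by successive powers of t, i.e.
# R ~ sum_k t**k * sum_i A_ki*cos(B_ki + C_ki*t); the overall scaling of the
# coefficients is assumed to be handled inside geometric_vsop_pos().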
ORBITAL_ELEM = [
[304.348665, 219.8833092, 0.00030882, 0.000000018], # L
[30.110386869, -0.0000001663, 0.00000000069, 0.0], # a
[0.00945575, 0.000006033, 0.0, -0.00000000005], # e
[1.769953, -0.0093082, -0.00000708, 0.000000027], # i
[131.748057, 1.1022039, 0.00025952, -0.000000637], # Omega
[48.120276, 1.4262957, 0.00038434, 0.00000002] # pie
]
"""This table contains the parameters to compute Neptune's orbital elements for
the mean equinox of date. Based in Table 31.A, page 213"""
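# Each row above holds [a0, a1, a2, a3] for one orbital element, presumably
# evaluated as a cubic polynomial a0 + a1*T + a2*T**2 + a3*T**3, with T the
# time in Julian centuries from J2000.0 (the usual convention in Meeus).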
ORBITAL_ELEM_J2000 = [
[304.348665, 218.4862002, 0.00000059, -0.000000002], # L
[1.769953, 0.0002256, 0.00000023, 0.0], # i
[131.748057, -0.0061651, -0.00000219, -0.000000078], # Omega
[48.120276, 0.0291866, 0.0000761, 0.0] # pie
]
"""This table contains the parameters to compute Neptune's orbital elements for
the standard equinox J2000.0. Based on Table 31.B, page 215"""
class Neptune(object):
"""
Class Neptune models that planet.
"""
@staticmethod
def geometric_heliocentric_position(epoch, tofk5=True):
"""This method computes the geometric heliocentric position of planet
Neptune for a given epoch, using the VSOP87 theory.
:param epoch: Epoch to compute Neptune position, as an Epoch object
:type epoch: :py:class:`Epoch`
:param tofk5: Whether the small correction to convert to the FK5
    system will be applied
:type tofk5: bool
:returns: A tuple with the heliocentric longitude and latitude (as
:py:class:`Angle` objects), and the radius vector (as a float,
in astronomical units), in that order
:rtype: tuple
:raises: TypeError if input values are of wrong type.
>>> epoch = Epoch(2018, 10, 27.0)
>>> l, b, r = Neptune.geometric_heliocentric_position(epoch)
>>> print(round(l.to_positive(), 4))
345.3776
>>> print(round(b, 4))
-0.9735
>>> print(round(r, 5))
29.93966
"""
return geometric_vsop_pos(epoch, VSOP87_L, VSOP87_B, VSOP87_R, tofk5)
@staticmethod
def apparent_heliocentric_position(epoch):
"""This method computes the apparent heliocentric position of planet
Neptune for a given epoch, using the VSOP87 theory.
:param epoch: Epoch to compute Neptune position, as an Epoch object
:type epoch: :py:class:`Epoch`
:returns: A tuple with the heliocentric longitude and latitude (as
:py:class:`Angle` objects), and the radius vector (as a float,
in astronomical units), in that order
:rtype: tuple
:raises: TypeError if input values are of wrong type.
"""
return apparent_vsop_pos(epoch, VSOP87_L, VSOP87_B, VSOP87_R)
@staticmethod
def orbital_elements_mean_equinox(epoch):
"""This method computes the orbital elements of Neptune for the mean
equinox of the date for a given epoch.
:param epoch: Epoch to compute orbital elements, as an Epoch object
:type epoch: :py:class:`Epoch`
:returns: A tuple containing the following six orbital elements:
- Mean longitude of the planet (Angle)
- Semimajor axis of the orbit (float, astronomical units)
- eccentricity of the orbit (float)
- inclination on the plane of the ecliptic (Angle)
- longitude of the ascending node (Angle)
- argument of the perihelion (Angle)
:rtype: tuple
:raises: TypeError if input values are of wrong type.
>>> epoch = Epoch(2065, 6, 24.0)
>>> l, a, e, i, ome, arg = Neptune.orbital_elements_mean_equinox(epoch)
>>> print(round(l, 6))
88.321947
>>> print(round(a, 8))
30.11038676
>>> print(round(e, 7))
0.0094597
>>> print(round(i, 6))
1.763855
>>> print(round(ome, 5))
132.46986
>>> print(round(arg, 6))
-83.415521
"""
return orbital_elements(epoch, ORBITAL_ELEM, ORBITAL_ELEM)
@staticmethod
def orbital_elements_j2000(epoch):
"""This method computes the orbital elements of Neptune for the
standard equinox J2000.0 for a given epoch.
:param epoch: Epoch to compute orbital elements, as an Epoch object
:type epoch: :py:class:`Epoch`
:returns: A tuple containing the following six orbital elements:
- Mean longitude of the planet (Angle)
- Semimajor axis of the orbit (float, astronomical units)
- eccentricity of the orbit (float)
- inclination on the plane of the ecliptic (Angle)
- longitude of the ascending node (Angle)
- argument of the perihelion (Angle)
:rtype: tuple
:raises: TypeError if input values are of wrong type.
>>> epoch = Epoch(2065, 6, 24.0)
>>> l, a, e, i, ome, arg = Neptune.orbital_elements_j2000(epoch)
>>> print(round(l, 6))
87.407029
>>> print(round(a, 8))
30.11038676
>>> print(round(e, 7))
0.0094597
>>> print(round(i, 6))
1.770101
>>> print(round(ome, 5))
131.74402
>>> print(round(arg, 6))
-83.6046
"""
return orbital_elements(epoch, ORBITAL_ELEM, ORBITAL_ELEM_J2000)
@staticmethod
def geocentric_position(epoch):
"""This method computes the geocentric position of Neptune (right
ascension and declination) for the given epoch, as well as the
elongation angle.
:param epoch: Epoch to compute geocentric position, as an Epoch object
:type epoch: :py:class:`Epoch`
:returns: A tuple containing the right ascension, the declination and
the elongation angle as Angle objects
:rtype: tuple
:raises: TypeError if input value is of wrong type.
>>> epoch = Epoch(1992, 12, 20.0)
>>> ra, dec, elon = Neptune.geocentric_position(epoch)
>>> print(ra.ra_str(n_dec=1))
19h 17' 14.5''
>>> print(dec.dms_str(n_dec=1))
-21d 34' 15.1''
>>> print(elon.dms_str(n_dec=1))
19d 44' 59.6''
"""
# First check that input value is of correct types
if not isinstance(epoch, Epoch):
raise TypeError("Invalid input type")
# Compute the heliocentric position of Neptune
l, b, r = Neptune.geometric_heliocentric_position(epoch, tofk5=False)
# Compute the heliocentric position of the Earth
l0, b0, r0 = Earth.geometric_heliocentric_position(epoch, tofk5=False)
# Convert to radians
lr = l.rad()
br = b.rad()
l0r = l0.rad()
b0r = b0.rad()
# Compute first iteration
x = r * cos(br) * cos(lr) - r0 * cos(b0r) * cos(l0r)
y = r * cos(br) * sin(lr) - r0 * cos(b0r) * sin(l0r)
z = r * sin(br) - r0 * sin(b0r)
delta = sqrt(x * x + y * y + z * z)
tau = 0.0057755183 * delta
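# The factor 0.0057755183 is the light-time in days per astronomical unit, so
# tau is the light travel time (in days) corresponding to the distance delta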
# Adjust the epoch for light-time
epoch -= tau
# Compute again Neptune coordinates with this correction
l, b, r = Neptune.geometric_heliocentric_position(epoch, tofk5=False)
# Compute second iteration
lr = l.rad()
br = b.rad()
x = r * cos(br) * cos(lr) - r0 * cos(b0r) * cos(l0r)
y = r * cos(br) * sin(lr) - r0 * cos(b0r) * sin(l0r)
z = r * sin(br) - r0 * sin(b0r)
# Compute longitude and latitude
lamb = atan2(y, x)
beta = atan2(z, sqrt(x * x + y * y))
# Now, let's compute the aberration effect
t = (epoch - JDE2000) / 36525
e = 0.016708634 + t * (-0.000042037 - t * 0.0000001267)
pie = 102.93735 + t * (1.71946 + t * 0.00046)
pie = radians(pie)
lon = l0 + 180.0
lon = lon.rad()
k = 20.49552  # The constant of aberration, in arcseconds
deltal1 = k * (-cos(lon - lamb) + e * cos(pie - lamb)) / cos(beta)
deltab1 = -k * sin(beta) * (sin(lon - lamb) - e * sin(pie - lamb))
deltal1 = Angle(0, 0, deltal1)
deltab1 = Angle(0, 0, deltab1)
# Correction to FK5 system
lamb = Angle(lamb, radians=True)
lamb = lamb.to_positive()
beta = Angle(beta, radians=True)
l_prime = lamb - t * (1.397 + t * 0.00031)
deltal2 = Angle(0, 0, -0.09033)
a = 0.03916 * (cos(l_prime.rad()) + sin(l_prime.rad()))
a = a * tan(b.rad())
deltal2 += Angle(0, 0, a)
deltab2 = 0.03916 * (cos(l_prime.rad()) - sin(l_prime.rad()))
deltab2 = Angle(0, 0, deltab2)
# Apply the corrections
lamb = lamb + deltal1 + deltal2
beta = beta + deltab1 + deltab2
# Correction for nutation
dpsi = nutation_longitude(epoch)
lamb += dpsi
e = true_obliquity(epoch)
ra, dec = ecliptical2equatorial(lamb, beta, e)
# Let's compute the elongation angle
lons, lats, rs = Sun.apparent_geocentric_position(epoch)
lambr = lamb.rad()
lsr = lons.rad()
betar = beta.rad()
elon = acos(cos(betar) * cos(lambr - lsr))
elon = Angle(elon, radians=True)
return ra, dec, elon
@staticmethod
def conjunction(epoch):
"""This method computes the time of the conjunction closest to the
given epoch.
:param epoch: Epoch close to the desired conjunction
:type epoch: :py:class:`Epoch`
:returns: The time when the conjunction happens, as an Epoch
:rtype: :py:class:`Epoch`
:raises: TypeError if input value is of wrong type.
:raises: ValueError if input epoch outside the -2000/4000 range.
>>> epoch = Epoch(1993, 10, 1.0)
>>> conj = Neptune.conjunction(epoch)
>>> y, m, d = conj.get_date()
>>> print(y)
1994
>>> print(m)
1
>>> print(round(d, 4))
11.3057
"""
# First check that input value is of correct types
if not isinstance(epoch, Epoch):
raise TypeError("Invalid input type")
# Check that the input epoch is within valid range
y = epoch.year()
if y < -2000.0 or y > 4000.0:
raise ValueError("Epoch outside the -2000/4000 range")
# Set some specific constants for Neptune's conjunction
a = 2451569.379
b = 367.486703
m0 = 21.5569
m1 = 2.194998
k = round((365.2425 * y + 1721060.0 - a) / b)
jde0 = a + k * b
m = m0 + k * m1
m = Angle(m).to_positive()
m = m.rad()
t = (jde0 - 2451545.0) / 36525.0
# Compute a couple of auxiliary angles
ee = 207.83 + 8.51 * t
gg = 276.74 + 209.98 * t
# Convert to radians
ee = Angle(ee).rad()
gg = Angle(gg).rad()
corr = (0.0168
+ sin(m) * (-2.5606 + t * (0.0088 + t * 0.00002))
+ cos(m) * (-0.8611 + t * (-0.0037 + t * 0.00002))
+ sin(2.0 * m) * (0.0118 + t * (-0.0004 + t * 0.00001))
+ cos(2.0 * m) * (0.0307 - t * 0.0003)
+ cos(ee) * (-0.5964)
+ cos(gg) * (0.0728))
to_return = jde0 + corr
return Epoch(to_return)
@staticmethod
def opposition(epoch):
"""This method computes the time of the opposition closest to the given
epoch.
:param epoch: Epoch close to the desired opposition
:type epoch: :py:class:`Epoch`
:returns: The time when the opposition happens, as an Epoch
:rtype: :py:class:`Epoch`
:raises: TypeError if input value is of wrong type.
:raises: ValueError if input epoch outside the -2000/4000 range.
>>> epoch = Epoch(1846, 8, 1)
>>> oppo = Neptune.opposition(epoch)
>>> y, m, d = oppo.get_date()
>>> print(y)
1846
>>> print(m)
8
>>> print(round(d, 4))
20.1623
"""
# First check that input value is of correct types
if not isinstance(epoch, Epoch):
raise TypeError("Invalid input type")
# Check that the input epoch is within valid range
y = epoch.year()
if y < -2000.0 or y > 4000.0:
raise ValueError("Epoch outside the -2000/4000 range")
# Set some specific constants for Neptune's opposition
a = 2451753.122
b = 367.486703
m0 = 202.6544
m1 = 2.194998
k = round((365.2425 * y + 1721060.0 - a) / b)
jde0 = a + k * b
m = m0 + k * m1
m = Angle(m).to_positive()
m = m.rad()
t = (jde0 - 2451545.0) / 36525.0
# Compute a couple of auxiliary angles
ee = 207.83 + 8.51 * t
gg = 276.74 + 209.98 * t
# Convert to radians
ee = Angle(ee).rad()
gg = Angle(gg).rad()
corr = (-0.014 + t * t * 0.00001
+ sin(m) * (-1.3486 + t * (0.001 + t * 0.00001))
+ cos(m) * (0.8597 + t * 0.0037)
+ sin(2.0 * m) * (-0.0082 + t * (-0.0002 + t * 0.00001))
+ cos(2.0 * m) * (0.0037 - t * 0.0003)
+ cos(ee) * (-0.5964)
+ cos(gg) * (0.0728))
to_return = jde0 + corr
return Epoch(to_return)
@staticmethod
def magnitude(sun_dist, earth_dist):
"""This function computes the approximate magnitude of Neptune.
:param sun_dist: Distance from Neptune to Sun, in Astronomical Units
:type sun_dist: float
:param earth_dist: Distance from Neptune to Earth, in Astronomical Units
:type earth_dist: float
:returns: Neptune's magnitude
:rtype: float
:raises: TypeError if input values are of wrong type.
"""
if not (isinstance(sun_dist, float) and isinstance(earth_dist, float)):
raise TypeError("Invalid input types")
m = -7.05 + 5.0 * log10(sun_dist * earth_dist)
return round(m, 1)
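# Rough sanity check (illustrative only): with Neptune near opposition,
# sun_dist ~ 30.1 AU and earth_dist ~ 29.1 AU give
# -7.05 + 5*log10(30.1*29.1) ~ +7.7, consistent with Neptune's typical
# apparent magnitude of about 7.8-8.0.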
def main():
# Let's define a small helper function
def print_me(msg, val):
print("{}: {}".format(msg, val))
# Let's show some uses of Neptune class
print("\n" + 35 * "*")
print("*** Use of Neptune class")
print(35 * "*" + "\n")
# Let's now compute the heliocentric position for a given epoch
epoch = Epoch(2018, 10, 27.0)
lon, lat, r = Neptune.geometric_heliocentric_position(epoch)
print_me("Geometric Heliocentric Longitude", lon.to_positive())
print_me("Geometric Heliocentric Latitude", lat)
print_me("Radius vector", r)
print("")
# Compute the geocentric position for 1992/12/20:
epoch = Epoch(1992, 12, 20.0)
ra, dec, elon = Neptune.geocentric_position(epoch)
print_me("Right ascension", ra.ra_str(n_dec=1))
print_me("Declination", dec.dms_str(n_dec=1))
print_me("Elongation", elon.dms_str(n_dec=1))
print("")
# Print mean orbital elements for Neptune at 2065.6.24
epoch = Epoch(2065, 6, 24.0)
l, a, e, i, ome, arg = Neptune.orbital_elements_mean_equinox(epoch)
print_me("Mean longitude of the planet", round(l, 6)) # 88.321947
print_me("Semimajor axis of the orbit (UA)", round(a, 8)) # 30.11038676
print_me("Eccentricity of the orbit", round(e, 7)) # 0.0094597
print_me("Inclination on plane of the ecliptic", round(i, 6)) # 1.763855
print_me("Longitude of the ascending node", round(ome, 5)) # 132.46986
print_me("Argument of the perihelion", round(arg, 6)) # -83.415521
print("")
# Compute the time of the conjunction close to 1993/10/1
epoch = Epoch(1993, 10, 1.0)
conj = Neptune.conjunction(epoch)
y, m, d = conj.get_date()
d = round(d, 4)
date = "{}/{}/{}".format(y, m, d)
print_me("Conjunction date", date)
# Compute the time of the opposition close to 1846/8/1
epoch = Epoch(1846, 8, 1)
oppo = Neptune.opposition(epoch)
y, m, d = oppo.get_date()
d = round(d, 4)
date = "{}/{}/{}".format(y, m, d)
print_me("Opposition date", date)
if __name__ == "__main__":
main()
| lgpl-3.0 | 5,247,640,929,068,069,000 | 44.792171 | 79 | 0.60751 | false |
bdestombe/flopy-1 | flopy/modflow/mflpf.py | 1 | 25194 | """
mflpf module. Contains the ModflowLpf class. Note that the user can access
the ModflowLpf class as `flopy.modflow.ModflowLpf`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?lpf.htm>`_.
"""
import sys
import numpy as np
from .mfpar import ModflowPar as mfpar
from ..pakbase import Package
from ..utils import Util2d, Util3d, read1d
class ModflowLpf(Package):
"""
MODFLOW Layer Property Flow Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ipakcb : int
A flag that is used to determine if cell-by-cell budget data should be
saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
(default is None)
hdry : float
Is the head that is assigned to cells that are converted to dry during
a simulation. Although this value plays no role in the model
calculations, it is useful as an indicator when looking at the
resulting heads that are output from the model. HDRY is thus similar
to HNOFLO in the Basic Package, which is the value assigned to cells
that are no-flow cells at the start of a model simulation.
(default is -1.e30).
laytyp : int or array of ints (nlay)
Layer type, contains a flag for each layer that specifies the layer type.
0 confined
>0 convertible
<0 convertible unless the THICKSTRT option is in effect.
(default is 0).
layavg : int or array of ints (nlay)
Layer average
0 is harmonic mean
1 is logarithmic mean
2 is arithmetic mean of saturated thickness and logarithmic mean of
of hydraulic conductivity
(default is 0).
chani : float or array of floats (nlay)
contains a value for each layer that is a flag or the horizontal
anisotropy. If CHANI is less than or equal to 0, then variable HANI
defines horizontal anisotropy. If CHANI is greater than 0, then CHANI
is the horizontal anisotropy for the entire layer, and HANI is not
read. If any HANI parameters are used, CHANI for all layers must be
less than or equal to 0. Use as many records as needed to enter a
value of CHANI for each layer. The horizontal anisotropy is the ratio
of the hydraulic conductivity along columns (the Y direction) to the
hydraulic conductivity along rows (the X direction).
(default is 1).
layvka : float or array of floats (nlay)
a flag for each layer that indicates whether variable VKA is vertical
hydraulic conductivity or the ratio of horizontal to vertical
hydraulic conductivity.
0: VKA is vertical hydraulic conductivity
not 0: VKA is the ratio of horizontal to vertical hydraulic conductivity
(default is 0).
laywet : float or array of floats (nlay)
contains a flag for each layer that indicates if wetting is active.
0 wetting is inactive
not 0 wetting is active
(default is 0).
wetfct : float
is a factor that is included in the calculation of the head that is
initially established at a cell when it is converted from dry to wet.
(default is 0.1).
iwetit : int
is the iteration interval for attempting to wet cells. Wetting is
attempted every IWETIT iteration. If using the PCG solver
(Hill, 1990), this applies to outer iterations, not inner iterations.
If IWETIT less than or equal to 0, it is changed to 1.
(default is 1).
ihdwet : int
is a flag that determines which equation is used to define the
initial head at cells that become wet.
(default is 0)
hk : float or array of floats (nlay, nrow, ncol)
is the hydraulic conductivity along rows. HK is multiplied by
horizontal anisotropy (see CHANI and HANI) to obtain hydraulic
conductivity along columns.
(default is 1.0).
hani : float or array of floats (nlay, nrow, ncol)
is the ratio of hydraulic conductivity along columns to hydraulic
conductivity along rows, where HK of item 10 specifies the hydraulic
conductivity along rows. Thus, the hydraulic conductivity along
columns is the product of the values in HK and HANI.
(default is 1.0).
vka : float or array of floats (nlay, nrow, ncol)
is either vertical hydraulic conductivity or the ratio of horizontal
to vertical hydraulic conductivity depending on the value of LAYVKA.
(default is 1.0).
ss : float or array of floats (nlay, nrow, ncol)
is specific storage unless the STORAGECOEFFICIENT option is used.
When STORAGECOEFFICIENT is used, Ss is confined storage coefficient.
(default is 1.e-5).
sy : float or array of floats (nlay, nrow, ncol)
is specific yield.
(default is 0.15).
vkcb : float or array of floats (nlay, nrow, ncol)
is the vertical hydraulic conductivity of a Quasi-three-dimensional
confining bed below a layer. (default is 0.0).
wetdry : float or array of floats (nlay, nrow, ncol)
is a combination of the wetting threshold and a flag to indicate
which neighboring cells can cause a cell to become wet.
(default is -0.01).
storagecoefficient : boolean
indicates that variable Ss and SS parameters are read as storage
coefficient rather than specific storage.
(default is False).
constantcv : boolean
indicates that vertical conductance for an unconfined cell is
computed from the cell thickness rather than the saturated thickness.
The CONSTANTCV option automatically invokes the NOCVCORRECTION
option. (default is False).
thickstrt : boolean
indicates that layers having a negative LAYTYP are confined, and their
cell thickness for conductance calculations will be computed as
STRT-BOT rather than TOP-BOT. (default is False).
nocvcorrection : boolean
indicates that vertical conductance is not corrected when the vertical
flow correction is applied. (default is False).
novfc : boolean
turns off the vertical flow correction under dewatered conditions.
This option turns off the vertical flow calculation described on p.
5-8 of USGS Techniques and Methods Report 6-A16 and the vertical
conductance correction described on p. 5-18 of that report.
(default is False).
extension : string
Filename extension (default is 'lpf')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
the model name and .cbc extension (for example, modflowtest.cbc),
if ipakcb is a number greater than zero. If a single string is passed
the package will be set to the string and cbc output name will be
created using the model name and .cbc extension, if ipakcb is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lpf = flopy.modflow.ModflowLpf(m)
"""
'Layer-property flow package class\n'
def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0,
laywet=0, ipakcb=None, hdry=-1E+30, iwdflg=0, wetfct=0.1,
iwetit=1, ihdwet=0, hk=1.0, hani=1.0, vka=1.0, ss=1e-5,
sy=0.15, vkcb=0.0, wetdry=-0.01, storagecoefficient=False,
constantcv=False, thickstrt=False, nocvcorrection=False,
novfc=False, extension='lpf',
unitnumber=None, filenames=None):
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowLpf.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with cbc output, if necessary
if ipakcb is not None:
fname = filenames[1]
model.add_output_file(ipakcb, fname=fname,
package=ModflowLpf.ftype())
else:
ipakcb = 0
# Fill namefile items
name = [ModflowLpf.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'lpf.htm'
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
# item 1
self.ipakcb = ipakcb
self.hdry = hdry # Head in cells that are converted to dry during a simulation
self.nplpf = 0 # number of LPF parameters
self.laytyp = Util2d(model, (nlay,), np.int, laytyp, name='laytyp')
self.layavg = Util2d(model, (nlay,), np.int, layavg, name='layavg')
self.chani = Util2d(model, (nlay,), np.float32, chani, name='chani')
self.layvka = Util2d(model, (nlay,), np.int, layvka, name='layvka')
self.laywet = Util2d(model, (nlay,), np.int, laywet, name='laywet')
self.wetfct = wetfct # Factor that is included in the calculation of the head when a cell is converted from dry to wet
self.iwetit = iwetit # Iteration interval for attempting to wet cells
self.ihdwet = ihdwet # Flag that determines which equation is used to define the initial head at cells that become wet
self.options = ' '
if storagecoefficient:
self.options = self.options + 'STORAGECOEFFICIENT '
if constantcv: self.options = self.options + 'CONSTANTCV '
if thickstrt: self.options = self.options + 'THICKSTRT '
if nocvcorrection: self.options = self.options + 'NOCVCORRECTION '
if novfc: self.options = self.options + 'NOVFC '
self.hk = Util3d(model, (nlay, nrow, ncol), np.float32, hk, name='hk',
locat=self.unit_number[0])
self.hani = Util3d(model, (nlay, nrow, ncol), np.float32, hani,
name='hani', locat=self.unit_number[0])
keys = []
for k in range(nlay):
key = 'vka'
if self.layvka[k] != 0:
key = 'vani'
keys.append(key)
self.vka = Util3d(model, (nlay, nrow, ncol), np.float32, vka,
name=keys, locat=self.unit_number[0])
tag = 'ss'
if storagecoefficient:
tag = 'storage'
self.ss = Util3d(model, (nlay, nrow, ncol), np.float32, ss, name=tag,
locat=self.unit_number[0])
self.sy = Util3d(model, (nlay, nrow, ncol), np.float32, sy, name='sy',
locat=self.unit_number[0])
self.vkcb = Util3d(model, (nlay, nrow, ncol), np.float32, vkcb,
name='vkcb', locat=self.unit_number[0])
self.wetdry = Util3d(model, (nlay, nrow, ncol), np.float32, wetdry,
name='wetdry', locat=self.unit_number[0])
self.parent.add_package(self)
return
def write_file(self, check=True):
"""
Write the package file.
Parameters
----------
check : boolean
Check package data for common errors. (default True)
Returns
-------
None
"""
if check: # allows turning off package checks when writing files at model level
self.check(f='{}.chk'.format(self.name[0]),
verbose=self.parent.verbose, level=1)
# get model information
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
dis = self.parent.get_package('DIS')
if dis is None:
dis = self.parent.get_package('DISU')
# Open file for writing
f = open(self.fn_path, 'w')
# Item 0: text
f.write('{}\n'.format(self.heading))
# Item 1: IBCFCB, HDRY, NPLPF
f.write('{0:10d}{1:10.6G}{2:10d} {3:s}\n'.format(self.ipakcb,
self.hdry,
self.nplpf,
self.options))
# LAYTYP array
f.write(self.laytyp.string)
# LAYAVG array
f.write(self.layavg.string)
# CHANI array
f.write(self.chani.string)
# LAYVKA array
f.write(self.layvka.string)
# LAYWET array
f.write(self.laywet.string)
# Item 7: WETFCT, IWETIT, IHDWET
iwetdry = self.laywet.sum()
if iwetdry > 0:
f.write('{0:10f}{1:10d}{2:10d}\n'.format(self.wetfct,
self.iwetit,
self.ihdwet))
transient = not dis.steady.all()
for k in range(nlay):
f.write(self.hk[k].get_file_entry())
if self.chani[k] < 1:
f.write(self.hani[k].get_file_entry())
f.write(self.vka[k].get_file_entry())
if transient:
f.write(self.ss[k].get_file_entry())
if self.laytyp[k] != 0:
f.write(self.sy[k].get_file_entry())
if dis.laycbd[k] > 0:
f.write(self.vkcb[k].get_file_entry())
if (self.laywet[k] != 0 and self.laytyp[k] != 0):
f.write(self.wetdry[k].get_file_entry())
f.close()
return
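# Note: in typical use this method is not called directly; the usual flow is to
# build the model and call its write_input() method, which (as assumed here)
# invokes write_file() on every attached package, this one included.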
@staticmethod
def load(f, model, ext_unit_dict=None, check=True):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
check : boolean
Check package data for common errors. (default True)
Returns
-------
lpf : ModflowLpf object
ModflowLpf object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> lpf = flopy.modflow.ModflowLpf.load('test.lpf', m)
"""
if model.verbose:
sys.stdout.write('loading lpf package file...\n')
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# determine problem dimensions
nr, nc, nlay, nper = model.get_nrow_ncol_nlay_nper()
dis = model.get_package('DIS')
if dis is None:
dis = model.get_package('DISU')
# Item 1: IBCFCB, HDRY, NPLPF - line already read above
if model.verbose:
print(' loading IBCFCB, HDRY, NPLPF...')
t = line.strip().split()
ipakcb, hdry, nplpf = int(t[0]), float(t[1]), int(t[2])
#if ipakcb != 0:
# model.add_pop_key_list(ipakcb)
# ipakcb = 53
# options
storagecoefficient = False
constantcv = False
thickstrt = False
nocvcorrection = False
novfc = False
if len(t) > 3:
for k in range(3, len(t)):
if 'STORAGECOEFFICIENT' in t[k].upper():
storagecoefficient = True
elif 'CONSTANTCV' in t[k].upper():
constantcv = True
elif 'THICKSTRT' in t[k].upper():
thickstrt = True
elif 'NOCVCORRECTION' in t[k].upper():
nocvcorrection = True
elif 'NOVFC' in t[k].upper():
novfc = True
# LAYTYP array
if model.verbose:
print(' loading LAYTYP...')
laytyp = np.empty((nlay), dtype=np.int)
laytyp = read1d(f, laytyp)
# LAYAVG array
if model.verbose:
print(' loading LAYAVG...')
layavg = np.empty((nlay), dtype=np.int)
layavg = read1d(f, layavg)
# CHANI array
if model.verbose:
print(' loading CHANI...')
chani = np.empty((nlay), dtype=np.float32)
chani = read1d(f, chani)
# LAYVKA array
if model.verbose:
print(' loading LAYVKA...')
layvka = np.empty((nlay), dtype=np.float32)
layvka = read1d(f, layvka)
# LAYWET array
if model.verbose:
print(' loading LAYWET...')
laywet = np.empty((nlay), dtype=np.int)
laywet = read1d(f, laywet)
# Item 7: WETFCT, IWETIT, IHDWET
wetfct, iwetit, ihdwet = None, None, None
iwetdry = laywet.sum()
if iwetdry > 0:
if model.verbose:
print(' loading WETFCT, IWETIT, IHDWET...')
line = f.readline()
t = line.strip().split()
wetfct, iwetit, ihdwet = float(t[0]), int(t[1]), int(t[2])
# parameters data
par_types = []
if nplpf > 0:
par_types, parm_dict = mfpar.load(f, nplpf, model.verbose)
# print parm_dict
# non-parameter data
transient = not dis.steady.all()
hk = [0] * nlay
hani = [0] * nlay
vka = [0] * nlay
ss = [0] * nlay
sy = [0] * nlay
vkcb = [0] * nlay
wetdry = [0] * nlay
# load by layer
for k in range(nlay):
# allow for unstructured changing nodes per layer
if nr is None:
nrow = 1
ncol = nc[k]
else:
nrow = nr
ncol = nc
# hk
if model.verbose:
print(' loading hk layer {0:3d}...'.format(k + 1))
if 'hk' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hk',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict,
findlayer=k)
hk[k] = t
# hani
if chani[k] < 1:
if model.verbose:
print(' loading hani layer {0:3d}...'.format(k + 1))
if 'hani' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hani',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'hani',
parm_dict, findlayer=k)
hani[k] = t
# vka
if model.verbose:
print(' loading vka layer {0:3d}...'.format(k + 1))
key = 'vka'
if layvka[k] != 0:
key = 'vani'
if 'vk' not in par_types and 'vani' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, key,
ext_unit_dict)
else:
line = f.readline()
key = 'vka'
if 'vani' in par_types:
key = 'vani'
t = mfpar.parameter_fill(model, (nrow, ncol), key, parm_dict,
findlayer=k)
vka[k] = t
# storage properties
if transient:
# ss
if model.verbose:
print(' loading ss layer {0:3d}...'.format(k + 1))
if 'ss' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'ss',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'ss',
parm_dict, findlayer=k)
ss[k] = t
# sy
if laytyp[k] != 0:
if model.verbose:
print(' loading sy layer {0:3d}...'.format(k + 1))
if 'sy' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32,
'sy',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'sy',
parm_dict, findlayer=k)
sy[k] = t
# vkcb
if dis.laycbd[k] > 0:
if model.verbose:
print(' loading vkcb layer {0:3d}...'.format(k + 1))
if 'vkcb' not in par_types:
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vkcb',
ext_unit_dict)
else:
line = f.readline()
t = mfpar.parameter_fill(model, (nrow, ncol), 'vkcb',
parm_dict, findlayer=k)
vkcb[k] = t
# wetdry
if (laywet[k] != 0 and laytyp[k] != 0):
if model.verbose:
print(' loading wetdry layer {0:3d}...'.format(k + 1))
t = Util2d.load(f, model, (nrow, ncol), np.float32, 'wetdry',
ext_unit_dict)
wetdry[k] = t
# set package unit number
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowLpf.ftype())
if ipakcb > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
# create instance of lpf class
lpf = ModflowLpf(model, ipakcb=ipakcb, laytyp=laytyp, layavg=layavg,
chani=chani, layvka=layvka, laywet=laywet, hdry=hdry,
iwdflg=iwetdry, wetfct=wetfct, iwetit=iwetit,
ihdwet=ihdwet, hk=hk, hani=hani, vka=vka, ss=ss,
sy=sy, vkcb=vkcb, wetdry=wetdry,
storagecoefficient=storagecoefficient,
constantcv=constantcv, thickstrt=thickstrt,
novfc=novfc,
unitnumber=unitnumber, filenames=filenames)
if check:
lpf.check(f='{}.chk'.format(lpf.name[0]),
verbose=lpf.parent.verbose, level=0)
return lpf
@staticmethod
def ftype():
return 'LPF'
@staticmethod
def defaultunit():
return 15
| bsd-3-clause | -2,216,817,551,208,031,700 | 38.833063 | 127 | 0.526038 | false |
endlessm/chromium-browser | mojo/public/tools/mojom/mojom/generate/translate.py | 1 | 26478 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert parse tree to AST.
This module converts the parse tree to the AST we use for code generation. The
main entry point is OrderedModule, which gets passed the parser
representation of a mojom file. By the time it is called, all imports are
assumed to have already been parsed and converted to ASTs.
"""
import itertools
import os
import re
from mojom.generate import module as mojom
from mojom.parse import ast
def _DuplicateName(values):
"""Returns the 'mojom_name' of the first entry in |values| whose 'mojom_name'
has already been encountered. If there are no duplicates, returns None."""
names = set()
for value in values:
if value.mojom_name in names:
return value.mojom_name
names.add(value.mojom_name)
return None
def _ElemsOfType(elems, elem_type, scope):
"""Find all elements of the given type.
Args:
elems: {Sequence[Any]} Sequence of elems.
elem_type: {Type[C]} Extract all elems of this type.
scope: {str} The name of the surrounding scope (e.g. struct
definition). Used in error messages.
Returns:
{List[C]} All elems of matching type.
"""
assert isinstance(elem_type, type)
result = [elem for elem in elems if isinstance(elem, elem_type)]
duplicate_name = _DuplicateName(result)
if duplicate_name:
raise Exception('Names in mojom must be unique within a scope. The name '
'"%s" is used more than once within the scope "%s".' %
(duplicate_name, scope))
return result
def _MapKind(kind):
map_to_kind = {
'bool': 'b',
'int8': 'i8',
'int16': 'i16',
'int32': 'i32',
'int64': 'i64',
'uint8': 'u8',
'uint16': 'u16',
'uint32': 'u32',
'uint64': 'u64',
'float': 'f',
'double': 'd',
'string': 's',
'handle': 'h',
'handle<data_pipe_consumer>': 'h:d:c',
'handle<data_pipe_producer>': 'h:d:p',
'handle<message_pipe>': 'h:m',
'handle<shared_buffer>': 'h:s',
'handle<platform>': 'h:p'
}
if kind.endswith('?'):
base_kind = _MapKind(kind[0:-1])
# NOTE: This doesn't rule out enum types. Those will be detected later, when
# cross-references are established.
reference_kinds = ('m', 's', 'h', 'a', 'r', 'x', 'asso', 'rmt', 'rcv',
'rma', 'rca')
if re.split('[^a-z]', base_kind, 1)[0] not in reference_kinds:
raise Exception('A type (spec "%s") cannot be made nullable' % base_kind)
return '?' + base_kind
if kind.endswith('}'):
lbracket = kind.rfind('{')
value = kind[0:lbracket]
return 'm[' + _MapKind(kind[lbracket + 1:-1]) + '][' + _MapKind(value) + ']'
if kind.endswith(']'):
lbracket = kind.rfind('[')
typename = kind[0:lbracket]
return 'a' + kind[lbracket + 1:-1] + ':' + _MapKind(typename)
if kind.endswith('&'):
return 'r:' + _MapKind(kind[0:-1])
if kind.startswith('asso<'):
assert kind.endswith('>')
return 'asso:' + _MapKind(kind[5:-1])
if kind.startswith('rmt<'):
assert kind.endswith('>')
return 'rmt:' + _MapKind(kind[4:-1])
if kind.startswith('rcv<'):
assert kind.endswith('>')
return 'rcv:' + _MapKind(kind[4:-1])
if kind.startswith('rma<'):
assert kind.endswith('>')
return 'rma:' + _MapKind(kind[4:-1])
if kind.startswith('rca<'):
assert kind.endswith('>')
return 'rca:' + _MapKind(kind[4:-1])
if kind in map_to_kind:
return map_to_kind[kind]
return 'x:' + kind
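# A few illustrative mappings, derived from the rules above:
#   'int32' -> 'i32'
#   'string?' -> '?s'
#   'handle<message_pipe>' -> 'h:m'
#   'SomeStruct' -> 'x:SomeStruct' (unresolved identifiers keep the 'x:' prefix)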
def _AttributeListToDict(attribute_list):
if attribute_list is None:
return None
assert isinstance(attribute_list, ast.AttributeList)
# TODO(vtl): Check for duplicate keys here.
return dict(
[(attribute.key, attribute.value) for attribute in attribute_list])
builtin_values = frozenset([
"double.INFINITY", "double.NEGATIVE_INFINITY", "double.NAN",
"float.INFINITY", "float.NEGATIVE_INFINITY", "float.NAN"
])
def _IsBuiltinValue(value):
return value in builtin_values
def _LookupKind(kinds, spec, scope):
"""Tries to find which Kind a spec refers to, given the scope in which its
referenced. Starts checking from the narrowest scope to most general. For
example, given a struct field like
Foo.Bar x;
Foo.Bar could refer to the type 'Bar' in the 'Foo' namespace, or an inner
type 'Bar' in the struct 'Foo' in the current namespace.
|scope| is a tuple that looks like (namespace, struct/interface), referring
to the location where the type is referenced."""
if spec.startswith('x:'):
mojom_name = spec[2:]
for i in range(len(scope), -1, -1):
test_spec = 'x:'
if i > 0:
test_spec += '.'.join(scope[:i]) + '.'
test_spec += mojom_name
kind = kinds.get(test_spec)
if kind:
return kind
return kinds.get(spec)
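# For example (a sketch of the resolution order above): with
# scope = ('my.module', 'Foo') and spec = 'x:Bar', the lookup tries
# 'x:my.module.Foo.Bar', then 'x:my.module.Bar', then 'x:Bar'; specs that do
# not start with 'x:' (e.g. primitive kinds) fall straight through to
# kinds.get(spec).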
def _LookupValue(values, mojom_name, scope, kind):
"""Like LookupKind, but for constant values."""
# If the type is an enum, the value can be specified as a qualified name, in
# which case the form EnumName.ENUM_VALUE must be used. We use the presence
# of a '.' in the requested name to identify this. Otherwise, we prepend the
# enum name.
if isinstance(kind, mojom.Enum) and '.' not in mojom_name:
mojom_name = '%s.%s' % (kind.spec.split(':', 1)[1], mojom_name)
for i in reversed(range(len(scope) + 1)):
test_spec = '.'.join(scope[:i])
if test_spec:
test_spec += '.'
test_spec += mojom_name
value = values.get(test_spec)
if value:
return value
return values.get(mojom_name)
def _FixupExpression(module, value, scope, kind):
"""Translates an IDENTIFIER into a built-in value or structured NamedValue
object."""
if isinstance(value, tuple) and value[0] == 'IDENTIFIER':
# Allow user defined values to shadow builtins.
result = _LookupValue(module.values, value[1], scope, kind)
if result:
if isinstance(result, tuple):
raise Exception('Unable to resolve expression: %r' % value[1])
return result
if _IsBuiltinValue(value[1]):
return mojom.BuiltinValue(value[1])
return value
def _Kind(kinds, spec, scope):
"""Convert a type name into a mojom.Kind object.
As a side-effect this function adds the result to 'kinds'.
Args:
kinds: {Dict[str, mojom.Kind]} All known kinds up to this point, indexed by
their names.
spec: {str} A name uniquely identifying a type.
scope: {Tuple[str, str]} A tuple that looks like (namespace,
struct/interface), referring to the location where the type is
referenced.
Returns:
{mojom.Kind} The type corresponding to 'spec'.
"""
kind = _LookupKind(kinds, spec, scope)
if kind:
return kind
if spec.startswith('?'):
kind = _Kind(kinds, spec[1:], scope).MakeNullableKind()
elif spec.startswith('a:'):
kind = mojom.Array(_Kind(kinds, spec[2:], scope))
elif spec.startswith('asso:'):
inner_kind = _Kind(kinds, spec[5:], scope)
if isinstance(inner_kind, mojom.InterfaceRequest):
kind = mojom.AssociatedInterfaceRequest(inner_kind)
else:
kind = mojom.AssociatedInterface(inner_kind)
elif spec.startswith('a'):
colon = spec.find(':')
length = int(spec[1:colon])
kind = mojom.Array(_Kind(kinds, spec[colon + 1:], scope), length)
elif spec.startswith('r:'):
kind = mojom.InterfaceRequest(_Kind(kinds, spec[2:], scope))
elif spec.startswith('rmt:'):
kind = mojom.PendingRemote(_Kind(kinds, spec[4:], scope))
elif spec.startswith('rcv:'):
kind = mojom.PendingReceiver(_Kind(kinds, spec[4:], scope))
elif spec.startswith('rma:'):
kind = mojom.PendingAssociatedRemote(_Kind(kinds, spec[4:], scope))
elif spec.startswith('rca:'):
kind = mojom.PendingAssociatedReceiver(_Kind(kinds, spec[4:], scope))
elif spec.startswith('m['):
# Isolate the two types from their brackets.
# It is not allowed to use map as key, so there shouldn't be nested ']'s
# inside the key type spec.
key_end = spec.find(']')
assert key_end != -1 and key_end < len(spec) - 1
assert spec[key_end + 1] == '[' and spec[-1] == ']'
first_kind = spec[2:key_end]
second_kind = spec[key_end + 2:-1]
kind = mojom.Map(
_Kind(kinds, first_kind, scope), _Kind(kinds, second_kind, scope))
else:
kind = mojom.Kind(spec)
kinds[spec] = kind
return kind
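# Illustrative spec strings as parsed above (assuming 'x:ns.Foo' already names
# a user-defined kind; these examples only demonstrate the string grammar):
#   '?x:ns.Foo'             -> nullable Foo
#   'a:x:ns.Foo'            -> mojom.Array of Foo
#   'a4:x:ns.Foo'           -> mojom.Array of Foo with fixed length 4
#   'm[x:ns.Key][x:ns.Foo]' -> mojom.Map from Key to Foo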
def _Import(module, import_module):
# Copy the struct kinds from our imports into the current module.
importable_kinds = (mojom.Struct, mojom.Union, mojom.Enum, mojom.Interface)
for kind in import_module.kinds.values():
if (isinstance(kind, importable_kinds)
and kind.module.path == import_module.path):
module.kinds[kind.spec] = kind
# Ditto for values.
for value in import_module.values.values():
if value.module.path == import_module.path:
module.values[value.GetSpec()] = value
return import_module
def _Struct(module, parsed_struct):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_struct: {ast.Struct} Parsed struct.
Returns:
{mojom.Struct} AST struct.
"""
struct = mojom.Struct(module=module)
struct.mojom_name = parsed_struct.mojom_name
struct.native_only = parsed_struct.body is None
struct.spec = 'x:' + module.mojom_namespace + '.' + struct.mojom_name
module.kinds[struct.spec] = struct
if struct.native_only:
struct.enums = []
struct.constants = []
struct.fields_data = []
else:
struct.enums = list(
map(
lambda enum: _Enum(module, enum, struct),
_ElemsOfType(parsed_struct.body, ast.Enum,
parsed_struct.mojom_name)))
struct.constants = list(
map(
lambda constant: _Constant(module, constant, struct),
_ElemsOfType(parsed_struct.body, ast.Const,
parsed_struct.mojom_name)))
    # Stash the fields of parsed_struct here temporarily.
struct.fields_data = _ElemsOfType(parsed_struct.body, ast.StructField,
parsed_struct.mojom_name)
struct.attributes = _AttributeListToDict(parsed_struct.attribute_list)
# Enforce that a [Native] attribute is set to make native-only struct
# declarations more explicit.
if struct.native_only:
if not struct.attributes or not struct.attributes.get('Native', False):
raise Exception("Native-only struct declarations must include a " +
"Native attribute.")
if struct.attributes and struct.attributes.get('CustomSerializer', False):
struct.custom_serializer = True
return struct
def _Union(module, parsed_union):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_union: {ast.Union} Parsed union.
Returns:
{mojom.Union} AST union.
"""
union = mojom.Union(module=module)
union.mojom_name = parsed_union.mojom_name
union.spec = 'x:' + module.mojom_namespace + '.' + union.mojom_name
module.kinds[union.spec] = union
  # Stash the fields of parsed_union here temporarily.
union.fields_data = _ElemsOfType(parsed_union.body, ast.UnionField,
parsed_union.mojom_name)
union.attributes = _AttributeListToDict(parsed_union.attribute_list)
return union
def _StructField(module, parsed_field, struct):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_field: {ast.StructField} Parsed struct field.
struct: {mojom.Struct} Struct this field belongs to.
Returns:
{mojom.StructField} AST struct field.
"""
field = mojom.StructField()
field.mojom_name = parsed_field.mojom_name
field.kind = _Kind(module.kinds, _MapKind(parsed_field.typename),
(module.mojom_namespace, struct.mojom_name))
field.ordinal = parsed_field.ordinal.value if parsed_field.ordinal else None
field.default = _FixupExpression(module, parsed_field.default_value,
(module.mojom_namespace, struct.mojom_name),
field.kind)
field.attributes = _AttributeListToDict(parsed_field.attribute_list)
return field
def _UnionField(module, parsed_field, union):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_field: {ast.UnionField} Parsed union field.
    union: {mojom.Union} Union this field belongs to.
Returns:
{mojom.UnionField} AST union.
"""
field = mojom.UnionField()
field.mojom_name = parsed_field.mojom_name
field.kind = _Kind(module.kinds, _MapKind(parsed_field.typename),
(module.mojom_namespace, union.mojom_name))
field.ordinal = parsed_field.ordinal.value if parsed_field.ordinal else None
field.default = _FixupExpression(
module, None, (module.mojom_namespace, union.mojom_name), field.kind)
field.attributes = _AttributeListToDict(parsed_field.attribute_list)
return field
def _Parameter(module, parsed_param, interface):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_param: {ast.Parameter} Parsed parameter.
    interface: {mojom.Interface} Interface this parameter belongs to.
Returns:
{mojom.Parameter} AST parameter.
"""
parameter = mojom.Parameter()
parameter.mojom_name = parsed_param.mojom_name
parameter.kind = _Kind(module.kinds, _MapKind(parsed_param.typename),
(module.mojom_namespace, interface.mojom_name))
parameter.ordinal = (parsed_param.ordinal.value
if parsed_param.ordinal else None)
parameter.default = None # TODO(tibell): We never have these. Remove field?
parameter.attributes = _AttributeListToDict(parsed_param.attribute_list)
return parameter
def _Method(module, parsed_method, interface):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_method: {ast.Method} Parsed method.
interface: {mojom.Interface} Interface this method belongs to.
Returns:
{mojom.Method} AST method.
"""
method = mojom.Method(
interface,
parsed_method.mojom_name,
ordinal=parsed_method.ordinal.value if parsed_method.ordinal else None)
method.parameters = list(
map(lambda parameter: _Parameter(module, parameter, interface),
parsed_method.parameter_list))
if parsed_method.response_parameter_list is not None:
method.response_parameters = list(
map(lambda parameter: _Parameter(module, parameter, interface),
parsed_method.response_parameter_list))
method.attributes = _AttributeListToDict(parsed_method.attribute_list)
# Enforce that only methods with response can have a [Sync] attribute.
if method.sync and method.response_parameters is None:
raise Exception("Only methods with response can include a [Sync] "
"attribute. If no response parameters are needed, you "
"could use an empty response parameter list, i.e., "
"\"=> ()\".")
return method
def _Interface(module, parsed_iface):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_iface: {ast.Interface} Parsed interface.
Returns:
{mojom.Interface} AST interface.
"""
interface = mojom.Interface(module=module)
interface.mojom_name = parsed_iface.mojom_name
interface.spec = 'x:' + module.mojom_namespace + '.' + interface.mojom_name
module.kinds[interface.spec] = interface
interface.enums = list(
map(lambda enum: _Enum(module, enum, interface),
_ElemsOfType(parsed_iface.body, ast.Enum, parsed_iface.mojom_name)))
interface.constants = list(
map(lambda constant: _Constant(module, constant, interface),
_ElemsOfType(parsed_iface.body, ast.Const, parsed_iface.mojom_name)))
  # Stash the methods of parsed_iface here temporarily.
interface.methods_data = _ElemsOfType(parsed_iface.body, ast.Method,
parsed_iface.mojom_name)
interface.attributes = _AttributeListToDict(parsed_iface.attribute_list)
return interface
def _EnumField(module, enum, parsed_field, parent_kind):
"""
Args:
module: {mojom.Module} Module currently being constructed.
enum: {mojom.Enum} Enum this field belongs to.
parsed_field: {ast.EnumValue} Parsed enum value.
parent_kind: {mojom.Kind} The enclosing type.
Returns:
{mojom.EnumField} AST enum field.
"""
field = mojom.EnumField()
field.mojom_name = parsed_field.mojom_name
# TODO(mpcomplete): FixupExpression should be done in the second pass,
# so constants and enums can refer to each other.
# TODO(mpcomplete): But then, what if constants are initialized to an enum? Or
# vice versa?
if parent_kind:
field.value = _FixupExpression(
module, parsed_field.value,
(module.mojom_namespace, parent_kind.mojom_name), enum)
else:
field.value = _FixupExpression(module, parsed_field.value,
(module.mojom_namespace, ), enum)
field.attributes = _AttributeListToDict(parsed_field.attribute_list)
value = mojom.EnumValue(module, enum, field)
module.values[value.GetSpec()] = value
return field
def _ResolveNumericEnumValues(enum_fields):
"""
Given a reference to a list of mojom.EnumField, resolves and assigns their
values to EnumField.numeric_value.
Returns:
A tuple of the lowest and highest assigned enumerator value or None, None
if no enumerator values were assigned.
"""
# map of <mojom_name> -> integral value
resolved_enum_values = {}
prev_value = -1
min_value = None
max_value = None
for field in enum_fields:
# This enum value is +1 the previous enum value (e.g: BEGIN).
if field.value is None:
prev_value += 1
# Integral value (e.g: BEGIN = -0x1).
elif isinstance(field.value, str):
prev_value = int(field.value, 0)
# Reference to a previous enum value (e.g: INIT = BEGIN).
elif isinstance(field.value, mojom.EnumValue):
prev_value = resolved_enum_values[field.value.mojom_name]
else:
raise Exception("Unresolved enum value.")
resolved_enum_values[field.mojom_name] = prev_value
field.numeric_value = prev_value
if min_value is None or prev_value < min_value:
min_value = prev_value
if max_value is None or prev_value > max_value:
max_value = prev_value
return min_value, max_value
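# Worked example (illustrative): for enum fields declared as
#   A, B = 5, C
# the loop assigns A -> 0 (previous value + 1), B -> 5 and C -> 6, and the
# function returns (0, 6) as the (min_value, max_value) pair.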
def _Enum(module, parsed_enum, parent_kind):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_enum: {ast.Enum} Parsed enum.
Returns:
{mojom.Enum} AST enum.
"""
enum = mojom.Enum(module=module)
enum.mojom_name = parsed_enum.mojom_name
enum.native_only = parsed_enum.enum_value_list is None
mojom_name = enum.mojom_name
if parent_kind:
mojom_name = parent_kind.mojom_name + '.' + mojom_name
enum.spec = 'x:%s.%s' % (module.mojom_namespace, mojom_name)
enum.parent_kind = parent_kind
enum.attributes = _AttributeListToDict(parsed_enum.attribute_list)
if not enum.native_only:
enum.fields = list(
map(lambda field: _EnumField(module, enum, field, parent_kind),
parsed_enum.enum_value_list))
enum.min_value, enum.max_value = _ResolveNumericEnumValues(enum.fields)
module.kinds[enum.spec] = enum
# Enforce that a [Native] attribute is set to make native-only enum
# declarations more explicit.
if enum.native_only:
if not enum.attributes or not enum.attributes.get('Native', False):
raise Exception("Native-only enum declarations must include a " +
"Native attribute.")
return enum
def _Constant(module, parsed_const, parent_kind):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_const: {ast.Const} Parsed constant.
Returns:
{mojom.Constant} AST constant.
"""
constant = mojom.Constant()
constant.mojom_name = parsed_const.mojom_name
if parent_kind:
scope = (module.mojom_namespace, parent_kind.mojom_name)
else:
scope = (module.mojom_namespace, )
# TODO(mpcomplete): maybe we should only support POD kinds.
constant.kind = _Kind(module.kinds, _MapKind(parsed_const.typename), scope)
constant.parent_kind = parent_kind
constant.value = _FixupExpression(module, parsed_const.value, scope, None)
value = mojom.ConstantValue(module, parent_kind, constant)
module.values[value.GetSpec()] = value
return constant
def _CollectReferencedKinds(module, all_defined_kinds):
"""
Takes a {mojom.Module} object and a list of all defined kinds within that
module, and enumerates the complete dict of user-defined mojom types
(as {mojom.Kind} objects) referenced by the module's own defined kinds (i.e.
  as types of struct or union or interface parameters). The returned dict is
keyed by kind spec.
"""
def extract_referenced_user_kinds(kind):
if mojom.IsArrayKind(kind):
return extract_referenced_user_kinds(kind.kind)
if mojom.IsMapKind(kind):
return (extract_referenced_user_kinds(kind.key_kind) +
extract_referenced_user_kinds(kind.value_kind))
if mojom.IsInterfaceRequestKind(kind) or mojom.IsAssociatedKind(kind):
return [kind.kind]
if mojom.IsStructKind(kind):
return [kind]
if (mojom.IsInterfaceKind(kind) or mojom.IsEnumKind(kind)
or mojom.IsUnionKind(kind)):
return [kind]
return []
def sanitize_kind(kind):
"""Removes nullability from a kind"""
if kind.spec.startswith('?'):
return _Kind(module.kinds, kind.spec[1:], (module.mojom_namespace, ''))
return kind
referenced_user_kinds = {}
for defined_kind in all_defined_kinds:
if mojom.IsStructKind(defined_kind) or mojom.IsUnionKind(defined_kind):
for field in defined_kind.fields:
for referenced_kind in extract_referenced_user_kinds(field.kind):
sanitized_kind = sanitize_kind(referenced_kind)
referenced_user_kinds[sanitized_kind.spec] = sanitized_kind
# Also scan for references in parameter lists
for interface in module.interfaces:
for method in interface.methods:
for param in itertools.chain(method.parameters or [],
method.response_parameters or []):
if (mojom.IsStructKind(param.kind) or mojom.IsUnionKind(param.kind)
or mojom.IsEnumKind(param.kind)
or mojom.IsAnyInterfaceKind(param.kind)):
for referenced_kind in extract_referenced_user_kinds(param.kind):
sanitized_kind = sanitize_kind(referenced_kind)
referenced_user_kinds[sanitized_kind.spec] = sanitized_kind
return referenced_user_kinds
def _Module(tree, path, imports):
"""
Args:
tree: {ast.Mojom} The parse tree.
path: {str} The path to the mojom file.
imports: {Dict[str, mojom.Module]} Mapping from filenames, as they appear in
the import list, to already processed modules. Used to process imports.
Returns:
{mojom.Module} An AST for the mojom.
"""
module = mojom.Module(path=path)
module.kinds = {}
for kind in mojom.PRIMITIVES:
module.kinds[kind.spec] = kind
module.values = {}
module.mojom_namespace = tree.module.mojom_namespace[1] if tree.module else ''
# Imports must come first, because they add to module.kinds which is used
  # by the others.
module.imports = [
_Import(module, imports[imp.import_filename]) for imp in tree.import_list
]
if tree.module and tree.module.attribute_list:
assert isinstance(tree.module.attribute_list, ast.AttributeList)
# TODO(vtl): Check for duplicate keys here.
module.attributes = dict((attribute.key, attribute.value)
for attribute in tree.module.attribute_list)
filename = os.path.basename(path)
# First pass collects kinds.
module.enums = list(
map(lambda enum: _Enum(module, enum, None),
_ElemsOfType(tree.definition_list, ast.Enum, filename)))
module.structs = list(
map(lambda struct: _Struct(module, struct),
_ElemsOfType(tree.definition_list, ast.Struct, filename)))
module.unions = list(
map(lambda union: _Union(module, union),
_ElemsOfType(tree.definition_list, ast.Union, filename)))
module.interfaces = list(
map(lambda interface: _Interface(module, interface),
_ElemsOfType(tree.definition_list, ast.Interface, filename)))
module.constants = list(
map(lambda constant: _Constant(module, constant, None),
_ElemsOfType(tree.definition_list, ast.Const, filename)))
# Second pass expands fields and methods. This allows fields and parameters
# to refer to kinds defined anywhere in the mojom.
all_defined_kinds = {}
for struct in module.structs:
struct.fields = list(
map(lambda field: _StructField(module, field, struct),
struct.fields_data))
del struct.fields_data
all_defined_kinds[struct.spec] = struct
for enum in struct.enums:
all_defined_kinds[enum.spec] = enum
for union in module.unions:
union.fields = list(
map(lambda field: _UnionField(module, field, union), union.fields_data))
del union.fields_data
all_defined_kinds[union.spec] = union
for interface in module.interfaces:
interface.methods = list(
map(lambda method: _Method(module, method, interface),
interface.methods_data))
del interface.methods_data
all_defined_kinds[interface.spec] = interface
for enum in interface.enums:
all_defined_kinds[enum.spec] = enum
for enum in module.enums:
all_defined_kinds[enum.spec] = enum
all_referenced_kinds = _CollectReferencedKinds(module,
all_defined_kinds.values())
imported_kind_specs = set(all_referenced_kinds.keys()).difference(
set(all_defined_kinds.keys()))
module.imported_kinds = dict(
(spec, all_referenced_kinds[spec]) for spec in imported_kind_specs)
return module
def OrderedModule(tree, path, imports):
"""Convert parse tree to AST module.
Args:
tree: {ast.Mojom} The parse tree.
path: {str} The path to the mojom file.
imports: {Dict[str, mojom.Module]} Mapping from filenames, as they appear in
the import list, to already processed modules. Used to process imports.
Returns:
{mojom.Module} An AST for the mojom.
"""
module = _Module(tree, path, imports)
return module
| bsd-3-clause | -2,127,859,092,660,964,900 | 34.58871 | 80 | 0.667573 | false |
lhmlihaomin/jp | jp/settings.py | 1 | 3170 | """
Django settings for jp project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%$enia!a$!&=gxu+0$ictbvedwnin--!4ex#@p9-nw_1tebsa4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'notebook',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 | -5,930,232,875,764,013,000 | 24.983607 | 91 | 0.686435 | false |
ednapiranha/refrig | posts/templatetags/render_post.py | 1 | 3168 | from django.http import *
from django import template
from django.template.defaultfilters import stringfilter
from django.template import RequestContext
from mongoengine import *
from profile.models import Profile
from posts.models import Post, ImagePost, TextPost, LinkPost, VideoPost, AudioPost
from profile.views import *
import tweepy
from urlparse import urlparse
register = template.Library()
@register.filter
def generate_post(value, post):
# determine which output to generate based on the post type
if isinstance(post, ImagePost):
media = '<img src="'+str(post.description)+'" alt="'+str(post.description)+'" />'
elif isinstance(post, LinkPost):
# if there is text in the link, try to grab what looks like the link
link = str(post.description)
for text_item in post.description.split(' '):
if 'http' in text_item:
link = text_item
media = '<a href="'+link+'" target="_blank">'+post.description+'</a>'
elif isinstance(post, VideoPost):
url = urlparse(post.description)
if post.description.lower().find('vimeo') > -1:
media = '<iframe src="http://player.vimeo.com/video/'+str(url.path.strip('/'))+'?wmode=transparent" width="70%" height="300"></iframe>'
elif post.description.lower().find('youtube') > -1:
media = '<iframe class="youtube-player" type="text/html" width="70%" height="300" src="http://youtube.com/embed/'+str(url.query.split('v=')[1].split('&')[0])+'"></iframe>'
elif isinstance(post, AudioPost):
if post.description.endswith('mp3'):
audio_type = 'audio/mpeg'
else:
audio_type = 'audio/ogg'
media = '<audio controls="controls" preload="auto"><source src="'+post.description+'" type="'+audio_type+'" /></audio><p><a href="'+post.description+'">'+post.description+'</a></p>'
else:
media = '<p>'+post.description+'</p>'
return media
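# Illustrative template usage (a sketch, not part of the original file; the
# left-hand value piped into the filter is ignored):
#   {% load render_post %}
#   {{ post|generate_post:post }}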
@register.filter
def generate_tags(value, post):
# generate tags output from list
tag_list = post.tags
tags = ''
for tag in tag_list:
if len(tag) > 0:
tags += '<a href="/tagged/'+tag+'">'+tag+'</a> '
return tags
@register.filter
def generate_meta_response(value, post):
# output the original author if it exists
result = ''
if post.original_author:
repost_count = str(Post.objects(original_id=post.original_id,original_author=post.original_author).count())
result += '<span class="repost_count">'+repost_count+'</span> <span class="repost_info">Originally posted by <a href="/user/'+str(post.original_author.id)+'">'+post.original_author.full_name+'</a></span>'
return result
@register.filter
def post_by_your_tag(user, tag):
    # has the user tagged a post with this tag?
post = Post.objects(tags=tag.name, author=user).first()
if post:
return "you tagged a post with this"
return ""
@register.filter
def post_by_follower_tag(user, tag):
    # has someone the user follows tagged a post with this tag?
post = Post.objects(tags=tag.name, author__in=user.follows).first()
if post:
return "someone you follow tagged a post with this"
return "" | bsd-3-clause | 7,500,112,006,488,823,000 | 39.628205 | 212 | 0.645518 | false |
nan86150/ImageFusion | bin/pilconvert.py | 1 | 2298 | #!/home/chai/workspace/GitHub/ImageFusion/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
import site
import getopt, string, sys
from PIL import Image
def usage():
print "PIL Convert 0.5/1998-12-30 -- convert image files"
print "Usage: pilconvert [option] infile outfile"
print
print "Options:"
print
print " -c <format> convert to format (default is given by extension)"
print
print " -g convert to greyscale"
print " -p convert to palette image (using standard palette)"
print " -r convert to rgb"
print
print " -o optimize output (trade speed for size)"
print " -q <value> set compression quality (0-100, JPEG only)"
print
print " -f list supported file formats"
sys.exit(1)
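# Example invocation (illustrative): convert a PNG to an optimized greyscale JPEG:
#   pilconvert.py -g -c JPEG -o -q 85 photo.png photo.jpg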
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error, v:
print v
sys.exit(1)
format = None
convert = None
options = { }
for o, a in opt:
if o == "-f":
Image.init()
id = Image.ID[:]
id.sort()
print "Supported formats (* indicates output format):"
for i in id:
if Image.SAVE.has_key(i):
print i+"*",
else:
print i,
sys.exit(1)
elif o == "-c":
format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if format:
apply(im.save, (argv[1], format), options)
else:
apply(im.save, (argv[1],), options)
except:
print "cannot convert image",
print "(%s:%s)" % (sys.exc_type, sys.exc_value)
| mit | -8,801,008,984,851,115,000 | 22.9375 | 76 | 0.550479 | false |
it-events-ro/scripts | find-eventbrite-organizers.py | 1 | 2199 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import utils
organizers = {
2491303902, # http://www.eventbrite.com/o/itcamp-2491303902
6873285549, # http://www.eventbrite.com/o/sponge-media-lab-6873285549
3001324227, # http://www.eventbrite.com/o/labview-student-ambassador-upb-3001324227
2300226659, # http://www.eventbrite.com/o/techstars-startup-programs-2300226659
5899601137, # http://www.eventbrite.com/o/oana-calugar-amp-fabio-carati-amp-cristian-dascalu-5899601137
4662547959, # http://www.eventbrite.com/o/clujhub-4662547959
4138472935, # http://www.eventbrite.com/o/yonder-4138472935
6397991619, # http://www.eventbrite.com/o/facultatea-de-inginerie-electrica-in-colaborare-cu-best-cluj-napoca-6397991619
3367422098, # http://www.eventbrite.com/o/andreea-popescu-3367422098
4206997271, # http://www.eventbrite.com/o/babele-create-together-4206997271
3168795376, # http://www.eventbrite.com/o/girls-in-tech-romania-3168795376
6671021543, # http://www.eventbrite.com/o/asociatia-ip-workshop-6671021543
2761218168, # http://www.eventbrite.com/o/ccsir-2761218168
9377817403, # http://www.eventbrite.com/o/hellojs-9377817403
7802438407, # http://www.eventbrite.com/o/innodrive-7802438407
10949312400, # http://www.eventbrite.com/o/school-of-content-10949312400
6795968089, # http://www.eventbrite.com/o/iiba-romania-chapter-6795968089
10963965257, # http://www.eventbrite.com/o/sinaptiq-edu-10963965257
4246372985, # http://www.eventbrite.com/o/hackathon-in-a-box-4246372985
8767089022, # http://www.eventbrite.com.au/o/bm-college-8767089022
6886785391, # http://www.eventbrite.com/o/sprint-consulting-6886785391
8270334915, # http://www.eventbrite.co.uk/o/msg-systems-romania-8270334915
2670928534, # http://www.eventbrite.com/o/itcamp-community-2670928534
5340605367, # http://www.eventbrite.com/o/techhub-bucharest-5340605367
8042013777, # http://www.eventbrite.com/o/owasp-foundation-8042013777
11097508562, # http://www.eventbrite.com/o/robertino-vasilescu-si-bogdan-socol-ambasadori-prestashop-11097508562
}
for o in organizers:
o = utils.eventbriteApi('/organizers/%d/' % o)
print('\t'.join([o['name'], o['name'], o['url']]))
| agpl-3.0 | -8,914,403,487,848,893,000 | 56.868421 | 122 | 0.758527 | false |
nathanbjenx/cairis | cairis/gui/SecurityPatternsDialog.py | 1 | 3037 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
import cairis.core.Asset
from SecurityPatternDialog import SecurityPatternDialog
from DialogClassParameters import DialogClassParameters
from cairis.core.ARM import *
from DimensionBaseDialog import DimensionBaseDialog
__author__ = 'Shamal Faily'
class SecurityPatternsDialog(DimensionBaseDialog):
def __init__(self,parent):
DimensionBaseDialog.__init__(self,parent,SECURITYPATTERNS_ID,'Security Patterns',(930,300),'countermeasure.png')
self.theMainWindow = parent
idList = [SECURITYPATTERNS_PATTERNLIST_ID,SECURITYPATTERNS_BUTTONADD_ID,SECURITYPATTERNS_BUTTONDELETE_ID]
columnList = ['Name']
self.buildControls(idList,columnList,self.dbProxy.getSecurityPatterns,'securitypattern')
listCtrl = self.FindWindowById(SECURITYPATTERNS_PATTERNLIST_ID)
listCtrl.SetColumnWidth(0,300)
def addObjectRow(self,listCtrl,listRow,pattern):
listCtrl.InsertStringItem(listRow,pattern.name())
def onAdd(self,evt):
try:
addParameters = DialogClassParameters(SECURITYPATTERN_ID,'Add Security Pattern',SecurityPatternDialog,SECURITYPATTERN_BUTTONCOMMIT_ID,self.dbProxy.addSecurityPattern,True)
self.addObject(addParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Add security pattern',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onUpdate(self,evt):
selectedObjt = self.objts[self.selectedLabel]
assetId = selectedObjt.id()
try:
updateParameters = DialogClassParameters(SECURITYPATTERN_ID,'Edit Security Pattern',SecurityPatternDialog,SECURITYPATTERN_BUTTONCOMMIT_ID,self.dbProxy.updateSecurityPattern,False)
self.updateObject(selectedObjt,updateParameters)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit security pattern',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
      dlg.Destroy()
def onDelete(self,evt):
try:
self.deleteObject('No security pattern','Delete security pattern',self.dbProxy.deleteSecurityPattern)
except ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Delete security pattern',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
      dlg.Destroy()
| apache-2.0 | 3,398,963,292,777,533,000 | 42.385714 | 185 | 0.76457 | false |
dtcaciuc/nitrous | tests/test_types/test_vector.py | 1 | 1577 | import unittest
from nitrous.module import module
from nitrous.function import function
from nitrous.types import Float
from nitrous.types.array import Slice
from nitrous.exp.vector import Vector, load, store, get_element, set_element, fill
FloatP = Slice(Float, (4,))
Float4 = Vector(Float, 4)
load4f = load(Float4)
store4f = store(Float4)
get4f = get_element(Float4)
set4f = set_element(Float4)
fill4f = fill(Float4)
@function(Float, a=Float, b=Float, c=Float, d=Float)
def hadd4(a, b, c, d):
v = Float4()
v = set4f(v, 0, a)
v = set4f(v, 1, b)
v = set4f(v, 2, c)
v = set4f(v, 3, d)
return get4f(v, 0) + get4f(v, 1) + get4f(v, 2) + get4f(v, 3)
@function(a=FloatP, p=FloatP, y=FloatP, z=FloatP)
def axpy(a, p, y, z):
store4f(load4f(a) * load4f(p) + load4f(y), z)
@function(v=FloatP, e=Float)
def fill(v, e):
store4f(fill4f(e), v)
class VectorTests(unittest.TestCase):
def test_repr(self):
self.assertEqual(repr(Float4), "<Vector [4 x Float]>")
def test_get_set(self):
m = module([hadd4])
self.assertEqual(m.hadd4(3, 11, 13, 17), 44)
def test_math(self):
m = module([axpy])
a = (Float.c_type * 4)(1, 2, 3, 4)
y = (Float.c_type * 4)(100, 200, 300, 400)
z = (Float.c_type * 4)()
# a * a + y -> z
m.axpy(a, a, y, z)
self.assertEqual(list(z), [101, 204, 309, 416])
def test_fill(self):
m = module([fill])
v = (Float.c_type * 4)(1, 2, 3, 4)
m.fill(v, 100.0)
self.assertEqual(list(v), [100.0] * 4)
| mit | -5,550,125,944,828,673,000 | 20.902778 | 82 | 0.582118 | false |
EDUlib/edx-platform | openedx/core/djangoapps/schedules/config.py | 1 | 4292 | """
Contains configuration for schedules app
"""
from crum import get_current_request
from edx_toggles.toggles import LegacyWaffleSwitch, LegacyWaffleFlagNamespace, LegacyWaffleSwitchNamespace, WaffleFlag
from lms.djangoapps.experiments.flags import ExperimentWaffleFlag
from lms.djangoapps.experiments.models import ExperimentData
WAFFLE_FLAG_NAMESPACE = LegacyWaffleFlagNamespace(name='schedules')
WAFFLE_SWITCH_NAMESPACE = LegacyWaffleSwitchNamespace(name='schedules')
# .. toggle_name: schedules.enable_debugging
# .. toggle_implementation: WaffleFlag
# .. toggle_default: False
# .. toggle_description: Enable debug level of logging for schedules messages.
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2017-09-17
DEBUG_MESSAGE_WAFFLE_FLAG = WaffleFlag('schedules.enable_debugging', __name__)
COURSE_UPDATE_SHOW_UNSUBSCRIBE_WAFFLE_SWITCH = LegacyWaffleSwitch(
WAFFLE_SWITCH_NAMESPACE,
'course_update_show_unsubscribe',
__name__
)
# This experiment waffle is supporting an A/B test we are running on sending course updates from an external service,
# rather than through platform and ACE. See ticket AA-661 for more information.
# Don't use this flag directly, instead use the `set_up_external_updates_for_enrollment` and `query_external_updates`
# methods below. We save this flag decision at enrollment time and don't change it even if the flag changes. So you
# can't just directly look at flag result.
_EXTERNAL_COURSE_UPDATES_EXPERIMENT_ID = 18
_EXTERNAL_COURSE_UPDATES_FLAG = ExperimentWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'external_updates', __name__,
experiment_id=_EXTERNAL_COURSE_UPDATES_EXPERIMENT_ID,
use_course_aware_bucketing=False)
def set_up_external_updates_for_enrollment(user, course_key):
"""
Returns and stores whether a user should be getting the "external course updates" experience.
See the description of this experiment with the waffle flag definition above. But basically, if a user is getting
    external course updates for a course, edx-platform just stops sending any updates, trusting that the user is
receiving them elsewhere.
This is basically just a wrapper around our experiment waffle flag, but only buckets users that directly enrolled
(rather than users enrolled by staff), for technical "waffle-flags-can-only-get-the-user-from-the-request" reasons.
This saves the decision in experiment data tables. It is also idempotent and will not change after the first
call for a given user/course, regardless of how the waffle answer changes.
"""
request = get_current_request()
user_is_valid = request and hasattr(request, 'user') and request.user.id and request.user.id == user.id
experiment_on = _EXTERNAL_COURSE_UPDATES_FLAG.is_experiment_on(course_key)
if user_is_valid and experiment_on:
# Don't send tracking info as it might differ from our saved value, and we already send the bucket in
# enrollment segment events.
bucket = _EXTERNAL_COURSE_UPDATES_FLAG.get_bucket(course_key, track=False)
else:
bucket = -1 # a special value meaning to ignore this enrollment for analytics purposes
data, _created = ExperimentData.objects.get_or_create(experiment_id=_EXTERNAL_COURSE_UPDATES_EXPERIMENT_ID,
user_id=user.id, key=str(course_key),
defaults={'value': str(bucket)})
return int(data.value)
def query_external_updates(user_id, course_id):
"""
Returns a queryset indicating whether the user get the "external course updates" experience for the given course.
This is designed for use as a subquery in a larger queryset, which is why it returns a queryset, rather than a
boolean. But it can also be used to spot-check whether a user is in the external experience for a given course by
casting the returned queryset to a bool.
This looks up the experiment data, saved at enrollment time.
"""
return ExperimentData.objects.filter(experiment_id=_EXTERNAL_COURSE_UPDATES_EXPERIMENT_ID,
user_id=user_id, key=course_id, value='1')
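# Illustrative usage (not part of this module): because the helper returns a
# queryset, a simple spot-check can rely on its truthiness, e.g.
#   in_external_experience = bool(query_external_updates(user.id, str(course_key)))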
| agpl-3.0 | 5,457,186,290,929,313,000 | 52.65 | 119 | 0.715051 | false |
BlackHC/mdp | tests/test_dsl.py | 1 | 4064 | # Copyright 2017 Andreas Kirsch <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from blackhc.mdp import dsl
# noinspection PyStatementEffect,PyPep8Naming
def test_coverage():
with dsl.new() as new_mdp:
stateA = dsl.state()
stateB = dsl.state()
actionA = dsl.action()
actionB = dsl.action()
stateA & actionA > stateA
stateA & actionB > stateB
stateB & (actionA | actionB) > stateB
new_mdp.to_env()
new_mdp.to_graph()
return new_mdp.validate()
# noinspection PyStatementEffect
def test_weighted_next_states():
with dsl.new() as new_mdp:
state = dsl.state()
action = dsl.action()
state & action > state * 0.5
state & action > state * 2 | state * 5
new_mdp.validate()
# noinspection PyStatementEffect
def test_weighted_rewards():
with dsl.new() as new_mdp:
state = dsl.state()
action = dsl.action()
state & action > dsl.reward(1) * 1
state & action > dsl.reward(1) * 1 | dsl.reward(2) * 3
# noinspection PyStatementEffect,PyPep8Naming
def test_alternatives():
with dsl.new():
stateA = dsl.state()
stateB = dsl.state()
actionA = dsl.action()
actionB = dsl.action()
(stateA | stateB) & (actionA | actionB) > (stateA | stateB)
dsl.to_env()
# noinspection PyStatementEffect,PyPep8Naming
def test_alternatives2():
with dsl.new():
stateA = dsl.state()
stateB = dsl.state()
actionA = dsl.action()
actionB = dsl.action()
(stateA | stateB) & (actionA | actionB > stateA | stateB)
dsl.to_env()
# noinspection PyStatementEffect,PyPep8Naming
def test_alternatives3():
with dsl.new():
stateA = dsl.state()
stateB = dsl.state()
actionA = dsl.action()
actionB = dsl.action()
(stateA | stateB) & ((actionA > stateA) | (actionB > stateB))
dsl.to_env()
# noinspection PyStatementEffect,PyPep8Naming
def test_coverage_nmrp():
with dsl.new():
stateA = dsl.state()
stateB = dsl.state()
actionA = dsl.action()
actionB = dsl.action()
stateA & actionA > stateA
stateA & actionB > stateB
stateB & (actionA | actionB) > stateB
dsl.to_env()
# noinspection PyStatementEffect,PyPep8Naming
def test_multi_states_fail():
with pytest.raises(dsl.SyntaxError):
with dsl.new():
stateA = dsl.state()
stateB = dsl.state()
stateA & stateB
# noinspection PyStatementEffect,PyPep8Naming
def test_multi_actions_fail():
with pytest.raises(dsl.SyntaxError):
with dsl.new():
actionA = dsl.action()
actionB = dsl.action()
actionA & actionB
# noinspection PyStatementEffect,PyPep8Naming
def test_alternative_mismatch_final_fail():
with pytest.raises(dsl.SyntaxError):
with dsl.new():
stateA = dsl.state()
actionB = dsl.action()
stateA | actionB > stateA
# noinspection PyStatementEffect,PyPep8Naming
def test_mapping_alternative_mismatch_fail():
with pytest.raises(dsl.SyntaxError):
with dsl.new():
stateA = dsl.state()
actionB = dsl.action()
stateA > stateA | actionB
def test_missing_terminal_state_fail():
with pytest.raises(ValueError):
with dsl.new() as new_mdp:
dsl.state()
dsl.action()
new_mdp.validate()
| apache-2.0 | -6,233,214,120,182,796,000 | 24.88535 | 74 | 0.616142 | false |
MacGyverNL/alot | alot/buffers/thread.py | 1 | 13115 | # Copyright (C) 2011-2018 Patrick Totzke <[email protected]>
# Copyright © 2018 Dylan Baker
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import asyncio
import urwid
import logging
from urwidtrees import ArrowTree, TreeBox, NestedTree
from .buffer import Buffer
from ..settings.const import settings
from ..widgets.thread import ThreadTree
from .. import commands
from ..db.errors import NonexistantObjectError
class ThreadBuffer(Buffer):
"""displays a thread as a tree of messages."""
modename = 'thread'
def __init__(self, ui, thread):
"""
:param ui: main UI
:type ui: :class:`~alot.ui.UI`
:param thread: thread to display
:type thread: :class:`~alot.db.Thread`
"""
self.thread = thread
self.message_count = thread.get_total_messages()
# two semaphores for auto-removal of unread tag
self._auto_unread_dont_touch_mids = set([])
self._auto_unread_writing = False
self._indent_width = settings.get('thread_indent_replies')
self.rebuild()
Buffer.__init__(self, ui, self.body)
def __str__(self):
return '[thread] %s (%d message%s)' % (self.thread.get_subject(),
self.message_count,
's' * (self.message_count > 1))
def translated_tags_str(self, intersection=False):
tags = self.thread.get_tags(intersection=intersection)
trans = [settings.get_tagstring_representation(tag)['translated']
for tag in tags]
return ' '.join(trans)
def get_info(self):
info = {}
info['subject'] = self.thread.get_subject()
info['authors'] = self.thread.get_authors_string()
info['tid'] = self.thread.get_thread_id()
info['message_count'] = self.message_count
info['thread_tags'] = self.translated_tags_str()
info['intersection_tags'] = self.translated_tags_str(intersection=True)
return info
def get_selected_thread(self):
"""Return the displayed :class:`~alot.db.Thread`."""
return self.thread
def rebuild(self):
try:
self.thread.refresh()
except NonexistantObjectError:
self.body = urwid.SolidFill()
self.message_count = 0
return
self._tree = ThreadTree(self.thread)
# define A to be the tree to be wrapped by a NestedTree and displayed.
# We wrap the thread tree into an ArrowTree for decoration if
# indentation was requested and otherwise use it as is.
if self._indent_width == 0:
A = self._tree
else:
# we want decoration.
bars_att = settings.get_theming_attribute('thread', 'arrow_bars')
# only add arrow heads if there is space (indent > 1).
heads_char = None
heads_att = None
if self._indent_width > 1:
heads_char = u'\u27a4'
heads_att = settings.get_theming_attribute('thread',
'arrow_heads')
A = ArrowTree(
self._tree,
indent=self._indent_width,
childbar_offset=0,
arrow_tip_att=heads_att,
arrow_tip_char=heads_char,
arrow_att=bars_att)
self._nested_tree = NestedTree(A, interpret_covered=True)
self.body = TreeBox(self._nested_tree)
self.message_count = self.thread.get_total_messages()
def render(self, size, focus=False):
if self.message_count == 0:
return self.body.render(size, focus)
if settings.get('auto_remove_unread'):
logging.debug('Tbuffer: auto remove unread tag from msg?')
msg = self.get_selected_message()
mid = msg.get_message_id()
focus_pos = self.body.get_focus()[1]
summary_pos = (self.body.get_focus()[1][0], (0,))
cursor_on_non_summary = (focus_pos != summary_pos)
if cursor_on_non_summary:
if mid not in self._auto_unread_dont_touch_mids:
if 'unread' in msg.get_tags():
logging.debug('Tbuffer: removing unread')
def clear():
self._auto_unread_writing = False
self._auto_unread_dont_touch_mids.add(mid)
self._auto_unread_writing = True
msg.remove_tags(['unread'], afterwards=clear)
fcmd = commands.globals.FlushCommand(silent=True)
asyncio.get_event_loop().create_task(
self.ui.apply_command(fcmd))
else:
logging.debug('Tbuffer: No, msg not unread')
else:
logging.debug('Tbuffer: No, mid locked for autorm-unread')
else:
if not self._auto_unread_writing and \
mid in self._auto_unread_dont_touch_mids:
self._auto_unread_dont_touch_mids.remove(mid)
logging.debug('Tbuffer: No, cursor on summary')
return self.body.render(size, focus)
def get_selected_mid(self):
"""Return Message ID of focussed message."""
return self.body.get_focus()[1][0]
def get_selected_message_position(self):
"""Return position of focussed message in the thread tree."""
return self._sanitize_position((self.get_selected_mid(),))
def get_selected_messagetree(self):
"""Return currently focussed :class:`MessageTree`."""
return self._nested_tree[self.body.get_focus()[1][:1]]
def get_selected_message(self):
"""Return focussed :class:`~alot.db.message.Message`."""
return self.get_selected_messagetree()._message
def get_messagetree_positions(self):
"""
Return a Generator to walk through all positions of
:class:`MessageTree` in the :class:`ThreadTree` of this buffer.
"""
return [(pos,) for pos in self._tree.positions()]
def messagetrees(self):
"""
returns a Generator of all :class:`MessageTree` in the
:class:`ThreadTree` of this buffer.
"""
for pos in self._tree.positions():
yield self._tree[pos]
def refresh(self):
"""Refresh and flush caches of Thread tree."""
self.body.refresh()
# needed for ui.get_deep_focus..
def get_focus(self):
"Get the focus from the underlying body widget."
return self.body.get_focus()
def set_focus(self, pos):
"Set the focus in the underlying body widget."
logging.debug('setting focus to %s ', pos)
self.body.set_focus(pos)
def focus_first(self):
"""set focus to first message of thread"""
self.body.set_focus(self._nested_tree.root)
def focus_last(self):
self.body.set_focus(next(self._nested_tree.positions(reverse=True)))
def _sanitize_position(self, pos):
return self._nested_tree._sanitize_position(pos,
self._nested_tree._tree)
def focus_selected_message(self):
"""focus the summary line of currently focussed message"""
# move focus to summary (root of current MessageTree)
self.set_focus(self.get_selected_message_position())
def focus_parent(self):
"""move focus to parent of currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.parent_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
def focus_first_reply(self):
"""move focus to first reply to currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.first_child_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
def focus_last_reply(self):
"""move focus to last reply to currently focussed message"""
mid = self.get_selected_mid()
newpos = self._tree.last_child_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
def focus_next_sibling(self):
"""focus next sibling of currently focussed message in thread tree"""
mid = self.get_selected_mid()
newpos = self._tree.next_sibling_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
def focus_prev_sibling(self):
"""
focus previous sibling of currently focussed message in thread tree
"""
mid = self.get_selected_mid()
localroot = self._sanitize_position((mid,))
if localroot == self.get_focus()[1]:
newpos = self._tree.prev_sibling_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
else:
newpos = localroot
if newpos is not None:
self.body.set_focus(newpos)
def focus_next(self):
"""focus next message in depth first order"""
mid = self.get_selected_mid()
newpos = self._tree.next_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
def focus_prev(self):
"""focus previous message in depth first order"""
mid = self.get_selected_mid()
localroot = self._sanitize_position((mid,))
if localroot == self.get_focus()[1]:
newpos = self._tree.prev_position(mid)
if newpos is not None:
newpos = self._sanitize_position((newpos,))
else:
newpos = localroot
if newpos is not None:
self.body.set_focus(newpos)
def focus_property(self, prop, direction):
"""does a walk in the given direction and focuses the
first message tree that matches the given property"""
newpos = self.get_selected_mid()
newpos = direction(newpos)
while newpos is not None:
MT = self._tree[newpos]
if prop(MT):
newpos = self._sanitize_position((newpos,))
self.body.set_focus(newpos)
break
newpos = direction(newpos)
def focus_next_matching(self, querystring):
"""focus next matching message in depth first order"""
self.focus_property(lambda x: x._message.matches(querystring),
self._tree.next_position)
def focus_prev_matching(self, querystring):
"""focus previous matching message in depth first order"""
self.focus_property(lambda x: x._message.matches(querystring),
self._tree.prev_position)
def focus_next_unfolded(self):
"""focus next unfolded message in depth first order"""
self.focus_property(lambda x: not x.is_collapsed(x.root),
self._tree.next_position)
def focus_prev_unfolded(self):
"""focus previous unfolded message in depth first order"""
self.focus_property(lambda x: not x.is_collapsed(x.root),
self._tree.prev_position)
def expand(self, msgpos):
"""expand message at given position"""
MT = self._tree[msgpos]
MT.expand(MT.root)
def messagetree_at_position(self, pos):
"""get :class:`MessageTree` for given position"""
return self._tree[pos[0]]
def expand_all(self):
"""expand all messages in thread"""
for MT in self.messagetrees():
MT.expand(MT.root)
def collapse(self, msgpos):
"""collapse message at given position"""
MT = self._tree[msgpos]
MT.collapse(MT.root)
self.focus_selected_message()
def collapse_all(self):
"""collapse all messages in thread"""
for MT in self.messagetrees():
MT.collapse(MT.root)
self.focus_selected_message()
def unfold_matching(self, querystring, focus_first=True):
"""
expand all messages that match a given querystring.
:param querystring: query to match
:type querystring: str
:param focus_first: set the focus to the first matching message
:type focus_first: bool
"""
first = None
for MT in self.messagetrees():
msg = MT._message
if msg.matches(querystring):
MT.expand(MT.root)
if first is None:
first = (self._tree.position_of_messagetree(MT), MT.root)
self.body.set_focus(first)
else:
MT.collapse(MT.root)
self.body.refresh()
| gpl-3.0 | 995,784,562,381,729,800 | 37.011594 | 79 | 0.574577 | false |
rustychris/stompy | stompy/grid/front.py | 1 | 109620 | """
An advancing front grid generator for use with unstructured_grid
Largely a port of paver.py.
"""
from __future__ import print_function
import math
import numpy as np
from collections import defaultdict
import time
from scipy import optimize as opt
import pdb
import logging
log=logging.getLogger(__name__)
from shapely import geometry
from . import (unstructured_grid,
exact_delaunay,
shadow_cdt)
from .. import utils
try:
import matplotlib.pyplot as plt
except ImportError:
log.warning("Plotting not available - no matplotlib")
plt=None
def circumcenter_py(p1,p2,p3):
""" Compute circumcenter of a single triangle using pure python.
For small input sizes, this is much faster than using the vectorized
numpy version in utils.
"""
ref = p1
p1x = 0
p1y = 0
p2x = p2[0] - ref[0]
p2y = p2[1] - ref[1]
p3x = p3[0] - ref[0]
p3y = p3[1] - ref[1]
# taken from TRANSFORMER_gang.f90
dd=2.0*((p1x-p2x)*(p1y-p3y) -(p1x-p3x)*(p1y-p2y))
b_com=p1x*p1x+p1y*p1y
b1=b_com-p2x*p2x-p2y*p2y
b2=b_com-p3x*p3x-p3y*p3y
    # avoid division by zero if the points are collinear
dd=max(dd,1e-40)
return [ (b1*(p1y-p3y)-b2*(p1y-p2y))/dd + ref[0] ,
(b2*(p1x-p2x)-b1*(p1x-p3x))/dd + ref[1] ]
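# Worked example (illustrative): for the right triangle (0,0), (2,0), (0,2) the
# circumcenter is the midpoint of the hypotenuse:
#   circumcenter_py([0, 0], [2, 0], [0, 2])  ->  [1.0, 1.0]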
# from numba import jit, int32, float64
# @jit(nopython=True)
# @jit
# @jit(float64(float64[:],float64[:,:,:],float64),nopython=True)
def one_point_cost(pnt,edges,target_length=5.0):
# pnt is intended to complete a triangle with each
# pair of points in edges, and should be to the left
# of each edge
penalty = 0
max_angle = 85.0*np.pi/180.
# all_edges[triangle_i,{ab,bc,ca},{x,y}]
all_edges = np.zeros( (edges.shape[0], 3 ,2), np.float64 )
# get the edges:
all_edges[:,0,:] = edges[:,0] - pnt # ab
all_edges[:,1,:] = edges[:,1] - edges[:,0] # bc
all_edges[:,2,:] = pnt - edges[:,1] # ca
i = np.arange(3)
im1 = (i-1)%3
#--# cost based on angle:
abs_angles = np.arctan2( all_edges[:,:,1], all_edges[:,:,0] )
all_angles = (np.pi - (abs_angles[:,i] - abs_angles[:,im1]) % (2*np.pi)) % (2*np.pi)
if 1:
# 60 is what it's been for a while, but I think in one situation
# this put too much weight on small angles.
# tried considering just large angles, but that quickly blew up.
# even just changing this to 50 still blows up.
# how about a small tweak - s/60/58/ ??
worst_angle = np.abs(all_angles - 60*np.pi/180.).max()
alpha = worst_angle /(max_angle - 60*np.pi/180.0)
# 10**alpha: edges got very short...
# 5**alpha - 1: closer, but overall still short edges.
# alpha**5: angles look kind of bad
angle_penalty = 10*alpha**5
# Seems like it doesn't try hard enough to get rid of almost bad angles.
# in one case, there is a small angle of 33.86 degrees, and another angle
# of 84.26 degrees. so the cost function only cares about the small angle
# because it is slightly more deviant from 60deg, but we may be in a cell
# where the only freedom we have is to change the larger angles.
# so add this in:
if 1:
# extra exponential penalty for nearly bad triangles:
# These values mean that 3 degrees before the triangle is invalid
# the exponential cuts in and will add a factor of e by the time the
# triangles is invalid.
scale_rad = 3.0*np.pi/180. # radians - e-folding scale of the cost
# max_angle - 2.0*scale_rad works..
thresh = max_angle - 1.0*scale_rad # angle at which the exponential 'cuts in'
big_angle_penalty = np.exp( (all_angles.max() - thresh) / scale_rad)
else:
alphas = (all_angles - 60*np.pi/180.) / (max_angle - 60*np.pi/180.)
alphas = 10*alphas**4
angle_penalty = alphas.sum()
penalty += angle_penalty + big_angle_penalty
#--# Length penalties:
if 1:
ab_lens = (all_edges[:,0,:]**2).sum(axis=1)
ca_lens = (all_edges[:,2,:]**2).sum(axis=1)
min_ab=ab_lens.min() # min(ab_lens)
min_ca=ca_lens.min() # min(ca_lens)
else:
# maybe better for numba?
min_ab=np.inf
min_ca=np.inf
for idx in range(edges.shape[0]):
l_ab=(all_edges[idx,0,:]**2).sum()
l_ca=(all_edges[idx,2,:]**2).sum()
if l_ab<min_ab:
min_ab=l_ab
if l_ca<min_ca:
min_ca=l_ca
# had been using ab_lens.min(), but numba didn't like that.
# okay - the problem is that numba doesn't understand the sum
# above, and thinks that ab_lens is a scalar.
min_len = min( min_ab,min_ca )
max_len = max( min_ab,min_ca )
tl2=target_length**2
# min_len can be 0.0, so soften undershoot
undershoot = tl2 / (min_len + 0.01*tl2)
overshoot = max_len / tl2
length_penalty = 0
length_factor = 2
length_penalty += length_factor*(max(undershoot,1) - 1)
length_penalty += length_factor*(max(overshoot,1) - 1)
    # paver had two other approaches, effectively commented out
penalty += length_penalty
return penalty
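# Illustrative use (a sketch, not from the original module): the cost above is
# intended to be minimized over candidate point locations, e.g. with scipy's
# simplex search:
#   edges = np.array([[[0., 0.], [1., 0.]]])   # one existing edge a->b
#   best = opt.fmin(one_point_cost, [0.5, 0.5], args=(edges, 1.0), disp=0)
# which moves the trial point toward the apex of a near-equilateral triangle on
# the left side of the edge (roughly [0.5, 0.87] for target_length=1.0).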
class Curve(object):
"""
Boundaries which can be open or closed, indexable
by a floating point value (including modulo arithmetic).
By default, indexes by distance along each segment.
"""
class CurveException(Exception):
pass
def __init__(self,points,closed=True,ccw=None):
"""
points: [N,2]
closed: if True, treat this as a closed ring
ccw: if True, make sure the order is ccw,
False - make sure cw
None - leave as is.
"""
if ccw is not None:
area=utils.signed_area(points)
if (area>0) != bool(ccw):
points=points[::-1,:]
self.points=np.asarray(points)
self.closed=bool(closed)
if self.closed:
if np.all(self.points[0]==self.points[-1]):
pass # already duplicated
else:
self.points = np.concatenate( (self.points,
self.points[:1,:] ) )
else:
assert not np.all(self.points[0]==self.points[-1])
self.distances=utils.dist_along(self.points)
def __call__(self,f,metric='distance'):
if metric=='distance':
if self.closed:
# wraps around
# double mod in case f==-eps
f=(f % self.distances[-1]) % self.distances[-1]
# side='right' ensures that f=0 works
# it's unfortunately possible to get f=-eps, which rounds in
# a way such that (f % distances[-1]) == distances[-1]
# the double mod above might solve that
idxs=np.searchsorted(self.distances,f,side='right') - 1
assert not np.any( f>self.distances[-1] ),"Curve: Range or round off problem"
idxs=idxs.clip(0,len(self.distances)-2) # to be sure equality doesn't push us off the end
alphas = (f - self.distances[idxs]) / (self.distances[idxs+1]-self.distances[idxs])
if not np.isscalar(alphas):
alphas = alphas[:,None]
return (1-alphas)*self.points[idxs] + alphas*self.points[idxs+1]
else:
assert False
def tangent(self,f,metric='distance'):
assert metric=='distance'
if self.closed:
# wraps around
# double mod in case f==-eps
f=(f % self.distances[-1]) % self.distances[-1]
# side='right' ensures that f=0 works
# it's unfortunately possible to get f=-eps, which rounds in
# a way such that (f % distances[-1]) == distances[-1]
# the double mod above might solve that
idxs=np.searchsorted(self.distances,f,side='right') - 1
assert not np.any( f>self.distances[-1] ),"Curve: Range or round off problem"
idxs=idxs.clip(0,len(self.distances)-2) # to be sure equality doesn't push us off the end
tng=utils.to_unit( self.points[idxs+1] - self.points[idxs] )
return tng
def total_distance(self):
return self.distances[-1]
def upsample(self,scale,return_sources=False):
"""
return_sources: return a second array having the distance values for each
return point, if this is true.
"""
# def upsample_linearring(points,density,closed_ring=1,return_sources=False):
new_segments = []
sources = []
for i,(A,B) in enumerate(zip( self.points[:-1,:],
self.points[1:,:] ) ):
l = utils.dist(B-A)
local_scale = scale( 0.5*(A+B) )
npoints = max(1,round( l/local_scale ))
alphas = np.arange(npoints) / float(npoints)
alphas=alphas[:,None]
new_segment = (1.0-alphas)*A + alphas*B
new_segments.append(new_segment)
if return_sources:
sources.append(self.distances[i] + alphas*l)
new_points = np.concatenate( new_segments )
if return_sources:
sources = np.concatenate(sources)
return new_points,sources
else:
return new_points
def distance_away(self,anchor_f,signed_distance,rtol=0.05):
""" Find a point on the curve signed_distance away from the
point corresponding to anchor_f, within the given relative tolerance.
returns new_f,new_x.
If a point could not be found within the requested tolerance, raises
a self.CurveException.
"""
sign=int(np.sign(signed_distance))
abs_dist=np.abs(signed_distance)
anchor_pnt=self(anchor_f)
anchor_idx_a=np.searchsorted(self.distances,anchor_f,side='right') - 1
anchor_idx_b=(anchor_idx_a+1)%(len(self.points)-1)
if sign<0:
anchor_idx_a,anchor_idx_b=anchor_idx_b,anchor_idx_a
# How many segments of the curve are we willing to examine? All of them,
# but no more.
Npnts=len(self.points)-1 # duplicate for closed ring
max_segs=Npnts
for segi in range(max_segs):
idxa=anchor_idx_a+sign*segi
idxb=idxa+sign # +-1
idxa=idxa%Npnts
idxb=idxb%Npnts
if segi==0:
# only care about the portion of the first segment
# "ahead" of anchor (TODO: handle sign<0)
pnta=anchor_pnt
else:
pnta=self.points[idxa]
pntb=self.points[idxb]
dista=utils.dist(pnta - anchor_pnt)
distb=utils.dist(pntb - anchor_pnt)
# as written, this may bail out of the iteration with an
# inferior solution (i.e. stop when the error is 5%, rather
# than go to the next segment where we could get an exact
# answer). It's not too bad though.
if (dista<(1-rtol)*abs_dist) and (distb<(1-rtol)*abs_dist):
# No way this segment is good.
continue
else:
break
else:
# i.e. checked everybody, could never get far enough
# away
raise self.CurveException("Could not get far enough away")
assert dista<distb
assert dista<(1+rtol)*abs_dist
assert distb>(1-rtol)*abs_dist
if segi==0:
close_f=anchor_f
else:
close_f=self.distances[idxa]
far_f=self.distances[idxb]
if sign*far_f<sign*close_f:
far_f+=sign*self.distances[-1]
# explicitly check the far end point
if abs(distb-abs_dist) / abs_dist < rtol:
# good enough
result=far_f,self(far_f)
else:
# if there are large disparities in adjacent edge lengths
# it's possible that it takes many iterations here.
for maxit in range(20):
mid_f=0.5*(close_f+far_f)
pnt_mid=self(mid_f)
dist_mid=utils.dist(pnt_mid - anchor_pnt)
rel_err = (dist_mid-abs_dist)/abs_dist
if rel_err < -rtol:
close_f=mid_f
elif rel_err > rtol:
far_f=mid_f
else:
result=mid_f,pnt_mid
break
else:
assert False
return result
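# Hypothetical usage (values are illustrative, not from the original code):
#   new_f, new_x = curve.distance_away(120.0, 35.0)    # ~35 units forward
#   prev_f, prev_x = curve.distance_away(120.0, -35.0) # ~35 units backward
# Either call raises Curve.CurveException if no point within rtol can be found.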
def point_to_f(self,x,f_start=0,direction=1,rel_tol=1e-4):
"""
Return the ring_f which yields a point close to x.
This scans the points in the curve, starting with f_start
and proceeding in the given direction.
if direction is 0, both directions will be attempted and
the first valid result returned.
rel_tol: stop when a point is found within rel_tol*len(segment)
of a segment.
This is intended for finding f for a point that is already
approximately on the curve. So it's a greedy approach.
To project a point onto the curve, specify rel_tol='best'
"""
# Walk along the curve, looking for a segment which approximately
# contains x.
if rel_tol=='best':
# Do a full sweep, check all segments.
# Could be smarter, but for now this isn't performance critical
segs=np.stack( [self.points[:-1,:],
self.points[1:,:]], axis=1)
dists,alphas = utils.point_segments_distance(x,segs,return_alpha=True)
best=np.argmin(dists)
seg_len=utils.dist( segs[best,0], segs[best,1] )
new_f=self.distances[best] + alphas[best]*seg_len
return new_f
else:
# Have to be careful about exact matches. distances[i] should always
# yield idx_start=i.
# But anything in between depends on the direction
if direction==1:
idx_start=np.searchsorted(self.distances,f_start,side='right') - 1
elif direction==-1:
idx_start=np.searchsorted(self.distances,f_start,side='left')
elif direction==0:
# try either, accept any hit.
try:
return self.point_to_f(x,f_start=f_start,direction=1,rel_tol=rel_tol)
except self.CurveException:
return self.point_to_f(x,f_start=f_start,direction=-1,rel_tol=rel_tol)
else:
raise Exception("direction must be +-1")
# Start traversing the segments:
seg_idx_a=idx_start
best=None
# closed loops have a point duplicated, and open strings
# have one less segment than points
# Either way, -1.
Nseg=len(self.points)-1
for i in range(Nseg): # max possible traversal
if self.closed:
seg_idx_b=(seg_idx_a + direction)%Nseg
else:
seg_idx_b=seg_idx_a+direction
# Wrapping
if seg_idx_b<0:
break
if seg_idx_b>Nseg: # same as >=len(self.points)
break
seg=self.points[ [seg_idx_a,seg_idx_b] ]
seg_len=utils.dist(seg[0],seg[1])
dist,alpha = utils.point_segment_distance(x,seg,return_alpha=True)
if rel_tol=='best':
if (best is None) or (dist<best[0]):
new_f=self.distances[seg_idx_a] + direction*alpha*seg_len
best=[dist,new_f,seg_idx_a,seg_idx_b]
else:
if dist/seg_len < rel_tol:
# How to get to an f from this?
new_f=self.distances[seg_idx_a] + direction*alpha*seg_len
if not self.closed:
new_f=max(0,min(new_f,self.distances[-1]))
return new_f
seg_idx_a=seg_idx_b
if rel_tol=='best':
return best[1]
raise self.CurveException("Failed to find a point within tolerance")
def is_forward(self,fa,fb,fc):
""" return true if fa,fb and fc are distinct and
ordered CCW around the curve
"""
if fa==fb or fb==fc or fc==fa:
return False
if self.closed:
d=self.total_distance()
return ((fb-fa) % d) < ((fc-fa)%d)
else:
return fa<fb<fc
# return ( (fb-fa) < (fc-fa) )
def is_reverse(self,fa,fb,fc):
# for closed curves, is_reverse=not is_forward, but
# for open curves, that's not necessarily true.
# when degenerate situations are included, then they
# are not opposites even for closed curves.
if fa==fb or fb==fc or fc==fa:
return False
if self.closed:
d=self.total_distance()
return ((fb-fc) % d) < ((fa-fc)%d)
else:
# return (fa-fb) < (fa-fc)
return fc<fb<fa
def is_ordered(self,fa,fb,fc):
"""
Non-robust check for fb falling between fa and fc. For a closed
curve, this resorts to the heuristic of whether fb falls
between fa and fc on the shorter way around.
"""
if self.closed:
tdist=self.total_distance()
if (fa-fc) % tdist < tdist/2:
if self.is_forward(fc,fb,fa):
return True
else:
if self.is_forward(fa,fb,fc):
return True
return False
else:
return (fa<fb<fc) or (fa>fb>fc)
def signed_area(self):
assert self.closed
return utils.signed_area(self.points)
def reverse(self):
return Curve(points=self.points[::-1,:],
closed=self.closed)
def plot(self,ax=None,**kw):
ax=ax or plt.gca()
return ax.plot(self.points[:,0],self.points[:,1],**kw)[0]
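# A minimal usage sketch for Curve (names and values are illustrative):
#   ring = Curve(np.array([[0.,0.],[100.,0.],[100.,100.],[0.,100.]]), closed=True)
#   ring.total_distance()          # 400.0 for this square
#   ring(50.0)                     # point 50 units along the boundary: [50, 0]
#   ring.point_to_f([100., 25.])   # recovers ~125.0
#   pts = ring.upsample(lambda X: 10.0)  # resample at roughly 10-unit spacing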
def internal_angle(A,B,C):
BA=A-B
BC=C-B
theta_BA = np.arctan2( BA[1], BA[0] )
theta_BC = np.arctan2( BC[1], BC[0] )
return (theta_BA - theta_BC) % (2*np.pi)
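# Quick sanity check (illustrative): internal_angle(np.array([0.,1.]),
# np.array([0.,0.]), np.array([1.,0.])) returns pi/2, the CCW angle at B
# measured from ray B->C around to ray B->A.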
class StrategyFailed(Exception):
pass
class Strategy(object):
def metric(self,site,scale_factor):
assert False
def execute(self,site):
"""
Apply this strategy to the given Site.
Returns a dict with nodes,cells which were modified
"""
assert False
class WallStrategy(Strategy):
"""
Add two edges and a new triangle to the forward side of the
site.
"""
def __str__(self):
return "<Wall>"
def metric(self,site):
# rough translation from paver
theta=site.internal_angle * 180/np.pi
scale_factor = site.edge_length / site.local_length
# Wall can be applied in a wide variety of situations
# for angles greater than 90, Wall may be the only option
# for angles less than 60, we can't do a wall.
# np.clip( (120 - theta) / 30, 0,np.inf)
# at 90, we can try, but a bisect would be better.
# at 180, this is the only option.
return (180-theta) / 180
def new_point(self,site):
na,nb,nc= site.abc
grid=site.grid
b,c = grid.nodes['x'][ [nb,nc] ]
bc=c-b
return b + utils.rot(np.pi/3,bc)
def execute(self,site):
na,nb,nc= site.abc
grid=site.grid
new_x = self.new_point(site)
nd=grid.add_node(x=new_x,fixed=site.af.FREE)
# new_c=grid.add_cell_and_edges( [nb,nc,nd] )
j0=grid.nodes_to_edge(nb,nc)
unmesh2=[grid.UNMESHED,grid.UNMESHED]
# the correct unmeshed will get overwritten in
# add cell.
j1=grid.add_edge(nodes=[nc,nd],cells=unmesh2)
j2=grid.add_edge(nodes=[nb,nd],cells=unmesh2)
new_c=grid.add_cell(nodes=[nb,nc,nd],
edges=[j0,j1,j2])
return {'nodes': [nd],
'cells': [new_c] }
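# Geometry note (illustrative): with b=(0,0) and c=(1,0), new_point() returns
# b + rot(pi/3, c-b) = (0.5, ~0.866), the apex of an equilateral triangle built
# on the b-c edge; execute() then adds that node plus edges c-d and b-d and the
# cell [b,c,d].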
class WallCloseStrategy(WallStrategy):
"""
Wall, but with a very close-in initial guess point
"""
def metric(self,site):
# always try regular Wall first.
return 0.5+super(WallCloseStrategy,self).metric(site)
def new_point(self,site):
na,nb,nc= site.abc
grid=site.grid
b,c = grid.nodes['x'][ [nb,nc] ]
bc=c-b
usual_x=b + utils.rot(np.pi/3,bc)
midpoint=0.5*(b+c)
alpha=0.95
return alpha*midpoint + (1-alpha)*usual_x
class BisectStrategy(Strategy):
"""
Add three edges and two new triangles.
"""
def __str__(self):
return "<Bisect>"
def metric(self,site):
# rough translation from paver
theta=site.internal_angle * 180/np.pi
scale_factor = site.edge_length / site.local_length
# Ideal is 120 degrees for a bisect
# Can't bisect when it's nearing 180.
if theta> 2*89:
return np.inf # not allowed
else:
ideal=120 + (1-scale_factor)*30
return np.abs( (theta-ideal)/ 50 ).clip(0,1)
def execute(self,site):
na,nb,nc= site.abc
grid=site.grid
b,c = grid.nodes['x'][ [nb,nc] ]
bc=c-b
new_x = b + utils.rot(np.pi/3,bc)
nd=grid.add_node(x=new_x,fixed=site.af.FREE)
# new_c=grid.add_cell_and_edges( [nb,nc,nd] )
j_ab=grid.nodes_to_edge(na,nb)
j_bc=grid.nodes_to_edge(nb,nc)
unmesh2=[grid.UNMESHED,grid.UNMESHED]
# the correct unmeshed will get overwritten in
# add cell.
j_cd=grid.add_edge(nodes=[nc,nd],cells=unmesh2)
j_bd=grid.add_edge(nodes=[nb,nd],cells=unmesh2)
j_ad=grid.add_edge(nodes=[na,nd],cells=unmesh2)
new_c1=grid.add_cell(nodes=[nb,nc,nd],
edges=[j_bc,j_cd,j_bd])
new_c2=grid.add_cell(nodes=[na,nb,nd],
edges=[j_ab,j_bd,j_ad])
return {'nodes': [nd],
'cells': [new_c1,new_c2],
'edges': [j_cd,j_bd,j_ad] }
class ResampleStrategy(Strategy):
""" TESTING: resample one step beyond.
"""
def __str__(self):
return "<Resample>"
def nodes_beyond(self,site):
he=site.grid.nodes_to_halfedge(site.abc[0],site.abc[1])
pre_a=he.rev().node_rev()
post_c=he.fwd().fwd().node_fwd()
return pre_a,post_c
def distances(self,site):
"return pair of distances from the site to next node"
pre_a,post_c = self.nodes_beyond(site)
p_pa,p_a,p_c,p_pc=site.grid.nodes['x'][ [pre_a,
site.abc[0],
site.abc[2],
post_c] ]
dists=[utils.dist( p_pa - p_a ),
utils.dist( p_c - p_pc )]
return dists
def metric(self,site):
dists=self.distances(site)
# return a good low score when those distances are short relative
# scale
scale=site.local_length
return min( dists[0]/scale,dists[1]/scale )
def execute(self,site):
grid=site.grid
scale=site.local_length
metric0=self.metric(site)
def maybe_resample(n,anchor,direction):
if n in site.abc:
# went too far around! Bad!
return n
# Is this overly restrictive? What if the edge is nice
# and long, and just wants a node in the middle?
# That should be allowed, until there is some way of annotating
# edges as rigid.
# But at the moment that breaks things.
# it shouldn't though. And the checks here duplicate checks in
# af.resample(). So skip the test, and go for it.
# if grid.nodes['fixed'][n] in [site.af.HINT,site.af.SLIDE]:
try:
n=site.af.resample(n=n,anchor=anchor,scale=scale,
direction=direction)
except Curve.CurveException as exc:
pass
return n
# execute one side at a time, since it's possible for a
# resample on one side to reach into the other side.
he=site.grid.nodes_to_halfedge(site.abc[0],site.abc[1])
pre_a=he.rev().node_rev()
new_pre_a=maybe_resample(pre_a,site.abc[0],-1)
post_c=he.fwd().fwd().node_fwd()
new_post_c=maybe_resample(post_c,site.abc[2],1)
metric=self.metric(site)
if metric>metric0:
# while other nodes may have been modified, these are
# the ones still remaining, and even these are probably of
# no use for optimization. may change this to report no
# optimizable items
return {'nodes':[new_pre_a,new_post_c]}
else:
log.warning("Resample made no improvement (%f => %f)"%(metric0,metric))
raise StrategyFailed("Resample made no improvement")
class CutoffStrategy(Strategy):
def __str__(self):
return "<Cutoff>"
def metric(self,site):
theta=site.internal_angle
scale_factor = site.edge_length / site.local_length
# Cutoff wants a small-ish internal angle
# If the sites edges are long, scale_factor > 1
# and we'd like to be making smaller edges, so ideal angle gets smaller
# this used to be a comparison to 89, but that is too strict.
# there could be an obtuse angle that we'd like to Cutoff and then
# optimize back to acute.
if theta>179*np.pi/180:
return np.inf # not allowed
else:
ideal=60 + (1-scale_factor)*30
return np.abs(theta - ideal*np.pi/180.)
def execute(self,site):
grid=site.grid
na,nb,nc=site.abc
he_ab=grid.nodes_to_halfedge(na,nb)
he_bc=grid.nodes_to_halfedge(nb,nc)
# Special case detect final quad
he_da=he_ab.rev()
he_cd=he_bc.fwd()
j_ab=he_ab.j
j_bc=he_bc.j
ret={'cells':[]}
if he_da.node_rev()==he_cd.node_fwd():
# Quad handling:
nd=he_cd.node_fwd()
abcd=[na,nb,nc,nd]
x=grid.nodes['x'][abcd]
delta_x=np.roll(x,-1,axis=0) - x
seg_theta=np.arctan2(delta_x[:,1],delta_x[:,0])
internal_angles=((np.pi - ((np.roll(seg_theta,-1) - seg_theta))) % (2*np.pi))
# first of these is internal angle of abc, and should be the smallest (based on
# how sites are chosen).
cutoff_bd=internal_angles[0]+internal_angles[2]
cutoff_ac=internal_angles[1]+internal_angles[3]
if cutoff_bd>cutoff_ac:
# angles at b and d are larger, so should add the edge b--d
j_bd=grid.add_edge(nodes=[nb,nd],cells=[grid.UNMESHED,grid.UNMESHED])
c_abd=site.grid.add_cell(nodes=[na,nb,nd],
edges=[j_ab,j_bd,he_da.j])
c_bcd=site.grid.add_cell(nodes=[nb,nc,nd],
edges=[j_bc,he_cd.j,j_bd])
ret['cells'].extend( [c_abd,c_bcd] )
else:
j_ca=grid.add_edge(nodes=[nc,na],cells=[grid.UNMESHED,grid.UNMESHED])
c_abc=site.grid.add_cell(nodes=site.abc,
edges=[j_ab,j_bc,j_ca])
c_cda=site.grid.add_cell(nodes=[nc,nd,na],
edges=[he_cd.j,he_da.j,j_ca])
ret['cells'].extend([c_abc,c_cda])
else:
# non-quad handling:
nd=None
j_ca=grid.nodes_to_edge(nc,na)
if j_ca is None:
# typical, but if we're finishing off the last triangle, this edge
# exists.
j_ca=grid.add_edge(nodes=[nc,na],cells=[grid.UNMESHED,grid.UNMESHED])
c=site.grid.add_cell(nodes=site.abc,
edges=[j_ab,j_bc,j_ca])
ret['cells'].append(c)
return ret
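# Illustrative summary of the quad branch above: when d closes a quad a-b-c-d,
# the diagonal is chosen by comparing sums of opposite internal angles, e.g. if
# angle(b)+angle(d) > angle(a)+angle(c) the b--d diagonal is added, producing
# triangles (a,b,d) and (b,c,d); otherwise the c--a diagonal is used.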
class SplitQuadStrategy(Strategy):
"""
When the remaining node string has 4 nodes, often splitting this
into two triangles and calling it done is the thing to do.
"""
def __str__(self):
return "<SplitQuad>"
def metric(self,site):
he_ab=site.grid.nodes_to_halfedge(site.abc[0],site.abc[1])
he_da=he_ab.rev()
he_cd=he_da.rev()
c_maybe=he_cd.node_rev()
d=he_cd.node_fwd()
if c_maybe!=site.abc[2]:
return np.inf # not a quad
# Otherwise see if the scale is close:
L=utils.dist_along( site.grid.nodes['x'][ site.abc + [d] + [site.abc[0]]] )[-1]
scale_factor = L / (4*site.local_length)
# if scale_factor<1, definitely want to try this.
# if scale_factor>2, probably not.
return 0.05 * 500**(scale_factor-1)
class JoinStrategy(Strategy):
"""
Given an inside angle, merge the two edges.
"""
def __str__(self):
return "<Join>"
def metric(self,site):
theta=site.internal_angle
scale_factor = site.edge_length / site.local_length
# Join wants a small-ish internal angle
# If the sites edges are long, scale_factor > 1
# and we'd like to be making smaller edges, so ideal angle gets smaller
if theta> 89*np.pi/180:
return np.inf # not allowed
else:
# as theta goes to 0, a Join has no effect on scale.
#
# at larger theta, a join effectively coarsens
# so if edges are too small, we want to coarsen, scale_factor
# will be < 1
# adding the factor of 2: it was choosing join too often.
return 2*scale_factor * theta
def execute(self,site):
grid=site.grid
na,nb,nc=site.abc
# special case, when na and nc share a second common neighbor,
# forming a quad, that neighbor will be kept in nd
nd=None
# choose the node to move -
mover=None
j_ac=grid.nodes_to_edge(na,nc)
j_ac_oring=0
if j_ac is not None:
# special case: nodes are already joined, but there is no
# cell.
# this *could* be extended to allow the deletion of thin cells,
# but I don't want to get into that yet (since it's modification,
# not creation)
if (grid.edges['cells'][j_ac,0] >=0) or (grid.edges['cells'][j_ac,1]>=0):
raise StrategyFailed("Edge already has real cells")
# remember for tests below:
j_ac_oring=grid.edges['oring'][j_ac]
grid.delete_edge(j_ac)
j_ac=None
# a previous version only checked fixed against HINT and SLIDE
# when the edge j_ac existed. Why not allow this comparison
# even when j_ac doesn't exist?
# need to be more careful than that, though. The only time it's okay
# for a SLIDE or HINT to be the mover is if anchor is on the same ring,
# and the path between them is clear, which means b cannot be on that
# ring.
if grid.nodes['fixed'][na]==site.af.FREE:
mover=na
anchor=nc
elif grid.nodes['fixed'][nc]==site.af.FREE:
mover=nc
anchor=na
elif grid.nodes['oring'][na]>0 and grid.nodes['oring'][nc]>0:
# *might* be legal but requires more checks:
ring=grid.nodes['oring'][na]
if ring!=grid.nodes['oring'][nc]: # this can maybe get relaxed to join onto a fixed node on multiple rings
raise StrategyFailed("Cannot join across rings")
if grid.nodes['oring'][nb]==ring:
# This original check is too lenient. in a narrow
# channel, it's possible to have the three nodes
# on the same ring, straddling the channel, and this
# may allow for a join across the channel.
# # this is a problem if nb falls in between them.
# fa,fb,fc=grid.nodes['ring_f'][ [na,nb,nc] ]
# curve=site.af.curves[ring-1]
#
# if curve.is_ordered(fa,fb,fc):
# raise StrategyFailed("Cannot join across middle node")
# instead, check for an edge between a and c.
if j_ac_oring!=ring:
raise StrategyFailed("Cannot join non-adjacent along ring")
# probably okay, not sure if there are more checks to attempt
if grid.nodes['fixed'][na]==site.af.HINT:
mover,anchor=na,nc
else:
mover,anchor=nc,na
else:
raise StrategyFailed("Neither node can be moved")
he_ab=grid.nodes_to_halfedge(na,nb)
he_da=he_ab.rev()
pre_a=he_da.node_rev()
he_bc=he_ab.fwd()
he_cd=he_bc.fwd()
post_c=he_cd.node_fwd()
if pre_a==post_c:
log.info("Found a quad - proceeding carefully with nd")
nd=pre_a
# figure out external cell markers before the half-edges are invalidated.
# note the cell index on the outside of mover, and record half-edges
# for the anchor side
if mover==na:
cell_opp_mover=he_ab.cell_opp()
cell_opp_dmover=he_da.cell_opp()
he_anchor=he_bc
he_danchor=he_cd
else:
cell_opp_mover=he_bc.cell_opp()
cell_opp_dmover=he_cd.cell_opp()
he_anchor=he_ab
he_danchor=he_da
edits={'cells':[],'edges':[] }
cells_to_replace=[]
def archive_cell(c):
cells_to_replace.append( (c,grid.cells[c].copy()) )
grid.delete_cell(c)
edges_to_replace=[]
def archive_edge(j):
for c in grid.edges['cells'][j]:
if c>=0:
archive_cell(c)
edges_to_replace.append( (j,grid.edges[j].copy()) )
grid.delete_edge(j)
for j in list(grid.node_to_edges(mover)):
archive_edge(j)
grid.delete_node(mover)
for j,data in edges_to_replace:
nodes=data['nodes']
for i in [0,1]:
if nodes[i]==mover:
if (nodes[1-i]==nb) or (nodes[1-i]==nd):
nodes=None # signal that we don't add it
else:
nodes[i]=anchor
break
if nodes is not None:
# need to remember boundary, but any real
# cells get added in the next step, so can
# be -2 here.
cells=data['cells']
if cells[0]>=0:
cells[0]=-2
if cells[1]>=0:
cells[1]=-2
# This can raise Collinear exceptions
# also, it's possible that one of these edges will be a dupe,
# in the case of a quad
try:
# fairly sure there are tests above which prevent
# this from having to populate additional fields, but
# not positive. 2018-02-26: need to think about oring.
jnew=grid.add_edge( nodes=nodes, cells=cells,
oring=data['oring'],ring_sign=data['ring_sign'],
fixed=data['fixed'] )
except exact_delaunay.ConstraintCollinearNode:
raise StrategyFailed("Edge was collinear with existing nodes")
edits['edges'].append(jnew)
for c,data in cells_to_replace:
nodes=data['nodes']
for ni,n in enumerate(nodes):
if n==mover:
nodes[ni]=anchor
cnew=grid.add_cell(nodes=nodes)
edits['cells'].append(cnew)
if cell_opp_mover<0: # need to update boundary markers
j_cells=grid.edges['cells'][he_anchor.j,:].copy()
j_cells[he_anchor.orient]=cell_opp_mover
grid.modify_edge(he_anchor.j,cells=j_cells)
if nd is not None and cell_opp_dmover<0:
j_cells=grid.edges['cells'][he_danchor.j,:].copy()
j_cells[he_danchor.orient]=cell_opp_dmover
grid.modify_edge(he_danchor.j,cells=j_cells)
# This check could also go in unstructured_grid, maybe optionally?
areas=grid.cells_area()
if np.any( areas[edits['cells']]<=0.0 ):
raise StrategyFailed("Join created non-positive area cells")
return edits
class NonLocalStrategy(Strategy):
"""
Add an edge to a nearby, but not locally connected, element.
Currently, this is not very strong in identifying whether a
nearby node is a good candidate for the connection.
"""
def __str__(self):
return "<Nonlocal>"
def nonlocal_pair(self,site):
"""
Nonlocal nodes for a site
"""
af=site.af
best_pair=None,None
best_dist=np.inf
# skip over neighbors of any of the sites nodes
# take any neighbors in the DT.
each_dt_nbrs=[af.cdt.delaunay_neighbors(n) for n in site.abc]
if 1:
# filter out neighbors which are not within the 'sector'
# defined by the site.
apnt,bpnt,cpnt=af.grid.nodes['x'][site.abc]
ba_angle=np.arctan2(apnt[1] - bpnt[1],
apnt[0] - bpnt[0])
bc_angle=np.arctan2(cpnt[1] - bpnt[1],
cpnt[0] - bpnt[0])
old_each_dt_nbrs=each_dt_nbrs
each_dt_nbrs=[]
for nbrs in old_each_dt_nbrs:
nbrs_pnts=af.grid.nodes['x'][nbrs]
diffs=nbrs_pnts - bpnt
angles=np.arctan2(diffs[:,1], diffs[:,0])
# want to make sure that the angles from b to a,nbr,c
# are consecutive
angle_sum = (angles-bc_angle)%(2*np.pi) + (ba_angle-angles)%(2*np.pi)
valid=(angle_sum < 2*np.pi)
each_dt_nbrs.append(nbrs[valid])
each_nbrs=[af.grid.node_to_nodes(n) for n in site.abc]
# flat list of grid neighbors. note that since a-b-c are connected,
# this will include a,b,c, too.
if 0:
all_nbrs=[n for l in each_nbrs for n in l]
else:
all_nbrs=list(site.abc) # the way it's written, only c will be
# picked up by the loops below.
# HERE - this needs to go back to something similar to the old
# code, where the neighbors to avoid are defined by being connected
# along local edges within the given straight-line distance.
he0=af.grid.nodes_to_halfedge(site.abc[0],site.abc[1])
for incr,node,ref_pnt in [ (lambda x: x.rev(),
lambda x: x.node_rev(),
apnt), # walk along b->a
(lambda x: x.fwd(),
lambda x: x.node_fwd(),
cpnt)]: # walk along b->c
trav=incr(he0)
while trav!=he0: # in case of small loops
ntrav=node(trav)
# some decision here about whether to calculate straight line
# distance from a or b, and whether the threshold is
# local_length or some factor thereof
straight_dist=utils.dist(af.grid.nodes['x'][ntrav] - ref_pnt)
if straight_dist > 1.0*site.local_length:
break
all_nbrs.append(ntrav)
trav=incr(trav)
for n,dt_nbrs in zip(site.abc,each_dt_nbrs):
# DBG: maybe only DT neighbors of 'b' can be considered?
# when considering 'a' and 'c', too many possibilities
# of extraneous connections, which in the past were ruled
# out based on looking only at 'b', and by more explicitly
# enumerating local connections
if n!=site.abc[1]:
continue # TESTING
# most of those we are already connected to, weed them out.
good_nbrs=[nbr
for nbr in dt_nbrs
if nbr not in all_nbrs]
if not good_nbrs:
continue
dists=[utils.dist(af.grid.nodes['x'][n] - af.grid.nodes['x'][nbr])
for nbr in good_nbrs]
idx=np.argmin(dists)
if dists[idx]<best_dist:
best_dist=dists[idx]
best_pair=(n,good_nbrs[idx])
# is the best nonlocal node connection good enough?
# not worrying about angles, just proximity
return best_pair[0],best_pair[1],best_dist
def metric(self,site):
# something high if it's bad.
# 0.0 if it looks good
site_node,nonlocal_node,dist = self.nonlocal_pair(site)
scale=site.local_length
if site_node is not None:
# score it such that if the nonlocal connection is
# less than or equal to the target scale away, then
# it gets the highest score, and linearly increasing
# based on being longer than that.
# This may reach too far in some cases, and will need to be
# scaled or have a nonlinear term.
return max(0.0, (dist - scale)/scale)
else:
return np.inf
def execute(self,site):
# as much as it would be nice to blindly execute these
# things, the current state of the cost functions means
# that a really bad nonlocal may not show up in the cost
# function, and that means that best_child() will get tricked
# So until there is a better cost function, this needs to
# be more careful about which edges it will attempt
if self.metric(site) > 0.75:
raise StrategyFailed("NonLocal: too far away")
site_node,nonlocal_node,dist = self.nonlocal_pair(site)
if site_node is None:
raise StrategyFailed()
grid=site.grid
j=grid.add_edge(nodes=[site_node,nonlocal_node],
cells=[grid.UNMESHED,grid.UNMESHED])
return {'nodes': [],
'cells': [],
'edges': [j] }
Wall=WallStrategy()
WallClose=WallCloseStrategy()
Cutoff=CutoffStrategy()
Join=JoinStrategy()
Bisect=BisectStrategy()
NonLocal=NonLocalStrategy()
Resample=ResampleStrategy()
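# Note (illustrative): these singletons are scored per-site via metric(), where
# a smaller value is preferred and np.inf disallows the strategy outright;
# AdvancingFront.advance_at_site() below sorts the candidate actions by metric
# and tries them in order, reverting to a grid checkpoint whenever a strategy
# raises StrategyFailed or violates a constraint.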
class Site(object):
"""
represents a potential location for advancing the front.
"""
def __init__(self):
pass
def metric(self):
""" Smaller number means more likely to be chosen.
"""
assert False
def actions(self):
return []
class FrontSite(object):
resample_status=None
def metric(self):
assert False
def plot(self,ax=None):
assert False
def actions(self):
assert False
class TriangleSite(FrontSite):
"""
When adding triangles, the heuristic is to choose
tight locations.
"""
def __init__(self,af,nodes):
self.af=af
self.grid=af.grid
assert len(nodes)==3
self.abc = nodes
def metric(self):
return self.internal_angle
def points(self):
return self.grid.nodes['x'][ self.abc ]
@property
def internal_angle(self):
A,B,C = self.points()
return internal_angle(A,B,C)
@property
def edge_length(self):
return utils.dist( np.diff(self.points(),axis=0) ).mean()
@property
def local_length(self):
scale = self.af.scale
return scale( self.points().mean(axis=0) )
def plot(self,ax=None):
ax=ax or plt.gca()
points=self.grid.nodes['x'][self.abc]
return ax.plot( points[:,0],points[:,1],'r-o' )[0]
def actions(self):
theta=self.internal_angle
return [Wall,WallClose,Cutoff,Join,Bisect,NonLocal,Resample]
def resample_neighbors(self):
""" may update site! used to be part of AdvancingFront, but
probably better here, as part of the site.
"""
a,b,c = self.abc
# local_length = self.af.scale( self.points().mean(axis=0) )
# Possible that the site has very long edges ab or bc.
# averaging the position can give a point far from the actual
# site of the action which is b.
# This is safer:
local_length = self.af.scale( self.points()[1] )
grid=self.af.grid
self.resample_status=True
if self.grid.nodes['fixed'][b] == self.af.HINT:
self.grid.modify_node(b,fixed=self.af.SLIDE)
for n,direction in [ (a,-1),
(c,1) ]:
# used to check for SLIDE and degree
# not sure whether we should let SLIDE through...
# probably want to relax this to allow for subdividing
# long edges if the edge itself is not RIGID. But
# we still avoid FREE nodes, since they are not on the boundary
# and cannot be resampled
if grid.nodes['fixed'][n] in [self.af.HINT,self.af.SLIDE,self.af.RIGID]:
try:
n_res=self.af.resample(n=n,anchor=b,scale=local_length,direction=direction)
except Curve.CurveException as exc:
self.resample_status=False
n_res=n
if n!=n_res:
log.info("resample_neighbors changed a node")
if n==a:
self.abc[0]=n_res
else:
self.abc[2]=n_res
n=n_res # so that modify_node below operates on the right one.
# is this the right time to change the fixed status?
if grid.nodes['fixed'][n] == self.af.HINT:
grid.modify_node(n,fixed=self.af.SLIDE)
return self.resample_status
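# Hypothetical illustration: for a site with abc=[12,13,14], metric() is simply
# the internal angle at node 13, so choose_site() (which takes the argmin over
# all site metrics) tends to work on the tightest corners of the unmeshed
# front first.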
# without a richer way of specifying the scales, have to start
# with marked edges
class QuadCutoffStrategy(Strategy):
def metric(self,site):
# how to get scale here?
# FIX
return 1.0 # ?
def execute(self,site):
"""
Apply this strategy to the given Site.
Returns a dict with nodes,cells which were modified
"""
nodes=[site.abcd[0],site.abcd[3]]
j=site.grid.nodes_to_edge(nodes)
if j is None: # typ. case
# Set cells to unmeshed, and one will be overwritten by add_cell.
j=site.grid.add_edge(nodes=nodes,
para=site.grid.edges['para'][site.js[1]],
cells=[site.grid.UNMESHED,site.grid.UNMESHED])
else:
log.info("Cutoff found edge %d already exists"%j)
cnew=site.grid.add_cell(nodes=site.abcd)
return {'edges': [j],
'cells': [cnew] }
QuadCutoff=QuadCutoffStrategy()
class QuadSite(FrontSite):
def __init__(self,af,nodes):
self.af=af
self.grid=af.grid
assert len(nodes)==4
self.abcd = nodes
self.js=[ self.grid.nodes_to_edge(nodes[:2]),
self.grid.nodes_to_edge(nodes[1:3]),
self.grid.nodes_to_edge(nodes[2:])]
def metric(self):
return 1.0 # ?
def points(self):
return self.grid.nodes['x'][ self.abcd ]
# def internal_angle(self): ...
# def edge_length(self): ...
# def local_length(self): ...
def plot(self,ax=None):
ax=ax or plt.gca()
points=self.grid.nodes['x'][self.abcd]
return ax.plot( points[:,0],points[:,1],'r-o' )[0]
def actions(self):
return [QuadCutoff] # ,FloatLeft,FloatRight,FloatBoth,NonLocal?]
def resample_neighbors(self):
""" may update site!
if resampling failed, returns False. It's possible that some
nodes have been updated, but no guarantee that they are as far
away as requested.
this is where HINT nodes which are part of the site are set to SLIDE nodes.
"""
a,b,c,d = self.abcd
print("call to QuadSite: resample_neighbors, %d %d %d %d"%(a,b,c,d))
# could extend to something more dynamic, like triangle does
local_para=self.af.para_scale
local_perp=self.af.perp_scale
g=self.af.grid
if g.edges['para'][self.js[1]] == self.af.PARA:
scale=local_perp
else:
scale=local_para
for n in [b,c]:
if self.grid.nodes['fixed'][n] == self.af.HINT:
self.grid.modify_node(n,fixed=self.af.SLIDE)
self.resample_status=True
for n,anchor,direction in [ (a,b,-1),
(d,c,1) ]:
# this used to check SLIDE and degree
# not sure if we should let SLIDE through now...
if self.grid.nodes['fixed'][n] in [self.af.HINT,self.af.SLIDE]:
try:
n_res=self.af.resample(n=n,anchor=anchor,scale=scale,direction=direction)
except Curve.CurveException as exc:
log.warning("Unable to resample neighbors")
self.resample_status=False
continue
# is this the right time to change the fixed status?
if self.grid.nodes['fixed'][n_res] == self.af.HINT:
self.grid.modify_node(n_res,fixed=self.af.SLIDE)
if n!=n_res:
log.info("resample_neighbors changed a node")
if n==a:
self.abcd[0]=n_res
else:
self.abcd[3]=n_res
return self.resample_status
class AdvancingFront(object):
"""
Implementation of advancing front
"""
grid=None
cdt=None
# 'fixed' flags:
# in order of increasing degrees of freedom in its location.
# don't use 0 here, so that it's easier to detect uninitialized values
UNSET=0
RIGID=1 # should not be moved at all
SLIDE=2 # able to slide along a ring.
FREE=3 # not constrained
HINT=4 # slidable and can be removed.
StrategyFailed=StrategyFailed
def __init__(self,grid=None,**kw):
"""
"""
self.log = logging.getLogger("AdvancingFront")
utils.set_keywords(self,kw)
if grid is None:
grid=unstructured_grid.UnstructuredGrid()
self.grid = self.instrument_grid(grid)
self.curves=[]
def add_curve(self,curve=None,interior=None,nodes=None,closed=True):
"""
Add a Curve, upon which nodes can be slid.
curve: [N,2] array of point locations, or a Curve instance.
interior: true to force this curve to be an island.
nodes: use existing nodes, given by the indices here.
Any node which is already part of another ring will be set to RIGID,
but will retain its original oring.
The nodes must have existing edges connecting them, and those edges
will be assigned to this ring via edges['oring'] and ['ring_sign']
"""
if nodes is not None:
nodes=np.asarray(nodes)
curve=self.grid.nodes['x'][nodes]
if not isinstance(curve,Curve):
if interior is not None:
ccw=not interior
else:
ccw=None
curve=Curve(curve,ccw=ccw,closed=closed)
elif interior is not None:
assert curve.closed
a=curve.signed_area()
if a>0 and interior:
curve=curve.reverse()
self.curves.append( curve )
oring=len(self.curves) # 1-based
if nodes is not None:
# Update nodes to be on this curve:
on_a_ring=self.grid.nodes['oring'][nodes]>0
self.grid.nodes['oring'][nodes[~on_a_ring]]=oring
# curve.distances has an extra entry when a closed loop
self.grid.nodes['ring_f'][nodes[~on_a_ring]]=curve.distances[:len(nodes)][~on_a_ring]
self.grid.nodes['fixed'][nodes[~on_a_ring]]=self.HINT
self.grid.nodes['fixed'][nodes[on_a_ring]]=self.RIGID
# And update the edges, too:
if closed:
pairs=utils.circular_pairs(nodes)
else:
pairs=zip(nodes[:-1],nodes[1:])
for a,b in pairs:
j=self.grid.nodes_to_edge([a,b])
self.grid.edges['oring'][j]=oring
if self.grid.edges['nodes'][j,0]==a:
self.grid.edges['ring_sign'][j]=1
elif self.grid.edges['nodes'][j,0]==b: # little sanity check
self.grid.edges['ring_sign'][j]=-1
else:
assert False,"Failed invariant"
return oring-1
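# Hypothetical usage (array names are illustrative):
#   af.add_curve(shoreline_xy, interior=False)   # outer boundary, forced CCW
#   af.add_curve(island_xy, interior=True)       # island, forced clockwise
#   af.add_curve(nodes=[n0,n1,n2,n3])            # ring built from existing nodes
# Each call returns the 0-based curve index; nodes['oring'] stores index+1.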
def instrument_grid(self,g):
"""
Add fields to the given grid to support advancing front
algorithm. Modifies grid in place, and returns it.
Also creates a Triangulation which follows modifications to
the grid, keeping a constrained Delaunay triangulation around.
"""
# oring is stored 1-based, so that the default 0 value is
# indicates no data / missing.
g.add_node_field('oring',np.zeros(g.Nnodes(),'i4'),on_exists='pass')
g.add_node_field('fixed',np.zeros(g.Nnodes(),'i1'),on_exists='pass')
g.add_node_field('ring_f',-1*np.ones(g.Nnodes(),'f8'),on_exists='pass')
# track a fixed field on edges, too, as it is not always sufficient
# to tag nodes as fixed, since a long edge between two fixed nodes may
# or may not be subdividable. Note that for edges, we are talking about
# topology, not the locations, since locations are part of nodes.
# for starters, support RIGID (cannot subdivide) and 0, meaning no
# additional information beyond existing node and topological constraints.
g.add_edge_field('fixed',np.zeros(g.Nedges(),'i1'),on_exists='pass')
# if nonzero, which curve this edge follows
g.add_edge_field('oring',np.zeros(g.Nedges(),'i4'),on_exists='pass')
# if oring nonzero, then +1 if n1=>n2 is forward on the curve, -1
# otherwise
g.add_edge_field('ring_sign',np.zeros(g.Nedges(),'i1'),on_exists='pass')
# Subscribe to operations *before* they happen, so that the constrained
# DT can signal that an invariant would be broken
self.cdt=self.shadow_cdt_factory(g)
return g
def shadow_cdt_factory(self,g):
"""
Create a shadow CDT for the given grid.
This extra level of indirection is to facilitate
testing of one method vs the other in subclasses.
"""
return shadow_cdt.shadow_cdt_factory(g)
def initialize_boundaries(self,upsample=True):
"""
Add nodes and edges to the grid from curves.
if upsample is True, resample curves at scale.
"""
for curve_i,curve in enumerate(self.curves):
# this is problematic when the goal is to have an
# entirely rigid set of nodes.
if upsample:
curve_points,srcs=curve.upsample(self.scale,return_sources=True)
else:
if curve.closed:
# avoid repeated point
curve_points=curve.points[:-1]
else:
curve_points=curve.points
srcs=curve.distances[:len(curve_points)]
# add the nodes in:
# used to initialize as SLIDE
nodes=[self.grid.add_node(x=curve_points[j],
oring=curve_i+1,
ring_f=srcs[j],
fixed=self.HINT)
for j in range(len(curve_points))]
if curve.closed:
Ne=len(curve_points)
else:
Ne=len(curve_points) - 1
pairs=zip( np.arange(Ne),
(np.arange(Ne)+1)%Ne)
for na,nb in pairs:
self.grid.add_edge( nodes=[nodes[na],nodes[nb]],
cells=[self.grid.UNMESHED,
self.grid.UNDEFINED],
oring=curve_i+1,
ring_sign=1 )
def enumerate_sites(self):
raise Exception("Implement in subclass")
def choose_site(self):
sites=self.enumerate_sites()
if len(sites):
scores=[ site.metric()
for site in sites ]
best=np.argmin( scores )
return sites[best]
else:
return None
def free_span(self,he,max_span,direction):
"""
returns the distance, and the nodes making up the
span, starting from anchor (the rev node of he),
and going until either max_span distance is found,
it wraps around, or encounters a non-SLIDE-able node.
the reason this works with halfedges is that we only
move along nodes which are simply connected (degree 2)
TODO: this reports along edge distances, but it's
used (exclusively?) in walking along boundaries which
might be resampled. It would be better to look at
the distance in discrete jumps.
"""
span=0.0
if direction==1:
trav0=he.node_fwd()
anchor=he.node_rev()
else:
trav0=he.node_rev()
anchor=he.node_fwd()
last=anchor
trav=trav0
nodes=[last] # anchor is included
def pred(n):
# N.B. possible for trav0 to be SLIDE
degree=self.grid.node_degree(n)
# 2020-11-28: there used to be a blanket exception for trav0,
# but it's only in the case that trav0 is SLIDE that we want
# to return True for it.
if degree>2:
return False
if n==trav0 and self.grid.nodes['fixed'][n]==self.SLIDE:
return True
if self.grid.nodes['fixed'][n]==self.HINT:
return True
return False
while pred(trav) and (trav != anchor) and (span<max_span):
span += utils.dist( self.grid.nodes['x'][last] -
self.grid.nodes['x'][trav] )
nodes.append(trav)
if direction==1:
he=he.fwd()
last,trav = trav,he.node_fwd()
elif direction==-1:
he=he.rev()
last,trav = trav,he.node_rev()
else:
assert False
# could use some loop retrofitting..
span += utils.dist( self.grid.nodes['x'][last] -
self.grid.nodes['x'][trav] )
nodes.append(trav)
return span,nodes
max_span_factor=4
def resample(self,n,anchor,scale,direction):
"""
move/replace n, such that from anchor to n/new_n the edge
length is close to scale.
If n has more than 2 neighbors, does nothing and returns n as is.
Used to assume that n was SLIDE or HINT. Now checks for either
nodes['fixed'][n] in (SLIDE,HINT), or that the edge can be subdivided.
normally, a SLIDE node cannot be deleted. in some cases resample will
create a new node for n, and it will be a SLIDE node. in that case, should
n retain SLIDE, too? is it the responsibility of resample(), or the caller?
can we at least guarantee that no other nodes need to be changing status?
in the past, new nodes created here were given fixed=SLIDE. This is
probably better set to HINT, as the SLIDE nodes can get in the way if
they aren't used immediately for a cell.
Returns the resampled node index -- often same as n, but may be a different
node.
"""
#self.log.debug("resample %d to be %g away from %d in the %s direction"%(n,scale,anchor,
# direction) )
if direction==1: # anchor to n is the forward direction of the halfedge
he=self.grid.nodes_to_halfedge(anchor,n)
elif direction==-1:
he=self.grid.nodes_to_halfedge(n,anchor)
else:
assert False
n_deg=self.grid.node_degree(n)
if self.grid.nodes['oring'][n]==0:
self.log.debug("Node is not on a ring, no resampling possible")
return n
# must be able to either muck with n, or split the anchor-n edge
# in the past we assumed that this sort of check was already done
j=he.j
edge_resamplable=( (self.grid.edges['fixed'][j]!=self.RIGID)
and (self.grid.edges['cells'][j,0]<0)
and (self.grid.edges['cells'][j,1]<0) )
# node_resamplable=(n_deg==2) and (self.grid.nodes['fixed'][n] in [self.HINT,self.SLIDE])
# it's possible to have a node that, based on the above test, is resamplable,
# but the edge is not (because the edge test includes the possibility of
# a cell on the opposite side).
#if not (node_resamplable or edge_resamplable):
if not edge_resamplable:
self.log.debug("Edge and node are RIGID/deg!=2, no resampling possible")
return n
span_length,span_nodes = self.free_span(he,self.max_span_factor*scale,direction)
# anchor-n distance should be in there, already.
# self.log.debug("free span from the anchor is %g"%span_length)
if span_length < self.max_span_factor*scale:
n_segments = max(1,round(span_length / scale))
target_span = span_length / n_segments
else:
target_span=scale
n_segments = None
def handle_one_segment():
# this is a function because there are two times
# (one proactive, one reactive) it might get used below.
# in tight situations, need to make sure
# that for a site a--b--c we're not trying
# move c all the way on top of a.
# it is not sufficient to just force two
# segments, as that just pushes the issue into
# the next iteration, but in an even worse state.
if direction==-1:
he_other=he.fwd()
opposite_node=he_other.node_fwd()
else:
he_other=he.rev()
opposite_node=he_other.node_rev()
if opposite_node==span_nodes[-1]:
# self.log.info("n_segment=1, but that would be an implicit join")
# rather than force two segments, force it
# to remove all but the last edge.
del span_nodes[-1]
# self.log.debug("Only space for 1 segment")
for d in span_nodes[1:-1]:
cp=self.grid.checkpoint()
try:
self.grid.merge_edges(node=d)
except self.cdt.IntersectingConstraints as exc:
self.log.info("handle_one_segment: cut short by exception")
self.grid.revert(cp)
# only got that far..
return d
return span_nodes[-1]
if n_segments==1:
return handle_one_segment()
# first, find a point on the original ring which satisfies the target_span
anchor_oring=self.grid.nodes['oring'][anchor]-1
n_oring=self.grid.nodes['oring'][n]-1
oring=self.grid.edges['oring'][j]-1
# Default, may be overwritten below
anchor_f = self.grid.nodes['ring_f'][anchor]
n_f = self.grid.nodes['ring_f'][n]
if anchor_oring != oring:
self.log.warning('resample: anchor on different rings. Cautiously resample')
if n_oring==oring:
f_start=n_f # can use n to speed up point_to_f
else:
f_start=0.0 # not valid, so full search in point_to_f
anchor_f = self.curves[oring].point_to_f(self.grid.nodes['x'][anchor],
n_f,
direction=0)
if n_oring != oring:
# anchor_f is valid regardless of its original oring
n_f = self.curves[oring].point_to_f(self.grid.nodes['x'][n],
anchor_f,
direction=0)
# Easing into use of explicit edge orings
assert oring==self.grid.edges['oring'][j]-1
curve = self.curves[oring]
# at any point might encounter a node from a different ring, but want
# to know it's ring_f for this ring.
def node_f(m):
# first two cases are partially to be sure that equality comparisons will
# work.
if m==n:
return n_f
elif m==anchor:
return anchor_f
elif self.grid.nodes['oring'][m]==oring+1:
return self.grid.nodes['ring_f'][m]
else:
return curve.point_to_f(self.grid.nodes['x'][m],
n_f,direction=0)
if 0: # delete this once the new stanza below is trusted
# explicitly record whether the curve has the opposite orientation
# of the edge. Hoping to retire this way.
# This is actually dangerous, as the mid_point does not generally
# fall on the line, and so we have to give it a generous rel_tol.
mid_point = 0.5*(self.grid.nodes['x'][n] + self.grid.nodes['x'][anchor])
mid_f=self.curves[oring].point_to_f(mid_point)
if curve.is_forward(anchor_f,mid_f,n_f):
curve_direction=1
else:
curve_direction=-1
if 1: # "new" way
# logic is confusing
edge_ring_sign=self.grid.edges['ring_sign'][he.j]
curve_direction=(1-2*he.orient)*direction*edge_ring_sign
#assert new_curve_direction==curve_direction
assert edge_ring_sign!=0,"Edge %d has sign %d, should be set"%(he.j,edge_ring_sign)
# a curve forward that bakes in curve_direction
if curve_direction==1:
rel_curve_fwd=lambda a,b,c: curve.is_forward(a,b,c)
else:
rel_curve_fwd=lambda a,b,c: curve.is_reverse(a,b,c)
try:
new_f,new_x = curve.distance_away(anchor_f,curve_direction*target_span)
except Curve.CurveException as exc:
raise
# it's possible that even though the free_span distance yielded
# n_segments>1, distance_away() went too far since it cuts out some
# curvature in the along-curve distance.
# this leads to a liability that new_f is beyond span_nodes[-1], and
# we should follow the same treatment as above for n_segments==1
end_span_f=node_f(span_nodes[-1])
# 2018-02-13: hoping this also changes to curve_direction
if ( rel_curve_fwd(anchor_f,end_span_f,new_f)
and end_span_f!=anchor_f):
self.log.warning("n_segments=%s, but distance_away blew past it"%n_segments)
return handle_one_segment()
# check to see if there are other nodes in the way, and remove them.
# in the past, this started with the node after n, deleting things up
# to, and *including* a node at the location where we want n to be.
# in simple cases, it would be better to delete n, and only move the
# last node. But there is a chance that n cannot be deleted, more likely
# that n cannot be deleted than later nodes. However... free_span
# would not allow those edges, so we can assume anything goes here.
eps=0.001*target_span
nodes_to_delete=[]
trav=he
while True:
# start with the half-edge from anchor to n
# want to loop until trav.node_fwd() (for direction=1)
# is at or beyond our target, and all nodes from n
# until trav.node_rev() are in the list nodes_to_delete.
if direction==1:
n_trav=trav.node_fwd()
else:
n_trav=trav.node_rev()
f_trav=node_f(n_trav)
# EPS needs some TLC here. The corner cases have not been
# sufficiently taken care of, i.e. new_f==f_trav, etc.
if rel_curve_fwd(anchor_f, new_f+curve_direction*eps, f_trav ):
break
# that half-edge wasn't far enough
nodes_to_delete.append(n_trav)
if direction==1:
trav=trav.fwd()
else:
trav=trav.rev()
# sanity check.
if trav==he:
self.log.error("Made it all the way around!")
raise Exception("This is probably bad")
# either n was already far enough, in which case we should split
# this edge, or there are some nodes in nodes_to_delete.
# the last of those nodes will be saved, and become the new n
if len(nodes_to_delete):
nnew=nodes_to_delete.pop()
# slide, because it needs to move farther out
method='slide'
else:
# because n is already too far
method='split'
nnew=n
# Maybe better to fix the new node with any sliding necessary,
# and then delete these, but that would require more checks to
# see if it's safe to reposition the node?
for d in nodes_to_delete:
cp=self.grid.checkpoint()
try:
self.grid.merge_edges(node=d)
except self.cdt.IntersectingConstraints as exc:
self.log.info("resample: had to stop short due to intersection")
self.grid.revert(cp)
return d
# on the other hand, it may be that the next node is too far away, and it
# would be better to divide the edge than to shift a node from far away.
# also possible that our neighbor was RIGID and can't be shifted
cp=self.grid.checkpoint()
try:
if method=='slide':
self.grid.modify_node(nnew,x=new_x,ring_f=new_f)
assert self.grid.nodes['oring'][nnew]==oring+1
else: # 'split'
j=self.grid.nodes_to_edge([anchor,nnew])
# get a newer nnew
# This used to set fixed=SLIDE, but since there is no additional
# topology attached to nnew, it probably makes more sense for it
# to be HINT. changed 2018-02-26
jnew,nnew,j_next = self.grid.split_edge(j,x=new_x,ring_f=new_f,oring=oring+1,
fixed=self.HINT)
except self.cdt.IntersectingConstraints as exc:
self.log.info("resample - slide() failed. will return node at original loc")
self.grid.revert(cp)
return nnew
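# Illustrative call: resample(n=7, anchor=5, scale=12.0, direction=1) tries to
# make the boundary edge from node 5 to node 7 roughly 12 units long, either by
# sliding node 7 along its ring (merging any HINT nodes in the way) or by
# splitting the 5-7 edge, and returns whichever node ends up at the target
# location (often, but not always, 7 itself).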
def resample_neighbors(self,site):
return site.resample_neighbors()
def resample_cycles(self):
"""
Resample all edges along cycles. Useful when the boundary has
a mix of rigid and non-rigid, with coarse spacing that needs
to be resampled.
"""
cycs=self.grid.find_cycles(max_cycle_len=self.grid.Nnodes())
for cyc in cycs:
n0=cyc[0]
he=self.grid.nodes_to_halfedge(cyc[0],cyc[1])
while 1:
a=he.node_rev()
b=he.node_fwd()
res=self.resample(b,a,
scale=self.scale(self.grid.nodes['x'][a]),
direction=1)
he=self.grid.nodes_to_halfedge(a,res).fwd()
if he.node_rev()==n0:
break # full circle.
def cost_function(self,n):
raise Exception("Implement in subclass")
def eval_cost(self,n):
if self.grid.nodes['fixed'][n]==self.RIGID:
return 0.0
fn=self.cost_function(n)
if fn:
return fn(self.grid.nodes['x'][n])
else:
return 0.0
cost_thresh_default=0.22
def optimize_nodes(self,nodes,max_levels=4,cost_thresh=None):
"""
iterate over the given set of nodes, optimizing each location,
and possibly expanding the set of nodes in order to optimize
a larger area.
2019-03-12: max_levels used to default to 3, but there were
cases where it needed a little more perseverance.
cost_thresh defaults to 0.22, following the tuning of paver.py
"""
if cost_thresh is None:
cost_thresh=self.cost_thresh_default
for level in range(max_levels):
# following paver, maybe will decrease number of calls
# didn't help.
nodes.sort(reverse=True)
max_cost=0
for n in nodes:
# relax_node can return 0 if there was no cost
# function to optimize
# this node may already be good enough
initial_cost=self.eval_cost(n)
if initial_cost<cost_thresh: continue
new_cost=self.relax_node(n) or 0.0
max_cost=max(max_cost,new_cost)
if max_cost <= cost_thresh:
break
# as in paver -- if everybody is valid, good enough
failures=self.check_edits(dict(nodes=nodes))
if len(failures['cells'])==0:
break
if level==0:
# just try re-optimizing once
pass
else:
# expand list of nodes one level
new_nodes=set(nodes)
for n in nodes:
new_nodes.update(self.grid.node_to_nodes(n))
nodes=list(new_nodes)
def optimize_edits(self,edits,**kw):
"""
Given a set of elements (which presumably have been modified
and need tuning), jostle nodes around to improve the cost function
Returns an updated edits with any additional changes. No promise
that it's the same object or a copy.
"""
if 'nodes' not in edits:
edits['nodes']=[]
nodes = list(edits.get('nodes',[]))
for c in edits.get('cells',[]):
for n in self.grid.cell_to_nodes(c):
if n not in nodes:
nodes.append(n)
def track_node_edits(g,func_name,n,**k):
if n not in edits['nodes']:
edits['nodes'].append(n)
self.grid.subscribe_after('modify_node',track_node_edits)
self.optimize_nodes(nodes,**kw)
self.grid.unsubscribe_after('modify_node',track_node_edits)
return edits
def relax_node(self,n):
""" Move node n, subject to its constraints, to minimize
the cost function. Return the final value of the cost function
"""
# self.log.debug("Relaxing node %d"%n)
if self.grid.nodes['fixed'][n] == self.FREE:
return self.relax_free_node(n)
elif self.grid.nodes['fixed'][n] == self.SLIDE:
return self.relax_slide_node(n)
else:
# Changed to silent pass because ResampleStrategy currently
# tells the truth about nodes it moves, even though they
# are HINT nodes.
# raise Exception("relax_node with fixed=%s"%self.grid.nodes['fixed'][n])
return 0.0
def relax_free_node(self,n):
cost=self.cost_function(n)
if cost is None:
return None
x0=self.grid.nodes['x'][n]
local_length=self.scale( x0 )
init_cost=cost(x0)
new_x = opt.fmin(cost,
x0,
xtol=local_length*1e-4,
disp=0)
opt_cost=cost(new_x)
dx=utils.dist( new_x - x0 )
if (dx != 0.0) and opt_cost<init_cost:
# self.log.debug('Relaxation moved node %f'%dx)
cp=self.grid.checkpoint()
try:
self.grid.modify_node(n,x=new_x)
return opt_cost
except self.cdt.IntersectingConstraints as exc:
self.grid.revert(cp)
self.log.info("Relaxation caused intersection, reverting")
return init_cost
def relax_slide_node(self,n):
cost_free=self.cost_function(n)
if cost_free is None:
return
x0=self.grid.nodes['x'][n]
f0=self.grid.nodes['ring_f'][n]
ring=self.grid.nodes['oring'][n]-1
assert np.isfinite(f0)
assert ring>=0
local_length=self.scale( x0 )
slide_limits=self.find_slide_limits(n,3*local_length)
# used to just be f, but I think it's more appropriate to
# be f[0]
def cost_slide(f):
# lazy bounded optimization
f=f[0]
fclip=np.clip(f,*slide_limits)
err=(f-fclip)**2
return err+cost_free( self.curves[ring](fclip) )
base_cost=cost_free(x0)
new_f = opt.fmin(cost_slide,
[f0],
xtol=local_length*1e-4,
disp=0)
if not self.curves[ring].is_forward(slide_limits[0],
new_f,
slide_limits[1]):
# Would be better to just optimize within bounds.
# still, can check the two bounds, and if the
# cost is lower, return one of them.
self.log.warning("Slide went outside limits")
slide_length=(slide_limits[1] - slide_limits[0])
lower_f=0.95*slide_limits[0]+0.05*slide_limits[1]
upper_f=0.05*slide_limits[0]+0.95*slide_limits[1]
lower_cost=cost_slide([lower_f])
upper_cost=cost_slide([upper_f])
if lower_cost<upper_cost and lower_cost<base_cost:
self.log.warning("Truncate slide on lower end")
new_f=[lower_f]
elif upper_cost<base_cost:
new_f=[upper_f]
self.log.warning("Truncate slide on upper end")
else:
self.log.warning("Couldn't truncate slide.")
return base_cost
new_cost=cost_slide(new_f)
if new_cost<base_cost:
cp=self.grid.checkpoint()
try:
self.slide_node(n,new_f[0]-f0)
return new_cost
except self.cdt.IntersectingConstraints as exc:
self.grid.revert(cp)
self.log.info("Relaxation caused intersection, reverting")
return base_cost
def node_ring_f(self,n,ring0):
"""
return effective ring_f for node n in terms of ring0.
if that's the native ring for n, just return ring_f,
otherwise calculates where n would fall on ring0
"""
if self.grid.nodes['oring'][n]-1==ring0:
return self.grid.nodes['ring_f'][n]
else:
return self.curves[ring0].point_to_f(self.grid.nodes['x'][n])
def find_slide_limits(self,n,cutoff=None):
""" Returns the range of allowable ring_f for n.
limits are exclusive
cutoff: a distance along the curve beyond which we don't
care. note that this is not as the crow flies, but tracing
the segments. So a point which is cutoff away may be much
closer as the crow flies.
"""
n_ring=self.grid.nodes['oring'][n]-1
n_f=self.grid.nodes['ring_f'][n]
curve=self.curves[n_ring]
L=curve.total_distance()
# find our two neighbors on the ring:check forward:
nbrs=[]
for nbr in self.grid.node_to_nodes(n):
j=self.grid.nodes_to_edge([n,nbr])
j_ring=self.grid.edges['oring'][j]
if j_ring==0:
continue
assert j_ring-1==n_ring
# The test below is not robust with intersecting curves,
# and is why edges have to track their own ring.
#if self.grid.nodes['oring'][nbr]-1!=n_ring:
# continue
nbrs.append(nbr)
# With the above check on edge oring, this should not be necessary.
# if len(nbrs)>2:
# # annoying, but happens. one or more edges are internal,
# # and two are along the curve.
# nbrs.append(n)
# # sort them along the ring - HERE this logic is likely not robust for open curves
# all_f=(self.grid.nodes['ring_f'][nbrs]-n_f) % L
# order=np.argsort(all_f)
# nbrs=[ nbrs[order[-1]], nbrs[order[1]] ]
assert len(nbrs)==2
if curve.is_forward(self.node_ring_f(nbrs[0],n_ring),
n_f,
self.node_ring_f(nbrs[1],n_ring) ):
pass # already in nice order
else:
nbrs=[nbrs[1],nbrs[0]]
# Backward then forward
stops=[]
for sgn,nbr in zip( [-1,1], nbrs ):
trav=[n,nbr]
while 1:
# beyond cutoff?
if ( (cutoff is not None) and
(sgn*(self.node_ring_f(trav[1],n_ring) - n_f) )%L > cutoff ):
break
# is trav[1] something which limits the sliding of n?
trav_nbrs=self.grid.node_to_nodes(trav[1])
# if len(trav_nbrs)>2:
# break
# if self.grid.nodes['fixed'][trav[1]] != self.SLIDE:
# break
# the transition to HINT
if self.grid.nodes['fixed'][trav[1]] != self.HINT:
break
for nxt in trav_nbrs:
if nxt not in trav:
break
# before updating, check to see if this edge has
# a cell on it. If it does, then even if the node is degree
# 2, we can't slide through it.
j=self.grid.nodes_to_edge( [trav[1],nxt] )
j_c=self.grid.edges['cells'][j]
if j_c[0]>=0 or j_c[1]>=0:
# adjacent cells, can't slide through here.
break
trav=[trav[1],nxt]
stops.append(trav[1])
limits=[self.node_ring_f(m,n_ring)
for m in stops]
# make sure limits are monotonic increasing. for circular,
# this may require some modulo
if curve.closed and (limits[0]>limits[1]):
if limits[1] < n_f:
limits[1] += curve.total_distance()
elif limits[0] > n_f:
limits[0] -= curve.total_distance()
else:
assert False,"Not sure how to get the range to enclose n"
assert limits[0] < limits[1]
return limits
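# Illustrative example: for a node at ring_f=200 whose sliding is blocked at
# neighbors with ring_f 150 (backward) and 260 (forward), this returns
# [150, 260]; on a closed ring the values are shifted by the ring length as
# needed so that limits[0] < n_f < limits[1].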
def find_slide_conflicts(self,n,delta_f):
""" Find nodes in the way of sliding node n
to a new ring_f=old_oring_f + delta_f.
N.B. this does not appear to catch situations
where n falls exactly on an existing node, though
it should (i.e. it's a bug)
"""
n_ring=self.grid.nodes['oring'][n]-1
n_f=self.grid.nodes['ring_f'][n]
new_f=n_f + delta_f
curve=self.curves[n_ring]
# Want to find edges in the direction of travel
# it's a little funny to use half-edges, since what we
# really care about is what it's facing
# would like to use half-edges here, but it's not entirely
# well-defined, so rather than introduce some future pitfalls,
# do things a bit more manually.
to_delete=[]
for nbr in self.grid.node_to_nodes(n):
if self.grid.nodes['oring'][nbr]-1!=n_ring:
continue
nbr_f=self.node_ring_f(nbr,n_ring)
if self.grid.node_degree(nbr)!=2:
continue
if delta_f>0:
# either the nbr is outside our slide area, or could
# be in the opposite direction along the ring
if not curve.is_forward(n_f,nbr_f,n_f+delta_f):
continue
to_delete.append(nbr)
he=self.grid.nodes_to_halfedge(n,nbr)
while 1:
he=he.fwd()
nbr=he.node_fwd()
nbr_f=self.node_ring_f(nbr,n_ring)
if curve.is_forward(n_f,n_f+delta_f,nbr_f):
break
to_delete.append(nbr)
break
else:
if not curve.is_reverse(n_f,nbr_f,n_f+delta_f):
continue
to_delete.append(nbr)
he=self.grid.nodes_to_halfedge(nbr,n)
while 1:
he=he.rev()
nbr=he.node_rev()
nbr_f=self.node_ring_f(nbr,n_ring)
if curve.is_reverse(n_f,n_f+delta_f,nbr_f):
break
to_delete.append(nbr)
break
# sanity checks:
for nbr in to_delete:
assert n_ring==self.grid.nodes['oring'][nbr]-1
# OLD COMMENT:
# For now, depart a bit from paver, and rather than
# having HINT nodes, HINT and SLIDE are both fixed=SLIDE,
# but differentiate based on node degree.
# NEW COMMENT:
# actually, that was a bad idea. better to stick with
# how it was in paver
assert self.grid.nodes['fixed'][nbr]==self.HINT # SLIDE
assert self.grid.node_degree(nbr)==2
return to_delete
def slide_node(self,n,delta_f):
conflicts=self.find_slide_conflicts(n,delta_f)
for nbr in conflicts:
self.grid.merge_edges(node=nbr)
n_ring=self.grid.nodes['oring'][n]-1
n_f=self.grid.nodes['ring_f'][n]
new_f=n_f + delta_f
curve=self.curves[n_ring]
self.grid.modify_node(n,x=curve(new_f),ring_f=new_f)
loop_count=0
def loop(self,count=0):
while 1:
site=self.choose_site()
if site is None:
break
if not self.advance_at_site(site):
self.log.error("Failed to advance. Exiting loop early")
return False
count-=1
self.loop_count+=1
if count==0:
break
return True
def advance_at_site(self,site):
# This can modify site! May also fail.
resampled_success = self.resample_neighbors(site)
actions=site.actions()
metrics=[a.metric(site) for a in actions]
bests=np.argsort(metrics)
for best in bests:
try:
cp=self.grid.checkpoint()
self.log.debug("Chose strategy %s"%( actions[best] ) )
edits=actions[best].execute(site)
opt_edits=self.optimize_edits(edits)
failures=self.check_edits(opt_edits)
if len(failures['cells'])>0:
self.log.info("Some cells failed")
raise StrategyFailed("Cell geometry violation")
# could commit?
except self.cdt.IntersectingConstraints as exc:
# arguably, this should be caught lower down, and rethrown
# as a StrategyFailed.
self.log.error("Intersecting constraints - rolling back")
self.grid.revert(cp)
continue
except StrategyFailed as exc:
self.log.error("Strategy failed - rolling back")
self.grid.revert(cp)
continue
break
else:
self.log.error("Exhausted the actions!")
return False
return True
def check_edits(self,edits):
return defaultdict(list)
zoom=None
def plot_summary(self,ax=None,
label_nodes=True,
clip=None):
ax=ax or plt.gca()
ax.cla()
for curve in self.curves:
curve.plot(ax=ax,color='0.5',lw=0.4,zorder=-5)
self.grid.plot_edges(ax=ax,clip=clip,lw=1)
if label_nodes:
labeler=lambda ni,nr: str(ni)
else:
labeler=None
self.grid.plot_nodes(ax=ax,labeler=labeler,clip=clip,sizes=10)
ax.axis('equal')
if self.zoom:
ax.axis(self.zoom)
class AdvancingTriangles(AdvancingFront):
"""
Specialization which roughly mimics tom, creating only triangles
"""
scale=None
def __init__(self,grid=None,scale=None,**kw):
super(AdvancingTriangles,self).__init__(grid=grid,**kw)
if scale is not None:
self.set_edge_scale(scale)
def set_edge_scale(self,scale):
self.scale=scale
def enumerate_sites(self):
sites=[]
# FIX: This doesn't scale!
valid=(self.grid.edges['cells'][:,:]==self.grid.UNMESHED)
J,Orient = np.nonzero(valid)
for j,orient in zip(J,Orient):
if self.grid.edges['deleted'][j]:
continue
he=self.grid.halfedge(j,orient)
he_nxt=he.fwd()
a=he.node_rev()
b=he.node_fwd()
bb=he_nxt.node_rev()
c=he_nxt.node_fwd()
assert b==bb
sites.append( TriangleSite(self,nodes=[a,b,c]) )
return sites
# reject edit that puts a cell circumcenter outside the cell
reject_cc_outside_cell=True
# If a numeric value, check distance between adjacent circumcenters
# reject if signed distance below this value, normalize by sqrt(cell area)
reject_cc_distance_factor=None
def check_edits(self,edits):
"""
edits: {'nodes':[n1,n2,...],
'cells': ...,
'edges': ... }
Checks for any elements which fail geometric checks, such
as orthogonality.
"""
failures=defaultdict(list)
cells=set( edits.get('cells',[]) )
for n in edits.get('nodes',[]):
cells.update( self.grid.node_to_cells(n) )
for c in list(cells):
pnts=self.grid.nodes['x'][self.grid.cell_to_nodes(c)]
cc=circumcenter_py(pnts[0],pnts[1],pnts[2])
if self.reject_cc_outside_cell:
if not self.grid.cell_polygon(c).contains(geometry.Point(cc)):
failures['cells'].append(c)
if self.reject_cc_distance_factor is not None:
# More expensive but closer to what really matters
for j in self.grid.cell_to_edges(c):
ec=self.grid.edges['cells'][j,:]
n=self.grid.edges_normals(j)
if ec[0]==c:
nbr=ec[1]
elif ec[1]==c:
nbr=ec[0]
n=-n
else: assert False
if nbr<0: continue
pnts=self.grid.nodes['x'][self.grid.cell_to_nodes(nbr)]
nbr_cc=circumcenter_py(pnts[0],pnts[1],pnts[2])
l_perp=(np.array(nbr_cc)-np.array(cc)).dot(n)
L=np.sqrt( self.grid.cells_area(sel=[c,nbr]).sum() )
if l_perp < self.reject_cc_distance_factor*L:
failures['cells'].append(c)
break
return failures
# cc_py is more elegant and crappier
cost_method='base'
cost_thresh_default=0.22
def cost_function(self,n):
"""
Return a function which takes an x,y pair, and evaluates
a geometric cost function for node n based on the shape and
scale of triangle cells containing n
"""
local_length = self.scale( self.grid.nodes['x'][n] )
my_cells = self.grid.node_to_cells(n)
if len(my_cells) == 0:
return None
cell_nodes = [self.grid.cell_to_nodes(c)
for c in my_cells ]
# for the moment, can only deal with triangles
cell_nodes=np.array(cell_nodes)
# pack our neighbors from the cell list into an edge
# list that respects the CCW condition that pnt must be on the
# left of each segment
for j in range(len(cell_nodes)):
if cell_nodes[j,0] == n:
cell_nodes[j,:2] = cell_nodes[j,1:]
elif cell_nodes[j,1] == n:
cell_nodes[j,1] = cell_nodes[j,0]
cell_nodes[j,0] = cell_nodes[j,2] # otherwise, already set
edges = cell_nodes[:,:2]
edge_points = self.grid.nodes['x'][edges]
def cost(x,edge_points=edge_points,local_length=local_length):
return one_point_cost(x,edge_points,target_length=local_length)
Alist=[ [ e[0],e[1] ]
for e in edge_points[:,0,:] ]
Blist=[ [ e[0],e[1] ]
for e in edge_points[:,1,:] ]
EPS=1e-5*local_length
def cost_cc_and_scale_py(x0):
C=list(x0)
cc_cost=0
scale_cost=0
for A,B in zip(Alist,Blist):
tri_cc=circumcenter_py(A,B,C)
deltaAB=[ tri_cc[0] - A[0],
tri_cc[1] - A[1]]
ABs=[B[0]-A[0],B[1]-A[1]]
magABs=math.sqrt( ABs[0]*ABs[0] + ABs[1]*ABs[1])
vecAB=[ABs[0]/magABs, ABs[1]/magABs]
leftAB=vecAB[0]*deltaAB[1] - vecAB[1]*deltaAB[0]
deltaBC=[tri_cc[0] - B[0],
tri_cc[1] - B[1]]
BCs=[C[0]-B[0], C[1]-B[1]]
magBCs=math.sqrt( BCs[0]*BCs[0] + BCs[1]*BCs[1] )
vecBC=[BCs[0]/magBCs, BCs[1]/magBCs]
leftBC=vecBC[0]*deltaBC[1] - vecBC[1]*deltaBC[0]
deltaCA=[tri_cc[0] - C[0],
tri_cc[1] - C[1]]
CAs=[A[0]-C[0],A[1]-C[1]]
magCAs=math.sqrt(CAs[0]*CAs[0] + CAs[1]*CAs[1])
vecCA=[CAs[0]/magCAs, CAs[1]/magCAs]
leftCA=vecCA[0]*deltaCA[1] - vecCA[1]*deltaCA[0]
cc_fac=-4. # not bad
# cc_fac=-2. # a little nicer shape
# clip to 100, to avoid overflow in math.exp
if 0:
# this can favor isosceles too much
this_cc_cost = ( math.exp(min(100,cc_fac*leftAB/local_length)) +
math.exp(min(100,cc_fac*leftBC/local_length)) +
math.exp(min(100,cc_fac*leftCA/local_length)) )
else:
# maybe?
this_cc_cost = ( math.exp(min(100,cc_fac*leftAB/magABs)) +
math.exp(min(100,cc_fac*leftBC/magBCs)) +
math.exp(min(100,cc_fac*leftCA/magCAs)) )
# mixture
            # 0.3: lets the scale vary too much between the cells
# adjacent to n
alpha=1.0
avg_length=alpha*local_length + (1-alpha)*(magABs+magBCs+magCAs)/3
this_scale_cost=( (magABs-avg_length)**2
+ (magBCs-avg_length)**2
+ (magCAs-avg_length)**2 )
this_scale_cost/=avg_length*avg_length
cc_cost+=this_cc_cost
scale_cost+=this_scale_cost
# With even weighting between these, some edges are pushed long rather than
# having nice angles.
# 3 is a shot in the dark.
# 50 is more effective at avoiding a non-orthogonal cell
return 50*cc_cost+scale_cost
if self.cost_method=='base':
return cost
elif self.cost_method=='cc_py':
return cost_cc_and_scale_py
else:
assert False
##
####
def one_point_quad_cost(x,edge_scales,quads,para_scale,perp_scale):
# orthogonality cost:
ortho_cost=0.0
base_scale=np.sqrt( para_scale**2 + perp_scale**2 )
quads[:,0,:] = x # update the first point of each quad
for quad in quads:
cc=utils.poly_circumcenter(quad)
dists=utils.mag(quad-cc)
err=np.std(dists) / base_scale
ortho_cost += 10*err # ad hoc hoc hoc
# length cost:
scale_cost=0.0
dists=utils.mag(x - edge_scales[:,:2])
errs=(dists - edge_scales[:,2]) / edge_scales[:,2]
scale_cost = (2*errs**2).sum()
return ortho_cost+scale_cost
class AdvancingQuads(AdvancingFront):
PARA=1
PERP=2
para_scale=None
perp_scale=None
def __init__(self,grid=None,scale=None,perp_scale=None):
super(AdvancingQuads,self).__init__(grid=grid)
if scale is not None:
if perp_scale is None:
self.set_edge_scales(scale,scale)
else:
self.set_edge_scales(scale,perp_scale)
def instrument_grid(self,g):
super(AdvancingQuads,self).instrument_grid(g)
# 0 for unknown, 1 for parallel, 2 for perpendicular
g.add_edge_field('para',np.zeros(g.Nedges(),'i4'),on_exists='pass')
return g
def set_edge_scales(self,para_scale,perp_scale):
self.para_scale=para_scale
self.perp_scale=perp_scale
def add_existing_curve_surrounding(self,x):
# Get the nodes:
pc=self.grid.enclosing_nodestring(x,self.grid.Nnodes())
if pc is None:
raise Exception("No ring around this rosey")
curve_idx=self.add_curve( Curve(self.grid.nodes['x'][pc],closed=True) )
curve=self.curves[curve_idx]
# update those nodes to reflect their relationship to this curve.
# don't forget it's 1-based!
self.grid.nodes['oring'][pc]=1+curve_idx
self.grid.nodes['ring_f'][pc]=curve.distances[:-1]
for n in pc:
degree=self.grid.node_degree(n)
assert degree >= 2
if degree==2:
self.grid.nodes['fixed'][n]=self.HINT # self.SLIDE
else:
self.grid.nodes['fixed'][n]=self.RIGID
# and mark the internal edges as unmeshed:
for na,nb in utils.circular_pairs(pc):
j=self.grid.nodes_to_edge([na,nb])
if self.grid.edges['nodes'][j,0]==na:
side=0
else:
side=1
self.grid.edges['cells'][j,side]=self.grid.UNMESHED
# and for later sanity checks, mark the other side as outside (-1)
# if it's -99.
if self.grid.edges['cells'][j,1-side]==self.grid.UNKNOWN:
self.grid.edges['cells'][j,1-side]=self.grid.UNDEFINED
# infer the fixed nature of the edge
if self.grid.edges['cells'][j,1-side]>=0:
self.grid.edges['fixed'][j]=self.RIGID
# Add in the edge data to link it to this curve
if self.grid.edges['oring'][j]==0:
# only give it a ring if it is not already on a ring.
# There may be reason to override this in the future, since the ring
# information may be stale from an existing grid, and now we want
# to regenerate it.
self.grid.edges['oring'][j]=1+curve_idx
# side=0 when the edge is going the same direction as the
# ring, which in turn should be ring_sign=1.
self.grid.edges['ring_sign'][j]=1-2*side
def orient_quad_edge(self,j,orient):
self.grid.edges['para'][j]=orient
def enumerate_sites(self):
sites=[]
# FIX: This doesn't scale!
valid=(self.grid.edges['cells'][:,:]==self.grid.UNMESHED)& (self.grid.edges['para']!=0)[:,None]
J,Orient = np.nonzero(valid)
for j,orient in zip(J,Orient):
if self.grid.edges['deleted'][j]:
continue
he=self.grid.halfedge(j,orient)
he_nxt=he.fwd()
a=he.rev().node_rev()
b=he.node_rev()
c=he.node_fwd()
d=he.fwd().node_fwd()
sites.append( QuadSite(self,nodes=[a,b,c,d]) )
return sites
def cost_function(self,n):
local_para = self.para_scale
local_perp = self.perp_scale
my_cells = self.grid.node_to_cells(n)
if len(my_cells) == 0:
return None
if 0:
# HERE: needs to handle mix of n-gons
cell_nodes = [self.grid.cell_to_nodes(c)
for c in my_cells ]
cell_nodes=np.array(cell_nodes) # may contain undef nodes
# make sure all quads:
assert np.all( cell_nodes[:,:4]>=0 )
assert np.all( cell_nodes[:,4:]<0 ) # does that work?
else:
# more general -
cell_nodes=self.grid.cells['nodes'][my_cells]
# except that for the moment I'm only going to worry about the
# quads:
sel_quad=(cell_nodes[:,3]>=0)
if self.grid.max_sides>4:
sel_quad &=(cell_nodes[:,4]<0)
cell_nodes=cell_nodes[sel_quad]
# For each quad, rotate our node to be at the front of the list:
quad_nodes=[np.roll(quad,-list(quad).index(n))
for quad in cell_nodes[:,:4]]
quad_nodes=np.array(quad_nodes)
quads=self.grid.nodes['x'][quad_nodes]
# for the moment, don't worry about reestablishing scale, just
# focus on orthogonality
edge_scales=np.zeros( [0,3], 'f8') # np.array( [ [x,y,target_distance], ... ] )
def cost(x,edge_scales=edge_scales,quads=quads,
local_para=local_para,local_perp=local_perp):
return one_point_quad_cost(x,edge_scales,quads,local_para,local_perp)
return cost
def scale(self, x0):
# temporary hack - needed for relax_slide_node
return 0.5*(self.para_scale+self.perp_scale)
# Classes related to the decision tree
class DTNode(object):
parent=None
af=None # AdvancingTriangles object
cp=None # checkpoint
ops_parent=None # chunk of op_stack to get from parent to here.
options=None # node-specific list of data for child options
children=None # filled in by subclass [DTNode, ... ]
child_prior=None # est. cost for child
child_post =None # actual cost for child
def __init__(self,af,parent=None):
self.af=af
self.parent=parent
# in cases where init of the node makes some changes,
# this should be updated
self.cp=af.grid.checkpoint()
self.active_child=None # we don't manipulate this, but just signal that it's fresh
def set_options(self,options,priors):
self.options=options
self.child_prior=priors
N=len(options)
self.children=[None] * N
self.child_post =[None]*N
self.child_order=np.argsort(self.child_prior)
def revert_to_parent(self):
if self.parent is None:
return False
return self.parent.revert_to_here()
def revert_to_here(self):
"""
rewind to the state when we first encountered this node
"""
self.af.grid.revert(self.cp)
self.af.current=self
def try_child(self,i):
assert False # implemented by subclass
def best_child(self,count=0,cb=None):
"""
Try all, (or up to count) children,
use the best one based on post scores.
If no children succeeded, return False, otherwise True
"""
if count:
count=min(count,len(self.options))
else:
count=len(self.options)
best=None
for i in range(count):
print("best_child: trying %d / %d"%(i,count))
if self.try_child(i):
if cb: cb()
if best is None:
best=i
elif self.child_post[i] < self.child_post[best]:
best=i
if i<count-1:
self.revert_to_here()
else:
print("best_child: option %d did not succeed"%i)
if best is None:
# no children worked out -
print("best_child: no children worked")
return False
# wait to see if the best was the last, in which case
# can save an undo/redo
if best!=count-1:
self.revert_to_here()
self.try_child(best)
return True
class DTChooseSite(DTNode):
def __init__(self,af,parent=None):
super(DTChooseSite,self).__init__(af=af,parent=parent)
sites=af.enumerate_sites()
priors=[ site.metric()
for site in sites ]
self.set_options(sites,priors)
def try_child(self,i):
"""
Assumes that af state is currently at this node,
try the decision of the ith child, create the new DTNode
for that, and shift af state to be that child.
Returns true if successful. On failure (topological violation?)
return false, and state should be unchanged.
"""
assert self.af.current==self
site=self.options[self.child_order[i]]
self.children[i] = DTChooseStrategy(af=self.af,parent=self,site=site)
# nothing to update for posterior
self.child_post[i] = self.child_prior[i]
self.af.current=self.children[i]
return True
def best_child(self,count=0,cb=None):
"""
For choosing a site, prior is same as posterior
"""
if count:
count=min(count,len(self.options))
else:
count=len(self.options)
best=None
for i in range(count):
print("best_child: trying %d / %d"%(i,count))
if self.try_child(i):
if cb: cb()
# no need to go further
return True
return False
class DTChooseStrategy(DTNode):
def __init__(self,af,parent,site):
super(DTChooseStrategy,self).__init__(af=af,parent=parent)
self.site=site
self.af.resample_neighbors(site)
self.cp=af.grid.checkpoint()
actions=site.actions()
priors=[a.metric(site)
for a in actions]
self.set_options(actions,priors)
def try_child(self,i):
try:
edits=self.options[self.child_order[i]].execute(self.site)
self.af.optimize_edits(edits)
# could commit?
except self.af.cdt.IntersectingConstraints as exc:
self.af.log.error("Intersecting constraints - rolling back")
self.af.grid.revert(self.cp)
return False
except self.af.StrategyFailed as exc:
self.af.log.error("Strategy failed - rolling back")
self.af.grid.revert(self.cp)
return False
self.children[i] = DTChooseSite(af=self.af,parent=self)
self.active_edits=edits # not sure this is the right place to store this
self.af.current=self.children[i]
nodes=[]
for c in edits.get('cells',[]):
nodes += list(self.af.grid.cell_to_nodes(c))
for n in edits.get('nodes',[]):
nodes.append(n)
for j in edits.get('edges',[]):
# needed in particular for nonlocal, where nothing
# changes except the creation of an edge
nodes += list(self.af.grid.edges['nodes'][j])
nodes=list(set(nodes))
assert len(nodes) # something had to change, right?
cost = np.max( [ (self.af.eval_cost(n) or 0.0)
for n in nodes] )
self.child_post[i]=cost
return True
| mit | 6,816,814,694,789,307,000 | 36.425743 | 118 | 0.534702 | false |
mysociety/pombola | pombola/settings/tests_base.py | 1 | 1485 | import os
import shutil
from pombola.config import config
IN_TEST_MODE = True
base_dir = os.path.abspath( os.path.join( os.path.split(__file__)[0], '..', '..' ) )
# Change the data dir in testing, and delete it to ensure that we have a clean
# slate. Also print out a little warning - it adds clutter to the test output, but
# that is better than letting a site go live without noticing that test mode has
# been detected by mistake
conf_data_dir = config.get( 'DATA_DIR', 'data' )
if os.path.isabs(conf_data_dir):
data_dir = os.path.join( conf_data_dir, 'testing' )
else:
data_dir = os.path.abspath( os.path.join( base_dir, conf_data_dir, 'testing' ) )
if os.path.exists( data_dir ):
shutil.rmtree( data_dir )
print("Running in test mode! (testing data_dir is '%s')" % data_dir)
# For tests we've change the value of data_dir, so have to reset
# these settings variables:
MEDIA_ROOT = os.path.normpath( os.path.join( data_dir, "media_root/") )
STATIC_ROOT = os.path.normpath( os.path.join( data_dir, "collected_static/") )
HTTPLIB2_CACHE_DIR = os.path.join( data_dir, 'httplib2_cache' )
HANSARD_CACHE = os.path.join( data_dir, 'hansard_cache' )
MAP_BOUNDING_BOX_NORTH = None
MAP_BOUNDING_BOX_SOUTH = None
MAP_BOUNDING_BOX_EAST = None
MAP_BOUNDING_BOX_WEST = None
# A workaround so that functional tests don't fail with missing
# assets, as suggested here:
# https://github.com/cyberdelia/django-pipeline/issues/277
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
| agpl-3.0 | -4,685,007,054,679,419,000 | 37.076923 | 84 | 0.715152 | false |
icereval/osf-sync | tests/sync/test_consolidated_event_handler.py | 1 | 8572 | import os
from time import sleep
import shutil
import pytest
from watchdog.events import ( # noqa
FileDeletedEvent,
FileModifiedEvent,
FileCreatedEvent,
FileMovedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirCreatedEvent,
DirMovedEvent
)
from osfoffline import settings
from tests.base import OSFOTestBase
from tests.utils import unique_file_name, unique_folder_name
from tests.sync.utils import TestSyncWorker
class TestConsolidatedEventHandler(OSFOTestBase):
# 'z_' because pytest fixtures are run in alphabetical order
# h/t: http://stackoverflow.com/questions/25660064/in-which-order-are-pytest-fixtures-executed
@pytest.fixture(scope="function", autouse=True)
def z_attach_event_handler(self, request):
self.sync_worker = TestSyncWorker(
folder=str(self.root_dir)
)
self.sync_worker.start()
def stop():
self.sync_worker.stop()
self.sync_worker.flushed.clear()
self.sync_worker.done.set()
self.sync_worker.done.clear()
self.sync_worker.join()
request.addfinalizer(stop)
self.sync_worker.observer.ready.wait()
def test_create_file(self):
project = self.PROJECT_STRUCTURE[0]
osf_storage_path = self.root_dir.join(
project['rel_path'].lstrip(os.path.sep),
settings.OSF_STORAGE_FOLDER
)
file_name = unique_file_name()
file_path = osf_storage_path.join(
file_name
)
with open(str(file_path), 'w') as fp:
fp.write('The meaning of life is 42')
self.sync_worker.flushed.wait()
assert len(self.sync_worker._create_cache) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._create_cache[0],
FileCreatedEvent
) is True, \
"the one captured event is a FileCreatedEvent"
def test_update_file(self):
project = self.PROJECT_STRUCTURE[0]
file_path = self.root_dir.join(
project['files'][0]['children'][0]['rel_path'].lstrip(os.path.sep)
)
with open(str(file_path), 'w') as fp:
fp.write('Hello world')
self.sync_worker.flushed.wait()
assert len(self.sync_worker._event_cache.children()) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._event_cache.children()[0],
FileModifiedEvent
) is True, \
"the one captured event is a FileModifiedEvent"
def test_rename_file(self):
project = self.PROJECT_STRUCTURE[0]
file_path = self.root_dir.join(
project['files'][0]['children'][0]['rel_path'].lstrip(os.path.sep)
)
os.rename(str(file_path), 'foobar.baz')
self.sync_worker.flushed.wait()
assert len(self.sync_worker._event_cache.children()) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._event_cache.children()[0],
FileMovedEvent
) is True, \
"the one captured event is a FileMovedEvent"
def test_move_file(self):
project = self.PROJECT_STRUCTURE[0]
file_path = self.root_dir.join(
project['files'][0]['children'][0]['rel_path'].lstrip(os.path.sep)
)
new_path = str(file_path).replace(file_path.basename, 'foo.bar')
shutil.move(str(file_path), new_path)
self.sync_worker.flushed.wait()
assert len(self.sync_worker._event_cache.children()) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._event_cache.children()[0],
FileMovedEvent
) is True, \
"the one captured event is a FileMovedEvent"
def test_delete_file(self):
project = self.PROJECT_STRUCTURE[0]
file_path = self.root_dir.join(
project['files'][0]['children'][0]['rel_path'].lstrip(os.path.sep)
)
os.remove(str(file_path))
self.sync_worker.flushed.wait()
assert len(self.sync_worker._event_cache.children()) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._event_cache.children()[0],
FileDeletedEvent
) is True, \
"the one captured event is a FileDeletedEvent"
def test_create_folder(self):
project = self.PROJECT_STRUCTURE[0]
parent_dir_path = self.root_dir.join(
project['files'][0]['rel_path'].lstrip(os.path.sep)
)
dir_path = os.path.join(
str(parent_dir_path),
unique_folder_name()
)
os.mkdir(dir_path)
self.sync_worker.flushed.wait()
assert len(self.sync_worker._create_cache) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._create_cache[0],
DirCreatedEvent
) is True, \
"the one captured event is a DirCreatedEvent"
def test_create_folder_with_contents(self):
project = self.PROJECT_STRUCTURE[0]
parent_dir_path = self.root_dir.join(
project['files'][0]['rel_path'].lstrip(os.path.sep)
)
super_root_dir = self.root_dir.dirpath()
ext_folder = super_root_dir.mkdir('ext')
ext_child_path = ext_folder.join('ext_child')
with open(str(ext_child_path), 'w') as fp:
fp.write('Hello, world')
shutil.move(str(ext_folder), str(parent_dir_path) + os.path.sep)
self.sync_worker.flushed.wait()
assert len(self.sync_worker._create_cache) == 2, \
"exactly two events captured"
create_cache = self.sync_worker._sorted_create_cache()
assert isinstance(
create_cache[0],
DirCreatedEvent
) is True, \
"the first event is a DirCreatedEvent"
assert isinstance(
create_cache[1],
FileCreatedEvent
) is True, \
"the s event is a DirCreatedEvent"
def test_rename_folder(self):
project = self.PROJECT_STRUCTURE[0]
dir_path = self.root_dir.join(
project['files'][0]['rel_path'].lstrip(os.path.sep)
)
new_dir_path = str(dir_path).replace(dir_path.basename, 'newdir')
os.rename(str(dir_path), new_dir_path)
self.sync_worker.flushed.wait()
assert len(self.sync_worker._event_cache.children()) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._event_cache.children()[0],
DirMovedEvent
) is True, \
"the one captured event is a DirMovedEvent"
def test_move_folder(self):
project = self.PROJECT_STRUCTURE[0]
dir_path = self.root_dir.join(
project['files'][0]['rel_path'].lstrip(os.path.sep)
)
new_dir_path = str(dir_path).replace(dir_path.basename, 'newdir')
shutil.move(str(dir_path), new_dir_path)
self.sync_worker.flushed.wait()
assert len(self.sync_worker._event_cache.children()) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._event_cache.children()[0],
DirMovedEvent
) is True, \
"the one captured event is a DirMovedEvent"
def test_delete_folder(self):
project = self.PROJECT_STRUCTURE[0]
dir_path = self.root_dir.join(
project['files'][1]['rel_path'].lstrip(os.path.sep)
)
shutil.rmtree(str(dir_path))
self.sync_worker.flushed.wait()
assert len(self.sync_worker._event_cache.children()) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._event_cache.children()[0],
DirDeletedEvent
) is True, \
"the one captured event is a DirDeletedEvent"
def test_delete_folder_with_children(self):
project = self.PROJECT_STRUCTURE[0]
dir_path = self.root_dir.join(
project['files'][0]['rel_path'].lstrip(os.path.sep)
)
shutil.rmtree(str(dir_path))
self.sync_worker.flushed.wait()
assert len(self.sync_worker._event_cache.children()) == 1, \
"exactly one event captured"
assert isinstance(
self.sync_worker._event_cache.children()[0],
DirDeletedEvent
) is True, \
"the one captured event is a DirDeletedEvent"
| lgpl-3.0 | -1,746,631,683,988,542,200 | 35.476596 | 98 | 0.586911 | false |
jbdubois/obus | python/obus/looper.py | 1 | 3841 | #===============================================================================
# obus-python - obus client python module.
#
# @file protocol.py
#
# @brief obus python looper
#
# @author [email protected]
#
# Copyright (c) 2013 Parrot S.A.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Parrot Company nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PARROT COMPANY BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===============================================================================
import threading
import Queue as queue
#===============================================================================
#===============================================================================
_tls = threading.local()
#===============================================================================
#===============================================================================
class _Loop(object):
def __init__(self):
self._queue = queue.Queue()
self._running = False
def postMessage(self, handler, msg):
self._queue.put((handler, msg))
def run(self):
self._running = True
while self._running:
try:
# Use timeout so we can interrupt wait
(handler, msg) = self._queue.get(timeout=0.1)
handler.cb(msg)
except queue.Empty:
pass
def exit(self):
self._running = False
#===============================================================================
#===============================================================================
class Handler(object):
def __init__(self, cb):
self._loop = _tls.loop
self.cb = cb
def postMessage(self, msg):
self._loop.postMessage(self, msg)
#===============================================================================
#===============================================================================
def prepareLoop(loop=None):
# Make sure that current thread does not already have a loop object
if hasattr(_tls, "loop") and _tls.loop is not None:
raise Exception("Current thread already have a loop object")
# Create a new loop object
if loop is None:
_tls.loop = _Loop()
else:
_tls.loop = loop
#===============================================================================
#===============================================================================
def runLoop():
_tls.loop.run()
_tls.loop = None
#===============================================================================
#===============================================================================
def exitLoop():
_tls.loop.exit()
| lgpl-2.1 | 6,982,514,743,527,212,000 | 37.79798 | 80 | 0.51523 | false |
jfkirk/tensorrec | tensorrec/prediction_graphs.py | 1 | 5042 | import abc
import tensorflow as tf
from .recommendation_graphs import relative_cosine
class AbstractPredictionGraph(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def connect_dense_prediction_graph(self, tf_user_representation, tf_item_representation):
"""
This method is responsible for consuming user and item representations and calculating prediction scores for all
possible user-item pairs based on these representations.
:param tf_user_representation: tf.Tensor
The user representations as a Tensor of shape [n_users, n_components]
:param tf_item_representation: tf.Tensor
The item representations as a Tensor of shape [n_items, n_components]
:return: tf.Tensor
The predictions as a Tensor of shape [n_users, n_items]
"""
pass
@abc.abstractmethod
def connect_serial_prediction_graph(self, tf_user_representation, tf_item_representation, tf_x_user, tf_x_item):
"""
This method is responsible for consuming user and item representations and indices and calculating prediction
scores for particular user-item pairs.
:param tf_user_representation: tf.Tensor
The user representations as a Tensor of shape [n_users, n_components]
:param tf_item_representation: tf.Tensor
The item representations as a Tensor of shape [n_items, n_components]
:param tf_x_user: tf.Tensor
The users for whom to predict as a Tensor of shape [n_interactions]
:param tf_x_item: tf.Tensor
The items for which to predict as a Tensor of shape [n_interactions]
:return: tf.Tensor
The predictions as a Tensor of shape [n_interactions]
"""
pass
class DotProductPredictionGraph(AbstractPredictionGraph):
"""
This prediction function calculates the prediction as the dot product between the user and item representations.
Prediction = user_repr * item_repr
"""
def connect_dense_prediction_graph(self, tf_user_representation, tf_item_representation):
return tf.matmul(tf_user_representation, tf_item_representation, transpose_b=True)
def connect_serial_prediction_graph(self, tf_user_representation, tf_item_representation, tf_x_user, tf_x_item):
gathered_user_reprs = tf.gather(tf_user_representation, tf_x_user)
gathered_item_reprs = tf.gather(tf_item_representation, tf_x_item)
return tf.reduce_sum(tf.multiply(gathered_user_reprs, gathered_item_reprs), axis=1)
class CosineSimilarityPredictionGraph(AbstractPredictionGraph):
"""
This prediction function calculates the prediction as the cosine between the user and item representations.
Prediction = cos(user_repr, item_repr)
"""
def connect_dense_prediction_graph(self, tf_user_representation, tf_item_representation):
return relative_cosine(tf_tensor_1=tf_user_representation, tf_tensor_2=tf_item_representation)
def connect_serial_prediction_graph(self, tf_user_representation, tf_item_representation, tf_x_user, tf_x_item):
normalized_users = tf.nn.l2_normalize(tf_user_representation, 1)
normalized_items = tf.nn.l2_normalize(tf_item_representation, 1)
gathered_user_reprs = tf.gather(normalized_users, tf_x_user)
gathered_item_reprs = tf.gather(normalized_items, tf_x_item)
return tf.reduce_sum(tf.multiply(gathered_user_reprs, gathered_item_reprs), axis=1)
class EuclideanSimilarityPredictionGraph(AbstractPredictionGraph):
"""
This prediction function calculates the prediction as the negative euclidean distance between the user and
item representations.
Prediction = -1 * sqrt(sum((user_repr - item_repr)^2))
"""
epsilon = 1e-16
def connect_dense_prediction_graph(self, tf_user_representation, tf_item_representation):
# [ n_users, 1 ]
r_user = tf.reduce_sum(tf_user_representation ** 2, 1, keep_dims=True)
# [ n_items, 1 ]
r_item = tf.reduce_sum(tf_item_representation ** 2, 1, keep_dims=True)
# [ n_users, n_items ]
distance = (r_user
- 2.0 * tf.matmul(tf_user_representation, tf_item_representation, transpose_b=True)
+ tf.transpose(r_item))
# For numeric stability
distance = tf.maximum(distance, self.epsilon)
return -1.0 * tf.sqrt(distance)
def connect_serial_prediction_graph(self, tf_user_representation, tf_item_representation, tf_x_user, tf_x_item):
# [ n_interactions, n_components ]
gathered_user_reprs = tf.gather(tf_user_representation, tf_x_user)
gathered_item_reprs = tf.gather(tf_item_representation, tf_x_item)
# [ n_interactions, n_components ]
delta = tf.pow(gathered_user_reprs - gathered_item_reprs, 2)
# [ n_interactions, 1 ]
distance = tf.reduce_sum(delta, axis=1)
# For numeric stability
distance = tf.maximum(distance, self.epsilon)
return -1.0 * tf.sqrt(distance)
| apache-2.0 | -5,519,003,016,839,481,000 | 42.094017 | 120 | 0.688021 | false |
michaelneuder/image_quality_analysis | bin/nets/wip/ms_ssim_nets/structure_net.py | 1 | 9207 | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import matplotlib as mpl
import pandas as pd
import numpy as np
mpl.use('Agg')
import time
import matplotlib.pyplot as plt
def convolve_inner_layers(x, W, b):
'''
    inner layers of network --- relu activation
'''
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='VALID')
y = tf.nn.bias_add(y, b)
return tf.nn.relu(y)
def convolve_ouput_layer(x, W, b):
'''
output layer of network --- linear activation
'''
y = tf.nn.conv2d(x, W, strides = [1,1,1,1], padding='VALID')
y = tf.nn.bias_add(y, b)
return y
def conv_net(x, W, b):
'''
    entire conv net. each layer feeds into the following layer as well as the output layer
'''
conv1 = convolve_inner_layers(x, W['weights1'], b['bias1'])
conv2 = convolve_inner_layers(conv1, W['weights2'], b['bias2'])
conv3 = convolve_inner_layers(conv2, W['weights3'], b['bias3'])
conv4 = convolve_inner_layers(conv3, W['weights4'], b['bias4'])
output_feed = tf.concat([conv1, conv2, conv3, conv4],3)
output = convolve_ouput_layer(output_feed, W['weights_out'], b['bias_out'])
return output
def get_variance(training_target):
'''
returns variance of the target data. used in normalizing the error.
'''
all_pixels = training_target.flatten()
return all_pixels.var()
def normalize_input(train_data, test_data):
'''
normailizing input across each pixel an each channel (i.e. normalize for each input to network).
'''
mean, std_dev = np.mean(train_data, axis=0), np.std(train_data, axis=0)
return (train_data - mean) / std_dev, (test_data - mean) / std_dev
def get_epoch(x, y, n):
'''
splits entire data set into an epoch with minibatch of size n. returns a dict with key being the
minibatch number and the value being a length 2 list with the features in first index and
targets in the second.
'''
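    # e.g. with 10 examples and n=4 this returns {0: [x0,y0], 1: [x1,y1], 2: [x2,y2]},
    # where batch 2 holds the 2 leftover examples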
input_size = x.shape[0]
number_batches = input_size // n
extra_examples = input_size % n
batches = {}
batch_indices = np.arange(input_size)
np.random.shuffle(batch_indices)
for i in range(number_batches):
temp_indices = batch_indices[n*i:n*(i+1)]
temp_x = []
temp_y = []
for j in temp_indices:
temp_x.append(x[j])
temp_y.append(y[j])
batches[i] = [np.asarray(temp_x), np.asarray(temp_y)]
if extra_examples != 0:
extra_indices = batch_indices[input_size-extra_examples:input_size]
temp_x = []
temp_y = []
for k in extra_indices:
temp_x.append(x[k])
temp_y.append(y[k])
        batches[number_batches] = [np.asarray(temp_x), np.asarray(temp_y)]  # also works when number_batches == 0
return batches
def main():
print('welcome to structure net.')
# parameters
filter_dim, filter_dim2 = 11, 1
batch_size = 4
image_dim, result_dim = 96, 86
input_layer, first_layer, second_layer, third_layer, fourth_layer, output_layer = 4, 100, 50, 25, 10, 1
learning_rate = .001
epochs = 5000
# data input
data_path = 'https://raw.githubusercontent.com/michaelneuder/image_quality_analysis/master/data/sample_data/'
# train data --- 500 images, 96x96 pixels
orig_500 = pd.read_csv('{}orig_500.txt'.format(data_path), header=None, delim_whitespace = True)
recon_500 = pd.read_csv('{}recon_500.txt'.format(data_path), header=None, delim_whitespace = True)
# test data --- 140 images, 96x96 pixels
orig_140 = pd.read_csv('{}orig_140.txt'.format(data_path), header=None, delim_whitespace = True)
recon_140 = pd.read_csv('{}recon_140.txt'.format(data_path), header=None, delim_whitespace = True)
# train target --- 500 images, 86x86 pixels (dimension reduction due no zero padding being used)
structure_500= pd.read_csv('{}structure_500.csv'.format(data_path), header=None)
structure_140 = pd.read_csv('{}structure_140.csv'.format(data_path), header=None)
print('images loaded...')
# getting 4 input channels for train and test --- (orig, recon, orig squared, recon squared)
original_images_train = orig_500.values
original_images_train_sq = orig_500.values**2
reconstructed_images_train = recon_500.values
reconstructed_images_train_sq = recon_500.values**2
original_images_test = orig_140.values
original_images_test_sq = orig_140.values**2
reconstructed_images_test = recon_140.values
reconstructed_images_test_sq = recon_140.values**2
# stack inputs
training_input = np.dstack((original_images_train, reconstructed_images_train, original_images_train_sq, reconstructed_images_train_sq))
testing_input = np.dstack((original_images_test, reconstructed_images_test, original_images_test_sq, reconstructed_images_test_sq))
# normalize inputs
training_input_normalized, testing_input_normalized = normalize_input(training_input, testing_input)
# target values
training_target = structure_500.values
testing_target = structure_140.values
# get size of training and testing set
train_size = original_images_train.shape[0]
test_size = original_images_test.shape[0]
# reshaping features to (num images, 96x96, 4 channels)
train_features = np.reshape(training_input_normalized, [train_size,image_dim,image_dim,input_layer])
test_features = np.reshape(testing_input_normalized, [test_size,image_dim,image_dim,input_layer])
# reshaping target to --- (num images, 86x86, 1)
train_target = np.reshape(training_target, [train_size, result_dim, result_dim, output_layer])
test_target = np.reshape(testing_target, [test_size, result_dim, result_dim, output_layer])
# initializing filters, this is what we are trying to learn --- fan in
scaling_factor = 0.1
initializer = tf.contrib.layers.xavier_initializer()
weights = {
'weights1': tf.get_variable('weights1', [filter_dim,filter_dim,input_layer,first_layer], initializer=initializer),
'weights2': tf.get_variable('weights2', [filter_dim2,filter_dim2,first_layer,second_layer], initializer=initializer),
'weights3': tf.get_variable('weights3', [filter_dim2,filter_dim2,second_layer,third_layer], initializer=initializer),
'weights4': tf.get_variable('weights4', [filter_dim2,filter_dim2,third_layer,fourth_layer], initializer=initializer),
'weights_out': tf.get_variable('weights_out', [filter_dim2,filter_dim2,fourth_layer+third_layer+second_layer+first_layer,output_layer], initializer=initializer)
}
biases = {
'bias1': tf.get_variable('bias1', [first_layer], initializer=initializer),
'bias2': tf.get_variable('bias2', [second_layer], initializer=initializer),
'bias3': tf.get_variable('bias3', [third_layer], initializer=initializer),
'bias4': tf.get_variable('bias4', [fourth_layer], initializer=initializer),
'bias_out': tf.get_variable('bias_out', [output_layer], initializer=initializer)
}
# tensorflow setup
x = tf.placeholder(tf.float32, [None, image_dim, image_dim, input_layer])
y = tf.placeholder(tf.float32, [None, result_dim, result_dim, output_layer])
# model
prediction = conv_net(x, weights, biases)
# get variance to normalize error terms during training
variance = get_variance(train_target)
# loss and optimization
cost = tf.reduce_mean(tf.square(tf.subtract(prediction, y)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()
# error arrays
training_error, testing_error = [], []
epoch_time = np.asarray([])
# tensorflow session & training
with tf.Session() as sess:
sess.run(init)
global_start_time = time.time()
print('starting training...')
for epoch_count in range(epochs):
start_time = time.time()
epoch = get_epoch(train_features, train_target, batch_size)
for i in epoch:
x_data_train, y_data_train = np.asarray(epoch[i][0]), np.asarray(epoch[i][1])
sess.run(optimizer, feed_dict={x : x_data_train, y : y_data_train})
train_loss = sess.run(cost, feed_dict={x : x_data_train, y : y_data_train})
training_error.append(100*train_loss/variance)
test_loss = sess.run(cost, feed_dict={x : test_features, y : test_target})
testing_error.append(100*test_loss/variance)
end_time = time.time()
epoch_time = np.append(epoch_time, end_time-start_time)
print('current epoch: {} -- '.format(epoch_count)
+'current train error: {:.4f} -- '.format(100*train_loss/variance)
+'average epoch time: {:.4}s '.format(epoch_time.mean()))
f, axarr = plt.subplots(nrows=1, ncols=1, figsize=(9,6))
axarr.plot(np.arange(epoch_count+1), training_error, label='train')
axarr.plot(np.arange(epoch_count+1), testing_error, label='test')
axarr.legend()
axarr.set_ylim(0,100)
plt.savefig('relu_1521_struct.png')
print('training finished.')
if __name__ == '__main__':
main()
| mit | -9,039,136,241,680,621,000 | 42.023364 | 168 | 0.652221 | false |
nakulm95/team_swag | test2.py | 1 | 2257 | import RPi.GPIO as GPIO
import time
import os
import RPIO
SPICLK = 15
SPIMISO = 13
SPIMOSI = 11
SPICS = 7
IRRX = 15
IRTX = 11
def main():
init()
while True:
print(GPIO.input(IRRX))
## GPIO.output(IRTX, True)
## time.sleep(.000013)
## GPIO.output(IRTX, False)
## time.sleep(.000013)
#v = readadc(0,SPICLK,SPIMOSI,SPIMISO,SPICS)
#if v == 1023:
# print "shitz"
# break
#else:
# print v
#while(True):
#if not GPIO.input(11):
#print("shit detected")
#time.sleep(2)
def init():
GPIO.setmode(GPIO.BOARD)
# set up the SPI interface pins
# GPIO.setup(SPIMOSI, GPIO.OUT)
# GPIO.setup(SPIMISO, GPIO.IN)
# GPIO.setup(SPICLK, GPIO.OUT)
# GPIO.setup(SPICS, GPIO.OUT)
GPIO.setup(IRRX, GPIO.IN)
GPIO.setup(IRTX, GPIO.OUT)
#GPIO.setup(11, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
# vibration sensor
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
if ((adcnum > 7) or (adcnum < 0)):
return -1
GPIO.output(cspin, True)
GPIO.output(clockpin, False) # start clock low
GPIO.output(cspin, False) # bring CS low
commandout = adcnum
commandout |= 0x18 # start bit + single-ended bit
commandout <<= 3 # we only need to send 5 bits here
for i in range(5):
if (commandout & 0x80):
GPIO.output(mosipin, True)
else:
GPIO.output(mosipin, False)
commandout <<= 1
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout = 0
# read in one empty bit, one null bit and 10 ADC bits
for i in range(12):
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout <<= 1
if (GPIO.input(misopin)):
adcout |= 0x1
GPIO.output(cspin, True)
adcout >>= 1 # first bit is 'null' so drop it
return adcout
if __name__ == "__main__":
main()
| mit | -6,307,853,393,377,386,000 | 26.192771 | 62 | 0.519273 | false |
MTgeophysics/mtpy | mtpy/gui/SmartMT/Components/PlotParameter/station_selection.py | 1 | 1034 | # -*- coding: utf-8 -*-
"""
Description:
Usage:
Author: YingzhiGou
Date: 24/10/2017
"""
from qtpy.QtCore import Signal
from qtpy.QtWidgets import QGroupBox
from mtpy.gui.SmartMT.ui_asset.groupbox_station_select import Ui_GroupBox_Station_Select
class StationSelection(QGroupBox):
def __init__(self, parent):
QGroupBox.__init__(self, parent)
self.ui = Ui_GroupBox_Station_Select()
self.ui.setupUi(self)
self.mt_objs = None
self.ui.comboBox_station.currentIndexChanged.connect(self._current_station_changed)
def _current_station_changed(self):
self.station_changed.emit()
station_changed = Signal()
def set_data(self, mt_objs):
self.ui.comboBox_station.clear()
self.mt_objs = []
for mt_obj in mt_objs:
self.mt_objs.append(mt_obj)
self.ui.comboBox_station.addItem(mt_obj.station)
def get_station(self):
index = self.ui.comboBox_station.currentIndex()
return self.mt_objs[index]
| gpl-3.0 | 5,908,322,875,758,642,000 | 24.85 | 91 | 0.646035 | false |
anderfosca/contextbroker | code/virtualenv/broker/broker.py | 1 | 10285 | #!flask/bin/python
from flask import Flask, jsonify, request, render_template
import modules.getProviders as getProviders
import modules.advertisement as adv
import modules.getContext as getContext
import modules.subscription as subscription
import modules.update as update
import modules.contextml_validator as contextml_validator
import modules.generic_response as generic_response
import logging
from logging.handlers import RotatingFileHandler
import os
import pymongo
from pymongo import MongoClient
broker = Flask('broker')
# Logging initialization
logger = logging.getLogger('broker')
logger.setLevel(logging.INFO)
file_handler = RotatingFileHandler(os.path.dirname(os.path.abspath(__file__)) + '/log/broker',
maxBytes=1024 * 1024 * 100, backupCount=20)
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def print_response(sender, response, **extra):
    print('Request context is about to close down. '
          'Response: %s' % response.data)
from flask import request_finished
request_finished.connect(print_response, broker)
# Broker Interfaces
# getProviders
# who calls it: Consumer
# expected data: none
# description: the Consumer requests the Providers registered with the Broker
# returns: XML with an Advertisement structure containing the information of the registered
# Providers
# Receives GET message, with scope and entity arguments, searches the database for Providers associated with the
# arguments, returns ContextML Providers information message or ERROR ContextML message
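# Example request (illustrative values): GET /getProviders?scope=position&entity=user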
@broker.route('/getProviders', methods=['GET'])
def get_providers():
scope = request.args.get('scope')
entity_type = request.args.get('entity')
logger.info('getProviders - scope: '+scope+' entity_type: '+entity_type)
result = getProviders.get_providers(scope, entity_type)
return result
# advertisement
# who calls it: Provider
# expected data: XML with the Provider's information
# description: registers (or updates) the information of the Provider that sent the data; if this is a second Adv,
# the new data is treated as the most recent and replaces the previous data. The Provider must stay in contact with
# the Broker from time to time; the Broker has a timer and, if there is no interaction within that time, the Broker
# asks the Provider for a sign of life, in the form of an ACK
# returns: success or error message
# Receives ContextML advertisement message, validates it, registers the Provider and Scopes on the database, returns OK
# or ERROR ContextML message
@broker.route('/advertisement', methods=['POST'])
def advertisement():
    # TODO: notify the other brokers that this advertisement was received
xml_string = request.data
if contextml_validator.validate_contextml(xml_string):
result = adv.register_provider(xml_string)
else:
logger.warn('advertisement - XML not accepted by ContextML Schema')
result = generic_response.generate_response('ERROR','400','Bad XML','advertisement')
return result
#return jsonify({'result': result})
# getContext
# who calls it: Consumer
# expected data: URL parameters:
#                 scopeList - comma-separated list of scopes, no spaces, must not be empty
#                 entities - comma-separated list of IDs and types, no spaces:
#                            entities=user|joao,user|roberto,
#                            or
#                            entity and type - for a single entity: entity=joao&type=user
# description: the Consumer asks for data that satisfies the Scopes and entities listed in the parameters
# returns: a ctxEL message with the data matching the parameters, or an error message
# Receives a GET message, with scopeList and entities, or, entity and type arguments, searches for the content in
# the database, if not found asks the Providers associated with the arguments, returns the elements queried or ERROR
# ContextML message
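# Example request (illustrative values): GET /getContext?scopeList=position,weather&entities=user|joao,user|roberto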
@broker.route('/getContext', methods=['GET'])
def get_context():
scope_list = request.args.get('scopeList')
if request.args.get('entities'):
entities = request.args.get('entities')
else:
entities = request.args.get('type') + '|' + request.args.get('entity')
result = getContext.get_context(scope_list, entities)
print result
return result
# subscribe
# who calls it: Consumer
# expected data: URL parameters:
#                  entity - ID of the desired entity: entity=joao
#                  type - type of the desired entity: type=user
#                  scopeList - comma-separated list of desired scopes, no spaces: location,name
#                  callbackUrl - address to which the Broker will send data when it is updated by the Provider
#                  time - lifetime of the subscription, in minutes, an integer greater than 0
# description: the Consumer sends the entity and scopes it wants to receive updates about, its URL, and a lifetime
# for the subscription
# returns: success or error message
# Receives a GET message, with entity, type, scopeList, callbackUrl and time arguments in the URL, registers
# the Subscription and returns OK or ERROR ContextML message
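# Example request (illustrative values):
# GET /subscribe?entity=joao&type=user&scopeList=position&callbackUrl=http://consumer.example/notify&time=30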
@broker.route('/subscribe', methods=['GET'])
def subscribe():
entity_id = request.args.get('entity')
entity_type = request.args.get('type')
scope_list = request.args.get('scopeList')
callback_url = request.args.get('callbackUrl')
minutes = request.args.get('time')
result = subscription.subscribe(callback_url, entity_id, entity_type, scope_list, minutes)
return result
# update
# Receives a ContextML message, validates it, makes the update in the database, returns OK or ERROR ContextML message.
@broker.route('/update', methods=['POST'])
def context_update():
update_xml = request.data
if contextml_validator.validate_contextml(update_xml):
result = update.context_update(update_xml)
else:
logger.warn('update - XML not accepted by ContextML Schema')
result = generic_response.generate_response('ERROR','400','Bad XML','update')
return result
# Index page of the Broker, has links to Providers, Subscriptions, Registries and Log pages
@broker.route('/')
def index():
return render_template("index.html")
# Gets and shows the Providers in the providers.html template
@broker.route('/providers')
def providers():
##################MONGODB
answ = MongoClient().broker.providers.find()
##################MONGODB
return render_template("providers.html", answ=answ)
# Gets and shows the Subscriptions in the subscriptions.html template
@broker.route('/subscriptions')
def subscriptions():
###############MONGODB
answ = MongoClient().broker.subscriptions.find()
###############MONGODB
return render_template("subscriptions.html", answ=answ)
# Gets and shows the Registries in the registries.html template
@broker.route('/registers')
def registers():
###############MONGODB
answ = MongoClient().broker.registries.find()
###############MONGODB
return render_template("registers.html", answ=answ)
# Shows the log file content, in the log.html template
@broker.route('/log')
def log_page():
with open(os.path.dirname(os.path.abspath(__file__)) + '/log/broker', 'r') as f:
log_string = f.read()
return render_template("log.html", log_string=log_string)
# heartbeat
@broker.route('/heartbeat')
def heartbeat():
return "OK"
# before_request
# description: runs whatever is here before any request, whether GET or POST
@broker.before_request
def before_request():
args = request.args
data = request.data
print "before_request "
#print args, data
#request.remote_addr
print request.environ['REMOTE_ADDR']
    # send the request info to the brother brokers
@broker.after_request
def per_request_callbacks(response):
print "after_request"
#print request.args
#print response.data
    # notify the brother brokers that the request has finished
return response
@broker.route('/tf_message')
def tf_message():
    # there must be authentication of the brother Broker, so this does not become a target for attacks
    # receive the message, extract the values that matter, and store them in an in-memory hash table
    # keep only one message per broker, the latest one: once the other Broker confirms a task was executed,
    # that message no longer matters, so the next one can simply be stored over it, keeping memory usage low
    # no state recovery is needed because this is stateless
return 'bla'
@broker.teardown_request
def teardown_request(exception=None):
print 'this runs after request'
# --------background function
# ----function that performs the TF
# 1 send a heartbeat (HB) to a brother broker
#    check whether it responds
#        if yes
#            wait 5s
#            go back to 1
#        if not
#            call it again
#            if it responded
#                wait 5s
#                go back to 1
#            if not
#                fetch the brother's last message
#                check whether it is an "ok, I did my stuff" OR a "do this, brother"
#                if it is an "ok, I did my stuff"
#                  2 send HB
#                    if it responded
#                        wait 5s
#                        go back to 2
#                    if it did not respond
#                        go back to 2
#                if it is a "do this, brother"
#                    tell the other backups it is going to "do this"
#                    do "this"
#                    tell the others it is done
#                    3 send HB to the brother
#                        if it responded
#                            wait 5s
#                            go back to 1
#                        if not
#                            go back to 3
#
#
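# A minimal sketch of the loop above, assuming the brother brokers expose the /heartbeat and
# /tf_message endpoints defined in this file; the helper names and the message format are
# illustrative assumptions, not part of the current implementation:
#
#   import requests, time
#   def takeover_loop(brother_url):
#       while True:
#           for attempt in range(2):                        # step 1: send HB, retry once
#               try:
#                   requests.get(brother_url + '/heartbeat', timeout=5)
#                   break                                   # brother is alive
#               except requests.RequestException:
#                   continue
#           else:                                           # both attempts failed
#               msg = get_last_message(brother_url)         # hypothetical helper
#               if msg and msg.get('type') == 'do_this':    # the "do this, brother" case
#                   notify_backups('doing', msg)            # hypothetical helper
#                   execute_task(msg)                       # hypothetical helper
#                   notify_backups('done', msg)
#           time.sleep(5)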
# TODO timers that keep track of the expirations, etc.
# TODO docstring
if __name__ == '__main__':
logger.info('Started')
client = MongoClient()
db = client.broker
db.providers.remove()
db.scopes.remove()
db.entities.remove()
db.registries.remove()
db.subscriptions.remove()
broker.run(debug=True, use_reloader=True, threaded=True)
| gpl-2.0 | -6,344,407,240,051,767,000 | 37.811321 | 120 | 0.6737 | false |
HewlettPackard/python-proliant-sdk | examples/Rest/ex33_set_bios_service.py | 1 | 1873 | # Copyright 2016 Hewlett Packard Enterprise Development, LP.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _restobject import RestObject
def ex33_set_bios_service(restobj, bios_properties, bios_password=None):
sys.stdout.write("\nEXAMPLE 33: Set Bios Service\n")
instances = restobj.search_for_type("Bios.")
for instance in instances:
response = restobj.rest_patch(instance["href"], bios_properties, \
bios_password)
restobj.error_handler(response)
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
#Create a REST object
REST_OBJ = RestObject(iLO_https_url, iLO_account, iLO_password)
ex33_set_bios_service(REST_OBJ, {'ServiceName':'HP', \
'ServiceEmail':'[email protected]'})
| apache-2.0 | 6,621,441,918,202,101,000 | 37.851064 | 78 | 0.649226 | false |
murgatroid99/grpc | tools/run_tests/artifacts/distribtest_targets.py | 1 | 12314 | #!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
dockerfile_dir,
shell_command,
environ={},
flake_retries=0,
timeout_retries=0,
copy_rel_path=None):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
# the entire repo will be cloned if copy_rel_path is not set.
if copy_rel_path:
environ['RELATIVE_COPY_PATH'] = copy_rel_path
docker_args = []
for k, v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {
'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
docker_args,
environ=docker_env,
shortname='distribtest.%s' % (name),
timeout_seconds=30 * 60,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name,
cmdline,
environ=None,
shell=False,
flake_retries=0,
timeout_retries=0,
use_workspace=False,
timeout_seconds=10 * 60):
"""Creates jobspec."""
environ = environ.copy()
if use_workspace:
environ['WORKSPACE_NAME'] = 'workspace_%s' % name
cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
] + cmdline
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
shortname='distribtest.%s' % (name),
timeout_seconds=timeout_seconds,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
class CSharpDistribTest(object):
"""Tests C# NuGet package"""
def __init__(self, platform, arch, docker_suffix=None,
use_dotnet_cli=False):
self.name = 'csharp_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'csharp', platform, arch]
self.script_suffix = ''
if docker_suffix:
self.name += '_%s' % docker_suffix
self.labels.append(docker_suffix)
if use_dotnet_cli:
self.name += '_dotnetcli'
self.script_suffix = '_dotnetcli'
self.labels.append('dotnetcli')
else:
self.labels.append('olddotnet')
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/csharp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' %
self.script_suffix,
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(
self.name, [
'test/distrib/csharp/run_distrib_test%s.sh' %
self.script_suffix
],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
use_workspace=True)
elif self.platform == 'windows':
if self.arch == 'x64':
                # Use double leading / as the first occurrence gets removed by msys bash
# when invoking the .bat file (side-effect of posix path conversion)
environ = {
'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
}
else:
environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
return create_jobspec(
self.name, [
'test\\distrib\\csharp\\run_distrib_test%s.bat' %
self.script_suffix
],
environ=environ,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class PythonDistribTest(object):
"""Tests Python package"""
def __init__(self, platform, arch, docker_suffix):
self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if not self.platform == 'linux':
raise Exception("Not supported yet.")
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/python_%s_%s' % (self.docker_suffix,
self.arch),
'test/distrib/python/run_distrib_test.sh',
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class RubyDistribTest(object):
"""Tests Ruby package"""
def __init__(self, platform, arch, docker_suffix, ruby_version=None):
self.name = 'ruby_%s_%s_%s_version_%s' % (platform, arch, docker_suffix,
ruby_version or 'unspecified')
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.ruby_version = ruby_version
self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
arch_to_gem_arch = {
'x64': 'x86_64',
'x86': 'x86',
}
if not self.platform == 'linux':
raise Exception("Not supported yet.")
dockerfile_name = 'tools/dockerfile/distribtest/ruby_%s_%s' % (
self.docker_suffix, self.arch)
if self.ruby_version is not None:
dockerfile_name += '_%s' % self.ruby_version
return create_docker_jobspec(
self.name,
dockerfile_name,
'test/distrib/ruby/run_distrib_test.sh %s %s' %
(arch_to_gem_arch[self.arch], self.platform),
copy_rel_path='test/distrib')
def __str__(self):
return self.name
class PHPDistribTest(object):
"""Tests PHP package"""
def __init__(self, platform, arch, docker_suffix=None):
self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(
self.name,
'tools/dockerfile/distribtest/php_%s_%s' % (self.docker_suffix,
self.arch),
'test/distrib/php/run_distrib_test.sh',
copy_rel_path='test/distrib')
elif self.platform == 'macos':
return create_jobspec(
self.name, ['test/distrib/php/run_distrib_test.sh'],
environ={'EXTERNAL_GIT_ROOT': '../../../..'},
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class CppDistribTest(object):
"""Tests Cpp make intall by building examples."""
def __init__(self, platform, arch, docker_suffix=None, testcase=None):
if platform == 'linux':
self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix,
testcase)
else:
self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.testcase = testcase
self.labels = [
'distribtest', 'cpp', platform, arch, docker_suffix, testcase
]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(
self.name, 'tools/dockerfile/distribtest/cpp_%s_%s' %
(self.docker_suffix, self.arch),
'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
elif self.platform == 'windows':
return create_jobspec(
self.name,
['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
environ={},
timeout_seconds=30 * 60,
use_workspace=True)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return [
CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
CppDistribTest('linux', 'x64', 'jessie', 'cmake'),
CppDistribTest('windows', 'x86', testcase='cmake'),
CSharpDistribTest('linux', 'x64', 'wheezy'),
CSharpDistribTest('linux', 'x64', 'jessie'),
CSharpDistribTest('linux', 'x86', 'jessie'),
CSharpDistribTest('linux', 'x64', 'centos7'),
CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
CSharpDistribTest('macos', 'x86'),
CSharpDistribTest('windows', 'x86'),
CSharpDistribTest('windows', 'x64'),
PythonDistribTest('linux', 'x64', 'wheezy'),
PythonDistribTest('linux', 'x64', 'jessie'),
PythonDistribTest('linux', 'x86', 'jessie'),
PythonDistribTest('linux', 'x64', 'centos6'),
PythonDistribTest('linux', 'x64', 'centos7'),
PythonDistribTest('linux', 'x64', 'fedora20'),
PythonDistribTest('linux', 'x64', 'fedora21'),
PythonDistribTest('linux', 'x64', 'fedora22'),
PythonDistribTest('linux', 'x64', 'fedora23'),
PythonDistribTest('linux', 'x64', 'opensuse'),
PythonDistribTest('linux', 'x64', 'arch'),
PythonDistribTest('linux', 'x64', 'ubuntu1204'),
PythonDistribTest('linux', 'x64', 'ubuntu1404'),
PythonDistribTest('linux', 'x64', 'ubuntu1604'),
RubyDistribTest('linux', 'x64', 'wheezy'),
RubyDistribTest('linux', 'x64', 'jessie'),
RubyDistribTest('linux', 'x86', 'jessie'),
RubyDistribTest('linux', 'x64', 'jessie', ruby_version='ruby_2_0_0'),
RubyDistribTest('linux', 'x64', 'centos6'),
RubyDistribTest('linux', 'x64', 'centos7'),
RubyDistribTest('linux', 'x64', 'fedora20'),
RubyDistribTest('linux', 'x64', 'fedora21'),
RubyDistribTest('linux', 'x64', 'fedora22'),
RubyDistribTest('linux', 'x64', 'fedora23'),
RubyDistribTest('linux', 'x64', 'opensuse'),
RubyDistribTest('linux', 'x64', 'ubuntu1204'),
RubyDistribTest('linux', 'x64', 'ubuntu1404'),
RubyDistribTest('linux', 'x64', 'ubuntu1604'),
PHPDistribTest('linux', 'x64', 'jessie'),
PHPDistribTest('macos', 'x64'),
]
| apache-2.0 | 8,822,937,347,256,965,000 | 36.202417 | 87 | 0.553273 | false |
tartavull/google-cloud-python | pubsub/google/cloud/pubsub/_gax.py | 1 | 30972 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAX wrapper for Pubsub API requests."""
import functools
from google.cloud.gapic.pubsub.v1.publisher_client import PublisherClient
from google.cloud.gapic.pubsub.v1.subscriber_client import SubscriberClient
from google.gax import CallOptions
from google.gax import INITIAL_PAGE
from google.gax.errors import GaxError
from google.gax.grpc import exc_to_code
from google.protobuf.json_format import MessageToDict
from google.cloud.proto.pubsub.v1.pubsub_pb2 import PubsubMessage
from google.cloud.proto.pubsub.v1.pubsub_pb2 import PushConfig
from grpc import insecure_channel
from grpc import StatusCode
from google.cloud._helpers import _to_bytes
from google.cloud._helpers import _pb_timestamp_to_rfc3339
from google.cloud._helpers import _timedelta_to_duration_pb
from google.cloud._helpers import make_secure_channel
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.exceptions import Conflict
from google.cloud.exceptions import NotFound
from google.cloud.iterator import GAXIterator
from google.cloud.pubsub import __version__
from google.cloud.pubsub._helpers import subscription_name_from_path
from google.cloud.pubsub.snapshot import Snapshot
from google.cloud.pubsub.subscription import Subscription
from google.cloud.pubsub.topic import Topic
_CONFLICT_ERROR_CODES = (
StatusCode.FAILED_PRECONDITION, StatusCode.ALREADY_EXISTS)
class _PublisherAPI(object):
"""Helper mapping publisher-related APIs.
:type gax_api: :class:`.publisher_client.PublisherClient`
:param gax_api: API object used to make GAX requests.
:type client: :class:`~google.cloud.pubsub.client.Client`
:param client: The client that owns this API object.
"""
def __init__(self, gax_api, client):
self._gax_api = gax_api
self._client = client
def list_topics(self, project, page_size=0, page_token=None):
"""List topics for the project associated with this API.
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/list
:type project: str
:param project: project ID
:type page_size: int
        :param page_size: maximum number of topics to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of topics. If not
passed, the API will return the first page of
topics.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.pubsub.topic.Topic`
accessible to the current API.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_topics(
path, page_size=page_size, options=options)
return GAXIterator(self._client, page_iter, _item_to_topic)
def topic_create(self, topic_path):
"""API call: create a topic
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/create
:type topic_path: str
:param topic_path: fully-qualified path of the new topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:rtype: dict
:returns: ``Topic`` resource returned from the API.
:raises: :exc:`google.cloud.exceptions.Conflict` if the topic already
exists
"""
try:
topic_pb = self._gax_api.create_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) in _CONFLICT_ERROR_CODES:
raise Conflict(topic_path)
raise
return {'name': topic_pb.name}
def topic_get(self, topic_path):
"""API call: retrieve a topic
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/get
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:rtype: dict
:returns: ``Topic`` resource returned from the API.
:raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
exist
"""
try:
topic_pb = self._gax_api.get_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
return {'name': topic_pb.name}
def topic_delete(self, topic_path):
"""API call: delete a topic
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/delete
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
"""
try:
self._gax_api.delete_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
def topic_publish(self, topic_path, messages, timeout=30):
"""API call: publish one or more messages to a topic
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:type messages: list of dict
:param messages: messages to be published.
:type timeout: int
:param timeout: (Optional) Timeout seconds.
:rtype: list of string
:returns: list of opaque IDs for published messages.
:raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
exist
"""
options = CallOptions(is_bundling=False, timeout=timeout)
message_pbs = [_message_pb_from_mapping(message)
for message in messages]
try:
result = self._gax_api.publish(topic_path, message_pbs,
options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
return result.message_ids
def topic_list_subscriptions(self, topic, page_size=0, page_token=None):
"""API call: list subscriptions bound to a topic
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics.subscriptions/list
:type topic: :class:`~google.cloud.pubsub.topic.Topic`
:param topic: The topic that owns the subscriptions.
:type page_size: int
        :param page_size: maximum number of subscriptions to return. If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of subscriptions.
If not passed, the API will return the first page
of subscriptions.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.pubsub.subscription.Subscription`
accessible to the current API.
:raises: :exc:`~google.cloud.exceptions.NotFound` if the topic does
not exist.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
topic_path = topic.full_name
try:
page_iter = self._gax_api.list_topic_subscriptions(
topic_path, page_size=page_size, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
iterator = GAXIterator(self._client, page_iter,
_item_to_subscription_for_topic)
iterator.topic = topic
return iterator
class _SubscriberAPI(object):
"""Helper mapping subscriber-related APIs.
    :type gax_api: :class:`.subscriber_client.SubscriberClient`
:param gax_api: API object used to make GAX requests.
:type client: :class:`~google.cloud.pubsub.client.Client`
:param client: The client that owns this API object.
"""
def __init__(self, gax_api, client):
self._gax_api = gax_api
self._client = client
def list_subscriptions(self, project, page_size=0, page_token=None):
"""List subscriptions for the project associated with this API.
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/list
:type project: str
:param project: project ID
:type page_size: int
        :param page_size: maximum number of subscriptions to return. If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of subscriptions.
If not passed, the API will return the first page
of subscriptions.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.pubsub.subscription.Subscription`
accessible to the current API.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_subscriptions(
path, page_size=page_size, options=options)
# We attach a mutable topics dictionary so that as topic
# objects are created by Subscription.from_api_repr, they
# can be re-used by other subscriptions from the same topic.
topics = {}
item_to_value = functools.partial(
_item_to_sub_for_client, topics=topics)
return GAXIterator(self._client, page_iter, item_to_value)
def subscription_create(self, subscription_path, topic_path,
ack_deadline=None, push_endpoint=None,
retain_acked_messages=None,
message_retention_duration=None):
"""API call: create a subscription
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/create
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type topic_path: str
:param topic_path: the fully-qualified path of the topic being
subscribed, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:type ack_deadline: int
:param ack_deadline:
(Optional) the deadline (in seconds) by which messages pulled from
the back-end must be acknowledged.
:type push_endpoint: str
:param push_endpoint:
(Optional) URL to which messages will be pushed by the back-end.
If not set, the application must pull messages.
:type retain_acked_messages: bool
:param retain_acked_messages:
(Optional) Whether to retain acked messages. If set, acked messages
are retained in the subscription's backlog for a duration indicated
by `message_retention_duration`.
:type message_retention_duration: :class:`datetime.timedelta`
:param message_retention_duration:
            (Optional) How long to retain messages in the subscription's
            backlog (this also covers acked messages when
            `retain_acked_messages` is set). If unset, defaults to 7 days.
:rtype: dict
:returns: ``Subscription`` resource returned from the API.
"""
if push_endpoint is not None:
push_config = PushConfig(push_endpoint=push_endpoint)
else:
push_config = None
if message_retention_duration is not None:
message_retention_duration = _timedelta_to_duration_pb(
message_retention_duration)
try:
sub_pb = self._gax_api.create_subscription(
subscription_path, topic_path,
push_config=push_config, ack_deadline_seconds=ack_deadline,
retain_acked_messages=retain_acked_messages,
message_retention_duration=message_retention_duration)
except GaxError as exc:
if exc_to_code(exc.cause) in _CONFLICT_ERROR_CODES:
raise Conflict(topic_path)
raise
return MessageToDict(sub_pb)
def subscription_get(self, subscription_path):
"""API call: retrieve a subscription
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/get
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:rtype: dict
:returns: ``Subscription`` resource returned from the API.
"""
try:
sub_pb = self._gax_api.get_subscription(subscription_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
return MessageToDict(sub_pb)
def subscription_delete(self, subscription_path):
"""API call: delete a subscription
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/delete
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
"""
try:
self._gax_api.delete_subscription(subscription_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_modify_push_config(self, subscription_path,
push_endpoint):
"""API call: update push config of a subscription
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyPushConfig
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription to affect, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type push_endpoint: str
:param push_endpoint:
(Optional) URL to which messages will be pushed by the back-end.
If not set, the application must pull messages.
"""
push_config = PushConfig(push_endpoint=push_endpoint)
try:
self._gax_api.modify_push_config(subscription_path, push_config)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_pull(self, subscription_path, return_immediately=False,
max_messages=1):
"""API call: retrieve messages for a subscription
See
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/pull
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription to pull from, in
format ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type return_immediately: bool
:param return_immediately: if True, the back-end returns even if no
messages are available; if False, the API
call blocks until one or more messages are
available.
:type max_messages: int
:param max_messages: the maximum number of messages to return.
:rtype: list of dict
:returns: the ``receivedMessages`` element of the response.
"""
try:
response_pb = self._gax_api.pull(
subscription_path, max_messages,
return_immediately=return_immediately)
except GaxError as exc:
code = exc_to_code(exc.cause)
if code == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
elif code == StatusCode.DEADLINE_EXCEEDED:
# NOTE: The JSON-over-HTTP API returns a 200 with an empty
# response when ``return_immediately`` is ``False``, so
# we "mutate" the gRPC error into a non-error to conform.
if not return_immediately:
return []
raise
return [_received_message_pb_to_mapping(rmpb)
for rmpb in response_pb.received_messages]
def subscription_acknowledge(self, subscription_path, ack_ids):
"""API call: acknowledge retrieved messages
See
        https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/acknowledge
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription to affect, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being acknowledged
"""
try:
self._gax_api.acknowledge(subscription_path, ack_ids)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_modify_ack_deadline(self, subscription_path, ack_ids,
ack_deadline):
"""API call: update ack deadline for retrieved messages
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyAckDeadline
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription to affect, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being acknowledged
:type ack_deadline: int
:param ack_deadline: the deadline (in seconds) by which messages pulled
from the back-end must be acknowledged.
"""
try:
self._gax_api.modify_ack_deadline(
subscription_path, ack_ids, ack_deadline)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_seek(self, subscription_path, time=None, snapshot=None):
"""API call: seek a subscription
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/seek
:type subscription_path: str
        :param subscription_path:
the fully-qualified path of the subscription to affect, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type time: :class:`.timestamp_pb2.Timestamp`
:param time: The time to seek to.
:type snapshot: str
:param snapshot: The snapshot to seek to.
"""
try:
self._gax_api.seek(subscription_path, time=time, snapshot=snapshot)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def list_snapshots(self, project, page_size=0, page_token=None):
"""List snapshots for the project associated with this API.
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.snapshots/list
:type project: str
:param project: project ID
:type page_size: int
        :param page_size: maximum number of snapshots to return. If not passed,
                          defaults to a value set by the API.
        :type page_token: str
        :param page_token: opaque marker for the next "page" of snapshots. If
                           not passed, the API will return the first page of
                           snapshots.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.pubsub.snapshot.Snapshot`
accessible to the current API.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_snapshots(
path, page_size=page_size, options=options)
# We attach a mutable topics dictionary so that as topic
# objects are created by Snapshot.from_api_repr, they
# can be re-used by other snapshots of the same topic.
topics = {}
item_to_value = functools.partial(
_item_to_snapshot_for_client, topics=topics)
return GAXIterator(self._client, page_iter, item_to_value)
def snapshot_create(self, snapshot_path, subscription_path):
"""API call: create a snapshot
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.snapshots/create
:type snapshot_path: str
:param snapshot_path: fully-qualified path of the snapshot, in format
``projects/<PROJECT>/snapshots/<SNAPSHOT_NAME>``.
:type subscription_path: str
        :param subscription_path: fully-qualified path of the subscription that
                                  the new snapshot captures, in format
                                  ``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:rtype: dict
:returns: ``Snapshot`` resource returned from the API.
:raises: :exc:`google.cloud.exceptions.Conflict` if the snapshot
already exists
:raises: :exc:`google.cloud.exceptions.NotFound` if the subscription
does not exist
"""
try:
snapshot_pb = self._gax_api.create_snapshot(
snapshot_path, subscription_path)
except GaxError as exc:
if exc_to_code(exc.cause) in _CONFLICT_ERROR_CODES:
raise Conflict(snapshot_path)
elif exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
return MessageToDict(snapshot_pb)
def snapshot_delete(self, snapshot_path):
"""API call: delete a topic
See
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.snapshots/delete
:type snapshot_path: str
:param snapshot_path: fully-qualified path of the snapshot, in format
``projects/<PROJECT>/snapshots/<SNAPSHOT_NAME>``.
:raises: :exc:`google.cloud.exceptions.NotFound` if the snapshot does
not exist
"""
try:
self._gax_api.delete_snapshot(snapshot_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(snapshot_path)
raise
def _message_pb_from_mapping(message):
"""Helper for :meth:`_PublisherAPI.topic_publish`.
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return PubsubMessage(data=_to_bytes(message['data']),
attributes=message['attributes'])
def _message_pb_to_mapping(message_pb):
"""Helper for :meth:`pull`, et aliae
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return {
'messageId': message_pb.message_id,
'data': message_pb.data,
'attributes': message_pb.attributes,
'publishTime': _pb_timestamp_to_rfc3339(message_pb.publish_time),
}
def _received_message_pb_to_mapping(received_message_pb):
"""Helper for :meth:`pull`, et aliae
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return {
'ackId': received_message_pb.ack_id,
'message': _message_pb_to_mapping(
received_message_pb.message),
}
def make_gax_publisher_api(credentials=None, host=None):
"""Create an instance of the GAX Publisher API.
If the ``credentials`` are omitted, then we create an insecure
``channel`` pointing at the local Pub / Sub emulator.
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) Credentials for getting access
tokens.
:type host: str
:param host: (Optional) The host for an insecure channel. Only
used if ``credentials`` are omitted.
:rtype: :class:`.publisher_client.PublisherClient`
:returns: A publisher API instance with the proper channel.
"""
if credentials is None:
channel = insecure_channel(host)
else:
channel = make_secure_channel(
credentials, DEFAULT_USER_AGENT,
PublisherClient.SERVICE_ADDRESS)
return PublisherClient(
channel=channel, lib_name='gccl', lib_version=__version__)
def make_gax_subscriber_api(credentials=None, host=None):
"""Create an instance of the GAX Subscriber API.
If the ``credentials`` are omitted, then we create an insecure
``channel`` pointing at the local Pub / Sub emulator.
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) Credentials for getting access
tokens.
:type host: str
:param host: (Optional) The host for an insecure channel. Only
used if ``credentials`` are omitted.
:rtype: :class:`.subscriber_client.SubscriberClient`
:returns: A subscriber API instance with the proper channel.
"""
if credentials is None:
channel = insecure_channel(host)
else:
channel = make_secure_channel(
credentials, DEFAULT_USER_AGENT,
SubscriberClient.SERVICE_ADDRESS)
return SubscriberClient(
channel=channel, lib_name='gccl', lib_version=__version__)
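# Example wiring (an assumed sketch, not part of the public surface): these
# factories are normally consumed by ``google.cloud.pubsub.client.Client``
# rather than called directly. Against a local emulator it might look like
# the following; the ``Client(project=...)`` call is a hypothetical setup.
#
#     from google.cloud.pubsub.client import Client
#     client = Client(project='my-project')
#     publisher = _PublisherAPI(
#         make_gax_publisher_api(host='localhost:8085'), client)
#     publisher.topic_create('projects/my-project/topics/my-topic')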
def _item_to_topic(iterator, resource):
"""Convert a protobuf topic to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: :class:`.pubsub_pb2.Topic`
:param resource: A topic returned from the API.
:rtype: :class:`~google.cloud.pubsub.topic.Topic`
:returns: The next topic in the page.
"""
return Topic.from_api_repr(
{'name': resource.name}, iterator.client)
def _item_to_subscription_for_topic(iterator, subscription_path):
"""Convert a subscription name to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type subscription_path: str
:param subscription_path: Subscription path returned from the API.
:rtype: :class:`~google.cloud.pubsub.subscription.Subscription`
:returns: The next subscription in the page.
"""
subscription_name = subscription_name_from_path(
subscription_path, iterator.client.project)
return Subscription(subscription_name, iterator.topic)
def _item_to_sub_for_client(iterator, sub_pb, topics):
"""Convert a subscription protobuf to the native object.
.. note::
This method does not have the correct signature to be used as
the ``item_to_value`` argument to
:class:`~google.cloud.iterator.Iterator`. It is intended to be
patched with a mutable topics argument that can be updated
on subsequent calls. For an example, see how the method is
used above in :meth:`_SubscriberAPI.list_subscriptions`.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type sub_pb: :class:`.pubsub_pb2.Subscription`
:param sub_pb: A subscription returned from the API.
:type topics: dict
:param topics: A dictionary of topics to be used (and modified)
as new subscriptions are created bound to topics.
:rtype: :class:`~google.cloud.pubsub.subscription.Subscription`
:returns: The next subscription in the page.
"""
resource = MessageToDict(sub_pb)
return Subscription.from_api_repr(
resource, iterator.client, topics=topics)
def _item_to_snapshot_for_client(iterator, snapshot_pb, topics):
"""Convert a subscription protobuf to the native object.
.. note::
This method does not have the correct signature to be used as
the ``item_to_value`` argument to
:class:`~google.cloud.iterator.Iterator`. It is intended to be
patched with a mutable topics argument that can be updated
on subsequent calls. For an example, see how the method is
used above in :meth:`_SubscriberAPI.list_snapshots`.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
    :type snapshot_pb: :class:`.pubsub_pb2.Snapshot`
    :param snapshot_pb: A snapshot returned from the API.
    :type topics: dict
    :param topics: A dictionary of topics to be used (and modified)
                   as new snapshots are created bound to topics.
    :rtype: :class:`~google.cloud.pubsub.snapshot.Snapshot`
    :returns: The next snapshot in the page.
"""
resource = MessageToDict(snapshot_pb)
return Snapshot.from_api_repr(
resource, iterator.client, topics=topics)
| apache-2.0 | 1,538,807,644,575,028,000 | 37.763454 | 103 | 0.62521 | false |
AndreasMadsen/course-02456-sparsemax | python_reference/sparsemax.py | 1 | 2098 | import numpy as np
def forward(z):
"""forward pass for sparsemax
this will process a 2d-array $z$, where axis 1 (each row) is assumed to be
    the z-vector.
"""
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
# use argmax to get the index by row as .nonzero() doesn't
    # take an axis argument. np.argmax returns the first index, but the last
# index is required here, use np.flip to get the last index and
# `z.shape[axis]` to compensate for np.flip afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
def jacobian(z):
"""jacobian for sparsemax
this will process a 2d-array $z$, where axis 1 (each row) is assumed to be
    the z-vector.
"""
# Construct S(z)
# Possibly this could be reduced to just calculating k(z)
p = forward(z)
s = p > 0
s_float = s.astype('float64')
# row-wise outer product
# http://stackoverflow.com/questions/31573856/theano-row-wise-outer-product-between-two-matrices
jacobian = s_float[:, :, np.newaxis] * s_float[:, np.newaxis, :]
jacobian /= - np.sum(s, axis=1)[:, np.newaxis, np.newaxis]
# add delta_ij
obs, index = s.nonzero()
jacobian[obs, index, index] += 1
return jacobian
def Rop(z, v):
"""Jacobian vector product (Rop) for sparsemax
This calculates [J(z_i) * v_i, ...]. `z` is a 2d-array, where axis 1
    (each row) is assumed to be the z-vector. `v` is a matrix where
axis 1 (each row) is assumed to be the `v-vector`.
"""
# Construct S(z)
p = forward(z)
s = p > 0
# Calculate \hat{v}, which will be a vector (scalar for each z)
v_hat = np.sum(v * s, axis=1) / np.sum(s, axis=1)
# Calculates J(z) * v
return s * (v - v_hat[:, np.newaxis])
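# Minimal usage sketch (not part of the original module): each row of `z` is
# one example. The expected values below follow from the definitions above.
if __name__ == '__main__':
    z = np.array([[0.5, 1.0, -1.0],
                  [3.0, 2.9, 0.1]])
    p = forward(z)        # rows ~ [0.25, 0.75, 0.0] and [0.55, 0.45, 0.0]
    J = jacobian(z)       # shape (2, 3, 3), one Jacobian per row of z
    jv = Rop(z, np.ones_like(z))  # J(z_i) * 1 is all zeros for sparsemax
    print(p)
    print(J.shape)
    print(jv)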
| mit | 6,878,382,542,096,882,000 | 27.739726 | 100 | 0.596282 | false |
gustavo94/UVaProblems | 573-The snail.py | 1 | 3092 | from sys import stdin
from math import floor
# example input:
ex_in = "6 3 1 10\n10 2 1 50\n50 5 3 14\n50 6 4 1\n50 6 3 1\n1 1 1 1\n97 56 3 10\n56 3 1 5\n0 0 0 0"
def cal_limit_day(U,D,fatigue):
limit_day = floor((U-D)/fatigue)
if limit_day <= 0:
limit_day = 1
return limit_day
def total_down_by_fatigue(day, fatigue):
fatigue_application_times = ((day-1)*day)/2
return fatigue_application_times * fatigue
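# Worked example (added for illustration): with fatigue == 2 the snail climbs
# U, U-2, U-4 on days 1-3, so total_down_by_fatigue(3, 2) == (0+1+2)*2 == 6.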
def climbed_distance(day,U,fatigue):
last_day_of_climbing = floor(U/fatigue)
if day > last_day_of_climbing:
return (last_day_of_climbing * U) - (total_down_by_fatigue(last_day_of_climbing,fatigue))
return (U*day) - (total_down_by_fatigue(day,fatigue))
def total_climbed_before_night_at(day,U,fatigue,D): #total climbed at the end of the day
night_sliding = D*(day-1)
return climbed_distance(day,U,fatigue) - night_sliding
def total_climbed_after_night_at(day,U,fatigue,D): #total climbed at the end of the night
night_sliding = D*(day)
return climbed_distance(day,U,fatigue) - night_sliding
def can_the_snail_climb_the_well_at(day,U,fatigue,D,H):
total_climbed_at_day = total_climbed_before_night_at(day,U,fatigue,D)
answer = False
if (total_climbed_at_day > H):
answer = True
return answer
def day_for_succes(limit_day,U,fatigue,D,H):
day = limit_day
for day in range(limit_day,0,-1):
if not(can_the_snail_climb_the_well_at(day-1,U,fatigue,D,H)):
break
return day
def day_for_fail(day,U,fatigue,D):
min_limit = 1
total_climbed = total_climbed_after_night_at(day,U,fatigue,D)
while True: #binary search
if total_climbed >= 0:
min_limit = day
day = day *2
total_climbed = total_climbed_after_night_at(day,U,fatigue,D)
else:
            if total_climbed_after_night_at((day-1),U,fatigue,D) >= 0 : #if the previous day's total climbed is non-negative then this day is the day of failure
break
middle_day = floor((day+min_limit)/2)
if total_climbed_after_night_at(middle_day,U,fatigue,D) >= 0:
min_limit = middle_day
else:
day = middle_day
return day
# def brute_force(H,U,D,fatigue):
# day = 1
# height = 0
# while True:
# height += U
# U -= fatigue
# if U < 0:
# U = 0
# if height > H:
# print("success on day " + str(day))
# break
# height -= D
# if height < 0:
# print("failure on day "+ str(day))
# break
# day += 1
def main():
lines = stdin.read().splitlines() #Use ctrl+d to finish read process
# lines = ex_in.splitlines() # Testing
for curr_Line in lines:
        H,U,D,F = [int(param) for param in curr_Line.split()] #the semantics of H, U, D and F are given in the problem description
if H <= 0:
return
fatigue = U * (F/100)
        limit_day = cal_limit_day(U,D,fatigue) #after this day the snail cannot climb any more because of fatigue
answer = can_the_snail_climb_the_well_at(limit_day,U,fatigue,D,H)
if answer:
day = day_for_succes(limit_day,U,fatigue,D,H)
print("success on day " + str(day))
else:
day = day_for_fail(limit_day,U,fatigue,D)
# print(total_climbed_before_night_at(31,U,fatigue,D))
print("failure on day "+ str(day))
# brute_force(H,U,D,fatigue)
return
main()
| mit | -5,461,073,765,344,389,000 | 29.313725 | 143 | 0.67238 | false |
rbarrois/djadmin_export | dev/settings.py | 1 | 4794 | # Django settings for dev project.
import os
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
ADMIN_EXPORTERS = (
'djadmin_export.exporters.xlsx.XLSXExporter',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ROOT_DIR, 'db.sqlite'),
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'yo^78u^eg@o*p&)3re%xo9c_j%8prma+#8v5j*@r7-a%8q%@+o'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dev.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'dev.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'djadmin_export',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| lgpl-3.0 | -7,371,711,069,494,227,000 | 30.333333 | 88 | 0.698164 | false |
james-pack/ml | pack/ml/nodes/hinge_loss_node.py | 1 | 3718 | import numpy as np
from pack.ml.nodes.hinge_loss_node_pb2 import HingeLossNodeProto
class HingeLossNode(object):
class Hyperparameters(object):
def __eq__(self, other):
return self.__dict__ == other.__dict__
def generate_random_hyperparameters(self):
pass
def __init__(self, name, input_shape, hyperparameters=Hyperparameters()):
self.name = name
assert len(input_shape) == 1
self._input_shape = input_shape[:]
self._hyperparameters = hyperparameters
def __eq__(self, other):
return self.__dict__ == other.__dict__
def to_proto(self):
node_proto = HingeLossNodeProto()
node_proto.stage_name = self.name
node_proto.input_rows = self._input_shape[0]
return node_proto
@staticmethod
def from_proto(node_proto):
return HingeLossNode(node_proto.stage_name, (node_proto.input_rows, ), HingeLossNode.Hyperparameters())
@property
def input_shape(self):
return self._input_shape
@property
def output_shape(self):
# This node returns one loss value per item.
return (1,)
@property
def hyperparameters(self):
return self._hyperparameters
def apply(self, input, y):
""" Computes the forward pass of an SVM loss function.
Args:
scores: matrix of scores where each column represents the scores of a
single item for each possible class. scores should have the same number
of columns as rows in y.
Returns:
A vector of the losses for each item (column) in scores.
"""
# Only a single input is allowed.
assert len(input.keys()) == 1
# Extract scores from input.
scores = input[list(input.keys())[0]]
# Scores must match expected input shape.
assert scores.shape[0] == self.input_shape[0], '{} != {}'.format(scores.shape[0], self.input_shape[0])
# Both inputs must have the same number of items.
assert scores.shape[1] == y.shape[0], '{} != {}'.format(scores.shape[1], y.shape[0])
number_items = y.shape[0]
# Scores must have the expected number of features.
assert scores.shape[0] == self.input_shape[0], '{} != {}'.format(scores.shape, self.input_shape)
scores_y = scores[y, np.arange(scores.shape[1])]
self.margins = np.maximum(0., scores - scores_y + 1.)
self.margins[y, np.arange(self.margins.shape[1])] = 0.
assert self.margins.shape[0] == scores.shape[0]
assert self.margins.shape[1] == scores.shape[1]
loss = np.sum(self.margins[:], axis=0) / scores.shape[0]
# Must have one loss for each item.
assert loss.shape[0] == y.shape[0], '{} != {}'.format(loss.shape[0], y.shape[0])
return loss
def update(self, input, step_size, training_result=None):
""" Backpropagation of gradients to allow updates on parameters.
Must be called after the apply() method.
Args:
"""
# Only a single input is allowed.
assert len(input.keys()) == 1
# Extract dz from input.
dz = input[list(input.keys())[0]]
# Inputs must have the right number of items.
assert dz.shape[0] == self.margins.shape[1], '{} != {}'.format(dz.shape[0], self.margins.shape[1])
positive_margins = (self.margins > 0.)
d_margins = np.ones(positive_margins.shape) * positive_margins
dx = np.multiply(dz, d_margins)
dx = dx.reshape(self.margins.shape)
# dx must match expected input shape.
assert dx.shape[0] == self.input_shape[0], '{} != {}'.format(dx.shape[0], self.input_shape[0])
return dx
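# Minimal usage sketch (assumed, not part of the original file): three classes,
# four items; an upstream gradient of ones stands in for d(total)/d(loss).
if __name__ == '__main__':
    node = HingeLossNode('hinge', (3,))
    scores = np.array([[3.0, 0.5, 1.0, 2.0],
                       [1.0, 2.0, 0.2, 2.0],
                       [0.1, 0.3, 2.5, 2.0]])   # shape (classes, items)
    y = np.array([0, 1, 2, 0])                  # true class per item
    loss = node.apply({'scores': scores}, y)    # one hinge loss per item
    dx = node.update({'dz': np.ones(4)}, step_size=0.1)
    print(loss)                                 # shape (4,)
    print(dx.shape)                             # (3, 4)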
| mit | -8,466,301,796,413,677,000 | 37.729167 | 111 | 0.605702 | false |
RetailMeNotSandbox/dart | src/python/dart/engine/redshift/add_engine.py | 1 | 5627 | import logging
import os
from dart.client.python.dart_client import Dart
from dart.config.config import configuration
from dart.engine.redshift.metadata import RedshiftActionTypes
from dart.model.engine import Engine, EngineData
_logger = logging.getLogger(__name__)
def add_redshift_engine(config):
engine_config = config['engines']['redshift_engine']
opts = engine_config['options']
dart = Dart(opts['dart_host'], opts['dart_port'], opts['dart_api_version'])
assert isinstance(dart, Dart)
_logger.info('saving redshift_engine')
engine_id = None
for e in dart.get_engines():
if e.data.name == 'redshift_engine':
engine_id = e.id
ecs_task_definition = None if config['dart']['use_local_engines'] else {
'family': 'dart-%s-redshift_engine' % config['dart']['env_name'],
'containerDefinitions': [
{
'name': 'dart-redshift_engine',
'cpu': 64,
'memory': 256,
'image': engine_config['docker_image'],
'logConfiguration': {'logDriver': 'syslog'},
'environment': [
{'name': 'DART_ROLE', 'value': 'worker:engine_redshift'},
{'name': 'DART_CONFIG', 'value': engine_config['config']},
{'name': 'AWS_DEFAULT_REGION', 'value': opts['region']}
],
'mountPoints': [
{
'containerPath': '/mnt/ecs_agent_data',
'sourceVolume': 'ecs-agent-data',
'readOnly': True
}
],
}
],
'volumes': [
{
'host': {'sourcePath': '/var/lib/ecs/data'},
'name': 'ecs-agent-data'
}
],
}
e1 = dart.save_engine(Engine(id=engine_id, data=EngineData(
name='redshift_engine',
description='For Redshift clusters',
options_json_schema={
'type': 'object',
'properties': {
'node_type': {
'type': 'string',
'default': 'ds2.xlarge',
'enum': ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge'],
'description': 'the type of each node'
},
'nodes': {
'type': 'integer',
'default': 2,
'minimum': 2,
'maximum': 50,
'description': 'the number of nodes in this cluster'
},
'master_user_name': {
'type': ['string', 'null'],
'default': 'admin',
'minLength': 1,
'maxLength': 128,
'pattern': '^[a-zA-Z]+[a-zA-Z0-9]*$',
'description': 'the master user name for this redshift cluster'
},
'master_user_password': {
'type': 'string',
'default': 'passw0rD--CHANGE-ME!',
'minLength': 8,
'maxLength': 64,
'pattern': '(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*[\'"\/@\s])',
'x-dart-secret': True,
'description': 'the master user password for this redshift cluster (hidden and ignored after'
+ ' initial save), see AWS docs for password requirements'
},
'master_db_name': {
'type': ['string', 'null'],
"default": 'dart',
'minLength': 1,
'maxLength': 64,
'pattern': '^[a-z]+$',
'description': 'the master database name for this redshift cluster'
},
'cluster_identifier': {
'type': ['string', 'null'],
'default': None,
'minLength': 1,
'maxLength': 63,
'pattern': '^[a-zA-Z0-9-]*$',
'description': 'this overrides the auto-generated dart cluster_identifier'
},
'preferred_maintenance_window': {
'type': 'string',
'default': 'sat:03:30-sat:04:00',
'description': 'UTC time when automated cluster maintenance can occur'
},
'snapshot_retention': {
'type': 'integer',
'default': 2,
'minimum': 1,
'maximum': 10,
'description': 'the maximum number of snapshots to keep, older ones will be deleted'
},
},
'additionalProperties': False,
'required': ['master_user_password']
},
supported_action_types=[
RedshiftActionTypes.start_datastore,
RedshiftActionTypes.stop_datastore,
RedshiftActionTypes.execute_sql,
RedshiftActionTypes.load_dataset,
RedshiftActionTypes.consume_subscription,
RedshiftActionTypes.copy_to_s3,
RedshiftActionTypes.create_snapshot,
RedshiftActionTypes.data_check,
RedshiftActionTypes.cluster_maintenance,
],
ecs_task_definition=ecs_task_definition
)))
_logger.info('saved redshift_engine: %s' % e1.id)
if __name__ == '__main__':
add_redshift_engine(configuration(os.environ['DART_CONFIG']))
| mit | -3,486,965,417,809,789,000 | 38.907801 | 115 | 0.460992 | false |
alimanfoo/numcodecs | numcodecs/tests/test_zlib.py | 1 | 2696 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import itertools
import numpy as np
import pytest
from numcodecs.zlib import Zlib
from numcodecs.tests.common import (check_encode_decode, check_config, check_repr,
check_backwards_compatibility,
check_err_decode_object_buffer,
check_err_encode_object_buffer)
codecs = [
Zlib(),
Zlib(level=-1),
Zlib(level=0),
Zlib(level=1),
Zlib(level=5),
Zlib(level=9),
]
# mix of dtypes: integer, float, bool, string
# mix of shapes: 1D, 2D, 3D
# mix of orders: C, F
arrays = [
np.arange(1000, dtype='i4'),
np.linspace(1000, 1001, 1000, dtype='f8'),
np.random.normal(loc=1000, scale=1, size=(100, 10)),
np.random.randint(0, 2, size=1000, dtype=bool).reshape(100, 10, order='F'),
np.random.choice([b'a', b'bb', b'ccc'], size=1000).reshape(10, 10, 10),
np.random.randint(0, 2**60, size=1000, dtype='u8').view('M8[ns]'),
np.random.randint(0, 2**60, size=1000, dtype='u8').view('m8[ns]'),
np.random.randint(0, 2**25, size=1000, dtype='u8').view('M8[m]'),
np.random.randint(0, 2**25, size=1000, dtype='u8').view('m8[m]'),
np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('M8[ns]'),
np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('m8[ns]'),
np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('M8[m]'),
np.random.randint(-2**63, -2**63 + 20, size=1000, dtype='i8').view('m8[m]'),
]
def test_encode_decode():
for arr, codec in itertools.product(arrays, codecs):
check_encode_decode(arr, codec)
def test_config():
codec = Zlib(level=3)
check_config(codec)
def test_repr():
check_repr("Zlib(level=3)")
def test_eq():
assert Zlib() == Zlib()
assert not Zlib() != Zlib()
assert Zlib(1) == Zlib(1)
assert Zlib(1) != Zlib(9)
assert Zlib() != 'foo'
assert 'foo' != Zlib()
assert not Zlib() == 'foo'
def test_backwards_compatibility():
check_backwards_compatibility(Zlib.codec_id, arrays, codecs)
def test_err_decode_object_buffer():
check_err_decode_object_buffer(Zlib())
def test_err_encode_object_buffer():
check_err_encode_object_buffer(Zlib())
def test_err_encode_list():
data = ['foo', 'bar', 'baz']
for codec in codecs:
with pytest.raises(TypeError):
codec.encode(data)
def test_err_encode_non_contiguous():
# non-contiguous memory
arr = np.arange(1000, dtype='i4')[::2]
for codec in codecs:
with pytest.raises(ValueError):
codec.encode(arr)
| mit | 3,851,977,212,866,270,700 | 27.378947 | 82 | 0.599777 | false |
azumimuo/family-xbmc-addon | plugin.video.bubbles/resources/lib/sources/german/hoster/open/tata.py | 1 | 7065 | # -*- coding: utf-8 -*-
"""
Bubbles Addon
Copyright (C) 2016 Viper2k4
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import re
import urllib
import urlparse
import base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['tata.to']
self.base_link = 'http://tata.to'
self.search_link = '/filme?suche=%s&type=alle'
self.ajax_link = '/ajax/stream/%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search_movie(imdb, year)
return url if url else None
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle, 'aliases': aliases, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
tvshowtitle = data['tvshowtitle']
localtvshowtitle = data['localtvshowtitle']
aliases = source_utils.aliases_to_array(eval(data['aliases']))
year = re.findall('(\d{4})', premiered)
year = year[0] if year else data['year']
url = self.__search([localtvshowtitle] + aliases, year, season, episode)
if not url and tvshowtitle != localtvshowtitle:
url = self.__search([tvshowtitle] + aliases, year, season, episode)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
ref = urlparse.urljoin(self.base_link, url)
url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-([\w\d]+)$', ref)[0])
result = client.request(url, referer=ref)
result = base64.decodestring(result)
result = json.loads(result).get('playinfo', [])
result = [i.get('link_mp4') for i in result]
result = [i for i in result if i]
for i in result:
try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
except: pass
return sources
except:
return
def resolve(self, url):
return url
def __search_movie(self, imdb, year):
try:
query = urlparse.urljoin(self.base_link, self.search_link % imdb)
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query)
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ml-item-content'})
r = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'ml-image'}, req='href'), dom_parser.parse_dom(i, 'ul', attrs={'class': 'item-params'})) for i in r]
r = [(i[0][0].attrs['href'], re.findall('calendar.+?>.+?(\d{4})', ''.join([x.content for x in i[1]]))) for i in r if i[0] and i[1]]
r = [(i[0], i[1][0] if len(i[1]) > 0 else '0') for i in r]
r = sorted(r, key=lambda i: int(i[1]), reverse=True) # with year > no year
r = [i[0] for i in r if i[1] in y][0]
url = urlparse.urlparse(r).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def __search(self, titles, year, season=0, episode=False):
try:
query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query)
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ml-item-content'})
f = []
for i in r:
_url = dom_parser.parse_dom(i, 'a', attrs={'class': 'ml-image'}, req='href')[0].attrs['href']
_title = re.sub('<.+?>|</.+?>', '', dom_parser.parse_dom(i, 'h6')[0].content).strip()
try: _title = re.search('(.*?)\s(?:staf+el|s)\s*(\d+)', _title, re.I).group(1)
except: pass
_season = '0'
_year = re.findall('calendar.+?>.+?(\d{4})', ''.join([x.content for x in dom_parser.parse_dom(i, 'ul', attrs={'class': 'item-params'})]))
_year = _year[0] if len(_year) > 0 else '0'
if season > 0:
s = dom_parser.parse_dom(i, 'span', attrs={'class': 'season-label'})
s = dom_parser.parse_dom(s, 'span', attrs={'class': 'el-num'})
if s: _season = s[0].content.strip()
if cleantitle.get(_title) in t and _year in y and int(_season) == int(season):
f.append((_url, _year))
r = f
r = sorted(r, key=lambda i: int(i[1]), reverse=True) # with year > no year
            r = [i[0] for i in r if i[0]][0]
url = source_utils.strip_domain(r)
if episode:
r = client.request(urlparse.urljoin(self.base_link, url))
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'season-list'})
r = dom_parser.parse_dom(r, 'li')
r = dom_parser.parse_dom(r, 'a', req='href')
r = [(i.attrs['href'], i.content) for i in r]
r = [i[0] for i in r if i[1] and int(i[1]) == int(episode)][0]
url = source_utils.strip_domain(r)
return url
except:
return
| gpl-2.0 | 4,262,739,806,493,651,500 | 39.83815 | 174 | 0.539561 | false |
benjamin9999/python-stix | stix/incident/property_affected.py | 1 | 7575 | # Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import stix
from stix.common import VocabString, StructuredText
import stix.bindings.incident as incident_binding
from stix.common.vocabs import LossProperty, LossDuration
from stix.common.vocabs import AvailabilityLossType as AvailabilityLoss
class NonPublicDataCompromised(VocabString):
_namespace = "http://stix.mitre.org/Incident-1"
_binding = incident_binding
_binding_class = incident_binding.NonPublicDataCompromisedType
def __init__(self, value=None, data_encrypted=None):
self.data_encrypted = data_encrypted
super(NonPublicDataCompromised, self).__init__(value)
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
super(NonPublicDataCompromised, cls).from_obj(obj, return_obj=return_obj)
return_obj.data_encrypted = obj.data_encrypted
return return_obj
def to_obj(self, return_obj=None, ns_info=None):
if not return_obj:
return_obj = self._binding_class()
super(NonPublicDataCompromised, self).to_obj(return_obj=return_obj, ns_info=ns_info)
return_obj.data_encrypted = self.data_encrypted
return return_obj
@classmethod
def from_dict(cls, d, return_obj=None):
if not d:
return None
if not return_obj:
return_obj = cls()
super(NonPublicDataCompromised, cls).from_dict(d, return_obj=return_obj)
return_obj.data_encrypted = d.get('data_encrypted')
return return_obj
def to_dict(self):
d = super(NonPublicDataCompromised, self).to_dict()
if self.data_encrypted:
d['data_encrypted'] = self.data_encrypted
return d
class PropertyAffected(stix.Entity):
_namespace = "http://stix.mitre.org/Incident-1"
_binding = incident_binding
_binding_class = incident_binding.PropertyAffectedType
def __init__(self):
self.property_ = None
self.description_of_effect = None
self.type_of_availability_loss = None
self.duration_of_availability_loss = None
self.non_public_data_compromised = None
@property
def property_(self):
return self._property
@property_.setter
def property_(self, value):
if not value:
self._property = None
elif isinstance(value, VocabString):
self._property = value
else:
self._property = LossProperty(value)
@property
def description_of_effect(self):
return self._description_of_effect
@description_of_effect.setter
def description_of_effect(self, value):
if not value:
self._description_of_effect = None
elif isinstance(value, StructuredText):
self._description_of_effect = value
else:
self._description_of_effect = StructuredText(value)
@property
def type_of_availability_loss(self):
return self._type_of_availability_loss
@type_of_availability_loss.setter
def type_of_availability_loss(self, value):
if not value:
self._type_of_availability_loss = None
elif isinstance(value, VocabString):
self._type_of_availability_loss = value
else:
self._type_of_availability_loss = AvailabilityLoss(value)
@property
def duration_of_availability_loss(self):
return self._duration_of_availability_loss
@duration_of_availability_loss.setter
def duration_of_availability_loss(self, value):
if not value:
self._duration_of_availability_loss = None
elif isinstance(value, VocabString):
self._duration_of_availability_loss = value
else:
self._duration_of_availability_loss = LossDuration(value)
@property
def non_public_data_compromised(self):
return self._non_public_data_compromised
@non_public_data_compromised.setter
def non_public_data_compromised(self, value):
if not value:
self._non_public_data_compromised = None
elif isinstance(value, NonPublicDataCompromised):
self._non_public_data_compromised = value
else:
self._non_public_data_compromised = NonPublicDataCompromised(value)
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
return_obj.property_ = VocabString.from_obj(obj.Property)
return_obj.description_of_effect = StructuredText.from_obj(obj.Description_Of_Effect)
return_obj.type_of_availability_loss = VocabString.from_obj(obj.Type_Of_Availability_Loss)
return_obj.duration_of_availability_loss = VocabString.from_obj(obj.Duration_Of_Availability_Loss)
return_obj.non_public_data_compromised = NonPublicDataCompromised.from_obj(obj.Non_Public_Data_Compromised)
return return_obj
def to_obj(self, return_obj=None, ns_info=None):
super(PropertyAffected, self).to_obj(return_obj=return_obj, ns_info=ns_info)
if not return_obj:
return_obj = self._binding_class()
if self.property_:
return_obj.Property = self.property_.to_obj(ns_info=ns_info)
if self.description_of_effect:
return_obj.Description_Of_Effect = self.description_of_effect.to_obj(ns_info=ns_info)
if self.type_of_availability_loss:
return_obj.Type_Of_Availability_Loss = self.type_of_availability_loss.to_obj(ns_info=ns_info)
if self.duration_of_availability_loss:
return_obj.Duration_Of_Availability_Loss = self.duration_of_availability_loss.to_obj(ns_info=ns_info)
if self.non_public_data_compromised:
return_obj.Non_Public_Data_Compromised = self.non_public_data_compromised.to_obj(ns_info=ns_info)
return return_obj
@classmethod
def from_dict(cls, d, return_obj=None):
if not d:
return None
if not return_obj:
return_obj = cls()
        return_obj.property_ = VocabString.from_dict(d.get('property'))
return_obj.description_of_effect = StructuredText.from_dict(d.get('description_of_effect'))
return_obj.type_of_availability_loss = VocabString.from_dict(d.get('type_of_availability_loss'))
return_obj.duration_of_availability_loss = VocabString.from_dict(d.get('duration_of_availability_loss'))
return_obj.non_public_data_compromised = NonPublicDataCompromised.from_dict(d.get('non_public_data_compromised'))
return return_obj
def to_dict(self):
d = {}
if self.property_:
d['property'] = self.property_.to_dict()
if self.description_of_effect:
d['description_of_effect'] = self.description_of_effect.to_dict()
if self.type_of_availability_loss:
d['type_of_availability_loss'] = self.type_of_availability_loss.to_dict()
if self.duration_of_availability_loss:
d['duration_of_availability_loss'] = self.duration_of_availability_loss.to_dict()
if self.non_public_data_compromised:
d['non_public_data_compromised'] = self.non_public_data_compromised.to_dict()
return d
| bsd-3-clause | 5,678,507,531,601,564,000 | 37.846154 | 121 | 0.639736 | false |
openstack/glance_store | glance_store/tests/unit/test_multistore_cinder.py | 1 | 24351 | # Copyright 2018-2019 RedHat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
from unittest import mock
import six
import socket
import sys
import tempfile
import time
import uuid
import fixtures
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils.secretutils import md5
from oslo_utils import units
import glance_store as store
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities as test_cap
sys.modules['glance_store.common.fs_mount'] = mock.Mock()
from glance_store._drivers import cinder # noqa
class FakeObject(object):
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
class TestMultiCinderStore(base.MultiStoreBaseTest,
test_cap.TestStoreCapabilitiesChecking):
# NOTE(flaper87): temporary until we
# can move to a fully-local lib.
# (Swift store's fault)
_CONF = cfg.ConfigOpts()
def setUp(self):
super(TestMultiCinderStore, self).setUp()
enabled_backends = {
"cinder1": "cinder",
"cinder2": "cinder"
}
self.conf = self._CONF
self.conf(args=[])
self.conf.register_opt(cfg.DictOpt('enabled_backends'))
self.config(enabled_backends=enabled_backends)
store.register_store_opts(self.conf)
self.config(default_backend='cinder1', group='glance_store')
# Ensure stores + locations cleared
location.SCHEME_TO_CLS_BACKEND_MAP = {}
store.create_multi_stores(self.conf)
self.addCleanup(setattr, location, 'SCHEME_TO_CLS_BACKEND_MAP',
dict())
self.test_dir = self.useFixture(fixtures.TempDir()).path
self.addCleanup(self.conf.reset)
self.store = cinder.Store(self.conf, backend="cinder1")
self.store.configure()
self.register_store_backend_schemes(self.store, 'cinder', 'cinder1')
self.store.READ_CHUNKSIZE = 4096
self.store.WRITE_CHUNKSIZE = 4096
fake_sc = [{u'endpoints': [{u'publicURL': u'http://foo/public_url'}],
u'endpoints_links': [],
u'name': u'cinder',
u'type': u'volumev3'}]
self.context = FakeObject(service_catalog=fake_sc,
user_id='fake_user',
auth_token='fake_token',
project_id='fake_project')
self.fake_admin_context = mock.MagicMock()
self.fake_admin_context.elevated.return_value = FakeObject(
service_catalog=fake_sc,
user_id='admin_user',
auth_token='admin_token',
project_id='admin_project')
cinder._reset_cinder_session()
def test_location_url_prefix_is_set(self):
self.assertEqual("cinder://cinder1", self.store.url_prefix)
def test_get_cinderclient(self):
cc = self.store.get_cinderclient(self.context)
self.assertEqual('fake_token', cc.client.auth.token)
self.assertEqual('http://foo/public_url', cc.client.auth.endpoint)
def test_get_cinderclient_with_user_overriden(self):
self.config(cinder_store_user_name='test_user', group="cinder1")
self.config(cinder_store_password='test_password', group="cinder1")
self.config(cinder_store_project_name='test_project', group="cinder1")
self.config(cinder_store_auth_address='test_address', group="cinder1")
cc = self.store.get_cinderclient(self.context)
self.assertEqual('Default', cc.client.session.auth.project_domain_name)
self.assertEqual('test_project', cc.client.session.auth.project_name)
def test_get_cinderclient_legacy_update(self):
cc = self.store.get_cinderclient(self.fake_admin_context,
legacy_update=True)
self.assertEqual('admin_token', cc.client.auth.token)
self.assertEqual('http://foo/public_url', cc.client.auth.endpoint)
def test_temporary_chown(self):
class fake_stat(object):
st_uid = 1
with mock.patch.object(os, 'stat', return_value=fake_stat()), \
mock.patch.object(os, 'getuid', return_value=2), \
mock.patch.object(processutils, 'execute') as mock_execute, \
mock.patch.object(cinder.Store, 'get_root_helper',
return_value='sudo'):
with self.store.temporary_chown('test'):
pass
expected_calls = [mock.call('chown', 2, 'test', run_as_root=True,
root_helper='sudo'),
mock.call('chown', 1, 'test', run_as_root=True,
root_helper='sudo')]
self.assertEqual(expected_calls, mock_execute.call_args_list)
@mock.patch.object(time, 'sleep')
def test_wait_volume_status(self, mock_sleep):
fake_manager = FakeObject(get=mock.Mock())
volume_available = FakeObject(manager=fake_manager,
id='fake-id',
status='available')
volume_in_use = FakeObject(manager=fake_manager,
id='fake-id',
status='in-use')
fake_manager.get.side_effect = [volume_available, volume_in_use]
self.assertEqual(volume_in_use,
self.store._wait_volume_status(
volume_available, 'available', 'in-use'))
fake_manager.get.assert_called_with('fake-id')
mock_sleep.assert_called_once_with(0.5)
@mock.patch.object(time, 'sleep')
def test_wait_volume_status_unexpected(self, mock_sleep):
fake_manager = FakeObject(get=mock.Mock())
volume_available = FakeObject(manager=fake_manager,
id='fake-id',
status='error')
fake_manager.get.return_value = volume_available
self.assertRaises(exceptions.BackendException,
self.store._wait_volume_status,
volume_available, 'available', 'in-use')
fake_manager.get.assert_called_with('fake-id')
@mock.patch.object(time, 'sleep')
def test_wait_volume_status_timeout(self, mock_sleep):
fake_manager = FakeObject(get=mock.Mock())
volume_available = FakeObject(manager=fake_manager,
id='fake-id',
status='available')
fake_manager.get.return_value = volume_available
self.assertRaises(exceptions.BackendException,
self.store._wait_volume_status,
volume_available, 'available', 'in-use')
fake_manager.get.assert_called_with('fake-id')
def _test_open_cinder_volume(self, open_mode, attach_mode, error,
multipath_supported=False,
enforce_multipath=False):
self.config(cinder_mount_point_base=None, group='cinder1')
fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
fake_volumes = FakeObject(get=lambda id: fake_volume,
detach=mock.Mock())
fake_client = FakeObject(volumes=fake_volumes)
_, fake_dev_path = tempfile.mkstemp(dir=self.test_dir)
fake_devinfo = {'path': fake_dev_path}
fake_connector = FakeObject(
connect_volume=mock.Mock(return_value=fake_devinfo),
disconnect_volume=mock.Mock())
@contextlib.contextmanager
def fake_chown(path, backend=None):
yield
def do_open():
with self.store._open_cinder_volume(
fake_client, fake_volume, open_mode):
if error:
raise error
def fake_factory(protocol, root_helper, **kwargs):
self.assertEqual(fake_volume.initialize_connection.return_value,
kwargs['conn'])
return fake_connector
root_helper = "sudo glance-rootwrap /etc/glance/rootwrap.conf"
with mock.patch.object(cinder.Store,
'_wait_volume_status',
return_value=fake_volume), \
mock.patch.object(cinder.Store, 'temporary_chown',
side_effect=fake_chown), \
mock.patch.object(cinder.Store, 'get_root_helper',
return_value=root_helper), \
mock.patch.object(connector.InitiatorConnector, 'factory',
side_effect=fake_factory) as fake_conn_obj:
with mock.patch.object(connector,
'get_connector_properties') as mock_conn:
if error:
self.assertRaises(error, do_open)
else:
do_open()
mock_conn.assert_called_once_with(
root_helper, socket.gethostname(), multipath_supported,
enforce_multipath)
fake_connector.connect_volume.assert_called_once_with(mock.ANY)
fake_connector.disconnect_volume.assert_called_once_with(
mock.ANY, fake_devinfo)
fake_volume.attach.assert_called_once_with(
None, 'glance_store', attach_mode,
host_name=socket.gethostname())
fake_volumes.detach.assert_called_once_with(fake_volume)
fake_conn_obj.assert_called_once_with(
mock.ANY, root_helper, conn=mock.ANY,
use_multipath=multipath_supported)
def test_open_cinder_volume_rw(self):
self._test_open_cinder_volume('wb', 'rw', None)
def test_open_cinder_volume_ro(self):
self._test_open_cinder_volume('rb', 'ro', None)
def test_open_cinder_volume_error(self):
self._test_open_cinder_volume('wb', 'rw', IOError)
def test_open_cinder_volume_multipath_disabled(self):
self.config(cinder_use_multipath=False, group='cinder1')
self._test_open_cinder_volume('wb', 'rw', None,
multipath_supported=False)
def test_open_cinder_volume_enforce_multipath(self):
self.config(cinder_use_multipath=True, group='cinder1')
self.config(cinder_enforce_multipath=True, group='cinder1')
self._test_open_cinder_volume('wb', 'rw', None,
multipath_supported=True,
enforce_multipath=True)
def test_cinder_check_context(self):
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._check_context, None)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._check_context,
FakeObject(service_catalog=None))
self.store._check_context(FakeObject(service_catalog='fake'))
def test_configure_add(self):
def fake_volume_type_check(name):
if name != 'some_type':
raise cinder.cinder_exception.NotFound(code=404)
with mock.patch.object(self.store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(volume_types=FakeObject(
find=fake_volume_type_check))
self.config(cinder_volume_type='some_type',
group=self.store.backend_group)
# If volume type exists, no exception is raised
self.store.configure_add()
            # setting cinder_volume_type to a non-existent value will log a
            # warning
self.config(cinder_volume_type='some_random_type',
group=self.store.backend_group)
with mock.patch.object(cinder, 'LOG') as mock_log:
self.store.configure_add()
mock_log.warning.assert_called_with(
"Invalid `cinder_volume_type some_random_type`")
def test_configure_add_cinder_service_down(self):
def fake_volume_type_check(name):
raise cinder.cinder_exception.ClientException(code=503)
self.config(cinder_volume_type='some_type',
group=self.store.backend_group)
with mock.patch.object(self.store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(volume_types=FakeObject(
find=fake_volume_type_check))
# We handle the ClientException to pass so no exception is raised
# in this case
self.store.configure_add()
def test_configure_add_authorization_failed(self):
def fake_volume_type_check(name):
raise cinder.exceptions.AuthorizationFailure(code=401)
self.config(cinder_volume_type='some_type',
group=self.store.backend_group)
with mock.patch.object(self.store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(volume_types=FakeObject(
find=fake_volume_type_check))
# Anything apart from invalid volume type or cinder service
# down will raise an exception
self.assertRaises(cinder.exceptions.AuthorizationFailure,
self.store.configure_add)
def test_is_image_associated_with_store(self):
with mock.patch.object(self.store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(volumes=FakeObject(
get=lambda volume_id: FakeObject(volume_type='some_type')),
volume_types=FakeObject(
default=lambda: FakeObject(name='some_type')))
            # When cinder_volume_type is set and is the same as the volume's type
self.config(cinder_volume_type='some_type',
group=self.store.backend_group)
fake_vol_id = str(uuid.uuid4())
type_match = self.store.is_image_associated_with_store(
self.context, fake_vol_id)
self.assertTrue(type_match)
            # When cinder_volume_type is not set and the volume's type is the
            # same as the default volume type
self.config(cinder_volume_type=None,
group=self.store.backend_group)
type_match = self.store.is_image_associated_with_store(
self.context, fake_vol_id)
self.assertTrue(type_match)
            # When cinder_volume_type is not set and the volume's type does not
            # match the default volume type
mocked_cc.return_value.volume_types = FakeObject(
default=lambda: {'name': 'random_type'})
type_match = self.store.is_image_associated_with_store(
self.context, fake_vol_id)
self.assertFalse(type_match)
def test_cinder_get(self):
expected_size = 5 * units.Ki
expected_file_contents = b"*" * expected_size
volume_file = six.BytesIO(expected_file_contents)
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
fake_volume = mock.MagicMock(id=fake_volume_uuid,
metadata={'image_size': expected_size},
status='available')
fake_volume.manager.get.return_value = fake_volume
fake_volumes = FakeObject(get=lambda id: fake_volume)
@contextlib.contextmanager
def fake_open(client, volume, mode):
self.assertEqual('rb', mode)
yield volume_file
with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
mock.patch.object(self.store, '_open_cinder_volume',
side_effect=fake_open):
mock_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
uri = "cinder://cinder1/%s" % fake_volume_uuid
loc = location.get_location_from_uri_and_backend(uri,
"cinder1",
conf=self.conf)
(image_file, image_size) = self.store.get(loc,
context=self.context)
expected_num_chunks = 2
data = b""
num_chunks = 0
for chunk in image_file:
num_chunks += 1
data += chunk
self.assertEqual(expected_num_chunks, num_chunks)
self.assertEqual(expected_file_contents, data)
def test_cinder_get_size(self):
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
fake_volume = FakeObject(size=5, metadata={})
fake_volumes = {fake_volume_uuid: fake_volume}
with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
uri = 'cinder://cinder1/%s' % fake_volume_uuid
loc = location.get_location_from_uri_and_backend(uri,
"cinder1",
conf=self.conf)
image_size = self.store.get_size(loc, context=self.context)
self.assertEqual(fake_volume.size * units.Gi, image_size)
def test_cinder_get_size_with_metadata(self):
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
expected_image_size = 4500 * units.Mi
fake_volume = FakeObject(size=5,
metadata={'image_size': expected_image_size})
fake_volumes = {fake_volume_uuid: fake_volume}
with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
uri = 'cinder://cinder1/%s' % fake_volume_uuid
loc = location.get_location_from_uri_and_backend(uri,
"cinder1",
conf=self.conf)
image_size = self.store.get_size(loc, context=self.context)
self.assertEqual(expected_image_size, image_size)
def _test_cinder_add(self, fake_volume, volume_file, size_kb=5,
verifier=None, backend="cinder1"):
expected_image_id = str(uuid.uuid4())
expected_size = size_kb * units.Ki
expected_file_contents = b"*" * expected_size
image_file = six.BytesIO(expected_file_contents)
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_location = 'cinder://%s/%s' % (backend, fake_volume.id)
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume.manager.get.return_value = fake_volume
fake_volumes = FakeObject(create=mock.Mock(return_value=fake_volume))
self.config(cinder_volume_type='some_type', group=backend)
@contextlib.contextmanager
def fake_open(client, volume, mode):
self.assertEqual('wb', mode)
yield volume_file
with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
mock.patch.object(self.store, '_open_cinder_volume',
side_effect=fake_open):
mock_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
loc, size, checksum, metadata = self.store.add(expected_image_id,
image_file,
expected_size,
self.context,
verifier)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_size, size)
self.assertEqual(expected_checksum, checksum)
fake_volumes.create.assert_called_once_with(
1,
name='image-%s' % expected_image_id,
metadata={'image_owner': self.context.project_id,
'glance_image_id': expected_image_id,
'image_size': str(expected_size)},
volume_type='some_type')
self.assertEqual(backend, metadata["store"])
def test_cinder_add(self):
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
status='available',
size=1)
volume_file = six.BytesIO()
self._test_cinder_add(fake_volume, volume_file)
def test_cinder_add_with_verifier(self):
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
status='available',
size=1)
volume_file = six.BytesIO()
verifier = mock.MagicMock()
self._test_cinder_add(fake_volume, volume_file, 1, verifier)
verifier.update.assert_called_with(b"*" * units.Ki)
def test_cinder_add_volume_full(self):
e = IOError()
volume_file = six.BytesIO()
e.errno = errno.ENOSPC
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
status='available',
size=1)
with mock.patch.object(volume_file, 'write', side_effect=e):
self.assertRaises(exceptions.StorageFull,
self._test_cinder_add, fake_volume, volume_file)
fake_volume.delete.assert_called_once_with()
def test_cinder_delete(self):
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
fake_volume = FakeObject(delete=mock.Mock())
fake_volumes = {fake_volume_uuid: fake_volume}
with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
uri = 'cinder://cinder1/%s' % fake_volume_uuid
loc = location.get_location_from_uri_and_backend(uri,
"cinder1",
conf=self.conf)
self.store.delete(loc, context=self.context)
fake_volume.delete.assert_called_once_with()
def test_cinder_add_different_backend(self):
self.store = cinder.Store(self.conf, backend="cinder2")
self.store.configure()
self.register_store_backend_schemes(self.store, 'cinder', 'cinder2')
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
status='available',
size=1)
volume_file = six.BytesIO()
self._test_cinder_add(fake_volume, volume_file, backend="cinder2")
| apache-2.0 | 4,125,745,246,991,553,500 | 45.382857 | 79 | 0.562482 | false |
Charnelx/django-split-settings | tests/conftest.py | 1 | 1356 | # -*- coding: utf-8 -*-
"""
This file contains different utils and fixtures.
"""
import os
from pytest import fixture
__author__ = 'sobolevn'
class Scope(dict):
"""
This class emulates `globals()`,
but does not share state across all tests.
"""
def __init__(self, *args, **kwargs):
"""
Adding `__file__` to make things work in `tools.py`.
"""
super(Scope, self).__init__(*args, **kwargs)
self['__file__'] = __file__
# Different util functions:
@fixture
def scope():
"""
    This fixture just returns a new instance
    of the test Scope class.
"""
return Scope()
@fixture
def fixture_file():
"""
    This fixture returns a path to the test fixture file.
"""
return os.path.join(
'settings',
'basic',
'fixture_to_include.py'
)
# Settings files:
@fixture
def merged():
"""
    This fixture returns the basic merged settings example.
"""
from tests import settings
return settings
@fixture
def stacked():
"""
    This fixture returns the stacked settings example.
"""
from tests.settings import stacked as _stacked
return _stacked
@fixture
def recursion():
"""
    This fixture returns the recursion settings example.
"""
from tests.settings import recursion as _recursion
return _recursion
| bsd-3-clause | 5,667,695,300,868,778,000 | 16.61039 | 60 | 0.601032 | false |
TechWritingWhiz/indy-node | data/migrations/deb/disabled_1_0_97_to_1_0_96.py | 1 | 8626 | #!/usr/bin/python3.5
import os
import shutil
import subprocess
from common.serializers.compact_serializer import CompactSerializer
from common.serializers.json_serializer import JsonSerializer
from common.serializers.mapping_serializer import MappingSerializer
from ledger.compact_merkle_tree import CompactMerkleTree
from ledger.ledger import Ledger
from plenum.persistence.leveldb_hash_store import LevelDbHashStore
from storage import store_utils
from storage.chunked_file_store import ChunkedFileStore
from indy_common.config_util import getConfig
from indy_common.txn_util import getTxnOrderedFields
from stp_core.common.log import getlogger
config = getConfig()
logger = getlogger()
def __migrate_ledger(data_directory,
old_ledger_file, new_ledger_file,
serializer: MappingSerializer = None):
"""
Test for the directory, open old and new ledger, migrate data, rename directories
"""
# we should have ChunkedFileStorage implementation of the Ledger
if not os.path.isdir(os.path.join(data_directory, old_ledger_file)):
msg = 'Could not find directory {} for migration.'.format(
old_ledger_file)
logger.error(msg)
raise Exception(msg)
# open the old ledger using the specified serializer
old_ledger_file_backup = old_ledger_file + "_new"
old_txn_log_store = ChunkedFileStore(data_directory,
old_ledger_file_backup,
isLineNoKey=True,
storeContentHash=False)
old_ledger = Ledger(CompactMerkleTree(),
dataDir=data_directory,
txn_serializer=serializer,
hash_serializer=serializer,
fileName=old_ledger_file_backup,
transactionLogStore=old_txn_log_store)
# open the new ledger with new serialization
new_ledger = Ledger(CompactMerkleTree(),
dataDir=data_directory,
fileName=new_ledger_file)
logger.info("new size for {}: {}".format(
old_ledger_file_backup, str(new_ledger.size)))
# add all txns into the old ledger
for _, txn in new_ledger.getAllTxn():
old_ledger.add(txn)
logger.info("old size for {}: {}".format(
new_ledger_file, str(old_ledger.size)))
old_ledger.stop()
new_ledger.stop()
# now that everything succeeded, remove the new files and move the old
# files into place
shutil.rmtree(
os.path.join(data_directory, new_ledger_file))
os.rename(
os.path.join(data_directory, old_ledger_file_backup),
os.path.join(data_directory, old_ledger_file))
def __open_old_ledger(data_directory, old_ledger_file,
hash_store_name, serializer):
# open old Ledger with leveldb hash store (to re-init it)
old_txn_log_store = ChunkedFileStore(data_directory,
old_ledger_file,
isLineNoKey=True,
storeContentHash=False)
old_ledger = Ledger(CompactMerkleTree(
hashStore=LevelDbHashStore(
dataDir=data_directory,
fileNamePrefix=hash_store_name)),
dataDir=data_directory,
txn_serializer=serializer,
hash_serializer=serializer,
fileName=old_ledger_file,
transactionLogStore=old_txn_log_store)
old_ledger.stop()
def migrate_all_hash_stores(node_data_directory):
# the new hash store (merkle tree) will be recovered from the new transaction log after re-start
# just delete the current hash store
new_merkle_nodes = os.path.join(node_data_directory, '_merkleNodes')
new_merkle_leaves = os.path.join(node_data_directory, '_merkleLeaves')
new_merkle_nodes_bin = os.path.join(
node_data_directory, '_merkleNodes.bin')
new_merkle_leaves_bin = os.path.join(
node_data_directory, '_merkleLeaves.bin')
new_merkle_nodes_config_bin = os.path.join(
node_data_directory, 'config_merkleNodes.bin')
new_merkle_leaves_config_bin = os.path.join(
node_data_directory, 'config_merkleLeaves.bin')
if os.path.exists(new_merkle_nodes):
shutil.rmtree(new_merkle_nodes)
if os.path.exists(new_merkle_leaves):
shutil.rmtree(new_merkle_leaves)
if os.path.exists(new_merkle_nodes_bin):
os.remove(new_merkle_nodes_bin)
if os.path.exists(new_merkle_leaves_bin):
os.remove(new_merkle_leaves_bin)
if os.path.exists(new_merkle_nodes_config_bin):
os.remove(new_merkle_nodes_config_bin)
if os.path.exists(new_merkle_leaves_config_bin):
os.remove(new_merkle_leaves_config_bin)
# open new Ledgers
fields = getTxnOrderedFields()
__open_old_ledger(node_data_directory, config.poolTransactionsFile,
'pool', serializer=JsonSerializer())
__open_old_ledger(node_data_directory, config.domainTransactionsFile,
'domain', serializer=CompactSerializer(fields=fields))
__open_old_ledger(node_data_directory, config.configTransactionsFile,
'config', serializer=JsonSerializer())
def migrate_all_ledgers_for_node(node_data_directory):
# using default ledger names
__migrate_ledger(node_data_directory,
config.poolTransactionsFile, config.poolTransactionsFile,
serializer=JsonSerializer())
__migrate_ledger(
node_data_directory,
config.configTransactionsFile,
config.configTransactionsFile,
serializer=JsonSerializer())
# domain ledger uses custom CompactSerializer and old file name
fields = getTxnOrderedFields()
__migrate_ledger(node_data_directory,
config.domainTransactionsFile.replace(
'domain_', ''), config.domainTransactionsFile,
serializer=CompactSerializer(fields=fields))
def migrate_all_states(node_data_directory):
# the states will be recovered from the ledger during the start-up.
# just delete the current ones
shutil.rmtree(
os.path.join(node_data_directory, 'pool_state'))
shutil.rmtree(
os.path.join(node_data_directory, 'domain_state'))
shutil.rmtree(
os.path.join(node_data_directory, 'config_state'))
def migrate_genesis_txn(base_dir):
for suffix in ('sandbox', 'live', 'local'):
old_domain_genesis = os.path.join(
base_dir, 'transactions_{}'.format(suffix))
old_pool_genesis = os.path.join(
base_dir, 'pool_transactions_{}'.format(suffix))
new_domain_genesis = os.path.join(
base_dir, 'domain_transactions_{}_genesis'.format(suffix))
new_pool_genesis = os.path.join(
base_dir, 'pool_transactions_{}_genesis'.format(suffix))
if os.path.exists(old_domain_genesis):
os.remove(old_domain_genesis)
if os.path.exists(old_pool_genesis):
os.remove(old_pool_genesis)
if os.path.exists(new_domain_genesis):
old_ser = CompactSerializer(getTxnOrderedFields())
new_ser = JsonSerializer()
with open(new_domain_genesis, 'r') as f1:
with open(old_domain_genesis, 'w') as f2:
for line in store_utils.cleanLines(f1):
txn = new_ser.deserialize(line)
txn = old_ser.serialize(txn)
f2.write(txn)
os.remove(new_domain_genesis)
if os.path.exists(new_pool_genesis):
            os.rename(new_pool_genesis, old_pool_genesis)
def migrate_all():
base_dir = config.baseDir
nodes_data_dir = os.path.join(base_dir, config.nodeDataDir)
if not os.path.exists(nodes_data_dir):
# TODO: find a better way
base_dir = '/home/sovrin/.sovrin'
nodes_data_dir = os.path.join(base_dir, config.nodeDataDir)
if not os.path.exists(nodes_data_dir):
msg = 'Can not find the directory with the ledger: {}'.format(
nodes_data_dir)
logger.error(msg)
raise Exception(msg)
for node_dir in os.listdir(nodes_data_dir):
node_data_dir = os.path.join(nodes_data_dir, node_dir)
migrate_all_ledgers_for_node(node_data_dir)
migrate_all_hash_stores(node_data_dir)
migrate_all_states(node_data_dir)
migrate_genesis_txn(base_dir)
subprocess.run(['chown', '-R', 'sovrin:sovrin', '/home/sovrin/.sovrin'])
migrate_all()
| apache-2.0 | -7,163,062,001,245,452,000 | 38.935185 | 100 | 0.634245 | false |