repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tommy-u/enable | examples/kiva/pyglet_gl.py | 1 | 1319 | from __future__ import with_statement
from numpy import array
from pyglet.window import key, Window
try:
from kiva.gl import GraphicsContext
except ImportError, e:
raise Exception(e)
from kiva.constants import FILL, STROKE, FILL_STROKE
class TestWindow(Window):
""" Press Q or Escape to exit
"""
def __init__(self, *args, **kw):
Window.__init__(self, *args, **kw)
self.init_window()
def init_window(self):
self.gc = GraphicsContext(size=(self.width, self.height))
self.gc.gl_init()
def on_key_press(self, symbol, modifiers):
if symbol in (key.ESCAPE, key.Q):
self.has_exit = True
def draw(self):
gc = self.gc
with gc:
gc.clear((0, 1, 0, 1))
gc.set_stroke_color((1,1,1,1))
gc.set_line_width(2)
pts = array([[50, 50], [50,100], [100,100], [100,50]])
gc.begin_path()
gc.lines(pts)
gc.close_path()
gc.draw_path(STROKE)
gc.flush()
def main():
win = TestWindow(width = 640, height=480)
exit = False
while not exit:
win.switch_to()
win.dispatch_events()
win.clear()
win.draw()
win.flip()
exit = win.has_exit
if __name__ == "__main__":
main()
| bsd-3-clause | -925,883,367,172,672,600 | 22.553571 | 66 | 0.542077 | false | 3.408269 | false | false | false |
halmd-org/h5md-tools | h5mdtools/_plot/pdf.py | 1 | 8436 | # -*- coding: utf-8 -*-
#
# pdf - compute and plot pair distribution function
#
# Copyright © 2008-2012 Felix Höfling and Peter Colberg
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
from __future__ import print_function
"""
Compute and plot pair distribution function g(r)
"""
def plot(args):
import os, os.path
import h5py
from matplotlib import pyplot as plt
import h5mdtools._plot.label
    from numpy import array, linspace, savetxt
ax = plt.axes()
label = None
ax.axhline(y=1, color='black', lw=0.5)
ax.set_color_cycle(args.colors)
for (i, fn) in enumerate(args.input):
try:
f = h5py.File(fn, 'r')
except IOError:
raise SystemExit('failed to open HDF5 file: %s' % fn)
try:
# determine file type, prefer precomputed static structure factor data
if 'structure' in f.keys() and 'ssf' in f['structure'].keys():
import filon
import h5mdtools._plot.ssf as ssf
from scipy.constants import pi
param = f['parameters']
# load static structure factor from file
H5 = f['structure/ssf/' + '/'.join(args.flavour)]
q = f['structure/ssf/wavenumber'].__array__() # convert to NumPy array
S_q, S_q_err = ssf.load_ssf(H5, args)
# read some parameters
dim = param['box'].attrs['dimension']
density = param['box'].attrs['density']
length = param['box'].attrs['length']
# compute pair distribution function
xlim = args.xlim or (0, min(length) / 2)
r = linspace(xlim[0], xlim[1], num=args.bins)
if r[0] == 0:
r = r[1:]
if dim == 3:
# convert 3-dim Fourier transform F[S_q - 1] / (2π)³ to 1-dim Fourier integral
pdf = filon.filon(q * (S_q - 1), q, r).imag / (2 * pi * pi * r)
pdf_err = filon.filon(q * S_q_err, q, r).imag / (2 * pi * pi * r)
pdf = 1 + pdf / density # add δ-contribution
pdf_err = pdf_err / density
elif 'particles' in f.keys():
# compute SSF from trajectory data
H5 = f['particles/' + args.flavour[0]]
r, pdf, pdf_err = pdf_from_trajectory(H5, args)
else:
raise SystemExit('Input file provides neither data for the static structure factor nor a trajectory')
# before closing the file, store attributes for later use
if 'param' in locals():
attrs = h5mdtools._plot.label.attributes(param)
except IndexError:
raise SystemExit('invalid phase space sample offset')
except KeyError as what:
raise SystemExit(str(what) + '\nmissing simulation data in file: %s' % fn)
finally:
f.close()
if args.label:
label = args.label[i % len(args.label)] % attrs
elif args.legend or not args.small:
basename = os.path.splitext(os.path.basename(fn))[0]
label = r'%s' % basename.replace('_', r'\_')
if args.title:
title = args.title % attrs
c = args.colors[i % len(args.colors)]
ax.plot(r, pdf, '-', color=c, label=label)
if 'pdf_err' in locals():
ax.errorbar(r, pdf, pdf_err, fmt='o', color=c, markerfacecolor=c, markeredgecolor=c, markersize=2, linewidth=.5)
else:
ax.plot(r, pdf, 'o', markerfacecolor=c, markeredgecolor=c, markersize=2)
# write plot data to file
if args.dump:
f = open(args.dump, 'a')
print('# %s, sample %s' % (label.replace(r'\_', '_'), args.sample), file=f)
if 'pdf_err' in locals():
print('# r g(r) g_err(r)', file=f)
savetxt(f, array((r, pdf, pdf_err)).T)
else:
print('# r g(r)', file=f)
savetxt(f, array((r, pdf)).T)
print('\n', file=f)
f.close()
# adjust axis ranges
ax.axis('tight')
if args.xlim:
plt.setp(ax, xlim=args.xlim)
if args.ylim:
plt.setp(ax, ylim=args.ylim)
else:
plt.setp(ax, ylim=(0, plt.ylim()[1]))
# optionally plot with logarithmic scale(s)
if args.axes == 'xlog':
ax.set_xscale('log')
if args.axes == 'ylog':
ax.set_yscale('log')
if args.axes == 'loglog':
ax.set_xscale('log')
ax.set_yscale('log')
if args.legend or not args.small:
l = ax.legend(loc=args.legend)
l.legendPatch.set_alpha(0.7)
plt.xlabel(args.xlabel or r'distance $r / \sigma$')
plt.ylabel(args.ylabel or r'pair distribution function $g(r)$')
if args.output is None:
plt.show()
else:
plt.savefig(args.output, dpi=args.dpi)
"""
Compute pair distribution function from trajectory data
"""
def pdf_from_trajectory(H5group, args):
from scipy.constants import pi
from scipy.special import gamma
from numpy import array, diagonal, float32, histogram, power, prod, round_, sqrt, sum, zeros
import re
# read periodically extended particle positions,
# read one or several samples, convert to single precision
idx = [int(x) for x in re.split(':', args.sample)]
data = H5group['position/value']
if len(idx) == 1:
samples = array([data[idx[0]],], dtype=float32)
elif len(idx) == 2:
samples = array(data[idx[0]:idx[1]], dtype=float32)
elif len(idx) == 3:
samples = array(data[idx[0]:idx[1]:idx[2]], dtype=float32)
# positional coordinates dimension
dim = H5group['box'].attrs['dimension']
# periodic simulation box length
length = diagonal(H5group['box/edges'])
# number of particles
N = data.shape[1]
density = N / prod(length)
r_max = args.xlim or (0, min(length) / 2)
H = zeros(args.bins)
for r in samples:
for (i, j) in enumerate(range(r.shape[0] - 1, 0, -1)):
# particle distance vectors
dr = r[:j] - r[i + 1:]
# minimum image distances
dr = dr - round_(dr / length) * length
# magnitude of distance vectors
r_norm = sqrt(sum(dr * dr, axis=1))
# accumulate histogram of minimum image distances
h, bins = histogram(r_norm, bins=args.bins, range=r_max)
H += 2 * h
# volume of n-dimensional unit sphere
Vn = power(pi, dim / 2.) / gamma(dim / 2. + 1.)
# average number of atoms in ideal gas per interval
n = Vn * density * (power(bins[1:], dim) - power(bins[:-1], dim))
# compute pair distribution function g(r)
pdf = H / samples.shape[0] / n / N
pdf_err = sqrt(H) / samples.shape[0] / n / N
return .5 * (bins[1:] + bins[:-1]), pdf, pdf_err
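# Editorial note (added for clarity, not part of the original script): the
# normalisation in pdf_from_trajectory() above follows the usual ideal-gas
# convention,
#
#     g(r_k) = H_k / (n_samples * N * n_k),
#     n_k    = V_dim * rho * (r_{k+1}^dim - r_k^dim),
#     V_dim  = pi^(dim/2) / Gamma(dim/2 + 1)   (volume of the unit sphere),
#
# so an ideal gas gives g(r) = 1 at every distance; the factor of 2 on the
# histogram accounts for counting each (i, j) pair only once in the loop.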
def add_parser(subparsers):
parser = subparsers.add_parser('pdf', help='pair distribution function')
parser.add_argument('input', nargs='+', metavar='INPUT', help='HDF5 file with trajectory or ssf data')
parser.add_argument('--flavour', nargs=2, help='particle flavours')
parser.add_argument('--sample', help='index of phase space sample(s)')
parser.add_argument('--bins', type=int, help='number of histogram bins')
parser.add_argument('--xlim', metavar='VALUE', type=float, nargs=2, help='limit x-axis to given range')
parser.add_argument('--ylim', metavar='VALUE', type=float, nargs=2, help='limit y-axis to given range')
parser.add_argument('--axes', choices=['xlog', 'ylog', 'loglog'], help='logarithmic scaling')
parser.add_argument('--verbose', action='store_true')
parser.set_defaults(flavour=('A', 'A'), sample='0', bins=50,)
| gpl-3.0 | -7,613,585,320,662,006,000 | 37.674312 | 124 | 0.580002 | false | 3.613802 | false | false | false |
w0rp/w0rpzone | w0rplib/templatetags/markdown.py | 1 | 1374 | from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
import bleach
import markdown2
register = template.Library()
# Not really safe, but Django needs to think it is.
@register.filter(is_safe=True)
@stringfilter
def unsafe_markdown(value):
return mark_safe(markdown2.markdown(
text=value,
extras=[
"fenced-code-blocks",
"code-friendly",
"tables",
"highlightjs-lang",
],
))
@register.filter(is_safe=True)
@stringfilter
def markdown(value):
html = unsafe_markdown(value)
return bleach.clean(
html,
tags=[
'a',
'abbr',
'acronym',
'b',
'blockquote',
'code',
'em',
'i',
'li',
'ol',
'p',
'pre',
'strong',
'table',
'tbody',
'td',
'tfoot',
'th',
'thead',
'tr',
'ul',
],
attributes={
'a': ['href', 'title'],
'abbr': ['title'],
            'acronym': ['title'],
'code': ['class'],
},
protocols=[
'http',
'https',
'mailto',
],
)
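# Editorial usage sketch (not part of the original module): how these filters
# are typically applied from a Django template.  The variable names below are
# illustrative only.
#
#     {% load markdown %}
#     {{ comment.body|markdown }}             {# sanitised through bleach #}
#     {{ article.body|unsafe_markdown }}      {# trusted input only, no sanitising #}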
| bsd-2-clause | 4,200,208,242,134,314,000 | 19.507463 | 55 | 0.437409 | false | 4.240741 | false | false | false |
dhruvbaldawa/fitbit | plot_sleep.py | 1 | 2126 | import os
import logging
import itertools
import json
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pylab
from datetime import datetime
from utils import gen_next_day, setup_logging
sns.set()
plt.switch_backend('Qt4Agg')
ROOT = os.getcwd()
CONFIG_FILE = os.path.join(ROOT, 'config.ini')
DATA_DIR = os.path.join(ROOT, 'data', 'sleep')
DATA_DB_FILE = os.path.join(DATA_DIR, '.db')
FILENAME_TEMPLATE = 'sleep-{date}.json'
DATE_FORMAT = '%Y-%m-%d'
ASLEEP, AWAKE, REALLY_AWAKE, DEFAULT = 1, 2, 3, -1
logger = logging.getLogger(__name__)
def format_date(date):
return datetime.strftime(date, DATE_FORMAT)
def convert_ts_to_minute(ts):
hour, minute, seconds = ts.split(':', 2)
return int(hour) * 60 + int(minute)
def get_minute_data(day):
filename = FILENAME_TEMPLATE.format(date=format_date(day))
filepath = os.path.join(DATA_DIR, filename)
with open(filepath, 'r') as f:
data = json.loads(f.read())
minute_data = []
for record in data['sleep']:
minute_data += record['minuteData']
return minute_data
def write_to_dataframe(dataframe, day, minute_data):
for record in minute_data:
minute = convert_ts_to_minute(record['dateTime'])
dataframe[minute][format_date(day)] = int(record['value'])
def main():
start_date = datetime.strptime('2015-02-06', DATE_FORMAT)
end_date = datetime.strptime('2015-03-21', DATE_FORMAT)
days = itertools.takewhile(lambda x: x <= end_date,
gen_next_day(start_date))
date_index = pd.date_range(start_date, end_date)
df = pd.DataFrame(index=date_index, columns=range(24 * 60), dtype='uint8')
print df.dtypes
for day in days:
logger.info('Processing day {}'.format(format_date(day)))
minute_data = get_minute_data(day)
write_to_dataframe(df, day, minute_data)
df = df.fillna(0)
sns.heatmap(df, xticklabels=False, yticklabels=False,
linewidths=0)
# df.plot()
pylab.show()
if __name__ == '__main__':
setup_logging(logger, logging.INFO)
main()
| mit | -6,007,969,226,944,758,000 | 26.25641 | 78 | 0.645343 | false | 3.250765 | false | false | false |
Karajlug/karajlug | books/views.py | 1 | 2202 | # -----------------------------------------------------------------------------
# Karajlug.org
# Copyright (C) 2010 Karajlug community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from django.shortcuts import render_to_response as rr
from django.http import Http404
from django.template import RequestContext
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.conf import settings
from .models import Book
def books_index(request):
"""
Main index of registered books.
"""
books = Book.objects.all().order_by("weight")
book_per_page = 4
try:
book_per_page = settings.BOOK_IN_PAGE
except AttributeError:
pass
paginator = Paginator(books, book_per_page)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
books_list = paginator.page(page)
except (EmptyPage, InvalidPage):
# if provided page value in GET was out of range
books_list = paginator.page(paginator.num_pages)
return rr("books.html", {"books": books_list},
context_instance=RequestContext(request))
def book_view(request, slug):
"""
View of each Book
"""
try:
book = Book.objects.get(slug=slug)
except Book.DoesNotExist:
raise Http404()
return rr("book_view.html", {"book": book},
context_instance=RequestContext(request))
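# Editorial sketch (not part of the original module): URL patterns these views
# are typically wired to.  The names and regexes are illustrative, and the
# exact urlconf style depends on the Django version this project targets.
#
#     urlpatterns = patterns('',
#         url(r'^books/$', 'books.views.books_index', name='books_index'),
#         url(r'^books/(?P<slug>[^/]+)/$', 'books.views.book_view', name='book_view'),
#     )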
| gpl-2.0 | 6,594,413,010,891,017,000 | 33.40625 | 79 | 0.631698 | false | 4.284047 | false | false | false |
kursitet/edx-ora2 | openassessment/assessment/serializers/base.py | 2 | 10587 | # coding=utf-8
"""
Serializers common to all assessment types.
"""
from copy import deepcopy
import logging
from django.core.cache import cache
from rest_framework import serializers
from openassessment.assessment.models import (
Assessment, AssessmentPart, Criterion, CriterionOption, Rubric,
)
logger = logging.getLogger(__name__)
class InvalidRubric(Exception):
"""This can be raised during the deserialization process."""
def __init__(self, errors):
Exception.__init__(self, repr(errors))
self.errors = deepcopy(errors)
class NestedModelSerializer(serializers.ModelSerializer):
"""Model Serializer that supports deserialization with arbitrary nesting.
The Django REST Framework does not currently support deserialization more
than one level deep (so a parent and children). We want to be able to
create a :class:`Rubric` → :class:`Criterion` → :class:`CriterionOption`
hierarchy.
    Much of the base logic already "just works" and serialization of arbitrary
depth is supported. So we just override the save_object method to
recursively link foreign key relations instead of doing it one level deep.
We don't touch many-to-many relationships because we don't need to for our
purposes, so those still only work one level deep.
"""
def recursively_link_related(self, obj, **kwargs):
if getattr(obj, '_related_data', None):
for accessor_name, related in obj._related_data.items():
setattr(obj, accessor_name, related)
for related_obj in related:
self.recursively_link_related(related_obj, **kwargs)
del(obj._related_data)
def save_object(self, obj, **kwargs):
obj.save(**kwargs)
# The code for many-to-many relationships is just copy-pasted from the
# Django REST Framework ModelSerializer
if getattr(obj, '_m2m_data', None):
for accessor_name, object_list in obj._m2m_data.items():
setattr(obj, accessor_name, object_list)
del(obj._m2m_data)
# This is our only real change from ModelSerializer
self.recursively_link_related(obj, **kwargs)
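    # Editorial sketch (not part of the original module): with this mixin a
    # single serializer call can persist a whole Rubric -> Criterion ->
    # CriterionOption tree, e.g.
    #
    #     serializer = RubricSerializer(data=rubric_dict)  # nested dict of criteria/options
    #     if serializer.is_valid():
    #         rubric = serializer.save()  # save_object() links the children recursively
    #
    # rubric_from_dict() at the bottom of this module shows the full flow,
    # including how content_hash and structure_hash are filled in first.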
class CriterionOptionSerializer(NestedModelSerializer):
"""Serializer for :class:`CriterionOption`"""
class Meta:
model = CriterionOption
fields = ('order_num', 'points', 'name', 'label', 'explanation')
class CriterionSerializer(NestedModelSerializer):
"""Serializer for :class:`Criterion`"""
options = CriterionOptionSerializer(required=True, many=True)
points_possible = serializers.Field(source='points_possible')
class Meta:
model = Criterion
fields = ('order_num', 'name', 'label', 'prompt', 'options', 'points_possible')
class RubricSerializer(NestedModelSerializer):
"""Serializer for :class:`Rubric`."""
criteria = CriterionSerializer(required=True, many=True)
points_possible = serializers.Field(source='points_possible')
class Meta:
model = Rubric
fields = ('id', 'content_hash', 'structure_hash', 'criteria', 'points_possible')
def validate_criteria(self, attrs, source):
"""Make sure we have at least one Criterion in the Rubric."""
criteria = attrs[source]
if not criteria:
raise serializers.ValidationError("Must have at least one criterion")
return attrs
@classmethod
def serialized_from_cache(cls, rubric, local_cache=None):
"""For a given `Rubric` model object, return a serialized version.
This method will attempt to use the cache if possible, first looking at
the `local_cache` dict you can pass in, and then looking at whatever
Django cache is configured.
Args:
rubric (Rubric): The Rubric model to get the serialized form of.
            local_cache (dict): Mapping of `rubric.content_hash` to serialized
rubric dictionary. We include this so that we can call this
method in a loop.
Returns:
dict: `Rubric` fields as a dictionary, with `criteria` and `options`
relations followed.
"""
# Optional local cache you can send in (for when you're calling this
# in a loop).
local_cache = local_cache or {}
# Check our in-memory cache...
if rubric.content_hash in local_cache:
return local_cache[rubric.content_hash]
# Check the external cache (e.g. memcached)
rubric_dict_cache_key = (
"RubricSerializer.serialized_from_cache.{}"
.format(rubric.content_hash)
)
rubric_dict = cache.get(rubric_dict_cache_key)
if rubric_dict:
local_cache[rubric.content_hash] = rubric_dict
return rubric_dict
# Grab it from the database
rubric_dict = RubricSerializer(rubric).data
cache.set(rubric_dict_cache_key, rubric_dict)
local_cache[rubric.content_hash] = rubric_dict
return rubric_dict
class AssessmentPartSerializer(serializers.ModelSerializer):
"""Serializer for :class:`AssessmentPart`."""
class Meta:
model = AssessmentPart
fields = ('option', 'criterion', 'feedback')
class AssessmentSerializer(serializers.ModelSerializer):
"""Simplified serializer for :class:`Assessment` that's lighter on the DB."""
class Meta:
model = Assessment
fields = (
'submission_uuid',
'rubric',
'scored_at',
'scorer_id',
'score_type',
'feedback',
)
def serialize_assessments(assessments_qset):
assessments = list(assessments_qset.select_related("rubric"))
rubric_cache = {}
return [
full_assessment_dict(
assessment,
RubricSerializer.serialized_from_cache(
assessment.rubric, rubric_cache
)
)
for assessment in assessments
]
def full_assessment_dict(assessment, rubric_dict=None):
"""
Return a dict representation of the Assessment model, including nested
assessment parts. We do some of the serialization ourselves here instead
of relying on the Django REST Framework serializers. This is for performance
reasons -- we have a cached rubric easily available, and we don't want to
follow all the DB relations from assessment -> assessment part -> option ->
criterion.
Args:
assessment (Assessment): The Assessment model to serialize
Returns:
dict with keys 'rubric' (serialized Rubric model) and 'parts' (serialized assessment parts)
"""
assessment_cache_key = "assessment.full_assessment_dict.{}.{}.{}".format(
assessment.id, assessment.submission_uuid, assessment.scored_at.isoformat()
)
assessment_dict = cache.get(assessment_cache_key)
if assessment_dict:
return assessment_dict
assessment_dict = AssessmentSerializer(assessment).data
if not rubric_dict:
rubric_dict = RubricSerializer.serialized_from_cache(assessment.rubric)
assessment_dict["rubric"] = rubric_dict
# This part looks a little goofy, but it's in the name of saving dozens of
# SQL lookups. The rubric_dict has the entire serialized output of the
# `Rubric`, its child `Criterion` and grandchild `CriterionOption`. This
# includes calculated things like `points_possible` which aren't actually in
# the DB model. Instead of invoking the serializers for `Criterion` and
# `CriterionOption` again, we simply index into the places we expect them to
# be from the big, saved `Rubric` serialization.
parts = []
for part in assessment.parts.all().select_related("criterion", "option"):
criterion_dict = rubric_dict["criteria"][part.criterion.order_num]
options_dict = None
if part.option is not None:
options_dict = criterion_dict["options"][part.option.order_num]
options_dict["criterion"] = criterion_dict
parts.append({
"option": options_dict,
"criterion": criterion_dict,
"feedback": part.feedback
})
    # Now manually build up the dynamically calculated values on the
# `Assessment` so we can again avoid DB calls.
assessment_dict["parts"] = parts
assessment_dict["points_earned"] = sum(
part_dict["option"]["points"]
if part_dict["option"] is not None else 0
for part_dict in parts
)
assessment_dict["points_possible"] = rubric_dict["points_possible"]
cache.set(assessment_cache_key, assessment_dict)
return assessment_dict
def rubric_from_dict(rubric_dict):
"""Given a dict of rubric information, return the corresponding Rubric
This will create the Rubric and its children if it does not exist already.
Sample data (one criterion, two options)::
{
"prompt": "Create a plan to deliver ora2!",
"criteria": [
{
"order_num": 0,
"name": "realistic",
"prompt": "Is the deadline realistic?",
"options": [
{
"order_num": 0,
"points": 0,
"name": "No",
"explanation": "We need more time!"
},
{
"order_num": 1,
"points": 2,
"name": "Yes",
"explanation": "We got this."
},
]
}
]
}
"""
rubric_dict = deepcopy(rubric_dict)
# Calculate the hash based on the rubric content...
content_hash = Rubric.content_hash_from_dict(rubric_dict)
try:
rubric = Rubric.objects.get(content_hash=content_hash)
except Rubric.DoesNotExist:
rubric_dict["content_hash"] = content_hash
rubric_dict["structure_hash"] = Rubric.structure_hash_from_dict(rubric_dict)
for crit_idx, criterion in enumerate(rubric_dict.get("criteria", {})):
if "order_num" not in criterion:
criterion["order_num"] = crit_idx
for opt_idx, option in enumerate(criterion.get("options", {})):
if "order_num" not in option:
option["order_num"] = opt_idx
rubric_serializer = RubricSerializer(data=rubric_dict)
if not rubric_serializer.is_valid():
raise InvalidRubric(rubric_serializer.errors)
rubric = rubric_serializer.save()
return rubric
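# Editorial usage sketch (not part of the original module): building a Rubric
# from the dictionary format documented in the docstring above.  Running this
# requires a configured Django environment, so it is left as a comment.
#
#     rubric = rubric_from_dict({
#         "criteria": [{
#             "name": "realistic",
#             "prompt": "Is the deadline realistic?",
#             "options": [
#                 {"points": 0, "name": "No", "explanation": "We need more time!"},
#                 {"points": 2, "name": "Yes", "explanation": "We got this."},
#             ],
#         }],
#     })
#     # A second call with an identical dict returns the same row, because the
#     # lookup is keyed on the rubric content hash.
#     serialized = RubricSerializer.serialized_from_cache(rubric)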
| agpl-3.0 | 4,205,798,340,659,545,000 | 34.874576 | 99 | 0.631768 | false | 4.248495 | false | false | false |
bitmovin/bitmovin-python | bitmovin/services/manifests/generic_media_service.py | 1 | 2056 | from bitmovin.errors import MissingArgumentError, FunctionalityNotAvailableError
from bitmovin.services.manifests.media_custom_tag_service import MediaCustomTag
from bitmovin.services.rest_service import RestService
class GenericMediaService(RestService):
BASE_ENDPOINT_URL = 'encoding/manifests/hls/{manifest_id}/media/{media_type}'
def __init__(self, http_client, media_type_url, resource_class):
if not media_type_url:
raise MissingArgumentError('media_type_url must be given')
if not resource_class:
raise MissingArgumentError('resource_class must be given')
self.media_type_url = media_type_url
self.resource_class = resource_class
super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=self.resource_class)
self.CustomTag = MediaCustomTag(http_client=http_client)
def _get_endpoint_url(self, manifest_id):
if not manifest_id:
raise MissingArgumentError('manifest_id must be given')
endpoint_url = self.BASE_ENDPOINT_URL\
.replace('{manifest_id}', manifest_id)\
.replace('{media_type}', self.media_type_url)
return endpoint_url
def create(self, object_, manifest_id):
self.relative_url = self._get_endpoint_url(manifest_id=manifest_id)
return super().create(object_)
def delete(self, manifest_id, media_id):
self.relative_url = self._get_endpoint_url(manifest_id=manifest_id)
return super().delete(id_=media_id)
def retrieve(self, manifest_id, media_id):
self.relative_url = self._get_endpoint_url(manifest_id=manifest_id)
return super().retrieve(id_=media_id)
def list(self, manifest_id, offset=None, limit=None):
self.relative_url = self._get_endpoint_url(manifest_id=manifest_id)
return super().list(offset, limit)
def retrieve_custom_data(self, manifest_id, media_id):
raise FunctionalityNotAvailableError('Retrieve Custom Data is not available for HLS Manifest Medias')
| unlicense | -556,082,224,711,346,800 | 45.727273 | 114 | 0.692121 | false | 3.821561 | false | false | false |
rossant/spiky | experimental/_correlation/loader.py | 1 | 7874 | """This module provides utility classes and functions to load spike sorting
data sets."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os.path
import re
import numpy as np
from iotools import (find_filename, find_index, load_text, load_xml, normalize,
load_binary)
from selection import select
from logger import debug, info, warn
# -----------------------------------------------------------------------------
# File loading functions
# -----------------------------------------------------------------------------
def read_xml(filename_xml, fileindex):
"""Read the XML file associated to the current dataset,
and return a metadata dictionary."""
params = load_xml(filename_xml, fileindex=fileindex)
# klusters tests
metadata = dict(
nchannels=params['nchannels'],
nsamples=params['nsamples'],
fetdim=params['fetdim'],
freq=params['rate'])
return metadata
def read_features(filename_fet, nchannels, fetdim, freq):
"""Read a .fet file and return the normalize features array,
as well as the spiketimes."""
features = load_text(filename_fet, np.int32, skiprows=1)
features = np.array(features, dtype=np.float32)
# HACK: There are either 1 or 5 dimensions more than fetdim*nchannels
# we can't be sure so we first try 1, if it does not work we try 5.
for nextradim in [1, 5]:
try:
features = features.reshape((-1,
fetdim * nchannels + nextradim))
# if the features array could be reshape, directly break the loop
break
except ValueError:
features = None
if features is None:
raise ValueError("""The number of columns in the feature matrix
is not fetdim (%d) x nchannels (%d) + 1 or 5.""" %
(fetdim, nchannels))
# get the spiketimes
spiketimes = features[:,-1].copy()
spiketimes *= (1. / freq)
# count the number of extra features
nextrafet = features.shape[1] - nchannels * fetdim
# normalize normal features while keeping symmetry
features[:,:-nextrafet] = normalize(features[:,:-nextrafet],
symmetric=True)
# normalize extra features without keeping symmetry
features[:,-nextrafet:] = normalize(features[:,-nextrafet:],
symmetric=False)
return features, spiketimes
def read_clusters(filename_clu):
clusters = load_text(filename_clu, np.int32)
clusters = clusters[1:]
return clusters
def read_masks(filename_mask, fetdim):
full_masks = load_text(filename_mask, np.float32, skiprows=1)
masks = full_masks[:,:-1:fetdim]
return masks, full_masks
def read_waveforms(filename_spk, nsamples, nchannels):
waveforms = load_binary(filename_spk)
waveforms = waveforms.reshape((-1, nsamples, nchannels))
return waveforms
# -----------------------------------------------------------------------------
# KlustersLoader class
# -----------------------------------------------------------------------------
class KlustersLoader(object):
# Initialization methods
# ----------------------
def __init__(self, filename=None):
"""Initialize a Loader object for loading Klusters-formatted files.
Arguments:
* filename: the full path of any file belonging to the same
dataset.
"""
if filename:
self.open(filename)
def open(self, filename):
"""Open a file."""
self.filename = filename
# Find the file index associated to the filename, or 1 by default.
self.fileindex = find_index(filename) or 1
self.find_filenames()
self.read()
def find_filenames(self):
"""Find the filenames of the different files for the current
dataset."""
self.filename_xml = find_filename(self.filename, 'xml')
self.filename_fet = find_filename(self.filename, 'fet')
self.filename_clu = find_filename(self.filename, 'clu')
# fmask or mask file
self.filename_mask = find_filename(self.filename, 'fmask')
if not self.filename_mask:
self.filename_mask = find_filename(self.filename, 'mask')
self.filename_spk = find_filename(self.filename, 'spk')
# Input-Output methods
# --------------------
def read(self):
# Read metadata.
try:
self.metadata = read_xml(self.filename_xml, self.fileindex)
except IOError:
# Die if no XML file is available for this dataset, as it contains
# critical metadata.
raise IOError("The XML file is missing.")
nsamples = self.metadata.get('nsamples')
nchannels = self.metadata.get('nchannels')
fetdim = self.metadata.get('fetdim')
freq = self.metadata.get('freq')
# Read features.
try:
self.features, self.spiketimes = read_features(self.filename_fet,
nchannels, fetdim, freq)
except IOError:
raise IOError("The FET file is missing.")
# Count the number of spikes and save it in the metadata.
nspikes = self.features.shape[0]
self.metadata['nspikes'] = nspikes
# Read clusters.
try:
self.clusters = read_clusters(self.filename_clu)
except IOError:
warn("The CLU file is missing.")
# Default clusters if the CLU file is not available.
self.clusters = np.zeros(nspikes + 1, dtype=np.int32)
self.clusters[0] = 1
# Read masks.
try:
self.masks, self.masks_full = read_masks(self.filename_mask,
fetdim)
except IOError:
warn("The MASKS/FMASKS file is missing.")
# Default masks if the MASK/FMASK file is not available.
self.masks = np.ones((nspikes, nchannels))
            self.masks_full = np.ones(self.features.shape)
# Read waveforms.
try:
self.waveforms = read_waveforms(self.filename_spk, nsamples,
nchannels)
except IOError:
warn("The SPK file is missing.")
self.waveforms = np.zeros((nspikes, nsamples, nchannels))
def close(self):
self.filename = None
self.fileindex = None
self.filename_xml = None
self.filename_fet = None
self.filename_clu = None
self.filename_mask = None
self.filename_spk = None
self.features = None
self.spiketimes = None
self.clusters = None
self.masks = None
self.masks_full = None
self.waveforms = None
self.metadata = {}
# Access to the data
# ------------------
def get_features(self, spikes=None):
return select(self.features, spikes)
def get_spiketimes(self, spikes=None):
return select(self.spiketimes, spikes)
def get_clusters(self, spikes=None):
return select(self.clusters, spikes)
def get_masks(self, spikes=None, full=None):
if not full:
masks = self.masks
else:
masks = self.masks_full
return select(masks, spikes)
def get_waveforms(self, spikes=None):
return select(self.waveforms, spikes)
if __name__ == '__main__':
filename = "D:\Git\spiky\_test\data\subset41test.clu.1"
l = KlustersLoader(filename)
print l.metadata
| bsd-3-clause | -5,610,675,824,701,268,000 | 33.23913 | 79 | 0.546355 | false | 4.359911 | false | false | false |
Clyde-fare/scikit-learn | sklearn/manifold/mds.py | 257 | 15138 | """
Multi-dimensional Scaling (MDS)
"""
# author: Nelle Varoquaux <[email protected]>
# Licence: BSD
import numpy as np
import warnings
from ..base import BaseEstimator
from ..metrics import euclidean_distances
from ..utils import check_random_state, check_array, check_symmetric
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..isotonic import IsotonicRegression
def _smacof_single(similarities, metric=True, n_components=2, init=None,
max_iter=300, verbose=0, eps=1e-3, random_state=None):
"""
    Computes multidimensional scaling using the SMACOF algorithm
Parameters
----------
similarities: symmetric ndarray, shape [n * n]
similarities between the points
metric: boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components: int, optional, default: 2
number of dimension in which to immerse the similarities
overwritten if initial array is provided.
init: {None or ndarray}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
max_iter: int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose: int, optional, default: 0
level of verbosity
    eps: float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare convergence
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
X: ndarray (n_samples, n_components), float
coordinates of the n_samples points in a n_components-space
stress_: float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
Number of iterations run.
"""
similarities = check_symmetric(similarities, raise_exception=True)
n_samples = similarities.shape[0]
random_state = check_random_state(random_state)
sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
sim_flat_w = sim_flat[sim_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.rand(n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError("init matrix should be of shape (%d, %d)" %
(n_samples, n_components))
X = init
old_stress = None
ir = IsotonicRegression()
for it in range(max_iter):
# Compute distance and monotonic regression
dis = euclidean_distances(X)
if metric:
disparities = similarities
else:
dis_flat = dis.ravel()
# similarities with 0 are considered as missing values
dis_flat_w = dis_flat[sim_flat != 0]
# Compute the disparities using a monotonic regression
disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
disparities = dis_flat.copy()
disparities[sim_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
(disparities ** 2).sum())
# Compute stress
stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2
# Update X using the Guttman transform
dis[dis == 0] = 1e-5
ratio = disparities / dis
B = - ratio
B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
X = 1. / n_samples * np.dot(B, X)
dis = np.sqrt((X ** 2).sum(axis=1)).sum()
if verbose >= 2:
print('it: %d, stress %s' % (it, stress))
if old_stress is not None:
if(old_stress - stress / dis) < eps:
if verbose:
print('breaking at iteration %d with stress %s' % (it,
stress))
break
old_stress = stress / dis
return X, stress, it + 1
def smacof(similarities, metric=True, n_components=2, init=None, n_init=8,
n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None,
return_n_iter=False):
"""
    Computes multidimensional scaling using the SMACOF (Scaling by Majorizing a
    Complicated Function) algorithm.
The SMACOF algorithm is a multidimensional scaling algorithm: it minimizes
    an objective function, the *stress*, using a majorization technique. The
Stress Majorization, also known as the Guttman Transform, guarantees a
monotone convergence of Stress, and is more powerful than traditional
techniques such as gradient descent.
    The SMACOF algorithm for metric MDS can be summarized by the following steps:
1. Set an initial start configuration, randomly or not.
2. Compute the stress
3. Compute the Guttman Transform
4. Iterate 2 and 3 until convergence.
    The nonmetric algorithm adds a monotonic regression step before computing
the stress.
Parameters
----------
similarities : symmetric ndarray, shape (n_samples, n_samples)
similarities between the points
metric : boolean, optional, default: True
compute metric or nonmetric SMACOF algorithm
n_components : int, optional, default: 2
number of dimension in which to immerse the similarities
overridden if initial array is provided.
init : {None or ndarray of shape (n_samples, n_components)}, optional
if None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array
n_init : int, optional, default: 8
Number of time the smacof algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose : int, optional, default: 0
level of verbosity
    eps : float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare convergence
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
X : ndarray (n_samples,n_components)
Coordinates of the n_samples points in a n_components-space
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
n_iter : int
The number of iterations corresponding to the best stress.
Returned only if `return_n_iter` is set to True.
Notes
-----
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
similarities = check_array(similarities)
random_state = check_random_state(random_state)
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
if not n_init == 1:
warnings.warn(
'Explicit initial positions passed: '
'performing only one init of the MDS instead of %d'
% n_init)
n_init = 1
best_pos, best_stress = None, None
if n_jobs == 1:
for it in range(n_init):
pos, stress, n_iter_ = _smacof_single(
similarities, metric=metric,
n_components=n_components, init=init,
max_iter=max_iter, verbose=verbose,
eps=eps, random_state=random_state)
if best_stress is None or stress < best_stress:
best_stress = stress
best_pos = pos.copy()
best_iter = n_iter_
else:
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
delayed(_smacof_single)(
similarities, metric=metric, n_components=n_components,
init=init, max_iter=max_iter, verbose=verbose, eps=eps,
random_state=seed)
for seed in seeds)
positions, stress, n_iters = zip(*results)
best = np.argmin(stress)
best_stress = stress[best]
best_pos = positions[best]
best_iter = n_iters[best]
if return_n_iter:
return best_pos, best_stress, best_iter
else:
return best_pos, best_stress
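# Editorial usage sketch (added for illustration; not part of scikit-learn):
# calling smacof() directly on a precomputed, symmetric dissimilarity matrix.
# The random data below is arbitrary.
#
#     rng = np.random.RandomState(0)
#     X = rng.rand(10, 3)
#     D = euclidean_distances(X)                   # (10, 10) symmetric matrix
#     pos, stress = smacof(D, n_components=2, random_state=0)
#     # pos has shape (10, 2); stress is the final value of the objective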
class MDS(BaseEstimator):
"""Multidimensional scaling
Read more in the :ref:`User Guide <multidimensional_scaling>`.
Parameters
----------
metric : boolean, optional, default: True
compute metric or nonmetric SMACOF (Scaling by Majorizing a
Complicated Function) algorithm
n_components : int, optional, default: 2
number of dimension in which to immerse the similarities
overridden if initial array is provided.
n_init : int, optional, default: 4
Number of time the smacof algorithm will be run with different
initialisation. The final results will be the best output of the
n_init consecutive runs in terms of stress.
max_iter : int, optional, default: 300
Maximum number of iterations of the SMACOF algorithm for a single run
verbose : int, optional, default: 0
level of verbosity
    eps : float, optional, default: 1e-3
        relative tolerance w.r.t stress to declare convergence
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
dissimilarity : string
Which dissimilarity measure to use.
Supported are 'euclidean' and 'precomputed'.
Attributes
----------
embedding_ : array-like, shape [n_components, n_samples]
Stores the position of the dataset in the embedding space
stress_ : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points)
References
----------
"Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
"Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
"""
def __init__(self, n_components=2, metric=True, n_init=4,
max_iter=300, verbose=0, eps=1e-3, n_jobs=1,
random_state=None, dissimilarity="euclidean"):
self.n_components = n_components
self.dissimilarity = dissimilarity
self.metric = metric
self.n_init = n_init
self.max_iter = max_iter
self.eps = eps
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
@property
def _pairwise(self):
        return self.dissimilarity == "precomputed"
def fit(self, X, y=None, init=None):
"""
Computes the position of the points in the embedding space
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
init : {None or ndarray, shape (n_samples,)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
self.fit_transform(X, init=init)
return self
def fit_transform(self, X, y=None, init=None):
"""
Fit the data from X, and returns the embedded coordinates
Parameters
----------
X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \
if dissimilarity='precomputed'
Input data.
init : {None or ndarray, shape (n_samples,)}, optional
If None, randomly chooses the initial configuration
if ndarray, initialize the SMACOF algorithm with this array.
"""
X = check_array(X)
if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed":
warnings.warn("The MDS API has changed. ``fit`` now constructs an"
" dissimilarity matrix from data. To use a custom "
"dissimilarity matrix, set "
"``dissimilarity='precomputed'``.")
if self.dissimilarity == "precomputed":
self.dissimilarity_matrix_ = X
elif self.dissimilarity == "euclidean":
self.dissimilarity_matrix_ = euclidean_distances(X)
else:
raise ValueError("Proximity must be 'precomputed' or 'euclidean'."
" Got %s instead" % str(self.dissimilarity))
self.embedding_, self.stress_, self.n_iter_ = smacof(
self.dissimilarity_matrix_, metric=self.metric,
n_components=self.n_components, init=init, n_init=self.n_init,
n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
eps=self.eps, random_state=self.random_state,
return_n_iter=True)
return self.embedding_
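# Editorial usage sketch (added for illustration; not part of scikit-learn).
# The data is random and the parameter values are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    mds = MDS(n_components=2, n_init=4, max_iter=300, random_state=0,
              dissimilarity='euclidean')
    embedding = mds.fit_transform(X)
    print(embedding.shape)   # (20, 2): one 2-D position per input sample
    print(mds.stress_)       # final value of the stress objective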
| bsd-3-clause | 2,097,884,011,881,603,800 | 35.389423 | 79 | 0.625512 | false | 4.124796 | true | false | false |
pw31/GGchem | data/SUPCRT92/make_SH90.py | 1 | 1725 | import numpy as np
import matplotlib.pyplot as plt
import sys
#==========================================================================
# read Sharp & Huebner data
#==========================================================================
SHnam = np.loadtxt('SharpHuebner.dat',skiprows=0,usecols=[0],dtype='str')
SHcoeff = np.loadtxt('SharpHuebner.dat',skiprows=0,usecols=[1,2,3,4,5])
#==========================================================================
# read GGchem data
#==========================================================================
f = open('../../data/DustChem.dat','r')
lines = f.readlines()[:]
f.close
Nline = len(lines)
Ncond = len(SHnam)
Nfound = 0
f = open('DustChem_SH90.dat','w')
f.write('dust species\n')
f.write('============\n')
f.write('%d\n' %(Ncond))
for i in range(0,Ncond):
cond = SHnam[i]+'[s]'
lenc = len(cond)
found = 0
for j in range(0,Nline):
line = lines[j]
if (line[0:lenc]==cond):
found = 1
Nfound = Nfound+1
print cond,j
f.write('\n')
correct = 0
while True:
line = lines[j]
if (line=='\n'): break
if (line[0]=='#'): correct=1
if (correct==1): line = '#'+line[1:]
if (correct==0): f.write(line)
j = j+1
c = SHcoeff[i,0:5]
f.write('# Sharp & Huebner (1990):\n')
f.write(' 1 %12.5e %12.5e %12.5e %12.5e %12.5e\n' %(c[0],c[1],c[2],c[3],c[4]))
if (found==0):
print cond," not found."
f.write('\n')
f.write(cond+'\n')
f.write('# Sharp & Huebner (1990):\n')
f.write(' 1 %12.5e %12.5e %12.5e %12.5e %12.5e\n' %(c[0],c[1],c[2],c[3],c[4]))
f.close
print Nfound," condensates found."
print Ncond-Nfound," condensates not found."
| gpl-3.0 | -6,609,831,724,567,343,000 | 29.803571 | 85 | 0.456812 | false | 2.928693 | false | false | false |
rjferrier/fluidity | mayavi/mayavi_amcg/filters/data_set_clipper.py | 7 | 3812 | """This filter enables one to select a portion of an input dataset
using a plane and clip it so only one side remains.
Many thanks to Prabhu for ScalarCutPlane and TransformData.
Wouldn't have been able to code this otherwise.
"""
# Author: Samir Talwar <[email protected]>
# License: BSD Style.
# Enthought library imports.
from enthought.traits.api import Instance, Int, Trait, TraitMap, Button
from enthought.traits.ui.api import View, Group, Item
from enthought.tvtk.api import tvtk
# Local imports
from enthought.mayavi.core.common import error
from enthought.mayavi.filters.filter_base import FilterBase
from enthought.mayavi.core.pipeline_info import PipelineInfo
from enthought.mayavi.components.implicit_plane import ImplicitPlane
######################################################################
# `DataSetClipper` class.
######################################################################
class DataSetClipper(FilterBase):
# The version of this class. Used for persistence.
__version__ = 0
# The implicit plane widget used to easily place the implicit function.
implicit_plane = Instance(ImplicitPlane, allow_none=False)
# The actual filter.
filter = Instance(tvtk.ClipDataSet, allow_none=False)
# I'm not sure what this'll work with. vtkUnstructuredGrid is confirmed.
# Everything else is somewhat possible.
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
########################################
# View related traits.
# Button to reset the boundaries of the plane.
# This should really be done automatically.
reset_button = Button('Reset Boundaries')
# The View for this object.
view = View(Group(Item(name='reset_button'),
Item(name='implicit_plane', style='custom'),
show_labels=False,
),
)
######################################################################
# `Filter` interface
######################################################################
def setup_pipeline(self):
self.implicit_plane = ImplicitPlane()
self.filter = tvtk.ClipDataSet()
def update_pipeline(self):
inputs = self.inputs
if len(inputs) == 0:
return
implicit_plane = self.implicit_plane
implicit_plane.inputs = inputs
implicit_plane.update_pipeline()
widget = self.implicit_plane.widget
widget.outline_translation = 0
self.widgets = [widget]
filter = self.filter
filter.input = inputs[0].outputs[0]
filter.clip_function = implicit_plane.plane
filter.update()
self._set_outputs([filter.output])
self.pipeline_changed = True
def update_data(self):
# Do nothing if there is no input.
if len(self.inputs) == 0:
return
# Propagate the data_changed event.
self.data_changed = True
######################################################################
# Non-public methods.
######################################################################
def _on_implicit_plane_changed(self):
self.filter.clip_function = self.implicit_plane.plane
self.filter.update()
self.render()
def _reset_button_fired(self):
if len(self.widgets) == 0:
return
self.widgets[0].place_widget()
self.render()
| lgpl-2.1 | 908,296,905,546,060,200 | 33.035714 | 76 | 0.536201 | false | 4.741294 | false | false | false |
pvital/patchew | api/migrations/0021_fix_recipients_utf8.py | 2 | 1084 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
import json
import email.header
def _parse_header(header):
r = ""
for h, c in email.header.decode_header(header):
if isinstance(h, bytes):
h = h.decode(c or "utf-8", "replace")
r += h
return r
def fix_utf8_recipients(apps, schema_editor):
# We can't import the models directly as they may be a newer
# version than this migration expects. We use the historical version.
Message = apps.get_model("api", "Message")
msgs = Message.objects.all()
msgs = msgs.filter(recipients__contains="?")
for m in msgs:
recipients = json.loads(m.recipients)
recipients = [[_parse_header(x[0]), x[1]] for x in recipients]
m.recipients = json.dumps(recipients)
m.save()
class Migration(migrations.Migration):
dependencies = [("api", "0020_auto_20180204_0647")]
operations = [
migrations.RunPython(
fix_utf8_recipients, reverse_code=migrations.RunPython.noop
)
]
| mit | -1,629,429,450,629,336,000 | 26.794872 | 73 | 0.632841 | false | 3.737931 | false | false | false |
palfrey/book-blog | bofh.py | 1 | 2431 | from common import *
from re import compile, DOTALL, MULTILINE
from urlgrab import Cache
from urlparse import urljoin
linkPattern = compile("<h3><a href=\"(/[^\"]+)\">(.+?)</a></h3>")
earlierPattern = compile("<a href='([^\']+)'>.+?Earlier Stories.+?</a>", DOTALL | MULTILINE)
titlePattern = compile("<h2>(.+?)</h2>")
subtitlePattern = compile("<p class=\"standfirst\">(.+?)</p>")
contentPattern = compile("<strong class=\"trailer\">.+?</p>(.+?)(?:(?:<p>(?:(?:<i>)|(?:<small>)|(?:<font size=\"-2\">)|(?:<br>\n))?BOFH .+? Simon Travaglia)|(?:<ul class=\"noindent\">)|(?:<ul>.+?<li><a href=\"http://www.theregister.co.uk/content/30/index.html\">BOFH: The whole shebang</a></li>)|(?:</form>))", DOTALL| MULTILINE)
adPattern = compile("(<div id=ad-mu1-spot>.+?</div>)", MULTILINE | DOTALL)
episodePattern = compile("<strong class=\"trailer\">Episode \d+")
url = "http://www.theregister.co.uk/data_centre/bofh/"
pages = [url]
cache = Cache()
while True:
print url
data = cache.get(url).read()
links = linkPattern.findall(data)
if links == []:
break
pages.insert(0, url)
earlier = earlierPattern.findall(data)
url = urljoin(url, earlier[0])
skipTitles = ["Salmon Days is Go!"]
year = None
newItems = False
for mainPage in pages:
data = cache.get(mainPage).read()
links = linkPattern.findall(data)
links.reverse()
for l in links:
url = urljoin(mainPage, l[0])
newyear = url.split("/")[3]
if newyear != year:
if year != None:
if int(newyear) < int(year):
raise Exception, (year, newyear)
tocEnd(toc)
makeMobi(folder, "Simon Travaglia", newitems = newItems)
newItems = False
folder = "BOFH-%s"%newyear
toc = tocStart(folder)
year = newyear
data = cache.get(url, max_age = -1).read()
episode = episodePattern.findall(data)
if len(episode) == 0:
print "Skipping", url
continue
print url
title = titlePattern.findall(data)[0]
print title
if title in skipTitles:
print "skipping", title
continue
subtitle = subtitlePattern.findall(data)[0]
content = contentPattern.findall(data)[0]
ad = adPattern.findall(data)[0]
content = content.replace(ad, "")
content = content.decode('utf-8')
title = title.decode("utf-8")
subtitle = subtitle.decode("utf-8")
assert len(content)>0
if generatePage(url, title, subtitle + "<br />\n" + content, folder, toc):
newItems = True
#break
print links
tocEnd(toc)
makeMobi(folder, "Simon Travaglia")
| agpl-3.0 | -3,325,928,608,387,939,300 | 28.646341 | 329 | 0.647059 | false | 2.897497 | false | false | false |
Ektorus/bohrium | test/cil/Unittest/Benchmarks/IronPython/cphhpc/signal/convolution.py | 1 | 3399 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# convolution - http://www.songho.ca/dsp/convolution/convolution.html
# Copyright (C) 2011-2012 The CPHHPC Project lead by Brian Vinter
#
# This file is part of CPHHPC Toolbox.
#
# CPHHPC Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# CPHHPC Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
# -- END_HEADER ---
#
"""Convolution: http://www.songho.ca/dsp/convolution/convolution.html"""
from numcil import zeros
def convolve2d(input, window, out=None, data_type=None):
"""
Convolve two 2-dimensional arrays:
http://www.songho.ca/dsp/convolution/convolution.html
Parameters
----------
input : ndarray
A 2-dimensional input array
window: ndarray
A 2-dimensional convolution window array (shape must be odd)
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right shape and must be
C-contiguous. This is a performance feature. Therefore, if
these conditions are not met, an exception is raised, instead of
attempting to be flexible.
data_type : data-type, optional
The precision of the created `out` ndarray if `out` is None
Raises
------
ValueError
If shape of `window` is even
If shape of `out` doesn't match those of `input`
"""
if window.shape[0] % 2 == 0 or window.shape[1] % 2 == 0:
msg = "window.shape: %s is _NOT_ odd" % (str(window.shape))
raise ValueError(msg)
window_radius = (window.shape[0]/2, window.shape[1]/2)
zero_pad_shape = (input.shape[0] + (window_radius[0]*2),
input.shape[1] + (window_radius[1]*2))
zero_padded_input = zeros(zero_pad_shape, dtype=data_type)
zero_padded_input[window_radius[0]:-window_radius[0],
window_radius[1]:-window_radius[1]] = input
    if out is not None:
if out.shape != input.shape:
msg = "input.shape: %s and out.shape: %s doesn't match" % (str(input.shape), str(out.shape))
raise ValueError(msg)
else:
if data_type == None:
out = zeros(input.shape, dtype=input.dtype)
else:
out = zeros(input.shape, dtype=data_type)
start_y = window_radius[0]*2
end_y = zero_pad_shape[0]
for y in xrange(window.shape[0]):
start_x = window_radius[1]*2
end_x = zero_pad_shape[1]
for x in xrange(window.shape[1]):
tmp = zero_padded_input * window[y][x]
out += tmp[start_y:end_y, start_x:end_x]
start_x -= 1
end_x -= 1
start_y -= 1
end_y -= 1
return out
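# Editorial usage sketch (added for illustration; not part of the original
# module).  It assumes numcil's ndarray supports NumPy-style slicing and
# arithmetic; the 3x3 box filter below is arbitrary demo data.
if __name__ == "__main__":
    image = zeros((8, 8))
    image[3:5, 3:5] = 1.0        # a small bright square in the middle
    box = zeros((3, 3))
    box += 1.0 / 9.0             # 3x3 mean filter; the window shape must be odd
    smoothed = convolve2d(image, box)
    print smoothed.shape         # (8, 8): output has the same shape as the input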
| lgpl-3.0 | 9,024,390,674,542,501,000 | 33.333333 | 104 | 0.624595 | false | 3.658773 | false | false | false |
avasilevich/spolks | icmp/utils/ip_packet.py | 1 | 1482 | import struct
import socket
from collections import namedtuple
IPv4Header = namedtuple('IPv4Header', [
'version',
'ihl',
'tos',
'total_length',
'id',
'flags',
'fragment_offset',
'ttl',
'proto',
'checksum',
'src',
'dest',
])
class IpHeader(IPv4Header):
_format = '!BBHHHBBH4s4s'
def pack(self):
to_pack = (
self[0] << 4 | self[1],
self[2],
self[3],
self[4],
self[5] << 13 | self[6],
self[7],
self[8],
self[9],
self[10],
self[11],
)
return struct.pack(self._format, *to_pack)
@classmethod
def unpack(cls, byte_obj):
(ver_ihl, tos, tot_len, id, flags_offset, *others) = struct.unpack(
cls._format, byte_obj)
version = ver_ihl >> 4
ihl = ver_ihl & 0xf
flags = flags_offset >> 13
fragment_offset = flags_offset & 0x1fff
return IPv4Header(
version, ihl, tos, tot_len, id, flags, fragment_offset, *others)
def __len__(self):
return struct.calcsize(self._format)
def make_ip_packet(dest_addr, ip_proto, payload, source_addr, ttl=64):
source_addr = socket.inet_aton(source_addr)
dest_addr = socket.inet_aton(dest_addr)
id = 13371
header = IpHeader(
4, 5, 0, 0, id, 2, 0, ttl, ip_proto, 0, source_addr, dest_addr)
data = header.pack() + payload
return data
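# Hedged example (illustrative, not part of the original module): round-trips
# a header through pack()/unpack(). The addresses, payload and protocol number
# (1 = ICMP) are arbitrary sample values.
if __name__ == "__main__":
    raw = make_ip_packet('192.0.2.1', 1, b'ping-payload', '198.51.100.7', ttl=32)
    header = IpHeader.unpack(raw[:20])  # the first 20 bytes hold the IPv4 header
    print(header.ttl, socket.inet_ntoa(header.dest))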
| mit | 4,343,442,371,421,247,000 | 22.15625 | 76 | 0.527665 | false | 3.32287 | false | false | false |
PKRoma/python-for-android | pythonforandroid/recipes/liblzma/__init__.py | 4 | 2591 | import sh
from multiprocessing import cpu_count
from os.path import exists, join
from pythonforandroid.archs import Arch
from pythonforandroid.logger import shprint
from pythonforandroid.recipe import Recipe
from pythonforandroid.util import current_directory, ensure_dir
class LibLzmaRecipe(Recipe):
version = '5.2.4'
url = 'https://tukaani.org/xz/xz-{version}.tar.gz'
built_libraries = {'liblzma.so': 'install/lib'}
def build_arch(self, arch: Arch) -> None:
env = self.get_recipe_env(arch)
install_dir = join(self.get_build_dir(arch.arch), 'install')
with current_directory(self.get_build_dir(arch.arch)):
if not exists('configure'):
shprint(sh.Command('./autogen.sh'), _env=env)
shprint(sh.Command('autoreconf'), '-vif', _env=env)
shprint(sh.Command('./configure'),
'--host=' + arch.command_prefix,
'--prefix=' + install_dir,
'--disable-builddir',
'--disable-static',
'--enable-shared',
'--disable-xz',
'--disable-xzdec',
'--disable-lzmadec',
'--disable-lzmainfo',
'--disable-scripts',
'--disable-doc',
_env=env)
shprint(
sh.make, '-j', str(cpu_count()),
_env=env
)
ensure_dir('install')
shprint(sh.make, 'install', _env=env)
def get_library_includes(self, arch: Arch) -> str:
"""
Returns a string with the appropriate `-I<lib directory>` to link
with the lzma lib. This string is usually added to the environment
variable `CPPFLAGS`.
"""
return " -I" + join(
self.get_build_dir(arch.arch), 'install', 'include',
)
def get_library_ldflags(self, arch: Arch) -> str:
"""
Returns a string with the appropriate `-L<lib directory>` to link
with the lzma lib. This string is usually added to the environment
variable `LDFLAGS`.
"""
return " -L" + join(
self.get_build_dir(arch.arch), self.built_libraries['liblzma.so'],
)
@staticmethod
def get_library_libs_flag() -> str:
"""
Returns a string with the appropriate `-l<lib>` flags to link with
the lzma lib. This string is usually added to the environment
variable `LIBS`.
"""
return " -llzma"
recipe = LibLzmaRecipe()
| mit | -3,836,365,355,632,390,000 | 32.217949 | 78 | 0.545735 | false | 4.054773 | false | false | false |
peak6/st2 | st2common/tests/unit/test_jinja_render_crypto_filters.py | 3 | 2246 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from st2tests.base import CleanDbTestCase
from st2common.constants.keyvalue import FULL_SYSTEM_SCOPE, SYSTEM_SCOPE, DATASTORE_PARENT_SCOPE
from st2common.models.db.keyvalue import KeyValuePairDB
from st2common.persistence.keyvalue import KeyValuePair
from st2common.services.keyvalues import KeyValueLookup
from st2common.util import jinja as jinja_utils
from st2common.util.crypto import read_crypto_key, symmetric_encrypt
class JinjaUtilsDecryptTestCase(CleanDbTestCase):
def test_filter_decrypt_kv(self):
secret = 'Build a wall'
crypto_key_path = cfg.CONF.keyvalue.encryption_key_path
crypto_key = read_crypto_key(key_path=crypto_key_path)
secret_value = symmetric_encrypt(encrypt_key=crypto_key, plaintext=secret)
KeyValuePair.add_or_update(KeyValuePairDB(name='k8', value=secret_value,
scope=FULL_SYSTEM_SCOPE,
secret=True))
env = jinja_utils.get_jinja_environment()
context = {}
context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)})
context.update({
DATASTORE_PARENT_SCOPE: {
SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE)
}
})
template = '{{st2kv.system.k8 | decrypt_kv}}'
actual = env.from_string(template).render(context)
self.assertEqual(actual, secret)
| apache-2.0 | -2,793,195,057,411,800,000 | 44.836735 | 96 | 0.705254 | false | 4.083636 | false | false | false |
trojjer/romaine | src/romaine/core.py | 1 | 1344 | import os
class Core(object):
"""
The core of the Romaine, provides BDD test API.
"""
# All located features
feature_file_paths = set()
instance = None
def __init__(self):
"""
Initialise Romaine core.
"""
self.steps = {}
Core.instance = self
def locate_features(self, path):
"""
Locate any features given a path.
Keyword arguments:
path -- The path to search for features, recursively.
Returns:
List of features located in the path given.
"""
walked_paths = os.walk(path)
# Features in this path are stored in an intermediate list before
# being added to the class variable so that we can return only the
# ones we find on this invocation of locate_features
feature_candidates = []
for walked_path in walked_paths:
base_directory, sub_directories, feature_files = walked_path
for feature_file in feature_files:
feature_candidates.append(
os.path.join(
base_directory,
feature_file
)
)
self.feature_file_paths.update(feature_candidates)
return feature_candidates
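# Hedged usage sketch (not part of the original module): assumes a directory
# named "features" exists under the current working directory; every file found
# beneath it is also remembered on Core.feature_file_paths.
if __name__ == "__main__":
    core = Core()
    found = core.locate_features("features")
    print("%d feature file(s) located" % len(found))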
| mit | 5,296,890,157,354,747,000 | 27 | 74 | 0.541667 | false | 4.887273 | false | false | false |
Darrel12/FFAudX | MyListWidget.py | 1 | 6481 | from PyQt5.QtWidgets import QAbstractItemView, QFrame, QListWidget, QListWidgetItem
from PyQt5.QtCore import Qt
import os
from mimetypes import MimeTypes
from urllib import request
from PyQt5 import QtGui, QtCore
import savedData as sd
from pytube import YouTube
from pprint import pprint
# Customized list widget item to hold more data than just the absolute path of the item #
class MyListWidgetItem(QListWidgetItem):
def __init__(self, path, dest, video_checked=False, video=None, audio_checked=False, audio=None):
super().__init__()
self.absFilePath = path
# determine if new item is a url or file path - handle accordingly
if os.path.exists(path):
print("path exists:", path)
# extract the path without the filename and render it back to a string
self.path = '/'.join(path.split('/')[0:-1]) + '/'
# print("directory path: " + self.path) # idk if this is useful anymore
# extract the last part of the path to get the file name
self.fName = path.split('/')[-1]
# file name without the extension
self.no_extension = self.fName.split('.')[0]
# use MimeTypes to determine the file type
self.fType = identifyItem(path)
# set the save destination for when the conversion is done
self.fDest = dest
# the audio/video type to convert to if they have one - blank by default
# TODO maybe make them the currently checked values? and/or reset checked values when adding new item?
self.audio = audio if audio_checked else ""
self.video = video if video_checked else ""
else:
print("Pathhh:", path)
# TODO put something here? see how this corresponds to the above self.path
self.path = path
self.yt = YouTube(path)
# use the youtube scraper to get the youtube video name
self.no_extension = self.yt.filename
self.fName = self.no_extension + "." + sd.initVidFmt
self.fType = ('youtube/video', None) # save a custom mime-type TODO extract the mime type from the metadata
self.fDest = dest
self.audio = audio if audio_checked else ""
self.video = video if video_checked else ""
print("fType:", self.fType)
def __repr__(self):
try:
return self.fName
except Exception as err:
print("I think fName is trying to be accessed when it hasn't been created:")
pprint(err.args)
def getAudio(self, audio=""):
if audio != "":
self.audio = audio
return self.audio
def getVideo(self, video=""):
if video != "":
self.video = video
return self.video
def getFileType(self):
return self.fType
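# Hedged illustration (added comment, not part of the original widget code):
# how an item might be constructed for a local file versus a YouTube URL,
# assuming savedData.initSaveDir points at the desired output folder; the
# file path and video id below are hypothetical.
#
#   item = MyListWidgetItem('/home/user/clip.mp4', sd.initSaveDir,
#                           audio_checked=True, audio='mp3')
#   item = MyListWidgetItem('https://www.youtube.com/watch?v=XXXXXXXXXXX',
#                           sd.initSaveDir, video_checked=True, video='mp4')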
# identify the type of item the user is adding to the queue #
def identifyItem(path):
"""
:param path: the file path or url the user is providing
:return: the type of file the user is providing
"""
mime = MimeTypes()
url = request.pathname2url(path)
mime_type = mime.guess_type(url)
print("MimeType: " + str(mime_type))
return mime_type
# Customized list widget to allow internal/external drag-and-drop actions #
class MyListWidget(QListWidget):
def __init__(self, parent):
super(MyListWidget, self).__init__(parent)
self.setAcceptDrops(True)
self.setDragDropMode(QAbstractItemView.InternalMove)
self.setFrameShadow(QFrame.Plain)
self.setFrameShape(QFrame.Box)
# do stuff if a dragged item enters the widget #
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.acceptProposedAction()
else:
super(MyListWidget, self).dragEnterEvent(event)
# do stuff repeatedly if a dragged item is moving around in the widget #
def dragMoveEvent(self, event):
super(MyListWidget, self).dragMoveEvent(event)
# handle internal and external drag-and-drop actions #
def dropEvent(self, event):
# capture the main windows audio/video configuration to be applied to the next added items
video_checked = self.parent().parent().parent().ui.chk_video.isChecked()
audio_checked = self.parent().parent().parent().ui.chk_audio.isChecked()
video = self.parent().parent().parent().ui.combo_video.currentText()
audio = self.parent().parent().parent().ui.combo_audio.currentText()
# handle external drop
if event.mimeData().hasUrls():
for url in event.mimeData().urls():
print("url: " + str(url))
print(url)
path = url.toLocalFile()
if os.path.isfile(path):
item = MyListWidgetItem(path, sd.initSaveDir, video_checked, video, audio_checked, audio)
print("local file:", item)
self.addItem(item)
else:
item = MyListWidgetItem(url.toString(), sd.initSaveDir, video_checked, video, audio_checked, audio)
print("Youtube Video:", item)
self.addItem(item)
# make the item display its name
self.item(self.count() - 1).setText(item.no_extension)
self.item(self.count() - 1).setSelected(True)
else:
# default internal drop
super(MyListWidget, self).dropEvent(event)
# noinspection PyArgumentList
def keyPressEvent(self, event):
"""
Assign the following functions to keystrokes
delete -> delete the highlighted items
ctrl + a -> highlight all items in the queue
:param event: signal event to determine if it's a keyboard event
"""
# TODO make arrow keys move selection to above/below item
# TODO Ctrl + arrow keys to move the highlighted items priority
modifiers = QtGui.QApplication.keyboardModifiers()
if event.key() == Qt.Key_Delete:
self._del_item()
elif modifiers == QtCore.Qt.ControlModifier and event.key() == Qt.Key_A:
self._highlight_all()
# remove the selected item
def _del_item(self):
for item in self.selectedItems():
self.takeItem(self.row(item))
# highlight all items in the list
def _highlight_all(self):
self.selectAll()
| mit | -9,006,226,964,796,633,000 | 40.280255 | 120 | 0.613486 | false | 4.263816 | false | false | false |
kgizdov/EasyClangComplete | plugin/completion/base_complete.py | 1 | 3073 | """Contains base class for completers.
Attributes:
log (logging.Logger): logger for this module
"""
import logging
from ..tools import Tools
log = logging.getLogger("ECC")
class BaseCompleter:
"""A base class for clang based completions.
Attributes:
compiler_variant (CompilerVariant): compiler specific options
valid (bool): is completer valid
version_str (str): version string of format "3.4.0"
error_vis (obj): an object of error visualizer
"""
name = "base"
valid = False
def __init__(self, settings, error_vis):
"""Initialize the BaseCompleter.
Args:
settings (SettingsStorage): an object that stores current settings
error_vis (ErrorVis): an object of error visualizer
Raises:
RuntimeError: if clang not defined we throw an error
"""
# check if clang binary is defined
if not settings.clang_binary:
raise RuntimeError("clang binary not defined")
self.compiler_variant = None
self.version_str = settings.clang_version
self.clang_binary = settings.clang_binary
# initialize error visualization
self.error_vis = error_vis
def complete(self, completion_request):
"""Function to generate completions. See children for implementation.
Args:
completion_request (ActionRequest): request object
Raises:
NotImplementedError: Guarantees we do not call this abstract method
"""
raise NotImplementedError("calling abstract method")
def info(self, tooltip_request):
"""Provide information about object in given location.
Using the current translation unit it queries libclang for available
information about cursor.
Args:
tooltip_request (tools.ActionRequest): A request for action
from the plugin.
Raises:
NotImplementedError: Guarantees we do not call this abstract method
"""
raise NotImplementedError("calling abstract method")
def update(self, view, settings):
"""Update the completer for this view.
This can increase consequent completion speeds or is needed to just
show errors.
Args:
view (sublime.View): this view
settings: all plugin settings
Raises:
NotImplementedError: Guarantees we do not call this abstract method
"""
raise NotImplementedError("calling abstract method")
def show_errors(self, view, output):
"""Show current complie errors.
Args:
view (sublime.View): Current view
output (object): opaque output to be parsed by compiler variant
"""
errors = self.compiler_variant.errors_from_output(output)
if not Tools.is_valid_view(view):
log.error("cannot show errors. View became invalid!")
return
self.error_vis.generate(view, errors)
self.error_vis.show_errors(view)
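# Hedged sketch (illustrative only, not part of the plugin): the minimal shape
# of a concrete completer. The class name and trivial bodies below are made up;
# real implementations subclass BaseCompleter in the sibling completer modules
# and override these methods with library/binary specific logic.
class _EchoCompleter(BaseCompleter):
    name = "echo"
    def complete(self, completion_request):
        return []  # no completions offered
    def info(self, tooltip_request):
        return None  # no tooltip information
    def update(self, view, settings):
        return True  # nothing to rebuild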
| mit | -550,637,532,913,715,000 | 29.425743 | 79 | 0.634884 | false | 4.94847 | false | false | false |
ssorj/haystack | python/haystack.py | 1 | 15893 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import brbn
import email.utils as _email
import json as _json
import logging as _logging
import os as _os
import quopri as _quopri
import re as _re
import sqlite3 as _sqlite
import time as _time
import textwrap as _textwrap
from datetime import datetime as _datetime
from pencil import *
_log = _logging.getLogger("haystack")
_strings = StringCatalog(__file__)
_topics = _json.loads(_strings["topics"])
class Haystack(brbn.Application):
def __init__(self, home_dir):
super().__init__(home_dir)
path = _os.path.join(self.home, "data", "data.sqlite")
self.database = Database(path)
self.root_resource = _IndexPage(self)
self.search_page = _SearchPage(self)
self.thread_page = _ThreadPage(self)
self.message_page = _MessagePage(self)
def receive_request(self, request):
request.database_connection = self.database.connect()
try:
return super().receive_request(request)
finally:
request.database_connection.close()
class _IndexPage(brbn.Page):
def __init__(self, app):
super().__init__(app, "/", _strings["index_page_body"])
def get_title(self, request):
return "Haystack"
@brbn.xml
def render_topics(self, request):
items = list()
for topic in _topics:
href = self.app.search_page.get_href(request, query=topic)
text = xml_escape(topic)
items.append(html_a(text, href))
return html_ul(items, class_="four-column")
class _SearchPage(brbn.Page):
def __init__(self, app):
super().__init__(app, "/search", _strings["search_page_body"])
def get_title(self, request):
query = request.get("query")
return "Search '{}'".format(query)
def render_query(self, request):
return request.get("query")
@brbn.xml
def render_threads(self, request):
query = request.get("query")
sql = ("select * from messages where id in "
"(select distinct thread_id from messages_fts "
" where messages_fts match ? limit 1000) "
"order by date desc")
escaped_query = query.replace("\"", "\"\"")
records = self.app.database.query(request, sql, escaped_query)
thread = Thread()
rows = list()
for record in records:
thread.load_from_record(record)
thread_link = thread.get_link(request)
row = [
thread_link,
xml_escape(thread.from_address),
thread.authored_words,
xml_escape(str(_email.formatdate(thread.date)[:-6])),
]
rows.append(row)
return html_table(rows, False, class_="messages four")
class _ThreadPage(brbn.Page):
def __init__(self, app):
super().__init__(app, "/thread", _strings["thread_page_body"])
def get_title(self, request):
return "Thread '{}'".format(request.thread.subject)
def process(self, request):
id = request.get("id")
request.thread = self.app.database.get(request, Message, id)
sql = ("select * from messages "
"where thread_id = ? "
"order by thread_position, date asc "
"limit 1000")
records = self.app.database.query(request, sql, request.thread.id)
request.messages = list()
request.messages_by_id = dict()
for record in records:
message = Message()
message.load_from_record(record)
request.messages.append(message)
request.messages_by_id[message.id] = message
def render_title(self, request):
return request.thread.subject
@brbn.xml
def render_index(self, request):
rows = list()
for i, message in enumerate(request.messages):
date = _time.strftime("%d %b %Y", _time.gmtime(message.date))
number = i + 1
title = self.get_message_title(request, message, number)
row = [
html_a(xml_escape(title), "#{}".format(number)),
xml_escape(date),
message.authored_words,
]
rows.append(row)
return html_table(rows, False, class_="messages")
@brbn.xml
def render_messages(self, request):
out = list()
for i, message in enumerate(request.messages):
number = i + 1
title = self.get_message_title(request, message, number)
out.append(html_elem("h2", title, id=str(number)))
out.append(html_elem("pre", xml_escape(message.content)))
return "\n".join(out)
def get_message_title(self, request, message, number):
title = "{}. {}".format(number, message.from_name)
if message.in_reply_to_id is not None:
rmessage = request.messages_by_id.get(message.in_reply_to_id)
if rmessage is not None:
rperson = rmessage.from_name
title = "{} replying to {}".format(title, rperson)
return title
class _MessagePage(brbn.Page):
def __init__(self, app):
super().__init__(app, "/message", _strings["message_page_body"])
def get_title(self, request):
return "Message '{}'".format(request.message.subject)
def process(self, request):
id = request.get("id")
request.message = self.app.database.get(request, Message, id)
def render_title(self, request):
return request.message.subject
@brbn.xml
def render_thread_link(self, request):
thread = None
thread_id = request.message.thread_id
thread_link = xml_escape(thread_id)
if thread_id is not None:
try:
thread = self.app.database.get(request, Message, thread_id)
except ObjectNotFound:
pass
if thread is not None:
thread_link = thread.get_link(request)
return thread_link
@brbn.xml
def render_in_reply_to_link(self, request):
rmessage = None
rmessage_id = request.message.in_reply_to_id
rmessage_link = nvl(xml_escape(rmessage_id), "[None]")
if rmessage_id is not None:
try:
                rmessage = self.app.database.get(request, Message, rmessage_id)
except ObjectNotFound:
pass
if rmessage is not None:
rmessage_link = rmessage.get_link(request)
return rmessage_link
@brbn.xml
def render_headers(self, request):
message = request.message
from_field = "{} <{}>".format(message.from_name, message.from_address)
items = (
("ID", xml_escape(message.id)),
("List", xml_escape(message.list_id)),
("From", xml_escape(from_field)),
("Date", xml_escape(_email.formatdate(message.date))),
("Subject", xml_escape(message.subject)),
)
return html_table(items, False, True, class_="headers")
@brbn.xml
def render_content(self, request):
message = request.message
content = ""
if message.content is not None:
lines = list()
for line in message.content.splitlines():
line = line.strip()
if line.startswith(">"):
m = _re.match("^[> ]+", line)
prefix = "\n{}".format(m.group(0))
line = prefix.join(_textwrap.wrap(line, 80))
line = html_span(xml_escape(line), class_="quoted")
else:
line = "\n".join(_textwrap.wrap(line, 80))
line = xml_escape(line)
lines.append(line)
content = "\n".join(lines)
return content
class Database:
def __init__(self, path):
self.path = path
_log.info("Using database at {}".format(self.path))
def connect(self):
# XXX thread local connections
return _sqlite.connect(self.path)
def create_schema(self):
columns = list()
for name in Message.fields:
field_type = Message.field_types.get(name, str)
column_type = "text"
if field_type == int:
column_type = "integer"
column = "{} {}".format(name, column_type)
columns.append(column)
statements = list()
columns = ", ".join(columns)
ddl = "create table messages ({});".format(columns)
statements.append(ddl)
ddl = "create index messages_id_idx on messages (id);"
statements.append(ddl)
columns = ", ".join(Message.fts_fields)
ddl = ("create virtual table messages_fts using fts4 "
"({}, notindexed=id, notindexed=thread_id, tokenize=porter)"
"".format(columns))
statements.append(ddl)
conn = self.connect()
cursor = conn.cursor()
try:
for statement in statements:
cursor.execute(statement)
finally:
conn.close()
def optimize(self):
conn = self.connect()
cursor = conn.cursor()
ddl = "insert into messages_fts (messages_fts) values ('optimize')"
try:
cursor.execute(ddl)
finally:
conn.close()
def cursor(self, request):
return request.database_connection.cursor()
def query(self, request, sql, *args):
cursor = self.cursor(request)
try:
cursor.execute(sql, args)
return cursor.fetchall()
finally:
cursor.close()
def get(self, request, cls, id):
_log.debug("Getting {} with ID {}".format(cls.__name__, id))
assert issubclass(cls, _DatabaseObject), cls
assert id is not None
sql = "select * from {} where id = ?".format(cls.table)
cursor = self.cursor(request)
try:
cursor.execute(sql, [id])
record = cursor.fetchone()
finally:
cursor.close()
if record is None:
raise ObjectNotFound()
obj = cls()
obj.load_from_record(record)
return obj
class ObjectNotFound(Exception):
pass
class _DatabaseObject:
table = None
def __init__(self, id, name, parent=None):
self.id = id
self._name = name
self.parent = parent
def __repr__(self):
return format_repr(self, self.id)
@property
def name(self):
return self._name
def get_link_href(self, request):
raise NotImplementedError()
def get_link_text(self, request):
return self.name
def get_link(self, request, text=None):
href = self.get_link_href(request)
if text is None:
text = self.get_link_text(request)
return "<a href=\"{}\">{}</a>".format(href, xml_escape(text))
class Message(_DatabaseObject):
table = "messages"
fields = [
"id",
"in_reply_to_id",
"from_name",
"from_address",
"list_id",
"date",
"subject",
"content_type",
"content",
"authored_content",
"authored_words",
"thread_id",
"thread_position",
]
field_types = {
"date": int,
"authored_words": int,
"thread_position": int,
}
field_mbox_keys = {
"id": "Message-ID",
"in_reply_to_id": "In-Reply-To",
"list_id": "List-Id",
"subject": "Subject",
"content_type": "Content-Type",
}
fts_fields = [
"id",
"thread_id",
"subject",
"authored_content",
]
def __init__(self):
super().__init__(None, None)
for name in self.fields:
setattr(self, name, None)
@property
def name(self):
return self.subject
def load_from_mbox_message(self, mbox_message):
for name in self.field_mbox_keys:
mbox_key = self.field_mbox_keys[name]
value = mbox_message.get(mbox_key)
field_type = self.field_types.get(name, str)
if value is not None:
value = field_type(value)
setattr(self, name, value)
name, address = _email.parseaddr(mbox_message["From"])
self.from_name = name
self.from_address = address
tup = _email.parsedate(mbox_message["Date"])
self.date = _time.mktime(tup)
content = _get_mbox_content(mbox_message)
assert content is not None
self.content = content
self.authored_content = _get_authored_content(self.content)
self.authored_words = len(self.authored_content.split())
def load_from_record(self, record):
for i, name in enumerate(self.fields):
value = record[i]
field_type = self.field_types.get(name, str)
if value is not None:
value = field_type(value)
setattr(self, name, value)
def save(self, cursor):
columns = ", ".join(self.fields)
values = ", ".join("?" * len(self.fields))
args = [getattr(self, x) for x in self.fields]
dml = "insert into messages ({}) values ({})".format(columns, values)
cursor.execute(dml, args)
columns = ", ".join(self.fts_fields)
values = ", ".join("?" * len(self.fts_fields))
args = [getattr(self, x) for x in self.fts_fields]
dml = "insert into messages_fts ({}) values ({})".format(columns, values)
cursor.execute(dml, args)
def get_link_href(self, request):
return request.app.message_page.get_href(request, id=self.id)
def get_link_title(self, request):
return self.subject
class Thread(Message):
def get_link_href(self, request):
return request.app.thread_page.get_href(request, id=self.id)
def _get_mbox_content(mbox_message):
content_type = None
content_encoding = None
content = None
if mbox_message.is_multipart():
for part in mbox_message.walk():
if part.get_content_type() == "text/plain":
content_type = "text/plain"
content_encoding = part["Content-Transfer-Encoding"]
content = part.get_payload()
if content_type is None:
content_type = mbox_message.get_content_type()
content_encoding = mbox_message["Content-Transfer-Encoding"]
content = mbox_message.get_payload()
assert content_type is not None
assert content is not None
if content_encoding == "quoted-printable":
content = _quopri.decodestring(content)
content = content.decode("utf-8", errors="replace")
if content_type == "text/html":
content = strip_tags(content)
return content
def _get_authored_content(content):
lines = list()
for line in content.splitlines():
line = line.strip()
if line.startswith(">"):
continue
lines.append(line)
return "\n".join(lines)
| apache-2.0 | 1,053,662,476,856,937,300 | 27.129204 | 81 | 0.569118 | false | 3.950534 | false | false | false |
yuyuz/FLASH | tests/unittests/test_benchmark_util.py | 5 | 2920 | ##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import sys
import HPOlib.benchmark_util as benchmark_util
class BenchmarkUtilTest(unittest.TestCase):
def setUp(self):
# Change into the parent of the test directory
os.chdir(os.path.join("..", os.path.dirname(os.path.realpath(__file__))))
# Make sure there is no config file
try:
os.remove("./config.cfg")
except:
pass
def test_read_parameters_from_command_line(self):
# Legal call
sys.argv = ["test.py", "--folds", "10", "--fold", "0", "--params", "-x",
"3"]
args, params = benchmark_util.parse_cli()
self.assertEqual(params, {'x': '3'})
self.assertEqual(args, {'folds': '10', 'fold': '0'})
# illegal call, arguments with one minus before --params
sys.argv = ["test.py", "-folds", "10", "--fold", "0", "--params", "-x",
"3"]
with self.assertRaises(ValueError) as cm1:
benchmark_util.parse_cli()
self.assertEqual(cm1.exception.message, "You either try to use arguments"
" with only one leading minus or try to specify a "
"hyperparameter before the --params argument. test.py"
" -folds 10 --fold 0 --params -x 3")
# illegal call, trying to specify an arguments after --params
sys.argv = ["test.py", "--folds", "10", "--params", "-x",
"'3'", "--fold", "0"]
with self.assertRaises(ValueError) as cm5:
benchmark_util.parse_cli()
self.assertEqual(cm5.exception.message, "You are trying to specify an argument after the "
"--params argument. Please change the order.")
# illegal call, no - in front of parameter name
sys.argv = ["test_cv.py", "--params", "x", "'5'"]
with self.assertRaises(ValueError) as cm2:
benchmark_util.parse_cli()
self.assertEqual(cm2.exception.message, "Illegal command line string, expected a hyperpara"
"meter starting with - but found x") | gpl-3.0 | -8,210,264,829,300,046,000 | 41.955882 | 99 | 0.612671 | false | 4.118477 | true | false | false |
paris-ci/CloudBot | plugins/core_misc.py | 1 | 3369 | import asyncio
import socket
from cloudbot import hook
socket.setdefaulttimeout(10)
# Auto-join on Invite (Configurable, defaults to True)
@asyncio.coroutine
@hook.irc_raw('INVITE')
def invite(irc_paramlist, conn, event):
"""
:type irc_paramlist: list[str]
:type conn: cloudbot.client.Client
"""
invite_join = conn.config.get('invite_join', True)
if invite_join:
conn.join(irc_paramlist[-1])
invite = event.irc_raw.replace(":", "")
        head, sep, tail = invite.split()[0].partition('!')
# message(invite.split()[0] + " invited me to " + invite.split()[-1], invite.split()[-1])
conn.message(irc_paramlist[-1].strip(":"),
"Hello! I'm " + conn.config["nick"] + ". " + head + " invited me here! Check what I can do with " + conn.config[
"command_prefix"] + "help.")
conn.message(irc_paramlist[-1].strip(":"),
"You can check more info about me at github : https://github.com/paris-ci/CloudBot")
# Identify to NickServ (or other service)
@asyncio.coroutine
@hook.irc_raw('004')
def onjoin(conn, bot):
"""
:type conn: cloudbot.clients.clients.IrcClient
:type bot: cloudbot.bot.CloudBot
"""
bot.logger.info("[{}|misc] Bot is sending join commands for network.".format(conn.name))
nickserv = conn.config.get('nickserv')
if nickserv and nickserv.get("enabled", True):
bot.logger.info("[{}|misc] Bot is authenticating with NickServ.".format(conn.name))
nickserv_password = nickserv.get('nickserv_password', '')
nickserv_name = nickserv.get('nickserv_name', 'nickserv')
nickserv_account_name = nickserv.get('nickserv_user', '')
nickserv_command = nickserv.get('nickserv_command', 'IDENTIFY')
if nickserv_password:
if "censored_strings" in bot.config and nickserv_password in bot.config['censored_strings']:
bot.config['censored_strings'].remove(nickserv_password)
if nickserv_account_name:
conn.message(nickserv_name, "{} {} {}".format(nickserv_command,
nickserv_account_name, nickserv_password))
else:
conn.message(nickserv_name, "{} {}".format(nickserv_command, nickserv_password))
if "censored_strings" in bot.config:
bot.config['censored_strings'].append(nickserv_password)
yield from asyncio.sleep(1)
# Set bot modes
mode = conn.config.get('mode')
if mode:
bot.logger.info("[{}|misc] Bot is setting mode on itself: {}".format(conn.name, mode))
conn.cmd('MODE', conn.nick, mode)
# Join config-defined channels
bot.logger.info("[{}|misc] Bot is joining channels for network.".format(conn.name))
for channel in conn.channels:
conn.join(channel)
yield from asyncio.sleep(0.4)
conn.ready = True
bot.logger.info("[{}|misc] Bot has finished sending join commands for network.".format(conn.name))
@asyncio.coroutine
@hook.irc_raw('004')
def keep_alive(conn):
"""
:type conn: cloudbot.clients.clients.IrcClient
"""
keepalive = conn.config.get('keep_alive', False)
if keepalive:
while True:
conn.cmd('PING', conn.nick)
yield from asyncio.sleep(60)
| gpl-3.0 | -2,460,782,346,248,535,600 | 38.635294 | 133 | 0.61027 | false | 3.614807 | true | false | false |
gjr80/weewx | bin/weewx/drivers/wmr100.py | 3 | 17667 | #
# Copyright (c) 2009-2015 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
"""Classees and functions for interfacing with an Oregon Scientific WMR100
station. The WMRS200 reportedly works with this driver (NOT the WMR200, which
is a different beast).
The wind sensor reports wind speed, wind direction, and wind gust. It does
not report wind gust direction.
WMR89:
- data logger
- up to 3 channels
- protocol 3 sensors
- THGN800, PRCR800, WTG800
WMR86:
- no data logger
- protocol 3 sensors
- THGR800, WGR800, PCR800, UVN800
The following references were useful for figuring out the WMR protocol:
From Per Ejeklint:
https://github.com/ejeklint/WLoggerDaemon/blob/master/Station_protocol.md
From Rainer Finkeldeh:
http://www.bashewa.com/wmr200-protocol.php
The WMR driver for the wfrog weather system:
http://code.google.com/p/wfrog/source/browse/trunk/wfdriver/station/wmrs200.py
Unfortunately, there is no documentation for PyUSB v0.4, so you have to back
it out of the source code, available at:
https://pyusb.svn.sourceforge.net/svnroot/pyusb/branches/0.4/pyusb.c
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import time
import operator
from functools import reduce
import usb
import weewx.drivers
import weewx.wxformulas
import weeutil.weeutil
log = logging.getLogger(__name__)
DRIVER_NAME = 'WMR100'
DRIVER_VERSION = "3.5.0"
def loader(config_dict, engine): # @UnusedVariable
return WMR100(**config_dict[DRIVER_NAME])
def confeditor_loader():
return WMR100ConfEditor()
class WMR100(weewx.drivers.AbstractDevice):
"""Driver for the WMR100 station."""
DEFAULT_MAP = {
'pressure': 'pressure',
'windSpeed': 'wind_speed',
'windDir': 'wind_dir',
'windGust': 'wind_gust',
'windBatteryStatus': 'battery_status_wind',
'inTemp': 'temperature_0',
'outTemp': 'temperature_1',
'extraTemp1': 'temperature_2',
'extraTemp2': 'temperature_3',
'extraTemp3': 'temperature_4',
'extraTemp4': 'temperature_5',
'extraTemp5': 'temperature_6',
'extraTemp6': 'temperature_7',
'extraTemp7': 'temperature_8',
'inHumidity': 'humidity_0',
'outHumidity': 'humidity_1',
'extraHumid1': 'humidity_2',
'extraHumid2': 'humidity_3',
'extraHumid3': 'humidity_4',
'extraHumid4': 'humidity_5',
'extraHumid5': 'humidity_6',
'extraHumid6': 'humidity_7',
'extraHumid7': 'humidity_8',
'inTempBatteryStatus': 'battery_status_0',
'outTempBatteryStatus': 'battery_status_1',
'extraBatteryStatus1': 'battery_status_2',
'extraBatteryStatus2': 'battery_status_3',
'extraBatteryStatus3': 'battery_status_4',
'extraBatteryStatus4': 'battery_status_5',
'extraBatteryStatus5': 'battery_status_6',
'extraBatteryStatus6': 'battery_status_7',
'extraBatteryStatus7': 'battery_status_8',
'rain': 'rain',
'rainTotal': 'rain_total',
'rainRate': 'rain_rate',
'hourRain': 'rain_hour',
'rain24': 'rain_24',
'rainBatteryStatus': 'battery_status_rain',
'UV': 'uv',
'uvBatteryStatus': 'battery_status_uv'}
def __init__(self, **stn_dict):
"""Initialize an object of type WMR100.
NAMED ARGUMENTS:
model: Which station model is this?
[Optional. Default is 'WMR100']
timeout: How long to wait, in seconds, before giving up on a response
from the USB port.
[Optional. Default is 15 seconds]
wait_before_retry: How long to wait before retrying.
[Optional. Default is 5 seconds]
max_tries: How many times to try before giving up.
[Optional. Default is 3]
vendor_id: The USB vendor ID for the WMR
[Optional. Default is 0xfde]
product_id: The USB product ID for the WM
[Optional. Default is 0xca01]
interface: The USB interface
[Optional. Default is 0]
IN_endpoint: The IN USB endpoint used by the WMR.
[Optional. Default is usb.ENDPOINT_IN + 1]
"""
log.info('Driver version is %s' % DRIVER_VERSION)
self.model = stn_dict.get('model', 'WMR100')
# TODO: Consider putting these in the driver loader instead:
self.record_generation = stn_dict.get('record_generation', 'software')
self.timeout = float(stn_dict.get('timeout', 15.0))
self.wait_before_retry = float(stn_dict.get('wait_before_retry', 5.0))
self.max_tries = int(stn_dict.get('max_tries', 3))
self.vendor_id = int(stn_dict.get('vendor_id', '0x0fde'), 0)
self.product_id = int(stn_dict.get('product_id', '0xca01'), 0)
self.interface = int(stn_dict.get('interface', 0))
self.IN_endpoint = int(stn_dict.get('IN_endpoint', usb.ENDPOINT_IN + 1))
self.sensor_map = dict(self.DEFAULT_MAP)
if 'sensor_map' in stn_dict:
self.sensor_map.update(stn_dict['sensor_map'])
log.info('Sensor map is %s' % self.sensor_map)
self.last_rain_total = None
self.devh = None
self.openPort()
def openPort(self):
dev = self._findDevice()
if not dev:
log.error("Unable to find USB device (0x%04x, 0x%04x)"
% (self.vendor_id, self.product_id))
raise weewx.WeeWxIOError("Unable to find USB device")
self.devh = dev.open()
# Detach any old claimed interfaces
try:
self.devh.detachKernelDriver(self.interface)
except usb.USBError:
pass
try:
self.devh.claimInterface(self.interface)
except usb.USBError as e:
self.closePort()
log.error("Unable to claim USB interface: %s" % e)
raise weewx.WeeWxIOError(e)
def closePort(self):
try:
self.devh.releaseInterface()
except usb.USBError:
pass
try:
self.devh.detachKernelDriver(self.interface)
except usb.USBError:
pass
def genLoopPackets(self):
"""Generator function that continuously returns loop packets"""
# Get a stream of raw packets, then convert them, depending on the
# observation type.
for _packet in self.genPackets():
try:
_packet_type = _packet[1]
if _packet_type in WMR100._dispatch_dict:
# get the observations from the packet
_raw = WMR100._dispatch_dict[_packet_type](self, _packet)
if _raw is not None:
# map the packet labels to schema fields
_record = dict()
for k in self.sensor_map:
if self.sensor_map[k] in _raw:
_record[k] = _raw[self.sensor_map[k]]
# if there are any observations, add time and units
if _record:
for k in ['dateTime', 'usUnits']:
_record[k] = _raw[k]
yield _record
except IndexError:
log.error("Malformed packet: %s" % _packet)
def genPackets(self):
"""Generate measurement packets. These are 8 to 17 byte long packets containing
the raw measurement data.
For a pretty good summary of what's in these packets see
https://github.com/ejeklint/WLoggerDaemon/blob/master/Station_protocol.md
"""
# Wrap the byte generator function in GenWithPeek so we
# can peek at the next byte in the stream. The result, the variable
# genBytes, will be a generator function.
genBytes = weeutil.weeutil.GenWithPeek(self._genBytes_raw())
# Start by throwing away any partial packets:
for ibyte in genBytes:
if genBytes.peek() != 0xff:
break
buff = []
# March through the bytes generated by the generator function genBytes:
for ibyte in genBytes:
# If both this byte and the next one are 0xff, then we are at the end of a record
if ibyte == 0xff and genBytes.peek() == 0xff:
# We are at the end of a packet.
# Compute its checksum. This can throw an exception if the packet is empty.
try:
computed_checksum = reduce(operator.iadd, buff[:-2])
except TypeError as e:
log.debug("Exception while calculating checksum: %s" % e)
else:
actual_checksum = (buff[-1] << 8) + buff[-2]
if computed_checksum == actual_checksum:
# Looks good. Yield the packet
yield buff
else:
log.debug("Bad checksum on buffer of length %d" % len(buff))
# Throw away the next character (which will be 0xff):
next(genBytes)
# Start with a fresh buffer
buff = []
else:
buff.append(ibyte)
@property
def hardware_name(self):
return self.model
#===============================================================================
# USB functions
#===============================================================================
def _findDevice(self):
"""Find the given vendor and product IDs on the USB bus"""
for bus in usb.busses():
for dev in bus.devices:
if dev.idVendor == self.vendor_id and dev.idProduct == self.product_id:
return dev
def _genBytes_raw(self):
"""Generates a sequence of bytes from the WMR USB reports."""
try:
# Only need to be sent after a reset or power failure of the station:
self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE, # requestType
0x0000009, # request
[0x20,0x00,0x08,0x01,0x00,0x00,0x00,0x00], # buffer
0x0000200, # value
0x0000000, # index
1000) # timeout
except usb.USBError as e:
log.error("Unable to send USB control message: %s" % e)
# Convert to a Weewx error:
raise weewx.WakeupError(e)
nerrors = 0
while True:
try:
# Continually loop, retrieving "USB reports". They are 8 bytes long each.
report = self.devh.interruptRead(self.IN_endpoint,
8, # bytes to read
int(self.timeout * 1000))
# While the report is 8 bytes long, only a smaller, variable portion of it
# has measurement data. This amount is given by byte zero. Return each
# byte, starting with byte one:
for i in range(1, report[0] + 1):
yield report[i]
nerrors = 0
except (IndexError, usb.USBError) as e:
log.debug("Bad USB report received: %s" % e)
nerrors += 1
if nerrors > self.max_tries:
log.error("Max retries exceeded while fetching USB reports")
raise weewx.RetriesExceeded("Max retries exceeded while fetching USB reports")
time.sleep(self.wait_before_retry)
# =========================================================================
# LOOP packet decoding functions
#==========================================================================
def _rain_packet(self, packet):
# NB: in my experiments with the WMR100, it registers in increments of
# 0.04 inches. Per Ejeklint's notes have you divide the packet values
# by 10, but this would result in an 0.4 inch bucket --- too big. So,
# I'm dividing by 100.
_record = {
'rain_rate' : ((packet[3] << 8) + packet[2]) / 100.0,
'rain_hour' : ((packet[5] << 8) + packet[4]) / 100.0,
'rain_24' : ((packet[7] << 8) + packet[6]) / 100.0,
'rain_total' : ((packet[9] << 8) + packet[8]) / 100.0,
'battery_status_rain': packet[0] >> 4,
'dateTime': int(time.time() + 0.5),
'usUnits': weewx.US}
# Because the WMR does not offer anything like bucket tips, we must
# calculate it by looking for the change in total rain. Of course, this
# won't work for the very first rain packet.
_record['rain'] = weewx.wxformulas.calculate_rain(
_record['rain_total'], self.last_rain_total)
self.last_rain_total = _record['rain_total']
return _record
def _temperature_packet(self, packet):
_record = {'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRIC}
# Per Ejeklint's notes don't mention what to do if temperature is
# negative. I think the following is correct. Also, from experience, we
# know that the WMR has problems measuring dewpoint at temperatures
# below about 20F. So ignore dewpoint and let weewx calculate it.
T = (((packet[4] & 0x7f) << 8) + packet[3]) / 10.0
if packet[4] & 0x80:
T = -T
R = float(packet[5])
channel = packet[2] & 0x0f
_record['temperature_%d' % channel] = T
_record['humidity_%d' % channel] = R
_record['battery_status_%d' % channel] = (packet[0] & 0x40) >> 6
return _record
def _temperatureonly_packet(self, packet):
# function added by fstuyk to manage temperature-only sensor THWR800
_record = {'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRIC}
# Per Ejeklint's notes don't mention what to do if temperature is
# negative. I think the following is correct.
T = (((packet[4] & 0x7f) << 8) + packet[3])/10.0
if packet[4] & 0x80:
T = -T
channel = packet[2] & 0x0f
_record['temperature_%d' % channel] = T
_record['battery_status_%d' % channel] = (packet[0] & 0x40) >> 6
return _record
def _pressure_packet(self, packet):
# Although the WMR100 emits SLP, not all consoles in the series
# (notably, the WMRS200) allow the user to set altitude. So we
# record only the station pressure (raw gauge pressure).
SP = float(((packet[3] & 0x0f) << 8) + packet[2])
_record = {'pressure': SP,
'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRIC}
return _record
def _uv_packet(self, packet):
_record = {'uv': float(packet[3]),
'battery_status_uv': packet[0] >> 4,
'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRIC}
return _record
def _wind_packet(self, packet):
"""Decode a wind packet. Wind speed will be in kph"""
_record = {
'wind_speed': ((packet[6] << 4) + ((packet[5]) >> 4)) / 10.0,
'wind_gust': (((packet[5] & 0x0f) << 8) + packet[4]) / 10.0,
'wind_dir': (packet[2] & 0x0f) * 360.0 / 16.0,
'battery_status_wind': (packet[0] >> 4),
'dateTime': int(time.time() + 0.5),
'usUnits': weewx.METRICWX}
# Sometimes the station emits a wind gust that is less than the
# average wind. If this happens, ignore it.
if _record['wind_gust'] < _record['wind_speed']:
_record['wind_gust'] = None
return _record
def _clock_packet(self, packet):
"""The clock packet is not used by weewx. However, the last time is
saved in case getTime() is called."""
tt = (2000 + packet[8], packet[7], packet[6], packet[5], packet[4], 0, 0, 0, -1)
self.last_time = time.mktime(tt)
return None
# Dictionary that maps a measurement code, to a function that can decode it
_dispatch_dict = {0x41: _rain_packet,
0x42: _temperature_packet,
0x46: _pressure_packet,
0x47: _uv_packet,
0x48: _wind_packet,
0x60: _clock_packet,
0x44: _temperatureonly_packet}
class WMR100ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[WMR100]
# This section is for the Oregon Scientific WMR100
# The driver to use
driver = weewx.drivers.wmr100
# The station model, e.g., WMR100, WMR100N, WMRS200
model = WMR100
"""
def modify_config(self, config_dict):
print("""
Setting rainRate calculation to hardware.""")
config_dict.setdefault('StdWXCalculate', {})
config_dict['StdWXCalculate'].setdefault('Calculations', {})
config_dict['StdWXCalculate']['Calculations']['rainRate'] = 'hardware'
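# Hedged illustration (not part of the driver): the framing rule used by
# genPackets() above, applied to a made-up packet body. The last two bytes of
# a record carry a little-endian checksum equal to the sum of all preceding
# bytes, and records are separated by a pair of 0xff bytes on the USB stream.
if __name__ == "__main__":
    body = [0x00, 0x42, 0x40, 0x02, 0xd2, 0x00, 0x39]   # hypothetical bytes
    checksum = sum(body)
    record = body + [checksum & 0xff, checksum >> 8]    # low byte, then high byte
    computed_checksum = reduce(operator.iadd, record[:-2])
    actual_checksum = (record[-1] << 8) + record[-2]
    print(computed_checksum == actual_checksum)         # -> True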
| gpl-3.0 | 4,452,557,909,837,226,000 | 39.243736 | 98 | 0.54299 | false | 3.907764 | true | false | false |
alexsavio/aizkolari | aizkolari_measure.py | 1 | 16654 | #!/usr/bin/python
#-------------------------------------------------------------------------------
#License GPL v3.0
#Author: Alexandre Manhaes Savio <[email protected]>
#Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
#Universidad del Pais Vasco UPV/EHU
#Use this at your own risk!
#-------------------------------------------------------------------------------
#DEPENDENCIES:
#sudo apt-get install python-argparse python-numpy python-numpy-ext python-matplotlib python-scipy python-nibabel
#For development:
#sudo apt-get install ipython python-nifti python-nitime
#from IPython.core.debugger import Tracer; debug_here = Tracer()
import os
import sys
import argparse
import logging
import numpy as np
import nibabel as nib
import aizkolari_utils as au
import aizkolari_preproc as pre
import aizkolari_pearson as pear
import aizkolari_bhattacharyya as bat
import aizkolari_ttest as ttst
import aizkolari_postproc as post
#-------------------------------------------------------------------------------
def set_parser():
    parser = argparse.ArgumentParser(description='Slices and puts together a list of subjects to perform voxel-wise group calculations, e.g., Pearson correlations and Bhattacharyya distance. \n The Pearson correlation is calculated between each voxel site for all subjects and the class label vector of the same subjects. \n The Bhattacharyya distance is calculated between each pair of groups using voxelwise Gaussian univariate distributions of each group. \n The Student t-test is calculated as a Welch t-test where the two population variances are assumed to be different.')
parser.add_argument('-c', '--classesf', dest='classes', required=True, help='class label file. one line per class: <class_label>,<class_name>.')
parser.add_argument('-i', '--insubjsf', dest='subjs', required=True, help='file with a list of the volume files and its labels for the analysis. Each line: <class_label>,<subject_file>')
parser.add_argument('-o', '--outdir', dest='outdir', required=True, help='name of the output directory where the results will be put.')
parser.add_argument('-e', '--exclude', dest='exclude', default='', required=False, help='subject list mask, i.e., text file where each line has 0 or 1 indicating with 1 which subject should be excluded in the measure. To help calculating measures for cross-validation folds, for leave-one-out you can use the -l option.')
parser.add_argument('-l', '--leave', dest='leave', default=-1, required=False, type=int, help='index from subject list (counting from 0) indicating one subject to be left out of the measure. For leave-one-out measures.')
parser.add_argument('-d', '--datadir', dest='datadir', required=False, help='folder path where the subjects are, if the absolute path is not included in the subjects list file.', default='')
    parser.add_argument('-m', '--mask', dest='mask', required=False, default='', help='Brain mask volume file for all subjects.')
parser.add_argument('-n', '--measure', dest='measure', default='pearson', choices=['pearson','bhatta','bhattacharyya','ttest'], required=False, help='name of the distance/correlation method. Allowed: pearson (Pearson Correlation), bhatta (Bhattacharyya distance), ttest (Student`s t-test). (default: pearson)')
parser.add_argument('-k', '--cleanup', dest='cleanup', action='store_true', help='if you want to clean up all the temp files after processing')
parser.add_argument('-f', '--foldno', dest='foldno', required=False, type=int, default=-1, help='number to identify the fold for this run, in case you will run many different folds.')
parser.add_argument('-x', '--expname', dest='expname', required=False, type=str, default='', help='name to identify this run, in case you will run many different experiments.')
parser.add_argument('-a', '--absolute', dest='absolute', required=False, action='store_true', help='put this if you want absolute values of the measure.')
parser.add_argument('-v', '--verbosity', dest='verbosity', required=False, type=int, default=2, help='Verbosity level: Integer where 0 for Errors, 1 for Progression reports, 2 for Debug reports')
parser.add_argument('--checklist', dest='checklist', required=False, action='store_true', help='If set will use and update a checklist file, which will control the steps already done in case the process is interrupted.')
return parser
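# Hedged illustration (added comment, not part of the original script): the
# two input files expected by -c and -i, with hypothetical names and paths.
#
#   classes.txt           subjects.txt
#   -----------           --------------------------------
#   0,controls            0,controls/subj001_mprage.nii.gz
#   1,patients            1,patients/subj014_mprage.nii.gz
#
# A leave-one-out Pearson run for fold 3, leaving subject index 3 out, could
# then look like:
#   aizkolari_measure.py -c classes.txt -i subjects.txt -o out -m mask.nii.gz \
#       -n pearson -f 3 -l 3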
#-------------------------------------------------------------------------------
def decide_whether_usemask (maskfname):
usemask = False
if maskfname:
usemask = True
if usemask:
if not os.path.exists(maskfname):
print ('Mask file ' + maskfname + ' not found!')
usemask = False
return usemask
#-------------------------------------------------------------------------------
def get_fold_numberstr (foldno):
if foldno == -1: return ''
    else: return au.zeropad (foldno)
#-------------------------------------------------------------------------------
def get_measure_shortname (measure_name):
if measure_name == 'bhattacharyya' or measure_name == 'bhatta':
measure = 'bat'
elif measure_name == 'pearson':
measure = 'pea'
elif measure_name == 'ttest':
measure = 'ttest'
return measure
#-------------------------------------------------------------------------------
def parse_labels_file (classf):
labels = []
classnames = []
labfile = open(classf, 'r')
for l in labfile:
line = l.strip().split(',')
labels .append (int(line[0]))
classnames.append (line[1])
labfile.close()
return [labels, classnames]
#-------------------------------------------------------------------------------
def parse_subjects_list (subjsfname, datadir=''):
subjlabels = []
subjs = []
if datadir:
datadir += os.path.sep
try:
subjfile = open(subjsfname, 'r')
for s in subjfile:
line = s.strip().split(',')
subjlabels.append(int(line[0]))
subjfname = line[1].strip()
if not os.path.isabs(subjfname):
subjs.append (datadir + subjfname)
else:
subjs.append (subjfname)
subjfile.close()
except:
log.error( "Unexpected error: ", sys.exc_info()[0] )
sys.exit(-1)
return [subjlabels, subjs]
#-------------------------------------------------------------------------------
def parse_exclude_list (excluf, leave=-1):
excluded =[]
if (excluf):
try:
excluded = np.loadtxt(excluf, dtype=int)
#if leave already excluded, dont take it into account
if leave > -1:
if excluded[leave] == 1:
au.log.warn ('Your left out subject (-l) is already being excluded in the exclusion file (-e).')
leave = -1
except:
au.log.error ('Error processing file ' + excluf)
au.log.error ('Unexpected error: ' + str(sys.exc_info()[0]))
sys.exit(-1)
return excluded
#-------------------------------------------------------------------------------
def main(argv=None):
parser = set_parser()
try:
args = parser.parse_args ()
except argparse.ArgumentError, exc:
print (exc.message + '\n' + exc.argument)
        parser.error(str(exc))
return -1
datadir = args.datadir.strip()
classf = args.classes.strip()
subjsf = args.subjs.strip()
maskf = args.mask.strip()
outdir = args.outdir.strip()
excluf = args.exclude.strip()
measure = args.measure.strip()
expname = args.expname.strip()
foldno = args.foldno
cleanup = args.cleanup
leave = args.leave
absval = args.absolute
verbose = args.verbosity
chklst = args.checklist
au.setup_logger(verbose)
usemask = decide_whether_usemask(maskf)
foldno = get_fold_numberstr (foldno)
measure = get_measure_shortname (measure)
classnum = au.file_len(classf)
subjsnum = au.file_len(subjsf)
#reading label file
[labels, classnames] = parse_labels_file (classf)
#reading subjects list
[subjlabels, subjs] = parse_subjects_list (subjsf, datadir)
#if output dir does not exist, create
if not(os.path.exists(outdir)):
os.mkdir(outdir)
#checklist_fname
if chklst:
chkf = outdir + os.path.sep + au.checklist_str()
if not(os.path.exists(chkf)):
au.touch(chkf)
else:
chkf = ''
#saving data in files where further processes can find them
outf_subjs = outdir + os.path.sep + au.subjects_str()
outf_labels = outdir + os.path.sep + au.labels_str()
np.savetxt(outf_subjs, subjs, fmt='%s')
np.savetxt(outf_labels, subjlabels, fmt='%i')
#creating folder for slices
slidir = outdir + os.path.sep + au.slices_str()
if not(os.path.exists(slidir)):
os.mkdir(slidir)
#slice the volumes
#creating group and mask slices
pre.slice_and_merge(outf_subjs, outf_labels, chkf, outdir, maskf)
#creating measure output folder
if measure == 'pea':
measure_fname = au.pearson_str()
elif measure == 'bat':
measure_fname = au.bhattacharyya_str()
elif measure == 'ttest':
measure_fname = au.ttest_str()
#checking the leave parameter
if leave > (subjsnum - 1):
au.log.warning('aizkolari_measure: the leave (-l) argument value is ' + str(leave) + ', bigger than the last index of subject: ' + str(subjsnum - 1) + '. Im setting it to -1.')
leave = -1
#reading exclusion list
excluded = parse_exclude_list (excluf, leave)
#setting the output folder mdir extension
mdir = outdir + os.path.sep + measure_fname
if expname:
mdir += '_' + expname
if foldno:
mdir += '_' + foldno
#setting the stats folder
statsdir = outdir + os.path.sep + au.stats_str()
if expname:
statsdir += '_' + expname
if foldno:
statsdir += '_' + foldno
#setting a string with step parameters
step_params = ' ' + measure_fname + ' ' + mdir
absolute_str = ''
if absval:
absolute_str = ' ' + au.abs_str()
step_params += absolute_str
leave_str = ''
if leave > -1:
leave_str = ' excluding subject ' + str(leave)
step_params += leave_str
#checking if this measure has already been done
endstep = au.measure_str() + step_params
stepdone = au.is_done(chkf, endstep)
#add pluses to output dir if it already exists
if stepdone:
while os.path.exists (mdir):
mdir += '+'
else: #work in the last folder used
plus = False
while os.path.exists (mdir):
mdir += '+'
plus = True
if plus:
mdir = mdir[0:-1]
#setting statsdir
pluses = mdir.count('+')
for i in np.arange(pluses): statsdir += '+'
#merging mask slices to mdir
if not stepdone:
#creating output folders
if not os.path.exists (mdir):
os.mkdir(mdir)
#copying files to mdir
au.copy(outf_subjs, mdir)
au.copy(outf_labels, mdir)
#saving exclude files in mdir
outf_exclude = ''
if (excluf):
outf_exclude = au.exclude_str()
if expname:
outf_exclude += '_' + expname
            if foldno:
                outf_exclude += '_' + foldno
np.savetxt(outdir + os.path.sep + outf_exclude , excluded, fmt='%i')
np.savetxt(mdir + os.path.sep + au.exclude_str(), excluded, fmt='%i')
excluf = mdir + os.path.sep + au.exclude_str()
step = au.maskmerging_str() + ' ' + measure_fname + ' ' + mdir
if usemask and not au.is_done(chkf, step):
maskregex = au.mask_str() + '_' + au.slice_str() + '*'
post.merge_slices (slidir, maskregex, au.mask_str(), mdir, False)
au.checklist_add(chkf, step)
#CORRELATION
#read the measure argument and start processing
if measure == 'pea':
#measure pearson correlation for each population slice
step = au.measureperslice_str() + step_params
if not au.is_done(chkf, step):
pear.pearson_correlation (outdir, mdir, usemask, excluf, leave)
au.checklist_add(chkf, step)
#merge all correlation slice measures
step = au.postmerging_str() + step_params
if not au.is_done(chkf, step):
pearegex = au.pearson_str() + '_' + au.slice_str() + '*'
peameasf = mdir + os.path.sep + au.pearson_str()
if leave > -1:
pearegex += '_' + au.excluded_str() + str(leave) + '*'
peameasf += '_' + au.excluded_str() + str(leave) + '_' + au.pearson_str()
post.merge_slices (mdir, pearegex, peameasf, mdir)
if absval:
post.change_to_absolute_values(peameasf)
au.checklist_add(chkf, step)
#BHATTACHARYYA AND T-TEST
elif measure == 'bat' or measure == 'ttest':
if not os.path.exists (statsdir):
os.mkdir(statsdir)
gsize = np.zeros([len(classnames),2], dtype=int)
for c in range(len(classnames)):
gname = classnames[c]
glabel = labels [c]
godir = mdir + os.path.sep + gname
au.log.debug ('Processing group ' + gname)
gselect = np.zeros(len(subjs))
gsubjs = list()
glabels = list()
for s in range(len(subjs)):
slabel = subjlabels[s]
if slabel == glabel:
gsubjs .append (subjs[s])
glabels.append (slabel)
gselect[s] = 1
if outf_exclude:
if excluded[s]:
gselect[s] = 0
gsize[c,0] = glabel
gsize[c,1] = np.sum(gselect)
outf_subjs = mdir + os.path.sep + gname + '_' + au.subjects_str()
outf_labels = mdir + os.path.sep + gname + '_' + au.labels_str()
outf_group = mdir + os.path.sep + gname + '_' + au.members_str()
np.savetxt(outf_subjs , gsubjs, fmt='%s')
np.savetxt(outf_labels, glabels, fmt='%i')
np.savetxt(outf_group , gselect, fmt='%i')
step = au.groupfilter_str() + ' ' + gname + ' ' + statsdir
if not au.is_done(chkf, step):
au.group_filter (outdir, statsdir, gname, outf_group, usemask)
au.checklist_add(chkf, step)
grp_step_params = ' ' + au.stats_str() + ' ' + gname + ' ' + statsdir
step = au.measureperslice_str() + grp_step_params
if not au.is_done(chkf, step):
post.group_stats (statsdir, gname, gsize[c,1], statsdir)
au.checklist_add(chkf, step)
statfnames = {}
step = au.postmerging_str() + grp_step_params
if not au.is_done(chkf, step):
statfnames[gname] = post.merge_stats_slices (statsdir, gname)
au.checklist_add(chkf, step)
sampsizef = mdir + os.path.sep + au.groupsizes_str()
np.savetxt(sampsizef, gsize, fmt='%i,%i')
#decide which group distance function to use
if measure == 'bat':
distance_func = bat.measure_bhattacharyya_distance
elif measure == 'ttest':
distance_func = ttst.measure_ttest
#now we deal with the indexed excluded subject
step = au.postmerging_str() + ' ' + str(classnames) + step_params
exsubf = ''
exclas = ''
if leave > -1:
exsubf = subjs[leave]
exclas = classnames[subjlabels[leave]]
if not au.is_done(chkf, step):
#group distance called here, in charge of removing the 'leave' subject from stats as well
measfname = post.group_distance (distance_func, statsdir, classnames, gsize, chkf, absval, mdir, foldno, expname, leave, exsubf, exclas)
if usemask:
au.apply_mask (measfname, mdir + os.path.sep + au.mask_str())
au.checklist_add(chkf, step)
#adding step end indication
au.checklist_add(chkf, endstep)
#CLEAN SPACE SUGGESTION
rmcomm = 'rm -rf ' + outdir + os.path.sep + au.slices_str() + ';'
if cleanup:
au.log.debug ('Cleaning folders:')
au.log.info (rmcomm)
os.system (rmcomm)
else:
au.log.info ('If you need disk space, remove the temporary folders executing:')
au.log.info (rmcomm.replace(';','\n'))
if leave > -1:
au.log.info ('You should not remove these files if you are doing further leave-one-out measures.')
#-------------------------------------------------------------------------------
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | 636,419,978,259,120,100 | 37.461894 | 565 | 0.584064 | false | 3.642607 | true | false | false |
jegger/kivy | kivy/uix/recycleview/views.py | 7 | 13895 | '''
RecycleView Views
=================
.. versionadded:: 1.10.0
The adapter part of the RecycleView which together with the layout is the
view part of the model-view-controller pattern.
The view module handles converting the data to a view using the adapter class
which is then displayed by the layout. A view can be any Widget based class.
However, inheriting from RecycleDataViewBehavior adds methods for converting
the data to a view.
TODO:
* Make view caches specific to each view class type.
'''
from kivy.properties import StringProperty, ObjectProperty
from kivy.event import EventDispatcher
from kivy.factory import Factory
from collections import defaultdict
_view_base_cache = {}
'''Cache whose keys are classes and whose values are booleans indicating
whether the class inherits from :class:`RecycleDataViewBehavior`.
'''
_cached_views = defaultdict(list)
'''A size limited cache that contains old views (instances) that are not used.
Each key is a class whose value is the list of the instances of that class.
'''
# current number of unused classes in the class cache
_cache_count = 0
# maximum number of items in the class cache
_max_cache_size = 1000
def _clean_cache():
'''Trims _cached_views cache to half the size of `_max_cache_size`.
'''
# all keys will be reduced to max_size.
max_size = (_max_cache_size // 2) // len(_cached_views)
global _cache_count
for cls, instances in _cached_views.items():
_cache_count -= max(0, len(instances) - max_size)
del instances[max_size:]
class RecycleDataViewBehavior(object):
    '''An optional base class for data views (:attr:`RecycleView`.viewclass).
If a view inherits from this class, the class's functions will be called
when the view needs to be updated due to a data change or layout update.
'''
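    # A minimal usage sketch (illustrative, not part of this module's API):
    # a view class opts in by listing this behavior before the widget base,
    # e.g.
    #
    #   from kivy.uix.label import Label
    #
    #   class SelectableLabel(RecycleDataViewBehavior, Label):
    #       def refresh_view_attrs(self, rv, index, data):
    #           self.index = index  # remember which data item we display
    #           return super(SelectableLabel, self).refresh_view_attrs(
    #               rv, index, data)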
def refresh_view_attrs(self, rv, index, data):
        '''Called by the :class:`RecycleDataAdapter` when the view is initially
populated with the values from the `data` dictionary for this item.
Any pos or size info should be removed because they are set
subsequently with :attr:`refresh_view_layout`.
:Parameters:
`rv`: :class:`RecycleView` instance
The :class:`RecycleView` that caused the update.
`data`: dict
The data dict used to populate this view.
'''
sizing_attrs = RecycleDataAdapter._sizing_attrs
for key, value in data.items():
if key not in sizing_attrs:
setattr(self, key, value)
def refresh_view_layout(self, rv, index, layout, viewport):
'''Called when the view's size is updated by the layout manager,
:class:`RecycleLayoutManagerBehavior`.
:Parameters:
`rv`: :class:`RecycleView` instance
The :class:`RecycleView` that caused the update.
`viewport`: 4-tuple
The coordinates of the bottom left and width height in layout
manager coordinates. This may be larger than this view item.
:raises:
`LayoutChangeException`: If the sizing or data changed during a
call to this method, raising a `LayoutChangeException` exception
will force a refresh. Useful when data changed and we don't want
to layout further since it'll be overwritten again soon.
'''
w, h = layout.pop('size')
if w is None:
if h is not None:
self.height = h
else:
if h is None:
self.width = w
else:
self.size = w, h
for name, value in layout.items():
setattr(self, name, value)
def apply_selection(self, rv, index, is_selected):
pass
class RecycleDataAdapter(EventDispatcher):
'''The class that converts data to a view.
--- Internal details ---
A view can have 3 states.
* It can be completely in sync with the data, which
occurs when the view is displayed. These are stored in :attr:`views`.
* It can be dirty, which occurs when the view is in sync with the data,
      except for the size/pos parameters, which are controlled by the layout.
This occurs when the view is not currently displayed but the data has
not changed. These views are stored in :attr:`dirty_views`.
* Finally the view can be dead which occurs when the data changes and
the view was not updated or when a view is just created. Such views
are typically added to the internal cache.
Typically what happens is that the layout manager lays out the data
    and then asks for views, using :meth:`set_visible_views`, for some specific
data items that it displays.
These views are gotten from the current views, dirty or global cache. Then
depending on the view state :meth:`refresh_view_attrs` is called to bring
the view up to date with the data (except for sizing parameters). Finally,
the layout manager gets these views, updates their size and displays them.
'''
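    # Rough call-order sketch (illustrative only; ``rv``, ``indices``,
    # ``data``, ``viewclasses``, ``layouts`` and ``viewport`` are assumed to
    # come from the layout manager):
    #
    #   adapter.attach_recycleview(rv)
    #   new, remaining, old = adapter.set_visible_views(
    #       indices, data, viewclasses)
    #   for index, view in new:
    #       adapter.refresh_view_layout(index, layouts[index], view, viewport)
    #   # the caller removes the ``old`` views from the layout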
recycleview = ObjectProperty(None, allownone=True)
'''The :class:`~kivy.uix.recycleview.RecycleViewBehavior` associated
with this instance.
'''
# internals
views = {} # current displayed items
# items whose attrs, except for pos/size is still accurate
dirty_views = defaultdict(dict)
_sizing_attrs = {
'size', 'width', 'height', 'size_hint', 'size_hint_x', 'size_hint_y',
'pos', 'x', 'y', 'center', 'center_x', 'center_y', 'pos_hint',
'size_hint_min', 'size_hint_min_x', 'size_hint_min_y', 'size_hint_max',
'size_hint_max_x', 'size_hint_max_y'}
def attach_recycleview(self, rv):
'''Associates a :class:`~kivy.uix.recycleview.RecycleViewBehavior`
with this instance. It is stored in :attr:`recycleview`.
'''
self.recycleview = rv
def detach_recycleview(self):
'''Removes the :class:`~kivy.uix.recycleview.RecycleViewBehavior`
associated with this instance and clears :attr:`recycleview`.
'''
self.recycleview = None
def create_view(self, index, data_item, viewclass):
'''(internal) Creates and initializes the view for the data at `index`.
The returned view is synced with the data, except for the pos/size
information.
'''
if viewclass is None:
return
view = viewclass()
self.refresh_view_attrs(index, data_item, view)
return view
def get_view(self, index, data_item, viewclass):
'''(internal) Returns a view instance for the data at `index`
It looks through the various caches and finally creates a view if it
doesn't exist. The returned view is synced with the data, except for
the pos/size information.
If found in the cache it's removed from the source
before returning. It doesn't check the current views.
'''
# is it in the dirtied views?
dirty_views = self.dirty_views
if viewclass is None:
return
stale = False
view = None
if viewclass in dirty_views: # get it first from dirty list
dirty_class = dirty_views[viewclass]
if index in dirty_class:
                # we found ourselves in the dirty list, no need to update data!
view = dirty_class.pop(index)
elif _cached_views[viewclass]:
# global cache has this class, update data
view, stale = _cached_views[viewclass].pop(), True
elif dirty_class:
                # grab any dirty view element - update data
view, stale = dirty_class.popitem()[1], True
elif _cached_views[viewclass]: # otherwise go directly to cache
# global cache has this class, update data
view, stale = _cached_views[viewclass].pop(), True
if view is None:
view = self.create_view(index, data_item, viewclass)
if view is None:
return
if stale:
self.refresh_view_attrs(index, data_item, view)
return view
def refresh_view_attrs(self, index, data_item, view):
'''(internal) Syncs the view and brings it up to date with the data.
This method calls :meth:`RecycleDataViewBehavior.refresh_view_attrs`
if the view inherits from :class:`RecycleDataViewBehavior`. See that
method for more details.
.. note::
Any sizing and position info is skipped when syncing with the data.
'''
viewclass = view.__class__
if viewclass not in _view_base_cache:
_view_base_cache[viewclass] = isinstance(view,
RecycleDataViewBehavior)
if _view_base_cache[viewclass]:
view.refresh_view_attrs(self.recycleview, index, data_item)
else:
sizing_attrs = RecycleDataAdapter._sizing_attrs
for key, value in data_item.items():
if key not in sizing_attrs:
setattr(view, key, value)
def refresh_view_layout(self, index, layout, view, viewport):
'''Updates the sizing information of the view.
viewport is in coordinates of the layout manager.
        This method calls :meth:`RecycleDataViewBehavior.refresh_view_layout`
if the view inherits from :class:`RecycleDataViewBehavior`. See that
method for more details.
.. note::
Any sizing and position info is skipped when syncing with the data.
'''
if view.__class__ not in _view_base_cache:
_view_base_cache[view.__class__] = isinstance(
view, RecycleDataViewBehavior)
if _view_base_cache[view.__class__]:
view.refresh_view_layout(
self.recycleview, index, layout, viewport)
else:
w, h = layout.pop('size')
if w is None:
if h is not None:
view.height = h
else:
if h is None:
view.width = w
else:
view.size = w, h
for name, value in layout.items():
setattr(view, name, value)
def make_view_dirty(self, view, index):
'''(internal) Used to flag this view as dirty, ready to be used for
others. See :meth:`make_views_dirty`.
'''
del self.views[index]
self.dirty_views[view.__class__][index] = view
def make_views_dirty(self):
'''Makes all the current views dirty.
Dirty views are still in sync with the corresponding data. However, the
size information may go out of sync. Therefore a dirty view can be
reused by the same index by just updating the sizing information.
Once the underlying data of this index changes, the view should be
removed from the dirty views and moved to the global cache with
:meth:`invalidate`.
This is typically called when the layout manager needs to re-layout all
the data.
'''
views = self.views
if not views:
return
dirty_views = self.dirty_views
for index, view in views.items():
dirty_views[view.__class__][index] = view
self.views = {}
def invalidate(self):
'''Moves all the current views into the global cache.
As opposed to making a view dirty where the view is in sync with the
data except for sizing information, this will completely disconnect the
view from the data, as it is assumed the data has gone out of sync with
the view.
This is typically called when the data changes.
'''
global _cache_count
for view in self.views.values():
_cached_views[view.__class__].append(view)
_cache_count += 1
for cls, views in self.dirty_views.items():
_cached_views[cls].extend(views.values())
_cache_count += len(views)
if _cache_count >= _max_cache_size:
_clean_cache()
self.views = {}
self.dirty_views.clear()
def set_visible_views(self, indices, data, viewclasses):
'''Gets a 3-tuple of the new, remaining, and old views for the current
viewport.
The new views are synced to the data except for the size/pos
properties.
The old views need to be removed from the layout, and the new views
added.
The new views are not necessarily *new*, but are all the currently
visible views.
'''
visible_views = {}
previous_views = self.views
ret_new = []
ret_remain = []
get_view = self.get_view
# iterate though the visible view
# add them into the container if not already done
for index in indices:
view = previous_views.pop(index, None)
if view is not None: # was current view
visible_views[index] = view
ret_remain.append((index, view))
else:
view = get_view(index, data[index],
viewclasses[index]['viewclass'])
if view is None:
continue
visible_views[index] = view
ret_new.append((index, view))
old_views = previous_views.items()
self.make_views_dirty()
self.views = visible_views
return ret_new, ret_remain, old_views
def get_visible_view(self, index):
'''Returns the currently visible view associated with ``index``.
If no view is currently displayed for ``index`` it returns ``None``.
'''
return self.views.get(index)
| mit | -7,012,238,564,549,061,000 | 36.352151 | 79 | 0.611227 | false | 4.339475 | false | false | false |
douban/tfmesos | tfmesos/scheduler.py | 1 | 16913 | import os
import sys
import math
import select
import socket
import getpass
import logging
import textwrap
from addict import Dict
from six import iteritems
from six.moves import urllib
from pymesos import Scheduler, MesosSchedulerDriver
from tfmesos.utils import send, recv, setup_logger
import uuid
FOREVER = 0xFFFFFFFF
logger = logging.getLogger(__name__)
class Job(object):
def __init__(self, name, num, cpus=1.0, mem=1024.0,
gpus=0, cmd=None, start=0):
self.name = name
self.num = num
self.cpus = cpus
self.gpus = gpus
self.mem = mem
self.cmd = cmd
self.start = start
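# Illustrative sketch (not part of this file): a TensorFlow ps/worker cluster
# could be described with Job specs and handed to the scheduler, e.g.
#
#   jobs = [Job('ps', 1, cpus=1.0, mem=1024.0),
#           Job('worker', 2, cpus=2.0, mem=2048.0, gpus=1)]
#   scheduler = TFMesosScheduler(jobs, master='mesos-master:5050')
#
# where the master address is only an assumption for the example.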
class Task(object):
def __init__(self, mesos_task_id, job_name, task_index,
cpus=1.0, mem=1024.0, gpus=0, cmd=None, volumes={}, env={}):
self.mesos_task_id = mesos_task_id
self.job_name = job_name
self.task_index = task_index
self.cpus = cpus
self.gpus = gpus
self.mem = mem
self.cmd = cmd
self.volumes = volumes
self.env = env
self.offered = False
self.addr = None
self.connection = None
self.initalized = False
def __str__(self):
return textwrap.dedent('''
<Task
mesos_task_id=%s
addr=%s
>''' % (self.mesos_task_id, self.addr))
def to_task_info(self, offer, master_addr, gpu_uuids=[],
gpu_resource_type=None, containerizer_type='MESOS',
force_pull_image=False):
ti = Dict()
ti.task_id.value = str(self.mesos_task_id)
ti.agent_id.value = offer.agent_id.value
ti.name = '/job:%s/task:%s' % (self.job_name, self.task_index)
ti.resources = resources = []
cpus = Dict()
resources.append(cpus)
cpus.name = 'cpus'
cpus.type = 'SCALAR'
cpus.scalar.value = self.cpus
mem = Dict()
resources.append(mem)
mem.name = 'mem'
mem.type = 'SCALAR'
mem.scalar.value = self.mem
image = os.environ.get('DOCKER_IMAGE')
if image is not None:
if containerizer_type == 'DOCKER':
ti.container.type = 'DOCKER'
ti.container.docker.image = image
ti.container.docker.force_pull_image = force_pull_image
ti.container.docker.parameters = parameters = []
p = Dict()
p.key = 'memory-swap'
p.value = '-1'
parameters.append(p)
if self.gpus and gpu_uuids:
hostname = offer.hostname
url = 'http://%s:3476/docker/cli?dev=%s' % (
hostname, urllib.parse.quote(
' '.join(gpu_uuids)
)
)
try:
docker_args = urllib.request.urlopen(url).read()
for arg in docker_args.split():
k, v = arg.split('=')
assert k.startswith('--')
k = k[2:]
p = Dict()
parameters.append(p)
p.key = k
p.value = v
except Exception:
logger.exception(
                            'failed to determine remote device parameters,'
                            ' disabling gpu resources'
)
gpu_uuids = []
elif containerizer_type == 'MESOS':
ti.container.type = 'MESOS'
ti.container.mesos.image.type = 'DOCKER'
ti.container.mesos.image.docker.name = image
# "cached" means the opposite of "force_pull_image"
ti.container.mesos.image.cached = not force_pull_image
else:
assert False, (
'Unsupported containerizer: %s' % containerizer_type
)
ti.container.volumes = volumes = []
for path in ['/etc/passwd', '/etc/group']:
v = Dict()
volumes.append(v)
v.host_path = v.container_path = path
v.mode = 'RO'
for src, dst in iteritems(self.volumes):
v = Dict()
volumes.append(v)
v.container_path = dst
v.host_path = src
v.mode = 'RW'
if self.gpus and gpu_uuids and gpu_resource_type is not None:
if gpu_resource_type == 'SET':
gpus = Dict()
resources.append(gpus)
gpus.name = 'gpus'
gpus.type = 'SET'
gpus.set.item = gpu_uuids
else:
gpus = Dict()
resources.append(gpus)
gpus.name = 'gpus'
gpus.type = 'SCALAR'
gpus.scalar.value = len(gpu_uuids)
ti.command.shell = True
cmd = [
sys.executable, '-m', '%s.server' % __package__,
str(self.mesos_task_id), master_addr
]
ti.command.value = ' '.join(cmd)
ti.command.environment.variables = variables = [
Dict(name=name, value=value)
for name, value in self.env.items()
if name != 'PYTHONPATH'
]
env = Dict()
variables.append(env)
env.name = 'PYTHONPATH'
env.value = ':'.join(sys.path)
return ti
class TFMesosScheduler(Scheduler):
MAX_FAILURE_COUNT = 3
def __init__(self, task_spec, role=None, master=None, name=None,
quiet=False, volumes={}, containerizer_type=None,
force_pull_image=False, forward_addresses=None,
protocol='grpc', env={}, extra_config={}):
self.started = False
self.master = master or os.environ['MESOS_MASTER']
self.name = name or '[tensorflow] %s %s' % (
os.path.abspath(sys.argv[0]), ' '.join(sys.argv[1:]))
self.task_spec = task_spec
self.containerizer_type = containerizer_type
self.force_pull_image = force_pull_image
self.protocol = protocol
self.extra_config = extra_config
self.forward_addresses = forward_addresses
self.role = role or '*'
self.tasks = {}
self.task_failure_count = {}
self.job_finished = {}
for job in task_spec:
self.job_finished[job.name] = 0
for task_index in range(job.start, job.num):
mesos_task_id = str(uuid.uuid4())
task = Task(
mesos_task_id,
job.name,
task_index,
cpus=job.cpus,
mem=job.mem,
gpus=job.gpus,
cmd=job.cmd,
volumes=volumes,
env=env
)
self.tasks[mesos_task_id] = task
self.task_failure_count[self.decorated_task_index(task)] = 0
if not quiet:
global logger
setup_logger(logger)
def resourceOffers(self, driver, offers):
'''
Offer resources and launch tasks
'''
for offer in offers:
if all(task.offered for id, task in iteritems(self.tasks)):
self.driver.suppressOffers()
driver.declineOffer(offer.id, Dict(refuse_seconds=FOREVER))
continue
offered_cpus = offered_mem = 0.0
offered_gpus = []
offered_tasks = []
gpu_resource_type = None
for resource in offer.resources:
if resource.name == 'cpus':
offered_cpus = resource.scalar.value
elif resource.name == 'mem':
offered_mem = resource.scalar.value
elif resource.name == 'gpus':
if resource.type == 'SET':
offered_gpus = resource.set.item
else:
offered_gpus = list(range(int(resource.scalar.value)))
gpu_resource_type = resource.type
for id, task in iteritems(self.tasks):
if task.offered:
continue
if not (task.cpus <= offered_cpus and
task.mem <= offered_mem and
task.gpus <= len(offered_gpus)):
continue
offered_cpus -= task.cpus
offered_mem -= task.mem
gpus = int(math.ceil(task.gpus))
gpu_uuids = offered_gpus[:gpus]
offered_gpus = offered_gpus[gpus:]
task.offered = True
offered_tasks.append(
task.to_task_info(
offer, self.addr, gpu_uuids=gpu_uuids,
gpu_resource_type=gpu_resource_type,
containerizer_type=self.containerizer_type,
force_pull_image=self.force_pull_image
)
)
driver.launchTasks(offer.id, offered_tasks)
@property
def targets(self):
targets = {}
for id, task in iteritems(self.tasks):
target_name = '/job:%s/task:%s' % (task.job_name, task.task_index)
grpc_addr = 'grpc://%s' % task.addr
targets[target_name] = grpc_addr
return targets
def _start_tf_cluster(self):
cluster_def = {}
tasks = sorted(self.tasks.values(), key=lambda task: task.task_index)
for task in tasks:
cluster_def.setdefault(task.job_name, []).append(task.addr)
for id, task in iteritems(self.tasks):
response = {
'job_name': task.job_name,
'task_index': task.task_index,
'cpus': task.cpus,
'mem': task.mem,
'gpus': task.gpus,
'cmd': task.cmd,
'cwd': os.getcwd(),
'cluster_def': cluster_def,
'forward_addresses': self.forward_addresses,
'extra_config': self.extra_config,
'protocol': self.protocol
}
send(task.connection, response)
assert recv(task.connection) == 'ok'
logger.info(
'Device /job:%s/task:%s activated @ grpc://%s ',
task.job_name,
task.task_index,
task.addr
)
task.connection.close()
def start(self):
def readable(fd):
return bool(select.select([fd], [], [], 0.1)[0])
lfd = socket.socket()
try:
lfd.bind(('', 0))
self.addr = '%s:%s' % (socket.gethostname(), lfd.getsockname()[1])
lfd.listen(10)
framework = Dict()
framework.user = getpass.getuser()
framework.name = self.name
framework.hostname = socket.gethostname()
framework.role = self.role
self.driver = MesosSchedulerDriver(
self, framework, self.master, use_addict=True
)
self.driver.start()
task_start_count = 0
while any((not task.initalized
for id, task in iteritems(self.tasks))):
if readable(lfd):
c, _ = lfd.accept()
if readable(c):
mesos_task_id, addr = recv(c)
task = self.tasks[mesos_task_id]
task.addr = addr
task.connection = c
task.initalized = True
task_start_count += 1
logger.info('Task %s with mesos_task_id %s has '
'registered',
'{}:{}'.format(task.job_name,
task.task_index),
mesos_task_id)
logger.info('Out of %d tasks '
'%d tasks have been registered',
len(self.tasks), task_start_count)
else:
c.close()
self.started = True
self._start_tf_cluster()
except Exception:
self.stop()
raise
finally:
lfd.close()
def registered(self, driver, framework_id, master_info):
logger.info(
'Tensorflow cluster registered. '
'( http://%s:%s/#/frameworks/%s )',
master_info.hostname, master_info.port, framework_id.value
)
if self.containerizer_type is None:
version = tuple(int(x) for x in driver.version.split("."))
self.containerizer_type = (
'MESOS' if version >= (1, 0, 0) else 'DOCKER'
)
def statusUpdate(self, driver, update):
logger.debug('Received status update %s', str(update.state))
mesos_task_id = update.task_id.value
if self._is_terminal_state(update.state):
task = self.tasks.get(mesos_task_id)
if task is None:
# This should be very rare and hence making this info.
logger.info("Task not found for mesos task id {}"
.format(mesos_task_id))
return
if self.started:
if update.state != 'TASK_FINISHED':
logger.error('Task failed: %s, %s with state %s', task,
update.message, update.state)
raise RuntimeError(
'Task %s failed! %s with state %s' %
(task, update.message, update.state)
)
else:
self.job_finished[task.job_name] += 1
else:
logger.warn('Task failed while launching the server: %s, '
'%s with state %s', task,
update.message, update.state)
if task.connection:
task.connection.close()
self.task_failure_count[self.decorated_task_index(task)] += 1
if self._can_revive_task(task):
self.revive_task(driver, mesos_task_id, task)
else:
raise RuntimeError('Task %s failed %s with state %s and '
'retries=%s' %
(task, update.message, update.state,
TFMesosScheduler.MAX_FAILURE_COUNT))
def revive_task(self, driver, mesos_task_id, task):
logger.info('Going to revive task %s ', task.task_index)
self.tasks.pop(mesos_task_id)
task.offered = False
task.addr = None
task.connection = None
new_task_id = task.mesos_task_id = str(uuid.uuid4())
self.tasks[new_task_id] = task
driver.reviveOffers()
def _can_revive_task(self, task):
return self.task_failure_count[self.decorated_task_index(task)] < \
TFMesosScheduler.MAX_FAILURE_COUNT
@staticmethod
def decorated_task_index(task):
return '{}.{}'.format(task.job_name, str(task.task_index))
@staticmethod
def _is_terminal_state(task_state):
return task_state in ["TASK_FINISHED", "TASK_FAILED", "TASK_KILLED",
"TASK_ERROR"]
def slaveLost(self, driver, agent_id):
if self.started:
logger.error('Slave %s lost:', agent_id.value)
raise RuntimeError('Slave %s lost' % agent_id)
def executorLost(self, driver, executor_id, agent_id, status):
if self.started:
logger.error('Executor %s lost:', executor_id.value)
raise RuntimeError('Executor %s@%s lost' % (executor_id, agent_id))
def error(self, driver, message):
logger.error('Mesos error: %s', message)
raise RuntimeError('Error ' + message)
def stop(self):
logger.debug('exit')
if hasattr(self, 'tasks'):
for id, task in iteritems(self.tasks):
if task.connection:
task.connection.close()
del self.tasks
if hasattr(self, 'driver'):
self.driver.stop()
self.driver.join()
del self.driver
def finished(self):
return any(
self.job_finished[job.name] >= job.num for job in self.task_spec
)
def processHeartBeat(self):
# compatibility with pymesos
pass
| bsd-3-clause | -6,295,978,636,634,129,000 | 34.162162 | 79 | 0.482765 | false | 4.307947 | false | false | false |
jasontbradshaw/plinth | util.py | 1 | 2215 | import collections
import threading
import errors
import tokens
def ensure_type(required_class, item, *rest):
'''
    Raises a WrongArgumentTypeError if any of the items is not an instance of
    the required class (or tuple of classes).
'''
if not isinstance(item, required_class):
raise errors.WrongArgumentTypeError.build(item, required_class)
for thing in rest:
if not isinstance(thing, required_class):
raise errors.WrongArgumentTypeError.build(thing, required_class)
def ensure_args(supplied_args, num_required=0, num_optional=0, is_variadic=False):
'''
Enforces the argument format specified by the keyword arguments. This
format is: required arguments first, optional arguments next, and a single
optional variadic arg last.
num_required defaults to 0, num_optional defaults to 0, and is_variadic
defaults to False.
Raises an IncorrectArgumentCountError if the args don't match the spec.
'''
# get the various counts we need to determine if the number of args is good
min_args = num_required
max_args = float('inf') if is_variadic else num_required + num_optional
# determine whether the arg spec was met by the supplied arg list
num_supplied = len(supplied_args)
if num_supplied < min_args or num_supplied > max_args:
raise errors.IncorrectArgumentCountError.build(min_args, max_args,
num_supplied, is_variadic=is_variadic)
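# Illustrative example (not part of the original module): a function taking
# two required arguments, one optional argument and a variadic tail would be
# checked with
#
#   ensure_args(supplied_args, num_required=2, num_optional=1,
#               is_variadic=True)
#
# which accepts any call supplying two or more arguments.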
def file_char_iter(f):
'''Iterate over an open file one character at a time.'''
for line in f:
for c in line:
yield c
def to_string(x):
'''Convert an atom to a string as it appears in our language.'''
if isinstance(x, bool):
return tokens.TRUE if x else tokens.FALSE
elif isinstance(x, basestring):
# TODO: escape properly
return tokens.STRING + x + tokens.STRING
return unicode(x)
class ThreadSafeCounter:
'''When called, returns increasing ints in order.'''
def __init__(self, count=0):
self.count = count
self.lock = threading.Lock()
def __call__(self):
with self.lock:
c = self.count
self.count += 1
return c
| mit | 2,962,825,229,518,733,000 | 30.642857 | 82 | 0.669074 | false | 4.109462 | false | false | false |
anandology/pyjamas | pyjs/src/pyjs/options.py | 1 | 10069 | debug_options={}
speed_options={}
pythonic_options={}
all_compile_options = dict(
internal_ast = False,
debug = False,
print_statements=True,
function_argument_checking=False,
attribute_checking=False,
getattr_support=True,
bound_methods=True,
descriptors=False,
source_tracking=False,
line_tracking=False,
store_source=False,
inline_code=False,
operator_funcs=True,
number_classes=False,
create_locals=False,
stupid_mode=False,
translator='proto',
)
def add_compile_options(parser):
global debug_options, speed_options, pythonic_options
parser.add_option("--internal-ast",
dest="internal_ast",
action="store_true",
help="Use internal AST parser instead of standard python one"
)
parser.add_option("--no-internal-ast",
dest="internal_ast",
action="store_false",
help="Use internal AST parser instead of standard python one"
)
parser.add_option("--debug-wrap",
dest="debug",
action="store_true",
help="Wrap function calls with javascript debug code",
)
parser.add_option("--no-debug-wrap",
dest="debug",
action="store_false",
)
debug_options['debug'] = True
speed_options['debug'] = False
parser.add_option("--no-print-statements",
dest="print_statements",
action="store_false",
help="Remove all print statements",
)
parser.add_option("--print-statements",
dest="print_statements",
action="store_true",
help="Generate code for print statements",
)
speed_options['print_statements'] = False
parser.add_option("--no-function-argument-checking",
dest = "function_argument_checking",
action="store_false",
help = "Do not generate code for function argument checking",
)
parser.add_option("--function-argument-checking",
dest = "function_argument_checking",
action="store_true",
help = "Generate code for function argument checking",
)
speed_options['function_argument_checking'] = False
pythonic_options['function_argument_checking'] = True
parser.add_option("--no-attribute-checking",
dest = "attribute_checking",
action="store_false",
help = "Do not generate code for attribute checking",
)
parser.add_option("--attribute-checking",
dest = "attribute_checking",
action="store_true",
help = "Generate code for attribute checking",
)
speed_options['attribute_checking'] = False
pythonic_options['attribute_checking'] = True
parser.add_option("--no-getattr-support",
dest = "getattr_support",
action="store_false",
help = "Do not support __getattr__()",
)
parser.add_option("--getattr-support",
dest = "getattr_support",
action="store_true",
help = "Support __getattr__()",
)
speed_options['getattr_support'] = False
pythonic_options['getattr_support'] = True
parser.add_option("--no-bound-methods",
dest = "bound_methods",
action="store_false",
help = "Do not generate code for binding methods",
)
parser.add_option("--bound-methods",
dest = "bound_methods",
action="store_true",
help = "Generate code for binding methods",
)
speed_options['bound_methods'] = False
pythonic_options['bound_methods'] = True
parser.add_option("--no-descriptors",
dest = "descriptors",
action="store_false",
help = "Do not generate code for descriptor calling",
)
parser.add_option("--descriptors",
dest = "descriptors",
action="store_true",
help = "Generate code for descriptor calling",
)
speed_options['descriptors'] = False
pythonic_options['descriptors'] = True
parser.add_option("--no-source-tracking",
dest = "source_tracking",
action="store_false",
help = "Do not generate code for source tracking",
)
parser.add_option("--source-tracking",
dest = "source_tracking",
action="store_true",
help = "Generate code for source tracking",
)
debug_options['source_tracking'] = True
speed_options['source_tracking'] = False
pythonic_options['source_tracking'] = True
parser.add_option("--no-line-tracking",
dest = "line_tracking",
action="store_true",
help = "Do not generate code for source tracking on every line",
)
parser.add_option("--line-tracking",
dest = "line_tracking",
action="store_true",
help = "Generate code for source tracking on every line",
)
debug_options['line_tracking'] = True
pythonic_options['line_tracking'] = True
parser.add_option("--no-store-source",
dest = "store_source",
action="store_false",
help = "Do not store python code in javascript",
)
parser.add_option("--store-source",
dest = "store_source",
action="store_true",
help = "Store python code in javascript",
)
debug_options['store_source'] = True
pythonic_options['store_source'] = True
parser.add_option("--no-inline-code",
dest = "inline_code",
action="store_false",
help = "Do not generate inline code for bool/eq/len",
)
parser.add_option("--inline-code",
dest = "inline_code",
action="store_true",
help = "Generate inline code for bool/eq/len",
)
speed_options['inline_code'] = True
parser.add_option("--no-operator-funcs",
dest = "operator_funcs",
action="store_false",
help = "Do not generate function calls for operators",
)
parser.add_option("--operator-funcs",
dest = "operator_funcs",
action="store_true",
help = "Generate function calls for operators",
)
speed_options['operator_funcs'] = False
pythonic_options['operator_funcs'] = True
parser.add_option("--no-number-classes",
dest = "number_classes",
action="store_false",
help = "Do not use number classes",
)
parser.add_option("--number-classes",
dest = "number_classes",
action="store_true",
help = "Use classes for numbers (float, int, long)",
)
speed_options['number_classes'] = False
pythonic_options['number_classes'] = True
parser.add_option("--create-locals",
dest = "create_locals",
action="store_true",
help = "Create locals",
)
parser.add_option("--no-stupid-mode",
dest = "stupid_mode",
action="store_false",
help = "Doesn't rely on javascriptisms",
)
parser.add_option("--stupid-mode",
dest = "stupid_mode",
action="store_true",
help = "Creates minimalist code, relying on javascript",
)
parser.add_option("--translator",
dest = "translator",
default="proto",
help = "Specify the translator: proto|dict",
)
def set_multiple(option, opt_str, value, parser, **kwargs):
for k in kwargs.keys():
setattr(parser.values, k, kwargs[k])
parser.add_option("-d", "--debug",
action="callback",
callback = set_multiple,
callback_kwargs = debug_options,
help="Set all debugging options",
)
parser.add_option("-O",
action="callback",
callback = set_multiple,
callback_kwargs = speed_options,
help="Set all options that maximize speed",
)
parser.add_option("--strict",
action="callback",
callback = set_multiple,
callback_kwargs = pythonic_options,
help="Set all options that mimic standard python behavior",
)
parser.set_defaults(**all_compile_options)
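# Illustrative usage sketch (the optparse parser construction is an
# assumption; these options are written for an optparse-style parser):
#
#   from optparse import OptionParser
#   parser = OptionParser()
#   add_compile_options(parser)
#   opts, args = parser.parse_args(['-O', '--print-statements'])
#   compile_opts = get_compile_options(opts)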
def get_compile_options(opts):
d = {}
for opt in all_compile_options:
d[opt] = getattr(opts, opt)
return d
| apache-2.0 | -3,915,245,851,955,995,000 | 38.178988 | 86 | 0.478399 | false | 5.155658 | false | false | false |
ebmscruff/studsup | matches.py | 1 | 1877 | #!/usr/bin/python3
import random
from humans import Human
from cities import City
from clubs import Club
class Match():
def __init__(self, teamHome, teamAway):
self.teamHome = teamHome
self.teamAway = teamAway
self.homeScore = 0
self.awayScore = 0
self.result = 0 # 0 = unplayed
def sim_match(self):
homeTier = 0.0
awayTier = 0.0
# add player tiers
for player in self.teamHome.players:
homeTier += player.tier
for player in self.teamAway.players:
awayTier += player.tier
homeTier = homeTier*random.uniform(0,2)
awayTier = awayTier*random.uniform(0,2)
# do some randoms.. more chances to score based on your tier
        # you must have a higher end result than the opponent to score a goal
for goalAtt in range(0, 5):
home = random.randint(0, 10) + homeTier
away = random.randint(0, 8) + awayTier
if home > away:
self.homeScore += 1
for goalAtt in range(0, 4):
home = random.randint(0, 8) + homeTier
away = random.randint(0, 10) + awayTier
if away > home:
self.awayScore += 1
# 1: home win -- 2: away win -- 3: draw
if self.homeScore > self.awayScore:
self.result = 1
elif self.awayScore > self.homeScore:
self.result = 2
else:
self.result = 3
def print_postmatch(self):
print(" Home: {homeScore} :{teamHome}\n Away: {awayScore} :{teamAway}\n".format(homeScore=self.homeScore, teamHome=self.teamHome.name, awayScore=self.awayScore, teamAway=self.teamAway.name))
def print_prematch(self):
print(" HOME: {0}\n AWAY: {1}\n".format(self.teamHome.name, self.teamAway.name))
| gpl-3.0 | 5,004,726,597,598,686,000 | 31.929825 | 198 | 0.572722 | false | 3.568441 | false | false | false |
nzjrs/conduit | conduit/DBus.py | 2 | 21538 | """
DBus related functionality including the DBus interface and utility
functions
Copyright: John Stowers, 2006
License: GPLv2
"""
import os.path
import dbus
import dbus.service
import logging
log = logging.getLogger("DBus")
import conduit
import conduit.utils as Utils
import conduit.Conduit as Conduit
import conduit.SyncSet as SyncSet
ERROR = -1
SUCCESS = 0
DEBUG_ALL_CALLS = True
APPLICATION_DBUS_IFACE="org.conduit.Application"
SYNCSET_DBUS_IFACE="org.conduit.SyncSet"
CONDUIT_DBUS_IFACE="org.conduit.Conduit"
EXPORTER_DBUS_IFACE="org.conduit.Exporter"
DATAPROVIDER_DBUS_IFACE="org.conduit.DataProvider"
################################################################################
# DBus API Docs
################################################################################
#
# ==== Main Application ====
# Service org.conduit.Application
# Interface org.conduit.Application
# Object path /
#
# Methods:
# BuildConduit(source, sink)
# BuildExporter(self, sinkKey)
# ListAllDataProviders
# GetDataProvider
# NewSyncSet
# Quit
#
# Signals:
# DataproviderAvailable(key)
# DataproviderUnavailable(key)
#
# ==== SyncSet ====
# Service org.conduit.SyncSet
# Interface org.conduit.SyncSet
# Object path /syncset/{dbus, gui, UUID}
#
# Methods:
# AddConduit
# DeleteConduit
# SaveToXml
# RestoreFromXml
#
# Signals:
# ConduitAdded(key)
# ConduitRemoved(key)
#
# ==== Conduit ====
# Service org.conduit.Conduit
# Interface org.conduit.Conduit
# Object path /conduit/{some UUID}
#
# Methods:
# EnableTwoWaySync
# DisableTwoWaySync
# IsTwoWay
# AddDataprovider
# DeleteDataprovider
# Sync
# Refresh
#
# Signals:
# SyncStarted
# SyncCompleted(aborted, error, conflict)
# SyncConflict
# SyncProgress(progress, completedUIDs)
# DataproviderAdded
# DataproviderRemoved
#
# ==== Exporter Conduit ====
# Service org.conduit.Conduit
# Interface org.conduit.Exporter
# Object path /conduit/{some UUID}
#
# Methods:
# AddData
# SinkConfigure
# SinkGetInformation
# SinkGetConfigurationXml
# SinkSetConfigurationXml
#
# ==== DataProvider ====
# Service org.conduit.DataProvider
# Interface org.conduit.DataProvider
# Object path /dataprovider/{some UUID}
#
# Methods:
# IsPending
# IsConfigured
# SetConfigurationXML
# GetConfigurationXML
# Configure
# GetInformation
# AddData
#
# Signals:
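#
# ==== Example client (illustrative sketch) ====
# Assumptions: a running Conduit instance, the dbus-python bindings, and
# "FolderTwoWay" standing in for whichever dataprovider key is wanted.
#
#   import dbus
#   bus = dbus.SessionBus()
#   app = dbus.Interface(bus.get_object("org.conduit.Application", "/"),
#                        "org.conduit.Application")
#   conduit_path = app.BuildExporter("FolderTwoWay")
#   conduit_obj = bus.get_object("org.conduit.Conduit", conduit_path)
#   exporter = dbus.Interface(conduit_obj, "org.conduit.Exporter")
#   exporter.AddData("file:///tmp/example.jpg")
#   dbus.Interface(conduit_obj, "org.conduit.Conduit").Sync()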
#All objects currently exported over the bus
EXPORTED_OBJECTS = {}
class ConduitException(dbus.DBusException):
_dbus_error_name = 'org.conduit.ConduitException'
class DBusItem(dbus.service.Object):
def __init__(self, iface, path):
bus_name = dbus.service.BusName(iface, bus=dbus.SessionBus())
dbus.service.Object.__init__(self, bus_name, path)
log.debug("DBus Exported: %s" % self.get_path())
def get_path(self):
return self.__dbus_object_path__
def _print(self, message):
if DEBUG_ALL_CALLS:
log.debug("DBus Message from %s: %s" % (self.get_path(), message))
class ConduitDBusItem(DBusItem):
def __init__(self, sync_manager, conduit, uuid):
DBusItem.__init__(self, iface=CONDUIT_DBUS_IFACE, path="/conduit/%s" % uuid)
self.sync_manager = sync_manager
self.conduit = conduit
self.conduit.connect("sync-started", self._on_sync_started)
self.conduit.connect("sync-completed", self._on_sync_completed)
self.conduit.connect("sync-conflict", self._on_sync_conflict)
self.conduit.connect("sync-progress", self._on_sync_progress)
def _on_sync_started(self, cond):
if cond == self.conduit:
self.SyncStarted()
def _on_sync_completed(self, cond, aborted, error, conflict):
if cond == self.conduit:
self.SyncCompleted(bool(aborted), bool(error), bool(conflict))
def _on_sync_progress(self, cond, progress, UIDs):
if cond == self.conduit:
self.SyncProgress(float(progress), UIDs)
def _on_sync_conflict(self, cond, conflict):
if cond == self.conduit:
self.SyncConflict()
#
# org.conduit.Conduit
#
@dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='')
def EnableTwoWaySync(self):
self._print("EnableTwoWaySync")
self.conduit.enable_two_way_sync()
@dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='')
def DisableTwoWaySync(self):
self._print("DisableTwoWaySync")
self.conduit.disable_two_way_sync()
@dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='b')
def IsTwoWay(self):
self._print("IsTwoWay")
return self.conduit.is_two_way()
@dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='ob', out_signature='')
def AddDataprovider(self, dp, trySource):
self._print("AddDataprovider: %s" % dp)
#get the actual dps from their object paths
try:
dpw = EXPORTED_OBJECTS[str(dp)].dataprovider
except KeyError, e:
raise ConduitException("Could not locate dataprovider: %s" % e)
if not self.conduit.add_dataprovider(dpw):
raise ConduitException("Could not add dataprovider: %s" % e)
@dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='o', out_signature='')
def DeleteDataprovider(self, dp):
self._print("DeleteDataprovider: %s" % dp)
#get the actual dps from their object paths
try:
dpw = EXPORTED_OBJECTS[str(dp)].dataprovider
except KeyError, e:
raise ConduitException("Could not locate dataprovider: %s" % e)
if not self.conduit.delete_dataprovider(dpw):
raise ConduitException("Could not delete dataprovider: %s" % e)
@dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='')
def Sync(self):
self._print("Sync")
self.conduit.sync()
@dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='')
def Refresh(self):
self._print("Refresh")
self.conduit.refresh()
@dbus.service.signal(CONDUIT_DBUS_IFACE, signature='')
def SyncStarted(self):
self._print("SyncStarted")
@dbus.service.signal(CONDUIT_DBUS_IFACE, signature='bbb')
def SyncCompleted(self, aborted, error, conflict):
self._print("SyncCompleted (abort:%s error:%s conflict:%s)" % (aborted,error,conflict))
@dbus.service.signal(CONDUIT_DBUS_IFACE, signature='')
def SyncConflict(self):
self._print("SyncConflict")
@dbus.service.signal(CONDUIT_DBUS_IFACE, signature='das')
def SyncProgress(self, progress, UIDs):
self._print("SyncProgress %s%%\n\t%s" % ((progress*100.0), UIDs))
#
# org.conduit.Exporter
#
@dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='s', out_signature='')
def SinkSetConfigurationXml(self, xml):
self._print("SinkSetConfigurationXml: %s" % xml)
if len(self.conduit.datasinks) != 1:
raise ConduitException("Simple exporter must only have one sink")
self.conduit.datasinks[0].set_configuration_xml(xml)
@dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='', out_signature='')
def SinkConfigure(self):
self._print("SinkConfigure")
if len(self.conduit.datasinks) != 1:
raise ConduitException("Simple exporter must only have one sink")
dataprovider = self.conduit.datasinks[0]
#FIXME Hard-coded GtkUI
from conduit.gtkui.WindowConfigurator import WindowConfigurator
from conduit.gtkui.ConfigContainer import ConfigContainer
configurator = WindowConfigurator(None)
container = dataprovider.module.get_config_container(
configContainerKlass=ConfigContainer,
name=dataprovider.get_name(),
icon=dataprovider.get_icon(),
configurator=configurator
)
configurator.set_containers([container])
configurator.run(container)
@dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='s', out_signature='b')
def AddData(self, uri):
self._print("AddData: %s" % uri)
if self.conduit.datasource == None:
raise ConduitException("Simple exporter must have a source")
return self.conduit.datasource.module.add(uri)
@dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='', out_signature='a{ss}')
def SinkGetInformation(self):
self._print("SinkGetInformation")
if len(self.conduit.datasinks) != 1:
raise ConduitException("Simple exporter must only have one sink")
#Need to call get_icon so that the icon_name/path is loaded
try:
self.conduit.datasinks[0].get_icon()
except:
log.warn("DBus could not lookup dp icon")
info = {}
info["name"] = self.conduit.datasinks[0].name
info["description"] = self.conduit.datasinks[0].description
info["module_type"] = self.conduit.datasinks[0].module_type
info["category"] = self.conduit.datasinks[0].category.name
info["in_type"] = self.conduit.datasinks[0].get_input_type()
info["out_type"] = self.conduit.datasinks[0].get_output_type()
info["classname"] = self.conduit.datasinks[0].classname
info["key"] = self.conduit.datasinks[0].get_key()
info["enabled"] = str( self.conduit.datasinks[0].enabled)
info["UID"] = self.conduit.datasinks[0].get_UID()
info["icon_name"] = self.conduit.datasinks[0].icon_name
info["icon_path"] = self.conduit.datasinks[0].icon_path
return info
@dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='', out_signature='s')
def SinkGetConfigurationXml(self):
self._print("SinkGetConfigurationXml")
if len(self.conduit.datasinks) != 1:
raise ConduitException("Simple exporter must only have one sink")
return self.conduit.datasinks[0].get_configuration_xml()
class DataProviderDBusItem(DBusItem):
def __init__(self, dataprovider, uuid):
DBusItem.__init__(self, iface=DATAPROVIDER_DBUS_IFACE, path="/dataprovider/%s" % uuid)
self.dataprovider = dataprovider
@dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='', out_signature='b')
def IsPending(self):
self._print("IsPending")
return self.dataprovider.module == None
@dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='bb', out_signature='b')
def IsConfigured(self, isSource, isTwoWay):
self._print("IsConfigured")
if self.dataprovider.module != None:
return self.dataprovider.module.is_configured(isSource, isTwoWay)
return False
@dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='', out_signature='a{ss}')
def GetInformation(self):
self._print("GetInformation")
#Need to call get_icon so that the icon_name/path is loaded
try:
self.dataprovider.get_icon()
except:
log.warn("DBus could not lookup dp icon")
info = {}
info["name"] = self.dataprovider.name
info["description"] = self.dataprovider.description
info["module_type"] = self.dataprovider.module_type
info["category"] = self.dataprovider.category.name
info["in_type"] = self.dataprovider.get_input_type()
info["out_type"] = self.dataprovider.get_output_type()
info["classname"] = self.dataprovider.classname
info["key"] = self.dataprovider.get_key()
info["enabled"] = str(self.dataprovider.enabled)
info["UID"] = self.dataprovider.get_UID()
info["icon_name"] = self.dataprovider.icon_name
info["icon_path"] = self.dataprovider.icon_path
return info
@dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='', out_signature='s')
def GetConfigurationXml(self):
self._print("GetConfigurationXml")
return self.dataprovider.get_configuration_xml()
@dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='s', out_signature='')
def SetConfigurationXml(self, xml):
self._print("SetConfigurationXml: %s" % xml)
self.dataprovider.set_configuration_xml(xml)
@dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='', out_signature='')
def Configure(self):
self._print("Configure")
#FIXME Hard-coded GtkUI
from conduit.gtkui.WindowConfigurator import WindowConfigurator
from conduit.gtkui.ConfigContainer import ConfigContainer
configurator = WindowConfigurator(None)
container = self.dataprovider.module.get_config_container(
configContainerKlass=ConfigContainer,
name=self.dataprovider.get_name(),
icon=self.dataprovider.get_icon(),
configurator=configurator
)
configurator.set_containers([container])
configurator.run(container)
@dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='s', out_signature='b')
def AddData(self, uri):
self._print("AddData: %s" % uri)
return self.dataprovider.module.add(uri)
class SyncSetDBusItem(DBusItem):
def __init__(self, syncSet, name):
DBusItem.__init__(self, iface=SYNCSET_DBUS_IFACE, path="/syncset/%s" % name)
self.syncSet = syncSet
self.syncSet.connect("conduit-added", self._on_conduit_added)
self.syncSet.connect("conduit-removed", self._on_conduit_removed)
def _on_conduit_added(self, syncset, cond):
self.ConduitAdded()
def _on_conduit_removed(self, syncset, cond):
self.ConduitRemoved()
@dbus.service.signal(SYNCSET_DBUS_IFACE, signature='')
def ConduitAdded(self):
self._print("Emmiting DBus signal ConduitAdded")
@dbus.service.signal(SYNCSET_DBUS_IFACE, signature='')
def ConduitRemoved(self):
self._print("Emmiting DBus signal ConduitRemoved")
@dbus.service.method(SYNCSET_DBUS_IFACE, in_signature='o', out_signature='')
def AddConduit(self, cond):
self._print("AddConduit: %s" % cond)
try:
c = EXPORTED_OBJECTS[str(cond)].conduit
except KeyError, e:
raise ConduitException("Could not locate Conduit: %s" % e)
self.syncSet.add_conduit(c)
@dbus.service.method(SYNCSET_DBUS_IFACE, in_signature='o', out_signature='')
def DeleteConduit(self, cond):
self._print("DeleteConduit: %s" % cond)
try:
c = EXPORTED_OBJECTS[str(cond)].conduit
except KeyError, e:
raise ConduitException("Could not locate Conduit: %s" % e)
self.syncSet.remove_conduit(c)
@dbus.service.method(SYNCSET_DBUS_IFACE, in_signature='s', out_signature='')
def SaveToXml(self, path):
self._print("SaveToXml: %s" % path)
self.syncSet.save_to_xml(os.path.abspath(path))
@dbus.service.method(SYNCSET_DBUS_IFACE, in_signature='s', out_signature='')
def RestoreFromXml(self, path):
self._print("RestoreFromXml: %s" % path)
self.syncSet.restore_from_xml(os.path.abspath(path))
class DBusInterface(DBusItem):
def __init__(self, conduitApplication, moduleManager, typeConverter, syncManager, guiSyncSet):
DBusItem.__init__(self, iface=APPLICATION_DBUS_IFACE, path="/")
self.conduitApplication = conduitApplication
#setup the module manager
self.moduleManager = moduleManager
self.moduleManager.connect("dataprovider-available", self._on_dataprovider_available)
self.moduleManager.connect("dataprovider-unavailable", self._on_dataprovider_unavailable)
#type converter and sync manager
self.type_converter = typeConverter
self.sync_manager = syncManager
#export the syncsets
new = SyncSetDBusItem(guiSyncSet, "gui")
EXPORTED_OBJECTS[new.get_path()] = new
self.sync_set = SyncSet.SyncSet(moduleManager,syncManager)
new = SyncSetDBusItem(self.sync_set, "dbus")
EXPORTED_OBJECTS[new.get_path()] = new
#export myself
EXPORTED_OBJECTS[self.get_path()] = self
def _get_all_dps(self):
datasources = self.moduleManager.get_modules_by_type("source")
datasinks = self.moduleManager.get_modules_by_type("sink")
twoways = self.moduleManager.get_modules_by_type("twoway")
return datasources + datasinks + twoways
def _new_syncset(self):
ss = SyncSet.SyncSet(
moduleManager=self.moduleManager,
syncManager=self.sync_manager
)
i = Utils.uuid_string()
new = SyncSetDBusItem(ss, i)
EXPORTED_OBJECTS[new.get_path()] = new
return new
def _get_dataprovider(self, key):
"""
Instantiates a new dataprovider (source or sink), storing it
appropriately.
@param key: Key of the DP to create
@returns: The new DP
"""
dpw = self.moduleManager.get_module_wrapper_with_instance(key)
if dpw == None:
raise ConduitException("Could not find dataprovider with key: %s" % key)
i = Utils.uuid_string()
new = DataProviderDBusItem(dpw, i)
EXPORTED_OBJECTS[new.get_path()] = new
return new
def _get_conduit(self, source=None, sink=None, sender=None):
"""
Instantiates a new dataprovider (source or sink), storing it
appropriately.
@param key: Key of the DP to create
@returns: The new DP
"""
if sender == None:
raise ConduitException("Invalid DBus Caller")
cond = Conduit.Conduit(self.sync_manager)
if source != None:
if not cond.add_dataprovider(dataprovider_wrapper=source, trySourceFirst=True):
raise ConduitException("Error adding source to conduit")
if sink != None:
if not cond.add_dataprovider(dataprovider_wrapper=sink, trySourceFirst=False):
raise ConduitException("Error adding source to conduit")
i = Utils.uuid_string()
new = ConduitDBusItem(self.sync_manager, cond, i)
EXPORTED_OBJECTS[new.get_path()] = new
return new
def _on_dataprovider_available(self, loader, dataprovider):
self.DataproviderAvailable(dataprovider.get_key())
def _on_dataprovider_unavailable(self, loader, dataprovider):
self.DataproviderUnavailable(dataprovider.get_key())
def quit(self):
#need to call quit() on all sync sets or conduits as they may have been
#created here...
for path in EXPORTED_OBJECTS:
if path.startswith("/syncset/"):
EXPORTED_OBJECTS[path].syncSet.quit()
elif path.startswith("/conduit/"):
EXPORTED_OBJECTS[path].conduit.quit()
def get_syncset(self):
return self.sync_set
def get_all_syncsets(self):
return [EXPORTED_OBJECTS[path].syncSet
for path in EXPORTED_OBJECTS if path.startswith("/syncset/")
]
@dbus.service.signal(APPLICATION_DBUS_IFACE, signature='s')
def DataproviderAvailable(self, key):
self._print("Emmiting DBus signal DataproviderAvailable %s" % key)
@dbus.service.signal(APPLICATION_DBUS_IFACE, signature='s')
def DataproviderUnavailable(self, key):
self._print("Emiting DBus signal DataproviderUnavailable %s" % key)
@dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='', out_signature='o')
def NewSyncSet(self):
self._print("NewSyncSet")
return self._new_syncset()
@dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='', out_signature='as')
def GetAllDataProviders(self):
self._print("GetAllDataProviders")
return [i.get_key() for i in self._get_all_dps()]
@dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='s', out_signature='o')
def GetDataProvider(self, key):
self._print("GetDataProvider: %s" % key)
return self._get_dataprovider(key)
@dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='oo', out_signature='o', sender_keyword='sender')
def BuildConduit(self, source, sink, sender=None):
self._print("BuildConduit (sender: %s:) %s --> %s" % (sender, source, sink))
#get the actual dps from their object paths
try:
source = EXPORTED_OBJECTS[str(source)].dataprovider
sink = EXPORTED_OBJECTS[str(sink)].dataprovider
except KeyError, e:
raise ConduitException("Could not find dataprovider with key: %s" % e)
return self._get_conduit(source, sink, sender)
@dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='s', out_signature='o', sender_keyword='sender')
def BuildExporter(self, key, sender=None):
self._print("BuildExporter (sender: %s:) --> %s" % (sender,key))
source = self._get_dataprovider("FileSource")
sink = self._get_dataprovider(key)
return self._get_conduit(source.dataprovider, sink.dataprovider, sender)
@dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='', out_signature='')
def Quit(self):
if self.conduitApplication != None:
self.conduitApplication.Quit()
| gpl-2.0 | -548,939,383,247,523,200 | 35.505085 | 111 | 0.640124 | false | 3.520432 | true | false | false |
mufaddalq/cloudstack-datera-driver | tools/cli/cloudmonkey/cachemaker.py | 2 | 5977 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import json
import os
import types
from config import config_fields
except ImportError, e:
import sys
print "ImportError", e
sys.exit(1)
def getvalue(dictionary, key):
if key in dictionary:
return dictionary[key]
else:
return None
def splitcsvstring(string):
if string is not None:
return filter(lambda x: x.strip() != '', string.split(','))
else:
return []
def splitverbsubject(string):
idx = 0
for char in string:
if char.islower():
idx += 1
else:
break
return string[:idx].lower(), string[idx:].lower()
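# For example (illustrative): splitverbsubject("listVirtualMachines") returns
# ("list", "virtualmachines"); the cache built below is keyed this way.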
def savecache(apicache, json_file):
"""
Saves apicache dictionary as json_file, returns dictionary as indented str
"""
if apicache is None or apicache is {}:
return ""
apicachestr = json.dumps(apicache, indent=2)
with open(json_file, 'w') as cache_file:
cache_file.write(apicachestr)
return apicachestr
def loadcache(json_file):
"""
    Loads json file as dictionary and returns the resulting apicache
"""
f = open(json_file, 'r')
data = f.read()
f.close()
try:
apicache = json.loads(data)
except ValueError, e:
print "Error processing json:", json_file, e
return {}
return apicache
def monkeycache(apis):
"""
    Feed this a dictionary of api bananas (the raw API discovery response); it spits out the processed cache
"""
    if apis is None:
return {}
responsekey = filter(lambda x: 'response' in x, apis.keys())
if len(responsekey) == 0:
print "[monkeycache] Invalid dictionary, has no response"
return None
if len(responsekey) != 1:
print "[monkeycache] Multiple responsekeys, chosing first one"
responsekey = responsekey[0]
verbs = set()
cache = {}
cache['count'] = getvalue(apis[responsekey], 'count')
cache['asyncapis'] = []
apilist = getvalue(apis[responsekey], 'api')
if apilist is None:
print "[monkeycache] Server response issue, no apis found"
for api in apilist:
name = getvalue(api, 'name')
verb, subject = splitverbsubject(name)
apidict = {}
apidict['name'] = name
apidict['description'] = getvalue(api, 'description')
apidict['isasync'] = getvalue(api, 'isasync')
if apidict['isasync']:
cache['asyncapis'].append(name)
apidict['related'] = splitcsvstring(getvalue(api, 'related'))
required = []
apiparams = []
for param in getvalue(api, 'params'):
apiparam = {}
apiparam['name'] = getvalue(param, 'name')
apiparam['description'] = getvalue(param, 'description')
apiparam['required'] = (getvalue(param, 'required') is True)
apiparam['length'] = int(getvalue(param, 'length'))
apiparam['type'] = getvalue(param, 'type')
apiparam['related'] = splitcsvstring(getvalue(param, 'related'))
if apiparam['required']:
required.append(apiparam['name'])
apiparams.append(apiparam)
apidict['requiredparams'] = required
apidict['params'] = apiparams
if verb not in cache:
cache[verb] = {}
cache[verb][subject] = apidict
verbs.add(verb)
cache['verbs'] = list(verbs)
return cache
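# Rough sketch of the resulting structure (the response key and API fields below are
# assumptions based on a typical CloudStack discovery response, not from this module):
#   monkeycache({"listapisresponse": {"count": 1, "api": [
#       {"name": "listVirtualMachines", "description": "...", "isasync": False,
#        "related": "", "params": []}]}})
#   -> {"count": 1, "asyncapis": [], "verbs": ["list"],
#       "list": {"virtualmachines": {"name": "listVirtualMachines", ...}}}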
def main(json_file):
"""
cachemaker.py creates a precache datastore of all available apis of
CloudStack and dumps the precache dictionary in an
importable python module. This way we cheat on the runtime overhead of
completing commands and help docs. This reduces the overall search and
cache_miss (computation) complexity from O(n) to O(1) for any valid cmd.
"""
f = open("precache.py", "w")
f.write("""# -*- coding: utf-8 -*-
# Auto-generated code by cachemaker.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.""")
f.write("\napicache = %s" % loadcache(json_file))
f.close()
if __name__ == "__main__":
cache_file = config_fields['core']['cache_file']
print "[cachemaker] Pre-caching using user's cloudmonkey cache", cache_file
if os.path.exists(cache_file):
main(cache_file)
else:
print "[cachemaker] Unable to cache apis, file not found", cache_file
print "[cachemaker] Run cloudmonkey sync to generate cache"
| apache-2.0 | -8,580,099,564,586,530,000 | 32.022099 | 79 | 0.650661 | false | 3.963528 | false | false | false |
Leopardob/dice-dev | dice/dice_extras/scheduler.py | 1 | 4324 | from sys import stderr
from dice.app import BasicApp
from threading import _start_new_thread
def debug(msg):
stderr.write(msg+"\n")
stderr.flush()
class Scheduler:
def __init__(self, project):
self.__project = project
self.__run_stack = []
self.__prepare_stack = []
def schedule_run(self, app):
"""
Tries to run an application. The algorithm is to get all input apps into the FINISHED state
(by calling schedule_run for them if needed) and calling prepare() and run() for the actual app.
:param app:
:return:
"""
# debug("schedule run for "+str(app))
if app in self.__run_stack:
# debug("stack contains "+str(app))
return
self.__run_stack.append(app)
# app.connect("statusChanged", self.__process_run_signals(app))
app.status_changed.connect(self.__process_run_signals(app))
_start_new_thread(self.__schedule_run, (app,))
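    # Illustrative usage (assumed calling code, not part of the original class):
    #   scheduler = Scheduler(project)
    #   scheduler.schedule_run(app)  # runs once all of app.input_apps are FINISHED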
def schedule_prepare(self, app):
# TODO
app.prepare()
def __process_run_signals(self, app):
"""
Returns a function that handles status changes for the given scheduled app.
:param app:
:return:
"""
def status_change_handler():
# debug(str(app)+" changed status to "+app.get_status())
if app.status == BasicApp.FINISHED:
try:
self.__run_stack.remove(app)
except:
pass
# app.disconnect("statusChanged", status_change_handler)
app.status_changed.disconnect(status_change_handler)
for output_app in app.output_apps:
if output_app in self.__run_stack:
self.__try_run(output_app)
elif app.status == BasicApp.ERROR:
try:
self.__run_stack.remove(app)
except:
pass
return status_change_handler
def __schedule_run(self, app):
"""
Scheduling part of schedule_run, extracted to run in its own thread
:param app:
:return:
"""
to_schedule = self.__try_run(app)
# add the input apps to the scheduler if they are not finished
for input_app in to_schedule:
self.schedule_run(input_app)
def __try_run(self, app):
"""
Tries to run the given app if all inputs apps of the app are finished.
Otherwise it returns a list of all unfinished input apps.
:param app:
:return:
"""
all_input_apps_are_finished = True
to_schedule = []
for input_app in app.input_apps:
if input_app.status != BasicApp.FINISHED:
all_input_apps_are_finished = False
to_schedule.append(input_app)
if all_input_apps_are_finished:
# This is the default run behavior:
# prepare() if not already prepared and call run() if prepare() was successful
prepared = app.status == BasicApp.PREPARED
if not prepared:
app.status = BasicApp.PREPARING
try:
if app.prepare():
app.status = BasicApp.PREPARED
prepared = True
except BaseException as e:
app.status = BasicApp.ERROR
self.__project.dice.process_exception(e)
return [] # do not schedule any more apps
if prepared:
app.status = BasicApp.RUNNING
try:
if app.run():
app.status = BasicApp.FINISHED
else:
app.status = BasicApp.ERROR
except BaseException as e:
app.status = BasicApp.ERROR
self.__project.dice.process_exception(e)
return [] # do not schedule any more apps
else:
# Set on WAITING. If called by schedule_run, all apps in to_schedule will be scheduled as well.
# This will cause __process_run_signals to call __try_run again as needed.
app.status = BasicApp.WAITING
return to_schedule
| gpl-3.0 | -5,139,901,510,796,492,000 | 35.336134 | 107 | 0.533302 | false | 4.49948 | false | false | false |
mothsART/linkmanager | linkmserver/__init__.py | 1 | 5077 | import os
import subprocess
import json
import arrow
from flask import (
Flask, render_template, abort,
request, jsonify, g
)
from flask.ext.assets import Environment
# from werkzeug.debug import get_current_traceback
from werkzeug.contrib.cache import SimpleCache
cache = SimpleCache()
from linkmanager import settings
from linkmanager.db import DataBase
app = Flask(__name__)
assets = Environment(app)
if settings.SERVER:
var_path = '/var/cache/linkmanager'
if not os.path.exists(var_path):
os.makedirs(var_path, mode=0o755)
static_path = os.path.join(var_path, 'static')
if not os.path.exists(static_path):
os.symlink(assets.directory, static_path)
assets.directory = static_path
assets.url = assets.url[1:]
db = DataBase()
db.editmode = settings.EDITMODE
def read_only(func):
""" Decorator : get an Unauthorize 403
when read only's settings is True. """
def wrapper():
if settings.READ_ONLY:
return abort(403)
return func()
return wrapper
def is_server(func):
""" Decorator : get an Unauthorize 403
when server settings is True """
def wrapper():
if settings.SERVER:
return abort(403)
return func()
return wrapper
def launch_browser(BROWSER=False):
subprocess.call(
'sleep 0.5;nohup %s http://127.0.0.1:%s/ &' % (
BROWSER,
settings.HTTP_PORT
),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True
)
@app.route("/")
def index():
return render_template(
'index.html',
DEBUG=settings.DEBUG,
SERVER=settings.SERVER,
READ_ONLY=settings.READ_ONLY,
EDITMODE=settings.EDITMODE,
DELETEDIALOG=settings.DELETEDIALOG,
nb_links=len(db)
)
# try:
# error
# except Exception:
# track = get_current_traceback(
# skip=1, show_hidden_frames=True,
# ignore_system_exceptions=False
# )
# track.log()
# abort(500)
@read_only
@is_server
@app.route("/editmode", methods=['GET', 'POST'])
def editmode():
if request.method == 'GET':
return jsonify({'editmode': db.editmode})
db.editmode = not db.editmode
return jsonify({'editmode': db.editmode})
@read_only
@app.route("/add", methods=['POST'])
def add():
fixture = {}
link = request.form['link']
fixture[link] = {
"tags": request.form['tags'].split(),
"priority": request.form['priority'],
"description": request.form['description'],
"title": request.form['title'],
"init date": str(arrow.now())
}
result = db.add_link(json.dumps(fixture))
return jsonify({'is_add': result})
@read_only
@app.route("/update", methods=['POST'])
def update():
fixture = {}
link = request.form['link']
if request.form['link'] != request.form['newlink']:
result = db.delete_link(request.form['link'])
if not result:
return jsonify({'is_update': False})
link = request.form['newlink']
old_link = db.get_link_properties(link)
fixture[link] = {
"tags": request.form['tags'].split(),
"priority": request.form['priority'],
"description": request.form['description'],
"title": request.form['title'],
"init date": old_link['init date'],
"update date": str(arrow.now())
}
if request.form['link'] != request.form['newlink']:
fixture[link]["init date"] = str(arrow.now())
fixture[link]["update date"] = old_link['update date']
result = db.add_link(json.dumps(fixture))
return jsonify({'is_update': result})
@read_only
@app.route("/delete", methods=['POST'])
def delete():
result = db.delete_link(request.form['link'])
return jsonify({'is_delete': result})
@app.route("/search")
def search():
results = {}
try:
tags = next(request.args.items())[0].split()
links = db.sorted_links(*tags)
except:
links = db.sorted_links()
results = {}
for l in links:
properties = db.get_link_properties(l)
results[l] = properties
return jsonify(**results)
@app.route("/suggest")
def suggest():
tags = request.args.get('tags')
if not tags:
return jsonify({})
keywords = tags.split()
last_keyword = keywords[len(keywords) - 1]
str_suggestion = ' '.join(keywords[:-1])
suggestions = {}
for s in db.complete_tags(last_keyword):
if s not in keywords:
suggestions[str_suggestion + ' ' + s] = 10
return jsonify(**suggestions)
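# Illustrative sketch of the endpoint above (tag values are assumptions):
#   GET /suggest?tags=web+py  ->  {"web python": 10, "web pypi": 10}
# i.e. each key is the query with its last keyword completed, each value a fixed
# weight of 10.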
def run(browser=None):
BROWSER = settings.BROWSER
if browser:
BROWSER = browser
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
launch_browser(BROWSER)
app.debug = settings.DEBUG
app.run(host=settings.HTTP_HOST, port=settings.HTTP_PORT)
settings.set_user_conf(WEBAPP=['EDITMODE', db.editmode])
if __name__ == '__main__':
app.debug = settings.DEBUG
app.run(host=settings.HTTP_HOST, port=settings.HTTP_PORT)
| bsd-2-clause | 3,865,095,246,136,694,300 | 25.862434 | 62 | 0.609218 | false | 3.600709 | false | false | false |
bfagundez/apex_paperboy | apex_executor.py | 1 | 4129 | # -*- coding: utf-8 -*-
# unicode color codes
OKGREEN = '\033[92m'
NOTOKRED = '\033[91m'
OKBLUE = '\033[94m'
WARNING = '\033[93m'
HEADER = '\033[95m'
ENDC = '\033[0m'
from lib.sforce.base import SforceBaseClient
from suds import WebFault
from lib.sforce.partner import SforcePartnerClient
from lib.sforce.metadata import SforceMetadataClient
from lib.sforce.apex import SforceApexClient
from lib.sforce.tooling import SforceToolingClient
from optparse import OptionParser
import lib.mm_util as mm_util
import time
import os
import sys
# Adds an option to command line to clean up all transactions and mappings on start
# for dev purposes only.
parser = OptionParser()
parser.add_option("-u", "--user", dest="user", type="string", help="Salesforce username")
parser.add_option("-p", "--password", dest="password", type="string", help="Salesforce password")
parser.add_option("-t", "--token", dest="token", type="string", help="Salesforce token")
parser.add_option("-s", "--apex-script", dest="apexscriptfilename", type="string", help="Apex code to execute")
(options, args) = parser.parse_args()
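# Illustrative invocation (credentials and script name are placeholders):
#   python apex_executor.py -u user@example.org -p secret -t TOKEN -s hello.apex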
missing_args = False
error_log = '\n'+NOTOKRED+' ✗'+ENDC+' Errors found \n\n'
if options.user == None:
missing_args = True
error_log += " ~ Salesforce username is required \n"
if options.password == None:
missing_args = True
error_log += " ~ Salesforce password is required \n"
if options.apexscriptfilename == None:
missing_args = True
error_log += " ~ Apex script filename is required \n"
if missing_args:
print error_log
else:
print ' \n🏁 Starting apex execution \n '
print '- Loading partner WSDL'
try:
wsdl_location = os.path.join(mm_util.WSDL_PATH, 'partner.xml')
client = SforcePartnerClient(
wsdl_location,
apiVersion=None,
environment='production',
sid=None,
metadata_server_url=None,
server_url=None)
print OKGREEN+'✓'+ENDC+' WSDL loaded \n '
except Exception, e:
print '\n'+NOTOKRED+'✗'+ENDC+' Unable to load the WSDL '
print e.message
sys.exit()
try:
# login using partner wsdl
print '- Authenticating'
# sometimes password and token are provided together.
# token parameter is not required.
token_safe = ''
if options.token:
token_safe = options.token
client.login(options.user,options.password,token_safe)
# use token with apex wsdl
apex_wsdl_location = os.path.join(mm_util.WSDL_PATH, 'apex.xml')
apex_client = SforceApexClient(
apex_wsdl_location,
apiVersion=mm_util.SFDC_API_VERSION,
environment='production',
sid=client.getSessionId(),
metadata_server_url=client.getMetadaServerUrl(),
server_url=mm_util.get_sfdc_endpoint_by_type('enterprise'))
        print OKGREEN+'✓'+ENDC+' Authentication successful. \n '
except Exception, e:
print '\n'+NOTOKRED+'✗'+ENDC+' Error during authentication '
print e.message
sys.exit()
try:
print '- Opening the file'
# open script file
f = open(options.apexscriptfilename, "r")
apex_code = f.read()
        print OKGREEN+'✓'+ENDC+' File loaded successfully. \n '
except Exception, e:
print '\n'+NOTOKRED+'✗'+ENDC+' Error found reading the file '
print e.message
sys.exit()
try:
# Execute code
print '- Executing the script'
t0 = time.clock()
apex_execution = apex_client.executeAnonymous({"body":apex_code})
if apex_execution.success:
            print OKGREEN+'✓'+ENDC+' Script executed successfully 🍻 \n '
print 'Code executed in '+str(time.clock() - t0)+ ' seconds. \n'
else:
print NOTOKRED+'✗'+ENDC+' Errors found: '
if apex_execution.exceptionMessage:
print apex_execution.exceptionMessage
if apex_execution.compileProblem:
print 'Compilation error: '+apex_execution.compileProblem
print 'Line: '+str(apex_execution.line)
except Exception, e:
#logger.error(str(e.message))
print '\n'+NOTOKRED+'✗'+ENDC+' Errors found '
print e.message
sys.exit()
| mit | -8,470,200,002,246,769,000 | 30.083333 | 111 | 0.660736 | false | 3.393714 | false | false | false |
garbear/EventGhost | plugins/PS3/__init__.py | 1 | 55108 | README = """\
<u><b>1) Bluetooth</b></u>
Tested successfully with WIDCOMM Bluetooth Software 5.1.0.1100.
5.1.0.1100 is not the latest version, but it is the most versatile one and
works with most Bluetooth adapters in a patched version.
See <a href="http://forum.gsmhosting.com/vbb/forumdisplay.php?f=237">
this thread</a> for help with the patched WIDCOMM Bluetooth Software 5.1.0.1100
(restarting the PC, or right-clicking the Bluetooth icon in the task bar and
stopping/starting the Bluetooth device, can help).
On the remote, press "Start+Enter" simultaneously to activate discoverable mode.
On the PC choose "Next (no code)".
Check in "Device Manager" / "Human Interface Devices" that the
PS3 Remote appears as "HID-compliant game controller".
If it appears as "HID Keyboard Device" under "Keyboards" instead,
delete it, then right-click the Bluetooth icon in the task bar and
stop/start the Bluetooth device to force a new device detection.
This time it should appear as "HID-compliant game controller".
<u><b>2) Plugin</b></u>
This plugin generates:
<ul>
<li>ENDURING events named like "HID.Eject"</li>
</ul>
and lots of additional NORMAL events for:
<ul>
<li>short click on the remote, event names end with ".S", e.g. "HID.Eject.S"</li>
<li>long click on the remote, event names end with ".L"</li>
<li>double click on the remote, event names end with ".D"</li>
</ul>
and special, optionally selectable events:
<ul>
<li>"Sleep" when remote is not used</li>
<li>"Hibernate" when remote is not use during a long time (also puts the remote into low-power mode
if using the Widcomm Bluetooth stack)</li>
<li>"WakeUp" for first event after "Sleep" or "Hibernate"</li>
<li>"Zone.X" where X is relative to Zone Key in Remote (see Remote paper manual)
event generated when a new key is pressed in another zone.
each remote key belong of on zone except one, the key with strange
symbol below the directional pad. this is by design.</li>
<li>"Release" can be generated for each relase of each key.</li>
</ul>
Of course all these additional events are not needed,
it's possible to do the same thing by EventGhost configuration
but it's far more simple to have these events available
ready to use, than play with timer inside EventGhost.
This remote can generate events when 2 keys are pressed simultaneously.
In this case the event code genered is an hexadecimal value.
Note: some keys combination generate the same event.
This is a Remote issue.
After the "Hibernate" period expires, the remote will be put into a low-power (SNIFF) mode.
It may take a few seconds for the first button press to be registered in this mode.
The plugin will also automatically re-detect the PS3 remote after being in standby mode.
"""
eg.RegisterPlugin(
name = "PlayStation 3 Bluetooth Remote",
author = "Thierry Couquillou, Tim Delaney",
version = "3.0.0",
kind = "remote",
url="http://www.eventghost.net/forum/viewtopic.php?t=640",
description = "Hardware plugin for the PS3 Bluetooth Remote (based on the HID code of Bartman)",
canMultiLoad = True,
help = README,
)
import itertools
import time
import binascii
import ctypes
import _winreg
import sys
import threading
import win32con
import win32event
import win32file
import wx
import wx.lib.mixins.listctrl as listmix
from ctypes import Structure, Union, c_byte, c_ubyte, c_char, c_int, c_long, c_ulong, c_ushort, c_wchar
from ctypes import pointer, byref, sizeof, POINTER
from ctypes.wintypes import ULONG, BOOLEAN, BOOL
class Ps3Remote:
button = {}
button["000000FFFFFFFFFFFF00"]= "Release"
button["00000016FFFFFFFFFF01"]= "Eject"
button["00000064FFFFFFFFFF01"]= "Audio"
button["00000065FFFFFFFFFF01"]= "Angle"
button["00000063FFFFFFFFFF01"]= "Subtitle"
button["00000000FFFFFFFFFF01"]= "Num1"
button["00000001FFFFFFFFFF01"]= "Num2"
button["00000002FFFFFFFFFF01"]= "Num3"
button["00000003FFFFFFFFFF01"]= "Num4"
button["00000004FFFFFFFFFF01"]= "Num5"
button["00000005FFFFFFFFFF01"]= "Num6"
button["00000006FFFFFFFFFF01"]= "Num7"
button["00000007FFFFFFFFFF01"]= "Num8"
button["00000008FFFFFFFFFF01"]= "Num9"
button["0000000FFFFFFFFFFF01"]= "Clear"
button["00000009FFFFFFFFFF01"]= "Num0"
button["00000028FFFFFFFFFF01"]= "Time"
button["00000081FFFFFFFFFF01"]= "Red"
button["00000082FFFFFFFFFF01"]= "Green"
button["00000083FFFFFFFFFF01"]= "Yellow"
button["00000080FFFFFFFFFF01"]= "Blue"
button["00000070FFFFFFFFFF01"]= "Display"
button["0000001AFFFFFFFFFF01"]= "TopMenu"
button["00000040FFFFFFFFFF01"]= "PopUpMenu"
button["0000000EFFFFFFFFFF01"]= "Return"
button["10000054FFFFFFFFFF01"]= "Up"
button["300000FFFFFFFFFFFF01"]= "RightUp"
button["20000055FFFFFFFFFF01"]= "Right"
button["600000FFFFFFFFFFFF01"]= "RightDown"
button["40000056FFFFFFFFFF01"]= "Down"
button["C00000FFFFFFFFFFFF01"]= "LeftDown"
button["80000057FFFFFFFFFF01"]= "Left"
button["900000FFFFFFFFFFFF01"]= "LeftUp"
button["0000080BFFFFFFFFFF01"]= "Enter"
button["0010005CFFFFFFFFFF01"]= "Triangle"
button["0020005DFFFFFFFFFF01"]= "Circle"
button["0080005FFFFFFFFFFF01"]= "Square"
button["0040005EFFFFFFFFFF01"]= "Cross"
button["0004005AFFFFFFFFFF01"]= "L1"
button["00010058FFFFFFFFFF01"]= "L2"
button["02000051FFFFFFFFFF01"]= "L3"
button["00000143FFFFFFFFFF01"]= "Zarbi"
button["01000050FFFFFFFFFF01"]= "Select"
button["08000053FFFFFFFFFF01"]= "Start"
button["0008005BFFFFFFFFFF01"]= "R1"
button["00020059FFFFFFFFFF01"]= "R2"
button["04000052FFFFFFFFFF01"]= "R3"
button["00000033FFFFFFFFFF01"]= "Scan-"
button["00000032FFFFFFFFFF01"]= "Play"
button["00000034FFFFFFFFFF01"]= "Scan+"
button["00000030FFFFFFFFFF01"]= "Prev"
button["00000038FFFFFFFFFF01"]= "Stop"
button["00000031FFFFFFFFFF01"]= "Next"
button["00000060FFFFFFFFFF01"]= "SlowStep-"
button["00000039FFFFFFFFFF01"]= "Pause"
button["00000061FFFFFFFFFF01"]= "SlowStep+"
zone = {}
zone["000000FFFFFFFFFFFF00"]= "none"
zone["00000016FFFFFFFFFF01"]= "Zone.A1"
zone["00000064FFFFFFFFFF01"]= "Zone.A1"
zone["00000065FFFFFFFFFF01"]= "Zone.A1"
zone["00000063FFFFFFFFFF01"]= "Zone.A1"
zone["00000000FFFFFFFFFF01"]= "Zone.A2"
zone["00000001FFFFFFFFFF01"]= "Zone.A2"
zone["00000002FFFFFFFFFF01"]= "Zone.A2"
zone["00000003FFFFFFFFFF01"]= "Zone.A2"
zone["00000004FFFFFFFFFF01"]= "Zone.A2"
zone["00000005FFFFFFFFFF01"]= "Zone.A2"
zone["00000006FFFFFFFFFF01"]= "Zone.A2"
zone["00000007FFFFFFFFFF01"]= "Zone.A2"
zone["00000008FFFFFFFFFF01"]= "Zone.A2"
zone["0000000FFFFFFFFFFF01"]= "Zone.A2"
zone["00000009FFFFFFFFFF01"]= "Zone.A2"
zone["00000028FFFFFFFFFF01"]= "Zone.A2"
zone["00000081FFFFFFFFFF01"]= "Zone.A3"
zone["00000082FFFFFFFFFF01"]= "Zone.A3"
zone["00000083FFFFFFFFFF01"]= "Zone.A3"
zone["00000080FFFFFFFFFF01"]= "Zone.A3"
zone["00000070FFFFFFFFFF01"]= "Zone.A3"
zone["0000001AFFFFFFFFFF01"]= "Zone.A3"
zone["00000040FFFFFFFFFF01"]= "Zone.A3"
zone["0000000EFFFFFFFFFF01"]= "Zone.A3"
zone["10000054FFFFFFFFFF01"]= "Zone.Pad"
zone["300000FFFFFFFFFFFF01"]= "Zone.Pad"
zone["20000055FFFFFFFFFF01"]= "Zone.Pad"
zone["600000FFFFFFFFFFFF01"]= "Zone.Pad"
zone["40000056FFFFFFFFFF01"]= "Zone.Pad"
zone["C00000FFFFFFFFFFFF01"]= "Zone.Pad"
zone["80000057FFFFFFFFFF01"]= "Zone.Pad"
zone["900000FFFFFFFFFFFF01"]= "Zone.Pad"
zone["0000080BFFFFFFFFFF01"]= "Zone.Pad"
zone["0010005CFFFFFFFFFF01"]= "Zone.B1"
zone["0020005DFFFFFFFFFF01"]= "Zone.B1"
zone["0080005FFFFFFFFFFF01"]= "Zone.B1"
zone["0040005EFFFFFFFFFF01"]= "Zone.B1"
zone["0004005AFFFFFFFFFF01"]= "Zone.B2"
zone["00010058FFFFFFFFFF01"]= "Zone.B2"
zone["02000051FFFFFFFFFF01"]= "Zone.B2"
zone["00000143FFFFFFFFFF01"]= "none"
zone["01000050FFFFFFFFFF01"]= "Zone.B2"
zone["08000053FFFFFFFFFF01"]= "Zone.B2"
zone["0008005BFFFFFFFFFF01"]= "Zone.B2"
zone["00020059FFFFFFFFFF01"]= "Zone.B2"
zone["04000052FFFFFFFFFF01"]= "Zone.B2"
zone["00000033FFFFFFFFFF01"]= "Zone.C"
zone["00000032FFFFFFFFFF01"]= "Zone.C"
zone["00000034FFFFFFFFFF01"]= "Zone.C"
zone["00000030FFFFFFFFFF01"]= "Zone.C"
zone["00000038FFFFFFFFFF01"]= "Zone.C"
zone["00000031FFFFFFFFFF01"]= "Zone.C"
zone["00000060FFFFFFFFFF01"]= "Zone.C"
zone["00000039FFFFFFFFFF01"]= "Zone.C"
zone["00000061FFFFFFFFFF01"]= "Zone.C"
class Text:
manufacturer = "Manufacturer"
deviceName = "Device Name"
connected = "Connected"
eventName = "Event prefix (optional):"
yes = "Yes"
no = "No"
eventsSettings = "Remote Events Settings"
enduringEvents = "Trigger enduring events for buttons"
rawDataEvents = "Use raw Data as event name"
ps3Settings = "PS3 Remote Events Settings"
ps3DataEvents = "Use ps3 Remote Key as event name"
ps3Release = "Generate ps3 Remote Release event"
ps3Zone = "Generate ps3 Remote Zone event"
shortKeyTime = "Short press if lower than"
longKeyTime = "Long press if greater than"
sleepTime = "Sleep event generated after"
hibernateTime = "Hibernate event generated after"
seconds = "seconds"
noOtherPort = "Use selected device only if connected to current port"
errorFind = "Error finding HID device: "
errorOpen = "Error opening HID device: "
errorRead = "Error reading HID device: "
errorRetrieval = "Error getting HID device info."
errorMultipleDevices = "Multiple devices found. Don't know which to use."
errorInvalidDataIndex = "Found data index not defined as button or control value."
vendorID = "Vendor ID "
enteredLowPower = "%s entered low-power mode"
exitedLowPower = "%s exited low-power mode"
#structures for ctypes
class GUID(Structure):
_fields_ = [
("Data1", c_ulong),
("Data2", c_ushort),
("Data3", c_ushort),
("Data4", c_byte * 8)
]
class SP_DEVICE_INTERFACE_DATA(Structure):
_fields_ = [("cbSize", c_ulong),
("InterfaceClassGuid", GUID),
("Flags", c_ulong),
("Reserved", POINTER(ULONG))
]
class SP_DEVICE_INTERFACE_DETAIL_DATA_A(Structure):
_fields_ = [("cbSize", c_ulong),
("DevicePath", c_char * 255)
]
class HIDD_ATTRIBUTES(Structure):
_fields_ = [("cbSize", c_ulong),
("VendorID", c_ushort),
("ProductID", c_ushort),
("VersionNumber", c_ushort)
]
class HIDP_CAPS(Structure):
_fields_ = [
("Usage", c_ushort),
("UsagePage", c_ushort),
("InputReportByteLength", c_ushort),
("OutputReportByteLength", c_ushort),
("FeatureReportByteLength", c_ushort),
("Reserved", c_ushort * 17),
("NumberLinkCollectionNodes", c_ushort),
("NumberInputButtonCaps", c_ushort),
("NumberInputValueCaps", c_ushort),
("NumberInputDataIndices", c_ushort),
("NumberOutputButtonCaps", c_ushort),
("NumberOutputValueCaps", c_ushort),
("NumberOutputDataIndices", c_ushort),
("NumberFeatureButtonCaps", c_ushort),
("NumberFeatureValueCaps", c_ushort),
("NumberFeatureDataIndices", c_ushort)
]
class HIDP_CAPS_UNION(Union):
class HIDP_BUTTON_CAPS_RANGE(Structure):
_fields_ = [
("UsageMin", c_ushort),
("UsageMax", c_ushort),
("StringMin", c_ushort),
("StringMax", c_ushort),
("DesignatorMin", c_ushort),
("DesignatorMax", c_ushort),
("DataIndexMin", c_ushort),
("DataIndexMax", c_ushort)
]
class HIDP_BUTTON_CAPS_NOT_RANGE(Structure):
_fields_ = [
("Usage", c_ushort),
("Reserved1", c_ushort),
("StringIndex", c_ushort),
("Reserved2", c_ushort),
("DesignatorIndex", c_ushort),
("Reserved3", c_ushort),
("DataIndex", c_ushort),
("Reserved4", c_ushort)
]
_fields_ = [
("Range", HIDP_BUTTON_CAPS_RANGE),
("NotRange", HIDP_BUTTON_CAPS_NOT_RANGE)
]
class HIDP_BUTTON_CAPS(Structure):
_fields_ = [
("UsagePage", c_ushort),
("ReportID", c_char),
("IsAlias", BOOLEAN),
("BitField", c_ushort),
("LinkCollection", c_ushort),
("LinkUsage", c_ushort),
("LinkUsagePage", c_ushort),
("IsRange", BOOLEAN),
("IsStringRange", BOOLEAN),
("IsDesignatorRange", BOOLEAN),
("IsAbsolute", BOOLEAN),
("Reserved", c_ulong * 10),
("Info", HIDP_CAPS_UNION)
]
class HIDP_VALUE_CAPS(Structure):
_fields_ = [
("UsagePage", c_ushort),
("ReportID", c_char),
("IsAlias", BOOLEAN),
("BitField", c_ushort),
("LinkCollection", c_ushort),
("LinkUsage", c_ushort),
("LinkUsagePage", c_ushort),
("IsRange", BOOLEAN),
("IsStringRange", BOOLEAN),
("IsDesignatorRange", BOOLEAN),
("IsAbsolute", BOOLEAN),
("HasNull", BOOLEAN),
("Reserved", c_char),
("BitSize", c_ushort),
("ReportCount", c_ushort),
("Reserved2", c_ushort * 5),
("UnitsExp", c_ulong),
("Units", c_ulong),
("LogicalMin", c_long),
("LogicalMax", c_long),
("PhysicalMin", c_long),
("PhysicalMax", c_long),
("Info", HIDP_CAPS_UNION)
]
class HIDP_DATA(Structure):
class HIDP_DATA_VALUE(Union):
_fields_ = [
("RawValue", c_ulong),
("On", BOOLEAN),
]
_fields_ = [
("DataIndex", c_ushort),
("Reserved", c_ushort),
("Data", HIDP_DATA_VALUE)
]
# Flags controlling what is included in the device information set built
# by SetupDiGetClassDevs
DIGCF_DEFAULT = 0x00000001 # only valid with DIGCF_DEVICEINTERFACE
DIGCF_PRESENT = 0x00000002
DIGCF_ALLCLASSES = 0x00000004
DIGCF_PROFILE = 0x00000008
DIGCF_DEVICEINTERFACE = 0x00000010
#constants to identify the device info
DEVICE_PATH = 0
VENDOR_ID = 1
VENDOR_STRING = 2
PRODUCT_ID = 3
PRODUCT_STRING = 4
VERSION_NUMBER= 5
BLUETOOTH_ADDRESS = 6
BLUETOOTH_LINK_MODE = MAX_INDEX = 7
#link mode
(
LINK_MODE_NORMAL,
LINK_MODE_HOLD,
LINK_MODE_SNIFF,
LINK_MODE_PARK,
) = xrange(4)
# See if we've got widcomm - if not, we won't be changing the link mode
ALLOW_CANCEL_SNIFF = True
try:
widcommDLL = ctypes.cdll.widcommsdk
except WindowsError:
widcommDLL = None
else:
IsStackServerUp = getattr(widcommDLL, '?IsStackServerUp@CBtIf@@QAEHXZ')
IsStackServerUp.restype = BOOL
if not IsStackServerUp():
widcommDLL = None
if widcommDLL is None:
def set_sniff_mode(bd_addr):
return False
def cancel_sniff_mode(bd_addr):
return False
def read_link_mode(bd_addr):
return None
else:
SetSniffMode = getattr(widcommDLL, '?SetSniffMode@CBtIf@@SAHQAE@Z')
SetSniffMode.restype = BOOL
CancelSniffMode = getattr(widcommDLL, '?CancelSniffMode@CBtIf@@SAHQAE@Z')
CancelSniffMode.restype = BOOL
ReadLinkMode = getattr(widcommDLL, '?ReadLinkMode@CBtIf@@SAHQAEPAE@Z')
ReadLinkMode.restype = BOOLEAN
def set_sniff_mode(bd_addr):
result = SetSniffMode(bd_addr)
return bool(result)
def cancel_sniff_mode(bd_addr):
if ALLOW_CANCEL_SNIFF:
result = CancelSniffMode(bd_addr)
return bool(result)
return False
def read_link_mode(bd_addr):
mode = c_ubyte(0)
result = ReadLinkMode(bd_addr, byref(mode))
if result:
return mode.value
return None
def check_link_mode_sniff(device):
if device is None:
return
mode = read_link_mode(device[BLUETOOTH_ADDRESS])
if mode == LINK_MODE_SNIFF and mode != device[BLUETOOTH_LINK_MODE]:
device[BLUETOOTH_LINK_MODE] = mode
print Text.enteredLowPower % (device_name(device),)
def check_link_mode_no_sniff(device):
if device is None:
return
mode = read_link_mode(device[BLUETOOTH_ADDRESS])
if mode == LINK_MODE_NORMAL and mode != device[BLUETOOTH_LINK_MODE]:
device[BLUETOOTH_LINK_MODE] = mode
print Text.exitedLowPower % (device_name(device),)
#helper class to iterate, find and open hid devices
class HIDHelper:
text = Text
deviceList = []
def __init__(self):
self.UpdateDeviceList()
def UpdateDeviceList(self):
self.deviceList = []
#dll references
setupapiDLL = ctypes.windll.setupapi
hidDLL = ctypes.windll.hid
#prepare Interfacedata
interfaceInfo = SP_DEVICE_INTERFACE_DATA()
interfaceInfo.cbSize = sizeof(interfaceInfo)
#prepare InterfaceDetailData Structure
interfaceDetailData = SP_DEVICE_INTERFACE_DETAIL_DATA_A()
interfaceDetailData.cbSize = 5
#prepare HIDD_ATTRIBUTES
hiddAttributes = HIDD_ATTRIBUTES()
hiddAttributes.cbSize = sizeof(hiddAttributes)
#get guid for HID device class
g = GUID()
hidDLL.HidD_GetHidGuid(byref(g))
#get handle to the device information set
hinfo = setupapiDLL.SetupDiGetClassDevsA(byref(g), None, None,
DIGCF_PRESENT + DIGCF_DEVICEINTERFACE)
#enumerate devices
i = 0
while setupapiDLL.SetupDiEnumDeviceInterfaces(hinfo,
None, byref(g), i, byref(interfaceInfo)):
device = {}
i += 1
#get the required size
requiredSize = c_ulong()
setupapiDLL.SetupDiGetDeviceInterfaceDetailA(hinfo,
byref(interfaceInfo), None, 0, byref(requiredSize), None)
if requiredSize.value > 250:
eg.PrintError(self.text.errorRetrieval)
continue #prevent a buffer overflow
#get the actual info
setupapiDLL.SetupDiGetDeviceInterfaceDetailA(
hinfo,
byref(interfaceInfo),
byref(interfaceDetailData),
requiredSize,
pointer(requiredSize),
None
)
device[DEVICE_PATH] = interfaceDetailData.DevicePath
#get handle to HID device
try:
hidHandle = win32file.CreateFile(
device[DEVICE_PATH],
win32con.GENERIC_READ | win32con.GENERIC_WRITE,
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
None,
win32con.OPEN_EXISTING,
0,
0
)
#skipping devices which cannot be opened
            #(e.g. mice & keyboards, which are opened exclusively by the OS)
if int(hidHandle) <= 0:
continue
except:
continue
#getting additional info
hidDLL.HidD_GetAttributes(int(hidHandle), byref(hiddAttributes))
device[VENDOR_ID] = hiddAttributes.VendorID
device[PRODUCT_ID] = hiddAttributes.ProductID
device[VERSION_NUMBER] = hiddAttributes.VersionNumber
#prepare string buffer for device info strings
hidpStringType = c_wchar * 128
infoStr = hidpStringType()
#getting manufacturer
result = hidDLL.HidD_GetManufacturerString(
int(hidHandle), byref(infoStr), ctypes.sizeof(infoStr))
if not result or len(infoStr.value) == 0:
#build a generic ManufacturerString with the vendor ID
device[VENDOR_STRING] = self.text.vendorID + str(hiddAttributes.VendorID)
else:
device[VENDOR_STRING] = infoStr.value
#getting device name
result = hidDLL.HidD_GetProductString(
int(hidHandle), byref(infoStr), ctypes.sizeof(infoStr))
if not result or len(infoStr.value) == 0:
#getting product name via registry
devicePathSplit = device[DEVICE_PATH][4:].split("#")
regkey = "SYSTEM\\CurrentControlSet\\Enum\\" + devicePathSplit[0] + \
"\\" + devicePathSplit[1] + "\\" + devicePathSplit[2]
regHandle = _winreg.OpenKey( _winreg.HKEY_LOCAL_MACHINE, regkey)
device[PRODUCT_STRING], regType = _winreg.QueryValueEx(regHandle, "DeviceDesc")
_winreg.CloseKey(regHandle)
else:
device[PRODUCT_STRING] = infoStr.value
#close handle
win32file.CloseHandle(hidHandle)
#add device to internal list
self.deviceList.append(device)
#end loop
#destroy deviceinfolist
setupapiDLL.SetupDiDestroyDeviceInfoList(hinfo)
# try to find Bluetooth device IDs
self.findBluetoothDeviceIds(self.deviceList)
def findBluetoothDeviceIds(self, deviceList):
# try to find Bluetooth device ID - we'll check the Widcomm section of the registry
regkey = "SYSTEM\\CurrentControlSet\\Enum\\{95C7A0A0-3094-11D7-A202-00508B9D7D5A}"
mapping = self.findBluetoothDeviceIdNameMapping(regkey)
for d in deviceList:
devicePathSplit = d[DEVICE_PATH][4:].split("#")
parentId = devicePathSplit[2]
for parentIdPrefix in mapping:
if parentId.startswith(parentIdPrefix):
d[BLUETOOTH_ADDRESS] = mapping[parentIdPrefix]
d[BLUETOOTH_LINK_MODE] = read_link_mode(d[BLUETOOTH_ADDRESS])
break
else:
d[BLUETOOTH_ADDRESS] = None
d[BLUETOOTH_LINK_MODE] = None
def findBluetoothDeviceIdNameMapping(self, regkey, stack=None, mapping=None):
# iterate through all the subkeys, looking for the 'ParentIdPrefix' and 'BdAddr'
# values. 'LocationInformation' will match the PRODUCT_STRING above.
if stack is None:
stack = []
if mapping is None:
mapping = {}
appended_parent = False
try:
regHandle = _winreg.OpenKey( _winreg.HKEY_LOCAL_MACHINE, regkey)
except WindowsError:
return mapping
try:
parentIdPrefix, regType = _winreg.QueryValueEx(regHandle, "ParentIdPrefix")
stack.append(parentIdPrefix)
appended_parent = True
except EnvironmentError:
pass
try:
bdaddr, regType = _winreg.QueryValueEx(regHandle, "BdAddr")
if stack:
mapping[stack[-1]] = bdaddr
except EnvironmentError:
pass
subkeys = []
try:
for i in itertools.count(0):
subkeys.append(_winreg.EnumKey(regHandle, i))
except EnvironmentError:
pass
_winreg.CloseKey(regHandle)
for k in subkeys:
subkey = regkey + '\\' + k
self.findBluetoothDeviceIdNameMapping(subkey, stack, mapping)
if appended_parent:
stack.pop()
return mapping
def _get_device(self,
noOtherPort,
devicePath,
vendorID,
productID,
versionNumber
):
found = 0
path = ""
for item in self.deviceList:
if noOtherPort:
#just search for devicepath
if item[DEVICE_PATH] == devicePath:
#found right device
return item
else:
#find the right vendor and product ids
if item[VENDOR_ID] == vendorID \
and item[PRODUCT_ID] == productID \
and item[VERSION_NUMBER] == versionNumber:
found = found + 1
if item[DEVICE_PATH] == devicePath:
#found right device
return item
if found == 1:
return item
#multiple devices found
#don't know which to use
if found > 1:
eg.PrintError(self.text.errorMultipleDevices)
return None
#gets the devicePath
#the devicePath parameter is only used with multiple same devices
def GetDevicePath(self,
noOtherPort,
devicePath,
vendorID,
productID,
versionNumber
):
device = self._get_device(noOtherPort, devicePath, vendorID, productID, versionNumber)
if device is None:
return None
return device[DEVICE_PATH]
#gets the device bluetooth address
#the devicePath parameter is only used with multiple same devices
def GetDeviceBTAddress(self,
noOtherPort,
devicePath,
vendorID,
productID,
versionNumber
):
device = self._get_device(noOtherPort, devicePath, vendorID, productID, versionNumber)
if device is None:
return None
return device[BLUETOOTH_ADDRESS]
class TimerThread(threading.Thread):
def __init__(self,
plugin,
name,
interval,
prefix,
evtName,
):
self.start_time = time.time()
self.plugin = plugin
self.name = name
self.interval = interval
self.prefix = prefix
self.evtName = evtName
threading.Thread.__init__(self, name = name)
self.finished = threading.Event()
self.abort = False
def run(self):
now = time.time()
elapsed = now - self.start_time
remaining = max(0, min(self.interval, self.interval - elapsed))
self.finished.wait(remaining)
self.finished.clear()
if not self.abort:
eg.TriggerEvent(self.evtName, prefix = self.prefix)
def stop(self):
self.abort = True
self.finished.set()
DEVICE = None
class HIDThread(threading.Thread):
def __init__(self,
plugin,
helper,
enduringEvents,
rawDataEvents,
ps3DataEvents,
ps3Release,
ps3Zone,
shortKeyTime,
longKeyTime,
sleepTime,
hibernateTime,
noOtherPort,
devicePath,
vendorID,
vendorString,
productID,
productString,
versionNumber,
):
self.ps3Remote = Ps3Remote
self.text = Text
self.deviceName = vendorString + " " + productString
self.abort = False
self._overlappedRead = win32file.OVERLAPPED()
self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
self.evtName = "None"
self.zoneName = "None"
self.maskRegularEvent = False
self.regularEvent = False
self.Started = True
self.timeStarted = time.time()
#getting devicePath
self.devicePath = helper.GetDevicePath(
noOtherPort,
devicePath,
vendorID,
productID,
versionNumber
)
if not self.devicePath:
self.stop_enduring_event()
eg.PrintError(self.text.errorFind + self.deviceName)
return
threading.Thread.__init__(self, name = self.devicePath)
#setting members
self.plugin = plugin
self.helper = helper
self.enduringEvents = enduringEvents
self.rawDataEvents = rawDataEvents
self.ps3DataEvents = ps3DataEvents
self.ps3Release = ps3Release
self.ps3Zone = ps3Zone
self.shortKeyTime = shortKeyTime
self.longKeyTime = longKeyTime
self.sleepTime = sleepTime
self.hibernateTime = hibernateTime
global DEVICE
DEVICE = helper._get_device(
noOtherPort,
devicePath,
vendorID,
productID,
versionNumber
)
self.bdAddr = DEVICE[BLUETOOTH_ADDRESS]
self.start()
def AbortThread(self):
self.abort = True
win32event.SetEvent(self._overlappedRead.hEvent)
def run(self):
#open file/devcice
try:
handle = win32file.CreateFile(
self.devicePath,
win32con.GENERIC_READ | win32con.GENERIC_WRITE,
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
None, # no security
win32con.OPEN_EXISTING,
win32con.FILE_ATTRIBUTE_NORMAL | win32con.FILE_FLAG_OVERLAPPED,
0
)
except:
self.stop_enduring_event()
eg.PrintError(self.text.errorOpen + self.deviceName)
return
#getting data to get the right buffer size
hidDLL = ctypes.windll.hid
setupapiDLL = ctypes.windll.setupapi
#get preparsed data
preparsedData = c_ulong()
result = hidDLL.HidD_GetPreparsedData(
int(handle),
ctypes.byref(preparsedData)
)
#getCaps
hidpCaps = HIDP_CAPS()
result = hidDLL.HidP_GetCaps(preparsedData, ctypes.byref(hidpCaps))
n = hidpCaps.InputReportByteLength
rt = c_int(0) #report type input
rl = c_ulong(n) #report length
maxDataL = hidDLL.HidP_MaxDataListLength(rt, preparsedData)
#getting button caps
bCapsArrL = c_ushort(hidpCaps.NumberInputButtonCaps)
bCapsArrType = HIDP_BUTTON_CAPS * bCapsArrL.value
bCapsArr = bCapsArrType()
hidDLL.HidP_GetButtonCaps(
rt,
ctypes.byref(bCapsArr),
ctypes.byref(bCapsArrL),
preparsedData
)
#getting value caps
vCapsArrL = c_ushort(hidpCaps.NumberInputValueCaps)
vCapsArrType = HIDP_VALUE_CAPS * vCapsArrL.value
vCapsArr = vCapsArrType()
hidDLL.HidP_GetValueCaps(
rt,
ctypes.byref(vCapsArr),
ctypes.byref(vCapsArrL),
preparsedData
)
#parsing caps
# prepare a list to find and store for each index
# whether it is a button or value
oldValues = {}
dataIndexType = [0] * hidpCaps.NumberInputDataIndices
#list entries depending on caps
for i in range(bCapsArrL.value):
if bCapsArr[i].IsRange:
for ii in range(
bCapsArr[i].Info.Range.DataIndexMin,
bCapsArr[i].Info.Range.DataIndexMax + 1
):
dataIndexType[ii] = 1
else:
ii = bCapsArr[i].Info.NotRange.DataIndex
dataIndexType[ii] = 1
for i in range(vCapsArrL.value):
if vCapsArr[i].IsRange:
for ii in range(
vCapsArr[i].Info.Range.DataIndexMin,
vCapsArr[i].Info.Range.DataIndexMax + 1
):
dataIndexType[ii] = 2
oldValues[ii] = sys.maxint
else:
ii = vCapsArr[i].Info.NotRange.DataIndex
dataIndexType[ii] = 2
oldValues[ii] = sys.maxint
#prepare data array with maximum possible length
DataArrayType = HIDP_DATA * maxDataL
data = DataArrayType()
while not self.abort:
#try to read and wait for an event to happen
try:
win32event.ResetEvent(self._overlappedRead.hEvent)
rc, buf = win32file.ReadFile(handle, n, self._overlappedRead)
#waiting for an event
win32event.WaitForSingleObject(
self._overlappedRead.hEvent,
win32event.INFINITE
)
except:
self.stop_enduring_event()
eg.PrintError(self.text.errorRead + self.deviceName)
self.abort = True
#parse data
if len(buf) == n and not self.abort:
#raw data events
if self.ps3DataEvents:
read = str(buf)
keycode = binascii.hexlify(read).upper()[2:22]
try:
evtName = self.ps3Remote.button[keycode]
zoneName = self.ps3Remote.zone[keycode]
regularEvent = True
except KeyError:
evtName = keycode
zoneName = "Extended"
regularEvent = False
# Make sure any time we get a keypress, we come out of low-power mode
                    result = cancel_sniff_mode(self.bdAddr)
if result:
eg.scheduler.AddTask(1.0, check_link_mode_no_sniff, DEVICE)
if self.enduringEvents:
self.stop_enduring_event()
prefix = self.plugin.info.eventPrefix
currentTime = time.time()
elapsedTime = currentTime - self.timeStarted
self.timeStarted = time.time()
if self.Started:
if not self.regularEvent or evtName == "Release":
if elapsedTime < self.shortKeyTime:
self.plugin.TriggerEvent(self.evtName + ".S")
self.Started = False
if evtName == "Release":
if self.sleepTime > 0:
self.Timer2 = TimerThread(self.plugin, "Timer2", self.sleepTime, prefix, "Sleep")
self.Timer2.start()
if self.hibernateTime > 0:
self.Timer3 = TimerThread(self.plugin, "Timer3", self.hibernateTime, prefix, "Hibernate")
self.Timer3.start()
if self.ps3Release:
self.plugin.TriggerEvent(evtName)
self.maskRegularEvent = False
else:
if not self.maskRegularEvent or not regularEvent:
if elapsedTime > self.sleepTime and self.sleepTime > 0:
self.plugin.TriggerEvent("WakeUp")
self.zoneName = "None"
if self.ps3Zone and self.zoneName != zoneName and zoneName != "none":
self.plugin.TriggerEvent(zoneName)
self.plugin.TriggerEnduringEvent(evtName)
if elapsedTime < self.shortKeyTime and evtName == self.evtName:
self.Timer1 = TimerThread(self.plugin, "Timer1", self.longKeyTime, prefix, evtName + ".M")
self.Timer1.start()
eg.TriggerEvent(evtName + ".D", prefix = prefix)
else:
self.Timer1 = TimerThread(self.plugin, "Timer1", self.longKeyTime, prefix, evtName + ".L")
self.Timer1.start()
self.Started = True
self.evtName = evtName
self.zoneName = zoneName
self.regularEvent = regularEvent
if not regularEvent:
self.maskRegularEvent = True
else:
self.plugin.TriggerEvent(evtName)
elif maxDataL == 0 or self.rawDataEvents:
read = str(buf)
self.plugin.TriggerEvent(
binascii.hexlify(read).upper()
)
else:
dataL = c_ulong(maxDataL)
result = hidDLL.HidP_GetData(
rt,
ctypes.byref(data),
ctypes.byref(dataL),
preparsedData,
ctypes.c_char_p(str(buf)),
rl
)
#parse data to trigger events
btnPressed = []
for i in range(dataL.value):
tmpIndex = data[i].DataIndex
if dataIndexType[tmpIndex] == 1:#button
#collect buttons pressed
btnPressed.append(str(tmpIndex))
elif dataIndexType[tmpIndex] == 2:#control value
newValue = int(data[i].Data.RawValue)
if newValue == oldValues[tmpIndex]:
continue
oldValues[tmpIndex] = newValue
self.plugin.TriggerEvent(
"Value." + str(tmpIndex),
payload = newValue
)
else:
eg.PrintError(self.text.errorInvalidDataIndex)
if len(btnPressed):
#one or more buttons pressed
#btnPressed.sort()
evtName = "Button." + "+".join(btnPressed)
if self.enduringEvents:
self.plugin.TriggerEnduringEvent(evtName)
else:
self.plugin.TriggerEvent(evtName)
elif self.enduringEvents:
#no buttons pressed anymore
self.plugin.EndLastEvent()
else:
#trigger event so that releasing all buttons
#can get noticed even w/o enduring events
self.plugin.TriggerEvent("Button.None")
#loop aborted
if self.enduringEvents:
self.stop_enduring_event()
win32file.CloseHandle(handle)
#free references
hidDLL.HidD_FreePreparsedData(ctypes.byref(preparsedData))
#HID thread finished
def stop_enduring_event(self):
try:
enduringEvents = self.enduringEvents
except AttributeError:
enduringEvents = False
if enduringEvents:
try:
if self.Timer1.isAlive():
self.Timer1.stop()
except AttributeError:
pass
else:
del self.Timer1
try:
if self.Timer2.isAlive():
self.Timer2.stop()
except AttributeError:
pass
else:
del self.Timer2
try:
if self.Timer3.isAlive():
self.Timer3.stop()
except AttributeError:
pass
else:
del self.Timer3
self.plugin.EndLastEvent()
def device_name(device):
return device[VENDOR_STRING] + " " + device[PRODUCT_STRING]
def handle_wake_up(event):
global ALLOW_CANCEL_SNIFF
if event.string == 'System.Resume':
ALLOW_CANCEL_SNIFF = True
device = DEVICE
if device is None:
return
bd_addr = device[BLUETOOTH_ADDRESS]
result = cancel_sniff_mode(bd_addr)
if result:
eg.scheduler.AddTask(1.0, check_link_mode_no_sniff, DEVICE)
def handle_sleep(event):
device = DEVICE
if device is None:
return
bd_addr = device[BLUETOOTH_ADDRESS]
result = set_sniff_mode(bd_addr)
if result:
eg.scheduler.AddTask(1.0, check_link_mode_sniff, DEVICE)
def handle_init(event):
# Put the PS3 remote to sleep if it isn't already
handle_sleep(event)
def handle_machine_sleep(event):
global ALLOW_CANCEL_SNIFF
if event.string == 'System.Suspend':
ALLOW_CANCEL_SNIFF = False
return handle_sleep(event)
INSTANCE = None
def handle_device_attached(event):
instance = INSTANCE
if not isinstance(instance, PS3):
return
eg.actionThread.Call(instance.__stop__)
eg.actionThread.Call(instance.__start__, *instance.args)
class PS3(eg.PluginClass):
helper = None
text = Text
thread = None
def __start__(self, *args):
global INSTANCE
INSTANCE = self
# We store the arguments away so that we can use them again later (i.e. when we resume
# from standby and need to restart ourself).
self.args = args
self.__start(*args)
def __start(self,
eventName,
enduringEvents,
rawDataEvents,
ps3DataEvents,
ps3Release,
ps3Zone,
shortKeyTime,
longKeyTime,
sleepTime,
hibernateTime,
noOtherPort,
devicePath,
vendorID,
vendorString,
productID,
productString,
versionNumber,
# For backwards-compatibility with 2.0.2 and 3.0.0 - if a new config option is added this can just be replaced
dummy=None
):
# Set up bindings to ensure that we handle power states, etc correctly.
eg.Bind('Main.OnInit', handle_init)
eg.Bind('HID.WakeUp', handle_wake_up)
eg.Bind('System.Resume', handle_wake_up)
eg.Bind('HID.Hibernate', handle_sleep)
eg.Bind('System.QuerySuspend', handle_machine_sleep)
eg.Bind('System.Suspend', handle_machine_sleep)
# If we get one of these, we __stop__ and __start__ the plugin so that we
# pick up the device (if necessary).
eg.Bind('System.DeviceAttached', handle_device_attached)
if eventName:
self.info.eventPrefix = eventName
else:
self.info.eventPrefix = "HID"
#ensure helper object is up to date
if not self.helper:
self.helper = HIDHelper()
else:
self.helper.UpdateDeviceList()
#create thread
self.thread = HIDThread(self,
self.helper,
enduringEvents,
rawDataEvents,
ps3DataEvents,
ps3Release,
ps3Zone,
shortKeyTime,
longKeyTime,
sleepTime,
hibernateTime,
noOtherPort,
devicePath,
vendorID,
vendorString,
productID,
productString,
versionNumber
)
def __stop__(self):
global INSTANCE
INSTANCE = None
self.thread.AbortThread()
eg.Unbind('Main.OnInit', handle_init)
eg.Unbind('HID.Hibernate', handle_sleep)
eg.Unbind('System.QuerySuspend', handle_machine_sleep)
eg.Unbind('System.Suspend', handle_machine_sleep)
        eg.Unbind('HID.WakeUp', handle_wake_up)
eg.Unbind('System.Resume', handle_wake_up)
eg.Unbind('System.DeviceAttached', handle_device_attached)
def Configure(self,
eventName = "",
enduringEvents = True,
rawDataEvents = False,
ps3DataEvents = False,
ps3Release = False,
ps3Zone = False,
shortKeyTime = 0.3,
longKeyTime = 0.5,
sleepTime = 5.0,
hibernateTime = 60.0,
noOtherPort = False,
devicePath = None,
vendorID = None,
vendorString = None,
productID = None,
productString = None,
versionNumber = None,
# For backwards-compatibility with 2.0.2 and 3.0.0 - if a new config option is added this can just be replaced
dummy=None
):
#ensure helper object is up to date
if not self.helper:
self.helper = HIDHelper()
else:
self.helper.UpdateDeviceList()
panel = eg.ConfigPanel(self, resizable=True)
#building dialog
hidList = wx.ListCtrl(panel, -1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
#create GUI
hidList.InsertColumn(0, self.text.deviceName)
hidList.InsertColumn(1, self.text.manufacturer)
hidList.InsertColumn(2, self.text.connected)
path = self.helper.GetDevicePath(noOtherPort,
devicePath, vendorID, productID, versionNumber)
#fill list
devices = {}
idx = 0
for item in self.helper.deviceList:
idx = hidList.InsertStringItem(sys.maxint, item[PRODUCT_STRING])
hidList.SetStringItem(idx, 1, item[VENDOR_STRING])
hidList.SetStringItem(idx, 2, self.text.yes)
if item[DEVICE_PATH] == path:
hidList.Select(idx)
devices[idx] = item
#add not connected device to bottom of list
if not path:
if not devicePath:
#just select first entry on first start
hidList.Select(0)
else:
item = {
DEVICE_PATH: devicePath,
VENDOR_ID: vendorID,
VENDOR_STRING: vendorString,
PRODUCT_ID: productID,
PRODUCT_STRING: productString,
VERSION_NUMBER: versionNumber,
}
idx = hidList.InsertStringItem(sys.maxint, item[PRODUCT_STRING])
hidList.SetStringItem(idx, 1, item[VENDOR_STRING])
hidList.SetStringItem(idx, 2, self.text.no)
hidList.Select(idx)
devices[idx] = item
if hidList.GetFirstSelected() == -1:
#no device selected, disable ok and apply button
panel.dialog.buttonRow.okButton.Enable(False)
panel.dialog.buttonRow.applyButton.Enable(False)
#layout
for i in range(hidList.GetColumnCount()):
hidList.SetColumnWidth(i, wx.LIST_AUTOSIZE_USEHEADER)
size = hidList.GetColumnWidth(i)
hidList.SetColumnWidth(i, wx.LIST_AUTOSIZE)
hidList.SetColumnWidth(i, max(size, hidList.GetColumnWidth(i) + 5))
panel.sizer.Add(hidList, 1, flag = wx.EXPAND)
panel.sizer.Add((15,15))
#sizers
eventsGroupSizer = wx.StaticBoxSizer(
wx.StaticBox(panel, -1, self.text.eventsSettings),
wx.VERTICAL
)
eventsSizer = wx.GridBagSizer(0, 5)
#eventname
eventsSizer.Add(
wx.StaticText(panel, -1, self.text.eventName),
(0, 0),
flag = wx.ALIGN_CENTER_VERTICAL)
eventNameCtrl = wx.TextCtrl(panel, value = eventName)
eventNameCtrl.SetMaxLength(32)
eventsSizer.Add(eventNameCtrl, (0, 1), (1, 2), flag = wx.EXPAND)
#checkbox for no other port option
noOtherPortCtrl = wx.CheckBox(panel, -1, self.text.noOtherPort)
noOtherPortCtrl.SetValue(noOtherPort)
eventsSizer.Add(noOtherPortCtrl, (1, 0), (1, 3))
#checkbox for enduring event option
enduringEventsCtrl = wx.CheckBox(panel, -1, self.text.enduringEvents)
enduringEventsCtrl.SetValue(enduringEvents)
eventsSizer.Add(enduringEventsCtrl, (2, 0), (1, 3))
#checkbox for raw data events
rawDataEventsCtrl = wx.CheckBox(panel, -1, self.text.rawDataEvents)
rawDataEventsCtrl.SetValue(rawDataEvents)
eventsSizer.Add(rawDataEventsCtrl, (3, 0), (1, 3))
eventsGroupSizer.Add(eventsSizer, 0, wx.ALL, 10)
panel.sizer.Add(eventsGroupSizer, 0, wx.EXPAND)
panel.sizer.Add((15,15))
#sizers
ps3GroupSizer = wx.StaticBoxSizer(
wx.StaticBox(panel, -1, self.text.ps3Settings),
wx.VERTICAL
)
ps3Sizer = wx.GridBagSizer(0, 5)
#checkbox for ps3 data events
ps3DataEventsCtrl = wx.CheckBox(panel, -1, self.text.ps3DataEvents)
ps3DataEventsCtrl.SetValue(ps3DataEvents)
ps3Sizer.Add(ps3DataEventsCtrl, (0, 0), (1, 3))
#checkbox for ps3 release event
ps3ReleaseCtrl = wx.CheckBox(panel, -1, self.text.ps3Release)
ps3ReleaseCtrl.SetValue(ps3Release)
ps3Sizer.Add(ps3ReleaseCtrl, (1, 0), (1, 3))
#checkbox for ps3 zone event
ps3ZoneCtrl = wx.CheckBox(panel, -1, self.text.ps3Zone)
ps3ZoneCtrl.SetValue(ps3Zone)
ps3Sizer.Add(ps3ZoneCtrl, (2, 0), (1, 3))
#short key time
ps3Sizer.Add(
wx.StaticText(panel, -1, self.text.shortKeyTime),
(3, 0), flag = wx.ALIGN_CENTER_VERTICAL)
shortKeyTimeCtrl = eg.SpinNumCtrl(
panel, -1, shortKeyTime, size=(200,-1), integerWidth=7, increment=0.05
)
ps3Sizer.Add(shortKeyTimeCtrl, (3, 1), flag = wx.EXPAND)
ps3Sizer.Add(
wx.StaticText(panel, -1, self.text.seconds),
(3, 2), (1, 2),
flag = wx.ALIGN_CENTER_VERTICAL)
#long key time
ps3Sizer.Add(
wx.StaticText(panel, -1, self.text.longKeyTime),
(4, 0), flag = wx.ALIGN_CENTER_VERTICAL)
longKeyTimeCtrl = eg.SpinNumCtrl(
panel, -1, longKeyTime, size=(200,-1), integerWidth=7, increment=0.05
)
ps3Sizer.Add(longKeyTimeCtrl, (4, 1), flag = wx.EXPAND)
ps3Sizer.Add(
wx.StaticText(panel, -1, self.text.seconds),
(4, 2), (1, 2),
flag = wx.ALIGN_CENTER_VERTICAL)
#sleep time
ps3Sizer.Add(
wx.StaticText(panel, -1, self.text.sleepTime),
(5, 0), flag = wx.ALIGN_CENTER_VERTICAL)
sleepTimeCtrl = eg.SpinNumCtrl(
panel, -1, sleepTime, size=(200,-1), integerWidth=7, increment=1.00
)
ps3Sizer.Add(sleepTimeCtrl, (5, 1), flag = wx.EXPAND)
ps3Sizer.Add(
wx.StaticText(panel, -1, self.text.seconds),
(5, 2), (1, 2),
flag = wx.ALIGN_CENTER_VERTICAL)
#hibernate time
ps3Sizer.Add(
wx.StaticText(panel, -1, self.text.hibernateTime),
(6, 0), flag = wx.ALIGN_CENTER_VERTICAL)
hibernateTimeCtrl = eg.SpinNumCtrl(
panel, -1, hibernateTime, size=(200,-1), integerWidth=7, increment=1.00
)
ps3Sizer.Add(hibernateTimeCtrl, (6, 1), flag = wx.EXPAND)
ps3Sizer.Add(
wx.StaticText(panel, -1, self.text.seconds),
(6, 2), (1, 2),
flag = wx.ALIGN_CENTER_VERTICAL)
ps3GroupSizer.Add(ps3Sizer, 0, wx.ALL, 10)
panel.sizer.Add(ps3GroupSizer, 0, wx.EXPAND)
def OnHidListSelect(event):
panel.dialog.buttonRow.okButton.Enable(True)
panel.dialog.buttonRow.applyButton.Enable(True)
event.Skip()
def OnRawDataEventsChange(event):
enduringEventsCtrl.Enable(not rawDataEventsCtrl.GetValue())
ps3DataEventsCtrl.Enable(not rawDataEventsCtrl.GetValue())
event.Skip()
def OnEnduringEventsChange(event):
rawDataEventsCtrl.Enable(not enduringEventsCtrl.GetValue())
ps3ReleaseCtrl.Enable(enduringEventsCtrl.GetValue() and ps3DataEventsCtrl.GetValue())
ps3ZoneCtrl.Enable(enduringEventsCtrl.GetValue() and ps3DataEventsCtrl.GetValue())
event.Skip()
def OnPs3DataEventsChange(event):
rawDataEventsCtrl.Enable(not ps3DataEventsCtrl.GetValue())
ps3ReleaseCtrl.Enable(enduringEventsCtrl.GetValue() and ps3DataEventsCtrl.GetValue())
ps3ZoneCtrl.Enable(enduringEventsCtrl.GetValue() and ps3DataEventsCtrl.GetValue())
event.Skip()
def OnPs3ReleaseChange(event):
rawDataEventsCtrl.Enable(not ps3DataEventsCtrl.GetValue())
event.Skip()
def OnPs3ZoneChange(event):
rawDataEventsCtrl.Enable(not ps3DataEventsCtrl.GetValue())
event.Skip()
OnRawDataEventsChange(wx.CommandEvent())
OnPs3DataEventsChange(wx.CommandEvent())
OnPs3ReleaseChange(wx.CommandEvent())
OnPs3ZoneChange(wx.CommandEvent())
OnEnduringEventsChange(wx.CommandEvent())
rawDataEventsCtrl.Bind(wx.EVT_CHECKBOX, OnRawDataEventsChange)
ps3DataEventsCtrl.Bind(wx.EVT_CHECKBOX, OnPs3DataEventsChange)
ps3ReleaseCtrl.Bind(wx.EVT_CHECKBOX, OnPs3ReleaseChange)
ps3ZoneCtrl.Bind(wx.EVT_CHECKBOX, OnPs3ZoneChange)
enduringEventsCtrl.Bind(wx.EVT_CHECKBOX, OnEnduringEventsChange)
hidList.Bind(wx.EVT_LIST_ITEM_SELECTED, OnHidListSelect)
while panel.Affirmed():
device = devices[hidList.GetFirstSelected()]
panel.SetResult(
eventNameCtrl.GetValue(),
enduringEventsCtrl.GetValue(),
rawDataEventsCtrl.GetValue(),
ps3DataEventsCtrl.GetValue(),
ps3ReleaseCtrl.GetValue(),
ps3ZoneCtrl.GetValue(),
shortKeyTimeCtrl.GetValue(),
longKeyTimeCtrl.GetValue(),
sleepTimeCtrl.GetValue(),
hibernateTimeCtrl.GetValue(),
noOtherPortCtrl.GetValue(),
device[DEVICE_PATH],
device[VENDOR_ID],
device[VENDOR_STRING],
device[PRODUCT_ID],
device[PRODUCT_STRING],
device[VERSION_NUMBER]
)
| gpl-2.0 | 3,992,069,199,257,997,000 | 32.55047 | 126 | 0.562804 | false | 4.023069 | false | false | false |
rocky/python2-trepan | trepan/bwprocessor/msg.py | 1 | 1030 | # -*- coding: utf-8 -*-
''' Common I/O routines'''
# Note for errmsg, msg, and msg_nocr we don't want to simply make
# an assignment of method names like self.msg = self.debugger.intf.msg,
# because we want to allow the interface (intf) to change
# dynamically. That is, the value of self.debugger may change
# in the course of the program and if we made such a method assignment
# we wouldn't pick up that change in our self.msg
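# Minimal illustration of the point above (assumed calling code):
#   proc.msg = proc.debugger.intf.msg   # would bind to whatever intf is current *now*
#   msg(proc, "hi")                     # looks up proc.response on every call,
#                                       # so later changes are picked up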
def errmsg(proc_obj, message, opts={}):
response = proc_obj.response
if 'set_name' in opts: response['name'] = 'error'
return response['errs'].append(message)
def msg(proc_obj, message, opts={}):
response = proc_obj.response
return response['msg'].append(message)
# Demo it
if __name__=='__main__':
class Demo:
def __init__(self):
self.response = {'errs': [],
'msg' : []}
pass
pass
import pprint
demo = Demo()
msg(demo, 'hi')
pp = pprint.PrettyPrinter()
pp.pprint(demo.response)
| gpl-3.0 | -3,844,177,047,269,908,500 | 28.428571 | 71 | 0.618447 | false | 3.691756 | false | false | false |
Scalr/scalr-ctl | scalrctl/commands/farm.py | 2 | 6362 | __author__ = 'Dmitriy Korsakov'
__doc__ = 'Farm management'
import json
import copy
from scalrctl import commands
from scalrctl import click
from scalrctl import request, settings
class FarmTerminate(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms terminate --farmId <ID> --force"
post_template = {
"terminateFarmRequest": {"force": True}
}
def get_options(self):
hlp = "It is used to terminate the Server immediately ignoring scalr.system.server_terminate_timeout."
force_terminate = click.Option(('--force', 'force'), is_flag=True, default=False, help=hlp)
options = [force_terminate, ]
options.extend(super(FarmTerminate, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
force = kwargs.pop("force", None)
post_data = copy.deepcopy(self.post_template)
post_data["terminateFarmRequest"]["force"] = force
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmTerminate, self).pre(*args, **kv)
return arguments, kw
class FarmLaunch(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms launch --farmId <ID>"
post_template = {}
def pre(self, *args, **kwargs):
"""
before request is made
"""
kv = {"import-data": {}}
kv.update(kwargs)
arguments, kw = super(FarmLaunch, self).pre(*args, **kv)
return arguments, kw
class FarmClone(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farms clone --farmId <ID> --name MyNewFarm"
post_template = {
"cloneFarmRequest": {"name": ""}
}
def get_options(self):
hlp = "The name of a new Farm."
name = click.Option(('--name', 'name'), required=True, help=hlp)
options = [name, ]
options.extend(super(FarmClone, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
name = kwargs.pop("name", None)
post_data = copy.deepcopy(self.post_template)
post_data["cloneFarmRequest"]["name"] = name
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmClone, self).pre(*args, **kv)
return arguments, kw
class FarmSuspend(FarmLaunch):
epilog = "Example: scalr-ctl farms suspend --farmId <ID>"
post_template = {}
class FarmResume(FarmLaunch):
epilog = "Example: scalr-ctl farms resume --farmId <ID>"
post_template = {}
class FarmLock(commands.SimplifiedAction):
epilog = "Example: scalr-ctl farm lock --farmId <ID> --comment <COMMENT> --unlock-permission <ANYONE|OWNER|TEAM>"
post_template = {
"lockFarmRequest": {"lockComment": "", "unlockPermission": "anyone"}
}
def get_options(self):
comment = click.Option(('--lockComment', 'comment'), default="", help="Comment to lock a Farm.")
hlp = "If you would like to prevent other users unlocking the Farm you should set 'owner' options.\
With 'team' options only members of the Farm's Teams can unlock this Farm.\
Default value 'anyone' means that anyone with access can unlock this Farm."
unlock_permission = click.Option((
'--unlockPermission', 'unlock_permission'),
default="anyone", show_default=True, help=hlp)
options = [comment, unlock_permission]
options.extend(super(FarmLock, self).get_options())
return options
def pre(self, *args, **kwargs):
"""
before request is made
"""
comment = kwargs.pop("comment", None)
unlock_permission = kwargs.pop("unlock_permission", "anyone")
post_data = copy.deepcopy(self.post_template)
post_data["lockFarmRequest"]["lockComment"] = comment
post_data["lockFarmRequest"]["unlockPermission"] = unlock_permission
kv = {"import-data": post_data}
kv.update(kwargs)
arguments, kw = super(FarmLock, self).pre(*args, **kv)
return arguments, kw
class FarmCreateFromTemplate(commands.Action):
def pre(self, *args, **kwargs):
"""
before request is made
"""
kwargs = self._apply_arguments(**kwargs)
stdin = kwargs.pop('stdin', None)
kwargs["FarmTemplate"] = self._read_object() if stdin else self._edit_example()
return args, kwargs
def run(self, *args, **kwargs):
"""
Callback for click subcommand.
"""
hide_output = kwargs.pop('hide_output', False) # [ST-88]
args, kwargs = self.pre(*args, **kwargs)
uri = self._request_template
payload = {}
data = {}
if '{envId}' in uri and not kwargs.get('envId') and settings.envId:
kwargs['envId'] = settings.envId
if kwargs:
# filtering in-body and empty params
uri = self._request_template.format(**kwargs)
for key, value in kwargs.items():
param = '{{{}}}'.format(key)
if value and (param not in self._request_template):
data.update(value)
if self.dry_run:
click.echo('{} {} {} {}'.format(self.http_method, uri,
payload, data))
# returns dummy response
return json.dumps({'data': {}, 'meta': {}})
data = json.dumps(data)
raw_response = request.request(self.http_method, self.api_level,
uri, payload, data)
response = self.post(raw_response)
text = self._format_response(response, hidden=hide_output, **kwargs)
if text is not None:
click.echo(text)
return response
def _edit_example(self):
commentary = \
'''# The body must be a valid FarmTemplate object.
#
# Type your FarmTemplate object below this line. The above text will not be sent to the API server.'''
text = click.edit(commentary)
if text:
raw_object = "".join([line for line in text.splitlines()
if not line.startswith("#")]).strip()
else:
raw_object = ""
return json.loads(raw_object)
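# Illustrative mapping from CLI invocation to request body (derived from the
# post_template definitions above; shown as comments only):
#
#   scalr-ctl farms terminate --farmId <ID> --force
#       -> {"terminateFarmRequest": {"force": true}}
#   scalr-ctl farms clone --farmId <ID> --name MyNewFarm
#       -> {"cloneFarmRequest": {"name": "MyNewFarm"}}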
| apache-2.0 | 738,455,535,190,341,600 | 31.793814 | 117 | 0.584565 | false | 3.929586 | false | false | false |
dusteye/greasemonkey-scripts | autologin.py | 1 | 1930 | #!/usr/bin/python3
#coding:utf-8
import urllib.request
import urllib.parse
import http.cookiejar
import smtplib
from email.mime.text import MIMEText
from email.header import Header
user = '[email protected]'
passwd = '202,118,239,46'
to = '[email protected]'
def autologin(url, params, req_encoding, res_encoding):
cookiejar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookiejar))
urllib.request.install_opener(opener)
params = urllib.parse.urlencode(params)
params = params.encode(req_encoding)
response = urllib.request.urlopen(url, params)
text = response.read()
text_decode = text.decode(req_encoding)
text = text_decode.encode(res_encoding)
return text
def check(ip, passwd):
params = {"fr":"00", "id_ip":ip, "pass":passwd, "set":"进入"}
req_encoding = 'gb2312'
res_encoding = 'utf-8'
text = autologin('http://hitsun.hit.edu.cn/index1.php', params, req_encoding, res_encoding)
text = str(text, 'utf-8')
	search_text = '所剩余额'  # "所剩余额" = "remaining balance"
for line in text.splitlines():
if line.find(search_text)!=-1:
return(line.split(';')[2].split(' ')[0])
def genMail(iplist, user, to):
context = ''
for (ip,passwd) in iplist.items():
context += ip + ": " + check(ip, passwd) + '\n'
context += '\n'
msg = MIMEText(context.encode('utf-8'), 'plain', 'utf-8')
	sub = Header('当月服务器余额情况', 'utf-8')  # subject: "This month's server balance"
msg['Subject'] = sub
msg['From'] = user
msg['To'] = to
return msg
def sendMail(From, FromPass, To, mail):
if not mail:
return
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login(From, FromPass)
server.sendmail(From, To, mail)
server.close()
if __name__ == '__main__':
iplist = { '202.118.239.46':'123456',
'202.118.250.18':'123456',
'202.118.250.19':'123456'}
mail = genMail(iplist, user, to)
sendMail(user, passwd, to, mail.as_string())
| gpl-2.0 | 511,054,629,723,975,300 | 25.388889 | 92 | 0.680526 | false | 2.698864 | false | false | false |
huaweiswitch/neutron | neutron/agent/ovs_cleanup_util.py | 8 | 3836 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config as agent_config
from neutron.agent import l3_agent
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.common import config
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
opts = [
cfg.BoolOpt('ovs_all_ports',
default=False,
help=_('True to delete all ports on all the OpenvSwitch '
'bridges. False to delete ports created by '
'Neutron on integration and external network '
'bridges.'))
]
conf = cfg.CONF
conf.register_cli_opts(opts)
conf.register_opts(l3_agent.L3NATAgent.OPTS)
conf.register_opts(interface.OPTS)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_use_namespaces_opts_helper(conf)
agent_config.register_root_helper(conf)
return conf
def collect_neutron_ports(bridges, root_helper):
"""Collect ports created by Neutron from OVS."""
ports = []
for bridge in bridges:
ovs = ovs_lib.OVSBridge(bridge, root_helper)
ports += [port.port_name for port in ovs.get_vif_ports()]
return ports
def delete_neutron_ports(ports, root_helper):
"""Delete non-internal ports created by Neutron
Non-internal OVS ports need to be removed manually.
"""
for port in ports:
if ip_lib.device_exists(port):
device = ip_lib.IPDevice(port, root_helper)
device.link.delete()
LOG.info(_("Delete %s"), port)
def main():
"""Main method for cleaning up OVS bridges.
The utility cleans up the integration bridges used by Neutron.
"""
conf = setup_conf()
conf()
config.setup_logging()
configuration_bridges = set([conf.ovs_integration_bridge,
conf.external_network_bridge])
ovs_bridges = set(ovs_lib.get_bridges(conf.AGENT.root_helper))
available_configuration_bridges = configuration_bridges & ovs_bridges
if conf.ovs_all_ports:
bridges = ovs_bridges
else:
bridges = available_configuration_bridges
# Collect existing ports created by Neutron on configuration bridges.
# After deleting ports from OVS bridges, we cannot determine which
# ports were created by Neutron, so port information is collected now.
ports = collect_neutron_ports(available_configuration_bridges,
conf.AGENT.root_helper)
for bridge in bridges:
LOG.info(_("Cleaning %s"), bridge)
ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper)
ovs.delete_ports(all_ports=conf.ovs_all_ports)
# Remove remaining ports created by Neutron (usually veth pair)
delete_neutron_ports(ports, conf.AGENT.root_helper)
LOG.info(_("OVS cleanup completed successfully"))
| apache-2.0 | -5,369,221,548,570,687,000 | 33.872727 | 78 | 0.671794 | false | 4.033649 | true | false | false |
GoogleCloudPlatform/appengine-gcs-client | python/test/rest_api_test.py | 2 | 10917 | # Copyright 2012 Google Inc. All Rights Reserved.
import httplib
import pickle
import unittest
import mock
from google.appengine.ext import ndb
from google.appengine.api import app_identity
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import testbed
try:
from cloudstorage import api_utils
from cloudstorage import rest_api
from cloudstorage import test_utils
except ImportError:
from google.appengine.ext.cloudstorage import api_utils
from google.appengine.ext.cloudstorage import rest_api
from google.appengine.ext.cloudstorage import test_utils
class RestApiTest(unittest.TestCase):
def setUp(self):
super(RestApiTest, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_app_identity_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_urlfetch_stub()
api_utils._thread_local_settings.retry_params = None
def tearDown(self):
self.testbed.deactivate()
super(RestApiTest, self).tearDown()
def testBasicCall(self):
api = rest_api._RestApi('scope')
self.assertEqual(api.scopes, ['scope'])
fut_get_token = ndb.Future()
fut_get_token.set_result('blah')
api.get_token_async = mock.create_autospec(api.get_token_async,
return_value=fut_get_token)
fut_urlfetch = ndb.Future()
fut_urlfetch.set_result(
test_utils.MockUrlFetchResult(200, {'foo': 'bar'}, 'yoohoo'))
ctx_urlfetch = mock.Mock(return_value=fut_urlfetch)
ndb.get_context().urlfetch = ctx_urlfetch
res = api.do_request('http://example.com')
self.assertEqual(res, (200, {'foo': 'bar'}, 'yoohoo'))
ctx_urlfetch.assert_called_once_with(
'http://example.com',
headers={'authorization': 'OAuth blah',
'User-Agent': 'AppEngine-Python-GCS'},
follow_redirects=False,
payload=None,
method='GET',
deadline=None,
callback=None)
def testBasicCallWithUserAgent(self):
user_agent = 'Test User Agent String'
retry_params = api_utils.RetryParams(_user_agent=user_agent)
api = rest_api._RestApi('scope', retry_params=retry_params)
self.assertEqual(api.scopes, ['scope'])
fut_get_token = ndb.Future()
fut_get_token.set_result('blah')
api.get_token_async = mock.create_autospec(api.get_token_async,
return_value=fut_get_token)
fut_urlfetch = ndb.Future()
fut_urlfetch.set_result(
test_utils.MockUrlFetchResult(200, {'foo': 'bar'}, 'yoohoo'))
ctx_urlfetch = mock.Mock(return_value=fut_urlfetch)
ndb.get_context().urlfetch = ctx_urlfetch
res = api.do_request('http://example.com')
self.assertEqual(res, (200, {'foo': 'bar'}, 'yoohoo'))
ctx_urlfetch.assert_called_once_with(
'http://example.com',
headers={'authorization': 'OAuth blah',
'User-Agent': user_agent},
follow_redirects=False,
payload=None,
method='GET',
deadline=None,
callback=None)
def testNoToken(self):
api = rest_api._RestApi('scope')
self.assertEqual(api.scopes, ['scope'])
fut_get_token = ndb.Future()
fut_get_token.set_result(None)
api.get_token_async = mock.create_autospec(api.get_token_async,
return_value=fut_get_token)
fut_urlfetch = ndb.Future()
fut_urlfetch.set_result(
test_utils.MockUrlFetchResult(200, {'foo': 'bar'}, 'yoohoo'))
ctx_urlfetch = mock.Mock(return_value=fut_urlfetch)
ndb.get_context().urlfetch = ctx_urlfetch
res = api.do_request('http://example.com')
self.assertEqual(res, (200, {'foo': 'bar'}, 'yoohoo'))
ctx_urlfetch.assert_called_once_with(
'http://example.com',
headers={'User-Agent': 'AppEngine-Python-GCS'},
follow_redirects=False,
payload=None,
method='GET',
deadline=None,
callback=None)
def testMultipleScopes(self):
api = rest_api._RestApi(['scope1', 'scope2'])
self.assertEqual(api.scopes, ['scope1', 'scope2'])
def testNegativeTimeout(self):
api = rest_api._RestApi('scope')
fut1 = ndb.Future()
fut1.set_result(('token1', 0))
fut2 = ndb.Future()
fut2.set_result(('token2', 0))
api.make_token_async = mock.create_autospec(
api.make_token_async, side_effect=[fut1, fut2])
token1 = api.get_token()
token2 = api.get_token()
self.assertNotEqual(token1, token2)
def testNoExpiredToken(self):
with mock.patch('time.time') as t:
t.side_effect = [2, 4, 5, 6]
api = rest_api._RestApi('scope')
fut1 = ndb.Future()
fut1.set_result(('token1', 3 + api.expiration_headroom))
fut2 = ndb.Future()
fut2.set_result(('token2', 7 + api.expiration_headroom))
api.make_token_async = mock.create_autospec(
api.make_token_async, side_effect=[fut1, fut2])
token = api.get_token()
self.assertEqual('token1', token)
token = api.get_token()
self.assertEqual('token2', token)
token = api.get_token()
self.assertEqual('token2', token)
def testTokenMemoized(self):
ndb_ctx = ndb.get_context()
ndb_ctx.set_cache_policy(lambda key: False)
ndb_ctx.set_memcache_policy(lambda key: False)
api = rest_api._RestApi('scope')
t1 = api.get_token()
self.assertNotEqual(None, t1)
api = rest_api._RestApi('scope')
t2 = api.get_token()
self.assertEqual(t2, t1)
def testTokenSaved(self):
retry_params = api_utils.RetryParams(save_access_token=True)
api = rest_api._RestApi('scope', retry_params=retry_params)
t1 = api.get_token()
self.assertNotEqual(None, t1)
api = rest_api._RestApi('scope', retry_params=retry_params)
t2 = api.get_token()
self.assertEqual(t2, t1)
memcache.flush_all()
ndb.get_context().clear_cache()
api = rest_api._RestApi('scope', retry_params=retry_params)
t3 = api.get_token()
self.assertEqual(t3, t1)
def testDifferentServiceAccounts(self):
api1 = rest_api._RestApi('scope', 123)
api2 = rest_api._RestApi('scope', 456)
t1 = api1.get_token()
t2 = api2.get_token()
self.assertNotEqual(t1, t2)
def testSameServiceAccount(self):
api1 = rest_api._RestApi('scope', 123)
api2 = rest_api._RestApi('scope', 123)
t1 = api1.get_token()
t2 = api2.get_token()
self.assertEqual(t1, t2)
def testCallUrlFetch(self):
api = rest_api._RestApi('scope')
fut = ndb.Future()
fut.set_result(test_utils.MockUrlFetchResult(200, {}, 'response'))
ndb.Context.urlfetch = mock.create_autospec(
ndb.Context.urlfetch,
return_value=fut)
res = api.urlfetch('http://example.com', method='PUT', headers={'a': 'b'})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.content, 'response')
def testPickling(self):
retry_params = api_utils.RetryParams(max_retries=1000)
api = rest_api._RestApi('scope', service_account_id=1,
retry_params=retry_params)
self.assertNotEqual(None, api.get_token())
pickled_api = pickle.loads(pickle.dumps(api))
self.assertEqual(0, len(set(api.__dict__.keys()) ^
set(pickled_api.__dict__.keys())))
for k, v in api.__dict__.iteritems():
if not hasattr(v, '__call__'):
self.assertEqual(v, pickled_api.__dict__[k])
pickled_api.token = None
fut_urlfetch = ndb.Future()
fut_urlfetch.set_result(
test_utils.MockUrlFetchResult(200, {'foo': 'bar'}, 'yoohoo'))
pickled_api.urlfetch_async = mock.create_autospec(
pickled_api.urlfetch_async, return_value=fut_urlfetch)
res = pickled_api.do_request('http://example.com')
self.assertEqual(res, (200, {'foo': 'bar'}, 'yoohoo'))
def testUrlFetchCalledWithUserProvidedDeadline(self):
retry_params = api_utils.RetryParams(urlfetch_timeout=90)
api = rest_api._RestApi('scope', retry_params=retry_params)
resp_fut1 = ndb.Future()
resp_fut1.set_exception(urlfetch.DownloadError())
resp_fut2 = ndb.Future()
resp_fut2.set_result(test_utils.MockUrlFetchResult(httplib.ACCEPTED,
None, None))
ndb.Context.urlfetch = mock.create_autospec(
ndb.Context.urlfetch,
side_effect=[resp_fut1, resp_fut2])
self.assertEqual(httplib.ACCEPTED, api.do_request('foo')[0])
self.assertEqual(
90, ndb.Context.urlfetch.call_args_list[0][1]['deadline'])
self.assertEqual(
90, ndb.Context.urlfetch.call_args_list[1][1]['deadline'])
def testRetryAfterDoRequestUrlFetchTimeout(self):
api = rest_api._RestApi('scope')
resp_fut1 = ndb.Future()
resp_fut1.set_exception(urlfetch.DownloadError())
resp_fut2 = ndb.Future()
resp_fut2.set_result(test_utils.MockUrlFetchResult(httplib.ACCEPTED,
None, None))
ndb.Context.urlfetch = mock.create_autospec(
ndb.Context.urlfetch,
side_effect=[resp_fut1, resp_fut2])
self.assertEqual(httplib.ACCEPTED, api.do_request('foo')[0])
self.assertEqual(2, ndb.Context.urlfetch.call_count)
def testRetryAfterDoRequestResponseTimeout(self):
api = rest_api._RestApi('scope')
resp_fut1 = ndb.Future()
resp_fut1.set_result(test_utils.MockUrlFetchResult(httplib.REQUEST_TIMEOUT,
None, None))
resp_fut2 = ndb.Future()
resp_fut2.set_result(test_utils.MockUrlFetchResult(httplib.ACCEPTED,
None, None))
ndb.Context.urlfetch = mock.create_autospec(
ndb.Context.urlfetch,
side_effect=[resp_fut1, resp_fut2])
self.assertEqual(httplib.ACCEPTED, api.do_request('foo')[0])
self.assertEqual(2, ndb.Context.urlfetch.call_count)
def testRetryAfterAppIdentityError(self):
api = rest_api._RestApi('scope')
token_fut = ndb.Future()
token_fut.set_result('token1')
api.get_token_async = mock.create_autospec(
api.get_token_async,
side_effect=[app_identity.InternalError,
app_identity.InternalError,
token_fut])
resp_fut = ndb.Future()
resp_fut.set_result(test_utils.MockUrlFetchResult(httplib.ACCEPTED,
None, None))
ndb.Context.urlfetch = mock.create_autospec(
ndb.Context.urlfetch,
side_effect=[resp_fut])
self.assertEqual(httplib.ACCEPTED, api.do_request('foo')[0])
self.assertEqual(
'OAuth token1',
ndb.Context.urlfetch.call_args[1]['headers']['authorization'])
self.assertEqual(3, api.get_token_async.call_count)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -69,337,176,130,697,900 | 32.798762 | 79 | 0.633599 | false | 3.38932 | true | false | false |
blackpioter/sendgrid-python | sendgrid/helpers/mail/mail.py | 1 | 10679 | """v3/mail/send response body builder"""
from .personalization import Personalization
from .header import Header
class Mail(object):
"""A request to be sent with the SendGrid v3 Mail Send API (v3/mail/send).
Use get() to get the request body.
"""
def __init__(
self, from_email=None, subject=None, to_email=None, content=None):
"""Create a Mail object.
If parameters are supplied, all parameters must be present.
:param from_email: Email address to send from.
:type from_email: Email, optional
:param subject: Subject line of emails.
:type subject: string, optional
:param to_email: Email address to send to.
:type to_email: Email, optional
:param content: Content of the message.
:type content: Content, optional
"""
self._from_email = None
self._subject = None
self._template_id = None
self._send_at = None
self._batch_id = None
self._asm = None
self._ip_pool_name = None
self._mail_settings = None
self._tracking_settings = None
self._reply_to = None
self._personalizations = []
self._contents = []
self._attachments = []
self._sections = []
self._headers = []
self._categories = []
self._custom_args = []
# Minimum required to send an email
if from_email and subject and to_email and content:
self.from_email = from_email
self.subject = subject
personalization = Personalization()
personalization.add_to(to_email)
self.add_personalization(personalization)
self.add_content(content)
def __str__(self):
"""Get a JSON representation of this Mail request.
:rtype: string
"""
return str(self.get())
def get(self):
"""Get a response body for this Mail.
:rtype: dict
"""
mail = {}
if self.from_email is not None:
mail["from"] = self.from_email.get()
if self.subject is not None:
mail["subject"] = self.subject
if self.personalizations:
mail["personalizations"] = [
personalization.get()
for personalization in self.personalizations
]
if self.contents:
mail["content"] = [ob.get() for ob in self.contents]
if self.attachments:
mail["attachments"] = [ob.get() for ob in self.attachments]
if self.template_id is not None:
mail["template_id"] = self.template_id
if self.sections:
sections = {}
for key in self.sections:
sections.update(key.get())
mail["sections"] = sections
if self.headers:
headers = {}
for key in self.headers:
headers.update(key.get())
mail["headers"] = headers
if self.categories:
mail["categories"] = [category.get() for category in
self.categories]
if self.custom_args:
custom_args = {}
for key in self.custom_args:
custom_args.update(key.get())
mail["custom_args"] = custom_args
if self.send_at is not None:
mail["send_at"] = self.send_at
if self.batch_id is not None:
mail["batch_id"] = self.batch_id
if self.asm is not None:
mail["asm"] = self.asm.get()
if self.ip_pool_name is not None:
mail["ip_pool_name"] = self.ip_pool_name
if self.mail_settings is not None:
mail["mail_settings"] = self.mail_settings.get()
if self.tracking_settings is not None:
mail["tracking_settings"] = self.tracking_settings.get()
if self.reply_to is not None:
mail["reply_to"] = self.reply_to.get()
return mail
@property
def from_email(self):
"""The email from which this Mail will be sent.
:rtype: string
"""
return self._from_email
@from_email.setter
def from_email(self, value):
self._from_email = value
@property
def subject(self):
"""The global, or "message level", subject of this Mail.
This may be overridden by personalizations[x].subject.
:rtype: string
"""
return self._subject
@subject.setter
def subject(self, value):
self._subject = value
@property
def template_id(self):
"""The id of a template that you would like to use.
If you use a template that contains a subject and content (either text
or html), you do not need to specify those at the personalizations nor
message level.
:rtype: int
"""
return self._template_id
@template_id.setter
def template_id(self, value):
self._template_id = value
@property
def send_at(self):
"""A unix timestamp allowing you to specify when you want your email to
be delivered. This may be overridden by the personalizations[x].send_at
parameter. Scheduling more than 72 hours in advance is forbidden.
:rtype: int
"""
return self._send_at
@send_at.setter
def send_at(self, value):
self._send_at = value
@property
def batch_id(self):
"""An ID for this batch of emails.
This represents a batch of emails sent at the same time. Including a
batch_id in your request allows you include this email in that batch,
and also enables you to cancel or pause the delivery of that batch.
For more information, see https://sendgrid.com/docs/API_Reference/Web_API_v3/cancel_schedule_send.html
:rtype: int
"""
return self._batch_id
@batch_id.setter
def batch_id(self, value):
self._batch_id = value
@property
def asm(self):
"""The ASM for this Mail.
:rtype: ASM
"""
return self._asm
@asm.setter
def asm(self, value):
self._asm = value
@property
def mail_settings(self):
"""The MailSettings for this Mail.
:rtype: MailSettings
"""
return self._mail_settings
@mail_settings.setter
def mail_settings(self, value):
self._mail_settings = value
@property
def tracking_settings(self):
"""The TrackingSettings for this Mail.
:rtype: TrackingSettings
"""
return self._tracking_settings
@tracking_settings.setter
def tracking_settings(self, value):
self._tracking_settings = value
@property
def ip_pool_name(self):
"""The IP Pool that you would like to send this Mail email from.
:rtype: string
"""
return self._ip_pool_name
@ip_pool_name.setter
def ip_pool_name(self, value):
self._ip_pool_name = value
@property
def reply_to(self):
"""The email address to use in the Reply-To header.
:rtype: Email
"""
return self._reply_to
@reply_to.setter
def reply_to(self, value):
self._reply_to = value
@property
def personalizations(self):
"""The Personalizations applied to this Mail.
Each object within personalizations can be thought of as an envelope -
it defines who should receive an individual message and how that
message should be handled. A maximum of 1000 personalizations can be
included.
:rtype: list
"""
return self._personalizations
def add_personalization(self, personalizations):
"""Add a new Personalization to this Mail.
:type personalizations: Personalization
"""
self._personalizations.append(personalizations)
@property
def contents(self):
"""The Contents of this Mail. Must include at least one MIME type.
:rtype: list(Content)
"""
return self._contents
def add_content(self, content):
"""Add a new Content to this Mail. Usually the plaintext or HTML
message contents.
:type content: Content
"""
if self._contents is None:
self._contents = []
# Text content should be before HTML content
if content._type == "text/plain":
self._contents.insert(0, content)
else:
self._contents.append(content)
@property
def attachments(self):
"""The attachments included with this Mail.
:returns: List of Attachment objects.
:rtype: list(Attachment)
"""
return self._attachments
def add_attachment(self, attachment):
"""Add an Attachment to this Mail.
:type attachment: Attachment
"""
self._attachments.append(attachment)
@property
def sections(self):
"""The sections included with this Mail.
:returns: List of Section objects.
:rtype: list(Section)
"""
return self._sections
def add_section(self, section):
"""Add a Section to this Mail.
:type attachment: Section
"""
self._sections.append(section)
@property
def headers(self):
"""The Headers included with this Mail.
:returns: List of Header objects.
:rtype: list(Header)
"""
return self._headers
def add_header(self, header):
"""Add a Header to this Mail.
The header provided can be a Header or a dictionary with a single
key-value pair.
:type header: object
"""
if isinstance(header, dict):
(k, v) = list(header.items())[0]
self._headers.append(Header(k, v))
else:
self._headers.append(header)
@property
def categories(self):
"""The Categories applied to this Mail. Must not exceed 10 items
:rtype: list(Category)
"""
return self._categories
def add_category(self, category):
"""Add a Category to this Mail. Must be less than 255 characters.
:type category: string
"""
self._categories.append(category)
@property
def custom_args(self):
"""The CustomArgs attached to this Mail.
Must not exceed 10,000 characters.
:rtype: list(CustomArg)
"""
return self._custom_args
def add_custom_arg(self, custom_arg):
if self._custom_args is None:
self._custom_args = []
self._custom_args.append(custom_arg)
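# Usage sketch (not part of the original module). It assumes the sibling Email
# and Content helpers that ship with this package; adjust imports to your
# installed sendgrid version:
#
#   from sendgrid.helpers.mail import Email, Content, Mail
#
#   mail = Mail(from_email=Email("[email protected]"),
#               subject="Hello",
#               to_email=Email("[email protected]"),
#               content=Content("text/plain", "Hello, World!"))
#   request_body = mail.get()  # dict ready to POST to v3/mail/send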
| mit | -4,177,295,920,119,920,000 | 26.665803 | 110 | 0.575709 | false | 4.371265 | false | false | false |
Philippe-Lawrence/pyBar | classDialog.py | 1 | 3451 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2007 Philippe LAWRENCE
#
# This file is part of pyBar.
# pyBar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyBar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBar; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gtk, GLib
class Singleton(object):
def __new__(cls, *args, **kwargs):
if '_inst' not in vars(cls):
cls._inst = object.__new__(cls, *args, **kwargs)
return cls._inst
class Message(Singleton):
def __init__(self):
    # do not put anything here (singleton; state is set up in ini_message)
pass
def set_message(self, content):
"""Formate le message (ne conserve que la première ligne) et lance son affichage si nécessaire"""
#print "set_message", content
if self._content == content:
return
if content is None:
self._content = None
else:
text, ind = content
pos = text.find('\n')
if not pos == -1:
text = text[:pos]
self._content = (text, ind)
if self.has_changed is False:
self.has_changed = True
GLib.idle_add(self._print_message)
def ini_message(self, box):
self.has_changed = False
self.box = box
self._content = None
def _print_message(self):
"""type = 0 : error; 1 : warning; 2 : info"""
#print "_print_message", self._content
self.has_changed = False
box = self.box
if box is None:
return
for elem in box.get_children():
box.remove(elem)
if self._content is None:
return
text, type = self._content
    # icon
image = Gtk.Image()
if type == 0:
image.set_from_icon_name('dialog-error', Gtk.IconSize.BUTTON)
elif type == 1:
image.set_from_icon_name('dialog-warning', Gtk.IconSize.BUTTON)
elif type == 2:
image.set_from_icon_name("dialog-information", Gtk.IconSize.BUTTON)
elif type == 3:
image.set_from_icon_name("dialog-information", Gtk.IconSize.BUTTON)
image.show()
box.pack_start(image, False, True, 0)
box.set_spacing(10)
label = Gtk.Label()
label.set_text(text)
label.set_use_markup(True)
label.show()
box.pack_start(label, False, True, 0)
class Dialog:
def __init__(self, errors):
text = '\n'.join(errors)
if text == '':
return
dialog = Gtk.Dialog("Erreur", None,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
dialog.set_icon_from_file("glade/logo.png")
box = dialog.get_content_area()
box.set_border_width(80)
hbox = Gtk.HBox()
image = Gtk.Image()
image.set_from_icon_name('dialog-error', Gtk.IconSize.DIALOG)
hbox.pack_start(image, False, False, 0)
label = Gtk.Label(label=text)
label.set_margin_start(10)
label.show()
hbox.pack_start(label, False, False, 0)
hbox.show_all()
box.add(hbox)
result = dialog.run()
dialog.destroy()
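# Usage sketch (illustrative only; the surrounding GTK widgets are assumptions):
#
#   status_box = Gtk.HBox()          # status area owned by the main window
#   msg = Message()                  # Singleton: every call returns the same object
#   msg.ini_message(status_box)
#   msg.set_message(("Invalid input", 0))   # 0 = error, 1 = warning, 2/3 = info
#   Dialog(["Unable to open the file"])     # modal error dialog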
| gpl-3.0 | -4,579,491,287,271,453,000 | 28.228814 | 101 | 0.641635 | false | 3.371457 | false | false | false |
ContinuumIO/blaze | blaze/expr/expressions.py | 3 | 29015 | from __future__ import absolute_import, division, print_function
from collections import Mapping
from keyword import iskeyword
import re
import datashape
from datashape import (
dshape,
DataShape,
Record,
Var,
Fixed,
promote,
Option,
Null,
)
from datashape.predicates import (
isscalar,
iscollection,
isboolean,
isrecord,
istabular,
)
import numpy as np
from odo.utils import copydoc
import toolz
from toolz import concat, memoize, partial, first, unique, merge
from toolz.curried import map, filter
from ..compatibility import _strtypes, builtins, boundmethod, PY2
from .core import (
Node,
_setattr,
common_subexpression,
path,
resolve_args,
subs,
)
from .method_dispatch import select_functions
from ..dispatch import dispatch
from .utils import hashable_index, replace_slices, maxshape
from ..utils import attribute, as_attribute
__all__ = [
'Apply',
'Cast',
'Coalesce',
'Coerce',
'ElemWise',
'Expr',
'Field',
'Label',
'Map',
'Projection',
'ReLabel',
'Selection',
'SimpleSelection',
'Slice',
'Symbol',
'apply',
'cast',
'coalesce',
'coerce',
'discover',
'drop_field',
'label',
'ndim',
'projection',
'relabel',
'selection',
'shape',
'symbol',
]
def isvalid_identifier(s):
"""Check whether a string is a valid Python identifier
Examples
--------
>>> isvalid_identifier('Hello')
True
>>> isvalid_identifier('Hello world')
False
>>> isvalid_identifier('Helloworld!')
False
>>> isvalid_identifier('1a')
False
>>> isvalid_identifier('a1')
True
>>> isvalid_identifier('for')
False
>>> isvalid_identifier(None)
False
"""
# the re module compiles and caches regexs so no need to compile it
return (s is not None and not iskeyword(s) and
re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', s) is not None)
def valid_identifier(s):
"""Rewrite a string to be a valid identifier if it contains
>>> valid_identifier('hello')
'hello'
>>> valid_identifier('hello world')
'hello_world'
>>> valid_identifier('hello.world')
'hello_world'
>>> valid_identifier('hello-world')
'hello_world'
>>> valid_identifier(None)
>>> valid_identifier('1a')
"""
if isinstance(s, _strtypes):
if not s or s[0].isdigit():
return
return s.replace(' ', '_').replace('.', '_').replace('-', '_')
return s
class Expr(Node):
"""
Symbolic expression of a computation
All Blaze expressions (Join, By, Sort, ...) descend from this class. It
contains shared logic and syntax. It in turn inherits from ``Node`` which
holds all tree traversal logic
"""
def __repr__(self):
return str(self)
def _get_field(self, fieldname):
if not isinstance(self.dshape.measure, (Record, datashape.Map)):
if fieldname == self._name:
return self
raise ValueError(
"Can not get field '%s' of non-record expression %s" %
(fieldname, self))
return Field(self, fieldname)
def __getitem__(self, key):
if isinstance(key, _strtypes) and key in self.fields:
return self._get_field(key)
elif isinstance(key, Expr) and iscollection(key.dshape):
return self._select(key)
elif (isinstance(key, list) and
builtins.all(isinstance(k, _strtypes) for k in key)):
if set(key).issubset(self.fields):
return self._project(key)
else:
raise ValueError('Names %s not consistent with known names %s'
% (key, self.fields))
elif (isinstance(key, tuple) and
all(isinstance(k, (int, slice, type(None), list, np.ndarray))
for k in key)):
return sliceit(self, key)
elif isinstance(key, (slice, int, type(None), list, np.ndarray)):
return sliceit(self, (key,))
raise ValueError("Not understood %s[%s]" % (self, key))
def map(self, func, schema=None, name=None):
return Map(self, func, schema, name)
@attribute
def schema(self):
try:
m = self._schema
except AttributeError:
schema = datashape.dshape(self.dshape.measure)
else:
schema = m()
return _setattr(self, 'schema', schema)
@attribute
def dshape(self):
return _setattr(self, 'dshape', self._dshape())
@property
def fields(self):
measure = self.dshape.measure
if isinstance(self.dshape.measure, Option):
measure = measure.ty
if isinstance(measure, Record):
return measure.names
elif isinstance(measure, datashape.Map):
if not isrecord(self.dshape.measure.value):
raise TypeError('Foreign key must reference a '
'Record datashape')
return measure.value.names
name = getattr(self, '_name', None)
if name is not None:
return [self._name]
return []
def _len(self):
try:
return int(self.dshape[0])
except TypeError:
raise ValueError('Can not determine length of table with the '
'following datashape: %s' % self.dshape)
def __len__(self): # pragma: no cover
return self._len()
def __iter__(self):
raise NotImplementedError(
'Iteration over expressions is not supported.\n'
'Iterate over computed result instead, e.g. \n'
"\titer(expr) # don't do this\n"
"\titer(compute(expr)) # do this instead")
def __dir__(self):
result = dir(type(self))
if (isrecord(self.dshape.measure) or
isinstance(self.dshape.measure, datashape.Map) and
self.fields):
result.extend(map(valid_identifier, self.fields))
result.extend(toolz.merge(schema_methods(self.dshape.measure),
dshape_methods(self.dshape)))
return sorted(set(filter(isvalid_identifier, result)))
def __getattr__(self, key):
assert key != '_hash', \
'%s should set _hash in _init' % type(self).__name__
try:
result = object.__getattribute__(self, key)
except AttributeError:
fields = dict(zip(map(valid_identifier, self.fields), self.fields))
measure = self.dshape.measure
if isinstance(measure, datashape.Map): # Foreign key
measure = measure.key
# prefer the method if there's a field with the same name
methods = toolz.merge(
schema_methods(measure),
dshape_methods(self.dshape)
)
if key in methods:
func = methods[key]
if func in method_properties:
result = func(self)
elif getattr(func, '__get__', None):
result = func.__get__(self, type(self))
else:
result = boundmethod(func, self)
elif self.fields and key in fields:
if isscalar(self.dshape.measure): # t.foo.foo is t.foo
result = self
else:
result = self[fields[key]]
else:
raise
# cache the attribute lookup, getattr will not be invoked again.
_setattr(self, key, result)
return result
@attribute
def _name(self):
measure = self.dshape.measure
if len(self._inputs) == 1 and isscalar(getattr(measure, 'key',
measure)):
child_measure = self._child.dshape.measure
if isscalar(getattr(child_measure, 'key', child_measure)):
# memoize the result
return _setattr(self, '_name', self._child._name)
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *args):
""" Exit context
Close any open resource if we are called in context
"""
for value in self._resources().values():
try:
value.close()
except AttributeError:
pass
return True
# Add some placeholders to help with refactoring. If we forget to attach
# these methods later we will get better errors.
# To find the real definition, look for usage of ``@as_attribute``
for method in ('_project', '_select', 'cast'):
@attribute
def _(self):
raise AssertionError('method added after class definition')
locals()[method] = _
del _
del method
def sanitized_dshape(dshape, width=50):
pretty_dshape = datashape.pprint(dshape, width=width).replace('\n', '')
if len(pretty_dshape) > width:
pretty_dshape = "{}...".format(pretty_dshape[:width])
return pretty_dshape
class Symbol(Expr):
"""
Symbolic data. The leaf of a Blaze expression
Examples
--------
>>> points = symbol('points', '5 * 3 * {x: int, y: int}')
>>> points
<`points` symbol; dshape='5 * 3 * {x: int32, y: int32}'>
>>> points.dshape
dshape("5 * 3 * {x: int32, y: int32}")
"""
_arguments = '_name', 'dshape', '_token'
_input_attributes = ()
def __repr__(self):
fmt = "<`{}` symbol; dshape='{}'>"
return fmt.format(self._name, sanitized_dshape(self.dshape))
def __str__(self):
return self._name or ''
def _resources(self):
return {}
@copydoc(Symbol)
def symbol(name, dshape, token=None):
return Symbol(name, datashape.dshape(dshape), token or 0)
@dispatch(Symbol, Mapping)
def _subs(o, d):
""" Subs symbols using symbol function
Supports caching"""
newargs = (subs(arg, d) for arg in o._args)
return symbol(*newargs)
class ElemWise(Expr):
"""
Elementwise operation.
The shape of this expression matches the shape of the child.
"""
def _dshape(self):
return datashape.DataShape(
*(self._child.dshape.shape + tuple(self.schema))
)
class Field(ElemWise):
"""
A single field from an expression.
Get a single field from an expression with record-type schema.
We store the name of the field in the ``_name`` attribute.
Examples
--------
>>> points = symbol('points', '5 * 3 * {x: int32, y: int32}')
>>> points.x.dshape
dshape("5 * 3 * int32")
For fields that aren't valid Python identifiers, use ``[]`` syntax:
>>> points = symbol('points', '5 * 3 * {"space station": float64}')
>>> points['space station'].dshape
dshape("5 * 3 * float64")
"""
_arguments = '_child', '_name'
def __str__(self):
fmt = '%s.%s' if isvalid_identifier(self._name) else '%s[%r]'
return fmt % (self._child, self._name)
@property
def _expr(self):
return symbol(self._name, datashape.DataShape(self.dshape.measure))
def _dshape(self):
shape = self._child.dshape.shape
measure = self._child.dshape.measure
# TODO: is this too special-case-y?
schema = getattr(measure, 'value', measure).dict[self._name]
shape = shape + schema.shape
schema = (schema.measure,)
return DataShape(*(shape + schema))
class Projection(ElemWise):
"""Select a subset of fields from data.
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> accounts[['name', 'amount']].schema
dshape("{name: string, amount: int32}")
>>> accounts[['name', 'amount']]
accounts[['name', 'amount']]
See Also
--------
blaze.expr.expressions.Field
"""
_arguments = '_child', '_fields'
@property
def fields(self):
return list(self._fields)
def _schema(self):
measure = self._child.schema.measure
d = getattr(measure, 'value', measure).dict
return DataShape(Record((name, d[name]) for name in self.fields))
def __str__(self):
return '%s[%s]' % (self._child, self.fields)
def _project(self, key):
if isinstance(key, list) and set(key).issubset(set(self.fields)):
return self._child[key]
raise ValueError("Column Mismatch: %s" % key)
def _get_field(self, fieldname):
if fieldname in self.fields:
return Field(self._child, fieldname)
raise ValueError("Field %s not found in columns %s" % (fieldname,
self.fields))
@as_attribute(Expr, '_project')
@copydoc(Projection)
def projection(expr, names):
if not names:
raise ValueError("Projection with no names")
if not isinstance(names, (tuple, list)):
raise TypeError("Wanted list of strings, got %s" % names)
if not set(names).issubset(expr.fields):
raise ValueError("Mismatched names. Asking for names %s "
"where expression has names %s" %
(names, expr.fields))
return Projection(expr, tuple(names))
def sanitize_index_lists(ind):
""" Handle lists/arrays of integers/bools as indexes
>>> sanitize_index_lists([2, 3, 5])
[2, 3, 5]
>>> sanitize_index_lists([True, False, True, False])
[0, 2]
>>> sanitize_index_lists(np.array([1, 2, 3]))
[1, 2, 3]
>>> sanitize_index_lists(np.array([False, True, True]))
[1, 2]
"""
if not isinstance(ind, (list, np.ndarray)):
return ind
if isinstance(ind, np.ndarray):
ind = ind.tolist()
if isinstance(ind, list) and ind and isinstance(ind[0], bool):
ind = [a for a, b in enumerate(ind) if b]
return ind
def sliceit(child, index):
index2 = tuple(map(sanitize_index_lists, index))
index3 = hashable_index(index2)
s = Slice(child, index3)
hash(s)
return s
class Slice(Expr):
"""Elements `start` until `stop`. On many backends, a `step` parameter
is also allowed.
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts[2:7].dshape
dshape("5 * {name: string, amount: int32}")
>>> accounts[2:7:2].dshape
dshape("3 * {name: string, amount: int32}")
"""
_arguments = '_child', '_index'
def _dshape(self):
return self._child.dshape.subshape[self.index]
@property
def index(self):
return replace_slices(self._index)
def __str__(self):
if isinstance(self.index, tuple):
index = ', '.join(map(str, self._index))
else:
index = str(self._index)
return '%s[%s]' % (self._child, index)
class Selection(Expr):
""" Filter elements of expression based on predicate
Examples
--------
>>> accounts = symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> deadbeats = accounts[accounts.amount < 0]
"""
_arguments = '_child', 'predicate'
_input_attributes = '_child', 'predicate'
@property
def _name(self):
return self._child._name
def __str__(self):
return "%s[%s]" % (self._child, self.predicate)
def _dshape(self):
shape = list(self._child.dshape.shape)
shape[0] = Var()
return DataShape(*(shape + [self._child.dshape.measure]))
class SimpleSelection(Selection):
"""Internal selection class that does not treat the predicate as an input.
"""
_arguments = Selection._arguments
_input_attributes = '_child',
@as_attribute(Expr, '_select')
@copydoc(Selection)
def selection(table, predicate):
subexpr = common_subexpression(table, predicate)
if not builtins.all(
isinstance(node, (VarArgsExpr, ElemWise, Symbol)) or
node.isidentical(subexpr)
for node in concat([path(predicate, subexpr),
path(table, subexpr)])):
raise ValueError("Selection not properly matched with table:\n"
"child: %s\n"
"apply: %s\n"
"predicate: %s" % (subexpr, table, predicate))
if not isboolean(predicate.dshape):
raise TypeError("Must select over a boolean predicate. Got:\n"
"%s[%s]" % (table, predicate))
return table._subs({subexpr: Selection(subexpr, predicate)})
class Label(ElemWise):
"""An expression with a name.
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> expr = accounts.amount * 100
>>> expr._name
'amount'
>>> expr.label('new_amount')._name
'new_amount'
See Also
--------
blaze.expr.expressions.ReLabel
"""
_arguments = '_child', 'label'
def _schema(self):
return self._child.schema
@property
def _name(self):
return self.label
def _get_field(self, key):
if key[0] == self.fields[0]:
return self
raise ValueError("Column Mismatch: %s" % key)
def __str__(self):
return 'label(%s, %r)' % (self._child, self.label)
@copydoc(Label)
def label(expr, lab):
if expr._name == lab:
return expr
return Label(expr, lab)
class ReLabel(ElemWise):
"""
Table with same content but with new labels
Examples
--------
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.schema
dshape("{name: string, amount: int32}")
>>> accounts.relabel(amount='balance').schema
dshape("{name: string, balance: int32}")
>>> accounts.relabel(not_a_column='definitely_not_a_column')
Traceback (most recent call last):
...
ValueError: Cannot relabel non-existent child fields: {'not_a_column'}
>>> s = symbol('s', 'var * {"0": int64}')
>>> s.relabel({'0': 'foo'})
s.relabel({'0': 'foo'})
>>> s.relabel(0='foo') # doctest: +SKIP
Traceback (most recent call last):
...
SyntaxError: keyword can't be an expression
Notes
-----
When names are not valid Python names, such as integers or string with
spaces, you must pass a dictionary to ``relabel``. For example
.. code-block:: python
>>> s = symbol('s', 'var * {"0": int64}')
>>> s.relabel({'0': 'foo'})
s.relabel({'0': 'foo'})
>>> t = symbol('t', 'var * {"whoo hoo": ?float32}')
>>> t.relabel({"whoo hoo": 'foo'})
t.relabel({'whoo hoo': 'foo'})
See Also
--------
blaze.expr.expressions.Label
"""
_arguments = '_child', 'labels'
def _schema(self):
subs = dict(self.labels)
param = self._child.dshape.measure.parameters[0]
return DataShape(Record([[subs.get(name, name), dtype]
for name, dtype in param]))
def __str__(self):
labels = self.labels
if all(map(isvalid_identifier, map(first, labels))):
rest = ', '.join('%s=%r' % l for l in labels)
else:
rest = '{%s}' % ', '.join('%r: %r' % l for l in labels)
return '%s.relabel(%s)' % (self._child, rest)
@copydoc(ReLabel)
def relabel(child, labels=None, **kwargs):
labels = {k: v
for k, v in toolz.merge(labels or {}, kwargs).items() if k != v}
label_keys = set(labels)
fields = child.fields
if not label_keys.issubset(fields):
non_existent_fields = label_keys.difference(fields)
raise ValueError("Cannot relabel non-existent child fields: {%s}" %
', '.join(map(repr, non_existent_fields)))
if not labels:
return child
if isinstance(labels, Mapping): # Turn dict into tuples
labels = tuple(sorted(labels.items()))
if isscalar(child.dshape.measure):
if child._name == labels[0][0]:
return child.label(labels[0][1])
else:
return child
return ReLabel(child, labels)
class Map(ElemWise):
""" Map an arbitrary Python function across elements in a collection
Examples
--------
>>> from datetime import datetime
>>> t = symbol('t', 'var * {price: real, time: int64}') # times as integers
>>> datetimes = t.time.map(datetime.utcfromtimestamp)
Optionally provide extra schema information
>>> datetimes = t.time.map(datetime.utcfromtimestamp,
... schema='{time: datetime}')
See Also
--------
blaze.expr.expresions.Apply
"""
_arguments = '_child', 'func', '_asschema', '_name0'
def _schema(self):
if self._asschema:
return dshape(self._asschema)
else:
raise NotImplementedError("Schema of mapped column not known.\n"
"Please specify datashape keyword in "
".map method.\nExample: "
"t.columnname.map(function, 'int64')")
def label(self, name):
assert isscalar(self.dshape.measure)
return Map(self._child,
self.func,
self.schema,
name)
@property
def shape(self):
return self._child.shape
@property
def ndim(self):
return self._child.ndim
@property
def _name(self):
if self._name0:
return self._name0
else:
return self._child._name
if PY2:
copydoc(Map, Expr.map.im_func)
else:
copydoc(Map, Expr.map)
class Apply(Expr):
""" Apply an arbitrary Python function onto an expression
Examples
--------
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> h = t.apply(hash, dshape='int64') # Hash value of resultant dataset
You must provide the datashape of the result with the ``dshape=`` keyword.
For datashape examples see
http://datashape.pydata.org/grammar.html#some-simple-examples
If using a chunking backend and your operation may be safely split and
concatenated then add the ``splittable=True`` keyword argument
>>> t.apply(f, dshape='...', splittable=True) # doctest: +SKIP
See Also
--------
blaze.expr.expressions.Map
"""
_arguments = '_child', 'func', '_asdshape', '_splittable'
def _schema(self):
if iscollection(self.dshape):
return self.dshape.subshape[0]
else:
raise TypeError("Non-tabular datashape, %s" % self.dshape)
def _dshape(self):
return self._asdshape
@copydoc(Apply)
def apply(expr, func, dshape, splittable=False):
return Apply(expr, func, datashape.dshape(dshape), splittable)
class Coerce(ElemWise):
"""Coerce an expression to a different type.
Examples
--------
>>> t = symbol('t', '100 * float64')
>>> t.coerce(to='int64')
t.coerce(to='int64')
>>> t.coerce('float32')
t.coerce(to='float32')
>>> t.coerce('int8').dshape
dshape("100 * int8")
"""
_arguments = '_child', 'to'
def _schema(self):
return self.to
def __str__(self):
return '%s.coerce(to=%r)' % (self._child, str(self.schema))
@copydoc(Coerce)
def coerce(expr, to):
return Coerce(expr, dshape(to) if isinstance(to, _strtypes) else to)
class Cast(Expr):
"""Cast an expression to a different type.
This is only an expression time operation.
Examples
--------
>>> s = symbol('s', '?int64')
>>> s.cast('?int32').dshape
dshape("?int32")
# Cast to correct mislabeled optionals
>>> s.cast('int64').dshape
dshape("int64")
# Cast to give concrete dimension length
>>> t = symbol('t', 'var * float32')
>>> t.cast('10 * float32').dshape
dshape("10 * float32")
"""
_arguments = '_child', 'to'
def _dshape(self):
return self.to
def __str__(self):
return 'cast(%s, to=%r)' % (self._child, str(self.to))
@as_attribute(Expr)
@copydoc(Cast)
def cast(expr, to):
return Cast(expr, dshape(to) if isinstance(to, _strtypes) else to)
def binop_name(expr):
if not isscalar(expr.dshape.measure):
return None
l = getattr(expr.lhs, '_name', None)
r = getattr(expr.rhs, '_name', None)
if bool(l) ^ bool(r):
return l or r
elif l == r:
return l
return None
def binop_inputs(expr):
if isinstance(expr.lhs, Expr):
yield expr.lhs
if isinstance(expr.rhs, Expr):
yield expr.rhs
class Coalesce(Expr):
"""SQL like coalesce.
.. code-block:: python
coalesce(a, b) = {
a if a is not NULL
b otherwise
}
Examples
--------
>>> coalesce(1, 2)
1
>>> coalesce(1, None)
1
>>> coalesce(None, 2)
2
>>> coalesce(None, None) is None
True
"""
_arguments = 'lhs', 'rhs', 'dshape'
_input_attributes = 'lhs', 'rhs'
def __str__(self):
return 'coalesce(%s, %s)' % (self.lhs, self.rhs)
_name = property(binop_name)
@property
def _inputs(self):
return tuple(binop_inputs(self))
@copydoc(Coalesce)
def coalesce(a, b):
a_dshape = discover(a)
a_measure = a_dshape.measure
isoption = isinstance(a_measure, Option)
if isoption:
a_measure = a_measure.ty
isnull = isinstance(a_measure, Null)
if isnull:
# a is always null, this is just b
return b
if not isoption:
# a is not an option, this is just a
return a
b_dshape = discover(b)
return Coalesce(a, b, DataShape(*(
maxshape((a_dshape.shape, b_dshape.shape)) +
(promote(a_measure, b_dshape.measure),)
)))
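# Illustrative symbolic use (consistent with the promotion logic above):
#
#   >>> a = symbol('a', '?int32')
#   >>> b = symbol('b', 'int32')
#   >>> coalesce(a, b).dshape
#   dshape("int32")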
dshape_method_list = list()
schema_method_list = list()
method_properties = set()
dshape_methods = memoize(partial(select_functions, dshape_method_list))
schema_methods = memoize(partial(select_functions, schema_method_list))
@dispatch(DataShape)
def shape(ds):
s = ds.shape
s = tuple(int(d) if isinstance(d, Fixed) else d for d in s)
return s
@dispatch(object)
def shape(expr):
""" Shape of expression
>>> symbol('s', '3 * 5 * int32').shape
(3, 5)
Works on anything discoverable
>>> shape([[1, 2], [3, 4]])
(2, 2)
"""
s = list(discover(expr).shape)
for i, elem in enumerate(s):
try:
s[i] = int(elem)
except TypeError:
pass
return tuple(s)
def ndim(expr):
""" Number of dimensions of expression
>>> symbol('s', '3 * var * int32').ndim
2
"""
return len(shape(expr))
def drop_field(expr, field, *fields):
"""Drop a field or fields from a tabular expression.
Parameters
----------
expr : Expr
A tabular expression to drop columns from.
*fields
The names of the fields to drop.
Returns
-------
dropped : Expr
The new tabular expression with some columns missing.
Raises
------
TypeError
Raised when ``expr`` is not tabular.
ValueError
Raised when a column is not in the fields of ``expr``.
See Also
--------
:func:`blaze.expr.expressions.projection`
"""
to_remove = set((field,)).union(fields)
new_fields = []
for field in expr.fields:
if field not in to_remove:
new_fields.append(field)
else:
to_remove.remove(field)
if to_remove:
raise ValueError(
'fields %r were not in the fields of expr (%r)' % (
sorted(to_remove),
expr.fields
),
)
return expr[new_fields]
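# Illustrative example (not part of the original docstring):
#
#   >>> t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
#   >>> drop_field(t, 'id').fields
#   ['name', 'amount']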
dshape_method_list.extend([
(lambda ds: True, {apply}),
(iscollection, {shape, ndim}),
(lambda ds: iscollection(ds) and isscalar(ds.measure), {coerce}),
(istabular, {drop_field}),
])
schema_method_list.extend([
(isscalar, {label, relabel, coerce}),
(isrecord, {relabel}),
(lambda ds: isinstance(ds, Option), {coalesce}),
])
method_properties.update([shape, ndim])
@dispatch(Expr)
def discover(expr):
return expr.dshape
class VarArgsExpr(Expr):
"""An expression used for collecting variadic arguments into a single, typed
container.
Parameters
----------
_inputs : tuple[any]
The arguments that this expression will compute.
"""
_arguments = '_inputs',
@attribute
def _inputs(self):
raise NotImplementedError('overridden in _init')
def _dshape(self):
return DataShape(datashape.void)
def varargsexpr(args):
"""Create a varargs expr which will be materialzed as a ``VarArgs``
"""
# lazy import to break cycle
from blaze.compute.varargs import register_varargs_arity
args = tuple(args)
register_varargs_arity(len(args))
return VarArgsExpr(args)
| bsd-3-clause | 5,986,935,608,387,100,000 | 26.015829 | 80 | 0.56781 | false | 3.825818 | false | false | false |
jireh-father/tensorflow-alexnet | ops.py | 1 | 1864 | import tensorflow as tf
def conv(inputs, kernel_size, output_num, stride_size=1, init_bias=0.0, conv_padding='SAME', stddev=0.01,
         activation_func=tf.nn.relu):
    """2-D convolution: weights + bias, with optional activation (NHWC layout)."""
input_size = inputs.get_shape().as_list()[-1]
conv_weights = tf.Variable(
tf.random_normal([kernel_size, kernel_size, input_size, output_num], dtype=tf.float32, stddev=stddev),
name='weights')
conv_biases = tf.Variable(tf.constant(init_bias, shape=[output_num], dtype=tf.float32), 'biases')
conv_layer = tf.nn.conv2d(inputs, conv_weights, [1, stride_size, stride_size, 1], padding=conv_padding)
conv_layer = tf.nn.bias_add(conv_layer, conv_biases)
if activation_func:
conv_layer = activation_func(conv_layer)
return conv_layer
def fc(inputs, output_size, init_bias=0.0, activation_func=tf.nn.relu, stddev=0.01):
    """Fully connected layer; 4-D convolutional outputs are flattened automatically."""
input_shape = inputs.get_shape().as_list()
if len(input_shape) == 4:
fc_weights = tf.Variable(
tf.random_normal([input_shape[1] * input_shape[2] * input_shape[3], output_size], dtype=tf.float32,
stddev=stddev),
name='weights')
inputs = tf.reshape(inputs, [-1, fc_weights.get_shape().as_list()[0]])
else:
fc_weights = tf.Variable(tf.random_normal([input_shape[-1], output_size], dtype=tf.float32, stddev=stddev),
name='weights')
fc_biases = tf.Variable(tf.constant(init_bias, shape=[output_size], dtype=tf.float32), name='biases')
fc_layer = tf.matmul(inputs, fc_weights)
fc_layer = tf.nn.bias_add(fc_layer, fc_biases)
if activation_func:
fc_layer = activation_func(fc_layer)
return fc_layer
def lrn(inputs, depth_radius=2, alpha=0.0001, beta=0.75, bias=1.0):
return tf.nn.local_response_normalization(inputs, depth_radius=depth_radius, alpha=alpha, beta=beta, bias=bias)
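# Editorial usage sketch (not from the original repository): wiring the helpers
# above into a tiny conv -> lrn -> fc stack. The input shape and layer sizes are
# illustrative assumptions, not the real AlexNet configuration.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
    net = conv(images, kernel_size=5, output_num=16, stride_size=2)  # 5x5 conv, 16 feature maps
    net = lrn(net)                                                   # local response normalization
    logits = fc(net, output_size=10, activation_func=None)           # flatten + fully connected
    print(logits.get_shape().as_list())                              # expected: [None, 10]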
| mit | 5,898,839,375,387,653,000 | 46.794872 | 115 | 0.645386 | false | 3.180887 | false | false | false |
virtualsciences/egg.releaser | egg/releaser/git.py | 1 | 3721 | import logging
import ConfigParser
import io
import sys
import utils
from zest.releaser.git import Git as OGGit
logger = logging.getLogger(__name__)
class Git(OGGit):
""" Command proxy for Git enhanced with gitflow commands.
"""
def cmd_gitflow_release_start(self, version, base=''):
return 'git flow release start %s %s' % (version, base)
def cmd_gitflow_release_finish(self, version):
return 'git flow release finish -m "Release-%s" %s' % (version,
version)
def cmd_gitflow_hotfix_start(self, version, basename=''):
return "git flow hotfix start %s %s" % (version, basename)
def cmd_gitflow_hotfix_finish(self, version):
return "git flow hotfix finish %s" % version
def _config(self):
""" Parse the git config into a ConfigParser object.
"""
config = open('./.git/config', 'r').read().replace('\t', '')
config = config.replace('\t', '') # ConfigParser doesn't like tabs
parser = ConfigParser.ConfigParser()
parser.readfp(io.BytesIO(config))
return parser
@property
def extensions(self):
config = self._config()
return ['gitflow'] if 'gitflow "branch"' in config.sections() else []
def cmd_create_tag(self, version, base=''):
if 'gitflow' in self.extensions:
msg = "Release-%s" % version
_start_cmd = 'git flow release start %s %s' % (version, base)
_finish_cmd = 'git flow release finish -m "%s" %s' % (msg, version)
return '; '.join([_start_cmd, _finish_cmd])
else:
            return super(OGGit, self).cmd_create_tag(version)
def gitflow_branches(self):
config = self._config()
return dict(config.items('gitflow "branch"'))
def gitflow_get_branch(self, branch):
branches = self.gitflow_branches()
if branch in branches:
return branches.get(branch)
else:
logger.critical(
'"%s" is not a valid gitflow branch.' % branch)
sys.exit(1)
def gitflow_prefixes(self):
config = self._config()
return dict(config.items('gitflow "prefix"'))
def gitflow_get_prefix(self, prefix):
prefixes = self.gitflow_prefixes()
if prefix in prefixes:
return prefixes.get(prefix)
else:
logger.critical(
'"%s" is not a valid gitflow prefix.' % prefix)
sys.exit(1)
def gitflow_check_prefix(self, prefix):
prefix = self.gitflow_get_prefix(prefix)
current = self.current_branch()
return current.startswith(prefix)
def gitflow_check_branch(self, branch, switch=False):
branch = self.gitflow_get_branch(branch)
current = self.current_branch()
if current != branch:
if switch:
self.gitflow_switch_to_branch(branch, silent=False)
else:
logger.critical(
'You are not on the "%s" branch.' % branch)
sys.exit(1)
def gitflow_switch_to_branch(self, branch, silent=True):
if not silent:
logger.info(
'You are not on the "%s" branch, switching now.' % branch)
utils.execute_command(self.cmd_checkout_from_tag(branch, '.'))
def current_branch(self):
return utils.execute_command("git rev-parse --abbrev-ref HEAD").strip()
def enhance_with_gitflow(vcs):
""" Return the vcs determined by the original function, unless we are
dealing with git, in which case we return our gitflow enhanced Git().
"""
return Git() if isinstance(vcs, OGGit) else vcs
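# Editorial sketch (not part of the package): the cmd_* helpers simply build
# shell command strings, e.g.
#
#     >>> Git().cmd_gitflow_release_start('1.2', 'develop')
#     'git flow release start 1.2 develop'
#     >>> Git().cmd_gitflow_hotfix_finish('1.2.1')
#     'git flow hotfix finish 1.2.1'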
| gpl-2.0 | -6,212,874,634,522,170,000 | 33.453704 | 79 | 0.586133 | false | 3.98821 | true | false | false |
piyushmaurya23/twitter-clone | config/settings/local.py | 1 | 2021 | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='r%-xf7+jh0o_!8-f3x&c#2iil!3-g!=anoo!m=_yge1io#bv3)')
# Mail settings
# ------------------------------------------------------------------------------
DATABASE_URL="postgres://postgres:[email protected]:5432/twitter"
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| mit | 233,340,992,603,222,370 | 29.164179 | 99 | 0.492825 | false | 4.099391 | false | false | false |
pandreetto/info-glue-provider | src/GLUEInfoProvider/GLUE2ServiceHandler.py | 1 | 2292 | # Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from GLUEInfoProvider import CommonUtils
#
# This module should replace glite-ce-glue2-computingservice-static
# and the related YAIM function config_cream_gip_glue2
#
def process(siteDefs, out=sys.stdout):
now = CommonUtils.getNow()
srvType = 'org.glite.ce.CREAM'
endpointCount = 2 # CREAM + RTEPublisher (CEMon ?)
shareCount = siteDefs.ruleTable.getShareCount()
resourceCount = len(siteDefs.resourceTable)
out.write("dn: GLUE2ServiceID=%s,GLUE2GroupID=resource,o=glue\n" % siteDefs.compServiceID)
out.write("objectClass: GLUE2Entity\n")
out.write("objectClass: GLUE2Service\n")
out.write("objectClass: GLUE2ComputingService\n")
out.write("GLUE2EntityCreationTime: %s\n" % now)
out.write("GLUE2EntityName: Computing Service %s\n" % siteDefs.compServiceID)
out.write("GLUE2EntityOtherInfo: InfoProviderName=%s\n" % CommonUtils.providerName)
out.write("GLUE2EntityOtherInfo: InfoProviderVersion=%s\n" % CommonUtils.providerVersion)
out.write("GLUE2EntityOtherInfo: InfoProviderHost=%s\n" % siteDefs.ceHost)
out.write("GLUE2ServiceID: %s\n" % siteDefs.compServiceID)
out.write("GLUE2ServiceType: %s\n" % srvType)
out.write("GLUE2ServiceCapability: executionmanagement.jobexecution\n")
out.write("GLUE2ServiceQualityLevel: production\n")
out.write("GLUE2ServiceComplexity: endpointType=%d, share=%d, resource=%d\n"
% (endpointCount, shareCount, resourceCount))
out.write("GLUE2ServiceAdminDomainForeignKey: %s\n" % siteDefs.siteName)
out.write("\n")
| apache-2.0 | -8,300,528,381,849,291,000 | 39.210526 | 94 | 0.72993 | false | 3.293103 | false | false | false |
thomastweets/PythonRSA | rsa.py | 1 | 15107 | # -*- coding: utf-8 -*-
################################################################################
################# Representational Similarity Analysis #########################
################################################################################
# Amelie Haugg
# Julia Brehm
# Pia Schröder
import os
from os.path import dirname, abspath
import numpy as np
from scipy.spatial.distance import cdist
from scipy.stats import spearmanr
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import datetime
import markdown
import webbrowser
# Global variables and their default values
matrix_plot1 = True
matrix_plot2 = False
bar_plot = False
correlations1 = False
correlations2 = False
pvalues = False
no_relabelings = 10000
dist_metric = 1
output_first = True
output_second = False
scale_to_max = False
now = datetime.datetime.now()
def import_data(paths):
""" Import header and data matrix from VOM files specified in paths. Returns
dictionary DATA containing data set names as keys."""
DATA = dict()
# Iterate through files and save data in dictionary
for set_no, file_name in enumerate(paths):
header = dict()
# Read header and store last line
with open(file_name, 'r') as file:
for index, line in enumerate(file):
string_list = line.split()
item_list = [int(i) if i.isdigit() else i for i in string_list]
# For non empty list save first element as key and rest as value in
# header dictionary
if item_list:
key = item_list.pop(0)
if len(item_list) > 1:
header[key] = item_list
else: header[key] = item_list.pop()
# Use 'NrOfVoxels' as indicator for the end of the header
if 'NrOfVoxels:' in line:
break
header_end = index + 1
# Read data into array
data = np.loadtxt(file_name, skiprows = header_end)
# Save data set in DATA dictionary
key = "data_set_" + str(set_no + 1)
DATA[key] = {'header': header, 'data': data}
return DATA
def extract_data(DATA):
""" Get voxel data from data matrices in DATA. One matrix per area, rows = voxels,
columns = conditions. """
# Extracts those columns in data that contain measurements (excluding voxel coordinates)
data = []
for i in range(1,len(DATA)+1):
data.append(DATA['data_set_' + str(i)]['data'][:,3:])
return data
def first_order_rdm(condition_data):
""" Return Specified distance matrices (1 = Pearson correlation,
2 = Euclidian distance, 3 = Absolute activation difference) of data in
input matrices. One matrix per area/subject/method/...
Number of rows/columns = number of conditions = number of columns in each
matrix in condition_data"""
RDMs = list()
# Iterate through matrices in condition_data and save one RDM per matrix
for i in range(len(condition_data)):
if dist_metric == 1:
# Use correlation distance
RDM = 1-np.corrcoef(condition_data[i],rowvar=0)
elif dist_metric == 2:
            # Use Euclidean distance
RDM = cdist(condition_data[i].T,condition_data[i].T,'euclidean')
elif dist_metric == 3:
# Use absolute activation difference
means = np.mean(condition_data[i], axis=0) # Determine mean activation per condition
m, n = np.meshgrid(means,means) # Create all possible combinations
RDM = abs(m-n) # Calculate difference for each combination
RDMs.append(RDM)
return RDMs
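# Editorial worked example (not part of the analysis pipeline): two conditions
# measured in three voxels give a 2x2 RDM; with the default dist_metric == 1
# the off-diagonal entry is 1 minus the Pearson correlation between the two
# condition patterns and the diagonal is 0.
#
#     >>> area = np.array([[1.0, 2.0],
#     ...                  [2.0, 4.0],
#     ...                  [3.0, 9.0]])
#     >>> first_order_rdm([area])[0].shape
#     (2, 2)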
def get_pvalue(matrix1, matrix2):
""" Randomize condition labels to test significance """
order = range(0,len(matrix2))
dist = np.zeros(no_relabelings)
# First, determine actual correlation
flat1 = matrix1.flatten(1).transpose()
flat2 = matrix2.flatten(1).transpose()
corr = spearmanr(flat1,flat2)[0]
# Relabel N times to obtain distribution of correlations
for i in range(0,no_relabelings):
np.random.shuffle(order)
dummy = matrix2.take(order, axis=1).take(order, axis=0)
flat2 = dummy.flatten(1).transpose()
dist[i] = spearmanr(flat1,flat2)[0]
# Determine p value of actual correlation from distribution
p = float((dist >= corr).sum()) / len(dist)
    # Note: this relabeling scheme needs at least 4 conditions (4! = 24
    # possible orderings) to be able to reach p < 0.05 at all. Not ideal!
return p
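# Editorial sketch (an assumption, not original code): for two identical RDMs
# the observed correlation is maximal, so the p-value reduces to the fraction
# of random relabelings that tie it -- with 4 conditions roughly 1/24 (~0.04).
#
#     >>> rdm = np.array([[0., 1., 2., 3.],
#     ...                 [1., 0., 4., 5.],
#     ...                 [2., 4., 0., 6.],
#     ...                 [3., 5., 6., 0.]])
#     >>> 0.0 <= get_pvalue(rdm, rdm.copy()) <= 1.0
#     True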
def bootstrap(data):
""" computes the variability of the obtained second-order RDM (i.e. distance
between areas, models, ...) for the same experiment with different stimuli
by bootstrapping 100 times from the condition set. """
all_RDMs = list()
# Iterate through 100 resamplings
for ind in range(100):
index = np.random.random_integers(0, high=len(data[0].T)-1, size=(1,len(data[0].T)))[0]
new_data = np.array(data)
# Reorder columns in data (conditions)
for elem in range(len(data)):
new_data[elem] = new_data[elem][:,index]
# Recompute first and second-order RDMs with new conditions
new_RDM1 = first_order_rdm(list(new_data))
new_RDM2 = second_order_rdm(new_RDM1, data, False)[0]
# Remove off-diagonal zeros to avoid artefactually small standard deviations
m_index = [new_RDM2 == 0]
ident = np.invert(np.identity(len(new_RDM2), dtype=bool))
m_index = m_index & ident
new_RDM2[m_index[0]] = np.nan
all_RDMs.append(new_RDM2)
all_RDMs = np.array(all_RDMs)
# Compute standard deviation along first dimension (across RDMs)
variability = np.nanstd(all_RDMs,0)
return variability
def second_order_rdm(RDMs, data, firstcall):
""" Returns representational dissimilarity matrix computed with Spearman rank correlations
between variable number of equally sized input matrices. """
# Flatten input matrices
flat = [m.flatten(1) for m in RDMs]
flat = np.array(flat).transpose()
# Compute Spearman rank correlation matrix
c_matrix = spearmanr(flat)[0]
# In case only two conditions are compared, spearmanr returns single correlation
# coefficient and c_matrix has to be built manually
if not(isinstance(c_matrix, np.ndarray)):
c_matrix = np.array([[1,c_matrix],[c_matrix,1]])
# Compute RDM (distance matrix) with correlation distance: 1 - correlation
RDM = np.ones(c_matrix.shape) - c_matrix
p_values = []
variability = []
if firstcall:
if bar_plot:
# Determine variability of distance estimates for different stimuli
# Bootstrap from condition set (100 times, with replacement)
variability = bootstrap(data)
if pvalues or bar_plot:
# Determine significance of second order RDM
p_values = np.zeros(RDM.shape)
# Iterate through pvalue matrix and fill in p-values but only for upper
# triangle to improve performance
for i in range(0,len(p_values)):
for j in range(i,len(p_values)):
p_values[i,j] = get_pvalue(RDMs[i], RDMs[j])
# mirror matrix to obtain all p-values
p_values = p_values + np.triu(p_values,1).T
return [RDM, p_values, variability]
def plot_RDM(RDMs, labels, names, fig):
""" Create RDM plot. Creates one first-order plot for each area if fig=1
and a single second-order plot if fig=2."""
# Determine optimal arrangement for plots
rows = int(np.sqrt(len(RDMs)))
columns = int(np.ceil(len(RDMs)/float(rows)))
ticks = np.arange(len(labels))
# Use maximum value in RDMs for scaling if desired
dist_max = np.max(np.array(RDMs))
if fig == 1:
f = plt.figure(fig, figsize=(18, 8))
if fig == 2:
f = plt.figure(fig, figsize=(6, 6))
# New: add_subplot instead of subplots to control figure instance
for index in np.arange(len(RDMs)):
ax = f.add_subplot(rows,columns,index+1, xticklabels = labels, yticklabels = labels, xticks = ticks, yticks = ticks)
if scale_to_max:
im = ax.imshow(RDMs[index], interpolation = 'none', cmap = 'jet', vmin = 0, vmax = dist_max)
else:
im = ax.imshow(RDMs[index], interpolation = 'none', cmap = 'jet')
for label in ax.get_xticklabels():
label.set_fontsize(6)
ax.xaxis.tick_top()
ax.set_title(names[index], y = 1.08)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
dist_max = np.max(RDMs[index])
cbar = plt.colorbar(im, ticks=[0, dist_max], cax=cax)
cbar.ax.set_yticklabels(['0', str(np.around(dist_max,decimals=2))])
cbar.ax.set_ylabel('Dissimilarity')
f.subplots_adjust(hspace=0.1, wspace=0.3)
if fig == 1:
if dist_metric == 1:
f.suptitle('First order distance metric: Correlation distance', y=0.9, fontsize=18)
elif dist_metric == 2:
f.suptitle('First order distance metric: Euclidean distance', y=0.9, fontsize=18)
elif dist_metric == 3:
f.suptitle('First order distance metric: Absolute activation difference', y=0.9, fontsize=18)
figure_name = "Figure%d_%d-%d-%d-%d-%d-%d.png" % (fig, now.day, now.month, now.year, now.hour,
now.minute, now.second)
plt.savefig(figure_name, transparent=True)
return figure_name
def plot_bars(RDM, pvalues, variability, names):
""" Creates bar plot depicticting the distances between different areas.
Bars are sorted by significance, errorbars indicate the standard error of
the distnace estimate (estimated as the standard deviation of 100 distance
estimates obtained from bootstrapping of the condition labels)"""
length = len(RDM)
f = plt.figure(3, figsize=(14,6))
for index in np.arange(length):
maxim = np.max(RDM[index])
xticks = np.arange(length-1)+1
d_values = RDM[index,:]
plot_dvalues = d_values[d_values != 0]
v_values = variability[index,:]
plot_vvalues = v_values[d_values != 0]
p_values = pvalues[index,:]
plot_pvalues = np.around(p_values[d_values != 0], decimals=4)
plot_names = np.array(names)[d_values != 0]
sort = np.argsort(plot_pvalues)
ax = f.add_subplot(1,length, index+1, xticks = xticks, xticklabels = plot_pvalues[sort])
ax.set_ylabel('Correlation distance (1-Spearman rank correlation)')
ax.set_xlabel('P-values')
ax.bar(xticks, plot_dvalues[sort], 0.5, yerr = plot_vvalues[sort], error_kw=dict(ecolor='black', lw=2), align = 'center')
scale_y = max(plot_dvalues + plot_vvalues)+maxim*0.1
plt.axis([0.5, length-0.5, 0, scale_y])
ax.set_title(names[index])
for ind in np.arange(length-1):
ax.text(xticks[ind], scale_y*0.1, plot_names[sort][ind],
rotation='vertical', horizontalalignment='center',
backgroundcolor='w', color='k', visible=True)
f.subplots_adjust(hspace=0.1, wspace=0.3)
figure_name = "Figure3_%d-%d-%d-%d-%d-%d.png" % (now.day, now.month, now.year, now.hour,
now.minute, now.second)
plt.savefig(figure_name, transparent=True)
return figure_name
def generate_output(*args):
""" Generates text file including all output and converts it into html
(markdown) file """
if len(args) > 3:
[withinRDMs, betweenRDM, names, labels] = args
else:
[withinRDMs, names, labels] = args
# Produce text file
filename = "RSA_output_%d-%d-%d-%d-%d-%d.txt" % (now.day, now.month, now.year, now.hour,
now.minute, now.second)
output = "RSA_output_%d-%d-%d-%d-%d-%d.html" % (now.day, now.month, now.year, now.hour,
now.minute, now.second)
with open(filename, 'w') as fid:
fid.write("#Representational similarity analysis\n\n")
fid.write("###Areas: "+str(', '.join(names))+"\n")
fid.write("###Conditions: "+str(', '.join(labels))+"\n\n\n\n")
# first-order RDMs
if output_first:
fid.write("##First-order analysis\n\n")
# Numerical correlations
if correlations1:
distances = {1:'Correlation distance', 2:'Euclidean distance', 3:'Absolute activation difference'}
fid.write("###Dissimilarity between conditions: "+distances[dist_metric]+"\n\n")
for ind in np.arange(len(withinRDMs)):
fid.write("\n###"+names[ind]+"\n")
np.savetxt(fid, withinRDMs[ind], fmt='%.4f')# , header="\n"+names[ind]+"\n")
fid.write("\n")
# RDM Plot
if matrix_plot1:
figure_name = plot_RDM(withinRDMs, labels, names, 1)
fid.write("" % figure_name)
# second-order RDM
if output_second:
fid.write("\n")
fid.write("##Second-order analysis\n\n")
# Numerical correlations
if correlations2:
fid.write("###Dissimilarity between areas: 1-Spearman rank correlation\n\n")
np.savetxt(fid, betweenRDM[0], fmt='%.4f')
fid.write("\n\n")
# P-values
if pvalues:
fid.write("###Statistical significance of Dissimilarity between areas\n")
fid.write("P-values are obtained by random relabeling of conditions.\nNo. of relabelings = %d \n\n" % (no_relabelings))
np.savetxt(fid, betweenRDM[1], fmt='%.4f')
fid.write("\n\n")
# RDM plot
if matrix_plot2:
figure_name = plot_RDM([betweenRDM[0]], names, ['Second order RDM'], 2)
fid.write("\n")
fid.write("" % figure_name)
fid.write("\n")
# Bar plot
if bar_plot:
figure_name = plot_bars(betweenRDM[0], betweenRDM[1], betweenRDM[2], names)
fid.write("\n")
fid.write("" % figure_name)
fid.write("\n")
with open(output, 'w') as output_file:
html = markdown.markdownFromFile(filename, output_file, extensions=['markdown.extensions.nl2br'])
os.remove(filename)
webbrowser.open(output, new=2)
def RSA(paths, files, labels):
''' Imports input files, extracts relevant data, computes first and second
order RDMs and plots them'''
data = import_data(paths)
data = extract_data(data)
withinRDMs = first_order_rdm(data)
names = [file[0:-4] for file in files]
if output_second:
betweenRDM = second_order_rdm(withinRDMs, data, True)
if output_second:
generate_output(withinRDMs, betweenRDM, names, labels)
else:
generate_output(withinRDMs, names, labels)
| gpl-2.0 | 192,519,840,704,321,060 | 33.174208 | 135 | 0.601721 | false | 3.594717 | false | false | false |
mamrhein/identifiers | src/identifiers/isbnutils.py | 1 | 2204 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Name: isbnutils
# Purpose: Utility functions for checking ISBNs
#
# Author: Michael Amrhein ([email protected])
#
# Copyright: (c) 2016 Michael Amrhein
# License: This program is part of a larger application. For license
# details please read the file LICENSE.TXT provided together
# with the application.
# ---------------------------------------------------------------------------
# $Source$
# $Revision$
"""Utility functions for checking ISBNs"""
import os.path
from bisect import bisect
from typing import Iterator, Tuple
from xml.etree import ElementTree as ETree
def _iter_rules(root: ETree.Element) -> Iterator:
for elem in root.findall('RegistrationGroups/Group'):
prefix = elem.findtext('Prefix').replace('-', '')
prefix_length = len(prefix)
for subelem in elem.findall('Rules/Rule'):
number_range = subelem.findtext('Range')
lower, upper = number_range.split('-')
lower_prefix = prefix + lower
upper_prefix = prefix + upper
length = int(subelem.findtext('Length'))
if length > 0:
item_idx = prefix_length + length
else:
item_idx = 0
yield lower_prefix, upper_prefix, prefix_length, item_idx
file_name = os.path.join(os.path.dirname(__file__), "ISBN_Ranges.xml")
etree = ETree.parse(file_name)
root = etree.getroot()
rule_list = list(_iter_rules(root))
def lookup_isbn_prefix(digits: str) -> Tuple[int, int]:
"""Check ISBN prefix in `digits`."""
idx = max(bisect(rule_list, (digits,)) - 1, 0)
lower_prefix, upper_prefix, registrant_idx, item_idx = rule_list[idx]
if lower_prefix <= digits <= upper_prefix:
if item_idx > 0:
return registrant_idx, item_idx
raise ValueError(f"Excluded prefix range: '{lower_prefix}' - "
f"'{upper_prefix}'.")
if lower_prefix[:3] != digits[:3]:
raise ValueError("Undefined prefix.")
raise ValueError("Undefined registration group or registrant.")
| bsd-2-clause | 7,380,924,420,951,742,000 | 35.131148 | 77 | 0.577586 | false | 4.02925 | false | false | false |
nddsg/SimpleDBMS | simple_dbms/insert_row.py | 1 | 1183 | from data_output_stream import DataOutputStream
from column import Column
class InsertRow:
# Constants for special offsets
# The field with this offset is a primary key.
IS_PKEY = -1
# The field with this offset has a null value.
IS_NULL = -2
def __init__(self, table, values):
"""
Constructs an InsertRow object for a row containing the specified
values that is to be inserted in the specified table.
:param table:
:param values:
"""
self._table = table
self._values = values
# These objects will be created by the marshall() method.
self._key = None
self._data = None
def marshall(self):
"""
Takes the collection of values for this InsertRow
and marshalls them into a key/data pair.
:return:
"""
def get_key(self):
"""
Returns the key in the key/data pair for this row.
:return: the key
"""
return self._key
def get_data(self):
"""
Returns the data item in the key/data pair for this row.
:return: the data
"""
return self._data
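# Editorial note (hedged): marshall() above is intentionally left as a stub in
# this repository. One possible strategy -- an assumption, not the reference
# solution -- is to walk the table's columns together with self._values, write
# primary-key values into the key stream (recording IS_PKEY as the offset),
# record IS_NULL for null values, and serialize the remaining values with their
# offsets into the data stream before storing the results in self._key and
# self._data.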
| gpl-3.0 | -3,515,780,267,484,842,000 | 24.717391 | 73 | 0.578191 | false | 4.39777 | false | false | false |
toirl/ringo | ringo/lib/history.py | 1 | 1556 | import urlparse
class History:
def __init__(self, history):
self.history = history
def push(self, url):
"""Adds an url to the history if the url is not already the most
recent entry. The scheme and network location (host, port,
username, password), if present, are removed from the URL before
storing it. If there are more than 5 entries in the list the
        oldest entry will be removed.
"""
# normalize the URL by removing scheme and netloc. This avoids
# problems with the URLs when running ringo behind reverse
# proxies.
split = urlparse.urlsplit(url)
normalized_url = urlparse.urlunsplit(("", "") + split[2:])
if not self.history or normalized_url != self.history[-1]:
self.history.append(normalized_url)
if len(self.history) > 5:
del self.history[0]
def pop(self, num=1):
"""Returns a url form the history and deletes the item and all
decendants from the history. On default it will return the last
recent entry in the history. Optionally you can provide a number
to the pop method to get e.g the 2 most recent entry."""
url = None
for x in range(num):
if len(self.history) > 0:
url = self.history.pop()
return url
def last(self):
"""Returns the last element from the history stack without
removing it"""
if len(self.history) > 0:
return self.history[-1]
return None
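# Editorial usage sketch (not part of ringo): the history is a plain list,
# typically kept in the user's session.
#
#     >>> h = History([])
#     >>> h.push('http://example.com/items/1?foo=bar')
#     >>> h.push('http://example.com/items/2')
#     >>> h.last()
#     '/items/2'
#     >>> h.pop(2)
#     '/items/1?foo=bar'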
| gpl-2.0 | -5,592,376,716,367,176,000 | 34.363636 | 72 | 0.607969 | false | 4.471264 | false | false | false |
KSG-IT/ksg-nett | economy/admin.py | 1 | 1195 | from django.contrib import admin
from economy.models import Deposit, SociBankAccount, SociProduct, SociSession, ProductOrder
@admin.register(SociBankAccount)
class SociBankAccountAdmin(admin.ModelAdmin):
list_display = ['user', 'card_uuid', 'balance']
readonly_fields = ['balance']
@admin.register(SociProduct)
class SociProductAdmin(admin.ModelAdmin):
list_display = ['sku_number', 'icon', 'name', 'price', 'description', 'start']
@admin.register(Deposit)
class DepositAdmin(admin.ModelAdmin):
list_display = ['id', 'user', 'amount', 'has_receipt', 'is_valid']
@staticmethod
def user(deposit: Deposit):
return deposit.account.user
def has_receipt(self, deposit):
return bool(deposit.receipt)
has_receipt.boolean = True
def is_valid(self, deposit):
return deposit.is_valid
is_valid.boolean = True
@admin.register(SociSession)
class SociSessionAdmin(admin.ModelAdmin):
pass
@admin.register(ProductOrder)
class ProductOrderAdmin(admin.ModelAdmin):
list_display = ['id', 'product', 'order_size', 'source', 'cost']
@staticmethod
def cost(product_order: ProductOrder):
return product_order.cost
| gpl-3.0 | -4,060,153,103,539,028,000 | 24.425532 | 91 | 0.704603 | false | 3.556548 | false | false | false |
nickhand/nbodykit | nbodykit/extern/docrep.py | 1 | 34200 | import types
import six
import inspect
import re
from warnings import warn
__version__ = '0.2.5'
__author__ = 'Philipp Sommer'
try:
from matplotlib.cbook import dedent as dedents
except ImportError:
from textwrap import dedent as _dedents
def dedents(s):
return '\n'.join(_dedents(s or '').splitlines()[1:])
substitution_pattern = re.compile(
r"""(?s)(?<!%)(%%)*%(?!%) # uneven number of %
\((?P<key>.*?)\)# key enclosed in brackets""", re.VERBOSE)
summary_patt = re.compile(r'(?s).*?(?=(\n\s*\n)|$)')
class _StrWithIndentation(object):
"""A convenience class that indents the given string if requested through
the __str__ method"""
def __init__(self, s, indent=0, *args, **kwargs):
self._indent = '\n' + ' ' * indent
self._s = s
def __str__(self):
return self._indent.join(self._s.splitlines())
def __repr__(self):
return repr(self._indent.join(self._s.splitlines()))
def safe_modulo(s, meta, checked='', print_warning=True, stacklevel=2):
"""Safe version of the modulo operation (%) of strings
Parameters
----------
s: str
string to apply the modulo operation with
meta: dict or tuple
meta informations to insert (usually via ``s % meta``)
checked: {'KEY', 'VALUE'}, optional
Security parameter for the recursive structure of this function. It can
be set to 'VALUE' if an error shall be raised when facing a TypeError
or ValueError or to 'KEY' if an error shall be raised when facing a
KeyError. This parameter is mainly for internal processes.
print_warning: bool
If True and a key is not existent in `s`, a warning is raised
stacklevel: int
The stacklevel for the :func:`warnings.warn` function
Examples
--------
The effects are demonstrated by this example::
>>> from docrep import safe_modulo
>>> s = "That's %(one)s string %(with)s missing 'with' and %s key"
>>> s % {'one': 1} # raises KeyError because of missing 'with'
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
KeyError: 'with'
>>> s % {'one': 1, 'with': 2} # raises TypeError because of '%s'
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: not enough arguments for format string
>>> safe_modulo(s, {'one': 1})
"That's 1 string %(with)s missing 'with' and %s key"
"""
try:
return s % meta
except (ValueError, TypeError, KeyError):
# replace the missing fields by %%
keys = substitution_pattern.finditer(s)
for m in keys:
key = m.group('key')
if not isinstance(meta, dict) or key not in meta:
if print_warning:
warn("%r is not a valid key!" % key, SyntaxWarning,
stacklevel)
full = m.group()
s = s.replace(full, '%' + full)
if 'KEY' not in checked:
return safe_modulo(s, meta, checked=checked + 'KEY',
print_warning=print_warning,
stacklevel=stacklevel)
if not isinstance(meta, dict) or 'VALUE' in checked:
raise
s = re.sub(r"""(?<!%)(%%)*%(?!%) # uneven number of %
\s*(\w|$) # format strings""", '%\g<0>', s,
flags=re.VERBOSE)
return safe_modulo(s, meta, checked=checked + 'VALUE',
print_warning=print_warning, stacklevel=stacklevel)
class DocstringProcessor(object):
"""Class that is intended to process docstrings
    It is, but only to a minor extent, inspired by the
:class:`matplotlib.docstring.Substitution` class.
Examples
--------
Create docstring processor via::
>>> from docrep import DocstringProcessor
>>> d = DocstringProcessor(doc_key='My doc string')
And then use it as a decorator to process the docstring::
>>> @d
... def doc_test():
... '''That's %(doc_key)s'''
... pass
>>> print(doc_test.__doc__)
That's My doc string
Use the :meth:`get_sectionsf` method to extract Parameter sections (or
others) form the docstring for later usage (and make sure, that the
docstring is dedented)::
>>> @d.get_sectionsf('docstring_example',
... sections=['Parameters', 'Examples'])
... @d.dedent
... def doc_test(a=1, b=2):
... '''
... That's %(doc_key)s
...
... Parameters
... ----------
... a: int, optional
... A dummy parameter description
... b: int, optional
... A second dummy parameter
...
... Examples
... --------
... Some dummy example doc'''
... print(a)
>>> @d.dedent
... def second_test(a=1, b=2):
... '''
... My second function where I want to use the docstring from
... above
...
... Parameters
... ----------
... %(docstring_example.parameters)s
...
... Examples
... --------
... %(docstring_example.examples)s'''
... pass
>>> print(second_test.__doc__)
My second function where I want to use the docstring from
above
<BLANKLINE>
Parameters
----------
a: int, optional
A dummy parameter description
b: int, optional
A second dummy parameter
<BLANKLINE>
Examples
--------
Some dummy example doc
Another example uses non-dedented docstrings::
>>> @d.get_sectionsf('not_dedented')
... def doc_test2(a=1):
... '''That's the summary
...
... Parameters
... ----------
... a: int, optional
... A dummy parameter description'''
... print(a)
These sections must then be used with the :meth:`with_indent` method to
indent the inserted parameters::
>>> @d.with_indent(4)
... def second_test2(a=1):
... '''
... My second function where I want to use the docstring from
... above
...
... Parameters
... ----------
... %(not_dedented.parameters)s'''
... pass
"""
#: :class:`dict`. Dictionary containing the compiled patterns to identify
#: the Parameters, Other Parameters, Warnings and Notes sections in a
#: docstring
patterns = {}
    #: :class:`dict`. Dictionary containing the parameters that are used for
#: substitution.
params = {}
#: sections that behave the same as the `Parameter` section by defining a
#: list
param_like_sections = ['Parameters', 'Other Parameters', 'Returns',
'Raises']
#: sections that include (possibly not list-like) text
text_sections = ['Warnings', 'Notes', 'Examples', 'See Also',
'References']
#: The action on how to react on classes in python 2
#:
#: When calling::
#:
#: >>> @docstrings
#: ... class NewClass(object):
#: ... """%(replacement)s"""
#:
    #: This normally raises an AttributeError, because the ``__doc__`` attribute
#: of a class in python 2 is not writable. This attribute may be one of
#: ``'ignore', 'raise' or 'warn'``
python2_classes = 'ignore'
def __init__(self, *args, **kwargs):
"""
Parameters
----------
``*args`` and ``**kwargs``
Parameters that shall be used for the substitution. Note that you can
only provide either ``*args`` or ``**kwargs``, furthermore most of the
methods like `get_sectionsf` require ``**kwargs`` to be provided."""
if len(args) and len(kwargs):
raise ValueError("Only positional or keyword args are allowed")
self.params = args or kwargs
patterns = {}
all_sections = self.param_like_sections + self.text_sections
for section in self.param_like_sections:
patterns[section] = re.compile(
'(?s)(?<=%s\n%s\n)(.+?)(?=\n\n\S+|$)' % (
section, '-'*len(section)))
all_sections_patt = '|'.join(
'%s\n%s\n' % (s, '-'*len(s)) for s in all_sections)
# examples and see also
for section in self.text_sections:
patterns[section] = re.compile(
'(?s)(?<=%s\n%s\n)(.+?)(?=%s|$)' % (
section, '-'*len(section), all_sections_patt))
self._extended_summary_patt = re.compile(
'(?s)(.+?)(?=%s|$)' % all_sections_patt)
self._all_sections_patt = re.compile(all_sections_patt)
self.patterns = patterns
def __call__(self, func):
"""
Substitute in a docstring of a function with :attr:`params`
Parameters
----------
func: function
function with the documentation whose sections
shall be inserted from the :attr:`params` attribute
See Also
--------
dedent: also dedents the doc
with_indent: also indents the doc"""
doc = func.__doc__ and safe_modulo(func.__doc__, self.params,
stacklevel=3)
return self._set_object_doc(func, doc)
def get_sections(self, s, base,
sections=['Parameters', 'Other Parameters']):
"""
Method that extracts the specified sections out of the given string if
(and only if) the docstring follows the numpy documentation guidelines
[1]_. Note that the section either must appear in the
:attr:`param_like_sections` or the :attr:`text_sections` attribute.
Parameters
----------
s: str
Docstring to split
base: str
base to use in the :attr:`sections` attribute
sections: list of str
sections to look for. Each section must be followed by a newline
character ('\\n') and a bar of '-' (following the numpy (napoleon)
docstring conventions).
Returns
-------
str
The replaced string
References
----------
.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
See Also
--------
delete_params, keep_params, delete_types, keep_types, delete_kwargs:
For manipulating the docstring sections
save_docstring:
for saving an entire docstring
"""
params = self.params
# Remove the summary and dedent the rest
s = self._remove_summary(s)
for section in sections:
key = '%s.%s' % (base, section.lower().replace(' ', '_'))
params[key] = self._get_section(s, section)
return s
def _remove_summary(self, s):
# if the string does not start with one of the sections, we remove the
# summary
if not self._all_sections_patt.match(s.lstrip()):
# remove the summary
lines = summary_patt.sub('', s, 1).splitlines()
# look for the first line with content
first = next((i for i, l in enumerate(lines) if l.strip()), 0)
# dedent the lines
s = dedents('\n' + '\n'.join(lines[first:]))
return s
def _get_section(self, s, section):
try:
return self.patterns[section].search(s).group(0).rstrip()
except AttributeError:
return ''
def get_sectionsf(self, *args, **kwargs):
"""
Decorator method to extract sections from a function docstring
Parameters
----------
``*args`` and ``**kwargs``
See the :meth:`get_sections` method. Note, that the first argument
will be the docstring of the specified function
Returns
-------
function
Wrapper that takes a function as input and registers its sections
via the :meth:`get_sections` method"""
def func(f):
doc = f.__doc__
self.get_sections(doc or '', *args, **kwargs)
return f
return func
def _set_object_doc(self, obj, doc, stacklevel=3):
"""Convenience method to set the __doc__ attribute of a python object
"""
if isinstance(obj, types.MethodType) and six.PY2:
obj = obj.im_func
try:
obj.__doc__ = doc
except AttributeError: # probably python2 class
if (self.python2_classes != 'raise' and
(inspect.isclass(obj) and six.PY2)):
if self.python2_classes == 'warn':
warn("Cannot modify docstring of classes in python2!",
stacklevel=stacklevel)
else:
raise
return obj
def dedent(self, func):
"""
Dedent the docstring of a function and substitute with :attr:`params`
Parameters
----------
func: function
function with the documentation to dedent and whose sections
shall be inserted from the :attr:`params` attribute"""
doc = func.__doc__ and self.dedents(func.__doc__, stacklevel=4)
return self._set_object_doc(func, doc)
def dedents(self, s, stacklevel=3):
"""
Dedent a string and substitute with the :attr:`params` attribute
Parameters
----------
s: str
string to dedent and insert the sections of the :attr:`params`
attribute
stacklevel: int
The stacklevel for the warning raised in :func:`safe_module` when
encountering an invalid key in the string"""
s = dedents(s)
return safe_modulo(s, self.params, stacklevel=stacklevel)
def with_indent(self, indent=0):
"""
Substitute in the docstring of a function with indented :attr:`params`
Parameters
----------
indent: int
The number of spaces that the substitution should be indented
Returns
-------
function
Wrapper that takes a function as input and substitutes it's
``__doc__`` with the indented versions of :attr:`params`
See Also
--------
with_indents, dedent"""
def replace(func):
doc = func.__doc__ and self.with_indents(
func.__doc__, indent=indent, stacklevel=4)
return self._set_object_doc(func, doc)
return replace
def with_indents(self, s, indent=0, stacklevel=3):
"""
Substitute a string with the indented :attr:`params`
Parameters
----------
s: str
The string in which to substitute
indent: int
The number of spaces that the substitution should be indented
stacklevel: int
The stacklevel for the warning raised in :func:`safe_module` when
encountering an invalid key in the string
Returns
-------
str
The substituted string
See Also
--------
with_indent, dedents"""
# we make a new dictionary with objects that indent the original
# strings if necessary. Note that the first line is not indented
d = {key: _StrWithIndentation(val, indent)
for key, val in six.iteritems(self.params)}
return safe_modulo(s, d, stacklevel=stacklevel)
def delete_params(self, base_key, *params):
"""
Method to delete a parameter from a parameter documentation.
This method deletes the given `param` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation without the description of the param. This method works
for the ``'Parameters'`` sections.
The new docstring without the selected parts will be accessible as
``base_key + '.no_' + '|'.join(params)``, e.g.
``'original_key.no_param1|param2'``.
See the :meth:`keep_params` method for an example.
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
``*params``
str. Parameter identifier of which the documentations shall be
deleted
See Also
--------
delete_types, keep_params"""
self.params[
base_key + '.no_' + '|'.join(params)] = self.delete_params_s(
self.params[base_key], params)
@staticmethod
def delete_params_s(s, params):
"""
Delete the given parameters from a string
Same as :meth:`delete_params` but does not use the :attr:`params`
dictionary
Parameters
----------
s: str
The string of the parameters section
params: list of str
The names of the parameters to delete
Returns
-------
str
The modified string `s` without the descriptions of `params`
"""
patt = '(?s)' + '|'.join(
'(?<=\n)' + s + '\s*:.+?\n(?=\S+|$)' for s in params)
return re.sub(patt, '', '\n' + s.strip() + '\n').strip()
def delete_kwargs(self, base_key, args=None, kwargs=None):
"""
Deletes the ``*args`` or ``**kwargs`` part from the parameters section
Either `args` or `kwargs` must not be None. The resulting key will be
stored in
``base_key + 'no_args'``
if `args` is not None and `kwargs` is None
``base_key + 'no_kwargs'``
if `args` is None and `kwargs` is not None
``base_key + 'no_args_kwargs'``
if `args` is not None and `kwargs` is not None
Parameters
----------
base_key: str
The key in the :attr:`params` attribute to use
args: None or str
The string for the args to delete
kwargs: None or str
The string for the kwargs to delete
Notes
-----
The type name of `args` in the base has to be like ````*<args>````
        (i.e. the `args` argument preceded by a ``'*'`` and enclosed by double
        ``'`'``). Similarly, the type name of `kwargs` in `s` has to be like
````**<kwargs>````"""
if not args and not kwargs:
warn("Neither args nor kwargs are given. I do nothing for %s" % (
base_key))
return
ext = '.no' + ('_args' if args else '') + ('_kwargs' if kwargs else '')
self.params[base_key + ext] = self.delete_kwargs_s(
self.params[base_key], args, kwargs)
@classmethod
def delete_kwargs_s(cls, s, args=None, kwargs=None):
"""
Deletes the ``*args`` or ``**kwargs`` part from the parameters section
Either `args` or `kwargs` must not be None.
Parameters
----------
s: str
The string to delete the args and kwargs from
args: None or str
The string for the args to delete
kwargs: None or str
The string for the kwargs to delete
Notes
-----
The type name of `args` in `s` has to be like ````*<args>```` (i.e. the
        `args` argument preceded by a ``'*'`` and enclosed by double ``'`'``).
        Similarly, the type name of `kwargs` in `s` has to be like
````**<kwargs>````"""
if not args and not kwargs:
return s
types = []
if args is not None:
types.append('`?`?\*%s`?`?' % args)
if kwargs is not None:
types.append('`?`?\*\*%s`?`?' % kwargs)
return cls.delete_types_s(s, types)
def delete_types(self, base_key, out_key, *types):
"""
Method to delete a parameter from a parameter documentation.
This method deletes the given `param` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation without the description of the param. This method works
for ``'Results'`` like sections.
See the :meth:`keep_types` method for an example.
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
out_key: str
Extension for the base key (the final key will be like
``'%s.%s' % (base_key, out_key)``
``*types``
str. The type identifier of which the documentations shall deleted
See Also
--------
delete_params"""
self.params['%s.%s' % (base_key, out_key)] = self.delete_types_s(
self.params[base_key], types)
@staticmethod
def delete_types_s(s, types):
"""
Delete the given types from a string
Same as :meth:`delete_types` but does not use the :attr:`params`
dictionary
Parameters
----------
s: str
The string of the returns like section
types: list of str
The type identifiers to delete
Returns
-------
str
The modified string `s` without the descriptions of `types`
"""
patt = '(?s)' + '|'.join(
'(?<=\n)' + s + '\n.+?\n(?=\S+|$)' for s in types)
return re.sub(patt, '', '\n' + s.strip() + '\n',).strip()
def keep_params(self, base_key, *params):
"""
Method to keep only specific parameters from a parameter documentation.
This method extracts the given `param` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation with only the description of the param. This method works
for ``'Parameters'`` like sections.
The new docstring with the selected parts will be accessible as
``base_key + '.' + '|'.join(params)``, e.g.
``'original_key.param1|param2'``
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
``*params``
str. Parameter identifier of which the documentations shall be
in the new section
See Also
--------
keep_types, delete_params
Examples
--------
To extract just two parameters from a function and reuse their
docstrings, you can type::
>>> from docrep import DocstringProcessor
>>> d = DocstringProcessor()
>>> @d.get_sectionsf('do_something')
... def do_something(a=1, b=2, c=3):
... '''
... That's %(doc_key)s
...
... Parameters
... ----------
... a: int, optional
... A dummy parameter description
... b: int, optional
... A second dummy parameter that will be excluded
... c: float, optional
... A third parameter'''
... print(a)
>>> d.keep_params('do_something.parameters', 'a', 'c')
>>> @d.dedent
... def do_less(a=1, c=4):
... '''
... My second function with only `a` and `c`
...
... Parameters
... ----------
... %(do_something.parameters.a|c)s'''
... pass
>>> print(do_less.__doc__)
My second function with only `a` and `c`
<BLANKLINE>
Parameters
----------
a: int, optional
A dummy parameter description
c: float, optional
A third parameter
Equivalently, you can use the :meth:`delete_params` method to remove
parameters::
>>> d.delete_params('do_something.parameters', 'b')
>>> @d.dedent
... def do_less(a=1, c=4):
... '''
... My second function with only `a` and `c`
...
... Parameters
... ----------
... %(do_something.parameters.no_b)s'''
... pass
"""
self.params[base_key + '.' + '|'.join(params)] = self.keep_params_s(
self.params[base_key], params)
@staticmethod
def keep_params_s(s, params):
"""
Keep the given parameters from a string
Same as :meth:`keep_params` but does not use the :attr:`params`
dictionary
Parameters
----------
s: str
The string of the parameters like section
params: list of str
The parameter names to keep
Returns
-------
str
The modified string `s` with only the descriptions of `params`
"""
patt = '(?s)' + '|'.join(
'(?<=\n)' + s + '\s*:.+?\n(?=\S+|$)' for s in params)
return ''.join(re.findall(patt, '\n' + s.strip() + '\n')).rstrip()
def keep_types(self, base_key, out_key, *types):
"""
Method to keep only specific parameters from a parameter documentation.
This method extracts the given `type` from the `base_key` item in the
:attr:`params` dictionary and creates a new item with the original
documentation with only the description of the type. This method works
for the ``'Results'`` sections.
Parameters
----------
base_key: str
key in the :attr:`params` dictionary
out_key: str
Extension for the base key (the final key will be like
``'%s.%s' % (base_key, out_key)``
``*types``
str. The type identifier of which the documentations shall be
in the new section
See Also
--------
delete_types, keep_params
Examples
--------
To extract just two return arguments from a function and reuse their
docstrings, you can type::
>>> from docrep import DocstringProcessor
>>> d = DocstringProcessor()
>>> @d.get_sectionsf('do_something', sections=['Returns'])
... def do_something():
... '''
... That's %(doc_key)s
...
... Returns
... -------
... float
... A random number
... int
... A random integer'''
... return 1.0, 4
>>> d.keep_types('do_something.returns', 'int_only', 'int')
>>> @d.dedent
... def do_less():
... '''
... My second function that only returns an integer
...
... Returns
... -------
... %(do_something.returns.int_only)s'''
... return do_something()[1]
>>> print(do_less.__doc__)
My second function that only returns an integer
<BLANKLINE>
Returns
-------
int
A random integer
Equivalently, you can use the :meth:`delete_types` method to remove
parameters::
>>> d.delete_types('do_something.returns', 'no_float', 'float')
>>> @d.dedent
... def do_less():
... '''
... My second function with only `a` and `c`
...
... Returns
... ----------
... %(do_something.returns.no_float)s'''
... return do_something()[1]
"""
self.params['%s.%s' % (base_key, out_key)] = self.keep_types_s(
self.params[base_key], types)
@staticmethod
def keep_types_s(s, types):
"""
Keep the given types from a string
Same as :meth:`keep_types` but does not use the :attr:`params`
dictionary
Parameters
----------
s: str
The string of the returns like section
types: list of str
The type identifiers to keep
Returns
-------
str
The modified string `s` with only the descriptions of `types`
"""
patt = '|'.join('(?<=\n)' + s + '\n(?s).+?\n(?=\S+|$)' for s in types)
return ''.join(re.findall(patt, '\n' + s.strip() + '\n')).rstrip()
def save_docstring(self, key):
"""
Descriptor method to save a docstring from a function
Like the :meth:`get_sectionsf` method this method serves as a
descriptor for functions but saves the entire docstring"""
def func(f):
self.params[key] = f.__doc__ or ''
return f
return func
def get_summary(self, s, base=None):
"""
Get the summary of the given docstring
This method extracts the summary from the given docstring `s` which is
        basically the part until two newlines appear
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary'``. Otherwise, it will not be stored at all
Returns
-------
str
The extracted summary"""
summary = summary_patt.search(s).group()
if base is not None:
self.params[base + '.summary'] = summary
return summary
def get_summaryf(self, *args, **kwargs):
"""
Extract the summary from a function docstring
Parameters
----------
``*args`` and ``**kwargs``
See the :meth:`get_summary` method. Note, that the first argument
will be the docstring of the specified function
Returns
-------
function
Wrapper that takes a function as input and registers its summary
via the :meth:`get_summary` method"""
def func(f):
doc = f.__doc__
self.get_summary(doc or '', *args, **kwargs)
return f
return func
def get_extended_summary(self, s, base=None):
"""Get the extended summary from a docstring
This here is the extended summary
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary_ext'``. Otherwise, it will not be stored at
all
Returns
-------
str
The extracted extended summary"""
# Remove the summary and dedent
s = self._remove_summary(s)
ret = ''
if not self._all_sections_patt.match(s):
m = self._extended_summary_patt.match(s)
if m is not None:
ret = m.group().strip()
if base is not None:
self.params[base + '.summary_ext'] = ret
return ret
def get_extended_summaryf(self, *args, **kwargs):
"""Extract the extended summary from a function docstring
This function can be used as a decorator to extract the extended
summary of a function docstring (similar to :meth:`get_sectionsf`).
Parameters
----------
``*args`` and ``**kwargs``
See the :meth:`get_extended_summary` method. Note, that the first
argument will be the docstring of the specified function
Returns
-------
function
Wrapper that takes a function as input and registers its summary
via the :meth:`get_extended_summary` method"""
def func(f):
doc = f.__doc__
self.get_extended_summary(doc or '', *args, **kwargs)
return f
return func
def get_full_description(self, s, base=None):
"""Get the full description from a docstring
This here and the line above is the full description (i.e. the
combination of the :meth:`get_summary` and the
:meth:`get_extended_summary`) output
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the description shall be stored in the
:attr:`params` attribute. If not None, the summary will be stored
in ``base + '.full_desc'``. Otherwise, it will not be stored
at all
Returns
-------
str
The extracted full description"""
summary = self.get_summary(s)
extended_summary = self.get_extended_summary(s)
ret = (summary + '\n\n' + extended_summary).strip()
if base is not None:
self.params[base + '.full_desc'] = ret
return ret
def get_full_descriptionf(self, *args, **kwargs):
"""Extract the full description from a function docstring
This function can be used as a decorator to extract the full
descriptions of a function docstring (similar to
:meth:`get_sectionsf`).
Parameters
----------
``*args`` and ``**kwargs``
See the :meth:`get_full_description` method. Note, that the first
argument will be the docstring of the specified function
Returns
-------
function
Wrapper that takes a function as input and registers its summary
via the :meth:`get_full_description` method"""
def func(f):
doc = f.__doc__
self.get_full_description(doc or '', *args, **kwargs)
return f
return func
| gpl-3.0 | -3,629,700,325,544,615,000 | 32.595285 | 84 | 0.522485 | false | 4.495859 | false | false | false |
wcmckee/moejobs-site | cache/.mako.tmp/comments_helper_googleplus.tmpl.py | 1 | 2430 | # -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1443802885.4031692
_enable_loop = True
_template_filename = '/usr/local/lib/python3.4/dist-packages/nikola/data/themes/base/templates/comments_helper_googleplus.tmpl'
_template_uri = 'comments_helper_googleplus.tmpl'
_source_encoding = 'utf-8'
_exports = ['comment_link_script', 'comment_form', 'comment_link']
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
__M_writer('\n\n')
__M_writer('\n\n')
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link_script(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_form(context,url,title,identifier):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer('\n<script src="https://apis.google.com/js/plusone.js"></script>\n<div class="g-comments"\n data-href="')
__M_writer(str(url))
__M_writer('"\n data-first_party_property="BLOGGER"\n data-view_type="FILTERED_POSTMOD">\n</div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link(context,link,identifier):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer('\n<div class="g-commentcount" data-href="')
__M_writer(str(link))
__M_writer('"></div>\n<script src="https://apis.google.com/js/plusone.js"></script>\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"uri": "comments_helper_googleplus.tmpl", "source_encoding": "utf-8", "filename": "/usr/local/lib/python3.4/dist-packages/nikola/data/themes/base/templates/comments_helper_googleplus.tmpl", "line_map": {"33": 16, "39": 2, "57": 12, "43": 2, "44": 5, "45": 5, "16": 0, "51": 11, "21": 9, "22": 14, "23": 17, "56": 12, "55": 11, "29": 16, "63": 57}}
__M_END_METADATA
"""
| mit | -6,546,587,520,571,600,000 | 35.268657 | 348 | 0.617695 | false | 3.079848 | false | false | false |
tomkuba/vRAAPIClient | examples/reservation/createReservationsJinja2/createReservation-AllBusinessGroups.py | 2 | 1557 | #!/usr/bin/python
import getpass
import json
import os
from globalconfig import passwd, url, usr
from jinja2 import Environment, FileSystemLoader
from vraapiclient import reservation
#Get the current directory
currentDirectory = os.path.dirname(os.path.abspath(__file__))
client = reservation.ReservationClient(url, usr, passwd)
#Set up jinja2 environment
env = Environment(loader=FileSystemLoader(currentDirectory))
template = env.get_template('reservationTemplate.json')
#Get all business groups
businessGroups = client.getAllBusinessGroups(show="json")
#Loop through each group in the businessGroups object and pull out
#id and name, format the reservation name and inject both values
#into the params dict.
for group in businessGroups:
#This is where we format the reservation name.
#[ComputeResource]-Res-BusinessGroupName(nospaces)
name = 'CLTEST01-Res-{groupname}'.format(groupname = group['name'].replace(" ",""))
#Set all configurable parameters here
params = {
'ReservationName': name,
'SubTenantId': group['id'],
}
#Create the JSON payload for the POST
#This is where params are added to the json payload
payload = json.loads(template.render(params=params))
#Attempt to create each reservation. Catch any errors and continue
try:
reservation = client.createReservation(payload)
print "Reservation created: {id}".format(id=reservation)
except Exception, e:
pass
| mit | 699,249,831,173,375,500 | 31.4375 | 91 | 0.69878 | false | 4.373596 | false | false | false |
OCA/sale-workflow | sale_automatic_workflow/tests/test_automatic_workflow_base.py | 1 | 2654 | # Copyright 2014 Camptocamp SA (author: Guewen Baconnier)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests import common
class TestAutomaticWorkflowBase(common.TransactionCase):
def create_sale_order(self, workflow, override=None):
sale_obj = self.env['sale.order']
partner_values = {'name': 'Imperator Caius Julius Caesar Divus'}
partner = self.env['res.partner'].create(partner_values)
product_values = {'name': 'Bread',
'list_price': 5,
'type': 'product'}
product = self.env['product.product'].create(product_values)
self.product_uom_unit = self.env.ref('uom.product_uom_unit')
values = {
'partner_id': partner.id,
'order_line': [(0, 0, {
'name': product.name,
'product_id': product.id,
'product_uom': self.product_uom_unit.id,
'price_unit': product.list_price,
'product_uom_qty': 1})],
'workflow_process_id': workflow.id,
}
if override:
values.update(override)
order = sale_obj.create(values)
        # Create inventory to add stock qty to the lines
        # With this commit https://goo.gl/fRTLM3 the moves that were
        # force-assigned are not transferred in the picking
for line in order.order_line:
if line.product_id.type == 'product':
inventory = self.env['stock.inventory'].create({
'name': 'Inventory for move %s' % line.name,
'filter': 'product',
'product_id': line.product_id.id,
'line_ids': [(0, 0, {
'product_id': line.product_id.id,
'product_qty': line.product_uom_qty,
'location_id':
self.env.ref('stock.stock_location_stock').id
})]
})
inventory.post_inventory()
return order
def create_full_automatic(self, override=None):
workflow_obj = self.env['sale.workflow.process']
values = workflow_obj.create({
'name': 'Full Automatic',
'picking_policy': 'one',
'validate_order': True,
'validate_picking': True,
'create_invoice': True,
'validate_invoice': True,
'invoice_date_is_order_date': True,
})
if override:
values.update(override)
return values
def progress(self):
self.env['automatic.workflow.job'].run()
| agpl-3.0 | 5,893,069,417,804,947,000 | 39.212121 | 72 | 0.529766 | false | 4.076805 | false | false | false |
glibin/tortik | tortik/util/__init__.py | 1 | 3283 | # -*- encoding: utf-8 -*-
from types import FunctionType
import tornado.web
from tornado.util import unicode_type
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
from urllib import urlencode # py2
except ImportError:
from urllib.parse import urlencode # py3
def decorate_all(decorator_list):
    """decorate all instance methods (unless excluded) with the same decorator"""
    def is_method_need_to_decorate(func_name, func_obj, check_param):
        """check if an object should be decorated"""
        methods = ["get", "head", "post", "put", "delete", "patch"]
        return (func_name in methods and
                isinstance(func_obj, FunctionType) and
                getattr(func_obj, check_param, True))
class DecorateAll(type):
def __new__(cls, name, bases, dct):
for func_name, func_obj in dct.items():
for item in decorator_list:
decorator, check_param = item
if is_method_need_to_decorate(func_name, func_obj, check_param):
dct[func_name] = decorator(dct[func_name])
return super(DecorateAll, cls).__new__(cls, name, bases, dct)
def __setattr__(self, func_name, func_obj):
for item in decorator_list:
decorator, check_param = item
if is_method_need_to_decorate(func_name, func_obj, check_param):
func_obj = decorator(func_obj)
super(DecorateAll, self).__setattr__(func_name, func_obj)
return DecorateAll
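# Illustrative usage sketch (not part of the original module; the "log_calls" and
# "log_calls_enabled" names below are hypothetical). Each entry in decorator_list
# pairs a decorator with the name of a function attribute; a handler method is
# decorated unless that attribute is set to False on the function.
#
#   def log_calls(func):
#       def wrapper(self, *args, **kwargs):
#           logging.debug('calling %s', func.__name__)
#           return func(self, *args, **kwargs)
#       return wrapper
#
#   class Handler(tornado.web.RequestHandler,
#                 metaclass=decorate_all([(log_calls, 'log_calls_enabled')])):
#       def get(self):  # decorated automatically at class creation
#           self.finish('ok')
#
# Python 2 would declare __metaclass__ inside the class body instead.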
def make_list(val):
if isinstance(val, list):
return val
else:
return [val]
def real_ip(request):
# split is for X-Forwarded-For header that can consist of many IPs: X-Forwarded-For: client, proxy1, proxy2
return (request.headers.get('X-Real-Ip', None) or request.headers.get('X-Forwarded-For', None) or
request.remote_ip or '127.0.0.1').split(',')[0]
HTTPError = tornado.web.HTTPError
ITERABLE = (set, frozenset, list, tuple)
def update_url(url, update_args=None, remove_args=None):
scheme, sep, url_new = url.partition('://')
if len(scheme) == len(url):
scheme = ''
else:
url = '//' + url_new
url_split = urlparse.urlsplit(url)
query_dict = urlparse.parse_qs(url_split.query, keep_blank_values=True)
# add args
if update_args:
query_dict.update(update_args)
# remove args
if remove_args:
query_dict = dict([(k, query_dict.get(k)) for k in query_dict if k not in remove_args])
query = make_qs(query_dict)
return urlparse.urlunsplit((scheme, url_split.netloc, url_split.path, query, url_split.fragment))
def make_qs(query_args):
def _encode(s):
if isinstance(s, unicode_type):
return s.encode('utf-8')
else:
return s
kv_pairs = []
for key, val in query_args.items():
if val is not None:
encoded_key = _encode(key)
if isinstance(val, ITERABLE):
for v in val:
kv_pairs.append((encoded_key, _encode(v)))
else:
kv_pairs.append((encoded_key, _encode(val)))
qs = urlencode(kv_pairs, doseq=True)
return qs
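# Minimal usage sketch (illustrative; the exact parameter order in the produced
# query string may vary between Python versions):
#
#   update_url('http://example.com/path?a=1', update_args={'b': '2'})
#   -> 'http://example.com/path?a=1&b=2'
#
#   update_url('http://example.com/path?a=1&b=2', remove_args=['a'])
#   -> 'http://example.com/path?b=2'
#
#   make_qs({'q': 'test', 'tags': ['x', 'y']})
#   -> 'q=test&tags=x&tags=y'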
| mit | -2,950,805,595,275,233,300 | 31.186275 | 111 | 0.598843 | false | 3.643729 | false | false | false |
Facenapalm/NapalmBot | scripts/tow.py | 1 | 3430 | """
This script updates the Russian {{Перевод недели}} template according to the
Translation of the Week project.
Usage:
python tow.py
"""
import re
import pywikibot
META_TEMPLATE = "Template:TOWThisweek"
LOCAL_TEMPLATE = "Шаблон:Перевод недели"
ORIGINAL_ID = "original"
LOCAL_ID = "russian"
ARCHIVE_PAGE = "Проект:Переводы/Невыполненные переводы недели"
ARCHIVE_ALL = False
ARCHIVE_LABEL = "<!-- NapalmBot: insert here -->"
ARCHIVE_DEFAULT = "???"
ARCHIVE_FORMAT = "|-\n| {local} || {original}\n"
DEFAULT_TEXT = "'''[[Шаблон:Перевод недели|Укажите название статьи]]'''"
UPDATE_COMMENT = "Обновление перевода недели."
ARCHIVE_COMMENT = "Архивация перевода недели."
def parse_meta_template():
"""Return (link, langcode, pagename) tuple."""
site = pywikibot.Site("meta", "meta")
template = pywikibot.Page(site, META_TEMPLATE)
match = re.search(r"\[\[:([A-Za-z\-]+):(.*?)\]\]", template.text)
return (match.group(0), match.group(1), match.group(2))
def get_sitelink(site, lang, name):
"""Return interwiki of [[:lang:name]] in current site."""
try:
page = pywikibot.Page(pywikibot.Site(lang), name)
result = pywikibot.ItemPage.fromPage(page).getSitelink(site)
except:
result = None
return result
def get_regexps():
"""
Return (original, local) re object tuple for matching links:
$1 — prefix,
$2 — link,
$3 — postfix.
"""
regexp = r"(<span id\s*=\s*\"{}\">)(.*?)(</span>)"
wrap = lambda x: re.compile(regexp.format(x))
return (wrap(ORIGINAL_ID), wrap(LOCAL_ID))
def archive(site, local, original):
"""Archive link if neccessary."""
if ARCHIVE_PAGE == "":
return
if local != DEFAULT_TEXT:
if not ARCHIVE_ALL:
match = re.match(r"\[\[(.*?)[\]|]", local)
if match is None:
return
try:
if pywikibot.Page(site, match.group(1)).exists():
return
except:
return
else:
local = ARCHIVE_DEFAULT
page = pywikibot.Page(site, ARCHIVE_PAGE)
text = page.text
pos = text.find(ARCHIVE_LABEL)
if pos == -1:
return
text = text[:pos] + ARCHIVE_FORMAT.format(local=local, original=original) + text[pos:]
page.text = text
page.save(ARCHIVE_COMMENT, minor=False)
def main():
"""Main script function."""
site = pywikibot.Site()
(interwiki, lang, name) = parse_meta_template()
local = get_sitelink(site, lang, name)
if local:
local = "[[{}]]".format(local)
else:
local = DEFAULT_TEXT
(interwiki_re, local_re) = get_regexps()
template = pywikibot.Page(site, LOCAL_TEMPLATE)
result = template.text
old_interwiki = interwiki_re.search(result).group(2)
old_local = local_re.search(result).group(2)
if interwiki == old_interwiki:
return
else:
archive(site, old_local, old_interwiki)
result = local_re.sub("\\1" + local + "\\3", result)
result = interwiki_re.sub("\\1" + interwiki + "\\3", result)
template.text = result
template.save(UPDATE_COMMENT, minor=False)
if __name__ == "__main__":
main()
| mit | -6,701,825,818,206,077,000 | 28.504673 | 90 | 0.585784 | false | 3.117479 | false | false | false |
meeb/txhttprelay | txhttprelay/transport.py | 1 | 5773 | '''
Copyright 2012 Joe Harris
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import time
from urlparse import urlsplit, urlunsplit
from zope.interface import implements
from twisted.python import failure
from twisted.internet import reactor, protocol
from twisted.web.client import Agent
from twisted.web.iweb import IBodyProducer
from twisted.web.http_headers import Headers
from twisted.internet.defer import succeed
from txhttprelay.parser import ParserError
# try and import the verifying SSL context from txverifyssl
try:
from txverifyssl.context import VerifyingSSLContext as SSLContextFactory
except ImportError:
    # if txverifyssl is not installed, fall back to the built-in SSL context; this works but performs no SSL verification
from twisted.internet.ssl import ClientContextFactory
class SSLContextFactory(ClientContextFactory):
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
class RequestError(Exception):
pass
class HttpRequest(object):
METHODS = ('get', 'post', 'put', 'delete', 'head', 'options')
def __init__(self, id='', method='', url='', expected=200, parser=None):
method = method.lower().strip()
if method not in self.METHODS:
raise RequestError('invalid HTTP method: {}'.format(method))
self.method = method
self.url = urlsplit(url)
self.expected = expected
self.parser = parser
self.headers = {}
self.body = None
self.set_header('User-Agent', 'txhttprelay')
if self.method == 'post':
self.set_header('Content-Type', 'application/x-www-form-urlencoded')
self.id = id
self.start_time = 0
def __unicode__(self):
return u'<HttpRequest ({} {})>'.format(
self.method.upper(),
urlunsplit(self.url)
)
def __str__(self):
return self.__unicode__()
def start_timer(self):
self.start_time = time.time()
def set_header(self, name, value):
self.headers.setdefault(str(name), []).append(str(value))
def set_body(self, body):
if body:
self.body = self.parser.request(body)
class HttpResponse(object):
def __init__(self, request, code, headers, body):
self.request = request
self.code = int(code)
self.headers = list(headers)
self.body = str(body)
def ok(self):
return int(self.request.expected) == int(self.code)
def data(self):
if not self.request.parser:
return self.body
try:
return self.request.parser.response(self.body)
except ParserError:
return None
class TransportError(Exception):
pass
class StringProducer(object):
implements(IBodyProducer)
def __init__(self, data):
self.body = data
self.length = len(self.body)
def startProducing(self, consumer):
consumer.write(self.body)
return succeed(None)
def pauseProducing(self):
pass
def stopProducing(self):
pass
class StringReceiver(protocol.Protocol):
def __init__(self, response, callback):
self.response = response
self.callback = callback
def dataReceived(self, data):
self.response.body += data
def connectionLost(self, reason):
self.callback(self.response)
class HttpTransport(object):
def __init__(self, request):
self.request = request
def _request(self):
method = self.request.method.upper()
scheme = self.request.url.scheme.lower()
if scheme == 'https':
context = SSLContextFactory()
if hasattr(context, 'set_expected_host'):
context.set_expected_host(self.request.url.netloc)
agent = Agent(reactor, context)
elif scheme == 'http':
agent = Agent(reactor)
else:
raise TransportError('only HTTP and HTTPS schemes are supported')
producer = StringProducer(self.request.body) if self.request.body else None
self.request.start_timer()
return agent.request(
method,
urlunsplit(self.request.url),
Headers(self.request.headers),
producer
)
def go(self, callback=None):
if not callback:
raise TransportError('go() requires a callback as the only parameter')
def _got_response(raw_response):
if isinstance(raw_response, failure.Failure):
error_body = json.dumps({'error':raw_response.getErrorMessage()})
response = HttpResponse(request=self.request, code=0, headers={}, body=error_body)
callback(response)
else:
response = HttpResponse(
request=self.request,
code=raw_response.code,
headers=raw_response.headers.getAllRawHeaders(),
body=''
)
raw_response.deliverBody(StringReceiver(response, callback))
self._request().addBoth(_got_response)
'''
eof
'''
| apache-2.0 | 1,102,819,096,241,351,200 | 29.871658 | 113 | 0.618916 | false | 4.444188 | false | false | false |
vollov/django-blog | image/migrations/0001_initial.py | 1 | 1822 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-08 19:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import image.models
import image.storage
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Albumn',
fields=[
('id', models.CharField(default=uuid.uuid4, max_length=64, primary_key=True, serialize=False, verbose_name='Activation key')),
('name', models.CharField(db_index=True, max_length=60, unique=True)),
('weight', models.IntegerField(default=0)),
('slug', models.SlugField(max_length=150, unique=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=60, null=True)),
('image_key', models.CharField(default=uuid.uuid4, max_length=64, verbose_name='Activation key')),
('image', models.ImageField(storage=image.storage.OverwriteStorage(), upload_to=image.models.image_upload_path)),
('created', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
('weight', models.IntegerField(default=0)),
('albumn', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='image.Albumn')),
],
),
]
| mit | -1,071,830,020,456,034,800 | 40.409091 | 142 | 0.598244 | false | 4.140909 | false | false | false |
hradec/gaffer | python/GafferUITest/GadgetTest.py | 6 | 9854 | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class GadgetTest( GafferUITest.TestCase ) :
def testTransform( self ) :
g = GafferUI.TextGadget( "hello" )
self.assertEqual( g.getTransform(), imath.M44f() )
t = imath.M44f().scale( imath.V3f( 2 ) )
g.setTransform( t )
self.assertEqual( g.getTransform(), t )
c1 = GafferUI.LinearContainer()
c1.addChild( g )
c2 = GafferUI.LinearContainer()
c2.addChild( c1 )
t2 = imath.M44f().translate( imath.V3f( 1, 2, 3 ) )
c2.setTransform( t2 )
self.assertEqual( g.fullTransform(), t * t2 )
self.assertEqual( g.fullTransform( c1 ), t )
def testToolTip( self ) :
g = GafferUI.TextGadget( "hello" )
self.assertEqual( g.getToolTip( IECore.LineSegment3f() ), "" )
g.setToolTip( "hi" )
self.assertEqual( g.getToolTip( IECore.LineSegment3f() ), "hi" )
def testDerivationInPython( self ) :
class MyGadget( GafferUI.Gadget ) :
def __init__( self ) :
GafferUI.Gadget.__init__( self )
self.layersRendered = set()
def bound( self ) :
return imath.Box3f( imath.V3f( -20, 10, 2 ), imath.V3f( 10, 15, 5 ) )
def doRenderLayer( self, layer, style ) :
self.layersRendered.add( layer )
mg = MyGadget()
# we can't call the methods of the gadget directly in python to test the
		# bindings, as that doesn't prove anything (we're not exercising the virtual
# method override code in the wrapper). instead cause c++ to call through
# for us by adding our gadget to a parent and making calls to the parent.
c = GafferUI.IndividualContainer()
c.addChild( mg )
self.assertEqual( c.bound().size(), mg.bound().size() )
with GafferUI.Window() as w :
GafferUI.GadgetWidget( c )
w.setVisible( True )
self.waitForIdle( 1000 )
self.assertEqual( mg.layersRendered, set( GafferUI.Gadget.Layer.values.values() ) )
def testStyle( self ) :
g = GafferUI.TextGadget( "test" )
l = GafferUI.LinearContainer()
l.addChild( g )
self.assertEqual( g.getStyle(), None )
self.assertEqual( l.getStyle(), None )
self.assertTrue( g.style().isSame( GafferUI.Style.getDefaultStyle() ) )
self.assertTrue( l.style().isSame( GafferUI.Style.getDefaultStyle() ) )
s = GafferUI.StandardStyle()
l.setStyle( s )
self.assertTrue( l.getStyle().isSame( s ) )
self.assertEqual( g.getStyle(), None )
self.assertTrue( g.style().isSame( s ) )
self.assertTrue( l.style().isSame( s ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferUI )
self.assertTypeNamesArePrefixed( GafferUITest )
def testRenderRequestOnStyleChange( self ) :
g = GafferUI.Gadget()
cs = GafferTest.CapturingSlot( g.renderRequestSignal() )
self.assertEqual( len( cs ), 0 )
s = GafferUI.StandardStyle()
g.setStyle( s )
self.assertEqual( len( cs ), 1 )
self.assertTrue( cs[0][0].isSame( g ) )
s2 = GafferUI.StandardStyle()
g.setStyle( s2 )
self.assertEqual( len( cs ), 2 )
self.assertTrue( cs[1][0].isSame( g ) )
s2.setColor( GafferUI.StandardStyle.Color.BackgroundColor, imath.Color3f( 1 ) )
self.assertEqual( len( cs ), 3 )
self.assertTrue( cs[2][0].isSame( g ) )
def testHighlighting( self ) :
g = GafferUI.Gadget()
self.assertEqual( g.getHighlighted(), False )
g.setHighlighted( True )
self.assertEqual( g.getHighlighted(), True )
g.setHighlighted( False )
self.assertEqual( g.getHighlighted(), False )
cs = GafferTest.CapturingSlot( g.renderRequestSignal() )
g.setHighlighted( False )
self.assertEqual( len( cs ), 0 )
g.setHighlighted( True )
self.assertEqual( len( cs ), 1 )
self.assertTrue( cs[0][0].isSame( g ) )
def testVisibility( self ) :
g1 = GafferUI.Gadget()
self.assertEqual( g1.getVisible(), True )
self.assertEqual( g1.visible(), True )
g1.setVisible( False )
self.assertEqual( g1.getVisible(), False )
self.assertEqual( g1.visible(), False )
g2 = GafferUI.Gadget()
g1.addChild( g2 )
self.assertEqual( g2.getVisible(), True )
self.assertEqual( g2.visible(), False )
g1.setVisible( True )
self.assertEqual( g2.visible(), True )
g3 = GafferUI.Gadget()
g2.addChild( g3 )
self.assertEqual( g3.getVisible(), True )
self.assertEqual( g3.visible(), True )
g1.setVisible( False )
self.assertEqual( g3.getVisible(), True )
self.assertEqual( g3.visible(), False )
self.assertEqual( g3.visible( relativeTo = g2 ), True )
self.assertEqual( g3.visible( relativeTo = g1 ), True )
def testVisibilitySignals( self ) :
g = GafferUI.Gadget()
cs = GafferTest.CapturingSlot( g.renderRequestSignal() )
self.assertEqual( len( cs ), 0 )
g.setVisible( True )
self.assertEqual( len( cs ), 0 )
g.setVisible( False )
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0][0], g )
g.setVisible( False )
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0][0], g )
g.setVisible( True )
self.assertEqual( len( cs ), 2 )
self.assertEqual( cs[1][0], g )
def testBoundIgnoresHiddenChildren( self ) :
g = GafferUI.Gadget()
t = GafferUI.TextGadget( "text" )
g.addChild( t )
b = t.bound()
self.assertEqual( g.bound(), b )
t.setVisible( False )
# we still want to know what the bound would be for t,
# even when it's hidden.
self.assertEqual( t.bound(), b )
# but we don't want it taken into account when computing
# the parent bound.
self.assertEqual( g.bound(), imath.Box3f() )
def testVisibilityChangedSignal( self ) :
g = GafferUI.Gadget()
g["a"] = GafferUI.Gadget()
g["a"]["c"] = GafferUI.Gadget()
g["b"] = GafferUI.Gadget()
events = []
def visibilityChanged( gadget ) :
events.append( ( gadget, gadget.visible() ) )
connnections = [
g.visibilityChangedSignal().connect( visibilityChanged ),
g["a"].visibilityChangedSignal().connect( visibilityChanged ),
g["a"]["c"].visibilityChangedSignal().connect( visibilityChanged ),
g["b"].visibilityChangedSignal().connect( visibilityChanged ),
]
g["b"].setVisible( True )
self.assertEqual( len( events ), 0 )
g["b"].setVisible( False )
self.assertEqual( len( events ), 1 )
self.assertEqual( events[0], ( g["b"], False ) )
g["b"].setVisible( True )
self.assertEqual( len( events ), 2 )
self.assertEqual( events[1], ( g["b"], True ) )
g["a"].setVisible( True )
self.assertEqual( len( events ), 2 )
g["a"].setVisible( False )
self.assertEqual( len( events ), 4 )
self.assertEqual( events[-2], ( g["a"]["c"], False ) )
self.assertEqual( events[-1], ( g["a"], False ) )
g["a"].setVisible( True )
self.assertEqual( len( events ), 6 )
self.assertEqual( events[-2], ( g["a"]["c"], True ) )
self.assertEqual( events[-1], ( g["a"], True ) )
g["a"]["c"].setVisible( False )
self.assertEqual( len( events ), 7 )
self.assertEqual( events[-1], ( g["a"]["c"], False ) )
g.setVisible( False )
self.assertEqual( len( events ), 10 )
self.assertEqual( events[-3], ( g["a"], False ) )
self.assertEqual( events[-2], ( g["b"], False ) )
self.assertEqual( events[-1], ( g, False ) )
g["a"]["c"].setVisible( True )
self.assertEqual( len( events ), 10 )
def testEnabled( self ) :
g1 = GafferUI.Gadget()
self.assertEqual( g1.getEnabled(), True )
self.assertEqual( g1.enabled(), True )
g1.setEnabled( False )
self.assertEqual( g1.getEnabled(), False )
self.assertEqual( g1.enabled(), False )
g2 = GafferUI.Gadget()
g1.addChild( g2 )
self.assertEqual( g2.getEnabled(), True )
self.assertEqual( g2.enabled(), False )
g1.setEnabled( True )
self.assertEqual( g2.enabled(), True )
g3 = GafferUI.Gadget()
g2.addChild( g3 )
self.assertEqual( g3.getEnabled(), True )
self.assertEqual( g3.enabled(), True )
g1.setEnabled( False )
self.assertEqual( g3.getEnabled(), True )
self.assertEqual( g3.enabled(), False )
self.assertEqual( g3.enabled( relativeTo = g2 ), True )
self.assertEqual( g3.enabled( relativeTo = g1 ), True )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -6,002,373,824,026,318,000 | 27.897361 | 85 | 0.66298 | false | 3.203511 | true | false | false |
InspectorIncognito/visualization | AndroidRequests/migrations/0023_auto_20170310_1529.py | 1 | 1247 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models import F
from django.db import models, migrations
"""
PROCEDURE
- Delete aux column
"""
class Migration(migrations.Migration):
dependencies = [
('AndroidRequests', '0022_auto_20170310_1525'),
]
operations = [
# remove aux columns
migrations.RemoveField(
model_name='servicesbybusstop',
name='busStop_id_aux',
),
migrations.RemoveField(
model_name='servicestopdistance',
name='busStop_id_aux',
),
migrations.RemoveField(
model_name='eventforbusstop',
name='busStop_id_aux',
),
migrations.RemoveField(
model_name='nearbybuseslog',
name='busStop_id_aux',
),
# Service model
migrations.RemoveField(
model_name='servicesbybusstop',
name='service_id_aux',
),
# Token model
migrations.RemoveField(
model_name='poseintrajectoryoftoken',
name='token_id_aux',
),
migrations.RemoveField(
model_name='activetoken',
name='token_id_aux',
),
]
| gpl-3.0 | -268,970,102,853,448,700 | 24.44898 | 55 | 0.55012 | false | 4.227119 | false | false | false |
flippym/toolbox | yum-package.py | 1 | 1083 | # Yum package checker and installer
__author__ = "Frederico Martins"
__license__ = "GPLv3"
__version__ = 1.2
from os import system
from output_handler import OutputWaiting
class Package(object):
def __init__(self, packages):
if type(packages) is list or type(packages) is tuple or type(packages) is set:
pass
elif type(packages) is str:
packages = [packages]
else:
OutputWaiting.Info('Packages to be asserted must be in string or list format')
exit(-1)
if self.Check(packages):
self.Install('Installing {}'.format(' and '.join(self.packages)), True)
def Check(self, packages):
self.packages = []
for each in packages:
if system('rpm -q {} > /dev/null'.format(each)):
self.packages.append(each)
return self.packages
@OutputWaiting
def Install(self):
return system('yum -y install {} > /dev/null'.format(' '.join(self.packages)))
| gpl-3.0 | -9,131,135,238,154,205,000 | 25.414634 | 90 | 0.555863 | false | 4.314741 | false | false | false |
iulian787/spack | var/spack/repos/builtin/packages/libvdwxc/package.py | 2 | 1769 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libvdwxc(AutotoolsPackage):
"""Portable C library of density functionals with van der Waals
interactions for density functional theory"""
homepage = "https://libvdwxc.gitlab.io/libvdwxc/"
url = "https://launchpad.net/libvdwxc/stable/0.4.0/+download/libvdwxc-0.4.0.tar.gz"
version("0.4.0", sha256="3524feb5bb2be86b4688f71653502146b181e66f3f75b8bdaf23dd1ae4a56b33")
variant("mpi", default=True, description="Enable MPI support")
variant("pfft", default=False, description="Enable support for PFFT")
depends_on("fftw-api@3")
depends_on("mpi@2:", when="+mpi")
depends_on("pfft", when="+pfft")
# pfft needs MPI
conflicts("~mpi", "+pfft")
conflicts("^fftw~mpi", "+mpi")
def configure_args(self):
spec = self.spec
args = [
"--{0}-pfft".format(
"with" if self.spec.satisfies("+pfft") else "without"
),
"MPICC=", # make sure both variables are always unset
"MPIFC=", # otherwise the configure scripts complains
]
if spec.satisfies("+mpi"):
# work around b0rken MPI detection: the MPI detection tests are
# run with CC instead of MPICC, triggering an error. So, setting
# CC/FC to the MPI compiler wrappers.
args += [
"--with-mpi",
"CC={0}".format(spec["mpi"].mpicc),
"FC={0}".format(spec["mpi"].mpifc),
]
else:
args += ["--without-mpi"]
return args
| lgpl-2.1 | 2,800,756,667,424,239,600 | 32.377358 | 95 | 0.6026 | false | 3.552209 | false | false | false |
jhpyle/docassemble | docassemble_base/docassemble/base/ocr.py | 1 | 18129 | import tempfile
import subprocess
from PIL import Image, ImageEnhance
from docassemble.base.functions import get_config, get_language, ReturnValue, word
from docassemble.base.core import DAFile, DAFileList, DAFileCollection, DAStaticFile
import PyPDF2
from docassemble.base.logger import logmessage
from docassemble.base.error import DAError
import pycountry
import sys
import os
import shutil
import re
QPDF_PATH = 'qpdf'
def safe_pypdf_reader(filename):
try:
return PyPDF2.PdfFileReader(open(filename, 'rb'))
except PyPDF2.utils.PdfReadError:
new_filename = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".pdf", delete=False)
qpdf_subprocess_arguments = [QPDF_PATH, filename, new_filename.name]
try:
result = subprocess.run(qpdf_subprocess_arguments, timeout=60).returncode
except subprocess.TimeoutExpired:
result = 1
if result != 0:
raise Exception("Call to qpdf failed for template " + str(filename) + " where arguments were " + " ".join(qpdf_subprocess_arguments))
return PyPDF2.PdfFileReader(open(new_filename.name, 'rb'))
def ocr_finalize(*pargs, **kwargs):
#sys.stderr.write("ocr_finalize started")
if kwargs.get('pdf', False):
target = kwargs['target']
dafilelist = kwargs['dafilelist']
filename = kwargs['filename']
file_list = []
input_number = target.number
for parg in pargs:
if type(parg) is list:
for item in parg:
if type(item) is ReturnValue:
if isinstance(item.value, dict):
if 'page' in item.value:
file_list.append([item.value['indexno'], int(item.value['page']), item.value['doc']._pdf_page_path(int(item.value['page']))])
else:
file_list.append([item.value['indexno'], 0, item.value['doc'].path()])
else:
if type(parg) is ReturnValue:
                    if isinstance(parg.value, dict):
                        if 'page' in parg.value:
file_list.append([parg.value['indexno'], int(parg.value['page']), parg.value['doc']._pdf_page_path(int(parg.value['page']))])
else:
file_list.append([parg.value['indexno'], 0, parg.value['doc'].path()])
from docassemble.base.pandoc import concatenate_files
pdf_path = concatenate_files([y[2] for y in sorted(file_list, key=lambda x: x[0]*10000 + x[1])])
target.initialize(filename=filename, extension='pdf', mimetype='application/pdf', reinitialize=True)
shutil.copyfile(pdf_path, target.file_info['path'])
del target.file_info
target._make_pdf_thumbnail(1, both_formats=True)
target.commit()
target.retrieve()
return (target, dafilelist)
output = dict()
#index = 0
for parg in pargs:
#sys.stderr.write("ocr_finalize: index " + str(index) + " is a " + str(type(parg)) + "\n")
if type(parg) is list:
for item in parg:
#sys.stderr.write("ocr_finalize: sub item is a " + str(type(item)) + "\n")
if type(item) is ReturnValue and isinstance(item.value, dict):
output[int(item.value['page'])] = item.value['text']
else:
            if type(parg) is ReturnValue and isinstance(parg.value, dict):
output[int(parg.value['page'])] = parg.value['text']
#index += 1
#sys.stderr.write("ocr_finalize: assembling output\n")
final_output = "\f".join([output[x] for x in sorted(output.keys())])
#sys.stderr.write("ocr_finalize: final output has length " + str(len(final_output)) + "\n")
return final_output
def get_ocr_language(language):
langs = get_available_languages()
if language is None:
language = get_language()
ocr_langs = get_config("ocr languages")
if ocr_langs is None:
ocr_langs = dict()
if language in langs:
lang = language
else:
if language in ocr_langs and ocr_langs[language] in langs:
lang = ocr_langs[language]
else:
try:
pc_lang = pycountry.languages.get(alpha_2=language)
lang_three_letter = pc_lang.alpha_3
if lang_three_letter in langs:
lang = lang_three_letter
else:
if 'eng' in langs:
lang = 'eng'
else:
lang = langs[0]
raise Exception("could not get OCR language for language " + str(language) + "; using language " + str(lang))
except Exception as the_error:
if 'eng' in langs:
lang = 'eng'
else:
lang = langs[0]
raise Exception("could not get OCR language for language " + str(language) + "; using language " + str(lang) + "; error was " + str(the_error))
return lang
def get_available_languages():
try:
output = subprocess.check_output(['tesseract', '--list-langs'], stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as err:
raise Exception("get_available_languages: failed to list available languages: " + str(err))
else:
result = output.splitlines()
result.pop(0)
return result
def ocr_page_tasks(image_file, language=None, psm=6, x=None, y=None, W=None, H=None, user_code=None, user=None, pdf=False, preserve_color=False, **kwargs):
#sys.stderr.write("ocr_page_tasks running\n")
if isinstance(image_file, set):
return []
if not (isinstance(image_file, DAFile) or isinstance(image_file, DAFileList)):
return word("(Not a DAFile or DAFileList object)")
pdf_to_ppm = get_config("pdftoppm")
if pdf_to_ppm is None:
pdf_to_ppm = 'pdftoppm'
ocr_resolution = get_config("ocr dpi")
if ocr_resolution is None:
ocr_resolution = '300'
langs = get_available_languages()
if language is None:
language = get_language()
if language in langs:
lang = language
else:
ocr_langs = get_config("ocr languages")
if ocr_langs is None:
ocr_langs = dict()
if language in ocr_langs and ocr_langs[language] in langs:
lang = ocr_langs[language]
else:
try:
pc_lang = pycountry.languages.get(alpha_2=language)
lang_three_letter = pc_lang.alpha_3
if lang_three_letter in langs:
lang = lang_three_letter
else:
if 'eng' in langs:
lang = 'eng'
else:
lang = langs[0]
sys.stderr.write("ocr_file: could not get OCR language for language " + str(language) + "; using language " + str(lang) + "\n")
except Exception as the_error:
if 'eng' in langs:
lang = 'eng'
else:
lang = langs[0]
sys.stderr.write("ocr_file: could not get OCR language for language " + str(language) + "; using language " + str(lang) + "; error was " + str(the_error) + "\n")
if isinstance(image_file, DAFile):
image_file = [image_file]
todo = list()
for doc in image_file:
if hasattr(doc, 'extension'):
if doc.extension not in ['pdf', 'png', 'jpg', 'gif', 'docx', 'doc', 'odt', 'rtf']:
raise Exception("document with extension " + doc.extension + " is not a readable image file")
if doc.extension == 'pdf':
#doc.page_path(1, 'page')
for i in range(safe_pypdf_reader(doc.path()).getNumPages()):
todo.append(dict(doc=doc, page=i+1, lang=lang, ocr_resolution=ocr_resolution, psm=psm, x=x, y=y, W=W, H=H, pdf_to_ppm=pdf_to_ppm, user_code=user_code, user=user, pdf=pdf, preserve_color=preserve_color))
elif doc.extension in ("docx", "doc", "odt", "rtf"):
import docassemble.base.util
doc_conv = docassemble.base.util.pdf_concatenate(doc)
for i in range(safe_pypdf_reader(doc_conv.path()).getNumPages()):
todo.append(dict(doc=doc_conv, page=i+1, lang=lang, ocr_resolution=ocr_resolution, psm=psm, x=x, y=y, W=W, H=H, pdf_to_ppm=pdf_to_ppm, user_code=user_code, user=user, pdf=pdf, preserve_color=preserve_color))
else:
todo.append(dict(doc=doc, page=None, lang=lang, ocr_resolution=ocr_resolution, psm=psm, x=x, y=y, W=W, H=H, pdf_to_ppm=pdf_to_ppm, user_code=user_code, user=user, pdf=pdf, preserve_color=preserve_color))
#sys.stderr.write("ocr_page_tasks finished\n")
return todo
def make_png_for_pdf(doc, prefix, resolution, pdf_to_ppm, page=None):
path = doc.path()
make_png_for_pdf_path(path, prefix, resolution, pdf_to_ppm, page=page)
doc.commit()
def make_png_for_pdf_path(path, prefix, resolution, pdf_to_ppm, page=None):
basefile = os.path.splitext(path)[0]
test_path = basefile + prefix + '-in-progress'
with open(test_path, 'a'):
os.utime(test_path, None)
if page is None:
try:
result = subprocess.run([str(pdf_to_ppm), '-r', str(resolution), '-png', str(path), str(basefile + prefix)], timeout=3600).returncode
except subprocess.TimeoutExpired:
result = 1
else:
try:
result = subprocess.run([str(pdf_to_ppm), '-f', str(page), '-l', str(page), '-r', str(resolution), '-png', str(path), str(basefile + prefix)], timeout=3600).returncode
except subprocess.TimeoutExpired:
result = 1
if os.path.isfile(test_path):
os.remove(test_path)
if result > 0:
raise Exception("Unable to extract images from PDF file")
def ocr_pdf(*pargs, target=None, filename=None, lang=None, psm=6, dafilelist=None, preserve_color=False):
if preserve_color:
device = 'tiff48nc'
else:
device = 'tiffgray'
docs = []
if not isinstance(target, DAFile):
raise DAError("ocr_pdf: target must be a DAFile")
for other_file in pargs:
if isinstance(other_file, DAFileList):
for other_file_sub in other_file.elements:
docs.append(other_file_sub)
elif isinstance(other_file, DAFileCollection):
if hasattr(other_file, 'pdf'):
docs.append(other_file.pdf)
elif hasattr(other_file, 'docx'):
docs.append(other_file.docx)
else:
raise DAError('ocr_pdf: DAFileCollection object did not have pdf or docx attribute.')
elif isinstance(other_file, DAStaticFile):
docs.append(other_file)
elif isinstance(other_file, (str, DAFile)):
docs.append(other_file)
if len(docs) == 0:
docs.append(target)
if psm is None:
psm = 6
output = []
for doc in docs:
if not hasattr(doc, 'extension'):
continue
if doc._is_pdf() and hasattr(doc, 'has_ocr') and doc.has_ocr:
output.append(doc.path())
continue
if doc.extension in ['png', 'jpg', 'gif']:
import docassemble.base.util
doc = docassemble.base.util.pdf_concatenate(doc)
elif doc.extension in ['docx', 'doc', 'odt', 'rtf']:
import docassemble.base.util
output.append(docassemble.base.util.pdf_concatenate(doc).path())
continue
elif not doc._is_pdf():
logmessage("ocr_pdf: not a readable image file")
continue
path = doc.path()
pdf_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", delete=False)
pdf_file.close()
if doc.extension == 'pdf':
tiff_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".tiff", delete=False)
params = ['gs', '-q', '-dNOPAUSE', '-sDEVICE=' + device, '-r600', '-sOutputFile=' + tiff_file.name, path, '-c', 'quit']
try:
result = subprocess.run(params, timeout=60*60).returncode
except subprocess.TimeoutExpired:
result = 1
logmessage("ocr_pdf: call to gs took too long")
if result != 0:
raise Exception("ocr_pdf: failed to run gs with command " + " ".join(params))
params = ['tesseract', tiff_file.name, pdf_file.name, '-l', str(lang), '--psm', str(psm), '--dpi', '600', 'pdf']
try:
result = subprocess.run(params, timeout=60*60).returncode
except subprocess.TimeoutExpired:
result = 1
logmessage("ocr_pdf: call to tesseract took too long")
if result != 0:
raise Exception("ocr_pdf: failed to run tesseract with command " + " ".join(params))
else:
params = ['tesseract', path, pdf_file.name, '-l', str(lang), '--psm', str(psm), '--dpi', '300', 'pdf']
try:
result = subprocess.run(params, timeout=60*60).returncode
except subprocess.TimeoutExpired:
result = 1
logmessage("ocr_pdf: call to tesseract took too long")
if result != 0:
raise Exception("ocr_pdf: failed to run tesseract with command " + " ".join(params))
output.append(pdf_file.name + '.pdf')
if len(output) == 0:
return None
if len(output) == 1:
the_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", delete=False)
the_file.close()
shutil.copyfile(output[0], the_file.name)
source_file = the_file.name
else:
import docassemble.base.pandoc
source_file = docassemble.base.pandoc.concatenate_files(output)
if filename is None:
filename = 'file.pdf'
target.initialize(filename=filename, extension='pdf', mimetype='application/pdf', reinitialize=True)
shutil.copyfile(source_file, target.file_info['path'])
del target.file_info
target._make_pdf_thumbnail(1, both_formats=True)
target.commit()
target.retrieve()
return target
def ocr_page(indexno, doc=None, lang=None, pdf_to_ppm='pdf_to_ppm', ocr_resolution=300, psm=6, page=None, x=None, y=None, W=None, H=None, user_code=None, user=None, pdf=False, preserve_color=False):
"""Runs optical character recognition on an image or a page of a PDF file and returns the recognized text."""
if page is None:
page = 1
if psm is None:
psm = 6
sys.stderr.write("ocr_page running on page " + str(page) + "\n")
the_file = None
if not hasattr(doc, 'extension'):
return None
#sys.stderr.write("ocr_page running with extension " + str(doc.extension) + "\n")
if doc.extension not in ['pdf', 'png', 'jpg', 'gif']:
raise Exception("Not a readable image file")
#sys.stderr.write("ocr_page calling doc.path()\n")
path = doc.path()
if doc.extension == 'pdf':
the_file = None
if x is None and y is None and W is None and H is None:
the_file = doc.page_path(page, 'page', wait=False)
if the_file is None:
output_file = tempfile.NamedTemporaryFile()
args = [str(pdf_to_ppm), '-r', str(ocr_resolution), '-f', str(page), '-l', str(page)]
if x is not None:
args.extend(['-x', str(x)])
if y is not None:
args.extend(['-y', str(y)])
if W is not None:
args.extend(['-W', str(W)])
if H is not None:
args.extend(['-H', str(H)])
args.extend(['-singlefile', '-png', str(path), str(output_file.name)])
try:
result = subprocess.run(args, timeout=120).returncode
except subprocess.TimeoutExpired:
result = 1
if result > 0:
return word("(Unable to extract images from PDF file)")
the_file = output_file.name + '.png'
else:
the_file = path
file_to_read = tempfile.NamedTemporaryFile()
if pdf and preserve_color:
shutil.copyfile(the_file, file_to_read.name)
else:
image = Image.open(the_file)
color = ImageEnhance.Color(image)
bw = color.enhance(0.0)
bright = ImageEnhance.Brightness(bw)
brightened = bright.enhance(1.5)
contrast = ImageEnhance.Contrast(brightened)
final_image = contrast.enhance(2.0)
file_to_read = tempfile.TemporaryFile()
final_image.convert('RGBA').save(file_to_read, "PNG")
file_to_read.seek(0)
if pdf:
outfile = doc._pdf_page_path(page)
params = ['tesseract', 'stdin', re.sub(r'\.pdf$', '', outfile), '-l', str(lang), '--psm', str(psm), '--dpi', str(ocr_resolution), 'pdf']
sys.stderr.write("ocr_page: piping to command " + " ".join(params) + "\n")
try:
text = subprocess.check_output(params, stdin=file_to_read).decode()
except subprocess.CalledProcessError as err:
raise Exception("ocr_page: failed to run tesseract with command " + " ".join(params) + ": " + str(err) + " " + str(err.output.decode()))
sys.stderr.write("ocr_page finished with pdf page " + str(page) + "\n")
doc.commit()
return dict(indexno=indexno, page=page, doc=doc)
params = ['tesseract', 'stdin', 'stdout', '-l', str(lang), '--psm', str(psm), '--dpi', str(ocr_resolution)]
sys.stderr.write("ocr_page: piping to command " + " ".join(params) + "\n")
try:
text = subprocess.check_output(params, stdin=file_to_read).decode()
except subprocess.CalledProcessError as err:
raise Exception("ocr_page: failed to run tesseract with command " + " ".join(params) + ": " + str(err) + " " + str(err.output.decode()))
sys.stderr.write("ocr_page finished with page " + str(page) + "\n")
return dict(indexno=indexno, page=page, text=text)
| mit | -5,302,447,009,926,579,000 | 46.960317 | 227 | 0.579072 | false | 3.631611 | false | false | false |
XcomConvent/xcom40k-shades | xcom40k/app/migrations/0026_auto_20120106_2212.py | 1 | 1819 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('app', '0025_auto_20150923_0843'),
]
operations = [
migrations.CreateModel(
name='BlogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('text', models.CharField(max_length=10000)),
('pub_date', models.DateField()),
('author', models.ForeignKey(to='app.Account')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BlogEntryTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('desc', models.CharField(max_length=500)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='mission',
name='finalize_date',
field=models.DateField(default=datetime.datetime(2012, 1, 6, 22, 12, 50, 101184, tzinfo=utc)),
preserve_default=False,
),
migrations.AlterField(
model_name='neurorequest',
name='closed_date',
field=models.DateField(),
),
migrations.AddField(
model_name='blogentry',
name='tags',
field=models.ManyToManyField(to='app.BlogEntryTag'),
),
]
| apache-2.0 | -3,998,331,126,975,175,700 | 31.482143 | 114 | 0.523364 | false | 4.53616 | false | false | false |
asceth/devsyn | devsyn/cameras/firstperson.py | 1 | 3702 | import __builtin__
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task
from devsyn.entities import Entity
base = __builtin__.base
class FirstPersonCamera(Entity):
# TODO: Make speed configurable
# constants
speed = 50
def __init__(self, parent = base.render):
# basic properties
## keeps track of mouse movement
self.pos = [0.0, 0.0]
# our prime is the camera
self.prime = base.camera
# who are we attached to?
self.parent = parent
# initialize various velocities
self.rotation_velocity = 0.05
def activate(self, reparent = True):
print "Activated FirstPerson Camera"
# initialize camera
base.camLens.setFov(70) # field of view
if reparent == True:
self.reset_parent()
# initialize camera task
base.taskMgr.add(self.update, "update_camera_task")
def deactivate(self):
self.reset_parent(base.render)
base.taskMgr.remove("update_camera_task")
def reset_parent(self, parent = None):
if parent != None:
if isinstance(parent, Entity):
self.parent = parent.prime
else:
self.parent = parent
# attach to our parent
self.attachTo(self.parent)
    # there has to be a way to get the height of the model....
self.setZ(self.getZ() + 1.0)
self.parent.hide()
def update(self, task):
# rotate the camera
pointer = base.win.getPointer(0)
new_position = [pointer.getX(), pointer.getY()]
# new position - last position gives us difference in mouse movement
d = [new_position[0] - self.pos[0],
new_position[1] - self.pos[1]]
# interpolate mouse last position to new position
self.pos[0] += d[0] * 0.5
self.pos[1] += d[1] * 0.5
# rotate camera using x vector (left/right)
camright = base.camera.getNetTransform().getMat().getRow3(0)
camright.normalize()
base.camera.setH(base.camera.getH() -
(d[0] * self.rotation_velocity))
# rotate camera using z vector (up/down)
camup = base.camera.getNetTransform().getMat().getRow3(2)
camup.normalize()
base.camera.setP(base.camera.getP() -
(d[1] * self.rotation_velocity * 2.5))
# collisions are taken care of through our
# parent (usually a player etc)
# For smoother mouse movement on all platforms
# we don't immediately set the 'cursor' in the window
# back to the center. Instead we let it freely travel
# within a square region inside the actual window.
# In this case the region has a 1/4 margin around
# our game window.
# If the cursor travels outside of this region
# we set it back to the center of the region.
# We ONLY reset the axis that moves out of bounds.
## If the mouse escapes the region via the x-axis
## reset the x axis to half screen width (center of screen)
if (self.pos[0] < (base.win.getXSize() * 0.25)):
self.pos[0] = (base.win.getXSize() / 2)
base.win.movePointer(0, base.win.getXSize() / 2, int(self.pos[1]))
elif (self.pos[0] > (base.win.getXSize() * 0.75)):
self.pos[0] = (base.win.getXSize() / 2)
base.win.movePointer(0, base.win.getXSize() / 2, int(self.pos[1]))
## If the mouse escapes the region via the y-axis
## reset the y axis to half the screen height (center of screen)
if (self.pos[1] < (base.win.getYSize() * 0.25)):
self.pos[1] = (base.win.getYSize() / 2)
base.win.movePointer(0, int(self.pos[0]), base.win.getYSize() / 2)
elif (self.pos[1] > (base.win.getYSize() * 0.75)):
self.pos[1] = (base.win.getYSize() / 2)
base.win.movePointer(0, int(self.pos[0]), base.win.getYSize() / 2)
return Task.cont
| mit | -5,657,496,182,112,678,000 | 31.761062 | 72 | 0.640194 | false | 3.329137 | false | false | false |
MKuranowski/WarsawGTFS | static/shapes/helpers.py | 1 | 3586 | from pyroutelib3 import distHaversine
from contextlib import contextmanager
from typing import IO, Optional, List, Union, Tuple
from time import time
import signal
import math
import os
from ..const import DIR_SHAPE_CACHE, SHAPE_CACHE_TTL
from ..util import ensure_dir_exists
_Pt = Tuple[float, float]
@contextmanager
def time_limit(sec):
"Time limter based on https://gist.github.com/Rabbit52/7449101"
def handler(x, y):
raise TimeoutError
signal.signal(signal.SIGALRM, handler)
signal.alarm(sec)
try:
yield
finally:
signal.alarm(0)
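# Usage sketch (illustrative; the routing call is hypothetical): the body raises
# TimeoutError once the limit is hit. SIGALRM-based, so Unix-only and main thread only.
#
#   with time_limit(2):
#       shape = some_slow_routing_call()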
def total_length(x: List[_Pt]) -> float:
dist = 0.0
for i in range(1, len(x)):
dist += distHaversine(x[i-1], x[i])
return dist
def dist_point_to_line(r: _Pt, p1: _Pt, p2: _Pt) -> float:
"""Defines distance from point r to line defined by point p1 and p2."""
# See https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line,
# algorithm "Line defined by two points"
# Unpack coordinates
x0, y0 = r
x1, y1 = p1
x2, y2 = p2
    # Differences between the p1 and p2 coordinates
dx = x2 - x1
dy = y2 - y1
return abs(dy*x0 - dx*y0 + x2*y1 - y2*x1) / math.sqrt(dy**2 + dx**2)
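# Worked example (illustrative): for the horizontal line through (0, 0) and (4, 0),
# the point (2, 3) lies 3 units away:
#   dx, dy = 4, 0  ->  |0*2 - 4*3 + 4*0 - 0*0| / sqrt(0**2 + 4**2) = 12 / 4 = 3
#   dist_point_to_line((2, 3), (0, 0), (4, 0)) == 3.0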
def simplify_line(x: List[_Pt], threshold: float) -> List[_Pt]:
"""Simplifies line x using the Ramer-Douglas-Peucker algorithm"""
# Unable to simplify 2-point lines any further
if len(x) <= 2:
return x
# Find point furthest away from line (x[0], x[-1])
furthest_pt_dist = 0
furthest_pt_index = -1
for pt_idx, pt in enumerate(x[1:-1], start=1):
pt_dist = dist_point_to_line(pt, x[0], x[-1])
if pt_dist > furthest_pt_dist:
furthest_pt_dist = pt_dist
furthest_pt_index = pt_idx
    # If the furthest point is further than the given threshold, simplify both parts recursively
if furthest_pt_dist > threshold:
left_simplified = simplify_line(x[:furthest_pt_index + 1], threshold)
right_simplified = simplify_line(x[furthest_pt_index:], threshold)
# strip last point from `left_simplified` to avoid furthest point being included twice
return left_simplified[:-1] + right_simplified
    # If the furthest point is closer than the given threshold, the simplification is just the
# segment from start & end of x.
else:
return [x[0], x[-1]]
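# Usage sketch (illustrative): interior points that deviate from the start-end chord
# by less than the threshold are dropped; the endpoints are always kept.
#
#   simplify_line([(0.0, 0.0), (1.0, 0.05), (2.0, 0.0)], threshold=0.1)
#   -> [(0.0, 0.0), (2.0, 0.0)]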
def cache_retr(file: str, ttl_minutes: int = SHAPE_CACHE_TTL) -> Optional[IO[bytes]]:
"""
    Tries to read the specified file from the cache.
    If the file is older than the specified time-to-live,
    or the cached file doesn't exist at all, returns None.
Otherwise, returns a file-like object.
"""
file_path = os.path.join(DIR_SHAPE_CACHE, file)
    # Check if the cached file exists
if not os.path.exists(file_path):
return
# Try to get file's last-modified attribute
file_stat = os.stat(file_path)
file_timediff = (time() - file_stat.st_mtime) / 60
    # File was modified less than ttl_minutes ago, so return an IO object for that file
if file_timediff < ttl_minutes:
return open(file_path, "rb")
def cache_save(file: str, reader: Union[IO[bytes], bytes]):
"""Caches contents of `reader` in DIR_SHAPE_CACHE/{file}."""
ensure_dir_exists(DIR_SHAPE_CACHE, clear=False)
file_path = os.path.join(DIR_SHAPE_CACHE, file)
    # Write the provided content into the cache file
with open(file_path, "wb") as writer:
if isinstance(reader, bytes):
writer.write(reader)
else:
while (chunk := reader.read(1024 * 16)):
writer.write(chunk)
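# Usage sketch (illustrative; the file name and fetch function are hypothetical):
#
#   cached = cache_retr("tram_33.osm")
#   if cached is None:
#       data = download_shape_source()   # returns bytes
#       cache_save("tram_33.osm", data)
#   else:
#       data = cached.read()
#       cached.close()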
| mit | 4,947,337,439,888,656,000 | 30.45614 | 94 | 0.64222 | false | 3.210385 | false | false | false |
priomsrb/vimswitch | vimswitch/test/FileSystemSandbox.py | 1 | 3987 | import os
import shutil
import vimswitch.six.moves.builtins as builtins
class FileSystemSandbox:
"""
This static class sets up a global sandbox where all disk modification is
disabled except inside the sandbox directory. If an operation outside the
sandbox directory occurs, a FileSystemSandboxError is thrown.
Read-only disk operations will still be allowed outside the sandbox
directory, however.
"""
enabled = False
sandboxRoot = ''
def enable(self, sandboxRoot):
if self.enabled:
raise RuntimeError('Sandbox already enabled')
self.enabled = True
self.sandboxRoot = sandboxRoot
self._setUpSafeOperations()
def disable(self):
if not self.enabled:
raise RuntimeError('Sandbox already disabled')
self.enabled = False
self._tearDownSafeOperations()
def _setUpSafeOperations(self):
self._real_builtin_open = builtins.open
self._real_os_mkdir = os.mkdir
self._real_os_makedirs = os.makedirs
self._real_os_remove = os.remove
self._real_os_path_isfile = os.path.isfile
self._real_os_path_isdir = os.path.isdir
self._real_shutil_copy = shutil.copy
self._real_shutil_move = shutil.move
self._real_shutil_copytree = shutil.copytree
self._real_shutil_rmtree = shutil.rmtree
builtins.open = self._safe_builtin_open
os.mkdir = self._safe_os_mkdir
os.makedirs = self._safe_os_makedirs
os.remove = self._safe_os_remove
shutil.copy = self._safe_shutil_copy
shutil.move = self._safe_shutil_move
shutil.copytree = self._safe_shutil_copytree
shutil.rmtree = self._safe_shutil_rmtree
def _tearDownSafeOperations(self):
builtins.open = self._real_builtin_open
os.mkdir = self._real_os_mkdir
os.makedirs = self._real_os_makedirs
os.remove = self._real_os_remove
shutil.copy = self._real_shutil_copy
shutil.move = self._real_shutil_move
shutil.copytree = self._real_shutil_copytree
shutil.rmtree = self._real_shutil_rmtree
def _safe_builtin_open(self, path, mode='r', *args, **kwargs):
# We only verify if the file is being opened for writing or appending.
# Read only access should be allowed.
if mode.find('w') != -1 or mode.find('a') != -1:
self._verifyPath(path)
return self._real_builtin_open(path, mode, *args, **kwargs)
def _safe_os_mkdir(self, path, *args, **kwargs):
self._verifyPath(path)
self._real_os_mkdir(path, *args, **kwargs)
def _safe_os_makedirs(self, path, *args, **kwargs):
self._verifyPath(path)
self._real_os_makedirs(path, *args, **kwargs)
def _safe_os_remove(self, path):
self._verifyPath(path)
self._real_os_remove(path)
def _safe_shutil_copy(self, src, dst):
# Only need to verify destination path since src will not be modified
self._verifyPath(dst)
self._real_shutil_copy(src, dst)
def _safe_shutil_move(self, src, dst):
self._verifyPath(src)
self._verifyPath(dst)
self._real_shutil_move(src, dst)
def _safe_shutil_copytree(self, src, dst, *args, **kwargs):
# Only need to verify destination path since src will not be modified
self._verifyPath(dst)
self._real_shutil_copytree(src, dst, *args, **kwargs)
def _safe_shutil_rmtree(self, path, *args, **kwargs):
self._verifyPath(path)
self._real_shutil_rmtree(path, *args, **kwargs)
def _verifyPath(self, path):
"Checks that path is inside the sandbox"
absPath = os.path.abspath(path)
if not absPath.startswith(self.sandboxRoot):
raise FileSystemSandboxError(path)
class FileSystemSandboxError(Exception):
def __init__(self, path):
Exception.__init__(self, 'Tried to access path outside sandbox: %s' % path)
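# Usage sketch (illustrative): tests enable the sandbox around a scratch directory so
# that any accidental write outside of it fails loudly.
#
#   sandbox = FileSystemSandbox()
#   sandbox.enable('/tmp/vimswitch_test')
#   try:
#       open('/tmp/vimswitch_test/ok.txt', 'w').close()   # inside the sandbox: allowed
#       open('/etc/passwd', 'a')                          # outside: raises FileSystemSandboxError
#   finally:
#       sandbox.disable()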
| gpl-2.0 | 7,276,454,648,368,458,000 | 34.598214 | 83 | 0.637823 | false | 3.901174 | false | false | false |
moses-rolston/err | errbot/backends/base.py | 1 | 44212 | import difflib
import inspect
import io
import logging
import traceback
import warnings
from collections import deque, defaultdict
from xml.etree import cElementTree as ET
from xml.etree.cElementTree import ParseError
from errbot import botcmd, PY2
from errbot.utils import get_sender_username, xhtml2txt, parse_jid, split_string_after, deprecated
from errbot.templating import tenv
from errbot.bundled.threadpool import ThreadPool, WorkRequest
class ACLViolation(Exception):
"""Exceptions raised when user is not allowed to execute given command due to ACLs"""
class RoomError(Exception):
"""General exception class for MUC-related errors"""
class RoomNotJoinedError(RoomError):
"""Exception raised when performing MUC operations
that require the bot to have joined the room"""
class RoomDoesNotExistError(RoomError):
"""Exception that is raised when performing an operation
on a room that doesn't exist"""
class Identifier(object):
"""
This class is the parent and the basic contract of all the ways the backends
are identifying a person on their system.
"""
def __init__(self, jid=None, node='', domain='', resource=''):
if jid:
self._node, self._domain, self._resource = parse_jid(jid)
else:
self._node = node
self._domain = domain
self._resource = resource
@property
def node(self):
return self._node
@property
def domain(self):
return self._domain
@property
def resource(self):
return self._resource
@property
def stripped(self):
if self._domain:
return self._node + '@' + self._domain
return self._node # if the backend has no domain notion
def bare_match(self, other):
""" checks if 2 identifiers are equal, ignoring the resource """
return other.stripped == self.stripped
def __str__(self):
answer = self.stripped
if self._resource:
answer += '/' + self._resource
return answer
def __unicode__(self):
return str(self.__str__())
# deprecated stuff ...
@deprecated(node)
def getNode(self):
""" will be removed on the next version """
@deprecated(domain)
def getDomain(self):
""" will be removed on the next version """
@deprecated(bare_match)
def bareMatch(self, other):
""" will be removed on the next version """
@deprecated(stripped)
def getStripped(self):
""" will be removed on the next version """
@deprecated(resource)
def getResource(self):
""" will be removed on the next version """
class Message(object):
"""
A chat message.
This class represents chat messages that are sent or received by
the bot. It is modeled after XMPP messages so not all methods
make sense in the context of other back-ends.
"""
fr = Identifier('unknown@localhost')
def __init__(self, body, type_='chat', html=None):
"""
:param body:
The plaintext body of the message.
:param type_:
The type of message (generally one of either 'chat' or 'groupchat').
:param html:
An optional HTML representation of the body.
"""
# it is either unicode or assume it is utf-8
if isinstance(body, str):
self._body = body
else:
self._body = body.decode('utf-8')
self._html = html
self._type = type_
self._from = None
self._to = None
self._delayed = False
self._nick = None
@property
def to(self):
"""
Get the recipient of the message.
:returns:
An :class:`~errbot.backends.base.Identifier` identifying
the recipient.
"""
return self._to
@to.setter
def to(self, to):
"""
Set the recipient of the message.
:param to:
An :class:`~errbot.backends.base.Identifier`, or string which may
be parsed as one, identifying the recipient.
"""
if isinstance(to, Identifier):
self._to = to
else:
self._to = Identifier(to) # assume a parseable string
@property
def type(self):
"""
Get the type of the message.
:returns:
The message type as a string (generally one of either
'chat' or 'groupchat')
"""
return self._type
@type.setter
def type(self, type_):
"""
Set the type of the message.
:param type_:
The message type (generally one of either 'chat'
or 'groupchat').
"""
self._type = type_
@property
def frm(self):
"""
Get the sender of the message.
:returns:
An :class:`~errbot.backends.base.Identifier` identifying
the sender.
"""
return self._from
@frm.setter
def frm(self, from_):
"""
Set the sender of the message.
:param from_:
An :class:`~errbot.backends.base.Identifier`, or string which may
be parsed as one, identifying the sender.
"""
if isinstance(from_, Identifier):
self._from = from_
else:
self._from = Identifier(from_) # assume a parseable string
@property
def body(self):
"""
Get the plaintext body of the message.
:returns:
The body as a string.
"""
return self._body
@property
def html(self):
"""
Get the HTML representation of the message.
:returns:
A string containing the HTML message or `None` when there
is none.
"""
return self._html
@html.setter
def html(self, html):
"""
Set the HTML representation of the message
:param html:
The HTML message.
"""
self._html = html
@property
def delayed(self):
return self._delayed
@delayed.setter
def delayed(self, delayed):
self._delayed = delayed
@property
def nick(self):
return self._nick
@nick.setter
def nick(self, nick):
self._nick = nick
def __str__(self):
return self._body
# deprecated stuff ...
@deprecated(to)
def getTo(self):
""" will be removed on the next version """
@deprecated(to.fset)
def setTo(self, to):
""" will be removed on the next version """
@deprecated(type)
def getType(self):
""" will be removed on the next version """
@deprecated(type.fset)
def setType(self, type_):
""" will be removed on the next version """
@deprecated(frm)
def getFrom(self):
""" will be removed on the next version """
@deprecated(frm.fset)
def setFrom(self, from_):
""" will be removed on the next version """
@deprecated(body)
def getBody(self):
""" will be removed on the next version """
@deprecated(html)
def getHTML(self):
""" will be removed on the next version """
@deprecated(html.fset)
def setHTML(self, html):
""" will be removed on the next version """
@deprecated(delayed)
def isDelayed(self):
""" will be removed on the next version """
@deprecated(delayed.fset)
def setDelayed(self, delayed):
""" will be removed on the next version """
    @deprecated(nick)
    def getMuckNick(self):
        """ will be removed on the next version """
    @deprecated(nick.fset)
    def setMuckNick(self, nick):
        """ will be removed on the next version """
ONLINE = 'online'
OFFLINE = 'offline'
AWAY = 'away'
DND = 'dnd'
class Presence(object):
"""
This class represents a presence change for a user or a user in a chatroom.
Instances of this class are passed to :meth:`~errbot.botplugin.BotPlugin.callback_presence`
when the presence of people changes.
"""
def __init__(self, nick=None, identifier=None, status=None, chatroom=None, message=None):
if nick is None and identifier is None:
raise ValueError('Presence: nick and identifiers are both None')
if nick is None and chatroom is not None:
raise ValueError('Presence: nick is None when chatroom is not')
if status is None and message is None:
            raise ValueError('Presence: at least a new status or a new status message must be present')
self._nick = nick
self._identifier = identifier
self._chatroom = chatroom
self._status = status
self._message = message
@property
def chatroom(self):
""" Returns the Identifier pointing the room in which the event occurred.
If it returns None, the event occurred outside of a chatroom.
"""
return self._chatroom
@property
def nick(self):
""" Returns a plain string of the presence nick.
(In some chatroom implementations, you cannot know the real identifier
of a person in it).
Can return None but then identifier won't be None.
"""
return self._nick
@property
def identifier(self):
""" Returns the identifier of the event.
Can be None *only* if chatroom is not None
"""
return self._identifier
@property
def status(self):
""" Returns the status of the presence change.
It can be one of the constants ONLINE, OFFLINE, AWAY, DND, but
can also be custom statuses depending on backends.
It can be None if it is just an update of the status message (see get_message)
"""
return self._status
@property
def message(self):
""" Returns a human readable message associated with the status if any.
like : "BRB, washing the dishes"
It can be None if it is only a general status update (see get_status)
"""
return self._message
def __str__(self):
response = ''
if self._nick:
response += 'Nick:%s ' % self._nick
if self._identifier:
response += 'Idd:%s ' % self._identifier
if self._status:
response += 'Status:%s ' % self._status
if self._chatroom:
response += 'Room:%s ' % self._chatroom
if self._message:
response += 'Msg:%s ' % self._message
return response
def __unicode__(self):
return str(self.__str__())
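# Quick sketch of the constructor contract enforced above: a Presence needs a
# nick or an identifier (a chatroom additionally requires a nick) and at least
# a new status or a new status message. The JID below is a placeholder.
def _presence_example():
    return Presence(identifier=Identifier('[email protected]'), status=ONLINE)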
STREAM_WAITING_TO_START = 'pending'
STREAM_TRANSFER_IN_PROGRESS = 'in progress'
STREAM_SUCCESSFULLY_TRANSFERED = 'success'
STREAM_PAUSED = 'paused'
STREAM_ERROR = 'error'
STREAM_REJECTED = 'rejected'
DEFAULT_REASON = 'unknown'
class Stream(io.BufferedReader):
"""
This class represents a stream request.
Instances of this class are passed to :meth:`~errbot.botplugin.BotPlugin.callback_stream`
when an incoming stream is requested.
"""
def __init__(self, identifier, fsource, name=None, size=None, stream_type=None):
super(Stream, self).__init__(fsource)
self._identifier = identifier
self._name = name
self._size = size
self._stream_type = stream_type
self._status = STREAM_WAITING_TO_START
self._reason = DEFAULT_REASON
@property
def identifier(self):
"""
The identity the stream is coming from if it is an incoming request
or to if it is an outgoing request.
"""
return self._identifier
@property
def name(self):
"""
The name of the stream/file if it has one or None otherwise.
        !! Be careful of injections if you are using this name directly as a filename.
"""
return self._name
@property
def size(self):
"""
The expected size in bytes of the stream if it is known or None.
"""
return self._size
@property
def stream_type(self):
"""
The mimetype of the stream if it is known or None.
"""
return self._stream_type
@property
def status(self):
"""
The status for this stream.
"""
return self._status
def accept(self):
"""
Signal that the stream has been accepted.
"""
if self._status != STREAM_WAITING_TO_START:
raise ValueError("Invalid state, the stream is not pending.")
self._status = STREAM_TRANSFER_IN_PROGRESS
def reject(self):
"""
Signal that the stream has been rejected.
"""
if self._status != STREAM_WAITING_TO_START:
raise ValueError("Invalid state, the stream is not pending.")
self._status = STREAM_REJECTED
def error(self, reason=DEFAULT_REASON):
"""
An internal plugin error prevented the transfer.
"""
self._status = STREAM_ERROR
self._reason = reason
def success(self):
"""
The streaming finished normally.
"""
if self._status != STREAM_TRANSFER_IN_PROGRESS:
raise ValueError("Invalid state, the stream is not in progress.")
self._status = STREAM_SUCCESSFULLY_TRANSFERED
def clone(self, new_fsource):
"""
        Creates a clone with an alternative stream
"""
return Stream(self._identifier, new_fsource, self._name, self._size, self._stream_type)
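# Illustrative walk-through of the stream state machine above: a pending
# stream is either accepted (and later marked successful) or rejected. The
# identifier and payload used here are placeholders.
def _stream_example():
    stream = Stream(Identifier('[email protected]'), io.BytesIO(b'example payload'),
                    name='example.txt', size=15)
    assert stream.status == STREAM_WAITING_TO_START
    stream.accept()    # pending -> in progress
    assert stream.status == STREAM_TRANSFER_IN_PROGRESS
    stream.success()   # in progress -> success
    assert stream.status == STREAM_SUCCESSFULLY_TRANSFERED
    return stream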
class MUCRoom(Identifier):
"""
This class represents a Multi-User Chatroom.
"""
def join(self, username=None, password=None):
"""
Join the room.
If the room does not exist yet, this will automatically call
:meth:`create` on it first.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
def leave(self, reason=None):
"""
Leave the room.
:param reason:
An optional string explaining the reason for leaving the room.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
def create(self):
"""
Create the room.
Calling this on an already existing room is a no-op.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
def destroy(self):
"""
Destroy the room.
Calling this on a non-existing room is a no-op.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
@property
def exists(self):
"""
Boolean indicating whether this room already exists or not.
:getter:
Returns `True` if the room exists, `False` otherwise.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
@property
def joined(self):
"""
Boolean indicating whether this room has already been joined.
:getter:
Returns `True` if the room has been joined, `False` otherwise.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
@property
def topic(self):
"""
The room topic.
:getter:
Returns the topic (a string) if one is set, `None` if no
topic has been set at all.
.. note::
Back-ends may return an empty string rather than `None`
when no topic has been set as a network may not
differentiate between no topic and an empty topic.
:raises:
:class:`~MUCNotJoinedError` if the room has not yet been joined.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
@topic.setter
def topic(self, topic):
"""
Set the room's topic.
:param topic:
The topic to set.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
@property
def occupants(self):
"""
The room's occupants.
:getter:
Returns a list of :class:`~errbot.backends.base.MUCOccupant` instances.
:raises:
:class:`~MUCNotJoinedError` if the room has not yet been joined.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
def invite(self, *args):
"""
Invite one or more people into the room.
:*args:
One or more JID's to invite into the room.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
class MUCOccupant(Identifier):
"""
This class represents a person inside a MUC.
This class exists to expose additional information about occupants
inside a MUC. For example, the XMPP back-end may expose backend-specific
information such as the real JID of the occupant and whether or not
that person is a moderator or owner of the room.
See the parent class for additional details.
"""
pass
def build_text_html_message_pair(source):
node = None
text_plain = None
try:
node = ET.XML(source)
text_plain = xhtml2txt(source)
except ParseError as ee:
if source.strip(): # avoids keep alive pollution
logging.debug('Could not parse [%s] as XHTML-IM, assume pure text Parsing error = [%s]' % (source, ee))
text_plain = source
except UnicodeEncodeError:
text_plain = source
return text_plain, node
def build_message(text, message_class, conversion_function=None):
"""Builds an xhtml message without attributes.
    If the input is not valid XHTML-IM, fall back to a plain-text message."""
message = None # keeps the compiler happy
try:
        text = text.replace('\x02', '*')  # filter out the weird control chr (assumed \x02, IRC bold) IRC is sending
if PY2:
ET.XML(text.encode('utf-8')) # test if is it xml
else:
ET.XML(text)
edulcorated_html = conversion_function(text) if conversion_function else text
try:
text_plain, node = build_text_html_message_pair(edulcorated_html)
message = message_class(body=text_plain)
message.html = node
except ET.ParseError as ee:
logging.error('Error translating to hipchat [%s] Parsing error = [%s]' % (edulcorated_html, ee))
except ET.ParseError as ee:
if text.strip(): # avoids keep alive pollution
logging.debug('Determined that [%s] is not XHTML-IM (%s)' % (text, ee))
message = message_class(body=text)
return message
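# Rough usage sketch for build_message(): well-formed XHTML-IM input yields a
# Message carrying both a plain-text body and an HTML node, while plain text
# (which fails the ET.XML parse) falls back to a text-only Message.
def _build_message_example():
    html_msg = build_message('<p>Hello <strong>world</strong></p>', Message)
    text_msg = build_message('Hello world', Message)
    return html_msg.html is not None and text_msg.html is None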
class Backend(object):
"""
Implements the basic Bot logic (logic independent from the backend) and leaves
you to implement the missing parts
"""
cmd_history = defaultdict(lambda: deque(maxlen=10)) # this will be a per user history
MSG_ERROR_OCCURRED = 'Sorry for your inconvenience. ' \
'An unexpected error occurred.'
MSG_HELP_TAIL = 'Type help <command name> to get more info ' \
'about that specific command.'
MSG_HELP_UNDEFINED_COMMAND = 'That command is not defined.'
def __init__(self, config):
""" Those arguments will be directly those put in BOT_IDENTITY
"""
if config.BOT_ASYNC:
self.thread_pool = ThreadPool(3)
logging.debug('created the thread pool' + str(self.thread_pool))
self.commands = {} # the dynamically populated list of commands available on the bot
self.re_commands = {} # the dynamically populated list of regex-based commands available on the bot
self.MSG_UNKNOWN_COMMAND = 'Unknown command: "%(command)s". ' \
'Type "' + config.BOT_PREFIX + 'help" for available commands.'
if config.BOT_ALT_PREFIX_CASEINSENSITIVE:
self.bot_alt_prefixes = tuple(prefix.lower() for prefix in config.BOT_ALT_PREFIXES)
else:
self.bot_alt_prefixes = config.BOT_ALT_PREFIXES
def send_message(self, mess):
"""Should be overridden by backends"""
def send_simple_reply(self, mess, text, private=False):
"""Send a simple response to a message"""
self.send_message(self.build_reply(mess, text, private))
def build_reply(self, mess, text=None, private=False):
"""Build a message for responding to another message.
Message is NOT sent"""
msg_type = mess.type
response = self.build_message(text)
response.frm = self.jid
if msg_type == 'groupchat' and not private:
# stripped returns the full [email protected]/chat_username
# but in case of a groupchat, we should only try to send to the MUC address
# itself ([email protected])
response.to = mess.frm.stripped.split('/')[0]
elif str(mess.to) == self.bot_config.BOT_IDENTITY['username']:
# This is a direct private message, not initiated through a MUC. Use
# stripped to remove the resource so that the response goes to the
# client with the highest priority
response.to = mess.frm.stripped
else:
# This is a private message that was initiated through a MUC. Don't use
# stripped here to retain the resource, else the XMPP server doesn't
# know which user we're actually responding to.
response.to = mess.frm
response.type = 'chat' if private else msg_type
return response
def callback_presence(self, presence):
"""
Implemented by errBot.
"""
pass
def callback_room_joined(self, room):
"""
See :class:`~errbot.errBot.ErrBot`
"""
pass
def callback_room_left(self, room):
"""
See :class:`~errbot.errBot.ErrBot`
"""
pass
def callback_room_topic(self, room):
"""
See :class:`~errbot.errBot.ErrBot`
"""
pass
def callback_message(self, mess):
"""
Needs to return False if we want to stop further treatment
"""
# Prepare to handle either private chats or group chats
type_ = mess.type
jid = mess.frm
text = mess.body
username = get_sender_username(mess)
user_cmd_history = self.cmd_history[username]
if mess.delayed:
logging.debug("Message from history, ignore it")
return False
if type_ not in ("groupchat", "chat"):
logging.debug("unhandled message type %s" % mess)
return False
# Ignore messages from ourselves. Because it isn't always possible to get the
# real JID from a MUC participant (including ourself), matching the JID against
# ourselves isn't enough (see https://github.com/gbin/err/issues/90 for
# background discussion on this). Matching against CHATROOM_FN isn't technically
# correct in all cases because a MUC could give us another nickname, but it
# covers 99% of the MUC cases, so it should suffice for the time being.
if (jid.bare_match(self.jid) or
type_ == "groupchat" and mess.nick == self.bot_config.CHATROOM_FN): # noqa
logging.debug("Ignoring message from self")
return False
logging.debug("*** jid = %s" % jid)
logging.debug("*** username = %s" % username)
logging.debug("*** type = %s" % type_)
logging.debug("*** text = %s" % text)
# If a message format is not supported (eg. encrypted),
# txt will be None
if not text:
return False
        suppress_cmd_not_found = False
        prefixed = False  # Keeps track of whether text was prefixed with a bot prefix
        only_check_re_command = False  # Becomes true if text is determined to not be a regular command
tomatch = text.lower() if self.bot_config.BOT_ALT_PREFIX_CASEINSENSITIVE else text
if len(self.bot_config.BOT_ALT_PREFIXES) > 0 and tomatch.startswith(self.bot_alt_prefixes):
# Yay! We were called by one of our alternate prefixes. Now we just have to find out
# which one... (And find the longest matching, in case you have 'err' and 'errbot' and
# someone uses 'errbot', which also matches 'err' but would leave 'bot' to be taken as
# part of the called command in that case)
prefixed = True
longest = 0
for prefix in self.bot_alt_prefixes:
l = len(prefix)
if tomatch.startswith(prefix) and l > longest:
longest = l
logging.debug("Called with alternate prefix '{}'".format(text[:longest]))
text = text[longest:]
# Now also remove the separator from the text
for sep in self.bot_config.BOT_ALT_PREFIX_SEPARATORS:
# While unlikely, one may have separators consisting of
# more than one character
l = len(sep)
if text[:l] == sep:
text = text[l:]
elif type_ == "chat" and self.bot_config.BOT_PREFIX_OPTIONAL_ON_CHAT:
logging.debug("Assuming '%s' to be a command because BOT_PREFIX_OPTIONAL_ON_CHAT is True" % text)
            # In order to keep noise down we suppress messages about the command
            # not being found, because it's possible a plugin will trigger on what
            # was said with trigger_message.
            suppress_cmd_not_found = True
elif not text.startswith(self.bot_config.BOT_PREFIX):
only_check_re_command = True
if text.startswith(self.bot_config.BOT_PREFIX):
text = text[len(self.bot_config.BOT_PREFIX):]
prefixed = True
text = text.strip()
text_split = text.split(' ')
cmd = None
command = None
args = ''
if not only_check_re_command:
if len(text_split) > 1:
command = (text_split[0] + '_' + text_split[1]).lower()
if command in self.commands:
cmd = command
args = ' '.join(text_split[2:])
if not cmd:
command = text_split[0].lower()
args = ' '.join(text_split[1:])
if command in self.commands:
cmd = command
if len(text_split) > 1:
args = ' '.join(text_split[1:])
if command == self.bot_config.BOT_PREFIX: # we did "!!" so recall the last command
if len(user_cmd_history):
cmd, args = user_cmd_history[-1]
else:
return False # no command in history
elif command.isdigit(): # we did "!#" so we recall the specified command
index = int(command)
if len(user_cmd_history) >= index:
cmd, args = user_cmd_history[-index]
else:
return False # no command in history
# Try to match one of the regex commands if the regular commands produced no match
matched_on_re_command = False
if not cmd:
if prefixed:
commands = self.re_commands
else:
commands = {k: self.re_commands[k] for k in self.re_commands
if not self.re_commands[k]._err_command_prefix_required}
for name, func in commands.items():
if func._err_command_matchall:
match = list(func._err_command_re_pattern.finditer(text))
else:
match = func._err_command_re_pattern.search(text)
if match:
logging.debug("Matching '{}' against '{}' produced a match"
.format(text, func._err_command_re_pattern.pattern))
matched_on_re_command = True
self._process_command(mess, name, text, match)
else:
logging.debug("Matching '{}' against '{}' produced no match"
.format(text, func._err_command_re_pattern.pattern))
if matched_on_re_command:
return True
if cmd:
self._process_command(mess, cmd, args, match=None)
elif not only_check_re_command:
logging.debug("Command not found")
            if suppress_cmd_not_found:
                logging.debug("Suppressing command not found feedback")
else:
reply = self.unknown_command(mess, command, args)
if reply is None:
reply = self.MSG_UNKNOWN_COMMAND % {'command': command}
if reply:
self.send_simple_reply(mess, reply)
return True
def _process_command(self, mess, cmd, args, match):
"""Process and execute a bot command"""
jid = mess.frm
username = get_sender_username(mess)
user_cmd_history = self.cmd_history[username]
logging.info("Processing command '{}' with parameters '{}' from {}/{}".format(cmd, args, jid, mess.nick))
if (cmd, args) in user_cmd_history:
user_cmd_history.remove((cmd, args)) # Avoids duplicate history items
try:
self.check_command_access(mess, cmd)
except ACLViolation as e:
if not self.bot_config.HIDE_RESTRICTED_ACCESS:
self.send_simple_reply(mess, str(e))
return
f = self.re_commands[cmd] if match else self.commands[cmd]
if f._err_command_admin_only and self.bot_config.BOT_ASYNC:
# If it is an admin command, wait until the queue is completely depleted so
# we don't have strange concurrency issues on load/unload/updates etc...
self.thread_pool.wait()
if f._err_command_historize:
user_cmd_history.append((cmd, args)) # add it to the history only if it is authorized to be so
# Don't check for None here as None can be a valid argument to str.split.
# '' was chosen as default argument because this isn't a valid argument to str.split()
if not match and f._err_command_split_args_with != '':
try:
if hasattr(f._err_command_split_args_with, "parse_args"):
args = f._err_command_split_args_with.parse_args(args)
elif callable(f._err_command_split_args_with):
args = f._err_command_split_args_with(args)
else:
args = args.split(f._err_command_split_args_with)
except Exception as e:
self.send_simple_reply(
mess,
"Sorry, I couldn't parse your arguments. {}".format(e)
)
return
if self.bot_config.BOT_ASYNC:
wr = WorkRequest(
self._execute_and_send,
[],
{'cmd': cmd, 'args': args, 'match': match, 'mess': mess, 'jid': jid,
'template_name': f._err_command_template}
)
self.thread_pool.putRequest(wr)
if f._err_command_admin_only:
# Again, if it is an admin command, wait until the queue is completely
# depleted so we don't have strange concurrency issues.
self.thread_pool.wait()
else:
self._execute_and_send(cmd=cmd, args=args, match=match, mess=mess, jid=jid,
template_name=f._err_command_template)
def _execute_and_send(self, cmd, args, match, mess, jid, template_name=None):
"""Execute a bot command and send output back to the caller
cmd: The command that was given to the bot (after being expanded)
args: Arguments given along with cmd
match: A re.MatchObject if command is coming from a regex-based command, else None
mess: The message object
jid: The jid of the person executing the command
template_name: The name of the template which should be used to render
html-im output, if any
"""
def process_reply(reply_):
# integrated templating
if template_name:
reply_ = tenv().get_template(template_name + '.html').render(**reply_)
# Reply should be all text at this point (See https://github.com/gbin/err/issues/96)
return str(reply_)
def send_reply(reply_):
for part in split_string_after(reply_, self.bot_config.MESSAGE_SIZE_LIMIT):
self.send_simple_reply(mess, part, cmd in self.bot_config.DIVERT_TO_PRIVATE)
commands = self.re_commands if match else self.commands
try:
if inspect.isgeneratorfunction(commands[cmd]):
replies = commands[cmd](mess, match) if match else commands[cmd](mess, args)
for reply in replies:
if reply:
send_reply(process_reply(reply))
else:
reply = commands[cmd](mess, match) if match else commands[cmd](mess, args)
if reply:
send_reply(process_reply(reply))
except Exception as e:
tb = traceback.format_exc()
logging.exception('An error happened while processing '
                              'a message ("%s") from %s: %s' %
(mess.body, jid, tb))
send_reply(self.MSG_ERROR_OCCURRED + ':\n %s' % e)
def is_admin(self, usr):
"""
an overridable check to see if a user is an administrator
"""
return usr in self.bot_config.BOT_ADMINS
def check_command_access(self, mess, cmd):
"""
Check command against ACL rules
Raises ACLViolation() if the command may not be executed in the given context
"""
usr = str(get_jid_from_message(mess))
typ = mess.type
if cmd not in self.bot_config.ACCESS_CONTROLS:
self.bot_config.ACCESS_CONTROLS[cmd] = self.bot_config.ACCESS_CONTROLS_DEFAULT
if ('allowusers' in self.bot_config.ACCESS_CONTROLS[cmd] and
usr not in self.bot_config.ACCESS_CONTROLS[cmd]['allowusers']):
raise ACLViolation("You're not allowed to access this command from this user")
if ('denyusers' in self.bot_config.ACCESS_CONTROLS[cmd] and
usr in self.bot_config.ACCESS_CONTROLS[cmd]['denyusers']):
raise ACLViolation("You're not allowed to access this command from this user")
if typ == 'groupchat':
stripped = mess.frm.stripped
if ('allowmuc' in self.bot_config.ACCESS_CONTROLS[cmd] and
self.bot_config.ACCESS_CONTROLS[cmd]['allowmuc'] is False):
raise ACLViolation("You're not allowed to access this command from a chatroom")
if ('allowrooms' in self.bot_config.ACCESS_CONTROLS[cmd] and
stripped not in self.bot_config.ACCESS_CONTROLS[cmd]['allowrooms']):
raise ACLViolation("You're not allowed to access this command from this room")
if ('denyrooms' in self.bot_config.ACCESS_CONTROLS[cmd] and
stripped in self.bot_config.ACCESS_CONTROLS[cmd]['denyrooms']):
raise ACLViolation("You're not allowed to access this command from this room")
else:
if ('allowprivate' in self.bot_config.ACCESS_CONTROLS[cmd] and
self.bot_config.ACCESS_CONTROLS[cmd]['allowprivate'] is False):
raise ACLViolation("You're not allowed to access this command via private message to me")
f = self.commands[cmd] if cmd in self.commands else self.re_commands[cmd]
if f._err_command_admin_only:
if typ == 'groupchat':
raise ACLViolation("You cannot administer the bot from a chatroom, message the bot directly")
if not self.is_admin(usr):
raise ACLViolation("This command requires bot-admin privileges")
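    # Illustrative shape of the ACL configuration consumed by
    # check_command_access() above (command names and JIDs are placeholders,
    # only the keys are taken from the code):
    #
    #   ACCESS_CONTROLS_DEFAULT = {}   # applied to commands not listed below
    #   ACCESS_CONTROLS = {
    #       'status': {'allowmuc': False,
    #                  'allowrooms': ('[email protected]',),
    #                  'denyusers': ('[email protected]',)},
    #   }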
def unknown_command(self, _, cmd, args):
""" Override the default unknown command behavior
"""
full_cmd = cmd + ' ' + args.split(' ')[0] if args else None
if full_cmd:
part1 = 'Command "%s" / "%s" not found.' % (cmd, full_cmd)
else:
part1 = 'Command "%s" not found.' % cmd
ununderscore_keys = [m.replace('_', ' ') for m in self.commands.keys()]
matches = difflib.get_close_matches(cmd, ununderscore_keys)
if full_cmd:
matches.extend(difflib.get_close_matches(full_cmd, ununderscore_keys))
matches = set(matches)
if matches:
return (part1 + '\n\nDid you mean "' + self.bot_config.BOT_PREFIX +
('" or "' + self.bot_config.BOT_PREFIX).join(matches) + '" ?')
else:
return part1
def inject_commands_from(self, instance_to_inject):
classname = instance_to_inject.__class__.__name__
for name, value in inspect.getmembers(instance_to_inject, inspect.ismethod):
if getattr(value, '_err_command', False):
commands = self.re_commands if getattr(value, '_err_re_command') else self.commands
name = getattr(value, '_err_command_name')
if name in commands:
f = commands[name]
new_name = (classname + '-' + name).lower()
self.warn_admins('%s.%s clashes with %s.%s so it has been renamed %s' % (
classname, name, type(f.__self__).__name__, f.__name__, new_name))
name = new_name
commands[name] = value
if getattr(value, '_err_re_command'):
logging.debug('Adding regex command : %s -> %s' % (name, value.__name__))
self.re_commands = commands
else:
logging.debug('Adding command : %s -> %s' % (name, value.__name__))
self.commands = commands
def remove_commands_from(self, instance_to_inject):
for name, value in inspect.getmembers(instance_to_inject, inspect.ismethod):
if getattr(value, '_err_command', False):
name = getattr(value, '_err_command_name')
if getattr(value, '_err_re_command') and name in self.re_commands:
del (self.re_commands[name])
elif not getattr(value, '_err_re_command') and name in self.commands:
del (self.commands[name])
def warn_admins(self, warning):
for admin in self.bot_config.BOT_ADMINS:
self.send(admin, warning)
def top_of_help_message(self):
"""Returns a string that forms the top of the help message
Override this method in derived class if you
want to add additional help text at the
beginning of the help message.
"""
return ""
def bottom_of_help_message(self):
"""Returns a string that forms the bottom of the help message
Override this method in derived class if you
want to add additional help text at the end
of the help message.
"""
return ""
@botcmd
def help(self, mess, args):
""" Returns a help string listing available options.
Automatically assigned to the "help" command."""
if not args:
if self.__doc__:
description = self.__doc__.strip()
else:
description = 'Available commands:'
usage = '\n'.join(sorted([
self.bot_config.BOT_PREFIX + '%s: %s' % (name, (command.__doc__ or
'(undocumented)').strip().split('\n', 1)[0])
for (name, command) in self.commands.items()
if name != 'help' and not command._err_command_hidden
]))
usage = '\n\n' + '\n\n'.join(filter(None, [usage, self.MSG_HELP_TAIL]))
else:
description = ''
if args in self.commands:
usage = (self.commands[args].__doc__ or
'undocumented').strip()
else:
usage = self.MSG_HELP_UNDEFINED_COMMAND
top = self.top_of_help_message()
bottom = self.bottom_of_help_message()
return ''.join(filter(None, [top, description, usage, bottom]))
def send(self, user, text, in_reply_to=None, message_type='chat', groupchat_nick_reply=False):
"""Sends a simple message to the specified user."""
nick_reply = self.bot_config.GROUPCHAT_NICK_PREFIXED
if (message_type == 'groupchat' and in_reply_to and nick_reply and groupchat_nick_reply):
reply_text = self.groupchat_reply_format().format(in_reply_to.nick, text)
else:
reply_text = text
mess = self.build_message(reply_text)
if hasattr(user, 'stripped'):
mess.to = user.stripped
else:
mess.to = user
if in_reply_to:
mess.type = in_reply_to.type
mess.frm = in_reply_to.to.stripped
else:
mess.type = message_type
mess.frm = self.jid
self.send_message(mess)
# ##### HERE ARE THE SPECIFICS TO IMPLEMENT PER BACKEND
def groupchat_reply_format(self):
raise NotImplementedError("It should be implemented specifically for your backend")
def build_message(self, text):
raise NotImplementedError("It should be implemented specifically for your backend")
def serve_forever(self):
raise NotImplementedError("It should be implemented specifically for your backend")
def connect(self):
"""Connects the bot to server or returns current connection
"""
raise NotImplementedError("It should be implemented specifically for your backend")
def join_room(self, room, username=None, password=None):
"""
Join a room (MUC).
:param room:
The JID/identifier of the room to join.
:param username:
An optional username to use.
:param password:
An optional password to use (for password-protected rooms).
.. deprecated:: 2.2.0
Use the methods on :class:`MUCRoom` instead.
"""
warnings.warn(
"Using join_room is deprecated, use query_room and the join "
"method on the resulting response instead.",
DeprecationWarning
)
self.query_room(room).join(username=username, password=password)
def query_room(self, room):
"""
Query a room for information.
:param room:
The JID/identifier of the room to query for.
:returns:
An instance of :class:`~MUCRoom`.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
def shutdown(self):
pass
def connect_callback(self):
pass
def disconnect_callback(self):
pass
@property
def mode(self):
raise NotImplementedError("It should be implemented specifically for your backend")
def rooms(self):
"""
Return a list of rooms the bot is currently in.
:returns:
A list of :class:`~errbot.backends.base.MUCRoom` instances.
"""
raise NotImplementedError("It should be implemented specifically for your backend")
def get_jid_from_message(mess):
if mess.type == 'chat':
# strip the resource for direct chats
return mess.frm.stripped
fr = mess.frm
jid = Identifier(node=fr.node, domain=fr.domain, resource=fr.resource)
return jid
| gpl-3.0 | -6,755,025,033,461,585,000 | 34.200637 | 115 | 0.580815 | false | 4.331962 | true | false | false |
Nokorbis/unodp | src/main/resources/scripts/dataset_profiler/main.py | 1 | 1870 | """ Script that tries to parse a dataset to profile its variables"""
import os
import sys
from databasehelper import DatabaseHelper
from profiler import Profiler
def main():
"""
Main function of the program
Arguments should be:
1) path of the file to analyse
2) format of the file
3) id of the resource
"""
if len(sys.argv) < 4:
        # TODO: Print to log
print('Call should be: py <script.py> <path> <file_format> '
'<resource_id>')
return -1
path = sys.argv[1]
file_format = sys.argv[2]
resource_id = sys.argv[3]
if os.path.exists(path) and os.path.isfile(path):
with open(path, 'rb') as file:
profiler = Profiler(file, file_format.lower())
profiler.profile_dataset()
save_to_database(resource_id, profiler)
return 0
def save_to_database(resource_id: int, profiler: Profiler):
""" Save the structure of the dataset resource in the database """
db_hlpr = DatabaseHelper()
db_hlpr.open_connection()
types = db_hlpr.get_variable_types_id(profiler.final_types)
pfl_id = db_hlpr.create_profile(resource_id)
tbl_id = db_hlpr.add_resource_table(pfl_id, offset_y=profiler.offset)
length = len(profiler.final_types)
variables = [None]*length
for i in range(0, length):
typ = profiler.final_types[i]
if profiler.real_headers:
name = profiler.real_headers[i]
else:
name = "Col-" + str(i)
idt = db_hlpr.add_variable(tbl_id, i, types[typ], name)
variables[i] = idt
i = 0
for row in profiler.row_set:
if i < (profiler.offset+1):
i = i + 1
continue
db_hlpr.add_row(tbl_id, i, row, variables)
i = i + 1
db_hlpr.close_connection()
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -5,158,722,433,175,688,000 | 25.338028 | 73 | 0.596257 | false | 3.456562 | false | false | false |
guglielmino/pushetta-api-django | pushetta/api/subscriber_sl.py | 1 | 3851 | # coding=utf-8
# Project: Pushetta API
# Service layer with the functionality for managing Subscribers
from rest_framework import generics, permissions
from rest_framework.response import Response
from rest_framework import status
from django.conf import settings
from core.models import Subscriber, Channel, ChannelSubscribeRequest
from core.models import ACCEPTED, PENDING
from api.serializers import SubscriberSerializer, ChannelSerializer, ChannelSubscribeRequestSerializer
from core.subscriber_manager import SubscriberManager
class SubscriberList(generics.GenericAPIView):
"""
Handle device subscription to Pushetta
"""
serializer_class = SubscriberSerializer
def post(self, request, format=None):
serializer = SubscriberSerializer(data=request.DATA)
if serializer.is_valid():
is_sandbox = (True if settings.ENVIRONMENT == "dev" else False)
subscriber_data = serializer.object
subscriber, created = Subscriber.objects.get_or_create(device_id=subscriber_data["device_id"],
defaults={'sub_type': subscriber_data["sub_type"],
'sandbox': is_sandbox, 'enabled': True,
'name': subscriber_data["name"],
'token': subscriber_data["token"]})
if not created:
subscriber.token = subscriber_data["token"]
subscriber.name = subscriber_data["name"]
subscriber.save()
            # Update the token in the device's channel subscriptions
            subManager = SubscriberManager()
            channel_subscriptions = subManager.get_device_subscriptions(subscriber_data["device_id"])
            for channel_sub in channel_subscriptions:
                subManager.subscribe(channel_sub, subscriber_data["sub_type"], subscriber_data["device_id"],
subscriber_data["token"])
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
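# Example of the POST body the endpoint above expects (field names follow the
# serializer usage in post(); the concrete values are placeholders):
#
#   {
#       "device_id": "d3adb33f",
#       "sub_type": "gcm",
#       "name": "My phone",
#       "token": "registration-token-from-the-push-service"
#   }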
class SubcriptionsList(generics.GenericAPIView):
"""
Handle subscriptions to channels of a specific device
"""
permission_classes = [
permissions.AllowAny
]
serializer_class = ChannelSerializer
def get(self, request, format=None, deviceId=None):
channel_names = SubscriberManager().get_device_subscriptions(deviceId)
channels = Channel.objects.filter(name__in=channel_names)
serializer = ChannelSerializer(channels, many=True)
return Response(serializer.data)
class DeviceSubscriptionsRequests(generics.GenericAPIView):
"""
Handle list of device requests (subscribed and pending subscriptions)
"""
permission_classes = [
permissions.AllowAny
]
serializer_class = ChannelSubscribeRequestSerializer
def get(self, request, format=None, deviceId=None):
channel_names = SubscriberManager().get_device_subscriptions(deviceId)
        # Use ChannelSubscribeRequestSerializer; channels already subscribed are added as fake ACCEPTED entries
channels = Channel.objects.filter(name__in=channel_names)
subscribed = [ChannelSubscribeRequest(channel=ch, device_id=deviceId, status=ACCEPTED) for ch in channels]
        # Only the pending requests are displayed client side
requests = ChannelSubscribeRequest.objects.filter(device_id=deviceId).filter(status=PENDING)
serializer = ChannelSubscribeRequestSerializer(subscribed + list(requests), many=True)
return Response(serializer.data)
| gpl-3.0 | 3,609,743,969,598,237,700 | 38.27551 | 117 | 0.651078 | false | 4.699634 | false | false | false |
hammadi123/CF_Algorithms | baseline.py | 1 | 3368 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import math
import operator
import time
import operator
def readratings(f):
ratings = {}
index=0
somme=0
for row in f:
line = row.split("\t")
userid, movieid, rating = int(line[0]), int(line[1]), int(line[2])
ratings.setdefault(userid, {})
ratings[userid][movieid] = rating
somme=somme+rating
index=index+1
return ratings,somme/index
def transpose(util):
transposed = {}
for id1 in util:
for id2 in util[id1]:
transposed.setdefault(id2, {})
transposed[id2][id1] = util[id1][id2]
return transposed
def normalize(util):
avgs = {}
for id1 in util:
avg = 0.0
for id2 in util[id1]:
avg += util[id1][id2]
avg = float(avg)/len(util[id1])
for id2 in util[id1]:
util[id1][id2] -= avg
avgs[id1] = avg
return avgs
def bais_item(movies,mean,lamda=5):
bais_items={}
for item in movies:
somme=0
index=0
for user in movies[item]:
somme=somme+(movies[item][user]-mean)
index=index+1
bais_items[item]=somme/(index+lamda)
return bais_items
def bais_user(users,mean,bais_items,lamda=5):
bais_users={}
for user in users:
somme=0
index=0
for movie in users[user]:
somme=somme+(users[user][movie]-mean-bais_items[movie])
index=index+1
bais_users[user]=somme/(index+lamda)
return bais_users
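# The two helpers above compute regularised baseline ("bias") terms, a sketch
# of the standard baseline predictor used in collaborative filtering:
#
#   b_i = sum_{u in R(i)} (r_ui - mu) / (|R(i)| + lamda)
#   b_u = sum_{i in R(u)} (r_ui - mu - b_i) / (|R(u)| + lamda)
#
# and the main block below scores each (user, item) pair as mu + b_i + b_u.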
def dcg_at_k(r, k, method=0):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0
def ndcg_at_k(r, k, method=0):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
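# Small worked example for the two ranking metrics above (hand-picked
# relevance scores, unrelated to the MovieLens data used in the main block):
def ndcg_example():
    relevances = [3, 2, 3, 0, 1, 2]
    # DCG   = 3 + 2/log2(2) + 3/log2(3) + 0 + 1/log2(5) + 2/log2(6) ~= 8.10
    # ideal = DCG of [3, 3, 2, 2, 1, 0]                             ~= 8.69
    return ndcg_at_k(relevances, 6)  # ~= 0.93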
def histogram_plot(users):
list=[]
for u in users:
for j in users[u]:
list.append(users[u][j])
x=np.array(list)
print type(x)
print x
n = 4
bins=np.arange(1,7,1)
fig, ax = plt.subplots(1,1)
ax.hist(x, bins=bins, align='left')
ax.set_xticks(bins[:-1])
plt.xlabel("Rating value")
plt.ylabel("Frequency")
plt.show()
if __name__ == "__main__":
init = time.time()
# read in training data set
f1 = open("ua.base")
users,s = readratings(f1)
f1.close()
histogram_plot(users)
# read in test data set
f2 = open("ua.test")
rated,a = readratings(f2)
# normalize user ratings
movies = transpose(users)
b_items=bais_item(movies,s,lamda=5)
b_users=bais_user(users,s,b_items,lamda=5)
total = 0
totalrmse = 0.0
totalndcg=0
for userid in rated:
list=[]
for movieid in rated[userid]:
if movieid in movies:
list.append((rated[userid][movieid],-(s+b_items[movieid]+b_users[userid])))
totalrmse += (s+b_items[movieid]+b_users[userid]-rated[userid][movieid])**2
total += 1
list.sort(key=operator.itemgetter(1))
totalndcg=totalndcg+ndcg_at_k([list[i][0] for i in xrange(len(list))],len(list))
print "RMSE= ", math.sqrt(totalrmse/total)
print "NDCG=", totalndcg/len(rated)
print "elapsed time=",time.time()-init
| gpl-3.0 | 3,464,100,723,694,914,600 | 23.23741 | 85 | 0.597981 | false | 2.772016 | false | false | false |
codysmithd/PyGPS | NMEA.py | 1 | 4501 | '''
NMEA.py
Defines NMEA sentence and other useful classes
'''
import math
class Point:
'''
Point: Simple coordinate point
Attributes:
lat: Latutude (decimal)
lng: Longitude (decimal)
alt: Altitude (meters)
'''
def __init__(self, lat=0, lng=0, alt=0):
self.lat = lat
self.lng = lng
self.alt = alt
def __str__(self):
return '{0}, {1}, {2} meters'.format(self.lat, self.lng, self.alt)
def getDistance(self, toPoint):
'''
Gets the distance (in arbitrary units) to another point
'''
return math.sqrt(math.pow((self.lat - toPoint.lat),2) + math.pow((self.lng - toPoint.lng),2))
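# Tiny usage sketch: getDistance returns the plain Euclidean distance between
# two lat/lng pairs (in degrees), so it is only meaningful for comparisons.
def _point_distance_example():
    return Point(1.0, 2.0).getDistance(Point(4.0, 6.0))  # -> 5.0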
class GGA :
'''
NMEA GGA: fix data
Attributes:
time: String with UTC time
lat: Latitude (decimal value)
lng: Longitude (decimal value)
fix_quality:
0 = Error (no fix)
1 = GPS fix (SPS)
2 = DGPS fix
3 = PPS fix
4 = Real Time Kinematic
5 = Float RTK
6 = estimated (dead reckoning) (2.3 feature)
7 = Manual input mode
8 = Simulation mode
num_sats: number of satellites being tracked
hdp: Horizontal dilution of position
alt: Altitude, Meters, above mean sea level
geoid_height: Height of geoid (mean sea level) above WGS84 ellipsoid
checkum: message checksum
valid: is this a valid or invalid message (based on complete data and checksum)
'''
def __init__(self, inputString=''):
s = inputString.split(',')
if not len(s) == 15 or not s[0] == '$GPGGA':
raise ValueError('Invalid input string for NMEA GGA object, given string was: ' + inputString)
else:
try:
self.time = s[1]
self.lat = float(s[2][:2]) + float(s[2][2:])/60
if(s[3] == 'S'):
self.lat = -1 * self.lat
self.lng = float(s[4][:3]) + float(s[4][3:])/60
if(s[5] == 'W'):
self.lng = -1 * self.lng
self.fix_quality = s[6]
self.num_sats = int(s[7])
self.hdp = float(s[8])
self.alt = float(s[9])
self.geoid_height = float(s[11])
self.checksum = s[14]
                self.valid = _validateChecksum(inputString)
except ValueError:
if not len(self.time):
self.time = ''
if not hasattr(self, 'lat') or not self.lat:
self.lat = 0.0
if not hasattr(self, 'lng') or not self.lng:
self.lng = 0.0
if not hasattr(self, 'fix_quality') or not self.fix_quality:
self.fix_quality = 0
if not hasattr(self, 'num_sats') or not self.num_sats:
self.num_sats = 0
if not hasattr(self, 'hdp') or not self.hdp:
self.hdp = 0.0
if not hasattr(self, 'alt') or not self.alt:
self.alt = 0.0
if not hasattr(self, 'geoid_height') or not self.geoid_height:
self.geoid_height = 0.0
if not hasattr(self, 'checksum') or not self.checksum:
self.checksum = ''
self.valid = False
def getPoint(self):
'''
Returns a Point version of itself
'''
return Point(self.lat, self.lng, self.alt)
def getNMEA(line):
'''
Given a line of text, tries to make a NMEA object from it, or returns None.
Args:
line: NMEA sentence
Returns:
NMEA object if valid line (eg. GGA), None if not valid
'''
if not line:
return None
else:
s = line.split(',')
if len(s) == 15 and s[0] == '$GPGGA':
return GGA(line)
else:
return None
def _validateChecksum(line):
'''
Given a NMEA sentence line, validates the checksum
Args:
line: NMEA sentence
Returns:
True if valid, False otherwise
'''
try:
if line.index('$') == 0 and '*' in line:
check_against = line[1:line.index('*')]
checksum = int(line[line.index('*')+1:], 16)
result = 0
for char in check_against:
result = result ^ ord(char)
return checksum == result
except ValueError:
return False
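# Illustrative companion to _validateChecksum (not used by the classes above):
# it builds a complete sentence by XOR-ing every character between '$' and '*',
# which is exactly the check performed during validation.
def _append_checksum(payload):
    '''
    Given the payload of an NMEA sentence (without '$', '*' or checksum),
    returns the full sentence with its two-digit hex checksum appended.
    '''
    result = 0
    for char in payload:
        result = result ^ ord(char)
    return '$' + payload + '*' + ('%02X' % result)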
| apache-2.0 | 5,130,355,820,602,922,000 | 31.615942 | 106 | 0.504999 | false | 3.791912 | false | false | false |
Panda3D-google-code-repositories/panda3d-beast | rttcache/beast/Color.py | 2 | 5605 | #!/usr/bin/python
#encoding: utf-8
'''
This file is part of the Panda3D user interface library, Beast.
See included "License.txt"
'''
class Color(object):
'''
Converts a full color eg, 255 color, (255, 255, 255) OR (255, 255, 255, 255) to a float color (1.0, 1.0, 1.0, 1.0)
Also accepts a float color, in which case it simply returns the float color after validation.
Good for ensuring a full 255 or float color is indeed a float color.
'''
@staticmethod
def fullToFloat(floatOrFull):
assert type(floatOrFull) == list or type(floatOrFull) == tuple
assert len(floatOrFull) == 3 or len(floatOrFull) == 4
        # A float color must stay within 0-1, but (1, 1, 1) could also be a full 255 color!
        # So values within 0-1 given as floats are treated as a float color,
        # and a value of exactly 1 given as an int is treated as a full 255 color.
isFloatColor = True
for c in floatOrFull:
if c > 1.0:
isFloatColor = False
break
elif (c == 1) and (type(c) == int or type(c) == long):
isFloatColor = False
break
if isFloatColor:
if len(floatOrFull) == 3:
floatOrFull = (floatOrFull[0], floatOrFull[1], floatOrFull[2], 1.0)
return floatOrFull
r = floatOrFull[0] / 255.0
g = floatOrFull[1] / 255.0
b = floatOrFull[2] / 255.0
if len(floatOrFull) == 4:
a = floatOrFull[3] / 255.0
else:
a = 1.0
return (r, g, b, a)
'''
Converts a hex color eg, #FFFFFF OR #FFFFFFFF, to a float color (1.0, 1.0, 1.0, 1.0)
'''
@staticmethod
def hexToFloat(hexColor):
assert type(hexColor) == str or type(hexColor) == unicode
if hexColor.startswith('#'):
hexColor = hexColor[1:]
assert len(hexColor) == 6 or len(hexColor) == 8, 'Hex color must be either #RRGGBB or #RRGGBBAA format!'
r, g, b = int(hexColor[:2], 16), int(hexColor[2:4], 16), int(hexColor[4:6], 16)
if len(hexColor) == 8:
a = int(hexColor[6:8], 16)
else:
a = 255
return Color.fullToFloat((r, g, b, a))
'''
Converts a float color eg, (1.0, 1.0, 1.0) OR (1.0, 1.0, 1.0, 1.0) to a full color, (255, 255, 255, 255)
'''
@staticmethod
def floatToFull(floatColor):
assert type(floatColor) == list or type(floatColor) == tuple
assert len(floatColor) == 3 or len(floatColor) == 4
r = int(round(floatColor[0] * 255.0, 0))
g = int(round(floatColor[1] * 255.0, 0))
b = int(round(floatColor[2] * 255.0, 0))
if len(floatColor) == 4:
a = int(round(floatColor[3] * 255.0, 0))
else:
a = 255
return (r, g, b, a)
'''
Converts a float color eg, (1.0, 1.0, 1.0) OR (1.0, 1.0, 1.0, 1.0) to a hex color, #FFFFFFFF
'''
@staticmethod
def floatToHex(floatColor, withPound = True):
fullColor = Color.floatToFull(floatColor)
assert type(fullColor) == list or type(fullColor) == tuple
assert len(fullColor) == 3 or len(fullColor) == 4
if len(fullColor) == 3:
hexColor = '%02x%02x%02x' % fullColor
elif len(fullColor) == 4:
hexColor = '%02x%02x%02x%02x' % fullColor
if len(hexColor) == 6:
hexColor = hexColor + 'FF'
hexColor = hexColor.upper()
if withPound:
return '#' + hexColor
else:
return hexColor
'''
Color storage class, takes a single color, in any compatible format, and can convert it to other formats
(1.0, 1.0, 1.0), or (1.0, 1.0, 1.0, 1.0)
(255, 255, 255), or (255, 255, 255, 255)
#RRGGBB, or #RRGGBBAA (with or without pound sign)
'''
def __init__(self, color = None):
self.__color = (0.0, 0.0, 0.0, 0.0)
if color:
self.setColor(color)
'''
Change the color stored by this instance to a different one, this is the same as the constructor optional argument
'''
def setColor(self, color):
if type(color) == str or type(color) == unicode:
self.__color = self.hexToFloat(color)
elif type(color) == tuple or type(color) == list:
self.__color = self.fullToFloat(color)
else:
raise AssertionError('Invalid color format, should be either string, unicode, tuple, or list')
'''
Convert the stored color into a tuple of floats, ranging from 0-1, eg (0.5, 0.5, 0.5, 0.5)
'''
def getAsFloat(self):
return tuple(self.__color)
'''
Convert the stored color into a full 255 color, ranging from 0-255, eg (128, 128, 128, 128)
'''
def getAsFull(self):
return self.floatToFull(self.__color)
'''
Convert the stored color into a hex color, optionally starting with a pound # sign, eg #80808080
Note: Third argument is Alpha/Transparency, which may just be FF. For "fully solid"
'''
def getAsHex(self, withPound = True):
return self.floatToHex(self.__color, withPound)
if __name__ == '__main__':
def log(col, c):
c.setColor(col)
print '-> %s' % (col,)
print '-> float -> %s' % (c.getAsFloat(),)
print '-> full -> %s' % (c.getAsFull(),)
print '-> hex -> %s' % (c.getAsHex(),)
print
c = Color()
log((0.5, 0.5, 0.5), c)
log((0.5, 0.5, 0.5, 0.5), c)
log((128, 128, 128), c)
log((128, 128, 128, 128), c)
log('#808080', c)
log('#80808080', c)
| bsd-2-clause | 6,329,967,598,123,207,000 | 34.251572 | 122 | 0.548439 | false | 3.322466 | false | false | false |
kwoodhouse93/astro-bomber | source/weapon.py | 1 | 4940 | import pygame
import pymunk
from pymunk.vec2d import Vec2d
from source import game
from source.constants import *
from source.utilities import *
class Projectile:
def __init__(self, position, velocity, impulse):
self.radius = BULLET_RADIUS
mass = BULLET_MASS
moment = pymunk.moment_for_circle(mass, 0, self.radius)
self.body = pymunk.Body(mass, moment)
self.body.position = position
self.shape = pymunk.Circle(self.body, self.radius)
self.shape.collision_type = CT_BULLET
game.space.add(self.body, self.shape)
self.strength = BULLET_STRENGTH
self.body.velocity = velocity
self.body.apply_impulse_at_world_point(impulse)
game.object_manager.register(self)
def update(self):
Utils.remove_if_outside_game_area(self.body, self, self.radius)
def hit(self, damage):
self.strength -= damage
if self.strength < 0:
game.object_manager.unregister(self)
def delete(self):
# print('Bullet removed')
game.space.remove(self.body, self.shape)
def draw(self):
draw_tuple = Utils.vec2d_to_draw_tuple(self.body.position)
pygame.draw.circle(game.screen, (255, 255, 255), draw_tuple, self.radius)
class Bomb:
def __init__(self, position, velocity, impulse):
self.radius = BOMB_RADIUS
mass = BOMB_MASS
moment = pymunk.moment_for_circle(mass, 0, self.radius)
self.body = pymunk.Body(mass, moment)
self.body.position = position
self.shape = pymunk.Circle(self.body, self.radius)
self.shape.collision_type = CT_BOMB
game.space.add(self.body, self.shape)
self.strength = BOMB_STRENGTH
self.body.velocity = velocity
self.body.apply_impulse_at_world_point(impulse)
game.object_manager.register(self)
self.birth = pygame.time.get_ticks()
self.lifetime = BOMB_TIMER
self.exploded = False
def explode(self):
print('BANG!')
# BOMB_BLAST_RADIUS
# # Create blast sensor shape
self.blast_shape = pymunk.Circle(self.body, BOMB_BLAST_RADIUS)
self.blast_shape.sensor = True
self.blast_shape.collision_type = CT_BLAST
game.space.add(self.blast_shape)
# game.object_manager.unregister(self)
self.exploded = True
def update(self):
if self.exploded:
game.object_manager.unregister(self)
age = pygame.time.get_ticks() - self.birth
if age > self.lifetime and not self.exploded:
self.explode()
Utils.remove_if_outside_game_area(self.body, self, BOMB_BLAST_RADIUS)
def hit(self, damage):
self.strength -= damage
if self.strength < 0:
self.explode()
game.object_manager.unregister(self)
def delete(self):
print('Bomb removed')
if hasattr(self, 'blast_shape'):
game.space.remove(self.blast_shape)
game.space.remove(self.body, self.shape)
def draw(self):
draw_tuple = Utils.vec2d_to_draw_tuple(self.body.position)
pygame.draw.circle(game.screen, (0, 255, 0), draw_tuple, self.radius)
class PrimaryCannon:
def __init__(self, parent):
self.parent = parent
game.object_manager.register(self)
self.cannon_power = CANNON_POWER
def activate(self):
position = self.pos
local_impulse = Vec2d(0, CANNON_POWER)
parent_angle = self.parent.body.angle
impulse = local_impulse.rotated(parent_angle)
velocity = self.parent.body.velocity
Projectile(position, velocity, impulse)
def update(self):
parent_pos = self.parent.body.position
parent_angle = self.parent.body.angle
local_offset = Vec2d(0, BOMBER_HEIGHT/2)
self.pos = parent_pos + (local_offset.rotated(parent_angle))
self.draw_pos = Utils.vec2d_to_draw_tuple(self.pos)
def delete(self):
pass
def draw(self):
pygame.draw.circle(game.screen, (255, 0, 0), self.draw_pos, 1)
class SecondaryBombLauncher:
def __init__(self, parent):
self.parent = parent
game.object_manager.register(self)
def activate(self):
position = self.pos
local_impulse = Vec2d(0, -BOMB_LAUNCHER_POWER)
parent_angle = self.parent.body.angle
impulse = local_impulse.rotated(parent_angle)
velocity = self.parent.body.velocity
Bomb(position, velocity, impulse)
def update(self):
parent_pos = self.parent.body.position
parent_angle = self.parent.body.angle
local_offset = Vec2d(0, -BOMBER_HEIGHT/2)
self.pos = parent_pos + (local_offset.rotated(parent_angle))
self.draw_pos = Utils.vec2d_to_draw_tuple(self.pos)
def delete(self):
pass
def draw(self):
pygame.draw.circle(game.screen, (0, 0, 255), self.draw_pos, 3)
| mit | 8,768,513,573,241,920,000 | 31.077922 | 81 | 0.62915 | false | 3.416321 | false | false | false |
ksmaheshkumar/gitfs | tests/views/test_read_only.py | 3 | 1735 | # Copyright 2014 PressLabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from fuse import FuseOSError
from gitfs.views.read_only import ReadOnlyView
class TestReadOnly(object):
def test_cant_write(self):
view = ReadOnlyView()
for method in ["write", "create", "utimens",
"chmod", "mkdir"]:
with pytest.raises(FuseOSError):
getattr(view, method)("path", 1)
with pytest.raises(FuseOSError):
view.getxattr("path", "name", 1)
with pytest.raises(FuseOSError):
view.chown("path", 1, 1)
def test_always_return_0(self):
view = ReadOnlyView()
for method in ["flush", "releasedir", "release"]:
assert getattr(view, method)("path", 1) == 0
assert view.opendir("path") == 0
def test_open(self):
view = ReadOnlyView()
with pytest.raises(FuseOSError):
view.open("path", os.O_WRONLY)
assert view.open("path", os.O_RDONLY) == 0
def test_access(self):
view = ReadOnlyView()
with pytest.raises(FuseOSError):
view.access("path", os.W_OK)
assert view.access("path", os.R_OK) == 0
| apache-2.0 | 7,316,076,637,565,921,000 | 27.916667 | 74 | 0.633429 | false | 3.821586 | true | false | false |
klpdotorg/disereports | handlers/Demographics.py | 1 | 5855 | import web
import psycopg2
import traceback
import sys, os,traceback
from operator import itemgetter
import db.KLPDB
import db.Queries_dise
from utils.CommonUtil import CommonUtil
#connection = db.KLPDB.getConnection()
#cursor = connection.cursor()
cursor = db.KLPDB.getWebDbConnection1()
class Demographics:
def generateData(self,cons_type, constid):
data = {}
constype = "mp"
if cons_type == 1:
data["const_type"]='MP'
constype = "mp"
elif cons_type == 2:
data["const_type"]='MLA'
constype = "mla"
elif cons_type == 3:
data["const_type"]='Corporator'
constype = "corporator"
elif cons_type == 4:
data["const_type"]='District'
constype = "district"
elif cons_type == 5:
data["const_type"]='Block'
constype = "block"
elif cons_type == 6:
data["const_type"]='Cluster'
constype = "cluster"
data["const_name"]=str(constid[0])
queries = ['gend_sch']
#,'gend_presch']
data.update(self.genderGraphs(constype,constid,queries))
#queries = ['mt_sch']
#,'mt_presch']
#data.update(self.mtGraphs(constype,constid,queries))
queries = ['moi_sch','cat_sch','enrol_sch']
#,'enrol_presch']
data.update(self.pieGraphs(constype,constid,queries))
data.update(self.constituencyData(constype,constid))
return data
def genderGraphs(self,constype,constid,qkeys):
data = {}
for querykey in qkeys:
result = cursor.query(db.Queries_dise.getDictionary(constype)[constype + '_' + querykey],{'s':constid})
chartdata ={}
for row in result:
chartdata[str(row.sex.strip())] = int(row.sum)
if len(chartdata.keys()) > 0:
total = chartdata['Boy']+chartdata['Girl']
percBoys = round(float(chartdata['Boy'])/total*100,0)
percGirls = round(float(chartdata['Girl'])/total*100,0)
data[querykey+"_tb"]=chartdata
else:
data[querykey+"_hasdata"] = 0
return data
def mtGraphs(self,constype,constid,qkeys):
data = {}
for querykey in qkeys:
result = cursor.query(db.Queries_dise.getDictionary(constype)[constype + '_' + querykey],{'s':constid})
tabledata = {}
invertdata = {}
order_lst = []
for row in result:
invertdata[int(row.sum)] = str(row.mt.strip().title())
if len(invertdata.keys()) > 0:
checklist = sorted(invertdata)
others = 0
for i in checklist[0:len(checklist)-4]:
others = others + i
del invertdata[i]
invertdata[others] = 'Others'
tabledata = dict(zip(invertdata.values(),invertdata.keys()))
if 'Other' in tabledata.keys():
tabledata['Others'] = tabledata['Others'] + tabledata['Other']
del tabledata['Other']
for i in sorted(tabledata,key=tabledata.get,reverse=True):
order_lst.append(i)
if len(tabledata.keys()) > 0:
data[querykey + "_tb"] = tabledata
data[querykey + "_ord_lst"] = order_lst
else:
data[querykey + "_hasdata"] = 0
return data
def pieGraphs(self,constype,constid,qkeys):
data = {}
for querykey in qkeys:
result = cursor.query(db.Queries_dise.getDictionary(constype)[constype + '_' + querykey],{'s':constid})
tabledata = {}
for row in result:
tabledata[str(row.a1.strip().title())] = str(int(row.a2))
sorted_x = sorted(tabledata.items(), key=itemgetter(1))
tabledata = dict(sorted_x)
if len(tabledata.keys()) > 0:
data[querykey + "_tb"] = tabledata
else:
data[querykey + "_hasdata"] = 0
return data
def constituencyData(self,constype,constid):
data = {}
util = CommonUtil()
ret_data = util.constituencyData(constype,constid)
data.update(ret_data[0])
neighbors = self.neighboursData(ret_data[1],ret_data[2])
if neighbors:
data.update(neighbors)
return data
def neighboursData(self, neighbours, constype):
data = {}
constype_str = constype
try:
if len(neighbours) > 0:
neighbours_sch = {}
neighbours_presch = {}
result = cursor.query(db.Queries_dise.getDictionary(constype)[constype_str + '_neighbour_sch'], {'s':tuple(neighbours)})
for row in result:
neighbours_sch[row.const_ward_name.strip()]={'schcount':str(row.count)}
#result = cursor.query(db.Queries_dise.getDictionary(constype)[constype_str + '_neighbour_presch'], {'s':tuple(neighbours)})
#for row in result:
#neighbours_presch[row.const_ward_name.strip()] = 0 #{'preschcount':str(row.count)}
result = cursor.query(db.Queries_dise.getDictionary(constype)[constype_str + '_neighbour_gendsch'],{'s':tuple(neighbours)})
for row in result:
neighbours_sch[row.const_ward_name.strip()][row.sex.strip()] = str(row.sum)
#result = cursor.query(db.Queries_dise.getDictionary(constype)[constype_str + '_neighbour_gendpresch'],{'s':tuple(neighbours)})
#for row in result:
#neighbours_presch[row.const_ward_name.strip()][row.sex.strip()] = str(row.sum)
#neighbours_presch[row.const_ward_name.strip()]['Boys'] = 0 #str(row.sum)
#neighbours_presch[row.const_ward_name.strip()]['Girls'] = 0 #str(row.sum)
if len(neighbours_sch.keys()) > 0:
data["neighbours_sch"] = neighbours_sch
else:
data["neighbours_sch_hasdata"] = 0
if len(neighbours_presch.keys()) > 0:
data["neighbours_presch"] = neighbours_presch
else:
data["neighbours_presch_hasdata"] = 0
else:
data["neighbours_sch_hasdata"] = 0
data["neighbours_presch_hasdata"] = 0
return data
except:
print "Unexpected error:", sys.exc_info()
traceback.print_exc(file=sys.stdout)
return None
| mit | -8,367,429,558,039,493,000 | 34.484848 | 135 | 0.614518 | false | 3.236595 | false | false | false |
tBaxter/tango-contact-manager | setup.py | 1 | 1109 | # -*- coding: utf-8 -*-
from distutils.core import setup
from setuptools import find_packages
with open('docs/requirements.txt') as f:
required = f.read().splitlines()
setup(
name='tango-contact-manager',
version='0.10.0',
author=u'Tim Baxter',
author_email='[email protected]',
url='https://github.com/tBaxter/tango-contact-manager',
license='LICENSE',
description="""Provides contact forms and any other user submission form you might want.
Create user submission forms on the fly, straight from the Django admin.
""",
long_description=open('README.md').read(),
zip_safe=False,
packages=find_packages(),
include_package_data=True,
install_requires=required,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
)
| mit | 5,236,171,838,286,252,000 | 32.606061 | 92 | 0.639315 | false | 4.092251 | false | true | false |
mesosphere/dcos-cli | cli/tests/integrations/helpers/job.py | 1 | 3171 | import contextlib
import json
from .common import assert_command, exec_command
from .marathon import watch_deployment
def remove_job(job_id):
""" Remove a job
:param job_id: id of job to remove
:type job_id: str
:rtype: None
"""
assert_command(['dcos', 'job', 'remove',
'--stop-current-job-runs', job_id])
def show_job(app_id):
"""Show details of a Metronome job.
:param app_id: The id for the application
:type app_id: str
:returns: The requested Metronome job.
:rtype: dict
"""
cmd = ['dcos', 'job', 'show', app_id]
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert isinstance(result, dict)
assert result['id'] == app_id
return result
def show_job_schedule(app_id, schedule_id):
"""Show details of a Metronome schedule.
:param app_id: The id for the job
:type app_id: str
:param schedule_id: The id for the schedule
:type schedule_id: str
:returns: The requested Metronome job.
:rtype: dict
"""
cmd = ['dcos', 'job', 'schedule', 'show', app_id, '--json']
returncode, stdout, stderr = exec_command(cmd)
assert returncode == 0
assert stderr == b''
result = json.loads(stdout.decode('utf-8'))
assert isinstance(result[0], dict)
assert result[0]['id'] == schedule_id
return result[0]
@contextlib.contextmanager
def job(path, job_id):
"""Context manager that deploys a job on entrance, and removes it on
exit.
    :param path: path to job's json definition
    :type path: str
    :param job_id: job id
    :type job_id: str
:rtype: None
"""
add_job(path)
try:
yield
finally:
remove_job(job_id)
def watch_job_deployments(count=300):
"""Wait for all deployments to complete.
:param count: max number of seconds to wait
:type count: int
:rtype: None
"""
deps = list_job_deployments()
for dep in deps:
watch_deployment(dep['id'], count)
def add_job(job_path):
""" Add a job, and wait for it to deploy
:param job_path: path to job's json definition
:type job_path: str
:rtype: None
"""
assert_command(['dcos', 'job', 'add', job_path])
def list_job_deployments(expected_count=None, app_id=None):
"""Get all active deployments.
:param expected_count: assert that number of active deployments
equals `expected_count`
:type expected_count: int
:param app_id: only get deployments for this app
:type app_id: str
:returns: active deployments
:rtype: [dict]
"""
cmd = ['dcos', 'job', 'list', '--json']
if app_id is not None:
cmd.append(app_id)
returncode, stdout, stderr = exec_command(cmd)
result = json.loads(stdout.decode('utf-8'))
assert returncode == 0
if expected_count is not None:
assert len(result) == expected_count
assert stderr == b''
return result
| apache-2.0 | -8,802,720,673,048,894,000 | 21.65 | 72 | 0.62157 | false | 3.583051 | false | false | false |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | tests/test_forms.py | 1 | 7889 | # -*- coding: utf-8 -*-
"""Tests for forms and WTForms extensions."""
# noqa: D103
import itertools
from unittest.mock import MagicMock
import pytest
from wtforms import Form, StringField
from orcid_hub.forms import validate_orcid_id_field # noqa: E128
from orcid_hub.forms import BitmapMultipleValueField, CountrySelectField, PartialDate, PartialDateField
from orcid_hub.models import PartialDate as PartialDateDbField
def test_partial_date_widget(): # noqa
assert '<option selected value="1995">1995</option>' in PartialDate()(
MagicMock(data=PartialDateDbField(1995)))
field = MagicMock(label="LABEL", id="ID", data=PartialDateDbField(2017, 5, 13))
field.name = "NAME"
pd = PartialDate()(field)
assert '<option selected value="2017">2017</option>' in pd
assert '<option selected value="5">05</option>' in pd
assert '<option selected value="13">13</option><option value="14">14</option>' in pd
assert '"NAME:year"' in pd
assert '"NAME:month"' in pd
assert '"NAME:day"' in pd
@pytest.fixture
def test_form(): # noqa
class F(Form):
pdf1 = PartialDateField("f1", default=PartialDateDbField(1995), id="test-id-1")
pdf2 = PartialDateField("f2", default=PartialDateDbField(2017, 5, 13), id="test-id-2")
pdf3 = PartialDateField("f3")
csf1 = CountrySelectField()
csf2 = CountrySelectField(label="Select Country")
bmvf1 = BitmapMultipleValueField(choices=[
(
1,
"one",
),
(
2,
"two",
),
(
4,
"four",
),
])
bmvf2 = BitmapMultipleValueField(
choices=[
(
1,
"one",
),
(
2,
"two",
),
(
4,
"four",
),
], )
bmvf2.is_bitmap_value = False
return F
def test_partial_date_field_defaults(test_form): # noqa
tf = test_form()
assert tf.pdf1.data == PartialDateDbField(1995)
assert tf.pdf2.data == PartialDateDbField(2017, 5, 13)
assert tf.pdf1.label.text == "f1"
class DummyPostData(dict): # noqa
def __init__(self, data): # noqa
super().__init__()
self.update(data)
def getlist(self, key): # noqa
v = self[key]
if not isinstance(v, (list, tuple)):
v = [v]
return v
def test_partial_date_field_with_data(test_form): # noqa
tf = test_form(DummyPostData({"pdf1:year": "2000", "pdf1:month": "1", "pdf1:day": "31"}))
pdf1 = tf.pdf1()
assert '<option selected value="31">' in pdf1
assert '<option value="2001">2001</option><option selected value="2000">2000</option>' in pdf1
assert '<option value="">Month</option><option selected value="1">01</option><option value="2">' in pdf1
def test_partial_date_field_errors(test_form): # noqa
tf = test_form(
DummyPostData({
"pdf1:year": "ERROR",
"pdf1:month": "ERROR",
"pdf1:day": "ERROR"
}))
tf.validate()
assert len(tf.pdf1.process_errors) > 0
tf = test_form(DummyPostData({"pdf1:year": "2001", "pdf1:month": "", "pdf1:day": "31"}))
tf.validate()
assert len(tf.pdf1.errors) > 0
tf = test_form(DummyPostData({"pdf1:year": "", "pdf1:month": "12", "pdf1:day": "31"}))
tf.validate()
assert len(tf.pdf1.errors) > 0
tf = test_form(DummyPostData({"pdf1:year": "2001", "pdf1:month": "13", "pdf1:day": ""}))
tf.validate()
assert len(tf.pdf1.errors) > 0
tf = test_form(DummyPostData({"pdf1:year": "2001", "pdf1:month": "-1", "pdf1:day": ""}))
tf.validate()
assert len(tf.pdf1.errors) > 0
tf = test_form(DummyPostData({"pdf1:year": "1995", "pdf1:month": "2", "pdf1:day": "29"}))
tf.validate()
assert len(tf.pdf1.errors) > 0
tf = test_form(DummyPostData({"pdf1:year": "1996", "pdf1:month": "2", "pdf1:day": "29"}))
tf.validate()
assert not tf.pdf1.errors
tf = test_form(DummyPostData({"pdf1:year": "1994", "pdf1:month": "2", "pdf1:day": "30"}))
tf.validate()
assert len(tf.pdf1.errors) > 0
tf = test_form(DummyPostData({"pdf1:year": "1994", "pdf1:month": "4", "pdf1:day": "31"}))
tf.validate()
assert len(tf.pdf1.errors) > 0
for m in itertools.chain(range(9, 13, 2), range(2, 8, 2)):
tf = test_form(DummyPostData({"pdf1:year": "1994", "pdf1:month": f"{m}", "pdf1:day": "31"}))
tf.validate()
assert len(tf.pdf1.errors) > 0
def test_partial_date_field_with_filter(test_form): # noqa
test_form.pdf = PartialDateField(
"f", filters=[lambda pd: PartialDateDbField(pd.year + 1, pd.month + 1, pd.day + 1)])
tf = test_form(DummyPostData({"pdf:year": "2012", "pdf:month": "4", "pdf:day": "12"}))
pdf = tf.pdf()
assert '<option selected value="13">' in pdf
assert '<option selected value="2013">' in pdf
assert '<option selected value="5">' in pdf
assert len(tf.pdf1.process_errors) == 0
def failing_filter(*args, **kwargs):
raise ValueError("ERROR!!!")
test_form.pdf = PartialDateField("f", filters=[failing_filter])
tf = test_form(DummyPostData({"pdf:year": "2012", "pdf:month": "4", "pdf:day": "12"}))
assert len(tf.pdf.process_errors) > 0
assert "ERROR!!!" in tf.pdf.process_errors
def test_partial_date_field_with_obj(test_form): # noqa
tf = test_form(None, obj=MagicMock(pdf1=PartialDateDbField(2017, 1, 13)))
pdf1 = tf.pdf1()
assert '<option selected value="13">' in pdf1
assert '<option selected value="2017">2017</option>' in pdf1
assert '<option value="">Month</option><option selected value="1">01</option><option value="2">' in pdf1
tf = test_form(None, obj=MagicMock(pdf3=PartialDateDbField(2017)))
pdf3 = tf.pdf3()
assert '<option selected value="">' in pdf3
assert '<option selected value="2017">2017</option>' in pdf3
assert '<option selected value="">Month</option><option value="1">01</option><option value="2">' in pdf3
def test_partial_date_field_with_data_and_obj(test_form): # noqa
tf = test_form(
DummyPostData({
"pdf1:year": "2000"
}), MagicMock(pdf1=PartialDateDbField(2017, 1, 13)))
pdf1 = tf.pdf1()
assert '<option selected value="13">' in pdf1
assert '<option value="2001">2001</option><option selected value="2000">2000</option>' in pdf1
assert '<option value="">Month</option><option selected value="1">01</option><option value="2">' in pdf1
def test_orcid_validation(test_form): # noqa
orcid_id = StringField("ORCID iD", [
validate_orcid_id_field,
])
orcid_id.data = "0000-0001-8228-7153"
validate_orcid_id_field(test_form, orcid_id)
orcid_id.data = "INVALID FORMAT"
with pytest.raises(ValueError) as excinfo:
validate_orcid_id_field(test_form, orcid_id)
assert f"Invalid ORCID iD {orcid_id.data}. It should be in the form of 'xxxx-xxxx-xxxx-xxxx' where x is a digit." \
in str(excinfo.value)
orcid_id.data = "0000-0001-8228-7154"
with pytest.raises(ValueError) as excinfo:
validate_orcid_id_field(test_form, orcid_id)
assert f"Invalid ORCID iD {orcid_id.data} checksum. Make sure you have entered correct ORCID iD." in str(
excinfo.value)
def test_country_select_field(test_form): # noqa
tf = test_form()
assert tf.csf1.label.text == "Country"
assert tf.csf2.label.text == "Select Country"
def test_bitmap_multiple_value_field(test_form): # noqa
tf = test_form()
tf.bmvf1.data = 3
tf.bmvf2.data = (
1,
4,
)
tf.validate()
tf.bmvf1.process_data(5)
tf.bmvf1.process_data([1, 4])
| mit | -951,219,314,364,749,000 | 31.870833 | 119 | 0.593611 | false | 3.247839 | true | false | false |
walchko/pyxl320 | bin/set_angle.py | 1 | 1405 | #!/usr/bin/env python
##############################################
# The MIT License (MIT)
# Copyright (c) 2016 Kevin Walchko
# see LICENSE for full details
##############################################
from pyxl320 import Packet
# from pyxl320 import DummySerial
from pyxl320 import ServoSerial
import argparse
DESCRIPTION = """
Set the angle of a servo in degrees.
Example: set servo 3 to angle 45
./set_angle /dev/serial0 45 -i 3
"""
def handleArgs():
parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-i', '--id', help='servo id', type=int, default=1)
parser.add_argument('port', help='serial port or \'dummy\' for testing', type=str)
parser.add_argument('angle', help='servo angle in degrees: 0.0 - 300.0', type=float)
args = vars(parser.parse_args())
return args
def main():
args = handleArgs()
ID = args['id']
port = args['port'] # '/dev/tty.usbserial-A5004Flb'
angle = args['angle']
print('Setting servo[{}] to {:.2f} on port {}'.format(ID, angle, port))
if port.lower() == 'dummy':
serial = ServoSerial(port=port, fake=True)
else:
serial = ServoSerial(port=port)
serial.open()
	pkt = Packet.makeServoPacket(ID, angle)  # build a move packet for servo ID at the requested angle
ans = serial.sendPkt(pkt) # send packet to servo
if ans:
print('status: {}'.format(ans))
if __name__ == '__main__':
main()
| mit | 5,071,201,303,506,127,000 | 24.089286 | 105 | 0.641993 | false | 3.164414 | false | false | false |
prathapsridharan/health_project | health_project/questions/forms.py | 1 | 2744 | """Contains form class defintions for form for the questions app.
Classes: [
SurveyQuestionForm
]
"""
from django import forms
from django.core.urlresolvers import reverse
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout
from . import config
from .models import ChoiceSelectionAnswerFormat, ListInputAnswerFormat
class SurveyQuestionForm(forms.Form):
"""Models a form for how a question should be posed to the user."""
answer_input = forms.CharField(
label = "",
required=False,
widget=forms.Textarea()
)
class Meta:
fields = ("answer_input")
def __init__(self, *args, **kwargs):
survey_question = kwargs.pop('survey_question', None)
super().__init__(*args, **kwargs)
# NOTE: It is assumed that all answer_formats belonging to a question
# is of the same type.
# TODO: ENHANCEMENT: Enforce same answer format instances or throw an
# error
answer_formats = survey_question.question.answer_formats.all()
answer_format = answer_formats[0]
# Determine what the type of 'answer_input' form field is and its
# associated widget type.
if isinstance(answer_format, ChoiceSelectionAnswerFormat):
choices = ((a.id, a.choice_name) for a in answer_formats)
label = ""
required = False
if (answer_format.answer_format_type == config.CHOICE_SELECTION_MANY_FORMAT):
self.fields['answer_input'] = forms.MultipleChoiceField(
label=label,
required=required,
choices=choices
)
self.fields['answer_input'].widget = forms.CheckboxSelectMultiple()
else:
self.fields['answer_input'] = forms.ChoiceField(
label=label,
required=required,
choices=choices
)
self.fields['answer_input'].widget = forms.RadioSelect()
elif isinstance(answer_format, ListInputAnswerFormat):
self.fields['answer_input'].help_text = "Please enter each new item on a new line."
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.form_method = 'post'
self.helper.form_action = reverse("questions:answer-survey-question")
self.helper.layout = Layout(
'answer_input',
)
self.helper.layout.append(
FormActions(
Submit('skip', 'Skip'),
Submit('submit', 'Submit', css_class='btn-primary'),
)
)
| bsd-3-clause | 8,239,022,131,864,577,000 | 32.876543 | 95 | 0.601676 | false | 4.469055 | false | false | false |
jcorrius/go-oo-mingw32-soc | scratch/mso-dumper/src/ole.py | 1 | 22475 | ########################################################################
#
# OpenOffice.org - a multi-platform office productivity suite
#
# Author:
# Kohei Yoshida <[email protected]>
#
# The Contents of this file are made available subject to the terms
# of GNU Lesser General Public License Version 2.1 and any later
# version.
#
########################################################################
import sys
import globals
from globals import getSignedInt
# ----------------------------------------------------------------------------
# Reference: The Microsoft Compound Document File Format by Daniel Rentz
# http://sc.openoffice.org/compdocfileformat.pdf
# ----------------------------------------------------------------------------
from globals import output
class NoRootStorage(Exception): pass
class ByteOrder:
LittleEndian = 0
BigEndian = 1
Unknown = 2
class BlockType:
MSAT = 0
SAT = 1
SSAT = 2
Directory = 3
class StreamLocation:
SAT = 0
SSAT = 1
class Header(object):
@staticmethod
def byteOrder (chars):
b1, b2 = ord(chars[0]), ord(chars[1])
if b1 == 0xFE and b2 == 0xFF:
return ByteOrder.LittleEndian
elif b1 == 0xFF and b2 == 0xFE:
return ByteOrder.BigEndian
else:
return ByteOrder.Unknown
def __init__ (self, bytes, params):
self.bytes = bytes
self.MSAT = None
self.docId = None
self.uId = None
self.revision = 0
self.version = 0
self.byteOrder = ByteOrder.Unknown
self.minStreamSize = 0
self.numSecMSAT = 0
self.numSecSSAT = 0
self.numSecSAT = 0
self.__secIDFirstMSAT = -2
self.__secIDFirstDirStrm = -2
self.__secIDFirstSSAT = -2
self.secSize = 512
self.secSizeShort = 64
self.params = params
def getSectorSize (self):
return 2**self.secSize
def getShortSectorSize (self):
return 2**self.secSizeShort
def getFirstSectorID (self, blockType):
if blockType == BlockType.MSAT:
return self.__secIDFirstMSAT
elif blockType == BlockType.SSAT:
return self.__secIDFirstSSAT
elif blockType == BlockType.Directory:
return self.__secIDFirstDirStrm
return -2
def output (self):
def printRawBytes (bytes):
for b in bytes:
output("%2.2X "%ord(b))
output("\n")
def printSep (c='-', w=68, prefix=''):
print(prefix + c*w)
printSep('=', 68)
print("Compound Document Header")
printSep('-', 68)
if self.params.debug:
globals.dumpBytes(self.bytes[0:512])
printSep('-', 68)
# document ID and unique ID
output("Document ID: ")
printRawBytes(self.docId)
output("Unique ID: ")
printRawBytes(self.uId)
# revision and version
print("Revision: %d Version: %d"%(self.revision, self.version))
# byte order
output("Byte order: ")
if self.byteOrder == ByteOrder.LittleEndian:
print("little endian")
elif self.byteOrder == ByteOrder.BigEndian:
print("big endian")
else:
print("unknown")
# sector size (usually 512 bytes)
print("Sector size: %d (%d)"%(2**self.secSize, self.secSize))
# short sector size (usually 64 bytes)
print("Short sector size: %d (%d)"%(2**self.secSizeShort, self.secSizeShort))
# total number of sectors in SAT (equals the number of sector IDs
# stored in the MSAT).
print("Total number of sectors used in SAT: %d"%self.numSecSAT)
print("Sector ID of the first sector of the directory stream: %d"%
self.__secIDFirstDirStrm)
print("Minimum stream size: %d"%self.minStreamSize)
if self.__secIDFirstSSAT == -2:
print("Sector ID of the first SSAT sector: [none]")
else:
print("Sector ID of the first SSAT sector: %d"%self.__secIDFirstSSAT)
print("Total number of sectors used in SSAT: %d"%self.numSecSSAT)
if self.__secIDFirstMSAT == -2:
# There is no more sector ID stored outside the header.
print("Sector ID of the first MSAT sector: [end of chain]")
else:
# There is more sector IDs than 109 IDs stored in the header.
print("Sector ID of the first MSAT sector: %d"%(self.__secIDFirstMSAT))
print("Total number of sectors used to store additional MSAT: %d"%self.numSecMSAT)
def parse (self):
# document ID and unique ID
self.docId = self.bytes[0:8]
self.uId = self.bytes[8:24]
# revision and version
self.revision = getSignedInt(self.bytes[24:26])
self.version = getSignedInt(self.bytes[26:28])
# byte order
self.byteOrder = Header.byteOrder(self.bytes[28:30])
# sector size (usually 512 bytes)
self.secSize = getSignedInt(self.bytes[30:32])
# short sector size (usually 64 bytes)
self.secSizeShort = getSignedInt(self.bytes[32:34])
# total number of sectors in SAT (equals the number of sector IDs
# stored in the MSAT).
self.numSecSAT = getSignedInt(self.bytes[44:48])
self.__secIDFirstDirStrm = getSignedInt(self.bytes[48:52])
self.minStreamSize = getSignedInt(self.bytes[56:60])
self.__secIDFirstSSAT = getSignedInt(self.bytes[60:64])
self.numSecSSAT = getSignedInt(self.bytes[64:68])
self.__secIDFirstMSAT = getSignedInt(self.bytes[68:72])
self.numSecMSAT = getSignedInt(self.bytes[72:76])
# master sector allocation table
self.MSAT = MSAT(2**self.secSize, self.bytes, self.params)
# First part of MSAT consisting of an array of up to 109 sector IDs.
# Each sector ID is 4 bytes in length.
for i in xrange(0, 109):
pos = 76 + i*4
id = getSignedInt(self.bytes[pos:pos+4])
if id == -1:
break
self.MSAT.appendSectorID(id)
if self.__secIDFirstMSAT != -2:
# additional sectors are used to store more SAT sector IDs.
secID = self.__secIDFirstMSAT
size = self.getSectorSize()
inLoop = True
while inLoop:
pos = 512 + secID*size
bytes = self.bytes[pos:pos+size]
n = int(size/4)
for i in xrange(0, n):
pos = i*4
id = getSignedInt(bytes[pos:pos+4])
if id < 0:
inLoop = False
break
elif i == n-1:
# last sector ID - points to the next MSAT sector.
secID = id
break
else:
self.MSAT.appendSectorID(id)
return 512
def getMSAT (self):
return self.MSAT
def getSAT (self):
return self.MSAT.getSAT()
def getSSAT (self):
ssatID = self.getFirstSectorID(BlockType.SSAT)
if ssatID < 0:
return None
chain = self.getSAT().getSectorIDChain(ssatID)
if len(chain) == 0:
return None
obj = SSAT(2**self.secSize, self.bytes, self.params)
for secID in chain:
obj.addSector(secID)
obj.buildArray()
return obj
def getDirectory (self):
dirID = self.getFirstSectorID(BlockType.Directory)
if dirID < 0:
return None
chain = self.getSAT().getSectorIDChain(dirID)
if len(chain) == 0:
return None
obj = Directory(self, self.params)
for secID in chain:
obj.addSector(secID)
return obj
def dummy ():
pass
class MSAT(object):
"""Master Sector Allocation Table (MSAT)
This class represents the master sector allocation table (MSAT) that stores
sector IDs that point to all the sectors that are used by the sector
allocation table (SAT). The actual SAT are to be constructed by combining
all the sectors pointed by the sector IDs in order of occurrence.
"""
def __init__ (self, sectorSize, bytes, params):
self.sectorSize = sectorSize
self.secIDs = []
self.bytes = bytes
self.__SAT = None
self.params = params
def appendSectorID (self, id):
self.secIDs.append(id)
def output (self):
print('')
print("="*68)
print("Master Sector Allocation Table (MSAT)")
print("-"*68)
for id in self.secIDs:
print("sector ID: %5d (pos: %7d)"%(id, 512+id*self.sectorSize))
def getSATSectorPosList (self):
list = []
for id in self.secIDs:
pos = 512 + id*self.sectorSize
list.append([id, pos])
return list
def getSAT (self):
if self.__SAT != None:
return self.__SAT
obj = SAT(self.sectorSize, self.bytes, self.params)
for id in self.secIDs:
obj.addSector(id)
obj.buildArray()
self.__SAT = obj
return self.__SAT
class SAT(object):
"""Sector Allocation Table (SAT)
"""
def __init__ (self, sectorSize, bytes, params):
self.sectorSize = sectorSize
self.sectorIDs = []
self.bytes = bytes
self.array = []
self.params = params
def getSectorSize (self):
return self.sectorSize
def addSector (self, id):
self.sectorIDs.append(id)
def buildArray (self):
if len(self.array) > 0:
# array already built.
return
numItems = int(self.sectorSize/4)
self.array = []
for secID in self.sectorIDs:
pos = 512 + secID*self.sectorSize
for i in xrange(0, numItems):
beginPos = pos + i*4
id = getSignedInt(self.bytes[beginPos:beginPos+4])
self.array.append(id)
def outputRawBytes (self):
bytes = ""
for secID in self.sectorIDs:
pos = 512 + secID*self.sectorSize
bytes += self.bytes[pos:pos+self.sectorSize]
globals.dumpBytes(bytes, 512)
def outputArrayStats (self):
sectorTotal = len(self.array)
sectorP = 0 # >= 0
sectorM1 = 0 # -1
sectorM2 = 0 # -2
sectorM3 = 0 # -3
sectorM4 = 0 # -4
sectorMElse = 0 # < -4
sectorLiveTotal = 0
for i in xrange(0, len(self.array)):
item = self.array[i]
if item >= 0:
sectorP += 1
elif item == -1:
sectorM1 += 1
elif item == -2:
sectorM2 += 1
elif item == -3:
sectorM3 += 1
elif item == -4:
sectorM4 += 1
elif item < -4:
sectorMElse += 1
else:
sectorLiveTotal += 1
print("total sector count: %4d"%sectorTotal)
print("* live sector count: %4d"%sectorP)
print("* end-of-chain sector count: %4d"%sectorM2) # end-of-chain is also live
print("* free sector count: %4d"%sectorM1)
print("* SAT sector count: %4d"%sectorM3)
print("* MSAT sector count: %4d"%sectorM4)
print("* other sector count: %4d"%sectorMElse)
def output (self):
print('')
print("="*68)
print("Sector Allocation Table (SAT)")
print("-"*68)
if self.params.debug:
self.outputRawBytes()
print("-"*68)
for i in xrange(0, len(self.array)):
print("%5d: %5d"%(i, self.array[i]))
print("-"*68)
self.outputArrayStats()
def getSectorIDChain (self, initID):
if initID < 0:
return []
chain = [initID]
nextID = self.array[initID]
while nextID != -2:
chain.append(nextID)
nextID = self.array[nextID]
return chain
class SSAT(SAT):
"""Short Sector Allocation Table (SSAT)
    SSAT contains an array of sector ID chains of all short streams, as opposed
to SAT which contains an array of sector ID chains of all standard streams.
The sector IDs included in the SSAT point to the short sectors in the short
stream container stream.
The first sector ID of SSAT is in the header, and the IDs of the remaining
sectors are contained in the SAT as a sector ID chain.
"""
def output (self):
print('')
print("="*68)
print("Short Sector Allocation Table (SSAT)")
print("-"*68)
if self.params.debug:
self.outputRawBytes()
print("-"*68)
for i in xrange(0, len(self.array)):
item = self.array[i]
output("%3d : %3d\n"%(i, item))
self.outputArrayStats()
class Directory(object):
"""Directory Entries
This stream contains a list of directory entries that are stored within the
entire file stream.
"""
class Type:
Empty = 0
UserStorage = 1
UserStream = 2
LockBytes = 3
Property = 4
RootStorage = 5
class NodeColor:
Red = 0
Black = 1
Unknown = 99
class Entry:
def __init__ (self):
self.Name = ''
self.CharBufferSize = 0
self.Type = Directory.Type.Empty
self.NodeColor = Directory.NodeColor.Unknown
self.DirIDLeft = -1
self.DirIDRight = -1
self.DirIDRoot = -1
self.UniqueID = None
self.UserFlags = None
self.TimeCreated = None
self.TimeModified = None
self.StreamSectorID = -2
self.StreamSize = 0
self.bytes = []
def __init__ (self, header, params):
self.sectorSize = header.getSectorSize()
self.bytes = header.bytes
self.minStreamSize = header.minStreamSize
self.sectorIDs = []
self.entries = []
self.SAT = header.getSAT()
self.SSAT = header.getSSAT()
self.header = header
self.RootStorage = None
self.RootStorageBytes = ""
self.params = params
def __buildRootStorageBytes (self):
if self.RootStorage == None:
# no root storage exists.
return
firstSecID = self.RootStorage.StreamSectorID
chain = self.header.getSAT().getSectorIDChain(firstSecID)
for secID in chain:
pos = 512 + secID*self.sectorSize
self.RootStorageBytes += self.header.bytes[pos:pos+self.sectorSize]
def __getRawStream (self, entry):
chain = []
if entry.StreamLocation == StreamLocation.SAT:
chain = self.header.getSAT().getSectorIDChain(entry.StreamSectorID)
elif entry.StreamLocation == StreamLocation.SSAT:
chain = self.header.getSSAT().getSectorIDChain(entry.StreamSectorID)
if entry.StreamLocation == StreamLocation.SSAT:
# Get the root storage stream.
if self.RootStorage == None:
raise NoRootStorage
bytes = ""
self.__buildRootStorageBytes()
size = self.header.getShortSectorSize()
for id in chain:
pos = id*size
bytes += self.RootStorageBytes[pos:pos+size]
return bytes
offset = 512
size = self.header.getSectorSize()
bytes = ""
for id in chain:
pos = offset + id*size
bytes += self.header.bytes[pos:pos+size]
return bytes
def getRawStreamByName (self, name):
bytes = []
for entry in self.entries:
if entry.Name == name:
bytes = self.__getRawStream(entry)
break
return bytes
def addSector (self, id):
self.sectorIDs.append(id)
def output (self, debug=False):
print('')
print("="*68)
print("Directory")
if debug:
print("-"*68)
print("sector(s) used:")
for secID in self.sectorIDs:
print(" sector %d"%secID)
print("")
for secID in self.sectorIDs:
print("-"*68)
print(" Raw Hex Dump (sector %d)"%secID)
print("-"*68)
pos = globals.getSectorPos(secID, self.sectorSize)
globals.dumpBytes(self.bytes[pos:pos+self.sectorSize], 128)
for entry in self.entries:
self.__outputEntry(entry, debug)
def __outputEntry (self, entry, debug):
print("-"*68)
if len(entry.Name) > 0:
name = entry.Name
if ord(name[0]) <= 5:
name = "<%2.2Xh>%s"%(ord(name[0]), name[1:])
print("name: %s (name buffer size: %d bytes)"%(name, entry.CharBufferSize))
else:
print("name: [empty] (name buffer size: %d bytes)"%entry.CharBufferSize)
if self.params.debug:
print("-"*68)
globals.dumpBytes(entry.bytes)
print("-"*68)
output("type: ")
if entry.Type == Directory.Type.Empty:
print("empty")
elif entry.Type == Directory.Type.LockBytes:
print("lock bytes")
elif entry.Type == Directory.Type.Property:
print("property")
elif entry.Type == Directory.Type.RootStorage:
print("root storage")
elif entry.Type == Directory.Type.UserStorage:
print("user storage")
elif entry.Type == Directory.Type.UserStream:
print("user stream")
else:
print("[unknown type]")
output("node color: ")
if entry.NodeColor == Directory.NodeColor.Red:
print("red")
elif entry.NodeColor == Directory.NodeColor.Black:
print("black")
elif entry.NodeColor == Directory.NodeColor.Unknown:
print("[unknown color]")
print("linked dir entries: left: %d; right: %d; root: %d"%
(entry.DirIDLeft, entry.DirIDRight, entry.DirIDRoot))
self.__outputRaw("unique ID", entry.UniqueID)
self.__outputRaw("user flags", entry.UserFlags)
self.__outputRaw("time created", entry.TimeCreated)
self.__outputRaw("time last modified", entry.TimeModified)
output("stream info: ")
if entry.StreamSectorID < 0:
print("[empty stream]")
else:
strmLoc = "SAT"
if entry.StreamLocation == StreamLocation.SSAT:
strmLoc = "SSAT"
print("(first sector ID: %d; size: %d; location: %s)"%
(entry.StreamSectorID, entry.StreamSize, strmLoc))
satObj = None
secSize = 0
if entry.StreamLocation == StreamLocation.SAT:
satObj = self.SAT
secSize = self.header.getSectorSize()
elif entry.StreamLocation == StreamLocation.SSAT:
satObj = self.SSAT
secSize = self.header.getShortSectorSize()
if satObj != None:
chain = satObj.getSectorIDChain(entry.StreamSectorID)
print("sector count: %d"%len(chain))
print("total sector size: %d"%(len(chain)*secSize))
if self.params.showSectorChain:
self.__outputSectorChain(chain)
def __outputSectorChain (self, chain):
line = "sector chain: "
lineLen = len(line)
for id in chain:
frag = "%d, "%id
fragLen = len(frag)
if lineLen + fragLen > 68:
print(line)
line = frag
lineLen = fragLen
else:
line += frag
lineLen += fragLen
if line[-2:] == ", ":
line = line[:-2]
lineLen -= 2
if lineLen > 0:
print(line)
def __outputRaw (self, name, bytes):
if bytes == None:
return
output("%s: "%name)
for byte in bytes:
output("%2.2X "%ord(byte))
print("")
def getDirectoryNames (self):
names = []
for entry in self.entries:
names.append(entry.Name)
return names
def parseDirEntries (self):
if len(self.entries):
# directory entries already built
return
# combine all sectors first.
bytes = ""
for secID in self.sectorIDs:
pos = globals.getSectorPos(secID, self.sectorSize)
bytes += self.bytes[pos:pos+self.sectorSize]
self.entries = []
# each directory entry is exactly 128 bytes.
numEntries = int(len(bytes)/128)
if numEntries == 0:
return
for i in xrange(0, numEntries):
pos = i*128
self.entries.append(self.parseDirEntry(bytes[pos:pos+128]))
def parseDirEntry (self, bytes):
entry = Directory.Entry()
entry.bytes = bytes
name = globals.getUTF8FromUTF16(bytes[0:64])
entry.Name = name
entry.CharBufferSize = getSignedInt(bytes[64:66])
entry.Type = getSignedInt(bytes[66:67])
entry.NodeColor = getSignedInt(bytes[67:68])
entry.DirIDLeft = getSignedInt(bytes[68:72])
entry.DirIDRight = getSignedInt(bytes[72:76])
entry.DirIDRoot = getSignedInt(bytes[76:80])
entry.UniqueID = bytes[80:96]
entry.UserFlags = bytes[96:100]
entry.TimeCreated = bytes[100:108]
entry.TimeModified = bytes[108:116]
entry.StreamSectorID = getSignedInt(bytes[116:120])
entry.StreamSize = getSignedInt(bytes[120:124])
entry.StreamLocation = StreamLocation.SAT
if entry.Type != Directory.Type.RootStorage and \
entry.StreamSize < self.header.minStreamSize:
entry.StreamLocation = StreamLocation.SSAT
if entry.Type == Directory.Type.RootStorage and entry.StreamSectorID >= 0:
# This is an existing root storage.
self.RootStorage = entry
return entry
| lgpl-3.0 | -4,926,262,211,372,972,000 | 29.248991 | 90 | 0.542158 | false | 3.967343 | false | false | false |
ADicksonLab/wepy | src/wepy/hdf5.py | 1 | 206591 | # -*- coding: utf-8 -*-
"""Primary wepy simulation database driver and access API using the
HDF5 format.
The HDF5 Format Specification
=============================
As part of the wepy framework this module provides a fully-featured
API for creating and accessing data generated in weighted ensemble
simulations run with wepy.
The need for a special purpose format is many-fold but primarily it is
the nonlinear branching structure of walker trajectories coupled with
weights.
That is, for standard simulations, data is organized as independent
linear trajectories of frames, each related linearly to the one before
it and after it.
In weighted ensemble due to the resampling (i.e. cloning and merging)
of walkers, a single frame may have multiple 'child' frames.
This is the primary motivation for this format.
However, in practice it solves several other issues and itself is a
more general and flexible format than for just weighted ensemble
simulations.
Concretely the WepyHDF5 format is simply an informally described
schema that is commensurable with the HDF5 constructs of hierarchical
groups (similar to unix filesystem directories) arranged as a tree
with datasets as the leaves.
The hierarchy is fairly deep and so we will progress downwards from
the top and describe each broad section in turn breaking it down when
necessary.
Header
------
The items right under the root of the tree are:
- runs
- topology
- _settings
The first item 'runs' is itself a group that contains all of the
primary data from simulations. In WepyHDF5 the run is the unit
dataset. All data internal to a run is self contained. That is for
multiple dependent trajectories (e.g. from cloning and merging) all
exist within a single run.
This excludes metadata-like things that may be needed for interpreting
this data, such as the molecular topology that imposes structure over
a frame of atom positions. This information is placed in the
'topology' item.
The topology field has no specified internal structure at this
time. However, with the current implementation of the WepyHDF5Reporter
(which is the principal implementation of generating a WepyHDF5
object/file from simulations) this is simply a string dataset. This
string dataset should be a JSON compliant string, the format of which
is specified elsewhere and was borrowed from the mdtraj library.
Warning! this format and specification for the topology is subject to
change in the future and will likely be kept unspecified indefinitely.
For most intents and purposes (which we assume to be for molecular or
molecular-like simulations) the 'topology' item (and perhaps any other
item at the top level other than those proceeded by and underscore,
such as in the '_settings' item) is merely useful metadata that
applies to ALL runs and is not dynamical.
In the language of the orchestration module all data in 'runs' uses
the same 'apparatus' which is the function that takes in the initial
conditions for walkers and produces new walkers. The apparatus may
differ in the specific values of parameters but not in kind. This is
to facilitate runs that are continuations of other runs. For these
kinds of simulations the state of the resampler, boundary conditions,
etc. will not be as they were initially but are the same in kind or
type.
All of the necessary type information of data in runs is kept in the
'_settings' group. This is used to serialize information about the
data types, shapes, run to run continuations etc. This allows for the
initialization of an empty (no runs) WepyHDF5 database at one time and
filling of data at another time. Otherwise types of datasets would
have to be inferred from the data itself, which may not exist yet.
As a convention, items which are preceded by an underscore (following
the python convention) are to be considered hidden and mechanical to
the proper functioning of various WepyHDF5 API features, such as
sparse trajectory fields.
The '_settings' is specified as a simple key-value structure, however
values may be arbitrarily complex.
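As a quick orientation, the top level of a WepyHDF5 file can be
inspected directly with h5py. This is only a minimal sketch; the file
name 'results.wepy.h5' is a hypothetical example rather than part of
the format:
    import json
    import h5py
    with h5py.File('results.wepy.h5', 'r') as f:
        # the header items: 'runs', 'topology', and '_settings'
        print(list(f.keys()))
        # the topology is stored as a JSON string dataset
        top_json = f['topology'][()]
        if isinstance(top_json, bytes):
            top_json = top_json.decode()
        topology = json.loads(top_json)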
Runs
----
The meat of the format is contained within the runs group:
- runs
- 0
- 1
- 2
- ...
Under the runs group are a series of groups for each run. Runs are
named according to the order in which they were added to the database.
Within a run (say '0' from above) we have a number of items:
- 0
- init_walkers
- trajectories
- decision
- resampling
- resampler
- warping
- progress
- boundary_conditions
Trajectories
^^^^^^^^^^^^
The 'trajectories' group is where the data for the frames of the
walker trajectories is stored.
Even though the tree-like trajectories of weighted ensemble data may
be well suited to having a tree-like storage topology we have opted to
use something more familiar to the field, and have used a collection
of linear "trajectories".
This way of breaking up the trajectory data coupled with proper
records of resampling (see below) allows for the imposition of a tree
structure without committing to that as the data storage topology.
This allows the WepyHDF5 format to be easily used as a container
format for collections of linear trajectories. While this is not
supported in any real capacity it is one small step to convergence. We
feel that a format that contains multiple trajectories is important
for situations like weighted ensemble where trajectories are
interdependent. The transition to a storage format like HDF5 however
opens up many possibilities for new features for trajectories that
have not occurred despite several attempts to forge new formats based
on HDF5 (TODO: get references right; see work in mdtraj and MDHDF5).
Perhaps these formats have not caught on because the existing formats
(e.g. XTC, DCD) for simple linear trajectories are good enough and
there is little motivation to migrate.
However, the WepyHDF5 format (and the related sub-formats described
below, e.g. record groups and the trajectory format) both covers a new
use case which can't be achieved with the old formats and handles the
old ones with ease.
Once users see the power of using a format like HDF5 from using wepy
they may continue to use it for simpler simulations.
In any case the 'trajectories' in the group for weighted ensemble
simulations should be thought of only as containers and not literally
as trajectories. That is frame 4 does not necessarily follow from
frame 3. So one may think of them more as "lanes" or "slots" for
trajectory data that needs to be stitched together with the
appropriate resampling records.
The routines and methods for generating contiguous trajectories from
the data in WepyHDF5 are given through the 'analysis' module, which
generates "traces" through the dataset.
With this in mind we will describe the sub-format of a trajectory now.
The 'trajectories' group is similar to the 'runs' group in that it has
sub-groups whose names are numbers. These numbers however are not the
order in which they are created but an index of that trajectory which
are typically laid out all at once.
For a wepy simulation with a constant number of walkers you will only
ever need as many trajectories/slots as there are walkers. So if you
have 8 walkers then you will have trajectories 0 through 7. Concretely:
- runs
- 0
- trajectories
- 0
- 1
- 2
- 3
- 4
- 5
- 6
- 7
If we look at trajectory 0 we might see the following groups within:
- positions
- box_vectors
- velocities
- weights
Which is what you would expect for a constant pressure molecular
dynamics simulation where you have the positions of the atoms, the box
size, and velocities of the atoms.
The particulars for what "fields" a trajectory in general has are not
important but this important use-case is directly supported in the
WepyHDF5 format.
In any such simulation, however, the 'weights' field will appear since
this is the weight of the walker of this frame and is a value
important to weighted ensemble and not the underlying dynamics.
The naive approach to these fields is that each is a dataset of
dimension (n_frames, feature_vector_shape[0], ...) where the first dimension
is the cycle_idx and the rest of the dimensions are determined by the
atomic feature vector for each field for a single frame.
For example, the positions for a molecular simulation with 100 atoms
with x, y, and z coordinates that ran for 1000 cycles would be a
dataset of the shape (1000, 100, 3). Similarly the box vectors would
be (1000, 3, 3) and the weights would be (1000, 1).
This uniformity vastly simplifies accessing and adding new variables
and requires that individual state values in walkers always be arrays
with shapes, even when they are single values (e.g. energy). The
exception being the weight which is handled separately.
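Continuing that example, these shapes can be checked by reading the
datasets directly with h5py (again only a sketch, with a hypothetical
file name and run/trajectory indices):
    import h5py
    with h5py.File('results.wepy.h5', 'r') as f:
        traj = f['runs/0/trajectories/0']
        print(traj['positions'].shape)    # e.g. (1000, 100, 3)
        print(traj['box_vectors'].shape)  # e.g. (1000, 3, 3)
        print(traj['weights'].shape)      # e.g. (1000, 1)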
However, this situation is actually more complex to allow for special
features.
First of all is the presence of compound fields which allow nesting of
multiple groups.
The above "trajectory fields" would have identifiers such as the
literal strings 'positions' and 'box_vectors', while a compound field
would have an identifier 'observables/rmsd' or 'alt_reps/binding_site'.
Use of trajectory field names using the '/' path separator will
automatically make a field a group and the last element of the field
name the dataset. So for the observables example we might have:
- 0
- observables
- rmsd
- sasa
Where the rmsd would be accessed as a trajectory field of trajectory 0
as 'observables/rmsd' and the solvent accessible surface area as
'observables/sasa'.
This example introduces how the WepyHDF5 format is not only useful for
storing data produced by simulation but also in the analysis of that
data and computation of by-frame quantities.
The 'observables' compound group key prefix is special and will be
used in the 'compute_observables' method.
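Since compound fields are just nested groups they are addressed with
the same '/' separated paths when reading the raw file. A minimal
sketch, assuming a hypothetical file name and indices and that an
'observables/rmsd' field was actually computed and saved:
    import h5py
    with h5py.File('results.wepy.h5', 'r') as f:
        rmsds = f['runs/0/trajectories/0/observables/rmsd'][:]
        # one feature vector per frame in this trajectory slot
        print(rmsds.shape)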
The other special compound group key prefix is 'alt_reps' which is
used for particle simulations to store "alternate representation" of
the positions. This is useful in cooperation with the next feature of
wepy trajectory fields to allow for more economical storage of data.
The next feature (and complication of the format) is the allowance for
sparse fields. As the fields were introduced we said that they should
have as many feature vectors as there are frames for the
simulation. In the example however, you will notice that storing both
the full atomic positions and velocities for a long simulation
requires a heavy storage burden.
So perhaps you only want to store the velocities (or forces) every 100
frames so that you are able to restart a simulation from midway
through the simulation. This is achieved through sparse fields.
A sparse field is no longer a dataset but a group with two items:
- _sparse_idxs
- data
The '_sparse_idxs' are simply a dataset of integers that assign each
element of the 'data' dataset to a frame index. Using the above
example we run a simulation for 1000 frames with 100 atoms and we save
the velocities every 100 frames we would have a 'velocities/data'
dataset of shape (100, 100, 3) which is 10 times less data than if it
were saved every frame.
While this complicates the storage format use of the proper API
methods should be transparent whether you are returning a sparse field
or not.
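To make this layout concrete, reading a sparse field at the raw h5py
level looks roughly like the following sketch. The high-level API does
this bookkeeping for you; the file name, indices, and the choice of
'velocities' as the sparse field are hypothetical:
    import h5py
    with h5py.File('results.wepy.h5', 'r') as f:
        field = f['runs/0/trajectories/0/velocities']
        frame_idxs = field['_sparse_idxs'][:]  # frame index of each saved entry
        values = field['data'][:]              # e.g. (100, 100, 3) for the example above
        # map frame index -> feature vector for only the frames that were saved
        sparse_velocities = dict(zip(frame_idxs, values))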
As alluded to above the use of sparse fields can be used for more than
just accessory fields. In many simulations, such as those with full
atomistic simulations of proteins in solvent we often don't care about
the dynamics of most of the atoms in the simulation and so would like
to not have to save them.
The 'alt_reps' compound field is meant to solve this. For example, the
WepyHDF5Reporter supports a special option to save only a subset of
the atoms in the main 'positions' field but also to save the full
atomic system as an alternate representation, which is the field name
'alt_reps/all_atoms'. So that you can still save the full system every
once in a while but be economical in what positions you save every
single frame.
Note that there really isn't a way to achieve this with other
formats. You either make a completely new trajectory with only the
atoms of interest and now you are duplicating those in two places, or
you duplicate and then filter your full systems trajectory file and
rely on some sort of index to always live with it in the filesystem,
which is a very precarious scenario. The situation is particularly
hopeless for weighted ensemble trajectories.
Init Walkers
^^^^^^^^^^^^
The data stored in the 'trajectories' section is the data that is
returned after running dynamics in a cycle. Since we view the WepyHDF5
as a completely self-contained format for simulations it seems
negligent to rely on outside sources (such as the filesystem) for the
initial structures that seeded the simulations. These states (and
weights) can be stored in this group.
The format of this group is identical to the one for trajectories
except that there is only one frame for each slot and so the shape of
the datasets for each field is just the shape of the feature vector.
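For example, reading back the starting state of the walker in slot 0
at the raw level might look like this sketch (hypothetical file name;
the fields present depend on the simulation):
    import h5py
    with h5py.File('results.wepy.h5', 'r') as f:
        walker0 = f['runs/0/init_walkers/0']
        positions = walker0['positions'][:]  # a single frame, e.g. shape (100, 3)
        weight = walker0['weights'][:]       # shape (1,)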
Record Groups
^^^^^^^^^^^^^
TODO: add reference to reference groups
The last five items are what are called 'record groups' and all follow
the same format.
Each record group itself contains a number of datasets, where the
names of the datasets correspond to the 'field names' from the record
group specification. So each record group is simply a key-value store
where the values must be datasets.
For instance the fields in the 'resampling' (which is particularly
important as it encodes the branching structure) record group for a
WExplore resampler simulation are:
- step_idx
- walker_idx
- decision_id
- target_idxs
- region_assignment
Where the 'step_idx' is an integer specifying which step of resampling
within the cycle the resampling action took place (the cycle index is
metadata for the group). The 'walker_idx' is the index of the walker
that this action was assigned to. The 'decision_id' is an integer that
is related to an enumeration of decision types that encodes which
discrete action is to be taken for this resampling event (the
enumeration is in the 'decision' item of the run groups). The
'target_idxs' is a variable length 1-D array of integers which assigns
the results of the action to specific target 'slots' (which was
discussed for the 'trajectories' run group). And the
'region_assignment' is specific to WExplore which reports on which
region the walker was in at that time, and is a variable length 1-D
array of integers.
Additionally, record groups are broken into two types:
- continual
- sporadic
Continual records occur once per cycle and so there is no extra
indexing necessary.
Sporadic records can happen multiple or zero times per cycle and so
require a special index for them which is contained in the extra
dataset '_cycle_idxs'.
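At the raw level a sporadic record group such as 'resampling' can
therefore be read by pairing each record with its cycle index, roughly
as in this sketch (hypothetical file name and run index; the field
names follow the WExplore example above):
    import h5py
    with h5py.File('results.wepy.h5', 'r') as f:
        res = f['runs/0/resampling']
        cycle_idxs = res['_cycle_idxs'][:]
        decisions = res['decision_id'][:]
        walker_idxs = res['walker_idx'][:]
        # record i belongs to cycle cycle_idxs[i] and walker walker_idxs[i]
        for cycle_idx, walker_idx, decision in zip(cycle_idxs, walker_idxs, decisions):
            pass  # e.g. accumulate the branching structure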
It is worth noting that the underlying methods for each record group
are general. So while these are the official wepy record groups that
are supported if there is a use-case that demands a new record group
it is a fairly straightforward task from a developers perspective.
"""
import os.path as osp
from collections import namedtuple, defaultdict, Counter
from collections.abc import Sequence
import itertools as it
import json
from warnings import warn
from copy import copy
import logging
import gc
import numpy as np
import h5py
import networkx as nx
from wepy.analysis.parents import resampling_panel
from wepy.util.mdtraj import mdtraj_to_json_topology, json_to_mdtraj_topology, \
traj_fields_to_mdtraj
from wepy.util.util import traj_box_vectors_to_lengths_angles
from wepy.util.json_top import json_top_subset, json_top_atom_count
# optional dependencies
try:
import mdtraj as mdj
except ModuleNotFoundError:
warn("mdtraj is not installed and that functionality will not work", RuntimeWarning)
try:
import pandas as pd
except ModuleNotFoundError:
warn("pandas is not installed and that functionality will not work", RuntimeWarning)
## h5py settings
# we set the libver to always be the latest (which should be 1.10) so
# that we know we can always use SWMR and the newest features. We
# don't care about backwards compatibility with HDF5 1.8. Just update
# in a new virtualenv if this is a problem for you
H5PY_LIBVER = 'latest'
## Header and settings keywords
TOPOLOGY = 'topology'
"""Default header apparatus dataset. The molecular topology dataset."""
SETTINGS = '_settings'
"""Name of the settings group in the header group."""
RUNS = 'runs'
"""The group name for runs."""
## metadata fields
RUN_IDX = 'run_idx'
"""Metadata field for run groups for the run index within this file."""
RUN_START_SNAPSHOT_HASH = 'start_snapshot_hash'
"""Metadata field for a run that corresponds to the hash of the
starting simulation snapshot in orchestration."""
RUN_END_SNAPSHOT_HASH = 'end_snapshot_hash'
"""Metadata field for a run that corresponds to the hash of the
ending simulation snapshot in orchestration."""
TRAJ_IDX = 'traj_idx'
"""Metadata field for trajectory groups for the trajectory index in that run."""
## Misc. Names
CYCLE_IDX = 'cycle_idx'
"""String for setting the names of cycle indices in records and
miscellaneous situations."""
## Settings field names
SPARSE_FIELDS = 'sparse_fields'
"""Settings field name for sparse field trajectory field flags."""
N_ATOMS = 'n_atoms'
"""Settings field name group for the number of atoms in the default positions field."""
N_DIMS_STR = 'n_dims'
"""Settings field name for positions field spatial dimensions."""
MAIN_REP_IDXS = 'main_rep_idxs'
"""Settings field name for the indices of the full apparatus topology in
the default positions trajectory field."""
ALT_REPS_IDXS = 'alt_reps_idxs'
"""Settings field name for the different 'alt_reps'. The indices of
the atoms from the full apparatus topology for each."""
FIELD_FEATURE_SHAPES_STR = 'field_feature_shapes'
"""Settings field name for the trajectory field shapes."""
FIELD_FEATURE_DTYPES_STR = 'field_feature_dtypes'
"""Settings field name for the trajectory field data types."""
UNITS = 'units'
"""Settings field name for the units of the trajectory fields."""
RECORD_FIELDS = 'record_fields'
"""Settings field name for the record fields that are to be included
in the truncated listing of record group fields."""
CONTINUATIONS = 'continuations'
"""Settings field name for the continuations relationships between runs."""
## Run Fields Names
TRAJECTORIES = 'trajectories'
"""Run field name for the trajectories group."""
INIT_WALKERS = 'init_walkers'
"""Run field name for the initial walkers group."""
DECISION = 'decision'
"""Run field name for the decision enumeration group."""
## Record Groups Names
RESAMPLING = 'resampling'
"""Record group run field name for the resampling records """
RESAMPLER = 'resampler'
"""Record group run field name for the resampler records """
WARPING = 'warping'
"""Record group run field name for the warping records """
PROGRESS = 'progress'
"""Record group run field name for the progress records """
BC = 'boundary_conditions'
"""Record group run field name for the boundary conditions records """
## Record groups constants
# special datatypes strings
NONE_STR = 'None'
"""String signifying a field of unspecified shape. Used for
serializing the None python object."""
CYCLE_IDXS = '_cycle_idxs'
"""Group name for the cycle indices of sporadic records."""
# records can be sporadic or continual. Continual records are
# generated every cycle and are saved every cycle and are for all
# walkers. Sporadic records are generated conditional on specific
# events taking place and thus may or may not be produced each
# cycle. There also is not a single record for each (cycle, step) like
# there would be for continual ones because they can occur for single
# walkers, boundary conditions, or resamplers.
SPORADIC_RECORDS = (RESAMPLER, WARPING, RESAMPLING, BC)
"""Enumeration of the record groups that are sporadic."""
## Trajectories Group
# Default Trajectory Constants
N_DIMS = 3
"""Number of dimensions for the default positions."""
# Required Trajectory Fields
WEIGHTS = 'weights'
"""The field name for the frame weights."""
# default fields for trajectories
POSITIONS = 'positions'
"""The field name for the default positions."""
BOX_VECTORS = 'box_vectors'
"""The field name for the default box vectors."""
VELOCITIES = 'velocities'
"""The field name for the default velocities."""
FORCES = 'forces'
"""The field name for the default forces."""
TIME = 'time'
"""The field name for the default time."""
KINETIC_ENERGY = 'kinetic_energy'
"""The field name for the default kinetic energy."""
POTENTIAL_ENERGY = 'potential_energy'
"""The field name for the default potential energy."""
BOX_VOLUME = 'box_volume'
"""The field name for the default box volume."""
PARAMETERS = 'parameters'
"""The field name for the default parameters."""
PARAMETER_DERIVATIVES = 'parameter_derivatives'
"""The field name for the default parameter derivatives."""
ALT_REPS = 'alt_reps'
"""The field name for the default compound field observables."""
OBSERVABLES = 'observables'
"""The field name for the default compound field observables."""
## Trajectory Field Constants
WEIGHT_SHAPE = (1,)
"""Weights feature vector shape."""
WEIGHT_DTYPE = np.float64
"""Weights feature vector data type."""
# Default Trajectory Field Constants
FIELD_FEATURE_SHAPES = ((TIME, (1,)),
(BOX_VECTORS, (3,3)),
(BOX_VOLUME, (1,)),
(KINETIC_ENERGY, (1,)),
(POTENTIAL_ENERGY, (1,)),
)
"""Default shapes for the default fields."""
FIELD_FEATURE_DTYPES = ((POSITIONS, np.float64),
                        (VELOCITIES, np.float64),
                        (FORCES, np.float64),
                        (TIME, np.float64),
                        (BOX_VECTORS, np.float64),
                        (BOX_VOLUME, np.float64),
                        (KINETIC_ENERGY, np.float64),
                        (POTENTIAL_ENERGY, np.float64),
)
"""Default data types for the default fields."""
# Positions (and thus velocities and forces) are determined by the
# N_DIMS (which can be customized) and more importantly the number of
# particles which is always different. All the others are always wacky
# and different.
POSITIONS_LIKE_FIELDS = (VELOCITIES, FORCES)
"""Default trajectory fields which are the same shape as the main positions field."""
## Trajectory field features keys
# sparse trajectory fields
DATA = 'data'
"""Name of the dataset in sparse trajectory fields."""
SPARSE_IDXS = '_sparse_idxs'
"""Name of the dataset that indexes sparse trajectory fields."""
# utility for paths
def _iter_field_paths(grp):
"""Return all subgroup field name paths from a group.
Useful for compound fields. For example if you have the group
observables with multiple subfields:
- observables
- rmsd
- sasa
Passing the h5py group 'observables' will return the full field
names for each subfield:
- 'observables/rmsd'
- 'observables/sasa'
Parameters
----------
grp : h5py.Group
The group to enumerate subfield names for.
Returns
-------
subfield_names : list of str
The full names for the subfields of the group.
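Examples
--------
Illustrative sketch only (the group contents here are hypothetical);
given a trajectory group with a 'weights' dataset and an
'observables' subgroup holding 'rmsd' and 'sasa' datasets:

>>> _iter_field_paths(traj_grp)  # doctest: +SKIP
['observables/rmsd', 'observables/sasa', 'weights']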
"""
field_paths = []
for field_name in grp:
if isinstance(grp[field_name], h5py.Group):
for subfield in grp[field_name]:
# if it is a sparse field don't do the subfields since
# they will be _sparse_idxs and data which are not
# what we want here
if field_name not in grp.file['_settings/sparse_fields']:
field_paths.append(field_name + '/' + subfield)
else:
field_paths.append(field_name)
return field_paths
class WepyHDF5(object):
"""Wrapper for h5py interface to an HDF5 file object for creation and
access of WepyHDF5 data.
This is the primary implementation of the API for creating,
accessing, and modifying data in an HDF5 file that conforms to the
WepyHDF5 specification.
"""
MODES = ('r', 'r+', 'w', 'w-', 'x', 'a')
"""The recognized modes for opening the WepyHDF5 file."""
WRITE_MODES = ('r+', 'w', 'w-', 'x', 'a')
#### dunder methods
def __init__(self, filename, mode='x',
topology=None,
units=None,
sparse_fields=None,
feature_shapes=None, feature_dtypes=None,
n_dims=None,
alt_reps=None, main_rep_idxs=None,
swmr_mode=False,
expert_mode=False
):
"""Constructor for the WepyHDF5 class.
Initialize a new Wepy HDF5 file. This will create an h5py.File
object.
The File will be closed after construction by default.
mode:
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
x or w- Create file, fail if exists
a Read/write if exists, create otherwise
Parameters
----------
filename : str
File path
mode : str
Mode specification for opening the HDF5 file.
topology : str
JSON string representing topology of system being simulated.
units : dict of str : str, optional
Mapping of trajectory field names to string specs
for units.
sparse_fields : list of str, optional
List of trajectory fields that should be initialized as sparse.
feature_shapes : dict of str : shape_spec, optional
Mapping of trajectory fields to their shape spec for initialization.
feature_dtypes : dict of str : dtype_spec, optional
Mapping of trajectory fields to their dtype spec for initialization.
n_dims : int, default: 3
Set the number of spatial dimensions for the default
positions trajectory field.
alt_reps : dict of str : list of int, optional
Specifies that there will be 'alt_reps' of positions each
named by the keys of this mapping and containing the
indices in each value list.
main_rep_idxs : list of int, optional
The indices of atom positions to save as the main 'positions'
trajectory field. Defaults to all atoms.
expert_mode : bool
If True no initialization is performed other than the
setting of the filename. Useful mainly for debugging.
Raises
------
AssertionError
If the mode is not one of the supported mode specs.
AssertionError
If a topology is not given for a creation mode.
Warns
-----
If initialization data was given but the file was opened in a read mode.
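Examples
--------
Minimal sketch of creating and then reading a file; the file name and
the JSON topology string ``json_top_str`` are hypothetical:

>>> wepy_h5 = WepyHDF5('results.wepy.h5', mode='w', topology=json_top_str)  # doctest: +SKIP
>>> wepy_h5.closed   # the file is closed after construction
True
>>> with wepy_h5:    # reopen it as a context manager for access
...     run_idxs = wepy_h5.run_idxs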
"""
self._filename = filename
self._swmr_mode = swmr_mode
if expert_mode is True:
self._h5 = None
self._wepy_mode = None
self._h5py_mode = None
self.closed = None
# terminate the constructor here
return None
assert mode in self.MODES, \
"mode must be either one of: {}".format(', '.join(self.MODES))
# the top level mode enforced by wepy.hdf5
self._wepy_mode = mode
# the lower level h5py mode. This was originally different in order
# to accommodate different modes at the wepy level for
# concatenation. I will leave these separate because this is
# used elsewhere and could be a feature in the future.
self._h5py_mode = mode
# Temporary metadata: used to initialize the object but not
# used after that
self._topology = topology
self._units = units
self._n_dims = n_dims
self._n_coords = None
# set hidden feature shapes and dtype, which are only
# referenced if needed when trajectories are created. These
# will be saved in the settings section in the actual HDF5
# file
self._field_feature_shapes_kwarg = feature_shapes
self._field_feature_dtypes_kwarg = feature_dtypes
self._field_feature_dtypes = None
self._field_feature_shapes = None
# save the sparse fields as a private variable for use in the
# create constructor
if sparse_fields is None:
self._sparse_fields = []
else:
self._sparse_fields = sparse_fields
# if we specify an atom subset of the main POSITIONS field
# we must save them
self._main_rep_idxs = main_rep_idxs
# a dictionary specifying other alt_reps to be saved
if alt_reps is not None:
self._alt_reps = alt_reps
# all alt_reps are sparse
alt_rep_keys = ['{}/{}'.format(ALT_REPS, key) for key in self._alt_reps.keys()]
self._sparse_fields.extend(alt_rep_keys)
else:
self._alt_reps = {}
# open the file and then run the different constructors based
# on the mode
with h5py.File(filename, mode=self._h5py_mode,
libver=H5PY_LIBVER, swmr=self._swmr_mode) as h5:
self._h5 = h5
# set SWMR mode if asked for if we are in write mode also
if self._swmr_mode is True and mode in self.WRITE_MODES:
self._h5.swmr_mode = swmr_mode
# create file mode: 'w' will create a new file or overwrite,
# 'w-' and 'x' will not overwrite but will create a new file
if self._wepy_mode in ['w', 'w-', 'x']:
self._create_init()
# read/write mode: in this mode we do not completely overwrite
# the old file and start again but rather write over top of
# values if requested
elif self._wepy_mode in ['r+']:
self._read_write_init()
# add mode: read/write create if doesn't exist
elif self._wepy_mode in ['a']:
if osp.exists(self._filename):
self._read_write_init()
else:
self._create_init()
# read only mode
elif self._wepy_mode == 'r':
# if any data was given, warn the user
if any([kwarg is not None for kwarg in
[topology, units, sparse_fields,
feature_shapes, feature_dtypes,
n_dims, alt_reps, main_rep_idxs]]):
warn("Data was given but opening in read-only mode", RuntimeWarning)
# then run the initialization process
self._read_init()
# flush the buffers
self._h5.flush()
# set the h5py mode to the value in the actual h5py.File
# object after creation
self._h5py_mode = self._h5.mode
# get rid of the temporary variables
del self._topology
del self._units
del self._n_dims
del self._n_coords
del self._field_feature_shapes_kwarg
del self._field_feature_dtypes_kwarg
del self._field_feature_shapes
del self._field_feature_dtypes
del self._sparse_fields
del self._main_rep_idxs
del self._alt_reps
# variable to reflect if it is closed or not, should be closed
# after initialization
self.closed = True
# end of the constructor
return None
# TODO is this right? shouldn't we actually delete the data then close
def __del__(self):
self.close()
# context manager methods
def __enter__(self):
self.open()
# self._h5 = h5py.File(self._filename,
# libver=H5PY_LIBVER, swmr=self._swmr_mode)
# self.closed = False
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
@property
def swmr_mode(self):
return self._swmr_mode
@swmr_mode.setter
def swmr_mode(self, val):
self._swmr_mode = val
# TODO custom deepcopy to avoid copying the actual HDF5 object
#### hidden methods (_method_name)
### constructors
def _create_init(self):
"""Creation mode constructor.
Completely overwrite the data in the file. Reinitialize the values
and set with the new ones if given.
"""
assert self._topology is not None, \
"Topology must be given for a creation constructor"
# initialize the runs group
runs_grp = self._h5.create_group(RUNS)
# initialize the settings group
settings_grp = self._h5.create_group(SETTINGS)
# create the topology dataset
self._h5.create_dataset(TOPOLOGY, data=self._topology)
# sparse fields
if self._sparse_fields is not None:
# make a dataset for the sparse fields allowed. this requires
# a 'special' datatype for variable length strings. This is
# supported by HDF5 but not numpy.
vlen_str_dt = h5py.special_dtype(vlen=str)
# create the dataset with empty values for the length of the
# sparse fields given
sparse_fields_ds = settings_grp.create_dataset(SPARSE_FIELDS,
(len(self._sparse_fields),),
dtype=vlen_str_dt,
maxshape=(None,))
# set the flags
for i, sparse_field in enumerate(self._sparse_fields):
sparse_fields_ds[i] = sparse_field
# field feature shapes and dtypes
# initialize to the defaults, this gives values to
# self._n_coords, and self.field_feature_dtypes, and
# self.field_feature_shapes
self._set_default_init_field_attributes(n_dims=self._n_dims)
# save the number of dimensions and number of atoms in settings
settings_grp.create_dataset(N_DIMS_STR, data=np.array(self._n_dims))
settings_grp.create_dataset(N_ATOMS, data=np.array(self._n_coords))
# the main rep atom idxs
settings_grp.create_dataset(MAIN_REP_IDXS, data=self._main_rep_idxs, dtype=np.int)
# alt_reps settings
alt_reps_idxs_grp = settings_grp.create_group(ALT_REPS_IDXS)
for alt_rep_name, idxs in self._alt_reps.items():
alt_reps_idxs_grp.create_dataset(alt_rep_name, data=idxs, dtype=np.int)
# if both feature shapes and dtypes were specified overwrite
# (or initialize if not set by defaults) the defaults
if (self._field_feature_shapes_kwarg is not None) and\
(self._field_feature_dtypes_kwarg is not None):
self._field_feature_shapes.update(self._field_feature_shapes_kwarg)
self._field_feature_dtypes.update(self._field_feature_dtypes_kwarg)
# any sparse field with unspecified shape and dtype must be
# set to None so that it will be set at runtime
for sparse_field in self.sparse_fields:
if (not sparse_field in self._field_feature_shapes) or \
(not sparse_field in self._field_feature_dtypes):
self._field_feature_shapes[sparse_field] = None
self._field_feature_dtypes[sparse_field] = None
# save the field feature shapes and dtypes in the settings group
shapes_grp = settings_grp.create_group(FIELD_FEATURE_SHAPES_STR)
for field_path, field_shape in self._field_feature_shapes.items():
if field_shape is None:
# set it as a dimensionless array of NaN
field_shape = np.array(np.nan)
shapes_grp.create_dataset(field_path, data=field_shape)
dtypes_grp = settings_grp.create_group(FIELD_FEATURE_DTYPES_STR)
for field_path, field_dtype in self._field_feature_dtypes.items():
if field_dtype is None:
dt_str = NONE_STR
else:
# make a json string of the datatype that can be read
# in again, we call np.dtype again because there is no
# np.float.descr attribute
dt_str = json.dumps(np.dtype(field_dtype).descr)
dtypes_grp.create_dataset(field_path, data=dt_str)
# initialize the units group
unit_grp = self._h5.create_group(UNITS)
# if units were not given set them all to None
if self._units is None:
self._units = {}
for field_path in self._field_feature_shapes.keys():
self._units[field_path] = None
# set the units
for field_path, unit_value in self._units.items():
# ignore the field if not given
if unit_value is None:
continue
unit_path = '{}/{}'.format(UNITS, field_path)
unit_grp.create_dataset(unit_path, data=unit_value)
# create the group for the run data records
records_grp = settings_grp.create_group(RECORD_FIELDS)
# create a dataset for the continuation run tuples
# (continuation_run, base_run), where the first element
# of the new run that is continuing the run in the second
# position
self._init_continuations()
def _read_write_init(self):
"""Read-write mode constructor."""
self._read_init()
def _add_init(self):
"""The addition mode constructor.
Create the dataset if it doesn't exist and put it in r+ mode,
otherwise, just open in r+ mode.
"""
if not any(self._exist_flags):
self._create_init()
else:
self._read_write_init()
def _read_init(self):
"""Read mode constructor."""
pass
def _set_default_init_field_attributes(self, n_dims=None):
"""Sets the feature_shapes and feature_dtypes to be the default for
this module. These will be used to initialize field datasets when no
given during construction (i.e. for sparse values)
Parameters
----------
n_dims : int
"""
# we use the module defaults for the datasets to initialize them
field_feature_shapes = dict(FIELD_FEATURE_SHAPES)
field_feature_dtypes = dict(FIELD_FEATURE_DTYPES)
# get the number of coordinates of positions. If there is a
# main_reps then we have to set the number of atoms to that,
# if not we count the number of atoms in the topology
if self._main_rep_idxs is None:
self._n_coords = json_top_atom_count(self.topology)
self._main_rep_idxs = list(range(self._n_coords))
else:
self._n_coords = len(self._main_rep_idxs)
# get the number of dimensions as a default
if n_dims is None:
self._n_dims = N_DIMS
# feature shapes for positions and positions-like fields are
# not known at the module level due to different number of
# coordinates (number of atoms) and number of dimensions
# (default 3 spatial). We set them now that we know this
# information.
# add the positions shape
field_feature_shapes[POSITIONS] = (self._n_coords, self._n_dims)
# add the positions-like field shapes (velocities and forces) as the same
for poslike_field in POSITIONS_LIKE_FIELDS:
field_feature_shapes[poslike_field] = (self._n_coords, self._n_dims)
# set the attributes
self._field_feature_shapes = field_feature_shapes
self._field_feature_dtypes = field_feature_dtypes
def _get_field_path_grp(self, run_idx, traj_idx, field_path):
"""Given a field path for the trajectory returns the group the field's
dataset goes in and the key for the field name in that group.
The field path for a simple field is just the name of the
field and for a compound field it is the compound field group
name with the subfield separated by a '/' like
'observables/observable1' where 'observables' is the compound
field group and 'observable1' is the subfield name.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Returns
-------
group : h5py.Group
field_name : str
"""
# check if it is compound
if '/' in field_path:
# split it
grp_name, field_name = field_path.split('/')
# get the hdf5 group
grp = self.h5['{}/{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx, grp_name)]
# its simple so just return the root group and the original path
else:
grp = self.h5
field_name = field_path
return grp, field_name
def _init_continuations(self):
"""This will either create a dataset in the settings for the
continuations or if continuations already exist it will reinitialize
them and delete the data that exists there.
Returns
-------
continuation_dset : h5py.Dataset
"""
# if the continuations dset already exists we reinitialize the
# data
if CONTINUATIONS in self.settings_grp:
cont_dset = self.settings_grp[CONTINUATIONS]
cont_dset.resize( (0,2) )
# otherwise we just create the data
else:
cont_dset = self.settings_grp.create_dataset(CONTINUATIONS, shape=(0,2), dtype=np.int,
maxshape=(None, 2))
return cont_dset
def _add_run_init(self, run_idx, continue_run=None):
"""Routines for creating a run includes updating and setting object
global variables, increasing the counter for the number of runs.
Parameters
----------
run_idx : int
continue_run : int
Index of the run to continue.
"""
# add the run idx as metadata in the run group
self._h5['{}/{}'.format(RUNS, run_idx)].attrs[RUN_IDX] = run_idx
# if this is continuing another run add the tuple (this_run,
# continues_run) to the continutations settings
if continue_run is not None:
self.add_continuation(run_idx, continue_run)
def _add_init_walkers(self, init_walkers_grp, init_walkers):
"""Adds the run field group for the initial walkers.
Parameters
----------
init_walkers_grp : h5py.Group
The group to add the walker data to.
init_walkers : list of objects implementing the Walker interface
The walkers to save in the group
"""
# add the initial walkers to the group by essentially making
# new trajectories here that will only have one frame
for walker_idx, walker in enumerate(init_walkers):
walker_grp = init_walkers_grp.create_group(str(walker_idx))
# weights
# get the weight from the walker and make a feature array of it
weights = np.array([[walker.weight]])
# then create the dataset and set it
walker_grp.create_dataset(WEIGHTS, data=weights)
# state fields data
for field_key, field_value in walker.state.dict().items():
# values may be None, just ignore them
if field_value is not None:
# just create the dataset by making it a feature array
# (wrapping it in another list)
walker_grp.create_dataset(field_key, data=np.array([field_value]))
def _init_run_sporadic_record_grp(self, run_idx, run_record_key, fields):
"""Initialize a sporadic record group for a run.
Parameters
----------
run_idx : int
run_record_key : str
The record group name.
fields : list of field specs
Each field spec is a 3-tuple of
(field_name : str, field_shape : shape_spec, field_dtype : dtype_spec)
Returns
-------
record_group : h5py.Group
"""
# create the group
run_grp = self.run(run_idx)
record_grp = run_grp.create_group(run_record_key)
# initialize the cycles dataset that maps when the records
# were recorded
record_grp.create_dataset(CYCLE_IDXS, (0,), dtype=np.int,
maxshape=(None,))
# for each field simply create the dataset
for field_name, field_shape, field_dtype in fields:
# initialize this field
self._init_run_records_field(run_idx, run_record_key,
field_name, field_shape, field_dtype)
return record_grp
def _init_run_continual_record_grp(self, run_idx, run_record_key, fields):
"""Initialize a continual record group for a run.
Parameters
----------
run_idx : int
run_record_key : str
The record group name.
fields : list of field specs
Each field spec is a 3-tuple of
(field_name : str, field_shape : shape_spec, field_dtype : dtype_spec)
Returns
-------
record_group : h5py.Group
"""
# create the group
run_grp = self.run(run_idx)
record_grp = run_grp.create_group(run_record_key)
# for each field simply create the dataset
for field_name, field_shape, field_dtype in fields:
self._init_run_records_field(run_idx, run_record_key,
field_name, field_shape, field_dtype)
return record_grp
def _init_run_records_field(self, run_idx, run_record_key,
field_name, field_shape, field_dtype):
"""Initialize a single field for a run record group.
Parameters
----------
run_idx : int
run_record_key : str
The name of the record group.
field_name : str
The name of the field in the record group.
field_shape : tuple of int
The shape of the dataset for the field.
field_dtype : dtype_spec
An h5py recognized data type.
Returns
-------
dataset : h5py.Dataset
"""
record_grp = self.run(run_idx)[run_record_key]
# check if it is variable length
if field_shape is Ellipsis:
# make a special dtype that allows it to be
# variable length
vlen_dt = h5py.special_dtype(vlen=field_dtype)
# this is only allowed to be a single dimension
# since no real shape was given
dset = record_grp.create_dataset(field_name, (0,), dtype=vlen_dt,
maxshape=(None,))
# if it is not variable length just make it normally
else:
# create the group
dset = record_grp.create_dataset(field_name, (0, *field_shape), dtype=field_dtype,
maxshape=(None, *field_shape))
return dset
def _is_sporadic_records(self, run_record_key):
"""Tests whether a record group is sporadic or not.
Parameters
----------
run_record_key : str
Record group name.
Returns
-------
is_sporadic : bool
True if the record group is sporadic False if not.
"""
# assume it is continual and check if it is in the sporadic groups
if run_record_key in SPORADIC_RECORDS:
return True
else:
return False
def _init_traj_field(self, run_idx, traj_idx, field_path, feature_shape, dtype):
"""Initialize a trajectory field.
Initialize a data field in the trajectory to be empty but
resizeable.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name specification.
feature_shape : shape_spec
Specification of shape of a feature vector of the field.
dtype : dtype_spec
Specification of the feature vector datatype.
"""
# check whether this is a sparse field and create it
# appropriately
if field_path in self.sparse_fields:
# it is a sparse field
self._init_sparse_traj_field(run_idx, traj_idx, field_path, feature_shape, dtype)
else:
# it is not a sparse field (AKA simple)
self._init_contiguous_traj_field(run_idx, traj_idx, field_path, feature_shape, dtype)
def _init_contiguous_traj_field(self, run_idx, traj_idx, field_path, shape, dtype):
"""Initialize a contiguous (non-sparse) trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name specification.
feature_shape : tuple of int
Shape of the feature vector of the field.
dtype : dtype_spec
H5py recognized datatype
"""
traj_grp = self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
# create the empty dataset in the correct group, setting
# maxshape so it can be resized for new feature vectors to be added
traj_grp.create_dataset(field_path, (0, *[0 for i in shape]), dtype=dtype,
maxshape=(None, *shape))
def _init_sparse_traj_field(self, run_idx, traj_idx, field_path, shape, dtype):
"""
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name specification.
feature_shape : shape_spec
Specification for the shape of the feature.
dtype : dtype_spec
Specification for the dtype of the feature.
"""
traj_grp = self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
# check to see that neither the shape and dtype are
# None which indicates it is a runtime defined value and
# should be ignored here
if (shape is None) or (dtype is None):
# do nothing
pass
else:
# only create the group if you are going to add the
# datasets so the extend function can know if it has been
# properly initialized easier
sparse_grp = traj_grp.create_group(field_path)
# create the dataset for the feature data
sparse_grp.create_dataset(DATA, (0, *[0 for i in shape]), dtype=dtype,
maxshape=(None, *shape))
# create the dataset for the sparse indices
sparse_grp.create_dataset(SPARSE_IDXS, (0,), dtype=np.int, maxshape=(None,))
def _init_traj_fields(self, run_idx, traj_idx,
field_paths, field_feature_shapes, field_feature_dtypes):
"""Initialize a number of fields for a trajectory.
Parameters
----------
run_idx : int
traj_idx : int
field_paths : list of str
List of field names.
field_feature_shapes : list of shape_specs
field_feature_dtypes : list of dtype_specs
"""
for i, field_path in enumerate(field_paths):
self._init_traj_field(run_idx, traj_idx,
field_path, field_feature_shapes[i], field_feature_dtypes[i])
def _add_traj_field_data(self,
run_idx,
traj_idx,
field_path,
field_data,
sparse_idxs=None,
):
"""Add a trajectory field to a trajectory.
If the sparse indices are given the field will be created as a
sparse field otherwise a normal one.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name.
field_data : numpy.array
The data array to set for the field.
sparse_idxs : arraylike of int of shape (1,)
List of cycle indices that the data corresponds to.
"""
# get the traj group
traj_grp = self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
# if it is a sparse dataset we need to add the data and add
# the idxs in a group
if sparse_idxs is None:
# first require that the dataset exist and is exactly the
# same as the one that already exists (if indeed it
# does). If it doesn't raise a specific error letting the
# user know that they will have to delete the dataset if
# they want to change it to something else
try:
dset = traj_grp.require_dataset(field_path, shape=field_data.shape, dtype=field_data.dtype,
exact=True,
maxshape=(None, *field_data.shape[1:]))
except TypeError:
raise TypeError("For changing the contents of a trajectory field it must be the same shape and dtype.")
# if that succeeds then go ahead and set the data to the
# dataset (overwriting if it is still there)
dset[...] = field_data
else:
sparse_grp = traj_grp.create_group(field_path)
# add the data to this group
sparse_grp.create_dataset(DATA, data=field_data,
maxshape=(None, *field_data.shape[1:]))
# add the sparse idxs
sparse_grp.create_dataset(SPARSE_IDXS, data=sparse_idxs,
maxshape=(None,))
def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):
"""Add multiple new frames worth of data to the end of an existing
contiguous (non-sparse) trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name
field_data : numpy.array
The frames of data to add.
"""
traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
field = traj_grp[field_path]
# make sure this is a feature vector
assert len(field_data.shape) > 1, \
"field_data must be a feature vector with the same number of dimensions as the number"
# of datase new frames
n_new_frames = field_data.shape[0]
# check the field to make sure it is not empty
if all([i == 0 for i in field.shape]):
# check the feature shape against the maxshape which gives
# the feature dimensions for an empty dataset
assert field_data.shape[1:] == field.maxshape[1:], \
"field feature dimensions must be the same, i.e. all but the first dimension"
# if it is empty resize it to make an array the size of
# the new field_data with the maxshape for the feature
# dimensions
feature_dims = field.maxshape[1:]
field.resize( (n_new_frames, *feature_dims) )
# set the new data to this
field[0:, ...] = field_data
else:
# make sure the new data has the right dimensions against
# the shape it already has
assert field_data.shape[1:] == field.shape[1:], \
"field feature dimensions must be the same, i.e. all but the first dimension"
# append to the dataset on the first dimension, keeping the
# others the same, these must be feature vectors and therefore
# must exist
field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )
# add the new data
field[-n_new_frames:, ...] = field_data
def _extend_sparse_traj_field(self, run_idx, traj_idx, field_path, values, sparse_idxs):
"""Add multiple new frames worth of data to the end of an existing
contiguous (non-sparse)trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Field name
values : numpy.array
The frames of data to add.
sparse_idxs : list of int
The cycle indices the values correspond to.
"""
field = self.h5['{}/{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx, field_path)]
field_data = field[DATA]
field_sparse_idxs = field[SPARSE_IDXS]
# number of new frames
n_new_frames = values.shape[0]
# if this sparse_field has been initialized empty we need to resize
if all([i == 0 for i in field_data.shape]):
# check the feature shape against the maxshape which gives
# the feature dimensions for an empty dataset
assert values.shape[1:] == field_data.maxshape[1:], \
"input value features have shape {}, expected {}".format(
values.shape[1:], field_data.maxshape[1:])
# if it is empty resize it to make an array the size of
# the new values with the maxshape for the feature
# dimensions
feature_dims = field_data.maxshape[1:]
field_data.resize( (n_new_frames, *feature_dims) )
# set the new data to this
field_data[0:, ...] = values
else:
# make sure the new data has the right dimensions
assert values.shape[1:] == field_data.shape[1:], \
"field feature dimensions must be the same, i.e. all but the first dimension"
# append to the dataset on the first dimension, keeping the
# others the same, these must be feature vectors and therefore
# must exist
field_data.resize( (field_data.shape[0] + n_new_frames, *field_data.shape[1:]) )
# add the new data
field_data[-n_new_frames:, ...] = values
# add the sparse idxs in the same way
field_sparse_idxs.resize( (field_sparse_idxs.shape[0] + n_new_frames,
*field_sparse_idxs.shape[1:]) )
# add the new data
field_sparse_idxs[-n_new_frames:, ...] = sparse_idxs
def _add_sparse_field_flag(self, field_path):
"""Register a trajectory field as sparse in the header settings.
Parameters
----------
field_path : str
Name of the trajectory field you want to flag as sparse
"""
sparse_fields_ds = self._h5['{}/{}'.format(SETTINGS, SPARSE_FIELDS)]
# make sure it isn't already in the sparse_fields, if it is warn
# and do nothing
if field_path in sparse_fields_ds[:]:
    warn("sparse field {} already a sparse field, ignoring".format(field_path))
    return
sparse_fields_ds.resize( (sparse_fields_ds.shape[0] + 1,) )
sparse_fields_ds[sparse_fields_ds.shape[0] - 1] = field_path
def _add_field_feature_shape(self, field_path, field_feature_shape):
"""Add the shape to the header settings for a trajectory field.
Parameters
----------
field_path : str
The name of the trajectory field you want to set for.
field_feature_shape : shape_spec
The shape spec to serialize as a dataset.
"""
shapes_grp = self._h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_SHAPES_STR)]
shapes_grp.create_dataset(field_path, data=np.array(field_feature_shape))
def _add_field_feature_dtype(self, field_path, field_feature_dtype):
"""Add the data type to the header settings for a trajectory field.
Parameters
----------
field_path : str
The name of the trajectory field you want to set for.
field_feature_dtype : dtype_spec
The dtype spec to serialize as a dataset.
"""
feature_dtype_str = json.dumps(field_feature_dtype.descr)
dtypes_grp = self._h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR)]
dtypes_grp.create_dataset(field_path, data=feature_dtype_str)
def _set_field_feature_shape(self, field_path, field_feature_shape):
"""Add the trajectory field shape to header settings or set the value.
Parameters
----------
field_path : str
The name of the trajectory field you want to set for.
field_feature_shape : shape_spec
The shape spec to serialize as a dataset.
"""
# check if the field_feature_shape is already set
if field_path in self.field_feature_shapes:
# check that the shape was previously saved as "None" as we
# won't overwrite anything else
if self.field_feature_shapes[field_path] is None:
full_path = '{}/{}/{}'.format(SETTINGS, FIELD_FEATURE_SHAPES_STR, field_path)
# we have to delete the old data and set new data
del self.h5[full_path]
self.h5.create_dataset(full_path, data=field_feature_shape)
else:
raise AttributeError(
"Cannot overwrite feature shape for {} with {} because it is {} not {}".format(
field_path, field_feature_shape, self.field_feature_shapes[field_path],
NONE_STR))
# it was not previously set so we must create then save it
else:
self._add_field_feature_shape(field_path, field_feature_shape)
def _set_field_feature_dtype(self, field_path, field_feature_dtype):
"""Add the trajectory field dtype to header settings or set the value.
Parameters
----------
field_path : str
The name of the trajectory field you want to set for.
field_feature_dtype : dtype_spec
The dtype spec to serialize as a dataset.
"""
feature_dtype_str = json.dumps(field_feature_dtype.descr)
# check if the field_feature_dtype is already set
if field_path in self.field_feature_dtypes:
# check that the dtype was previously saved as "None" as we
# won't overwrite anything else
if self.field_feature_dtypes[field_path] is None:
full_path = '{}/{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR, field_path)
# we have to delete the old data and set new data
del self.h5[full_path]
self.h5.create_dataset(full_path, data=feature_dtype_str)
else:
raise AttributeError(
"Cannot overwrite feature dtype for {} with {} because it is {} not ".format(
field_path, field_feature_dtype, self.field_feature_dtypes[field_path],
NONE_STR))
# it was not previously set so we must create then save it
else:
self._add_field_feature_dtype(field_path, field_feature_dtype)
def _extend_run_record_data_field(self, run_idx, run_record_key,
field_name, field_data):
"""Primitive record append method.
Adds data for a single field dataset in a run records group. This
is done without paying attention to whether it is sporadic or
continual and is supposed to be only the data write method.
Parameters
----------
run_idx : int
run_record_key : str
Name of the record group.
field_name : str
Name of the field in the record group to add to.
field_data : arraylike
The data to add to the field.
"""
records_grp = self.h5['{}/{}/{}'.format(RUNS, run_idx, run_record_key)]
field = records_grp[field_name]
# make sure this is a feature vector
assert len(field_data.shape) > 1, \
"field_data must be a feature vector with the same number of dimensions as the number"
# of datase new frames
n_new_frames = field_data.shape[0]
# check whether it is a variable length record, by getting the
# record dataset dtype and using the checker to see if it is
# the vlen special type in h5py
if h5py.check_dtype(vlen=field.dtype) is not None:
# if it is we have to treat it differently, since it
# cannot be multidimensional
# if the dataset has no data in it we need to reshape it
if all([i == 0 for i in field.shape]):
# initialize this array
# if it is empty resize it to make an array the size of
# the new field_data with the maxshape for the feature
# dimensions
field.resize( (n_new_frames,) )
# set the new data to this
for i, row in enumerate(field_data):
field[i] = row
# otherwise just add the data
else:
# resize the array, which is only rank 1 because
# it holds variable length data
field.resize( (field.shape[0] + n_new_frames, ) )
# add each row to the newly made space
for i, row in enumerate(field_data):
field[(field.shape[0] - 1) + i] = row
# if it is not variable length we don't have to treat it
# differently
else:
# if this is empty we need to reshape the dataset to accomodate data
if all([i == 0 for i in field.shape]):
# check the feature shape against the maxshape which gives
# the feature dimensions for an empty dataset
assert field_data.shape[1:] == field.maxshape[1:], \
"field feature dimensions must be the same, i.e. all but the first dimension"
# if it is empty resize it to make an array the size of
# the new field_data with the maxshape for the feature
# dimensions
feature_dims = field.maxshape[1:]
field.resize( (n_new_frames, *feature_dims) )
# set the new data to this
field[0:, ...] = field_data
# otherwise just add the data
else:
# append to the dataset on the first dimension, keeping the
# others the same, these must be feature vectors and therefore
# must exist
field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )
# add the new data
field[-n_new_frames:, ...] = field_data
def _run_record_namedtuple(self, run_record_key):
"""Generate a namedtuple record type for a record group.
The class name will be formatted like '{}_Record' where the {}
will be replaced with the name of the record group.
Parameters
----------
run_record_key : str
Name of the record group
Returns
-------
RecordType : namedtuple
The record type to generate records for this record group.
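Examples
--------
Sketch only; assumes an open file ``wepy_h5`` with the standard
warping record group:

>>> Record = wepy_h5._run_record_namedtuple(WARPING)  # doctest: +SKIP
>>> Record._fields[0] == CYCLE_IDX  # the cycle index always leads the fields
True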
"""
Record = namedtuple('{}_Record'.format(run_record_key),
[CYCLE_IDX] + self.record_fields[run_record_key])
return Record
def _convert_record_field_to_table_column(self, run_idx, run_record_key, record_field):
"""Converts a dataset of feature vectors to more palatable values for
use in external datasets.
For single value feature vectors it unwraps them into single
values.
For 1-D feature vectors it casts them as tuples.
Anything of higher rank will raise an error.
Parameters
----------
run_idx : int
run_record_key : str
Name of the record group
record_field : str
Name of the field of the record group
Returns
-------
record_dset : list
Table-ified values
Raises
------
TypeError
If the field feature vector shape rank is greater than 1.
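Examples
--------
Sketch of the conversion behavior (run, record group, and field are
hypothetical): a dataset of single-valued feature vectors with shape
(n_records, 1) is unwrapped to a list of scalars, while a dataset of
1-D feature vectors with shape (n_records, 3) becomes a list of
3-tuples.

>>> wepy_h5._convert_record_field_to_table_column(0, RESAMPLING, 'decision_id')  # doctest: +SKIP
[0, 1, 1, 0]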
"""
# get the field dataset
rec_grp = self.records_grp(run_idx, run_record_key)
dset = rec_grp[record_field]
# if it is variable length or if it has more than one element
# cast all elements to tuples
if h5py.check_dtype(vlen=dset.dtype) is not None:
rec_dset = [tuple(value) for value in dset[:]]
# if it is not variable length make sure it is not more than a
# 1D feature vector
elif len(dset.shape) > 2:
raise TypeError(
"cannot convert fields with feature vectors more than 1 dimension,"
" was given {} for {}/{}".format(
dset.shape[1:], run_record_key, record_field))
# if it is only a rank 1 feature vector and it has more than
# one element make a tuple out of it
elif dset.shape[1] > 1:
rec_dset = [tuple(value) for value in dset[:]]
# otherwise just get the single value instead of keeping it as
# a single valued feature vector
else:
rec_dset = [value[0] for value in dset[:]]
return rec_dset
def _convert_record_fields_to_table_columns(self, run_idx, run_record_key):
"""Convert record group data to truncated namedtuple records.
This uses the specified record fields from the header settings
to choose which record group fields to apply this to.
Does no checking to make sure the fields are
"table-ifiable". If a field is not it will raise a TypeError.
Parameters
----------
run_idx : int
run_record_key : str
The name of the record group
Returns
-------
table_fields : dict of str : list
Mapping of the record group field to the table-ified values.
"""
fields = {}
for record_field in self.record_fields[run_record_key]:
fields[record_field] = self._convert_record_field_to_table_column(
run_idx, run_record_key, record_field)
return fields
def _make_records(self, run_record_key, cycle_idxs, fields):
"""Generate a list of proper (nametuple) records for a record group.
Parameters
----------
run_record_key : str
Name of the record group
cycle_idxs : list of int
The cycle indices you want to get records for.
fields : list of str
The fields to make record entries for.
Returns
-------
records : list of namedtuple objects
"""
Record = self._run_record_namedtuple(run_record_key)
# for each record we make a tuple and yield it
records = []
for record_idx in range(len(cycle_idxs)):
# make a record for this cycle
record_d = {CYCLE_IDX : cycle_idxs[record_idx]}
for record_field, column in fields.items():
datum = column[record_idx]
record_d[record_field] = datum
record = Record(*(record_d[key] for key in Record._fields))
records.append(record)
return records
def _run_records_sporadic(self, run_idxs, run_record_key):
"""Generate records for a sporadic record group for a multi-run
contig.
If multiple run indices are given assumes that these are a
contig (e.g. the second run index is a continuation of the
first and so on). This method is considered low-level and does
no checking to make sure this is true.
The cycle indices of records from "continuation" runs will be
modified so as the records will be indexed as if they are a
single run.
Uses the record fields settings to decide which fields to use.
Parameters
----------
run_idxs : list of int
The indices of the runs in the order they are in the contig
run_record_key : str
Name of the record group
Returns
-------
records : list of namedtuple objects
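Examples
--------
Worked sketch of the cycle index offsetting: if run 0 has 100 cycles
with warping records at cycles [12, 47], and run 1 continues run 0
with warping records at cycles [3, 20], then the records for the
contig [0, 1] are reported at cycles [12, 47, 103, 120].

>>> records = wepy_h5._run_records_sporadic([0, 1], WARPING)  # doctest: +SKIP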
"""
# we loop over the run_idxs in the contig and get the fields
# and cycle idxs for the whole contig
fields = None
cycle_idxs = np.array([], dtype=int)
# keep a cumulative total of the runs cycle idxs
prev_run_cycle_total = 0
for run_idx in run_idxs:
# get all the value columns from the datasets, and convert
# them to something amenable to a table
run_fields = self._convert_record_fields_to_table_columns(run_idx, run_record_key)
# we need to concatenate each field to the end of the
# field in the master dictionary, first we need to
# initialize it if it isn't already made
if fields is None:
# if it isn't initialized we just set it as this first
# run fields dictionary
fields = run_fields
else:
# if it is already initialized we need to go through
# each field and concatenate
for field_name, field_data in run_fields.items():
# just add it to the list of fields that will be concatenated later
fields[field_name].extend(field_data)
# get the cycle idxs for this run
rec_grp = self.records_grp(run_idx, run_record_key)
run_cycle_idxs = rec_grp[CYCLE_IDXS][:]
# add the total number of cycles that came before this run
# to each of the cycle idxs to get the cycle_idxs in terms
# of the full contig
run_contig_cycle_idxs = run_cycle_idxs + prev_run_cycle_total
# add these cycle indices to the records for the whole contig
cycle_idxs = np.hstack( (cycle_idxs, run_contig_cycle_idxs) )
# add the total number of cycle_idxs from this run to the
# running total
prev_run_cycle_total += self.num_run_cycles(run_idx)
# then make the records from the fields
records = self._make_records(run_record_key, cycle_idxs, fields)
return records
def _run_records_continual(self, run_idxs, run_record_key):
"""Generate records for a continual record group for a multi-run
contig.
If multiple run indices are given assumes that these are a
contig (e.g. the second run index is a continuation of the
first and so on). This method is considered low-level and does
no checking to make sure this is true.
The cycle indices of records from "continuation" runs will be
modified so as the records will be indexed as if they are a
single run.
Uses the record fields settings to decide which fields to use.
Parameters
----------
run_idxs : list of int
The indices of the runs in the order they are in the contig
run_record_key : str
Name of the record group
Returns
-------
records : list of namedtuple objects
"""
cycle_idxs = np.array([], dtype=int)
fields = None
prev_run_cycle_total = 0
for run_idx in run_idxs:
# get all the value columns from the datasets, and convert
# them to something amenable to a table
run_fields = self._convert_record_fields_to_table_columns(run_idx, run_record_key)
# we need to concatenate each field to the end of the
# field in the master dictionary, first we need to
# initialize it if it isn't already made
if fields is None:
# if it isn't initialized we just set it as this first
# run fields dictionary
fields = run_fields
else:
# if it is already initialized we need to go through
# each field and concatenate
for field_name, field_data in run_fields.items():
# just add it to the list of fields that will be concatenated later
fields[field_name].extend(field_data)
# get one of the fields (if any to iterate over)
record_fields = self.record_fields[run_record_key]
main_record_field = record_fields[0]
# make the cycle idxs from that
run_rec_grp = self.records_grp(run_idx, run_record_key)
run_cycle_idxs = np.array(range(run_rec_grp[main_record_field].shape[0]))
# add the total number of cycles that came before this run
# to each of the cycle idxs to get the cycle_idxs in terms
# of the full contig
run_contig_cycle_idxs = run_cycle_idxs + prev_run_cycle_total
# add these cycle indices to the records for the whole contig
cycle_idxs = np.hstack( (cycle_idxs, run_contig_cycle_idxs) )
# add the total number of cycle_idxs from this run to the
# running total
prev_run_cycle_total += self.num_run_cycles(run_idx)
# then make the records from the fields
records = self._make_records(run_record_key, cycle_idxs, fields)
return records
def _get_contiguous_traj_field(self, run_idx, traj_idx, field_path, frames=None):
"""Access actual data for a trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Trajectory field name to access
frames : list of int, optional
The indices of the frames to return if you don't want all of them.
Returns
-------
field_data : arraylike
The data requested for the field.
"""
full_path = '{}/{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx, field_path)
if frames is None:
field = self._h5[full_path][:]
else:
field = self._h5[full_path][list(frames)]
return field
def _get_sparse_traj_field(self, run_idx, traj_idx, field_path, frames=None, masked=True):
"""Access actual data for a trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Trajectory field name to access
frames : list of int, optional
The indices of the frames to return if you don't want all of them.
masked : bool
If True returns the array data as numpy masked array, and
only the available values if False.
Returns
-------
field_data : arraylike
The data requested for the field.
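Examples
--------
Sketch; assumes a hypothetical sparse field 'alt_reps/binding_site'
saved every 10th cycle of a 100-frame trajectory:

>>> data = wepy_h5._get_sparse_traj_field(0, 0, 'alt_reps/binding_site')  # doctest: +SKIP
>>> data.shape[0]             # padded out to the full number of frames
100
>>> bool(data.mask[1].all())  # frames with no saved value are masked
True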
"""
traj_path = '{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)
traj_grp = self.h5[traj_path]
field = traj_grp[field_path]
n_frames = traj_grp[POSITIONS].shape[0]
if frames is None:
data = field[DATA][:]
# if it is to be masked make the masked array
if masked:
sparse_idxs = field[SPARSE_IDXS][:]
filled_data = np.full( (n_frames, *data.shape[1:]), np.nan)
filled_data[sparse_idxs] = data
mask = np.full( (n_frames, *data.shape[1:]), True)
mask[sparse_idxs] = False
data = np.ma.masked_array(filled_data, mask=mask)
else:
# get the sparse idxs and the frames to slice from the
# data
sparse_idxs = field[SPARSE_IDXS][:]
# we get a boolean array of the rows of the data table
# that we are to slice from
sparse_frame_idxs = np.argwhere(np.isin(sparse_idxs, frames))
data = field[DATA][list(sparse_frame_idxs)]
# if it is to be masked make the masked array
if masked:
# the empty arrays the size of the number of requested frames
filled_data = np.full( (len(frames), *field[DATA].shape[1:]), np.nan)
mask = np.full( (len(frames), *field[DATA].shape[1:]), True )
# take the data which exists and is part of the frames
# selection, and put it into the filled data where it is
# supposed to be
filled_data[np.isin(frames, sparse_idxs)] = data
# unmask the present values
mask[np.isin(frames, sparse_idxs)] = False
data = np.ma.masked_array(filled_data, mask=mask)
return data
def _add_run_field(self,
run_idx,
field_path,
data,
sparse_idxs=None,
force=False):
"""Add a trajectory field to all trajectories in a run.
By enforcing adding it to all trajectories at one time we
promote in-run consistency.
Parameters
----------
run_idx : int
field_path : str
Name to set the trajectory field as. Can be compound.
data : arraylike of shape (n_trajectories, n_cycles, feature_vector_shape[0],...)
The data for all trajectories to be added.
sparse_idxs : list of list of int, optional
If the data you are adding is sparse, give for each trajectory the cycle indices the values apply to.
force : bool
If True the consistency checks of the data against the run (number of trajectories and frames) are skipped.
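Examples
--------
Sketch of adding a per-frame observable to run 0 (the field name and
values are hypothetical); one array per trajectory in the run is
required:

>>> n_trajs = wepy_h5.num_run_trajs(0)  # doctest: +SKIP
>>> rmsds = [np.zeros((wepy_h5.num_run_cycles(0), 1)) for _ in range(n_trajs)]  # doctest: +SKIP
>>> wepy_h5._add_run_field(0, 'observables/rmsd', rmsds)  # doctest: +SKIP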
"""
# TODO, SNIPPET: check that we have the right permissions
# if field_exists:
# # if we are in a permissive write mode we delete the
# # old dataset and add the new one, overwriting old data
# if self.mode in ['w', 'w-', 'x', 'r+']:
# logging.info("Dataset already present. Overwriting.")
# del obs_grp[field_name]
# obs_grp.create_dataset(field_name, data=results)
# # this will happen in 'c' and 'c-' modes
# else:
# raise RuntimeError(
# "Dataset already exists and file is in concatenate mode ('c' or 'c-')")
# check that the data has the correct number of trajectories
if not force:
assert len(data) == self.num_run_trajs(run_idx),\
"The number of trajectories in data, {}, is different than the number"\
"of trajectories in the run, {}.".format(len(data), self.num_run_trajs(run_idx))
# for each trajectory check that the data is compliant
for traj_idx, traj_data in enumerate(data):
if not force:
# check that the number of frames is not larger than that for the run
if traj_data.shape[0] > self.num_run_cycles(run_idx):
raise ValueError("The number of frames in data for traj {} , {},"
"is larger than the number of frames"
"for this run, {}.".format(
traj_idx, data.shape[1], self.num_run_cycles(run_idx)))
# if sparse idxs were given we check that there are the right
# number of them, i.e. that they match the number of frames
# given for this trajectory
elif sparse_idxs is not None:
    if traj_data.shape[0] != len(sparse_idxs[traj_idx]):
        raise ValueError("The number of frames provided for traj {}, {}, "
                         "was less than the total number of frames, {}, "
                         "but an incorrect number of sparse idxs were supplied, {}."\
                         .format(traj_idx, traj_data.shape[0],
                                 self.num_run_cycles(run_idx), len(sparse_idxs[traj_idx])))
# if there were strictly fewer frames given and the
# sparse idxs were not given we need to raise an error
elif (traj_data.shape[0] < self.num_run_cycles(run_idx)):
    raise ValueError("The number of frames provided for traj {}, {}, "
                     "was less than the total number of frames, {}, "
                     "but sparse_idxs were not supplied.".format(
                         traj_idx, traj_data.shape[0],
                         self.num_run_cycles(run_idx)))
# add it to each traj
for i, idx_tup in enumerate(self.run_traj_idx_tuples([run_idx])):
if sparse_idxs is None:
self._add_traj_field_data(*idx_tup, field_path, data[i])
else:
self._add_traj_field_data(*idx_tup, field_path, data[i],
sparse_idxs=sparse_idxs[i])
def _add_field(self, field_path, data, sparse_idxs=None,
force=False):
"""Add a trajectory field to all runs in a file.
Parameters
----------
field_path : str
Name of trajectory field
data : list of arraylike
Each element of this list corresponds to a single run. The
elements of which are arraylikes of shape (n_trajectories,
n_cycles, feature_vector_shape[0],...) for each run.
sparse_idxs : list of list of int
The list of cycle indices to set for the sparse fields. If
None, no trajectories are set as sparse.
"""
for i, run_idx in enumerate(self.run_idxs):
if sparse_idxs is not None:
self._add_run_field(run_idx, field_path, data[i], sparse_idxs=sparse_idxs[i],
force=force)
else:
self._add_run_field(run_idx, field_path, data[i],
force=force)
#### Public Methods
### File Utilities
@property
def filename(self):
"""The path to the underlying HDF5 file."""
return self._filename
def open(self, mode=None):
"""Open the underlying HDF5 file for access.
Parameters
----------
mode : str
Valid mode spec. Opens the HDF5 file in this mode if given
otherwise uses the existing mode.
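Examples
--------
Typical access pattern for an existing file object (illustrative
only); the object can also be used as a context manager, which calls
open and close automatically:

>>> wepy_h5.open('r')  # doctest: +SKIP
>>> wepy_h5.close()    # doctest: +SKIP
>>> with wepy_h5:      # doctest: +SKIP
...     n_runs = len(wepy_h5.run_idxs)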
"""
if mode is None:
mode = self.mode
if self.closed:
self.set_mode(mode)
self._h5 = h5py.File(self._filename, mode,
libver=H5PY_LIBVER, swmr=self.swmr_mode)
self.closed = False
else:
raise IOError("This file is already open")
def close(self):
"""Close the underlying HDF5 file. """
if not self.closed:
self._h5.flush()
self._h5.close()
self.closed = True
@property
def mode(self):
"""The WepyHDF5 mode this object was created with."""
return self._wepy_mode
@mode.setter
def mode(self, mode):
"""Set the mode for opening the file with."""
self.set_mode(mode)
def set_mode(self, mode):
"""Set the mode for opening the file with."""
if not self.closed:
raise AttributeError("Cannot set the mode while the file is open.")
self._set_h5_mode(mode)
self._wepy_mode = mode
@property
def h5_mode(self):
"""The h5py.File mode the HDF5 file currently has."""
return self._h5.mode
def _set_h5_mode(self, h5_mode):
"""Set the mode to open the HDF5 file with.
This really shouldn't be set without using the main wepy mode
as they need to be aligned.
"""
if not self.closed:
raise AttributeError("Cannot set the mode while the file is open.")
self._h5py_mode = h5_mode
@property
def h5(self):
"""The underlying h5py.File object."""
return self._h5
### h5py object access
def run(self, run_idx):
"""Get the h5py.Group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_group : h5py.Group
"""
return self._h5['{}/{}'.format(RUNS, int(run_idx))]
def traj(self, run_idx, traj_idx):
"""Get an h5py.Group trajectory group.
Parameters
----------
run_idx : int
traj_idx : int
Returns
-------
traj_group : h5py.Group
"""
return self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
def run_trajs(self, run_idx):
"""Get the trajectories group for a run.
Parameters
----------
run_idx : int
Returns
-------
trajectories_grp : h5py.Group
"""
return self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)]
@property
def runs(self):
"""The runs group."""
return self.h5[RUNS]
def run_grp(self, run_idx):
"""A group for a single run."""
return self.runs["{}".format(run_idx)]
def run_start_snapshot_hash(self, run_idx):
"""Hash identifier for the starting snapshot of a run from
orchestration.
"""
return self.run_grp(run_idx).attrs[RUN_START_SNAPSHOT_HASH]
def run_end_snapshot_hash(self, run_idx):
"""Hash identifier for the ending snapshot of a run from
orchestration.
"""
return self.run_grp(run_idx).attrs[RUN_END_SNAPSHOT_HASH]
def set_run_start_snapshot_hash(self, run_idx, snaphash):
"""Set the starting snapshot hash identifier for a run from
orchestration.
"""
if RUN_START_SNAPSHOT_HASH not in self.run_grp(run_idx).attrs:
self.run_grp(run_idx).attrs[RUN_START_SNAPSHOT_HASH] = snaphash
else:
raise AttributeError("The snapshot has already been set.")
def set_run_end_snapshot_hash(self, run_idx, snaphash):
"""Set the ending snapshot hash identifier for a run from
orchestration.
"""
if RUN_END_SNAPSHOT_HASH not in self.run_grp(run_idx).attrs:
self.run_grp(run_idx).attrs[RUN_END_SNAPSHOT_HASH] = snaphash
else:
raise AttributeError("The snapshot has already been set.")
@property
def settings_grp(self):
"""The header settings group."""
settings_grp = self.h5[SETTINGS]
return settings_grp
def decision_grp(self, run_idx):
"""Get the decision enumeration group for a run.
Parameters
----------
run_idx : int
Returns
-------
decision_grp : h5py.Group
"""
return self.run(run_idx)[DECISION]
def init_walkers_grp(self, run_idx):
"""Get the group for the initial walkers for a run.
Parameters
----------
run_idx : int
Returns
-------
init_walkers_grp : h5py.Group
"""
return self.run(run_idx)[INIT_WALKERS]
def records_grp(self, run_idx, run_record_key):
"""Get a record group h5py.Group for a run.
Parameters
----------
run_idx : int
run_record_key : str
Name of the record group
Returns
-------
run_record_group : h5py.Group
"""
path = '{}/{}/{}'.format(RUNS, run_idx, run_record_key)
return self.h5[path]
def resampling_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, RESAMPLING)
def resampler_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, RESAMPLER)
def warping_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, WARPING)
def bc_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, BC)
def progress_grp(self, run_idx):
"""Get this record group for a run.
Parameters
----------
run_idx : int
Returns
-------
run_record_group : h5py.Group
"""
return self.records_grp(run_idx, PROGRESS)
def iter_runs(self, idxs=False, run_sel=None):
"""Generator for iterating through the runs of a file.
Parameters
----------
idxs : bool
If True yields the run index in addition to the group.
run_sel : list of int, optional
If not None should be a list of the runs you want to iterate over.
Yields
------
run_idx : int, if idxs is True
run_group : h5py.Group
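Examples
--------
Sketch (file contents hypothetical):

>>> for run_idx, run_grp in wepy_h5.iter_runs(idxs=True):  # doctest: +SKIP
...     print(run_idx, wepy_h5.num_run_trajs(run_idx))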
"""
if run_sel is None:
run_sel = self.run_idxs
for run_idx in self.run_idxs:
if run_idx in run_sel:
run = self.run(run_idx)
if idxs:
yield run_idx, run
else:
yield run
def iter_trajs(self, idxs=False, traj_sel=None):
"""Generator for iterating over trajectories in a file.
Parameters
----------
idxs : bool
If True returns a tuple of the run index and trajectory
index in addition to the trajectory group.
traj_sel : list of int, optional
If not None is a list of tuples of (run_idx, traj_idx)
selecting which trajectories to iterate over.
Yields
------
traj_id : tuple of int, if idxs is True
A tuple of (run_idx, traj_idx) for the group
trajectory : h5py.Group
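Examples
--------
Sketch (file contents hypothetical):

>>> for (run_idx, traj_idx), traj in wepy_h5.iter_trajs(idxs=True):  # doctest: +SKIP
...     n_frames = traj[POSITIONS].shape[0]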
"""
# set the selection of trajectories to iterate over
if traj_sel is None:
idx_tups = self.run_traj_idx_tuples()
else:
idx_tups = traj_sel
# get each traj for each idx_tup and yield them for the generator
for run_idx, traj_idx in idx_tups:
traj = self.traj(run_idx, traj_idx)
if idxs:
yield (run_idx, traj_idx), traj
else:
yield traj
def iter_run_trajs(self, run_idx, idxs=False):
"""Iterate over the trajectories of a run.
Parameters
----------
run_idx : int
idxs : bool
If True returns a tuple of the run index and trajectory
index in addition to the trajectory group.
Returns
-------
iter_trajs_generator : generator for the iter_trajs method
"""
run_sel = self.run_traj_idx_tuples([run_idx])
return self.iter_trajs(idxs=idxs, traj_sel=run_sel)
### Settings
@property
def defined_traj_field_names(self):
"""A list of the settings defined field names all trajectories have in the file."""
return list(self.field_feature_shapes.keys())
@property
def observable_field_names(self):
"""Returns a list of the names of the observables that all trajectories have.
If this encounters observable fields that don't occur in all
trajectories (inconsistency) raises an inconsistency error.
"""
n_trajs = self.num_trajs
field_names = Counter()
for traj in self.iter_trajs():
for name in list(traj['observables']):
field_names[name] += 1
# if any of the field names has not occurred for every
# trajectory we raise an error
for field_name, count in field_names.items():
if count != n_trajs:
raise TypeError("observable field names are inconsistent")
# otherwise return the field names for the observables
return list(field_names.keys())
def _check_traj_field_consistency(self, field_names):
"""Checks that every trajectory has the given fields across
the entire dataset.
Parameters
----------
field_names : list of str
The field names to check for.
Returns
-------
consistent : bool
True if all trajs have the fields, False otherwise
"""
n_trajs = self.num_trajs
field_counts = Counter()
for traj in self.iter_trajs():
    for name in field_names:
        if name in traj:
            field_counts[name] += 1
# if any of the field names has not occurred for every
# trajectory the fields are not consistent
for field_name, count in field_counts.items():
    if count != n_trajs:
        return False
return True
@property
def record_fields(self):
"""The record fields for each record group which are selected for inclusion in the truncated records.
These are the fields which are considered to be table-ified.
Returns
-------
record_fields : dict of str : list of str
            Mapping of record group name to a list of the record group fields.
"""
record_fields_grp = self.settings_grp[RECORD_FIELDS]
record_fields_dict = {}
for group_name, dset in record_fields_grp.items():
record_fields_dict[group_name] = list(dset)
return record_fields_dict
@property
def sparse_fields(self):
"""The trajectory fields that are sparse."""
return self.h5['{}/{}'.format(SETTINGS, SPARSE_FIELDS)][:]
@property
def main_rep_idxs(self):
"""The indices of the atoms included from the full topology in the default 'positions' trajectory """
if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:
return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]
else:
return None
@property
def alt_reps_idxs(self):
"""Mapping of the names of the alt reps to the indices of the atoms
from the topology that they include in their datasets."""
idxs_grp = self.h5['{}/{}'.format(SETTINGS, ALT_REPS_IDXS)]
return {name : ds[:] for name, ds in idxs_grp.items()}
@property
def alt_reps(self):
"""Names of the alt reps."""
idxs_grp = self.h5['{}/{}'.format(SETTINGS, ALT_REPS_IDXS)]
return {name for name in idxs_grp.keys()}
@property
def field_feature_shapes(self):
"""Mapping of the names of the trajectory fields to their feature
vector shapes."""
shapes_grp = self.h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_SHAPES_STR)]
field_paths = _iter_field_paths(shapes_grp)
shapes = {}
for field_path in field_paths:
shape = shapes_grp[field_path][()]
if np.isnan(shape).all():
shapes[field_path] = None
else:
shapes[field_path] = shape
return shapes
@property
def field_feature_dtypes(self):
"""Mapping of the names of the trajectory fields to their feature
vector numpy dtypes."""
dtypes_grp = self.h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR)]
field_paths = _iter_field_paths(dtypes_grp)
dtypes = {}
for field_path in field_paths:
dtype_str = dtypes_grp[field_path][()]
# if there is 'None' flag for the dtype then return None
if dtype_str == NONE_STR:
dtypes[field_path] = None
else:
dtype_obj = json.loads(dtype_str)
dtype_obj = [tuple(d) for d in dtype_obj]
dtype = np.dtype(dtype_obj)
dtypes[field_path] = dtype
return dtypes
@property
def continuations(self):
"""The continuation relationships in this file."""
return self.settings_grp[CONTINUATIONS][:]
@property
def metadata(self):
"""File metadata (h5py.attrs)."""
return dict(self._h5.attrs)
def decision_enum(self, run_idx):
"""Mapping of decision enumerated names to their integer representations.
Parameters
----------
run_idx : int
Returns
-------
decision_enum : dict of str : int
Mapping of the decision ID string to the integer representation.
See Also
--------
WepyHDF5.decision_value_names : for the reverse mapping
"""
enum_grp = self.decision_grp(run_idx)
enum = {}
for decision_name, dset in enum_grp.items():
enum[decision_name] = dset[()]
return enum
def decision_value_names(self, run_idx):
"""Mapping of the integer values for decisions to the decision ID strings.
Parameters
----------
run_idx : int
Returns
-------
decision_enum : dict of int : str
Mapping of the decision integer to the decision ID string representation.
See Also
--------
WepyHDF5.decision_enum : for the reverse mapping
"""
enum_grp = self.decision_grp(run_idx)
rev_enum = {}
for decision_name, dset in enum_grp.items():
value = dset[()]
rev_enum[value] = decision_name
return rev_enum
### Topology
def get_topology(self, alt_rep=POSITIONS):
"""Get the JSON topology for a particular represenation of the positions.
By default gives the topology for the main 'positions' field
(when alt_rep 'positions'). To get the full topology the file
was initialized with set `alt_rep` to `None`. Topologies for
alternative representations (subfields of 'alt_reps') can be
obtained by passing in the key for that alt_rep. For example,
'all_atoms' for the field in alt_reps called 'all_atoms'.
Parameters
----------
alt_rep : str
The base name of the alternate representation, or 'positions', or None.
Returns
-------
topology : str
The JSON topology string for the representation.
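        Examples
        --------
        A minimal sketch, assuming ``wepy_h5`` is an open ``WepyHDF5``:
        >>> main_top = wepy_h5.get_topology()              # main 'positions' subset
        >>> full_top = wepy_h5.get_topology(alt_rep=None)  # full system topology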
"""
top = self.topology
# if no alternative representation is given we just return the
# full topology
if alt_rep is None:
pass
# otherwise we either give the main representation topology
# subset
elif alt_rep == POSITIONS:
top = json_top_subset(top, self.main_rep_idxs)
# or choose one of the alternative representations
elif alt_rep in self.alt_reps_idxs:
top = json_top_subset(top, self.alt_reps_idxs[alt_rep])
# and raise an error if the given alternative representation
# is not given
else:
raise ValueError("alt_rep {} not found".format(alt_rep))
return top
@property
def topology(self):
"""The topology for the full simulated system.
May not be the main representation in the POSITIONS field; for
that use the `get_topology` method.
Returns
-------
topology : str
The JSON topology string for the full representation.
"""
return self._h5[TOPOLOGY][()]
def get_mdtraj_topology(self, alt_rep=POSITIONS):
"""Get an mdtraj.Topology object for a system representation.
By default gives the topology for the main 'positions' field
(when alt_rep 'positions'). To get the full topology the file
was initialized with set `alt_rep` to `None`. Topologies for
alternative representations (subfields of 'alt_reps') can be
obtained by passing in the key for that alt_rep. For example,
'all_atoms' for the field in alt_reps called 'all_atoms'.
Parameters
----------
alt_rep : str
The base name of the alternate representation, or 'positions', or None.
Returns
-------
topology : str
The JSON topology string for the full representation.
"""
json_top = self.get_topology(alt_rep=alt_rep)
return json_to_mdtraj_topology(json_top)
## Initial walkers
def initial_walker_fields(self, run_idx, fields, walker_idxs=None):
"""Get fields from the initial walkers of the simulation.
Parameters
----------
run_idx : int
Run to get initial walkers for.
fields : list of str
Names of the fields you want to retrieve.
walker_idxs : None or list of int
If None returns all of the walkers fields, otherwise a
list of ints that are a selection from those walkers.
Returns
-------
walker_fields : dict of str : array of shape
Dictionary mapping fields to the values for all
walkers. Frames will be either in counting order if no
indices were requested or the order of the walker indices
as given.
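        Examples
        --------
        A minimal sketch, assuming run 0 exists and the field-name
        constants from this module are in scope:
        >>> fields = wepy_h5.initial_walker_fields(0, [POSITIONS, BOX_VECTORS])
        >>> n_walkers = fields[POSITIONS].shape[0]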
"""
# set the walker indices if not specified
if walker_idxs is None:
walker_idxs = range(self.num_init_walkers(run_idx))
init_walker_fields = {field : [] for field in fields}
# for each walker go through and add the selected fields
for walker_idx in walker_idxs:
init_walker_grp = self.init_walkers_grp(run_idx)[str(walker_idx)]
for field in fields:
# we remove the first dimension because we just want
# them as a single frame
init_walker_fields[field].append(init_walker_grp[field][:][0])
# convert the field values to arrays
init_walker_fields = {field : np.array(val) for field, val in init_walker_fields.items()}
return init_walker_fields
def initial_walkers_to_mdtraj(self, run_idx, walker_idxs=None, alt_rep=POSITIONS):
"""Generate an mdtraj Trajectory from a trace of frames from the runs.
Uses the default fields for positions (unless an alternate
representation is specified) and box vectors which are assumed
to be present in the trajectory fields.
The time value for the mdtraj trajectory is set to the cycle
indices for each trace frame.
This is useful for converting WepyHDF5 data to common
molecular dynamics data formats accessible through the mdtraj
library.
Parameters
----------
run_idx : int
Run to get initial walkers for.
fields : list of str
Names of the fields you want to retrieve.
walker_idxs : None or list of int
If None returns all of the walkers fields, otherwise a
list of ints that are a selection from those walkers.
alt_rep : None or str
If None uses default 'positions' representation otherwise
chooses the representation from the 'alt_reps' compound field.
Returns
-------
traj : mdtraj.Trajectory
"""
rep_path = self._choose_rep_path(alt_rep)
init_walker_fields = self.initial_walker_fields(run_idx, [rep_path, BOX_VECTORS],
walker_idxs=walker_idxs)
return self.traj_fields_to_mdtraj(init_walker_fields, alt_rep=alt_rep)
### Counts and Indexing
@property
def num_atoms(self):
"""The number of atoms in the full topology representation."""
return self.h5['{}/{}'.format(SETTINGS, N_ATOMS)][()]
@property
def num_dims(self):
"""The number of spatial dimensions in the positions and alt_reps trajectory fields."""
return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]
@property
def num_runs(self):
"""The number of runs in the file."""
return len(self._h5[RUNS])
@property
def num_trajs(self):
"""The total number of trajectories in the entire file."""
return len(list(self.run_traj_idx_tuples()))
def num_init_walkers(self, run_idx):
"""The number of initial walkers for a run.
Parameters
----------
run_idx : int
Returns
-------
n_walkers : int
"""
return len(self.init_walkers_grp(run_idx))
def num_walkers(self, run_idx, cycle_idx):
"""Get the number of walkers at a given cycle in a run.
Parameters
----------
run_idx : int
cycle_idx : int
Returns
-------
n_walkers : int
"""
if cycle_idx >= self.num_run_cycles(run_idx):
raise ValueError(
f"Run {run_idx} has {self.num_run_cycles(run_idx)} cycles, {cycle_idx} requested")
# TODO: currently we do not have a well-defined mechanism for
# actually storing variable number of walkers in the
# trajectory data so just return the number of trajectories
return self.num_run_trajs(run_idx)
def num_run_trajs(self, run_idx):
"""The number of trajectories in a run.
Parameters
----------
run_idx : int
Returns
-------
n_trajs : int
"""
return len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])
def num_run_cycles(self, run_idx):
"""The number of cycles in a run.
Parameters
----------
run_idx : int
Returns
-------
n_cycles : int
"""
return self.num_traj_frames(run_idx, 0)
def num_traj_frames(self, run_idx, traj_idx):
"""The number of frames in a given trajectory.
Parameters
----------
run_idx : int
traj_idx : int
Returns
-------
n_frames : int
"""
return self.traj(run_idx, traj_idx)[POSITIONS].shape[0]
@property
def run_idxs(self):
"""The indices of the runs in the file."""
return list(range(len(self._h5[RUNS])))
def run_traj_idxs(self, run_idx):
"""The indices of trajectories in a run.
Parameters
----------
run_idx : int
Returns
-------
traj_idxs : list of int
"""
return list(range(len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])))
def run_traj_idx_tuples(self, runs=None):
"""Get identifier tuples (run_idx, traj_idx) for all trajectories in
all runs.
Parameters
----------
runs : list of int, optional
If not None, a list of run indices to restrict to.
Returns
-------
run_traj_tuples : list of tuple of int
A listing of all trajectories by their identifying tuple
of (run_idx, traj_idx).
"""
tups = []
if runs is None:
run_idxs = self.run_idxs
else:
run_idxs = runs
for run_idx in run_idxs:
for traj_idx in self.run_traj_idxs(run_idx):
tups.append((run_idx, traj_idx))
return tups
def get_traj_field_cycle_idxs(self, run_idx, traj_idx, field_path):
"""Returns the cycle indices for a sparse trajectory field.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Name of the trajectory field
Returns
-------
cycle_idxs : arraylike of int
"""
traj_path = '{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)
if not field_path in self._h5[traj_path]:
raise KeyError("key for field {} not found".format(field_path))
# if the field is not sparse just return the cycle indices for
# that run
if field_path not in self.sparse_fields:
cycle_idxs = np.array(range(self.num_run_cycles(run_idx)))
else:
cycle_idxs = self._h5[traj_path][field_path][SPARSE_IDXS][:]
return cycle_idxs
def next_run_idx(self):
"""The index of the next run if it were to be added.
Because runs are named as the integer value of the order they
were added this gives the index of the next run that would be
added.
Returns
-------
next_run_idx : int
"""
return self.num_runs
def next_run_traj_idx(self, run_idx):
"""The index of the next trajectory for this run.
Parameters
----------
run_idx : int
Returns
-------
next_traj_idx : int
"""
return self.num_run_trajs(run_idx)
### Aggregation
def is_run_contig(self, run_idxs):
"""This method checks that if a given list of run indices is a valid
contig or not.
Parameters
----------
run_idxs : list of int
The run indices that would make up the contig in order.
Returns
-------
is_contig : bool
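        Examples
        --------
        A minimal sketch; [0, 1] is a valid contig only if a
        continuation of run 0 by run 1 was recorded:
        >>> wepy_h5.is_run_contig([0, 1])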
"""
run_idx_continuations = [np.array([run_idxs[idx+1], run_idxs[idx]])
for idx in range(len(run_idxs)-1)]
#gets the contigs array
continuations = self.settings_grp[CONTINUATIONS][:]
# checks if sub contigs are in contigs list or not.
for run_continuous in run_idx_continuations:
contig = False
for continuous in continuations:
if np.array_equal(run_continuous, continuous):
contig = True
if not contig:
return False
return True
def clone(self, path, mode='x'):
"""Clone the header information of this file into another file.
Clones this WepyHDF5 file without any of the actual runs and run
data. This includes the topology, units, sparse_fields,
feature shapes and dtypes, alt_reps, and main representation
information.
This method will flush the buffers for this file.
Does not preserve metadata pertaining to inter-run
relationships like continuations.
Parameters
----------
path : str
File path to save the new file.
mode : str
The mode to open the new file with.
Returns
-------
        new_file : WepyHDF5
            A WepyHDF5 object for the new file; it will be closed.
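        Examples
        --------
        A minimal sketch (the file path is illustrative only):
        >>> template = wepy_h5.clone('empty_template.wepy.h5', mode='x')
        >>> template.closed
        True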
"""
assert mode in ['w', 'w-', 'x'], "must be opened in a file creation mode"
# we manually construct an HDF5 and copy the groups over
new_h5 = h5py.File(path, mode=mode, libver=H5PY_LIBVER)
new_h5.require_group(RUNS)
# flush the datasets buffers
self.h5.flush()
new_h5.flush()
# copy the existing datasets to the new one
h5py.h5o.copy(self._h5.id, TOPOLOGY.encode(), new_h5.id, TOPOLOGY.encode())
h5py.h5o.copy(self._h5.id, UNITS.encode(), new_h5.id, UNITS.encode())
h5py.h5o.copy(self._h5.id, SETTINGS.encode(), new_h5.id, SETTINGS.encode())
# now make a WepyHDF5 object in "expert_mode" which means it
        # is just empty and we construct it manually, "surgically" as I
# like to call it
new_wepy_h5 = WepyHDF5(path, expert_mode=True)
# perform the surgery:
# attach the h5py.File
new_wepy_h5._h5 = new_h5
# set the wepy mode to read-write since the creation flags
# were already used in construction of the h5py.File object
new_wepy_h5._wepy_mode = 'r+'
new_wepy_h5._h5py_mode = 'r+'
# for the settings we need to get rid of the data for interun
# relationships like the continuations, so we reinitialize the
# continuations for the new file
new_wepy_h5._init_continuations()
# close the h5py.File and set the attribute to closed
new_wepy_h5._h5.close()
new_wepy_h5.closed = True
# return the runless WepyHDF5 object
return new_wepy_h5
def link_run(self, filepath, run_idx, continue_run=None, **kwargs):
"""Add a run from another file to this one as an HDF5 external
link.
Parameters
----------
filepath : str
File path to the HDF5 file that the run is on.
run_idx : int
The run index from the target file you want to link.
continue_run : int, optional
The run from the linking WepyHDF5 file you want the target
linked run to continue.
kwargs : dict
Adds metadata (h5py.attrs) to the linked run.
Returns
-------
linked_run_idx : int
The index of the linked run in the linking file.
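        Examples
        --------
        A minimal sketch (the file path and run index are illustrative only):
        >>> new_idx = wepy_h5.link_run('other_results.wepy.h5', 0)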
"""
# link to the external run
ext_run_link = h5py.ExternalLink(filepath, '{}/{}'.format(RUNS, run_idx))
# the run index in this file, as determined by the counter
here_run_idx = self.next_run_idx()
# set the local run as the external link to the other run
self._h5['{}/{}'.format(RUNS, here_run_idx)] = ext_run_link
# run the initialization routines for adding a run
self._add_run_init(here_run_idx, continue_run=continue_run)
run_grp = self._h5['{}/{}'.format(RUNS, here_run_idx)]
# add metadata if given
for key, val in kwargs.items():
if key != RUN_IDX:
run_grp.attrs[key] = val
else:
warn('run_idx metadata is set by wepy and cannot be used', RuntimeWarning)
return here_run_idx
def link_file_runs(self, wepy_h5_path):
"""Link all runs from another WepyHDF5 file.
This preserves continuations within that file. This will open
the file if not already opened.
Parameters
----------
wepy_h5_path : str
Filepath to the file you want to link runs from.
Returns
-------
new_run_idxs : list of int
The new run idxs from the linking file.
"""
wepy_h5 = WepyHDF5(wepy_h5_path, mode='r')
with wepy_h5:
ext_run_idxs = wepy_h5.run_idxs
continuations = wepy_h5.continuations
# add the runs
new_run_idxs = []
for ext_run_idx in ext_run_idxs:
# link the next run, and get its new run index
new_run_idx = self.link_run(wepy_h5_path, ext_run_idx)
# save that run idx
new_run_idxs.append(new_run_idx)
# copy the continuations over translating the run idxs,
# for each continuation in the other files continuations
for continuation in continuations:
# translate each run index from the external file
# continuations to the run idxs they were just assigned in
# this file
self.add_continuation(new_run_idxs[continuation[0]],
new_run_idxs[continuation[1]])
return new_run_idxs
def extract_run(self, filepath, run_idx,
continue_run=None,
run_slice=None,
**kwargs):
"""Add a run from another file to this one by copying it and
truncating it if necessary.
Parameters
----------
filepath : str
File path to the HDF5 file that the run is on.
run_idx : int
The run index from the target file you want to link.
continue_run : int, optional
The run from the linking WepyHDF5 file you want the target
linked run to continue.
        run_slice : optional
            A slice specification used to truncate the copied run;
            passed through to `copy_run_slice`.
kwargs : dict
Adds metadata (h5py.attrs) to the linked run.
Returns
-------
        here_run_idx : int
            The index of the copied run in this file.
"""
# close ourselves if not already done, so we can write using
# the lower level API
was_open = False
if not self.closed:
self.close()
was_open = True
# do the copying
# open the other file and get the runs in it and the
# continuations it has
wepy_h5 = WepyHDF5(filepath, mode='r')
with self:
# normalize our HDF5s path
self_path = osp.realpath(self.filename)
# the run index in this file, as determined by the counter
here_run_idx = self.next_run_idx()
# get the group name for the new run in this HDF5
target_grp_path = "/runs/{}".format(here_run_idx)
with wepy_h5:
# link the next run, and get its new run index
new_h5 = wepy_h5.copy_run_slice(run_idx, self_path,
target_grp_path,
run_slice=run_slice,
mode='r+')
# close it since we are done
new_h5.close()
with self:
# run the initialization routines for adding a run, just
# sets some metadata
self._add_run_init(here_run_idx, continue_run=continue_run)
run_grp = self._h5['{}/{}'.format(RUNS, here_run_idx)]
# add metadata if given
for key, val in kwargs.items():
if key != RUN_IDX:
run_grp.attrs[key] = val
else:
warn('run_idx metadata is set by wepy and cannot be used', RuntimeWarning)
if was_open:
self.open()
return here_run_idx
def extract_file_runs(self, wepy_h5_path,
run_slices=None):
"""Extract (copying and truncating appropriately) all runs from
another WepyHDF5 file.
This preserves continuations within that file. This will open
the file if not already opened.
Parameters
----------
        wepy_h5_path : str
            Filepath to the file you want to extract runs from.
        run_slices : dict of int : slice spec, optional
            Mapping of run indices in the source file to the slice
            specification used to truncate that run when copying.
        Returns
        -------
        new_run_idxs : list of int
            The indices of the newly extracted runs in this file.
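        Examples
        --------
        A minimal sketch; the path is illustrative and the (start, stop)
        tuple is only an assumed form for the slice spec accepted by
        `copy_run_slice`:
        >>> new_idxs = wepy_h5.extract_file_runs('other_results.wepy.h5',
        ...                                      run_slices={0: (0, 100)})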
"""
if run_slices is None:
run_slices = {}
# open the other file and get the runs in it and the
# continuations it has
wepy_h5 = WepyHDF5(wepy_h5_path, mode='r')
with wepy_h5:
# the run idx in the external file
ext_run_idxs = wepy_h5.run_idxs
continuations = wepy_h5.continuations
# then for each run in it copy them to this file
new_run_idxs = []
for ext_run_idx in ext_run_idxs:
            # get the run_slice spec for the run in the other file, if
            # one was given, otherwise don't truncate
            run_slice = run_slices.get(ext_run_idx, None)
# get the index this run should be when it is added
new_run_idx = self.extract_run(wepy_h5_path, ext_run_idx,
run_slice=run_slice)
# save that run idx
new_run_idxs.append(new_run_idx)
was_closed = False
if self.closed:
self.open()
was_closed = True
# copy the continuations over translating the run idxs,
# for each continuation in the other files continuations
for continuation in continuations:
# translate each run index from the external file
# continuations to the run idxs they were just assigned in
# this file
self.add_continuation(new_run_idxs[continuation[0]],
new_run_idxs[continuation[1]])
if was_closed:
self.close()
return new_run_idxs
def join(self, other_h5):
"""Given another WepyHDF5 file object does a left join on this
file, renumbering the runs starting from this file.
This function uses the H5O function for copying. Data will be
copied not linked.
Parameters
----------
other_h5 : h5py.File
File handle to the file you want to join to this one.
"""
with other_h5 as h5:
for run_idx in h5.run_idxs:
# the other run group handle
other_run = h5.run(run_idx)
# copy this run to this file in the next run_idx group
self.h5.copy(other_run, '{}/{}'.format(RUNS, self.next_run_idx()))
### initialization and data generation
def add_metadata(self, key, value):
"""Add metadata for the whole file.
Parameters
----------
key : str
value : h5py value
h5py valid metadata value.
"""
self._h5.attrs[key] = value
def init_record_fields(self, run_record_key, record_fields):
"""Initialize the settings record fields for a record group in the
settings group.
Save which records are to be considered from a run record group's
        datasets to be in the table-like representation. This exists
        to allow large and small datasets for records to be stored
        together, while still allowing a more compact, single table-like
        representation to be produced for serialization.
Parameters
----------
run_record_key : str
Name of the record group you want to set this for.
record_fields : list of str
Names of the fields you want to set as record fields.
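        Examples
        --------
        A minimal sketch (the field names are illustrative only):
        >>> wepy_h5.init_record_fields(WARPING, ['walker_idx', 'target_idx'])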
"""
record_fields_grp = self.settings_grp[RECORD_FIELDS]
        # make a dataset of the record field names for this group. this
        # requires a 'special' datatype for variable length strings,
        # which is supported by HDF5 but not numpy.
vlen_str_dt = h5py.special_dtype(vlen=str)
# create the dataset with the strings of the fields which are records
record_group_fields_ds = record_fields_grp.create_dataset(run_record_key,
(len(record_fields),),
dtype=vlen_str_dt,
maxshape=(None,))
# set the flags
for i, record_field in enumerate(record_fields):
record_group_fields_ds[i] = record_field
def init_resampling_record_fields(self, resampler):
"""Initialize the record fields for this record group.
Parameters
----------
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
"""
self.init_record_fields(RESAMPLING, resampler.resampling_record_field_names())
def init_resampler_record_fields(self, resampler):
"""Initialize the record fields for this record group.
Parameters
----------
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
"""
self.init_record_fields(RESAMPLER, resampler.resampler_record_field_names())
def init_bc_record_fields(self, bc):
"""Initialize the record fields for this record group.
Parameters
----------
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
"""
self.init_record_fields(BC, bc.bc_record_field_names())
def init_warping_record_fields(self, bc):
"""Initialize the record fields for this record group.
Parameters
----------
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
"""
self.init_record_fields(WARPING, bc.warping_record_field_names())
def init_progress_record_fields(self, bc):
"""Initialize the record fields for this record group.
Parameters
----------
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
"""
self.init_record_fields(PROGRESS, bc.progress_record_field_names())
def add_continuation(self, continuation_run, base_run):
"""Add a continuation between runs.
Parameters
----------
continuation_run : int
The run index of the run that will be continuing another
base_run : int
The run that is being continued.
"""
continuations_dset = self.settings_grp[CONTINUATIONS]
continuations_dset.resize((continuations_dset.shape[0] + 1, continuations_dset.shape[1],))
continuations_dset[continuations_dset.shape[0] - 1] = np.array([continuation_run, base_run])
def new_run(self, init_walkers, continue_run=None, **kwargs):
"""Initialize a new run.
Parameters
----------
init_walkers : list of objects implementing the Walker interface
The walkers that will be the start of this run.
continue_run : int, optional
If this run is a continuation of another set which one it is continuing.
kwargs : dict
Metadata to set for the run.
Returns
-------
run_grp : h5py.Group
The group of the newly created run.
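        Examples
        --------
        A minimal sketch, assuming ``walkers`` is a list of objects
        implementing the Walker interface:
        >>> run_grp = wepy_h5.new_run(walkers)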
"""
# check to see if the continue_run is actually in this file
if continue_run is not None:
if continue_run not in self.run_idxs:
raise ValueError("The continue_run idx given, {}, is not present in this file".format(
continue_run))
# get the index for this run
new_run_idx = self.next_run_idx()
# create a new group named the next integer in the counter
run_grp = self._h5.create_group('{}/{}'.format(RUNS, new_run_idx))
# set the initial walkers group
init_walkers_grp = run_grp.create_group(INIT_WALKERS)
self._add_init_walkers(init_walkers_grp, init_walkers)
# initialize the walkers group
traj_grp = run_grp.create_group(TRAJECTORIES)
# run the initialization routines for adding a run
self._add_run_init(new_run_idx, continue_run=continue_run)
# add metadata if given
for key, val in kwargs.items():
if key != RUN_IDX:
run_grp.attrs[key] = val
else:
warn('run_idx metadata is set by wepy and cannot be used', RuntimeWarning)
return run_grp
# application level methods for setting the fields for run record
# groups given the objects themselves
def init_run_resampling(self, run_idx, resampler):
"""Initialize data for resampling records.
Initialized the run record group as well as settings for the
fields.
This method also creates the decision group for the run.
Parameters
----------
run_idx : int
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
# set the enumeration of the decisions
        self.init_run_resampling_decision(run_idx, resampler)
# set the data fields that can be used for table like records
resampler.resampler_record_field_names()
resampler.resampling_record_field_names()
# then make the records group
fields = resampler.resampling_fields()
grp = self.init_run_record_grp(run_idx, RESAMPLING, fields)
return grp
def init_run_resampling_decision(self, run_idx, resampler):
"""Initialize the decision group for the run resampling records.
Parameters
----------
run_idx : int
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
"""
self.init_run_fields_resampling_decision(run_idx, resampler.DECISION.enum_dict_by_name())
def init_run_resampler(self, run_idx, resampler):
"""Initialize data for this record group in a run.
Initialized the run record group as well as settings for the
fields.
Parameters
----------
run_idx : int
resampler : object implementing the Resampler interface
The resampler which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
fields = resampler.resampler_fields()
grp = self.init_run_record_grp(run_idx, RESAMPLER, fields)
return grp
def init_run_warping(self, run_idx, bc):
"""Initialize data for this record group in a run.
Initialized the run record group as well as settings for the
fields.
Parameters
----------
run_idx : int
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
fields = bc.warping_fields()
grp = self.init_run_record_grp(run_idx, WARPING, fields)
return grp
def init_run_progress(self, run_idx, bc):
"""Initialize data for this record group in a run.
Initialized the run record group as well as settings for the
fields.
Parameters
----------
run_idx : int
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
fields = bc.progress_fields()
grp = self.init_run_record_grp(run_idx, PROGRESS, fields)
return grp
def init_run_bc(self, run_idx, bc):
"""Initialize data for this record group in a run.
Initialized the run record group as well as settings for the
fields.
Parameters
----------
run_idx : int
bc : object implementing the BoundaryConditions interface
The boundary conditions object which contains the data for which record fields to set.
Returns
-------
record_grp : h5py.Group
"""
fields = bc.bc_fields()
grp = self.init_run_record_grp(run_idx, BC, fields)
return grp
# application level methods for initializing the run records
# groups with just the fields and without the objects
def init_run_fields_resampling(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, RESAMPLING, fields)
return grp
def init_run_fields_resampling_decision(self, run_idx, decision_enum_dict):
"""Initialize the decision group for this run.
Parameters
----------
run_idx : int
decision_enum_dict : dict of str : int
Mapping of decision ID strings to integer representation.
"""
decision_grp = self.run(run_idx).create_group(DECISION)
for name, value in decision_enum_dict.items():
decision_grp.create_dataset(name, data=value)
def init_run_fields_resampler(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, RESAMPLER, fields)
return grp
def init_run_fields_warping(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, WARPING, fields)
return grp
def init_run_fields_progress(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, PROGRESS, fields)
return grp
def init_run_fields_bc(self, run_idx, fields):
"""Initialize this record group fields datasets.
Parameters
----------
run_idx : int
fields : list of str
Names of the fields to initialize
Returns
-------
record_grp : h5py.Group
"""
grp = self.init_run_record_grp(run_idx, BC, fields)
return grp
def init_run_record_grp(self, run_idx, run_record_key, fields):
"""Initialize a record group for a run.
Parameters
----------
run_idx : int
run_record_key : str
The name of the record group.
fields : list of str
The names of the fields to set for the record group.
"""
# initialize the record group based on whether it is sporadic
# or continual
if self._is_sporadic_records(run_record_key):
grp = self._init_run_sporadic_record_grp(run_idx, run_record_key,
fields)
        else:
            grp = self._init_run_continual_record_grp(run_idx, run_record_key,
                                                      fields)
        return grp
# TODO: should've been removed already just double checking things are good without it
# def traj_n_frames(self, run_idx, traj_idx):
# """
# Parameters
# ----------
# run_idx :
# traj_idx :
# Returns
# -------
# """
# return self.traj(run_idx, traj_idx)[POSITIONS].shape[0]
def add_traj(self, run_idx, data, weights=None, sparse_idxs=None, metadata=None):
"""Add a full trajectory to a run.
Parameters
----------
run_idx : int
data : dict of str : arraylike
Mapping of trajectory fields to the data for them to add.
weights : 1-D arraylike of float
The weights of each frame. If None defaults all frames to 1.0.
sparse_idxs : list of int
Cycle indices the data corresponds to.
metadata : dict of str : value
Metadata for the trajectory.
Returns
-------
traj_grp : h5py.Group
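        Examples
        --------
        A minimal sketch with random data for 10 frames of the full system
        (run 0 must already exist):
        >>> import numpy as np
        >>> positions = np.random.rand(10, wepy_h5.num_atoms, wepy_h5.num_dims)
        >>> traj_grp = wepy_h5.add_traj(0, {POSITIONS : positions})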
"""
# convenient alias
traj_data = data
# initialize None kwargs
if sparse_idxs is None:
sparse_idxs = {}
if metadata is None:
metadata = {}
# positions are mandatory
assert POSITIONS in traj_data, "positions must be given to create a trajectory"
assert isinstance(traj_data[POSITIONS], np.ndarray)
n_frames = traj_data[POSITIONS].shape[0]
# if weights are None then we assume they are 1.0
if weights is None:
weights = np.ones((n_frames, 1), dtype=float)
else:
assert isinstance(weights, np.ndarray), "weights must be a numpy.ndarray"
assert weights.shape[0] == n_frames,\
"weights and the number of frames must be the same length"
# current traj_idx
traj_idx = self.next_run_traj_idx(run_idx)
# make a group for this trajectory, with the current traj_idx
# for this run
traj_grp = self._h5.create_group(
'{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx))
# add the run_idx as metadata
traj_grp.attrs[RUN_IDX] = run_idx
# add the traj_idx as metadata
traj_grp.attrs[TRAJ_IDX] = traj_idx
# add the rest of the metadata if given
for key, val in metadata.items():
if not key in [RUN_IDX, TRAJ_IDX]:
traj_grp.attrs[key] = val
else:
warn("run_idx and traj_idx are used by wepy and cannot be set", RuntimeWarning)
# check to make sure the positions are the right shape
assert traj_data[POSITIONS].shape[1] == self.num_atoms, \
"positions given have different number of atoms: {}, should be {}".format(
traj_data[POSITIONS].shape[1], self.num_atoms)
assert traj_data[POSITIONS].shape[2] == self.num_dims, \
"positions given have different number of dims: {}, should be {}".format(
traj_data[POSITIONS].shape[2], self.num_dims)
# add datasets to the traj group
# weights
traj_grp.create_dataset(WEIGHTS, data=weights, dtype=WEIGHT_DTYPE,
maxshape=(None, *WEIGHT_SHAPE))
# positions
positions_shape = traj_data[POSITIONS].shape
# add the rest of the traj_data
for field_path, field_data in traj_data.items():
# if there were sparse idxs for this field pass them in
if field_path in sparse_idxs:
field_sparse_idxs = sparse_idxs[field_path]
# if this is a sparse field and no sparse_idxs were given
# we still need to initialize it as a sparse field so it
# can be extended properly so we make sparse_idxs to match
# the full length of this initial trajectory data
elif field_path in self.sparse_fields:
field_sparse_idxs = np.arange(positions_shape[0])
# otherwise it is not a sparse field so we just pass in None
else:
field_sparse_idxs = None
self._add_traj_field_data(run_idx, traj_idx, field_path, field_data,
sparse_idxs=field_sparse_idxs)
## initialize empty sparse fields
# get the sparse field datasets that haven't been initialized
traj_init_fields = list(sparse_idxs.keys()) + list(traj_data.keys())
uninit_sparse_fields = set(self.sparse_fields).difference(traj_init_fields)
# the shapes
uninit_sparse_shapes = [self.field_feature_shapes[field] for field in uninit_sparse_fields]
# the dtypes
uninit_sparse_dtypes = [self.field_feature_dtypes[field] for field in uninit_sparse_fields]
# initialize the sparse fields in the hdf5
self._init_traj_fields(run_idx, traj_idx,
uninit_sparse_fields, uninit_sparse_shapes, uninit_sparse_dtypes)
return traj_grp
def extend_traj(self, run_idx, traj_idx, data, weights=None):
"""Extend a trajectory with data for all fields.
Parameters
----------
run_idx : int
traj_idx : int
data : dict of str : arraylike
The data to add for each field of the trajectory. Must all
have the same first dimension.
weights : arraylike
Weights for the frames of the trajectory. If None defaults all frames to 1.0.
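        Examples
        --------
        A minimal sketch appending 5 new frames to trajectory 0 of run 0:
        >>> import numpy as np
        >>> new_positions = np.random.rand(5, wepy_h5.num_atoms, wepy_h5.num_dims)
        >>> wepy_h5.extend_traj(0, 0, {POSITIONS : new_positions})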
"""
        if self._wepy_mode == 'c-':
            for dataset_key in data:
                assert self._append_flags[dataset_key], \
                    "dataset is not available for appending to"
# convenient alias
traj_data = data
# number of frames to add
n_new_frames = traj_data[POSITIONS].shape[0]
n_frames = self.num_traj_frames(run_idx, traj_idx)
# calculate the new sparse idxs for sparse fields that may be
# being added
sparse_idxs = np.array(range(n_frames, n_frames + n_new_frames))
# get the trajectory group
traj_grp = self._h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]
## weights
# if weights are None then we assume they are 1.0
if weights is None:
weights = np.ones((n_new_frames, 1), dtype=float)
else:
assert isinstance(weights, np.ndarray), "weights must be a numpy.ndarray"
assert weights.shape[0] == n_new_frames,\
"weights and the number of frames must be the same length"
# add the weights
weights_ds = traj_grp[WEIGHTS]
# append to the dataset on the first dimension, keeping the
# others the same, if they exist
if len(weights_ds.shape) > 1:
weights_ds.resize( (weights_ds.shape[0] + n_new_frames, *weights_ds.shape[1:]) )
else:
weights_ds.resize( (weights_ds.shape[0] + n_new_frames, ) )
# add the new data
weights_ds[-n_new_frames:, ...] = weights
# add the other fields
for field_path, field_data in traj_data.items():
# if the field hasn't been initialized yet initialize it,
# unless we are in SWMR mode
if not field_path in traj_grp:
# if in SWMR mode you cannot create groups so if we
# are in SWMR mode raise a warning that the data won't
# be recorded
if self.swmr_mode:
warn("New datasets cannot be created while in SWMR mode. The field {} will"
"not be saved. If you want to save this it must be"
"previously created".format(field_path))
else:
feature_shape = field_data.shape[1:]
feature_dtype = field_data.dtype
# not specified as sparse_field, no settings
if (not field_path in self.field_feature_shapes) and \
(not field_path in self.field_feature_dtypes) and \
not field_path in self.sparse_fields:
# only save if it is an observable
is_observable = False
if '/' in field_path:
group_name = field_path.split('/')[0]
if group_name == OBSERVABLES:
is_observable = True
if is_observable:
warn("the field '{}' was received but not previously specified"
" but is being added because it is in observables.".format(field_path))
# save sparse_field flag, shape, and dtype
self._add_sparse_field_flag(field_path)
self._set_field_feature_shape(field_path, feature_shape)
self._set_field_feature_dtype(field_path, feature_dtype)
else:
raise ValueError("the field '{}' was received but not previously specified"
"it is being ignored because it is not an observable.".format(field_path))
# specified as sparse_field but no settings given
elif (self.field_feature_shapes[field_path] is None and
self.field_feature_dtypes[field_path] is None) and \
field_path in self.sparse_fields:
# set the feature shape and dtype since these
# should be 0 in the settings
self._set_field_feature_shape(field_path, feature_shape)
self._set_field_feature_dtype(field_path, feature_dtype)
# initialize
self._init_traj_field(run_idx, traj_idx, field_path, feature_shape, feature_dtype)
# extend it either as a sparse field or a contiguous field
if field_path in self.sparse_fields:
self._extend_sparse_traj_field(run_idx, traj_idx, field_path, field_data, sparse_idxs)
else:
self._extend_contiguous_traj_field(run_idx, traj_idx, field_path, field_data)
## application level append methods for run records groups
def extend_cycle_warping_records(self, run_idx, cycle_idx, warping_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        warping_data : list of dict of str : arraylike
            The records to add, each a mapping of the record group
            fields to the value for that field.
"""
self.extend_cycle_run_group_records(run_idx, WARPING, cycle_idx, warping_data)
def extend_cycle_bc_records(self, run_idx, cycle_idx, bc_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        bc_data : list of dict of str : arraylike
            The records to add, each a mapping of the record group
            fields to the value for that field.
"""
self.extend_cycle_run_group_records(run_idx, BC, cycle_idx, bc_data)
def extend_cycle_progress_records(self, run_idx, cycle_idx, progress_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        progress_data : list of dict of str : arraylike
            The records to add, each a mapping of the record group
            fields to the value for that field.
"""
self.extend_cycle_run_group_records(run_idx, PROGRESS, cycle_idx, progress_data)
def extend_cycle_resampling_records(self, run_idx, cycle_idx, resampling_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        resampling_data : list of dict of str : arraylike
            The records to add, each a mapping of the record group
            fields to the value for that field.
"""
self.extend_cycle_run_group_records(run_idx, RESAMPLING, cycle_idx, resampling_data)
def extend_cycle_resampler_records(self, run_idx, cycle_idx, resampler_data):
"""Add records for each field for this record group.
Parameters
----------
run_idx : int
cycle_idx : int
The cycle index these records correspond to.
        resampler_data : list of dict of str : arraylike
            The records to add, each a mapping of the record group
            fields to the value for that field.
"""
self.extend_cycle_run_group_records(run_idx, RESAMPLER, cycle_idx, resampler_data)
def extend_cycle_run_group_records(self, run_idx, run_record_key, cycle_idx, fields_data):
"""Extend data for a whole records group.
This must have the cycle index for the data it is appending as
this is done for sporadic and continual datasets.
Parameters
----------
run_idx : int
run_record_key : str
Name of the record group.
cycle_idx : int
The cycle index these records correspond to.
        fields_data : list of dict of str : arraylike
            The records to add, each a mapping of the record group
            field names to the value for that field.
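        Examples
        --------
        A minimal sketch adding a single record for cycle 3 of run 0 (the
        field names are illustrative and must match the initialized
        record fields):
        >>> wepy_h5.extend_cycle_run_group_records(
        ...     0, WARPING, 3, [{'walker_idx' : 2, 'weight' : 0.01}])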
"""
record_grp = self.records_grp(run_idx, run_record_key)
# if it is sporadic add the cycle idx
if self._is_sporadic_records(run_record_key):
# get the cycle idxs dataset
record_cycle_idxs_ds = record_grp[CYCLE_IDXS]
# number of old and new records
n_new_records = len(fields_data)
n_existing_records = record_cycle_idxs_ds.shape[0]
# make a new chunk for the new records
record_cycle_idxs_ds.resize( (n_existing_records + n_new_records,) )
# add an array of the cycle idx for each record
record_cycle_idxs_ds[n_existing_records:] = np.full((n_new_records,), cycle_idx)
# then add all the data for the field
for record_dict in fields_data:
for field_name, field_data in record_dict.items():
self._extend_run_record_data_field(run_idx, run_record_key,
field_name, np.array([field_data]))
### Analysis Routines
## Record Getters
def run_records(self, run_idx, run_record_key):
"""Get the records for a record group for a single run.
Parameters
----------
run_idx : int
run_record_key : str
The name of the record group.
Returns
-------
records : list of namedtuple objects
The list of records for the run's record group.
"""
# wrap this in a list since the underlying functions accept a
# list of records
run_idxs = [run_idx]
return self.run_contig_records(run_idxs, run_record_key)
def run_contig_records(self, run_idxs, run_record_key):
"""Get the records for a record group for the contig that is formed by
the run indices.
This alters the cycle indices for the records so that they
appear to have come from a single run. That is they are the
cycle indices of the contig.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
run_record_key : str
Name of the record group.
Returns
-------
records : list of namedtuple objects
The list of records for the contig's record group.
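        Examples
        --------
        A minimal sketch over a contig of runs 0 and 1 (run 1 must
        continue run 0):
        >>> recs = wepy_h5.run_contig_records([0, 1], RESAMPLING)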
"""
# if there are no fields return an empty list
record_fields = self.record_fields[run_record_key]
if len(record_fields) == 0:
return []
# get the iterator for the record idxs, if the group is
# sporadic then we just use the cycle idxs
if self._is_sporadic_records(run_record_key):
records = self._run_records_sporadic(run_idxs, run_record_key)
else:
records = self._run_records_continual(run_idxs, run_record_key)
return records
def run_records_dataframe(self, run_idx, run_record_key):
"""Get the records for a record group for a single run in the form of
a pandas DataFrame.
Parameters
----------
run_idx : int
run_record_key : str
Name of record group.
Returns
-------
record_df : pandas.DataFrame
"""
records = self.run_records(run_idx, run_record_key)
return pd.DataFrame(records)
def run_contig_records_dataframe(self, run_idxs, run_record_key):
"""Get the records for a record group for a contig of runs in the form
of a pandas DataFrame.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
run_record_key : str
The name of the record group.
Returns
-------
records_df : pandas.DataFrame
"""
records = self.run_contig_records(run_idxs, run_record_key)
return pd.DataFrame(records)
# application level specific methods for each main group
# resampling
def resampling_records(self, run_idxs):
"""Get the records this record group for the contig that is formed by
the run indices.
This alters the cycle indices for the records so that they
appear to have come from a single run. That is they are the
cycle indices of the contig.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records : list of namedtuple objects
The list of records for the contig's record group.
"""
return self.run_contig_records(run_idxs, RESAMPLING)
def resampling_records_dataframe(self, run_idxs):
"""Get the records for this record group for a contig of runs in the
form of a pandas DataFrame.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records_df : pandas.DataFrame
"""
return pd.DataFrame(self.resampling_records(run_idxs))
# resampler records
def resampler_records(self, run_idxs):
"""Get the records this record group for the contig that is formed by
the run indices.
This alters the cycle indices for the records so that they
appear to have come from a single run. That is they are the
cycle indices of the contig.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records : list of namedtuple objects
The list of records for the contig's record group.
"""
return self.run_contig_records(run_idxs, RESAMPLER)
def resampler_records_dataframe(self, run_idxs):
"""Get the records for this record group for a contig of runs in the
form of a pandas DataFrame.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records_df : pandas.DataFrame
"""
return pd.DataFrame(self.resampler_records(run_idxs))
# warping
def warping_records(self, run_idxs):
"""Get the records this record group for the contig that is formed by
the run indices.
This alters the cycle indices for the records so that they
appear to have come from a single run. That is they are the
cycle indices of the contig.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records : list of namedtuple objects
The list of records for the contig's record group.
"""
return self.run_contig_records(run_idxs, WARPING)
def warping_records_dataframe(self, run_idxs):
"""Get the records for this record group for a contig of runs in the
form of a pandas DataFrame.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records_df : pandas.DataFrame
"""
return pd.DataFrame(self.warping_records(run_idxs))
# boundary conditions
def bc_records(self, run_idxs):
"""Get the records this record group for the contig that is formed by
the run indices.
This alters the cycle indices for the records so that they
appear to have come from a single run. That is they are the
cycle indices of the contig.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records : list of namedtuple objects
The list of records for the contig's record group.
"""
return self.run_contig_records(run_idxs, BC)
def bc_records_dataframe(self, run_idxs):
"""Get the records for this record group for a contig of runs in the
form of a pandas DataFrame.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records_df : pandas.DataFrame
"""
return pd.DataFrame(self.bc_records(run_idxs))
# progress
def progress_records(self, run_idxs):
"""Get the records this record group for the contig that is formed by
the run indices.
This alters the cycle indices for the records so that they
appear to have come from a single run. That is they are the
cycle indices of the contig.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records : list of namedtuple objects
The list of records for the contig's record group.
"""
return self.run_contig_records(run_idxs, PROGRESS)
def progress_records_dataframe(self, run_idxs):
"""Get the records for this record group for a contig of runs in the
form of a pandas DataFrame.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
records_df : pandas.DataFrame
"""
return pd.DataFrame(self.progress_records(run_idxs))
def run_resampling_panel(self, run_idx):
"""Generate a resampling panel from the resampling records of a run.
Parameters
----------
run_idx : int
Returns
-------
resampling_panel : list of list of list of namedtuple records
The panel (list of tables) of resampling records in order
(cycle, step, walker)
"""
return self.run_contig_resampling_panel([run_idx])
def run_contig_resampling_panel(self, run_idxs):
"""Generate a resampling panel from the resampling records of a
contig, which is a series of runs.
Parameters
----------
run_idxs : list of int
The run indices that form a contig. (i.e. element 1
continues element 0)
Returns
-------
resampling_panel : list of list of list of namedtuple records
The panel (list of tables) of resampling records in order
(cycle, step, walker)
"""
# check the contig to make sure it is a valid contig
if not self.is_run_contig(run_idxs):
raise ValueError("The run_idxs provided are not a valid contig, {}.".format(
run_idxs))
# make the resampling panel from the resampling records for the contig
contig_resampling_panel = resampling_panel(self.resampling_records(run_idxs),
is_sorted=False)
return contig_resampling_panel
# Trajectory Field Setters
def add_run_observable(self, run_idx, observable_name, data, sparse_idxs=None):
"""Add a trajectory sub-field in the compound field "observables" for
a single run.
Parameters
----------
run_idx : int
observable_name : str
What to name the observable subfield.
data : arraylike of shape (n_trajs, feature_vector_shape[0], ...)
The data for all of the trajectories that will be set to
this observable field.
sparse_idxs : list of int, optional
If not None, specifies the cycle indices this data corresponds to.
"""
obs_path = '{}/{}'.format(OBSERVABLES, observable_name)
self._add_run_field(run_idx, obs_path, data, sparse_idxs=sparse_idxs)
def add_traj_observable(self, observable_name, data, sparse_idxs=None):
"""Add a trajectory sub-field in the compound field "observables" for
an entire file, on a trajectory basis.
Parameters
----------
observable_name : str
What to name the observable subfield.
        data : list of arraylike
            The data for each trajectory are the elements of this
            argument, ordered by run and then by trajectory. Each
            element is an arraylike of shape
            (n_traj_frames, feature_vector_shape[0], ...) where
            n_traj_frames is the number of frames in that trajectory.
sparse_idxs : list of list of int, optional
If not None, specifies the cycle indices this data
corresponds to. First by run, then by trajectory.
"""
obs_path = '{}/{}'.format(OBSERVABLES, observable_name)
        # regroup the flat, per-trajectory data by run, using a running
        # offset so runs with differing numbers of trajectories are
        # indexed correctly
        run_results = []
        traj_offset = 0
        for run_idx in range(self.num_runs):
            run_num_trajs = self.num_run_trajs(run_idx)
            run_results.append([])
            for traj_idx in range(run_num_trajs):
                run_results[run_idx].append(data[traj_offset + traj_idx])
            traj_offset += run_num_trajs
        run_sparse_idxs = None
        if sparse_idxs is not None:
            run_sparse_idxs = []
            traj_offset = 0
            for run_idx in range(self.num_runs):
                run_num_trajs = self.num_run_trajs(run_idx)
                run_sparse_idxs.append([])
                for traj_idx in range(run_num_trajs):
                    run_sparse_idxs[run_idx].append(
                        sparse_idxs[traj_offset + traj_idx])
                traj_offset += run_num_trajs
self.add_observable(observable_name, run_results,
sparse_idxs=run_sparse_idxs)
def add_observable(self, observable_name, data, sparse_idxs=None):
"""Add a trajectory sub-field in the compound field "observables" for
an entire file, on a compound run and trajectory basis.
Parameters
----------
observable_name : str
What to name the observable subfield.
data : list of list of arraylike
The data for each run are the elements of this
argument. Each element is a list of the trajectory
observable arraylikes of shape (n_traj_frames,
feature_vector_shape[0],...).
sparse_idxs : list of list of int, optional
If not None, specifies the cycle indices this data
corresponds to. First by run, then by trajectory.
"""
obs_path = '{}/{}'.format(OBSERVABLES, observable_name)
self._add_field(
obs_path,
data,
sparse_idxs=sparse_idxs,
)
def compute_observable(self, func, fields, args,
map_func=map,
traj_sel=None,
save_to_hdf5=None, idxs=False, return_results=True):
"""Compute an observable on the trajectory data according to a
function. Optionally save that data in the observables data group for
the trajectory.
Parameters
----------
func : callable
The function to apply to the trajectory fields (by
cycle). Must accept a dictionary mapping string trajectory
field names to a feature vector for that cycle and return
an arraylike. May accept other positional arguments as well.
fields : list of str
A list of trajectory field names to pass to the mapped function.
args : tuple
A single tuple of arguments which will be expanded and
passed to the mapped function for every evaluation.
map_func : callable
The mapping function. The implementation of how to map the
computation function over the data. Default is the python
builtin `map` function. Can be a parallel implementation
for example.
traj_sel : list of tuple, optional
If not None, a list of trajectory identifier tuple
(run_idx, traj_idx) to restrict the computation to.
save_to_hdf5 : None or string, optional
If not None, a string that specifies the name of the
observables sub-field that the computed values will be saved to.
idxs : bool
If True will return the trajectory identifier tuple
(run_idx, traj_idx) along with other return values.
return_results : bool
If True will return the results of the mapping. If not
using the 'save_to_hdf5' option, be sure to use this or
results will be lost.
Returns
-------
traj_id_tuples : list of tuple of int, if 'idxs' option is True
A list of the tuple identifiers for each trajectory result.
results : list of arraylike, if 'return_results' option is True
A list of arraylike feature vectors for each trajectory.
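        Examples
        --------
        A minimal sketch that computes the geometric center of each frame
        from the positions (assumed shape (n_frames, n_atoms, n_dims)) and
        saves it under 'observables/com'; the observable name and function
        are illustrative only:
        >>> def geometric_center(fields_d, *args):
        ...     return fields_d[POSITIONS].mean(axis=1)
        >>> results = wepy_h5.compute_observable(geometric_center, [POSITIONS], (),
        ...                                      save_to_hdf5='com')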
"""
if save_to_hdf5 is not None:
assert self.mode in ['w', 'w-', 'x', 'r+', 'c', 'c-'],\
"File must be in a write mode"
assert isinstance(save_to_hdf5, str),\
"`save_to_hdf5` should be the field name to save the data in the `observables`"\
" group in each trajectory"
# the field name comes from this kwarg if it satisfies the
# string condition above
field_name = save_to_hdf5
# calculate the results and accumulate them here
results = []
# and the indices of the results
result_idxs = []
# map over the trajectories and apply the function and save
# the results
for result in self.traj_fields_map(func, fields, args,
map_func=map_func, traj_sel=traj_sel, idxs=True):
idx_tup, obs_features = result
results.append(obs_features)
result_idxs.append(idx_tup)
# we want to separate writing and computation so we can do it
# in parallel without having multiple writers. So if we are
# writing directly to the HDF5 we add the results to it.
# if we are saving this to the trajectories observables add it as a dataset
if save_to_hdf5:
# reshape the results to be in the observable shape:
observable = [[] for run_idx in self.run_idxs]
for result_idx, traj_results in zip(result_idxs, results):
run_idx, traj_idx = result_idx
observable[run_idx].append(traj_results)
self.add_observable(
field_name,
observable,
sparse_idxs=None,
)
if return_results:
if idxs:
return result_idxs, results
else:
return results
## Trajectory Getters
def get_traj_field(self, run_idx, traj_idx, field_path, frames=None, masked=True):
"""Returns a numpy array for the given trajectory field.
You can control how sparse fields are returned using the
`masked` option. When True (default) a masked numpy array will
be returned such that you can get which cycles it is from,
when False an unmasked array of the data will be returned
which has no cycle information.
Parameters
----------
run_idx : int
traj_idx : int
field_path : str
Name of the trajectory field to get
frames : None or list of int
If not None, a list of the frame indices of the trajectory
to return values for.
masked : bool
If true will return sparse field values as masked arrays,
otherwise just returns the compacted data.
Returns
-------
field_data : arraylike
The data for the trajectory field.
"""
traj_path = '{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)
        # if the field doesn't exist raise a KeyError
        if field_path not in self._h5[traj_path]:
            raise KeyError("key for field {} not found".format(field_path))
# get the field depending on whether it is sparse or not
if field_path in self.sparse_fields:
return self._get_sparse_traj_field(run_idx, traj_idx, field_path,
frames=frames, masked=masked)
else:
return self._get_contiguous_traj_field(run_idx, traj_idx, field_path,
frames=frames)
def get_trace_fields(self,
frame_tups,
fields,
same_order=True,
):
"""Get trajectory field data for the frames specified by the trace.
Parameters
----------
frame_tups : list of tuple of int
The trace values. Each tuple is of the form
(run_idx, traj_idx, frame_idx).
fields : list of str
The names of the fields to get for each frame.
same_order : bool
(Default = True)
If True will ensure that the results will be sorted exactly
as the order of the frame_tups were. If False will return
them in an arbitrary implementation determined order that
should be more efficient.
Returns
-------
trace_fields : dict of str : arraylike
Mapping of the field names to the array of feature vectors
for the trace.
"""
# TODO, WARN: this is known to not work properly in all
# cases. While this is an important feature, we defer the
        # implementation of chunking to another function or interface
if False:
def argsort(seq):
return sorted(range(len(seq)), key=seq.__getitem__)
def apply_argsorted(shuffled_seq, sorted_idxs):
return [shuffled_seq[i] for i in sorted_idxs]
# first sort the frame_tups so we can chunk them up by
# (run, traj) to get more efficient reads since these are
# chunked by these datasets.
# we do an argsort here so that we can map fields back to
# the order they came in (if requested)
sorted_idxs = argsort(frame_tups)
# then sort them as we will iterate through them
sorted_frame_tups = apply_argsorted(frame_tups, sorted_idxs)
# generate the chunks by (run, traj)
read_chunks = defaultdict(list)
for run_idx, traj_idx, frame_idx in sorted_frame_tups:
read_chunks[(run_idx, traj_idx)].append(frame_idx)
# go through each chunk and read data for each field
frame_fields = {}
for field in fields:
# for each field collect the chunks
field_chunks = []
for chunk_key, frames in read_chunks.items():
run_idx, traj_idx = chunk_key
frames_field = self.get_traj_field(run_idx, traj_idx, field,
frames=frames)
field_chunks.append(frames_field)
# then aggregate them
field_unsorted = np.concatenate(field_chunks)
del field_chunks; gc.collect()
# if we want them sorted sort them back to the
# original (unsorted) order, otherwise just return
# them
if same_order:
frame_fields[field] = field_unsorted[sorted_idxs]
else:
frame_fields[field] = field_unsorted
del field_unsorted; gc.collect()
else:
frame_fields = {field : [] for field in fields}
for run_idx, traj_idx, cycle_idx in frame_tups:
for field in fields:
frame_field = self.get_traj_field(
run_idx,
traj_idx,
field,
frames=[cycle_idx],
)
# the first dimension doesn't matter here since we
# only get one frame at a time.
frame_fields[field].append(frame_field[0])
# combine all the parts of each field into single arrays
for field in fields:
frame_fields[field] = np.array(frame_fields[field])
return frame_fields
def get_run_trace_fields(self, run_idx, frame_tups, fields):
"""Get trajectory field data for the frames specified by the trace
within a single run.
Parameters
----------
run_idx : int
frame_tups : list of tuple of int
The trace values. Each tuple is of the form
(traj_idx, frame_idx).
fields : list of str
The names of the fields to get for each frame.
Returns
-------
trace_fields : dict of str : arraylike
Mapping of the field names to the array of feature vectors
for the trace.
"""
frame_fields = {field : [] for field in fields}
for traj_idx, cycle_idx in frame_tups:
for field in fields:
frame_field = self.get_traj_field(run_idx, traj_idx, field, frames=[cycle_idx])
# the first dimension doesn't matter here since we
# only get one frame at a time.
frame_fields[field].append(frame_field[0])
# combine all the parts of each field into single arrays
for field in fields:
frame_fields[field] = np.array(frame_fields[field])
return frame_fields
def get_contig_trace_fields(self, contig_trace, fields):
"""Get field data for all trajectories of a contig for the frames
specified by the contig trace.
Parameters
----------
contig_trace : list of tuple of int
The trace values. Each tuple is of the form
(run_idx, frame_idx).
fields : list of str
The names of the fields to get for each cycle.
Returns
-------
contig_fields : dict of str : arraylike
of shape (n_cycles, n_trajs, field_feature_shape[0],...)
Mapping of the field names to the array of feature vectors
for contig trace.
"""
# to be efficient we want to group our grabbing of fields by run
# so we group them by run
runs_frames = defaultdict(list)
# and we get the runs in the order to fetch them
run_idxs = []
for run_idx, cycle_idx in contig_trace:
runs_frames[run_idx].append(cycle_idx)
if not run_idx in run_idxs:
run_idxs.append(run_idx)
# (there must be the same number of trajectories in each run)
n_trajs_test = self.num_run_trajs(run_idxs[0])
        assert all(n_trajs_test == self.num_run_trajs(run_idx)
                   for run_idx in run_idxs)
# then using this we go run by run and get all the
# trajectories
field_values = {}
for field in fields:
# we gather trajectories in "bundles" (think sticks
# strapped together) and each bundle represents a run, we
# will concatenate the ends of the bundles together to get
# the full array at the end
bundles = []
for run_idx in run_idxs:
run_bundle = []
for traj_idx in self.run_traj_idxs(run_idx):
# get the values for this (field, run, trajectory)
traj_field_vals = self.get_traj_field(run_idx, traj_idx, field,
frames=runs_frames[run_idx],
masked=True)
run_bundle.append(traj_field_vals)
# convert this "bundle" of trajectory values (think
# sticks side by side) into an array
run_bundle = np.array(run_bundle)
bundles.append(run_bundle)
# stick the bundles together end to end to make the value
# for this field , the first dimension currently is the
# trajectory_index, but we want to make the cycles the
# first dimension. So we stack them along that axis then
# transpose the first two axes (not the rest of them which
# should stay the same). Pardon the log terminology, but I
# don't know a name for a bunch of bundles taped together.
field_log = np.hstack(tuple(bundles))
field_log = np.swapaxes(field_log, 0, 1)
field_values[field] = field_log
return field_values
def iter_trajs_fields(self, fields, idxs=False, traj_sel=None):
"""Generator for iterating over fields trajectories in a file.
Parameters
----------
fields : list of str
Names of the trajectory fields you want to yield.
idxs : bool
If True will also return the tuple identifier of the
trajectory the field data is from.
traj_sel : list of tuple of int
If not None, a list of trajectory identifiers to restrict
iteration over.
Yields
------
traj_identifier : tuple of int if 'idxs' option is True
Tuple identifying the trajectory the data belongs to
(run_idx, traj_idx).
fields_data : dict of str : arraylike
Mapping of the field name to the array of feature vectors
of that field for this trajectory.
"""
for idx_tup, traj in self.iter_trajs(idxs=True, traj_sel=traj_sel):
run_idx, traj_idx = idx_tup
dsets = {}
            # DEBUG: if debug prints are requested, pass in the run and traj
            # index so the function can print them out. TESTING: if this
            # causes no problems (and it doesn't look like it would from the
            # code), these lines will be removed permanently.
# dsets['run_idx'] = run_idx
# dsets[TRAJ_IDX] = traj_idx
for field in fields:
try:
dset = traj[field][:]
except KeyError:
warn("field \"{}\" not found in \"{}\"".format(field, traj.name), RuntimeWarning)
dset = None
dsets[field] = dset
if idxs:
yield (run_idx, traj_idx), dsets
else:
yield dsets
def traj_fields_map(self, func, fields, args,
map_func=map, idxs=False, traj_sel=None):
"""Function for mapping work onto field of trajectories.
Parameters
----------
func : callable
The function to apply to the trajectory fields (by
cycle). Must accept a dictionary mapping string trajectory
field names to a feature vector for that cycle and return
an arraylike. May accept other positional arguments as well.
fields : list of str
A list of trajectory field names to pass to the mapped function.
        args : None or tuple
A single tuple of arguments which will be
passed to the mapped function for every evaluation.
map_func : callable
The mapping function. The implementation of how to map the
computation function over the data. Default is the python
builtin `map` function. Can be a parallel implementation
for example.
traj_sel : list of tuple, optional
If not None, a list of trajectory identifier tuple
(run_idx, traj_idx) to restrict the computation to.
idxs : bool
If True will return the trajectory identifier tuple
(run_idx, traj_idx) along with other return values.
Returns
-------
traj_id_tuples : list of tuple of int, if 'idxs' option is True
A list of the tuple identifiers for each trajectory result.
results : list of arraylike
A list of arraylike feature vectors for each trajectory.
"""
# check the args and kwargs to see if they need expanded for
# mapping inputs
#first go through each run and get the number of cycles
n_cycles = 0
for run_idx in self.run_idxs:
n_cycles += self.num_run_cycles(run_idx)
mapped_args = []
for arg in args:
# make a generator out of it to map as inputs
mapped_arg = (arg for i in range(n_cycles))
mapped_args.append(mapped_arg)
# make a generator for the arguments to pass to the function
# from the mapper, for the extra arguments we just have an
# endless generator
map_args = (self.iter_trajs_fields(fields, traj_sel=traj_sel, idxs=False),
*(it.repeat(arg) for arg in args))
results = map_func(func, *map_args)
if idxs:
if traj_sel is None:
traj_sel = self.run_traj_idx_tuples()
return zip(traj_sel, results)
else:
return results
def to_mdtraj(self, run_idx, traj_idx, frames=None, alt_rep=None):
"""Convert a trajectory to an mdtraj Trajectory object.
Works if the right trajectory fields are defined. Minimally
this is a representation, including the 'positions' field or
an 'alt_rep' subfield.
Will also set the unitcell lengths and angle if the
'box_vectors' field is present.
Will also set the time for the frames if the 'time' field is
present, although this is likely not useful since walker
segments have the time reset.
Parameters
----------
run_idx : int
traj_idx : int
frames : None or list of int
If not None, a list of the frames to include.
alt_rep : str
If not None, an 'alt_reps' subfield name to use for
positions instead of the 'positions' field.
Returns
-------
traj : mdtraj.Trajectory
"""
traj_grp = self.traj(run_idx, traj_idx)
# the default for alt_rep is the main rep
if alt_rep is None:
rep_key = POSITIONS
rep_path = rep_key
else:
rep_key = alt_rep
rep_path = '{}/{}'.format(ALT_REPS, alt_rep)
topology = self.get_mdtraj_topology(alt_rep=rep_key)
# get the frames if they are not given
if frames is None:
frames = self.get_traj_field_cycle_idxs(run_idx, traj_idx, rep_path)
# get the data for all or for the frames specified
positions = self.get_traj_field(run_idx, traj_idx, rep_path,
frames=frames, masked=False)
try:
time = self.get_traj_field(run_idx, traj_idx, TIME,
frames=frames, masked=False)[:, 0]
except KeyError:
warn("time not in this trajectory, ignoring")
time = None
try:
box_vectors = self.get_traj_field(run_idx, traj_idx, BOX_VECTORS,
frames=frames, masked=False)
except KeyError:
warn("box_vectors not in this trajectory, ignoring")
box_vectors = None
if box_vectors is not None:
unitcell_lengths, unitcell_angles = traj_box_vectors_to_lengths_angles(box_vectors)
if (box_vectors is not None) and (time is not None):
traj = mdj.Trajectory(positions, topology,
time=time,
unitcell_lengths=unitcell_lengths, unitcell_angles=unitcell_angles)
elif box_vectors is not None:
traj = mdj.Trajectory(positions, topology,
unitcell_lengths=unitcell_lengths, unitcell_angles=unitcell_angles)
elif time is not None:
traj = mdj.Trajectory(positions, topology,
time=time)
else:
traj = mdj.Trajectory(positions, topology)
return traj
def trace_to_mdtraj(self, trace, alt_rep=None):
"""Generate an mdtraj Trajectory from a trace of frames from the runs.
Uses the default fields for positions (unless an alternate
representation is specified) and box vectors which are assumed
to be present in the trajectory fields.
The time value for the mdtraj trajectory is set to the cycle
indices for each trace frame.
This is useful for converting WepyHDF5 data to common
molecular dynamics data formats accessible through the mdtraj
library.
Parameters
----------
trace : list of tuple of int
The trace values. Each tuple is of the form
(run_idx, traj_idx, frame_idx).
alt_rep : None or str
If None uses default 'positions' representation otherwise
chooses the representation from the 'alt_reps' compound field.
Returns
-------
traj : mdtraj.Trajectory
"""
rep_path = self._choose_rep_path(alt_rep)
trace_fields = self.get_trace_fields(trace, [rep_path, BOX_VECTORS])
return self.traj_fields_to_mdtraj(trace_fields, alt_rep=alt_rep)
def run_trace_to_mdtraj(self, run_idx, trace, alt_rep=None):
"""Generate an mdtraj Trajectory from a trace of frames from the runs.
Uses the default fields for positions (unless an alternate
representation is specified) and box vectors which are assumed
to be present in the trajectory fields.
The time value for the mdtraj trajectory is set to the cycle
indices for each trace frame.
This is useful for converting WepyHDF5 data to common
molecular dynamics data formats accessible through the mdtraj
library.
Parameters
----------
run_idx : int
The run the trace is over.
run_trace : list of tuple of int
The trace values. Each tuple is of the form
(traj_idx, frame_idx).
alt_rep : None or str
If None uses default 'positions' representation otherwise
chooses the representation from the 'alt_reps' compound field.
Returns
-------
traj : mdtraj.Trajectory
"""
rep_path = self._choose_rep_path(alt_rep)
trace_fields = self.get_run_trace_fields(run_idx, trace, [rep_path, BOX_VECTORS])
return self.traj_fields_to_mdtraj(trace_fields, alt_rep=alt_rep)
def _choose_rep_path(self, alt_rep):
"""Given a positions specification string, gets the field name/path
for it.
Parameters
----------
alt_rep : str
The short name (non relative path) for a representation of
the positions.
Returns
-------
rep_path : str
The relative field path to that representation.
E.g.:
If you give it 'positions' or None it will simply return
'positions', however if you ask for 'all_atoms' it will return
'alt_reps/all_atoms'.
"""
# the default for alt_rep is the main rep
if alt_rep == POSITIONS:
rep_path = POSITIONS
elif alt_rep is None:
rep_key = POSITIONS
rep_path = rep_key
# if it is already a path we don't add more to it and just
# return it.
elif len(alt_rep.split('/')) > 1:
if len(alt_rep.split('/')) > 2:
raise ValueError("unrecognized alt_rep spec")
elif alt_rep.split('/')[0] != ALT_REPS:
raise ValueError("unrecognized alt_rep spec")
else:
rep_path = alt_rep
else:
rep_key = alt_rep
rep_path = '{}/{}'.format(ALT_REPS, alt_rep)
return rep_path
def traj_fields_to_mdtraj(self, traj_fields, alt_rep=POSITIONS):
"""Create an mdtraj.Trajectory from a traj_fields dictionary.
Parameters
----------
traj_fields : dict of str : arraylike
Dictionary of the traj fields to their values
alt_reps : str
The base alt rep name for the positions representation to
use for the topology, should have the corresponding
alt_rep field in the traj_fields
        This is mainly a convenience function to retrieve the correct
        topology for the positions, which is then passed to the generic
        `traj_fields_to_mdtraj` function.
        Returns
        -------
        traj : mdtraj.Trajectory object
        """
rep_path = self._choose_rep_path(alt_rep)
json_topology = self.get_topology(alt_rep=rep_path)
return traj_fields_to_mdtraj(traj_fields, json_topology, rep_key=rep_path)
def copy_run_slice(self, run_idx, target_file_path, target_grp_path,
run_slice=None, mode='x'):
"""Copy this run to another HDF5 file (target_file_path) at the group
(target_grp_path)"""
assert mode in ['w', 'w-', 'x', 'r+'], "must be opened in write mode"
if run_slice is not None:
assert run_slice[1] >= run_slice[0], "Must be a contiguous slice"
# get a list of the frames to use
slice_frames = list(range(*run_slice))
# we manually construct an HDF5 wrapper and copy the groups over
new_h5 = h5py.File(target_file_path, mode=mode, libver=H5PY_LIBVER)
# flush the datasets buffers
self.h5.flush()
new_h5.flush()
# get the run group we are interested in
run_grp = self.run(run_idx)
# slice the datasets in the run and set them in the new file
if run_slice is not None:
# initialize the group for the run
new_run_grp = new_h5.require_group(target_grp_path)
# copy the init walkers group
self.h5.copy(run_grp[INIT_WALKERS], new_run_grp,
name=INIT_WALKERS)
# copy the decision group
self.h5.copy(run_grp[DECISION], new_run_grp,
name=DECISION)
# create the trajectories group
new_trajs_grp = new_run_grp.require_group(TRAJECTORIES)
# slice the trajectories and copy them
for traj_idx in run_grp[TRAJECTORIES]:
traj_grp = run_grp[TRAJECTORIES][traj_idx]
traj_id = "{}/{}".format(TRAJECTORIES, traj_idx)
new_traj_grp = new_trajs_grp.require_group(str(traj_idx))
for field_name in _iter_field_paths(run_grp[traj_id]):
field_path = "{}/{}".format(traj_id, field_name)
data = self.get_traj_field(run_idx, traj_idx, field_name,
frames=slice_frames)
# if it is a sparse field we need to create the
# dataset differently
if field_name in self.sparse_fields:
# create a group for the field
new_field_grp = new_traj_grp.require_group(field_name)
# slice the _sparse_idxs from the original
# dataset that are between the slice
cycle_idxs = self.traj(run_idx, traj_idx)[field_name]['_sparse_idxs'][:]
sparse_idx_idxs = np.argwhere(np.logical_and(
cycle_idxs[:] >= run_slice[0], cycle_idxs[:] < run_slice[1]
)).flatten().tolist()
# the cycle idxs there is data for
sliced_cycle_idxs = cycle_idxs[sparse_idx_idxs]
# get the data for these cycles
field_data = data[sliced_cycle_idxs]
# get the information on compression,
# chunking, and filters and use it when we set
# the new data
field_data_dset = traj_grp[field_name]['data']
data_dset_kwargs = {
'chunks' : field_data_dset.chunks,
'compression' : field_data_dset.compression,
'compression_opts' : field_data_dset.compression_opts,
'shuffle' : field_data_dset.shuffle,
'fletcher32' : field_data_dset.fletcher32,
}
# and for the sparse idxs although it is probably overkill
field_idxs_dset = traj_grp[field_name]['_sparse_idxs']
idxs_dset_kwargs = {
'chunks' : field_idxs_dset.chunks,
'compression' : field_idxs_dset.compression,
'compression_opts' : field_idxs_dset.compression_opts,
'shuffle' : field_idxs_dset.shuffle,
'fletcher32' : field_idxs_dset.fletcher32,
}
# then create the datasets
new_field_grp.create_dataset('_sparse_idxs',
data=sliced_cycle_idxs,
**idxs_dset_kwargs)
new_field_grp.create_dataset('data',
data=field_data,
**data_dset_kwargs)
else:
# get the information on compression,
# chunking, and filters and use it when we set
# the new data
field_dset = traj_grp[field_name]
# since we are slicing we want to make sure
# that the chunks are smaller than the
# slices. Normally chunks are (1, ...) for a
# field, but may not be for observables
# (perhaps they should but thats for another issue)
chunks = (1, *field_dset.chunks[1:])
dset_kwargs = {
'chunks' : chunks,
'compression' : field_dset.compression,
'compression_opts' : field_dset.compression_opts,
'shuffle' : field_dset.shuffle,
'fletcher32' : field_dset.fletcher32,
}
# require the dataset first to automatically build
# subpaths for compound fields if necessary
dset = new_traj_grp.require_dataset(field_name,
data.shape, data.dtype,
**dset_kwargs)
# then set the data depending on whether it is
# sparse or not
dset[:] = data
# then do it for the records
for rec_grp_name, rec_fields in self.record_fields.items():
rec_grp = run_grp[rec_grp_name]
# if this is a contiguous record we can skip the cycle
# indices to record indices conversion that is
# necessary for sporadic records
if self._is_sporadic_records(rec_grp_name):
cycle_idxs = rec_grp[CYCLE_IDXS][:]
# get dataset info
cycle_idxs_dset = rec_grp[CYCLE_IDXS]
# we use autochunk, because I can't figure out how
# the chunks are set and I can't reuse them
idxs_dset_kwargs = {
'chunks' : True,
# 'chunks' : cycle_idxs_dset.chunks,
'compression' : cycle_idxs_dset.compression,
'compression_opts' : cycle_idxs_dset.compression_opts,
'shuffle' : cycle_idxs_dset.shuffle,
'fletcher32' : cycle_idxs_dset.fletcher32,
}
# get the indices of the records we are interested in
record_idxs = np.argwhere(np.logical_and(
cycle_idxs >= run_slice[0], cycle_idxs < run_slice[1]
)).flatten().tolist()
# set the cycle indices in the new run group
new_recgrp_cycle_idxs_path = '{}/{}/_cycle_idxs'.format(target_grp_path,
rec_grp_name)
cycle_data = cycle_idxs[record_idxs]
cycle_dset = new_h5.require_dataset(new_recgrp_cycle_idxs_path,
cycle_data.shape, cycle_data.dtype,
**idxs_dset_kwargs)
cycle_dset[:] = cycle_data
# if contiguous just set the record indices as the
# range between the slice
else:
record_idxs = list(range(run_slice[0], run_slice[1]))
# then for each rec_field slice those and set them in the new file
for rec_field in rec_fields:
field_dset = rec_grp[rec_field]
# get dataset info
field_dset_kwargs = {
'chunks' : True,
# 'chunks' : field_dset.chunks,
'compression' : field_dset.compression,
'compression_opts' : field_dset.compression_opts,
'shuffle' : field_dset.shuffle,
'fletcher32' : field_dset.fletcher32,
}
rec_field_path = "{}/{}".format(rec_grp_name, rec_field)
new_recfield_grp_path = '{}/{}'.format(target_grp_path, rec_field_path)
# if it is a variable length dtype make the dtype
# that for the dataset and we also slice the
# dataset differently
vlen_type = h5py.check_dtype(vlen=field_dset.dtype)
if vlen_type is not None:
dtype = h5py.special_dtype(vlen=vlen_type)
else:
dtype = field_dset.dtype
# if there are no records don't attempt to add them
# get the shape
shape = (len(record_idxs), *field_dset.shape[1:])
new_field_dset = new_h5.require_dataset(new_recfield_grp_path,
shape, dtype,
**field_dset_kwargs)
# if there aren't records just don't do anything,
# and if there are get them and add them
if len(record_idxs) > 0:
rec_data = field_dset[record_idxs]
# if it is a variable length data type we have
# to do it 1 by 1
if vlen_type is not None:
for i, vlen_rec in enumerate(rec_data):
new_field_dset[i] = rec_data[i]
# otherwise just set it all at once
else:
new_field_dset[:] = rec_data
# just copy the whole thing over, since this will probably be
# more efficient
else:
            # split off the last bit of the target path; for copying we
            # need its parent group to exist, but not the target itself
            target_grp_path_basename = target_grp_path.split('/')[-1]
            # h5py needs the parent path as a string, and the parent group
            # must be created in the target file
            target_grp_path_prefix = '/'.join(target_grp_path.split('/')[:-1]) or '/'
            new_run_prefix_grp = new_h5.require_group(target_grp_path_prefix)
            # copy the whole thing
            self.h5.copy(run_grp, new_run_prefix_grp,
                         name=target_grp_path_basename)
# flush the datasets buffers
self.h5.flush()
new_h5.flush()
return new_h5
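# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It assumes the enclosing class is exposed as `WepyHDF5` and that a file
# 'results.wepy.h5' with the expected run/trajectory layout exists; treat the
# file name, observable name, and constructor signature as assumptions.
def _example_observable_and_trace_usage():
    # open an existing wepy HDF5 file in a writable mode so that
    # compute_observable can store its results
    wepy_h5 = WepyHDF5('results.wepy.h5', mode='r+')
    # a toy per-trajectory observable: one scalar per frame taken from the
    # first coordinate of the first atom in the 'positions' field
    def first_coord(fields_d, *args):
        return fields_d['positions'][:, 0, 0]
    # map it over every trajectory and save it under observables/first_coord
    wepy_h5.compute_observable(first_coord, ['positions'], (),
                               save_to_hdf5='first_coord',
                               return_results=False)
    # pull two frames out as a trace and convert them to an mdtraj Trajectory
    trace = [(0, 0, 0), (0, 1, 5)]
    fields = wepy_h5.get_trace_fields(trace, ['positions', 'box_vectors'])
    traj = wepy_h5.trace_to_mdtraj(trace)
    return fields, traj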
| mit | -871,012,172,022,830,700 | 33.068437 | 119 | 0.58065 | false | 4.334865 | false | false | false |
cerivera/crossfire | bin/firefox/addon-sdk-1.15/python-lib/cuddlefish/docs/generate.py | 1 | 9935 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import shutil
import hashlib
import tarfile
import StringIO
from cuddlefish._version import get_versions
from cuddlefish.docs import apiparser
from cuddlefish.docs import apirenderer
from cuddlefish.docs import webdocs
from documentationitem import get_module_list
from documentationitem import get_devguide_list
from documentationitem import ModuleInfo
from documentationitem import DevGuideItemInfo
from linkrewriter import rewrite_links
import simplejson as json
DIGEST = "status.md5"
TGZ_FILENAME = "addon-sdk-docs.tgz"
def get_sdk_docs_path(env_root):
return os.path.join(env_root, "doc")
def get_base_url(env_root):
sdk_docs_path = get_sdk_docs_path(env_root).lstrip("/")
return "file://"+"/"+"/".join(sdk_docs_path.split(os.sep))+"/"
def clean_generated_docs(docs_dir):
status_file = os.path.join(docs_dir, "status.md5")
if os.path.exists(status_file):
os.remove(status_file)
index_file = os.path.join(docs_dir, "index.html")
if os.path.exists(index_file):
os.remove(index_file)
dev_guide_dir = os.path.join(docs_dir, "dev-guide")
if os.path.exists(dev_guide_dir):
shutil.rmtree(dev_guide_dir)
api_doc_dir = os.path.join(docs_dir, "modules")
if os.path.exists(api_doc_dir):
shutil.rmtree(api_doc_dir)
def generate_static_docs(env_root, override_version=get_versions()["version"]):
clean_generated_docs(get_sdk_docs_path(env_root))
generate_docs(env_root, override_version, stdout=StringIO.StringIO())
tgz = tarfile.open(TGZ_FILENAME, 'w:gz')
tgz.add(get_sdk_docs_path(env_root), "doc")
tgz.close()
return TGZ_FILENAME
def generate_local_docs(env_root):
return generate_docs(env_root, get_versions()["version"], get_base_url(env_root))
def generate_named_file(env_root, filename_and_path):
module_list = get_module_list(env_root)
web_docs = webdocs.WebDocs(env_root, module_list, get_versions()["version"], get_base_url(env_root))
abs_path = os.path.abspath(filename_and_path)
path, filename = os.path.split(abs_path)
if abs_path.startswith(os.path.join(env_root, 'doc', 'module-source')):
module_root = os.sep.join([env_root, "doc", "module-source"])
module_info = ModuleInfo(env_root, module_root, path, filename)
write_module_doc(env_root, web_docs, module_info, False)
elif abs_path.startswith(os.path.join(get_sdk_docs_path(env_root), 'dev-guide-source')):
devguide_root = os.sep.join([env_root, "doc", "dev-guide-source"])
devguideitem_info = DevGuideItemInfo(env_root, devguide_root, path, filename)
write_devguide_doc(env_root, web_docs, devguideitem_info, False)
else:
raise ValueError("Not a valid path to a documentation file")
def generate_docs(env_root, version=get_versions()["version"], base_url=None, stdout=sys.stdout):
docs_dir = get_sdk_docs_path(env_root)
# if the generated docs don't exist, generate everything
if not os.path.exists(os.path.join(docs_dir, "dev-guide")):
print >>stdout, "Generating documentation..."
generate_docs_from_scratch(env_root, version, base_url)
current_status = calculate_current_status(env_root)
open(os.path.join(docs_dir, DIGEST), "w").write(current_status)
else:
current_status = calculate_current_status(env_root)
previous_status_file = os.path.join(docs_dir, DIGEST)
docs_are_up_to_date = False
if os.path.exists(previous_status_file):
docs_are_up_to_date = current_status == open(previous_status_file, "r").read()
# if the docs are not up to date, generate everything
if not docs_are_up_to_date:
print >>stdout, "Regenerating documentation..."
generate_docs_from_scratch(env_root, version, base_url)
open(os.path.join(docs_dir, DIGEST), "w").write(current_status)
return get_base_url(env_root) + "index.html"
# this function builds a hash of the name and last modification date of:
# * every file in "doc/module-source" which ends in ".md"
# * every file in "doc/dev-guide-source" which ends in ".md"
# * every file in "packages" which ends in ".md"
# * the "doc/static-files/base.html" template
def calculate_current_status(env_root):
docs_dir = get_sdk_docs_path(env_root)
current_status = hashlib.md5()
module_src_dir = os.path.join(env_root, "doc", "module-source")
for (dirpath, dirnames, filenames) in os.walk(module_src_dir):
for filename in filenames:
if filename.endswith(".md"):
current_status.update(filename)
current_status.update(str(os.path.getmtime(os.path.join(dirpath, filename))))
guide_src_dir = os.path.join(docs_dir, "dev-guide-source")
for (dirpath, dirnames, filenames) in os.walk(guide_src_dir):
for filename in filenames:
if filename.endswith(".md"):
current_status.update(filename)
current_status.update(str(os.path.getmtime(os.path.join(dirpath, filename))))
package_dir = os.path.join(env_root, "packages")
for (dirpath, dirnames, filenames) in os.walk(package_dir):
for filename in filenames:
if filename.endswith(".md"):
current_status.update(filename)
current_status.update(str(os.path.getmtime(os.path.join(dirpath, filename))))
base_html_file = os.path.join(docs_dir, "static-files", "base.html")
current_status.update(base_html_file)
current_status.update(str(os.path.getmtime(os.path.join(dirpath, base_html_file))))
return current_status.digest()
def generate_docs_from_scratch(env_root, version, base_url):
docs_dir = get_sdk_docs_path(env_root)
module_list = get_module_list(env_root)
web_docs = webdocs.WebDocs(env_root, module_list, version, base_url)
must_rewrite_links = True
if base_url:
must_rewrite_links = False
clean_generated_docs(docs_dir)
# py2.5 doesn't have ignore=, so we delete tempfiles afterwards. If we
# required >=py2.6, we could use ignore=shutil.ignore_patterns("*~")
for (dirpath, dirnames, filenames) in os.walk(docs_dir):
for n in filenames:
if n.endswith("~"):
os.unlink(os.path.join(dirpath, n))
# generate api docs for all modules
if not os.path.exists(os.path.join(docs_dir, "modules")):
os.mkdir(os.path.join(docs_dir, "modules"))
[write_module_doc(env_root, web_docs, module_info, must_rewrite_links) for module_info in module_list]
# generate third-party module index
third_party_index_file = os.sep.join([env_root, "doc", "module-source", "third-party-modules.md"])
third_party_module_list = [module_info for module_info in module_list if module_info.level() == "third-party"]
write_module_index(env_root, web_docs, third_party_index_file, third_party_module_list, must_rewrite_links)
# generate high-level module index
high_level_index_file = os.sep.join([env_root, "doc", "module-source", "high-level-modules.md"])
high_level_module_list = [module_info for module_info in module_list if module_info.level() == "high"]
write_module_index(env_root, web_docs, high_level_index_file, high_level_module_list, must_rewrite_links)
# generate low-level module index
low_level_index_file = os.sep.join([env_root, "doc", "module-source", "low-level-modules.md"])
low_level_module_list = [module_info for module_info in module_list if module_info.level() == "low"]
write_module_index(env_root, web_docs, low_level_index_file, low_level_module_list, must_rewrite_links)
# generate dev-guide docs
devguide_list = get_devguide_list(env_root)
[write_devguide_doc(env_root, web_docs, devguide_info, must_rewrite_links) for devguide_info in devguide_list]
    # make dev-guide-source/index.md the top level index file
doc_html = web_docs.create_guide_page(os.path.join(docs_dir, 'dev-guide-source', 'index.md'))
write_file(env_root, doc_html, docs_dir, 'index', False)
def write_module_index(env_root, web_docs, source_file, module_list, must_rewrite_links):
doc_html = web_docs.create_module_index(source_file, module_list)
base_filename, extension = os.path.splitext(os.path.basename(source_file))
destination_path = os.sep.join([env_root, "doc", "modules"])
write_file(env_root, doc_html, destination_path, base_filename, must_rewrite_links)
def write_module_doc(env_root, web_docs, module_info, must_rewrite_links):
doc_html = web_docs.create_module_page(module_info)
write_file(env_root, doc_html, module_info.destination_path(), module_info.base_filename(), must_rewrite_links)
def write_devguide_doc(env_root, web_docs, devguide_info, must_rewrite_links):
doc_html = web_docs.create_guide_page(devguide_info.source_path_and_filename())
write_file(env_root, doc_html, devguide_info.destination_path(), devguide_info.base_filename(), must_rewrite_links)
def write_file(env_root, doc_html, dest_dir, filename, must_rewrite_links):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
dest_path_html = os.path.join(dest_dir, filename) + ".html"
replace_file(env_root, dest_path_html, doc_html, must_rewrite_links)
return dest_path_html
def replace_file(env_root, dest_path, file_contents, must_rewrite_links):
if os.path.exists(dest_path):
os.remove(dest_path)
# before we copy the final version, we'll rewrite the links
# I'll do this last, just because we know definitely what the dest_path is at this point
if must_rewrite_links and dest_path.endswith(".html"):
file_contents = rewrite_links(env_root, get_sdk_docs_path(env_root), file_contents, dest_path)
open(dest_path, "w").write(file_contents)
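# Usage sketch (added; not part of the upstream module): the normal entry
# point is the cuddlefish/cfx command line tool, which calls into this module
# roughly as follows (the SDK path is hypothetical):
#
#   from cuddlefish.docs import generate
#   index_url = generate.generate_local_docs("/path/to/addon-sdk")
#   tarball = generate.generate_static_docs("/path/to/addon-sdk")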
| mit | 2,806,518,507,816,887,300 | 49.176768 | 119 | 0.688576 | false | 3.241436 | false | false | false |
rhnvrm/mini-projects | bioinformatics/Motifs.py | 1 | 3302 | #! /usr/bin/python3.4
# -*-coding:utf-8 -*
def Count(Motifs):
"""
Returns the count matrix of a list of sequences.
The count matrix is the number of times a nucleotid appears at a position in the pool of sequences (Motifs).
:param Motifs: The sequences to make the count matrix of.
:type Motifs: list of string
:return: the count matrix
:rtype: dict of list of int with nucleotids as keys.
..seealso:: Count()
"""
count = {}
k = len(Motifs[0])
for symbol in "ACGT":
count[symbol] = []
for j in range(k):
count[symbol].append(0)
t = len(Motifs)
for i in range(t):
for j in range(k):
symbol = Motifs[i][j]
count[symbol][j] += 1
return count
def Profile(Motifs):
"""
Returns the profile matrix of a list of sequences.
The profile matrix is the frequency of a nucleotid at a position in the pool of sequences (Motifs).
:param Motifs: The sequences to make the profile matrix of.
:type Motifs: list of string
:return: the profile matrix
:rtype: dict of list of float with nucleotids as keys.
..seealso:: Count()
"""
t = len(Motifs)
k = len(Motifs[0])
profile = {}
count= Count(Motifs)
for symbol in "ACGT":
profile[symbol] = []
for j in range(k):
profile[symbol].append(0)
for symbol in "ACGT":
for j in range(k):
if t >0:
profile[symbol][j]= count[symbol][j]/t
return profile
def Consensus(Motifs):
"""
Returns the consensus sequence of several sequences.
:param Motifs: the sequences to produce a consensus of.
:type Motifs: list of string
:return: the consensus sequence
:rtype: string
..warnings:: the strings in Motifs must only be composed on the letters A,C,G,T.
..seealso:: Count()
"""
consensus = ""
k = len(Motifs[0])
count=Count(Motifs)
for j in range(k):
m = 0
frequentSymbol = ""
for symbol in "ACGT":
if count[symbol][j] > m:
m = count[symbol][j]
frequentSymbol = symbol
consensus += frequentSymbol
return consensus
def Score(Motifs):
"""
    Returns the number of unpopular letters in the motif matrix (Motifs).
:param Motifs: the motif matrix.
:type Motifs: a list of string
:return: the number of unpopular letters in the motif matrix.
:rtype: int
..seealso:: Count(), Consensus()
"""
t = len(Motifs)
k = len(Motifs[0])
score=0
count=Count(Motifs)
consensus = Consensus(Motifs)
for symbol in "ACGT":
for j in range(k):
if symbol != consensus[j]:
score += count[symbol][j]
return score
# Input: String Text and profile matrix Profile
# Output: Pr(Text, Profile)
def Pr(Text, Profile):
# insert your code here
compteur=0
Pr=1
for letter in Text:
Pr=Pr*Profile[letter][compteur]
compteur+=1
return Pr
# Input: String Text, an integer k, and profile matrix Profile
# Output: ProfileMostProbablePattern(Text, k, Profile)
def ProfileMostProbablePattern(Text, k, Profile):
prm=-1
s=""
for i in range(len(Text)-k+1):
pr=Pr(Text[i:i+k],Profile)
if pr>prm:
prm=pr
s=str(Text[i:i+k])
return str(s)
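# Minimal demo (added sketch, not part of the original script): exercises the
# functions above on a tiny motif matrix.
if __name__ == "__main__":
    motifs = ["AACGTA",
              "CCCGTT",
              "CACCTT",
              "GGATTA",
              "TTCCGG"]
    print(Count(motifs))        # per-position nucleotide counts
    print(Profile(motifs))      # per-position nucleotide frequencies
    print(Consensus(motifs))    # consensus string, here "CACCTA"
    print(Score(motifs))        # number of letters disagreeing with the consensus
    profile = Profile(motifs)
    print(Pr("AACGTA", profile))                                           # probability of one 6-mer
    print(ProfileMostProbablePattern("ACCTGTTTATTGCCTAAGTT", 6, profile))  # best 6-mer in a text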
| mit | -4,212,984,857,459,504,000 | 25 | 109 | 0.608419 | false | 3.298701 | false | false | false |
aileisun/bubbleimg | bubbleimg/imgdownload/hsc/hscimgloader.py | 1 | 12117 | # hscimgloader.py
# ALS 2017/05/02
import numpy as np
import os
import requests
import astropy.units as u
from astropy.io import fits
import re
from ..loader import imgLoader
from ...filters import surveysetup
from ..get_credential import getCrendential
from . import hscurl
nanomaggy = u.def_unit('nanomaggy', 3.631e-6*u.Jy)
u.add_enabled_units([nanomaggy])
u.nanomaggy=nanomaggy
class hscimgLoader(imgLoader):
def __init__(self, **kwargs):
"""
hscimgLoader, child of imgLoader
download stamps from HSC DAS Quarry
download psf by either:
(iaa) call sumire to infer psf from calexp and download psf from sumire
(online) download calexp from server and infer psf locally
on top of imgLoader init, set self.survey = 'hsc',
add attributes self.img_width_pix, self.img_height_pix
do not load obj.sdss.xid by default unless to_make_obj_sdss= True
Additional Params
-----------------
rerun = 's16a_wide' (str)
release_version = 'dr1' (str)
username (optional) (str): STARs account
password (optional) (str): STARs account
Public Methods
--------------
__init__(self, **kwargs)
make_stamp(self, band, overwrite=False, **kwargs)
make_stamps(self, overwrite=False, **kwargs)
make_psf(self, band, overwrite=False, to_keep_calexp=False)
make_psfs(self, overwrite=False, to_keep_calexp=False)
Instruction for stars username and password
-------------------------------------------
1) as arguments
hscimgLoader(..., username=username, password=password)
2) as environmental variable
$ export HSC_SSP_CAS_USERNAME
$ read -s HSC_SSP_CAS_USERNAME
$ export HSC_SSP_CAS_PASSWORD
$ read -s HSC_SSP_CAS_PASSWORD
3) enter from terminal
Attributes
----------
(in addition to loader attributes)
rerun = s16a_wide
semester = s16a
release_version = dr1
survey = 'hsc'
bands = ['g', 'r', 'i', 'z', 'y']
username
password
status (bool)
whether an hsc object is successfully identified
"""
super(hscimgLoader, self).__init__(**kwargs)
# set data release parameters
self.rerun = kwargs.pop('rerun', 's16a_wide')
self.semester = self.rerun.split('_')[0]
self.release_version = kwargs.pop('release_version', 'dr1')
# set hsc object parameters
self.status = super(self.__class__, self).add_obj_hsc(update=False, release_version=self.release_version, rerun=self.rerun)
self.survey = 'hsc'
self.bands = surveysetup.surveybands[self.survey]
self.pixsize = surveysetup.pixsize[self.survey]
self._add_attr_img_width_pix_arcsec()
# set connection parameters
self.__username = kwargs.pop('username', '')
self.__password = kwargs.pop('password', '')
if self.__username == '' or self.__password == '':
self.__username = getCrendential("HSC_SSP_CAS_USERNAME", cred_name = 'STARs username')
self.__password = getCrendential("HSC_SSP_CAS_PASSWORD", cred_name = 'STARs password')
def _get_fn_calexp(self, band):
return 'calexp-{0}.fits'.format(band)
def _get_filter_name(self, band):
return "HSC-{band}".format(band=band.upper())
def make_stamp(self, band, overwrite=False, **kwargs):
"""
make stamp image of the specified band of the object. takes care of overwrite with argument 'overwrite'. Default: do not overwrite. See _download_stamp() for specific implementation.
Params
----------
band (string) = 'r'
overwrite (boolean) = False
**kwargs: to be passed to _download_stamp()
e.g., imgtype='coadd', tract='', rerun='', see _download_stamp()
if not specified then use self.rerun.
Return
----------
status: True if downloaded or skipped, False if download fails
"""
return self._imgLoader__make_file_core(func_download_file=self._download_stamp, func_naming_file=self.get_fn_stamp, band=band, overwrite=overwrite, **kwargs)
def make_stamps(self, overwrite=False, **kwargs):
"""
make stamps of all bands, see make_stamp()
"""
return self._imgLoader__make_files_core(func_download_file=self._download_stamp, func_naming_file=self.get_fn_stamp, overwrite=overwrite, **kwargs)
def _download_stamp(self, band, imgtype='coadd', tract='', tokeepraw=False, n_trials=5):
"""
        download hsc cutout img using HSC DAS Quarry. Provides only ra, dec to DAS Quarry and downloads the default coadd. Always overwrites, then
        converts it to a stamp image.
ra, dec can be decimal degrees (12.345) or sexagesimal (1:23:35)
for details see hsc query manual
https://hscdata.mtk.nao.ac.jp/das_quarry/manual.html
Args
--------
band
imgtype='coadd'
tract=''
tokeepraw = False (bool):
whether to keep the downloaded raw HSC image, which has four extensions.
n_trials=5
how many times to retry requesting if there is requests errors such as connection error.
Return
----------
status: True if downloaded, False if download fails
"""
rerun = self.rerun
# setting
fp_out = self.get_fp_stamp(band)
semi_width_inarcsec = (self.img_width_arcsec.to(u.arcsec).value/2.)-0.1 # to get pix number right
semi_height_inarcsec = (self.img_height_arcsec.to(u.arcsec).value/2.)-0.1
sw = '%.5f'%semi_width_inarcsec+'asec'
sh = '%.5f'%semi_height_inarcsec+'asec'
# get url
url = hscurl.get_hsc_cutout_url(self.ra, self.dec, band=band, rerun=rerun, tract=tract, imgtype=imgtype, sw=sw, sh=sh)
# query, download, and convert to new unit
# writing two files (if successful): raw img file and stamp img file.
rqst = self._retry_request(url, n_trials=n_trials)
if rqst.status_code == 200:
fp_raw = self._write_request_to_file(rqst)
self._write_fits_unit_specified_in_nanomaggy(filein=fp_raw, fileout=fp_out)
if not tokeepraw:
os.remove(fp_raw)
return True
else:
print("[hscimgloader] image cannot be retrieved")
return False
def make_psf(self, band, overwrite=False, **kwargs):
"""
make psf image of the specified band of the object. See _download_psf() for details.
Params
----------
band (string) = 'r'
overwrite (boolean) = False
**kwargs: to be passed to _download_psf()
e.g., imgtype='coadd'
Return
----------
status: True if downloaded or skipped, False if download fails
"""
return self._imgLoader__make_file_core(func_download_file=self._download_psf, func_naming_file=self.get_fn_psf, band=band, overwrite=overwrite, **kwargs)
def make_psfs(self, overwrite=False, **kwargs):
"""
make psfs of all bands, see make_psf()
"""
return self._imgLoader__make_files_core(func_download_file=self._download_psf, func_naming_file=self.get_fn_psf, overwrite=overwrite, **kwargs)
def _download_psf(self, band, imgtype='coadd', rerun='', tract='', patch_s='', n_trials=5):
"""
        download the hsc psf image. Provides only ra, dec and downloads the psf of the default coadd. Always overwrites. If rerun is not specified then use self.rerun.
for details see manual https://hscdata.mtk.nao.ac.jp/psf/4/manual.html#Bulk_mode
https://hscdata.mtk.nao.ac.jp/das_quarry/manual.html
Args
--------
band
imgtype='coadd'
rerun=self.rerun
tract=''
patch_s=''
n_trials=5
how many times to retry requesting if there is requests errors such as connection error.
Return
----------
status: True if downloaded, False if download fails
"""
if rerun == '':
rerun = self.rerun
# setting
fp_out = self.get_fp_psf(band)
# get url
url = hscurl.get_hsc_psf_url(ra=self.ra, dec=self.dec, band=band, rerun=rerun, tract=tract, patch=patch_s, imgtype=imgtype)
# download
rqst = self._retry_request(url, n_trials=n_trials)
if rqst.status_code == 200:
self._write_request_to_file(rqst, fn=os.path.basename(fp_out))
return True
else:
print("[hscimgloader] psf cannot be retrieved")
return False
def _retry_request(self, url, n_trials=5):
"""
        Requests the url and retries up to n_trials times if requests exceptions (e.g. ConnectionError) are raised. Uses self.__username and self.__password for authentication.
"""
for _ in range(n_trials):
try:
rqst = requests.get(url, auth=(self.__username, self.__password))
return rqst
break
except requests.exceptions.RequestException as e:
print(("[hscimgloader] retrying as error detected: "+str(e)))
def _write_request_to_file(self, rqst, fn=''):
"""
write requested file under self.dir_obj with original filename unless filename specified
Args
--------
rqst: request result
fn ='' (str):
the filename to be saved to. default: use original filename.
Return
--------
fp_out (string): the entire filepath to the file written
"""
d = rqst.headers['content-disposition']
if fn == '':
fn = re.findall("filename=(.+)", d)[0][1:-1]
fp_out = self.dir_obj + fn
with open(fp_out, 'wb') as out:
for bits in rqst.iter_content():
out.write(bits)
return fp_out
def _write_fits_unit_converted_to_nanomaggy(self, filein, fileout):
"""
        !!!!!!! WARNING !!!!!!!! this function is not currently used
Convert raw hsc image to an image with unit nanomaggy, changing the data value.
take only the second hdu hdu[1] as data in output
read in fits file filein with no bunit but FLUXMAG0 and convert to one fits file with unit nanomaggy, and write to fileout.
Notes on Unit conversion
-----------
HSC fluxmag0 is set such that a pix value of 1 has a magAB of 27 so:
fluxmag0 = header_combine['FLUXMAG0']
# 63095734448.0194
pixunit = 10.**-19.44 / fluxmag0 * (u.erg * u.s**-1 * u.cm**-2 * u.Hz**-1)
# u.Quantity('5.754399373371546e-31 erg / cm2 / Hz / s')
nanomaggy_per_raw_unit = float((u.nanomaggy/pixunit).decompose())
# 63.099548091890085
But this function should work even with other fluxmag 0, as we set
nanomaggy_per_raw_unit = fluxmag0 * 10**-9
"""
hdu = fits.open(filein)
header_combine = hdu[1].header+hdu[0].header
# sanity check
if header_combine['FLUXMAG0'] != 63095734448.0194:
raise ValueError("HSC FLUXMAG0 different from usual. Although shouldnt be a problem")
if 'BUNIT' in header_combine:
raise ValueError("Input fits file should not have BUNIT")
nanomaggy_per_raw_unit = header_combine['FLUXMAG0']*10.**-9
data_nanomaggy = hdu[1].data/nanomaggy_per_raw_unit
header_combine.set(keyword='BUNIT', value='nanomaggy', comment="1 nanomaggy = 3.631e-6 Jy")
header_combine['COMMENT'] = "Unit converted to nanomaggy by ALS"
header_combine.remove(keyword='FLUXMAG0')
hdu_abbrv = fits.PrimaryHDU(data_nanomaggy, header=header_combine)
hdu_abbrv.writeto(fileout, overwrite=True)
def _write_fits_unit_specified_in_nanomaggy(self, filein, fileout):
"""
Convert a raw hsc image to an image with unit nanomaggy, the data values unchanged.
Take only the second hdu hdu[1] as data in output.
read in fits file filein with no bunit but FLUXMAG0 and convert to one fits file with unit nanomaggy, and write to fileout.
Notes on Unit conversion
-----------
HSC fluxmag0 is set such that a pix value of 1 has a magAB of 27 so:
fluxmag0 = header_combine['FLUXMAG0']
# 63095734448.0194
pixunit = 10.**-19.44 / fluxmag0 * (u.erg * u.s**-1 * u.cm**-2 * u.Hz**-1)
# u.Quantity('5.754399373371546e-31 erg / cm2 / Hz / s')
nanomaggy_per_raw_unit = float((u.nanomaggy/pixunit).decompose())
# 63.099548091890085
raw_unit_per_nanomaggy = 1/nanomaggy_per_raw_unit
# 0.015847974038478506
But this function should work even with other fluxmag 0, as we set
nanomaggy_per_raw_unit = fluxmag0 * 10**-9
"""
hdu = fits.open(filein)
header_combine = hdu[1].header+hdu[0].header
# sanity check
if header_combine['FLUXMAG0'] != 63095734448.0194:
raise ValueError("HSC FLUXMAG0 different from assumed")
if 'BUNIT' in header_combine:
raise ValueError("Input fits file should not have BUNIT")
bunit = '1.58479740e-02 nanomaggy'
header_combine.set(keyword='BUNIT', value=bunit, comment="1 nanomaggy = 3.631e-6 Jy")
header_combine['COMMENT'] = "Unit specified in nanomaggy by ALS"
data = hdu[1].data
hdu_abbrv = fits.PrimaryHDU(data, header=header_combine)
hdu_abbrv.writeto(fileout, overwrite=True)
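# Usage sketch (added; not part of the original module). The exact
# constructor arguments come from the parent imgLoader class (ra, dec,
# dir_obj, etc.), so treat the names below as assumptions.
def _example_hsc_download():
    # STARs credentials are read from HSC_SSP_CAS_USERNAME/PASSWORD if not given
    loader = hscimgLoader(ra=150.0, dec=2.2, dir_obj='./obj_example/',
                          rerun='s16a_wide', release_version='dr1')
    if loader.status:
        loader.make_stamps(overwrite=False)   # grizy stamp FITS files
        loader.make_psfs(overwrite=False)     # matching PSF images
    return loader.status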
| mit | -9,032,188,369,782,719,000 | 29.444724 | 185 | 0.68276 | false | 2.941019 | false | false | false |
kgullikson88/HET-Scripts | Smooth.py | 1 | 6217 | import numpy as np
# import FitsUtils
import FittingUtilities
import HelperFunctions
import matplotlib.pyplot as plt
import sys
import os
from astropy import units
from astropy.io import fits, ascii
import DataStructures
from scipy.interpolate import InterpolatedUnivariateSpline as interp
import MakeModel
import HelperFunctions
from collections import Counter
from sklearn.gaussian_process import GaussianProcess
import warnings
def SmoothData(order, windowsize=91, smoothorder=5, lowreject=3, highreject=3, numiters=10, expand=0, normalize=True):
denoised = HelperFunctions.Denoise(order.copy())
denoised.y = FittingUtilities.Iterative_SV(denoised.y, windowsize, smoothorder, lowreject=lowreject,
highreject=highreject, numiters=numiters, expand=expand)
if normalize:
denoised.y /= denoised.y.max()
return denoised
def roundodd(num):
rounded = round(num)
if rounded % 2 != 0:
return rounded
else:
if rounded > num:
return rounded - 1
else:
return rounded + 1
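# roundodd() rounds to a nearby odd integer, e.g. (added doctest-style sketch):
#   roundodd(4.2) -> 5,  roundodd(5.7) -> 5,  roundodd(7.0) -> 7
# It is used below to turn the vsini broadening width (in pixels) into an odd
# window size for the smoothing in SmoothData().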
def GPSmooth(data, low=0.1, high=10, debug=False):
"""
This will smooth the data using Gaussian processes. It will find the best
smoothing parameter via cross-validation to be between the low and high.
The low and high keywords are reasonable bounds for A and B stars with
vsini > 100 km/s.
"""
smoothed = data.copy()
# First, find outliers by doing a guess smooth
smoothed = SmoothData(data, normalize=False)
temp = smoothed.copy()
temp.y = data.y / smoothed.y
temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
outliers = HelperFunctions.FindOutliers(temp, numsiglow=3, expand=5)
if len(outliers) > 0:
data.y[outliers] = smoothed.y[outliers]
gp = GaussianProcess(corr='squared_exponential',
theta0=np.sqrt(low * high),
thetaL=low,
thetaU=high,
normalize=False,
nugget=(data.err / data.y) ** 2,
random_start=1)
try:
gp.fit(data.x[:, None], data.y)
except ValueError:
#On some orders with large telluric residuals, this will fail.
# Just fall back to the old smoothing method in that case.
return SmoothData(data), 91
if debug:
print "\tSmoothing parameter theta = ", gp.theta_
smoothed.y, smoothed.err = gp.predict(data.x[:, None], eval_MSE=True)
return smoothed, gp.theta_[0][0]
if __name__ == "__main__":
fileList = []
plot = False
vsini_file = "%s/School/Research/Useful_Datafiles/Vsini.csv" % (os.environ["HOME"])
for arg in sys.argv[1:]:
if "-p" in arg:
plot = True
elif "-vsini" in arg:
vsini_file = arg.split("=")[-1]
else:
fileList.append(arg)
#Read in the vsini table
vsini_data = ascii.read(vsini_file)[10:]
if len(fileList) == 0:
fileList = [f for f in os.listdir("./") if f.endswith("telluric_corrected.fits")]
for fname in fileList:
orders = HelperFunctions.ReadFits(fname, extensions=True, x="wavelength", y="flux", cont="continuum",
errors="error")
#Find the vsini of this star
header = fits.getheader(fname)
starname = header["object"].split()[0].replace("_", " ")
found = False
for data in vsini_data:
if data[0] == starname:
vsini = float(data[1])
found = True
if not found:
outfile = open("Warnings.log", "a")
outfile.write("Cannot find %s in the vsini data: %s\n" % (starname, vsini_file))
outfile.close()
warnings.warn("Cannot find %s in the vsini data: %s" % (starname, vsini_file))
print starname, vsini
#Begin looping over the orders
column_list = []
header_list = []
for i, order in enumerate(orders):
print "Smoothing order %i/%i" % (i + 1, len(orders))
#Fix errors
order.err[order.err > 1e8] = np.sqrt(order.y[order.err > 1e8])
#Linearize
xgrid = np.linspace(order.x[0], order.x[-1], order.x.size)
order = FittingUtilities.RebinData(order, xgrid)
dx = order.x[1] - order.x[0]
smooth_factor = 0.8
theta = roundodd(vsini / 3e5 * order.x.mean() / dx * smooth_factor)
denoised = SmoothData(order,
windowsize=theta,
smoothorder=3,
lowreject=3,
highreject=3,
expand=10,
numiters=10)
#denoised, theta = GPSmooth(order.copy())
#denoised, theta = CrossValidation(order.copy(), 5, 2, 2, 10)
#denoised, theta = OptimalSmooth(order.copy())
#denoised.y *= order.cont/order.cont.mean()
print "Window size = %.4f nm" % theta
column = {"wavelength": denoised.x,
"flux": order.y / denoised.y,
"continuum": denoised.cont,
"error": denoised.err}
header_list.append((("Smoother", theta, "Smoothing Parameter"),))
column_list.append(column)
if plot:
plt.figure(1)
plt.plot(order.x, order.y / order.y.mean())
plt.plot(denoised.x, denoised.y / denoised.y.mean())
plt.title(starname)
plt.figure(2)
plt.plot(order.x, order.y / denoised.y)
plt.title(starname)
#plt.plot(order.x, (order.y-denoised.y)/np.median(order.y))
#plt.show()
if plot:
plt.show()
outfilename = "%s_smoothed.fits" % (fname.split(".fits")[0])
print "Outputting to %s" % outfilename
HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode='new', headers_info=header_list)
| gpl-3.0 | 4,576,857,145,479,174,000 | 37.141104 | 119 | 0.564098 | false | 3.770164 | false | false | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics.py | 2 | 1414 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceUsageStatistics(Model):
"""the statistics information for resource usage.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar average: the average value.
:vartype average: float
:ivar minimum: the minimum value.
:vartype minimum: long
:ivar maximum: the maximum value.
:vartype maximum: long
"""
_validation = {
'average': {'readonly': True},
'minimum': {'readonly': True},
'maximum': {'readonly': True},
}
_attribute_map = {
'average': {'key': 'average', 'type': 'float'},
'minimum': {'key': 'minimum', 'type': 'long'},
'maximum': {'key': 'maximum', 'type': 'long'},
}
def __init__(self):
super(ResourceUsageStatistics, self).__init__()
self.average = None
self.minimum = None
self.maximum = None
| mit | 1,628,343,292,106,821,000 | 30.422222 | 76 | 0.5686 | false | 4.666667 | false | false | false |
juliarizza/web2courses | controllers/default.py | 1 | 7458 | # -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
import itertools
def index():
return dict()
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/manage_users (requires membership in
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
if request.args(0) == 'register':
db.auth_user.bio.writable = db.auth_user.bio.readable = False
db.auth_user.avatar.writable = db.auth_user.avatar.readable = False
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
##################################################################################
#### ####
#### COURSE PAGES ####
#### ####
##################################################################################
def courses():
courses = db(Course).select()
return dict(courses=courses)
def course():
course_id = request.args(0, cast=int)
course = Course(id=course_id)
open_classes = course.classes(Class.status == 3).select()
limited_classes = [c for c in open_classes if c.available_until]
Interest.course.default = course_id
Interest.course.readable = Interest.course.writable = False
interest_form = SQLFORM(Interest)
if interest_form.process(onvalidation=check_if_exists).accepted:
response.flash = T("Thank you!")
elif interest_form.errors:
response.flash = T("Erros no formulário!")
return dict(
course=course,
open_classes=open_classes,
limited_classes=limited_classes,
interest_form=interest_form)
def enroll():
class_id = request.args(0, cast=int)
if not class_id in session.cart:
session.cart.append(class_id)
else:
session.flash = T('This course is already on your shopping cart!')
redirect(URL('payments', 'shopping_cart'))
@auth.requires_login()
def my_courses():
class_ids = db(Student.student == auth.user.id).select()
my_courses = db(Course.course_owner == auth.user.id).select()
classes = db(Class.id.belongs([x.class_id for x in class_ids])|\
Class.course.belongs([x.id for x in my_courses])).select()
return dict(classes=classes)
@auth.requires(lambda: enrolled_in_class(record_id=request.args(0, cast=int), record_type=1) | auth.has_membership("Admin"))
def my_class():
class_id = request.args(0, cast=int)
my_class = Class(id=class_id)
my_course = my_class.course
modules = db(Module.course_id == my_course).select()
return dict(my_class=my_class,
modules=modules)
@auth.requires(lambda: enrolled_in_class(record_id=request.args(0, cast=int), record_type=2) | auth.has_membership("Admin"))
def lesson():
lesson_id = request.args(0, cast=int)
class_id = request.args(1, cast=int)
lesson = Lesson(id=lesson_id)
if db(Schedule_Lesson.lesson_id == lesson_id).select().first().release_date > request.now.date():
raise HTTP(404)
page = int(request.vars.page or 1)
videos = lesson.videos.select()
texts = lesson.texts.select()
exercises = lesson.exercises.select()
merged_records = itertools.chain(videos, texts, exercises)
contents = sorted(merged_records, key=lambda record: record['place'])
if page <= 0 or page > len(contents):
raise HTTP(404)
is_correct = {}
if request.vars:
keys = request.vars.keys()
for key in keys:
if key != 'page':
q_id = int(key.split('_')[1])
question = Exercise(id=q_id)
if question.correct == int(request.vars[key]):
is_correct[key] = True
else:
is_correct[key] = False
return dict(lesson=lesson,
content=contents[page-1],
total_pages=len(contents),
is_correct=is_correct,
class_id=class_id)
@auth.requires(lambda: enrolled_in_class(record_id=request.args(0, cast=int), record_type=1) | auth.has_membership("Admin"))
def forum():
class_id = request.args(0, cast=int)
topics = db(Forum.class_id == class_id).select(orderby=~Forum.created_on)
return dict(topics=topics,
class_id=class_id)
@auth.requires(lambda: enrolled_in_class(record_id=request.args(0, cast=int), record_type=3) | auth.has_membership("Admin"))
def topic():
topic_id = request.args(0, cast=int)
topic = Forum(id=topic_id)
comments = db(Comment.post == topic_id).select()
Comment.post.default = topic_id
Comment.post.readable = Comment.post.writable = False
form = crud.create(Comment, next=URL('topic', args=topic_id))
return dict(topic=topic,
comments=comments,
form=form)
@auth.requires(lambda: enrolled_in_class(record_id=request.args(0, cast=int), record_type=1) | auth.has_membership("Admin"))
def new_topic():
class_id = request.args(0, cast=int)
Forum.class_id.default = class_id
Forum.class_id.readable = Forum.class_id.writable = False
form = SQLFORM(Forum)
if form.process().accepted:
redirect(URL('topic', args=form.vars.id))
return dict(form=form)
@auth.requires(lambda: enrolled_in_class(record_id=request.args(0, cast=int), record_type=1) | auth.has_membership("Admin"))
def calendar():
class_id = request.args(0, cast=int)
dates = db((Date.class_id == class_id)|(Date.class_id == None)).select()
my_class = Class(id=class_id)
modules = db(Module.course_id == my_class.course).select()
lessons = []
for module in modules:
for lesson in module.lessons.select():
lessons.append(lesson)
return dict(dates=dates,
my_class=my_class,
lessons=lessons)
@auth.requires(lambda: enrolled_in_class(record_id=request.args(0, cast=int), record_type=1) | auth.has_membership("Admin"))
def announcements():
class_id = request.args(0, cast=int)
announcements = db(Announcement.class_id == class_id).select()
return dict(announcements=announcements,
class_id=class_id) | mit | 2,963,647,003,533,820,400 | 35.920792 | 124 | 0.593134 | false | 3.700744 | false | false | false |
jbaayen/sympy | sympy/thirdparty/__init__.py | 10 | 1047 | """Thirdparty Packages for internal use.
"""
import sys
import os
def import_thirdparty(lib):
"""
Imports a thirdparty package "lib" by setting all paths correctly.
At the moment, there is only the "pyglet" library, so we just put
pyglet to sys.path temporarily, then import "lib" and then restore the path.
With more packages, we'll just put them to sys.path as well.
"""
seen = set()
def new_import(name, globals={}, locals={}, fromlist=[]):
if name in seen:
return old_import(name, globals, locals, fromlist)
seen.add(name)
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname( \
__file__)), "pyglet"))
try:
m = old_import(name, globals, locals, fromlist)
finally:
del sys.path[0]
return m
import __builtin__
old_import = __builtin__.__import__
__builtin__.__import__ = new_import
try:
m = __import__(lib)
finally:
__builtin__.__import__ = old_import
return m
| bsd-3-clause | 2,119,548,506,643,350,000 | 28.083333 | 80 | 0.588348 | false | 3.892193 | false | false | false |
bigartm/visartm | algo/tools/vkloader.py | 1 | 1609 | import os
import json
def download_wall(domain, dataset_folder, cut=1000000, access_token=None):
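    """Download wall posts of a VK community into a plain-text dataset.
    Each post longer than 50 characters is written to
    <dataset_folder>/documents/NNNNNN.txt, and its source URL and timestamp
    are collected into <dataset_folder>/meta/meta.json; `cut` caps the number
    of saved posts. Illustrative call (folder and token are placeholders):
    download_wall("lurkopub_alive", "datasets/lurkopub", cut=500, access_token="<token>")
    """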
import vk
session = vk.Session(access_token=access_token)
api = vk.API(session)
info = dict()
docs_folder = os.path.join(dataset_folder, "documents")
os.makedirs(docs_folder, exist_ok=True)
os.makedirs(os.path.join(dataset_folder, "meta"), exist_ok=True)
id = 0
offset = 0
while True:
posts = api.wall.get(domain=domain, offset=offset, count=100)
for i in range(1, len(posts)):
post = posts[i]
text = post["text"].replace("<br>", "\n")
if len(text) > 50:
id += 1
text_id = "%06d.txt" % id
info[text_id] = dict()
info[text_id]["url"] = "https://vk.com/" + domain + \
"?w=wall" + str(post["from_id"]) + "_" + str(post["id"])
info[text_id]["time"] = post["date"]
text_file_name = os.path.join(docs_folder, text_id)
with open(text_file_name, "w", encoding='utf-8') as f:
f.write(text)
if id == cut:
break
offset += 100
# print (offset)
if len(posts) != 101:
break
if id == cut:
break
with open(os.path.join(dataset_folder, "meta", "meta.json"), "wb") as f:
f.write(json.dumps(info).encode("UTF-8"))
if __name__ == "__main__":
domain = "lurkopub_alive"
download_wall(
domain,
"D:\\visartm\\data\\datasets\\" +
domain,
cut=1000000)
| bsd-3-clause | 5,083,216,247,126,751,000 | 31.18 | 76 | 0.50404 | false | 3.460215 | false | false | false |
yeongseon/django_beautifulseodang | django_beautifulseodang/settings.py | 1 | 8083 | """
Django settings for django_beautifulseodang project.
Generated by 'django-admin startproject' using Django 1.8.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
import json
from django.core.exceptions import ImproperlyConfigured
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Env for dev / deploy
def get_env(setting, envs):
try:
return envs[setting]
except KeyError:
error_msg = "You SHOULD set {} environ".format(setting)
raise ImproperlyConfigured(error_msg)
return
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ukm)tbc+e%#gew3^%wxyk%@@e9&g%3(@zq&crilwlbvh@6n*l$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Favicon
'favicon',
# Disqus
'disqus',
# ckeditor
'ckeditor',
'ckeditor_uploader',
# Bootstrap
'bootstrap3',
'bootstrapform',
'bootstrap_pagination',
'django_social_share',
# Fontawesome
'fontawesome',
# home
'home',
'social_django',
#'social.apps.django_app.default',
'django_beautifulseodang',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
)
ROOT_URLCONF = 'django_beautifulseodang.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
#os.path.join(BASE_DIR, 'templates', 'allauth'),
os.path.join(BASE_DIR, 'templates', 'django_social_share'),
],
# 'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
#'social.apps.django_app.context_processors.backends',
#'social.apps.django_app.context_processors.login_redirect',
],
'loaders': [
                # works only if APP_DIRS above stays commented out
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
]
# Django all auth settings
AUTHENTICATION_BACKENDS = (
#'social_core.backends.github.GithubOAuth2', # Github for python-social-auth
'social_core.backends.twitter.TwitterOAuth', # Twitter for python-social-auth
'social_core.backends.google.GoogleOAuth2', # Google for python-social-auth
'social_core.backends.facebook.FacebookOAuth2', # Facebook for python-social-auth
'django.contrib.auth.backends.ModelBackend',
)
WSGI_APPLICATION = 'django_beautifulseodang.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
TEMPLATE_DIR = (
os.path.join(BASE_DIR, 'templates'),
)
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'ko-kr' # Korean by default
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'statics')
SITE_ID = 1
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 9,
}
}
]
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
    #'accounts.social.create_user', # overrides the step above
    #'accounts.social.update_avatar', # added step
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_URL_NAMESPACE = 'social'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = '/'
#ACCOUNT_FORMS = {
# 'login': 'home.forms.MyLoginForm',
# 'signup': 'home.forms.MySignupForm'
#}
DEV_ENVS = os.path.join(BASE_DIR, "envs_dev.json")
DEPLOY_ENVS = os.path.join(BASE_DIR, "envs.json")
if os.path.exists(DEV_ENVS): # Develop Env
env_file = open(DEV_ENVS)
elif os.path.exists(DEPLOY_ENVS): # Deploy Env
env_file = open(DEPLOY_ENVS)
else:
env_file = None
if env_file is None: # System environ
try:
FACEBOOK_KEY = os.environ['FACEBOOK_KEY']
FACEBOOK_SECRET = os.environ['FACEBOOK_SECRET']
GOOGLE_KEY = os.environ['GOOGLE_KEY']
GOOGLE_SECRET = os.environ['GOOGLE_SECRET']
except KeyError as error_msg:
raise ImproperlyConfigured(error_msg)
else: # JSON env
envs = json.loads(env_file.read())
FACEBOOK_KEY = get_env('FACEBOOK_KEY', envs)
FACEBOOK_SECRET = get_env('FACEBOOK_SECRET', envs)
GOOGLE_KEY = get_env('GOOGLE_KEY', envs)
GOOGLE_SECRET = get_env('GOOGLE_SECRET', envs)
# SocialLogin: Facebook
SOCIAL_AUTH_FACEBOOK_KEY = FACEBOOK_KEY
SOCIAL_AUTH_FACEBOOK_SECRET = FACEBOOK_SECRET
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
# SocialLogin: Google
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = GOOGLE_KEY
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = GOOGLE_SECRET
SOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['email']
SOCIAL_AUTH_TWITTER_KEY = 'EUQaQkvpr4R22UTNofeqIfqsV'
SOCIAL_AUTH_TWITTER_SECRET = 'QLjJGjCGMxkIPvGaMymAcu7zZ2GcjMxrbHqt019v5FpIs3WTB1'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# favicon
FAVICON_PATH = STATIC_URL + 'img/favicon.png'
# ckeditor
MEDIA_URL = '/media/'
CKEDITOR_JQUERY_URL = '//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js'
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_CONFIGS = {
'default': {
'toolbar': None,
}
}
# Disqus
DISQUS_API_KEY = 'zcJshWHxmOREPGjOrCq6r0rviSIIELz2iHWEdwDrpYSpko5wZDVBt60c7kYsvjlP'
DISQUS_WEBSITE_SHORTNAME = 'http-yeongseon-pythonanywhere-com'
#try:
# from .allauth_settings import *
#except ImportError:
# print("ImportError")
# pass
try:
from .bootstrap3_settings import *
except ImportError:
print("ImportError")
pass
| mit | 1,824,213,679,889,360,100 | 26.172297 | 85 | 0.674748 | false | 3.184086 | false | false | false |
ArcheProject/arche_pas | arche_pas/registration_cases.py | 1 | 8840 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from uuid import uuid4
from arche.interfaces import IFlashMessages
from pyramid.httpexceptions import HTTPFound
from arche_pas import _
from arche_pas.models import register_case
def callback_case_1(provider, user, data):
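    """Provider email is verified and matches a locally validated account:
    attach the provider data to that user and log them in."""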
provider.store(user, data)
fm = IFlashMessages(provider.request)
msg = _("data_tied_at_login",
default="Since you already had an account with the same email address validated, "
"you've been logged in as that user. Your accounts have also been linked.")
fm.add(msg, type="success", auto_destruct=False)
# Will return a HTTP 302
return provider.login(user)
def callback_case_2(provider, user, data):
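    """Logged-in user whose local email was not yet validated: trust the
    provider's verification, mark the email validated and link the login."""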
user.email_validated = True
provider.store(user, data)
fm = IFlashMessages(provider.request)
msg = _("accounts_linked_verified_since_logged_in",
default="You've linked your external login to this account.")
fm.add(msg, type="success", auto_destruct=False)
# Will return a HTTP 302
return provider.login(user)
def callback_must_be_logged_in(provider, user, data):
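    """An account with this email already exists: flash an explanation and
    redirect to the login form so the user can link the accounts afterwards."""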
email = provider.get_email(data, validated=False)
msg = _("user_email_present",
default="There's already a user registered here with your email address: '${email}' "
"If this is your account, please login here first to "
"connect the two accounts.",
mapping={'email': email})
fm = IFlashMessages(provider.request)
fm.add(msg, type='danger', auto_destruct=False, require_commit=False)
raise HTTPFound(location=provider.request.resource_url(provider.request.root, 'login'))
def callback_register(provider, user, data):
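    """Stash the provider payload in the session under a fresh UUID and let
    the regular registration flow pick it up via the returned id."""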
reg_id = str(uuid4())
provider.request.session[reg_id] = data
# Register this user
return reg_id
def callback_maybe_attach_account(provider, user, data):
""" Only for logged in users."""
reg_id = str(uuid4())
provider.request.session[reg_id] = data
raise HTTPFound(location=provider.request.route_url('pas_link', provider=provider.name, reg_id=reg_id))
def includeme(config):
"""
Different registration cases for email:
1) Validated on server, validated locally and exist
2) Validated on server, exists locally but not validated, user logged in
3) Validated on server, exists locally but not validated, user not logged in
4) Validated on server, doesn't exist locally
5) Validated on server, doesn't match locally but user logged in
- change email?
Serious security breach risk:
6) Not validated/trusted on server, validated locally, user logged in
- Serious risk of hack: cross site scripting or accidental attach of credentials
7) Not validated/trusted on server, validated locally, user not logged in
8) Not validated/trusted on server, exists locally but not validated, user logged in
9) Not validated/trusted on server, local user not matched, user logged in
10) Not validated/trusted on server, exists locally but not validated, user not logged in
11) Not validated/trusted on server, doesn't exist locally, not logged in
12) No email from provider, user logged in
13) No email from provider, user not logged in
"""
register_case(
config.registry,
'case1',
title = "Validated on server, validated locally and user exists",
require_authenticated = None,
email_validated_provider = True,
email_validated_locally = True,
user_exist_locally = True,
provider_validation_trusted = True,
callback=callback_case_1,
)
register_case(
config.registry,
'case2',
title = "Validated on server, exists locally but not validated, user logged in",
require_authenticated = True,
email_validated_provider = True,
email_validated_locally = False,
user_exist_locally = True,
provider_validation_trusted = True,
callback = callback_case_2,
)
register_case(
config.registry,
'case3',
title = "Validated on server, exists locally but not validated, user not logged in",
require_authenticated = False,
email_validated_provider = True,
email_validated_locally = False,
user_exist_locally = True,
provider_validation_trusted = True,
callback = callback_must_be_logged_in,
)
register_case(
config.registry,
'case4',
title = "Validated on server, doesn't exist locally",
require_authenticated = False,
email_validated_provider = True,
#email_validated_locally = False,
user_exist_locally = False,
provider_validation_trusted = True,
callback = callback_register,
)
register_case(
config.registry,
'case5',
title = "Validated on server, doesn't match locally but is authenticated",
require_authenticated = True,
email_validated_provider = True,
#email_validated_locally = False,
user_exist_locally = False,
provider_validation_trusted = True,
callback = callback_maybe_attach_account,
)
register_case(
config.registry,
'case6',
title="Not validated/trusted on server, validated locally, user logged in",
require_authenticated = True,
#email_validated_provider = None,
email_validated_locally = True,
#user_exist_locally = True, Should be caught by email_validated_locally?
email_from_provider = None,
provider_validation_trusted = False,
callback = callback_maybe_attach_account,
)
register_case(
config.registry,
'case7',
title="Not validated/trusted on server, validated locally, user not logged in",
require_authenticated = False,
#email_validated_provider = None,
email_validated_locally = True,
#user_exist_locally = True, Should be caught by email_validated_locally?
email_from_provider = None,
provider_validation_trusted = False,
callback = callback_must_be_logged_in,
)
register_case(
config.registry,
'case8',
title="Not validated/trusted on server, exists locally but not validated, user logged in",
require_authenticated = True,
email_validated_provider = None,
email_validated_locally = False,
user_exist_locally = True,
email_from_provider = True,
provider_validation_trusted = False,
callback = callback_maybe_attach_account,
)
register_case(
config.registry,
'case9',
title="Not validated/trusted on server, local user not matched, user logged in",
require_authenticated = True,
email_validated_provider = None,
email_validated_locally = False,
user_exist_locally = False,
email_from_provider = True,
provider_validation_trusted = False,
callback = callback_maybe_attach_account, #FIXME: And change email?
)
register_case(
config.registry,
'case10',
title="Not validated/trusted on server, exists locally but not validated, user not logged in",
require_authenticated = False,
email_validated_provider = None,
email_validated_locally = False,
user_exist_locally = True,
email_from_provider = None,
provider_validation_trusted = False,
callback = callback_must_be_logged_in,
)
register_case(
config.registry,
'case11',
title="Not validated/trusted on server, doesn't exist locally",
require_authenticated = False,
email_validated_provider = None,
#email_validated_locally = False,
user_exist_locally = False,
email_from_provider = True,
provider_validation_trusted = False,
callback = callback_register,
)
register_case(
config.registry,
'case12',
title="No email from provider, user logged in",
require_authenticated = True,
email_validated_provider = None,
email_validated_locally = None,
# user_exist_locally = True,
email_from_provider = False,
provider_validation_trusted = None,
callback = callback_maybe_attach_account,
)
register_case(
config.registry,
'case13',
title="No email from provider, user not logged in",
require_authenticated = False,
#email_validated_provider = None,
#email_validated_locally = None,
#user_exist_locally = None,
email_from_provider = False,
#provider_validation_trusted = None,
callback=callback_register, #Allow registration here?
)
| gpl-2.0 | 4,128,858,310,187,192,000 | 36.617021 | 107 | 0.648982 | false | 4.266409 | true | false | false |
Diego999/Sudoku-Recognition | Training/Sets/create_sets.py | 1 | 2220 | from os import listdir, path
from random import shuffle
SEPARATOR = '_'
class Attrib:
def __init__(self, number, id, path):
self.number, self.id, self.path = number, id, path
def __str__(self):
return 'Number : ' + str(self.number) + ', Id : ' + str(self.id) + ', Path : ' + self.path
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.path == other.path
def listdir_nohidden(_path):
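    """Return the non-hidden entries of _path, or [] if it is not a directory."""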
output = []
if path.isdir(_path):
for f in listdir(_path):
if not f.startswith('.'):
output.append(f)
return output
def analyze_image(filepath):
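    """Parse a '<number>_<id>.<ext>' filename into an Attrib record."""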
filename = filepath[1+filepath.rfind('/'):]
words = filename.split(SEPARATOR)
words[-1] = words[-1][0:words[-1].find('.')]#Remove extension
return Attrib(words[0], words[1], filepath)
def load_image(dirs):
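    """Collect Attrib records for every file found one directory level below
    each entry of dirs, then shuffle and return them."""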
output = []
for d in dirs:
i = 0
for f in listdir_nohidden(d):
for ff in listdir_nohidden(d+f):
output.append(analyze_image(d + f + '/' + ff))
i += 1
print(d, ' contains ', i, ' items')
shuffle(output)
return output
def query(dirs, n, different_than=None):
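    """Return up to n shuffled records from dirs, skipping any record already
    present in different_than."""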
output = []
i = 0
for f in load_image(dirs):
if i >= n:
break
if different_than is None or (different_than is not None and f not in different_than):
output.append(f)
i += 1
return output
def save_file(path, list):
with open(path, 'w+') as f:
for l in list:
f.write(l.path + '\n')
# The data is from the Machine Learning book
filepath = '/Users/Diego/Github/Digit-Dataset/'
d = [filepath]
nb_training = 270*0.8
nb_validation = 270*0.2
training = query(d, nb_training, different_than=None)
validation = query(d, nb_validation, different_than=training)
print "\nTraining ", len(training), " items\n"
for t in training:
print(t)
print "\nValidation ", len(validation), " items\n"
for v in validation:
print(v)
save_file('/Users/Diego/Desktop/training.list', training)
save_file('/Users/Diego/Desktop/validation.list', validation) | mit | -4,436,189,453,462,923,300 | 24.238636 | 98 | 0.584685 | false | 3.457944 | false | false | false |
ggreg/presto-python-client | prestodb/client.py | 1 | 18954 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements the Presto protocol to submit SQL statements, track
their state and retrieve their result as described in
https://github.com/prestodb/presto/wiki/HTTP-Protocol
and Presto source code.
The outline of a query is:
- Send HTTP POST to the coordinator
- Retrieve HTTP response with ``nextUri``
- Get status of the query execution by sending a HTTP GET to the coordinator
Presto queries are managed by the ``PrestoQuery`` class. HTTP requests are
managed by the ``PrestoRequest`` class. the status of a query is represented
by ``PrestoStatus`` and the result by ``PrestoResult``.
The main interface is :class:`PrestoQuery`: ::
>> request = PrestoRequest(host='coordinator', port=8080, user='test')
>> query = PrestoQuery(request, sql)
>> rows = list(query.execute())
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, List, Optional, Text, Tuple, Union # NOQA for mypy types
import requests
from prestodb import constants
from prestodb import exceptions
import prestodb.logging
from prestodb.transaction import NO_TRANSACTION
import prestodb.redirect
__all__ = ['PrestoQuery', 'PrestoRequest']
logger = prestodb.logging.get_logger(__name__)
MAX_ATTEMPTS = constants.DEFAULT_MAX_ATTEMPTS
SOCKS_PROXY = os.environ.get('SOCKS_PROXY')
if SOCKS_PROXY:
PROXIES = {
'http': 'socks5://' + SOCKS_PROXY,
'https': 'socks5://' + SOCKS_PROXY,
}
else:
PROXIES = None
class ClientSession(object):
def __init__(
self,
catalog,
schema,
source,
user,
properties=None,
headers=None,
transaction_id=None,
):
self.catalog = catalog
self.schema = schema
self.source = source
self.user = user
if properties is None:
properties = {}
self._properties = properties
self._headers = headers or {}
self.transaction_id = transaction_id
@property
def properties(self):
return self._properties
@property
def headers(self):
return self._headers
def get_header_values(headers, header):
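    """Split a comma-separated header value into a list of stripped strings."""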
return [val.strip() for val in headers[header].split(',')]
def get_session_property_values(headers, header):
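    """Parse 'name=value' pairs from a comma-separated session header."""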
kvs = get_header_values(headers, header)
return [
(k.strip(), v.strip()) for k, v
in (kv.split('=', 1) for kv in kvs)
]
class PrestoStatus(object):
def __init__(self, id, stats, info_uri, next_uri, rows, columns=None):
self.id = id
self.stats = stats
self.info_uri = info_uri
self.next_uri = next_uri
self.rows = rows
self.columns = columns
def __repr__(self):
return (
'PrestoStatus('
'id={}, stats={{...}}, info_uri={}, next_uri={}, rows=<count={}>'
')'.format(
self.id,
self.info_uri,
self.next_uri,
len(self.rows),
)
)
class PrestoRequest(object):
"""
Manage the HTTP requests of a Presto query.
:param host: name of the coordinator
:param port: TCP port to connect to the coordinator
:param user: associated with the query. It is useful for access control
and query scheduling.
:param source: associated with the query. It is useful for access
control and query scheduling.
:param catalog: to query. The *catalog* is associated with a Presto
connector. This variable sets the default catalog used
by SQL statements. For example, if *catalog* is set
to ``some_catalog``, the SQL statement
``SELECT * FROM some_schema.some_table`` will actually
query the table
``some_catalog.some_schema.some_table``.
:param schema: to query. The *schema* is a logical abstraction to group
table. This variable sets the default schema used by
SQL statements. For eample, if *schema* is set to
``some_schema``, the SQL statement
``SELECT * FROM some_table`` will actually query the
table ``some_catalog.some_schema.some_table``.
:param session_properties: set specific Presto behavior for the current
session. Please refer to the output of
``SHOW SESSION`` to check the available
properties.
:param http_headers: HTTP headers to post/get in the HTTP requests
:param http_scheme: "http" or "https"
:param auth: class that manages user authentication. ``None`` means no
authentication.
:max_attempts: maximum number of attempts when sending HTTP requests. An
attempt is an HTTP request. 5 attempts means 4 retries.
:request_timeout: How long (in seconds) to wait for the server to send
data before giving up, as a float or a
``(connect timeout, read timeout)`` tuple.
The client initiates a query by sending an HTTP POST to the
coordinator. It then gets a response back from the coordinator with:
- An URI to query to get the status for the query and the remaining
data
- An URI to get more information about the execution of the query
- Statistics about the current query execution
Please refer to :class:`PrestoStatus` to access the status returned by
:meth:`PrestoRequest.process`.
When the client makes an HTTP request, it may encounter the following
errors:
- Connection or read timeout:
- There is a network partition and TCP segments are
either dropped or delayed.
- The coordinator stalled because of an OS level stall (page allocation
stall, long time to page in pages, etc...), a JVM stall (full GC), or
an application level stall (thread starving, lock contention)
- Connection refused: Configuration or runtime issue on the coordinator
- Connection closed:
As most of these errors are transient, the question the caller should set
retries with respect to when they want to notify the application that uses
the client.
"""
http = requests
HTTP_EXCEPTIONS = (
http.ConnectionError, # type: ignore
http.Timeout, # type: ignore
)
def __init__(
self,
host, # type: Text
port, # type: int
user, # type: Text
source=None, # type: Text
catalog=None, # type: Text
schema=None, # type: Text
session_properties=None, # type: Optional[Dict[Text, Any]]
http_session=None, # type: Any
http_headers=None, # type: Optional[Dict[Text, Text]]
transaction_id=NO_TRANSACTION, # type: Optional[Text]
http_scheme=constants.HTTP, # type: Text
auth=constants.DEFAULT_AUTH, # type: Optional[Any]
redirect_handler=prestodb.redirect.GatewayRedirectHandler(),
max_attempts=MAX_ATTEMPTS, # type: int
request_timeout=constants.DEFAULT_REQUEST_TIMEOUT, # type: Union[float, Tuple[float, float]]
handle_retry=exceptions.RetryWithExponentialBackoff(),
):
# type: (...) -> None
self._client_session = ClientSession(
catalog,
schema,
source,
user,
session_properties,
http_headers,
transaction_id,
)
self._host = host
self._port = port
self._next_uri = None # type: Optional[Text]
if http_session is not None:
self._http_session = http_session
else:
# mypy cannot follow module import
self._http_session = self.http.Session() # type: ignore
self._http_session.headers.update(self.http_headers)
self._exceptions = self.HTTP_EXCEPTIONS
self._auth = auth
if self._auth:
if http_scheme == constants.HTTP:
raise ValueError('cannot use authentication with HTTP')
self._auth.set_http_session(self._http_session)
self._exceptions += self._auth.get_exceptions()
self._redirect_handler = redirect_handler
self._request_timeout = request_timeout
self._handle_retry = handle_retry
self.max_attempts = max_attempts
self._http_scheme = http_scheme
@property
def transaction_id(self):
return self._client_session.transaction_id
@transaction_id.setter
def transaction_id(self, value):
self._client_session.transaction_id = value
@property
def http_headers(self):
# type: () -> Dict[Text, Text]
headers = {}
headers[constants.HEADER_CATALOG] = self._client_session.catalog
headers[constants.HEADER_SCHEMA] = self._client_session.schema
headers[constants.HEADER_SOURCE] = self._client_session.source
headers[constants.HEADER_USER] = self._client_session.user
headers[constants.HEADER_SESSION] = ','.join(
# ``name`` must not contain ``=``
'{}={}'.format(name, value)
for name, value in self._client_session.properties.items()
)
# merge custom http headers
for key in self._client_session.headers:
if key in headers.keys():
raise ValueError('cannot override reserved HTTP header {}'.format(key))
headers.update(self._client_session.headers)
transaction_id = self._client_session.transaction_id
headers[constants.HEADER_TRANSACTION] = transaction_id
return headers
@property
def max_attempts(self):
# type: () -> int
return self._max_attempts
@max_attempts.setter
def max_attempts(self, value):
# type: (int) -> None
self._max_attempts = value
if value == 1: # No retry
self._get = self._http_session.get
self._post = self._http_session.post
self._delete = self._http_session.delete
return
with_retry = exceptions.retry_with(
self._handle_retry,
exceptions=self._exceptions,
conditions=(
# need retry when there is no exception but the status code is 503
lambda response: getattr(response, 'status_code', None) == 503,
),
max_attempts=self._max_attempts,
)
self._get = with_retry(self._http_session.get)
self._post = with_retry(self._http_session.post)
self._delete = with_retry(self._http_session.delete)
def get_url(self, path):
# type: (Text) -> Text
return "{protocol}://{host}:{port}{path}".format(
protocol=self._http_scheme,
host=self._host,
port=self._port,
path=path
)
@property
def statement_url(self):
# type: () -> Text
return self.get_url(constants.URL_STATEMENT_PATH)
@property
def next_uri(self):
# type: () -> Text
return self._next_uri
def post(self, sql):
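        """POST the SQL statement to the coordinator, re-sending it manually
        on gateway redirects when a redirect handler is configured."""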
data = sql.encode('utf-8')
http_headers = self.http_headers
http_response = self._post(
self.statement_url,
data=data,
headers=http_headers,
timeout=self._request_timeout,
allow_redirects=self._redirect_handler is None,
proxies=PROXIES,
)
if self._redirect_handler is not None:
while http_response is not None and http_response.is_redirect:
location = http_response.headers['Location']
url = self._redirect_handler.handle(location)
logger.info('redirect {} from {} to {}'.format(
http_response.status_code,
location,
url,
))
http_response = self._post(
url,
data=data,
headers=http_headers,
timeout=self._request_timeout,
allow_redirects=False,
proxies=PROXIES,
)
return http_response
def get(self, url):
return self._get(
url,
headers=self.http_headers,
timeout=self._request_timeout,
proxies=PROXIES,
)
def delete(self, url):
return self._delete(
url,
timeout=self._request_timeout,
proxies=PROXIES,
)
def _process_error(self, error, query_id):
error_type = error['errorType']
if error_type == 'EXTERNAL':
raise exceptions.PrestoExternalError(error, query_id)
elif error_type == 'USER_ERROR':
return exceptions.PrestoUserError(error, query_id)
return exceptions.PrestoQueryError(error, query_id)
def raise_response_error(self, http_response):
if http_response.status_code == 503:
raise exceptions.Http503Error('error 503: service unavailable')
raise exceptions.HttpError(
'error {}{}'.format(
http_response.status_code,
': {}'.format(http_response.content) if http_response.content else '',
)
)
def process(self, http_response):
# type: (requests.Response) -> PrestoStatus
if not http_response.ok:
self.raise_response_error(http_response)
http_response.encoding = 'utf-8'
response = http_response.json()
logger.debug('HTTP {}: {}'.format(http_response.status_code, response))
if 'error' in response:
raise self._process_error(response['error'], response.get('id'))
if constants.HEADER_CLEAR_SESSION in http_response.headers:
for prop in get_header_values(
http_response.headers,
constants.HEADER_CLEAR_SESSION,
):
self._client_session.properties.pop(prop, None)
if constants.HEADER_SET_SESSION in http_response.headers:
for key, value in get_session_property_values(
http_response.headers,
constants.HEADER_SET_SESSION,
):
self._client_session.properties[key] = value
self._next_uri = response.get('nextUri')
return PrestoStatus(
id=response['id'],
stats=response['stats'],
info_uri=response['infoUri'],
next_uri=self._next_uri,
rows=response.get('data', []),
columns=response.get('columns'),
)
class PrestoResult(object):
"""
Represent the result of a Presto query as an iterator on rows.
This class implements the iterator protocol as a generator type
https://docs.python.org/3/library/stdtypes.html#generator-types
"""
def __init__(self, query, rows=None):
self._query = query
self._rows = rows or []
self._rownumber = 0
@property
def rownumber(self):
# type: () -> int
return self._rownumber
def __iter__(self):
# Initial fetch from the first POST request
for row in self._rows:
self._rownumber += 1
yield row
self._rows = None
# Subsequent fetches from GET requests until next_uri is empty.
while not self._query.is_finished():
rows = self._query.fetch()
for row in rows:
self._rownumber += 1
logger.debug('row {}'.format(row))
yield row
class PrestoQuery(object):
"""Represent the execution of a SQL statement by Presto."""
def __init__(
self,
request, # type: PrestoRequest
sql, # type: Text
):
# type: (...) -> None
self.query_id = None # type: Optional[Text]
self._stats = {} # type: Dict[Any, Any]
self._columns = None # type: Optional[List[Text]]
self._finished = False
self._cancelled = False
self._request = request
self._sql = sql
self._result = PrestoResult(self)
@property
def columns(self):
return self._columns
@property
def stats(self):
return self._stats
@property
def result(self):
return self._result
def execute(self):
# type: () -> PrestoResult
"""Initiate a Presto query by sending the SQL statement
This is the first HTTP request sent to the coordinator.
It sets the query_id and returns a Result object used to
track the rows returned by the query. To fetch all rows,
call fetch() until is_finished is true.
"""
if self._cancelled:
raise exceptions.PrestoUserError(
"Query has been cancelled",
self.query_id,
)
response = self._request.post(self._sql)
status = self._request.process(response)
self.query_id = status.id
self._stats.update({u'queryId': self.query_id})
self._stats.update(status.stats)
if status.next_uri is None:
self._finished = True
self._result = PrestoResult(self, status.rows)
return self._result
def fetch(self):
# type: () -> List[List[Any]]
"""Continue fetching data for the current query_id"""
response = self._request.get(self._request.next_uri)
status = self._request.process(response)
if status.columns:
self._columns = status.columns
self._stats.update(status.stats)
logger.debug(status)
if status.next_uri is None:
self._finished = True
return status.rows
def cancel(self):
# type: () -> None
"""Cancel the current query"""
if self.is_finished():
return
self._cancelled = True
if self._request.next_uri is None:
return
response = self._request.delete(self._request.next_uri)
if response.status_code == requests.codes.no_content:
return
self._request.raise_response_error(response)
def is_finished(self):
# type: () -> bool
return self._finished
| apache-2.0 | 7,682,619,366,289,095,000 | 32.846429 | 101 | 0.59101 | false | 4.309686 | false | false | false |
BrainComputationLab/ncs | applications/simulator/simulator.py | 1 | 1304 | #!/usr/bin/python
import sys
import json_model
import pyncs
def Run(argv):
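    """Load the JSON model given on the command line, build a pyncs
    simulation with its inputs and ASCII reports, and step it 100 times."""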
if len(argv) < 2:
print "Usage: %s <model_file>" % argv[0]
model = json_model.JSONModel(argv[1])
if not model.valid:
print "Failed to load model"
return
model_specification = model.model_specification
simulation_parameters = pyncs.SimulationParameters()
simulation_parameters.thisown = False;
simulation = pyncs.Simulation(model_specification,
simulation_parameters)
if not simulation.init(pyncs.string_list(argv)):
print "Failed to initialize simulator."
return
print "Injecting pre-specified inputs."
for name, group in model.input_groups.items():
simulation.addInput(group)
print "Injection complete."
print "Adding reports."
sinks = {}
for name, report in model.reports.items():
source = simulation.addReport(report)
if not source:
print "Failed to add report %s" % name
return
#sinks[name] = pyncs.NullSink(source)
#sinks[name] = pyncs.AsciiStreamSink(source)
#sinks[name] = pyncs.AsciiFileSink(source, "/dev/fd/0")
sinks[name] = pyncs.AsciiFileSink(source, "/dev/fd/0")
print "Starting simulation."
for i in range(0,100):
simulation.step()
del simulation
if __name__ == "__main__":
Run(sys.argv)
| bsd-2-clause | -4,326,328,763,696,087,600 | 28.636364 | 58 | 0.680982 | false | 3.422572 | false | false | false |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/dask/store/core.py | 2 | 2913 | from __future__ import absolute_import, division, print_function
from collections import defaultdict
from operator import getitem
from datetime import datetime
from time import time
from ..compatibility import MutableMapping
from ..core import istask, ishashable
from ..utils_test import add # noqa: F401
class Store(MutableMapping):
""" Store - A storage of data and computation
Examples
--------
Store data like a dictionary
>>> import dask.store as ds
>>> s = ds.Store()
>>> s['x'] = 10
>>> s['x']
10
Also store computation on that data
>>> s['y'] = (add, 'x', 5)
Accessing these keys results in computations. Results may be cached for
reuse.
>>> s['y']
15
Design
------
A Store maintains the following state
dsk: dict
A dask to define all computation
cache: dict-like
Stores both ground data and cached intermediate values
data: set
The keys in the cache that can not be removed for correctness.
compute_time: dict:: {key: float}
dict mapping the time it took to compute each key
access_times: dict:: {key: [datetimes]}
The times at which a key was accessed
"""
def __init__(self, cache=None):
self.dsk = dict()
if cache is None:
cache = dict()
self.cache = cache
self.data = set()
self.compute_time = dict()
self.access_times = defaultdict(list)
def __setitem__(self, key, value):
if key in self.dsk:
if (self.dsk[key] == value or
self.dsk[key] == (getitem, self.cache, key) and
self.cache[key] == value):
return
else:
raise KeyError("Can not overwrite data")
if istask(value):
self.dsk[key] = value
else:
self.cache[key] = value
self.dsk[key] = (getitem, self.cache, key)
self.data.add(key)
def __getitem__(self, key):
if isinstance(key, list):
return (self[item] for item in key)
if not ishashable(key):
return key
if key not in self.dsk:
return key
self.access_times[key].append(datetime.now())
if key in self.cache:
return self.cache[key]
task = self.dsk[key]
func, args = task[0], task[1:]
if func == getitem and args[0] is self.cache:
return self.cache[args[1]]
args = [self[arg] for arg in args]
start = time()
result = func(*args)
end = time()
self.cache[key] = result
self.compute_time[key] = end - start
return result
def __len__(self):
return len(self.dsk)
def __iter__(self):
return iter(self.dsk)
def __delitem__(self, key):
raise ValueError("Dask Store does not support deletion")
| gpl-3.0 | -8,787,493,078,674,475,000 | 24.330435 | 76 | 0.565396 | false | 4.040222 | false | false | false |