repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
IFAEControl/pirelay | pirelay/server.py | 1 | 1591 | #!/usr/bin/env python3
import time
from concurrent import futures
import grpc
from .protos import pirelay_pb2
from .protos import pirelay_pb2_grpc
from .relay import RelaysArray
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
PINS = [21]
class PiRelayServer(pirelay_pb2_grpc.PiRelayServicer):
    def __init__(self, bcm_pins=None):
        # Default to an empty pin list; avoids the mutable-default-argument pitfall.
        self._relays = RelaysArray(bcm_pins=bcm_pins if bcm_pins is not None else [])
def Enable(self, request, context):
try:
self._relays.enable(request.channel)
except Exception as ex:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Error,
message=str(ex))
else:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Ok,
message="")
def Disable(self, request, context):
try:
self._relays.disable(request.channel)
except Exception as ex:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Error,
message=str(ex))
else:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Ok,
message="")
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
pirelay_pb2_grpc.add_PiRelayServicer_to_server(PiRelayServer(PINS), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
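# Illustrative client-side sketch (not part of the original module). It assumes
# the generated pirelay_pb2 / pirelay_pb2_grpc stubs expose a PiRelayStub and a
# request message carrying a "channel" field; the actual names are defined in
# the project's .proto files, which are not shown here.
#     channel = grpc.insecure_channel("localhost:50051")
#     stub = pirelay_pb2_grpc.PiRelayStub(channel)
#     stub.Enable(pirelay_pb2.PiRelayRequest(channel=0))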
| lgpl-3.0 | -4,084,407,837,345,929,700 | 25.966102 | 79 | 0.574481 | false |
mwilliamson/python-mammoth | tests/docx/style_map_tests.py | 1 | 4495 | import io
from zipfile import ZipFile
from nose.tools import istest, assert_equal
from mammoth.docx.style_map import write_style_map, read_style_map
from mammoth.zips import open_zip
from mammoth.docx import xmlparser as xml
@istest
def reading_embedded_style_map_on_document_without_embedded_style_map_returns_none():
fileobj = _normal_docx()
assert_equal(None, read_style_map(fileobj))
@istest
def writing_style_map_preserves_unrelated_files():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
with open_zip(fileobj, "r") as zip_file:
assert_equal("placeholder", zip_file.read_str("placeholder"))
@istest
def embedded_style_map_can_be_read_after_being_written():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
assert_equal("p => h1", read_style_map(fileobj))
@istest
def embedded_style_map_is_written_to_separate_file():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
with open_zip(fileobj, "r") as zip_file:
assert_equal("p => h1", zip_file.read_str("mammoth/style-map"))
@istest
def embedded_style_map_is_referenced_in_relationships():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
assert_equal(expected_relationships_xml, _read_relationships_xml(fileobj))
@istest
def embedded_style_map_has_override_content_type_in_content_types_xml():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
assert_equal(expected_content_types_xml, _read_content_types_xml(fileobj))
@istest
def can_overwrite_existing_style_map():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
write_style_map(fileobj, "p => h2")
with open_zip(fileobj, "r") as zip_file:
assert_equal("p => h2", read_style_map(fileobj))
_assert_no_duplicates(zip_file._zip_file.namelist())
assert_equal(expected_relationships_xml, _read_relationships_xml(fileobj))
assert_equal(expected_content_types_xml, _read_content_types_xml(fileobj))
def _read_relationships_xml(fileobj):
with open_zip(fileobj, "r") as zip_file:
return xml.parse_xml(
io.StringIO(zip_file.read_str("word/_rels/document.xml.rels")),
[("r", "http://schemas.openxmlformats.org/package/2006/relationships")],
)
def _read_content_types_xml(fileobj):
with open_zip(fileobj, "r") as zip_file:
return xml.parse_xml(
io.StringIO(zip_file.read_str("[Content_Types].xml")),
[("ct", "http://schemas.openxmlformats.org/package/2006/content-types")],
)
original_relationships_xml = ('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>' +
'<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">' +
'<Relationship Id="rId3" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/settings" Target="settings.xml"/>' +
'</Relationships>')
expected_relationships_xml = xml.element("r:Relationships", {}, [
xml.element("r:Relationship", {"Id": "rId3", "Type": "http://schemas.openxmlformats.org/officeDocument/2006/relationships/settings", "Target": "settings.xml"}),
xml.element("r:Relationship", {"Id": "rMammothStyleMap", "Type": "http://schemas.zwobble.org/mammoth/style-map", "Target": "/mammoth/style-map"}),
])
original_content_types_xml = ('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>' +
'<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">' +
'<Default Extension="png" ContentType="image/png"/>' +
'</Types>'
)
expected_content_types_xml = xml.element("ct:Types", {}, [
xml.element("ct:Default", {"Extension": "png", "ContentType": "image/png"}),
xml.element("ct:Override", {"PartName": "/mammoth/style-map", "ContentType": "text/prs.mammoth.style-map"}),
])
def _normal_docx():
fileobj = io.BytesIO()
zip_file = ZipFile(fileobj, "w")
try:
zip_file.writestr("placeholder", "placeholder")
zip_file.writestr("word/_rels/document.xml.rels", original_relationships_xml)
zip_file.writestr("[Content_Types].xml", original_content_types_xml)
finally:
zip_file.close()
return fileobj
def _assert_no_duplicates(values):
counts = {}
for value in values:
counts[value] = counts.get(value, 0) + 1
for value, count in counts.items():
if count != 1:
assert False, "{0} has count of {1}".format(value, count)
| bsd-2-clause | -5,439,335,408,695,723,000 | 36.14876 | 164 | 0.666741 | false |
tensorflow/model-optimization | tensorflow_model_optimization/g3doc/tools/build_docs.py | 1 | 3663 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to generate open source api_docs for tensorflow_model_optimization.
To use:
1. Install the tensorflow docs package, which is only compatible with Python 3
python3 -m pip install git+https://github.com/tensorflow/docs
2. Install TensorFlow Model Optimization. The API docs are generated from
`tfmot` from the import of the tfmot package below, based on what is exposed
under
https://github.com/tensorflow/model-optimization/tree/master/tensorflow_model_optimization/python/core/api.
See https://www.tensorflow.org/model_optimization/guide/install.
3. Run build_docs.py.
python3 build_docs.py --output_dir=/tmp/model_optimization_api
4. View the generated markdown files on a viewer. One option is to fork
https://github.com/tensorflow/model-optimization/, push a change that
copies the files to tensorflow_model_optimization/g3doc, and then
view the files on Github.
Note:
If duplicate or spurious docs are generated (e.g. internal names), consider
blacklisting them via the `private_map` argument below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
import tensorflow_model_optimization as tfmot
flags.DEFINE_string("output_dir", "/tmp/model_optimization_api",
"Where to output the docs")
flags.DEFINE_string(
"code_url_prefix",
("https://github.com/tensorflow/model-optimization/blob/master/"
"tensorflow_model_optimization"),
"The url prefix for links to code.")
flags.DEFINE_bool("search_hints", True,
"Include metadata search hints in the generated files")
flags.DEFINE_string("site_path", "model_optimization/api_docs/python",
"Path prefix in the _toc.yaml")
FLAGS = flags.FLAGS
def main(unused_argv):
doc_generator = generate_lib.DocGenerator(
root_title="TensorFlow Model Optimization",
py_modules=[("tfmot", tfmot)],
base_dir=os.path.dirname(tfmot.__file__),
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
# TODO(tfmot): remove this once the next release after 0.3.0 happens.
# This is needed in the interim because the API docs reflect
# the latest release and the current release still wildcard imports
# all of the classes below.
private_map={
"tfmot.sparsity.keras": [
# List of internal classes which get exposed when imported.
"InputLayer",
"custom_object_scope",
"pruning_sched",
"pruning_wrapper",
"absolute_import",
"division",
"print_function",
"compat"
]
},
)
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 1,166,959,432,661,376,000 | 33.556604 | 110 | 0.677041 | false |
owlabs/incubator-airflow | airflow/executors/__init__.py | 1 | 3891 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor # noqa
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.sequential_executor import SequentialExecutor
DEFAULT_EXECUTOR = None
def _integrate_plugins():
"""Integrate plugins to the context."""
from airflow.plugins_manager import executors_modules
for executors_module in executors_modules:
sys.modules[executors_module.__name__] = executors_module
globals()[executors_module._name] = executors_module
def get_default_executor():
"""Creates a new instance of the configured executor if none exists and returns it"""
global DEFAULT_EXECUTOR
if DEFAULT_EXECUTOR is not None:
return DEFAULT_EXECUTOR
executor_name = conf.get('core', 'EXECUTOR')
DEFAULT_EXECUTOR = _get_executor(executor_name)
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return DEFAULT_EXECUTOR
class Executors:
LocalExecutor = "LocalExecutor"
SequentialExecutor = "SequentialExecutor"
CeleryExecutor = "CeleryExecutor"
DaskExecutor = "DaskExecutor"
MesosExecutor = "MesosExecutor"
KubernetesExecutor = "KubernetesExecutor"
DebugExecutor = "DebugExecutor"
def _get_executor(executor_name):
"""
Creates a new instance of the named executor.
    In case the executor name is not known in Airflow,
look for it in the plugins
"""
if executor_name == Executors.LocalExecutor:
return LocalExecutor()
elif executor_name == Executors.SequentialExecutor:
return SequentialExecutor()
elif executor_name == Executors.CeleryExecutor:
from airflow.executors.celery_executor import CeleryExecutor
return CeleryExecutor()
elif executor_name == Executors.DaskExecutor:
from airflow.executors.dask_executor import DaskExecutor
return DaskExecutor()
elif executor_name == Executors.MesosExecutor:
from airflow.contrib.executors.mesos_executor import MesosExecutor
return MesosExecutor()
elif executor_name == Executors.KubernetesExecutor:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
return KubernetesExecutor()
elif executor_name == Executors.DebugExecutor:
from airflow.executors.debug_executor import DebugExecutor
return DebugExecutor()
else:
# Loading plugins
_integrate_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise AirflowException(
"Executor {0} not supported: "
"please specify in format plugin_module.executor".format(executor_name))
if executor_path[0] in globals():
return globals()[executor_path[0]].__dict__[executor_path[1]]()
else:
raise AirflowException("Executor {0} not supported.".format(executor_name))
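# Illustrative note (not part of the original module): built-in executors are
# resolved by bare name, while plugin executors use the dotted form checked
# above, e.g. (hypothetical names)
#     _get_executor("LocalExecutor")           # built-in
#     _get_executor("my_plugin.MyExecutor")    # provided by a plugin module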
| apache-2.0 | -8,116,916,838,794,192,000 | 36.776699 | 89 | 0.72038 | false |
suutari/shoop | shuup/notify/template.py | 1 | 3011 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.encoding import force_text
from jinja2.sandbox import SandboxedEnvironment
class NoLanguageMatches(Exception):
pass
def render_in_context(context, template_text, html_intent=False):
"""
Render the given Jinja2 template text in the script context.
:param context: Script context.
:type context: shuup.notify.script.Context
:param template_text: Jinja2 template text.
:type template_text: str
:param html_intent: Is the template text intended for HTML output?
This currently turns on autoescaping.
:type html_intent: bool
:return: Rendered template text
:rtype: str
:raises: Whatever Jinja2 might happen to raise
"""
# TODO: Add some filters/globals into this environment?
env = SandboxedEnvironment(autoescape=html_intent)
template = env.from_string(template_text)
return template.render(context.get_variables())
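# Illustrative usage sketch (not part of the original module). The context
# object below is a minimal stand-in; the real shuup.notify.script.Context
# carries script execution state, and only get_variables() is needed here.
#     class _FakeContext(object):
#         def get_variables(self):
#             return {"order_identifier": "123"}
#     render_in_context(_FakeContext(), "Order {{ order_identifier }}")
#     # -> u"Order 123"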
class Template(object):
def __init__(self, context, data):
"""
:param context: Script context
:type context: shuup.notify.script.Context
:param data: Template data dictionary
:type data: dict
"""
self.context = context
self.data = data
def _get_language_data(self, language):
return self.data.get(force_text(language).lower(), {})
def has_language(self, language, fields):
data = self._get_language_data(language)
return set(data.keys()) >= set(fields)
def render(self, language, fields):
"""
Render this template in the given language,
returning the given fields.
:param language: Language code (ISO 639-1 or ISO 639-2)
:type language: str
:param fields: Desired fields to render.
:type fields: list[str]
:return: Dict of field -> rendered content.
:rtype: dict[str, str]
"""
data = self._get_language_data(language)
rendered = {}
for field in fields:
field_template = data.get(field)
if field_template: # pragma: no branch
rendered[field] = render_in_context(self.context, field_template, html_intent=False)
return rendered
def render_first_match(self, language_preferences, fields):
# TODO: Document
for language in language_preferences:
if self.has_language(language, fields):
rendered = self.render(language=language, fields=fields)
rendered["_language"] = language
return rendered
raise NoLanguageMatches("No language in template matches any of languages %r for fields %r" % (
language_preferences, fields
))
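# Illustrative usage sketch (not part of the original module): render_first_match
# walks the caller's language preferences and returns the first translation that
# defines every requested field, e.g. (hypothetical data)
#     template = Template(context, {"en": {"subject": "Hi {{ name }}"}})
#     template.render_first_match(["fi", "en"], ["subject"])
#     # -> {"subject": "Hi ...", "_language": "en"}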
| agpl-3.0 | -5,120,846,759,464,826,000 | 32.831461 | 103 | 0.645965 | false |
danic96/Practica1 | Practica1/Aplicacio/views.py | 1 | 4321 | # from django.shortcuts import render
# Create your views here.
# from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import DetailView, DeleteView
from rest_framework import generics
from models import Movie, Character, Team, Power, Location
from forms import MovieForm, CharacterForm, TeamForm, PowerForm, LocationForm
from Practica1.serializers import MovieSerializer
# Security Mixins
class LoginRequiredMixin(object):
@method_decorator(login_required())
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class CheckIsOwnerMixin(object):
def get_object(self, *args, **kwargs):
obj = super(CheckIsOwnerMixin, self).get_object(*args, **kwargs)
if not obj.user == self.request.user:
raise PermissionDenied
return obj
class LoginRequiredCheckIsOwnerUpdateView(LoginRequiredMixin, CheckIsOwnerMixin, UpdateView):
template_name = 'Aplicacio/form.html'
class MovieCreate(LoginRequiredMixin, CreateView):
model = Movie
template_name = 'Aplicacio/form.html'
form_class = MovieForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(MovieCreate, self).form_valid(form)
class CharacterCreate(LoginRequiredMixin, CreateView):
model = Character
template_name = 'Aplicacio/form.html'
form_class = CharacterForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(CharacterCreate, self).form_valid(form)
class TeamCreate(LoginRequiredMixin, CreateView):
model = Team
template_name = 'Aplicacio/form.html'
form_class = TeamForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(TeamCreate, self).form_valid(form)
class PowerCreate(LoginRequiredMixin, CreateView):
model = Power
template_name = 'Aplicacio/form.html'
form_class = PowerForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(PowerCreate, self).form_valid(form)
class LocationCreate(LoginRequiredMixin, CreateView):
model = Location
template_name = 'Aplicacio/form.html'
form_class = LocationForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(LocationCreate, self).form_valid(form)
"""
class LocationDelete(LoginRequiredMixin, CreateView):
model = Location
template_name = 'Aplicacio/form.html'
form_class = LocationForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(LocationDelete, self).form_valid(form)
"""
"""
class Delete(DeleteView):
model = Location
success_url = reverse_lazy('all_locations') # This is where this view will
# redirect the user
template_name = 'Aplicacio/delete_location.html'
"""
class MovieDetail(DetailView):
model = Movie
template_name = 'Aplicacio/movie_detail.html'
"""
def get_context_data(self, **kwargs):
context = super(MovieDetail, self).get_context_data(**kwargs)
context['RATING_CHOICES'] = RestaurantReview.RATING_CHOICES
return context
"""
class CharacterDetail(DetailView):
model = Character
template_name = 'Aplicacio/character_detail.html'
class TeamDetail(DetailView):
model = Team
template_name = 'Aplicacio/team_detail.html'
class PowerDetail(DetailView):
model = Power
template_name = 'Aplicacio/power_detail.html'
class LocationDetail(DetailView):
model = Location
template_name = 'Aplicacio/location_detail.html'
def form_valid(self, form):
form.instance.user = self.request.user
return super(CharacterCreate, self).form_valid(form)
### RESTful API views ###
class APIMovieList(generics.ListCreateAPIView):
model = Movie
queryset = Movie.objects.all()
serializer_class = MovieSerializer
class APIMovieDetail(generics.RetrieveUpdateDestroyAPIView):
model = Movie
queryset = Movie.objects.all()
serializer_class = MovieSerializer
| mit | 6,965,548,810,274,474,000 | 27.058442 | 93 | 0.707938 | false |
madjar/aurifere | tests/test_pkgbuild.py | 1 | 1304 | import os
import unittest
here = os.path.dirname(__file__)
class PkgbuildTest(unittest.TestCase):
def _get_pkgbuild(self):
from aurifere.pkgbuild import PKGBUILD
return PKGBUILD(os.path.join(here, 'fixtures/PKGBUILD'))
def test_attributes(self):
p = self._get_pkgbuild()
self.assertEqual(p['name'], 'pep8')
self.assertEqual(p['version'], '0.6.1')
def test_version(self):
p = self._get_pkgbuild()
self.assertEqual(p.version(), '0.6.1-3')
def test_all_depends(self):
p = self._get_pkgbuild()
self.assertEqual(list(p.all_depends()),
['python2', 'setuptools', 'fakedepend'])
class VersionCompareTest(unittest.TestCase):
def _get_FUT(self):
from aurifere.pkgbuild import version_is_greater
return version_is_greater
def test_classic_dotted_version_equals(self):
self.assertFalse(self._get_FUT()('2.12.4-5', '2.12.4-5'))
def test_classic_dotted_version_greater(self):
self.assertTrue(self._get_FUT()('2.0.2-1', '2.0.1-2'))
def test_classic_dotted_version_lesser(self):
self.assertFalse(self._get_FUT()('2.0.1-2', '2.0.2-1'))
def test_ugly_version_numbers(self):
self.assertTrue(self._get_FUT()('1.0.27.206_r0-1', '1.0.27.206-1'))
| isc | 343,255,923,941,644,740 | 30.047619 | 75 | 0.623466 | false |
CorundumGames/Invasodado | game/ufo.py | 1 | 3605 | from math import sin
from random import choice, uniform, expovariate
from pygame import Rect
from core import color
from core import config
from core.particles import ParticleEmitter
from game.block import get_block
from game.gameobject import GameObject
from game import gamedata
### Constants ##################################################################
AVG_WAIT = 9000 #Expected time in frames between UFO appearance
DEATH = config.load_sound('ufo_explosion.wav')
FRAMES = tuple(
Rect(64 * (i % 4), 192 + 32 * (i // 4), 64, 32)
for i in range(10, -1, -1)
)
INVADE = config.load_sound('ufo.wav')
START_POS = (640, 16)
UFO_FRAMES = color.get_colored_objects(FRAMES)
UFO_STATES = ('IDLE', 'APPEARING', 'ACTIVE', 'DYING', 'LEAVING', 'LOWERING', 'GAMEOVER')
################################################################################
class UFO(GameObject):
STATES = config.Enum(*UFO_STATES)
GROUP = None
BLOCK_GROUP = None
def __init__(self):
super().__init__()
self._anim = 0.0
self.column = None
self.current_frame_list = UFO_FRAMES
self.image = config.get_sprite(FRAMES[0])
self.odds = expovariate(AVG_WAIT)
self.position = list(START_POS)
self.rect = Rect(START_POS, self.image.get_size())
self.state = UFO.STATES.IDLE
self.emitter = ParticleEmitter(color.random_color_particles, self.rect)
del self.acceleration
def appear(self):
'''
Appear on-screen, but not for very long!
'''
INVADE.play(-1)
self.position = list(START_POS)
self.rect.topleft = list(START_POS)
self.change_state(UFO.STATES.ACTIVE)
self.velocity[0] = -2.0
def move(self):
'''
Move left on the screen, and oscillate up and down.
'''
position = self.position
rect = self.rect
self._anim += 0.5
self.image = UFO_FRAMES[id(choice(color.LIST)) ] \
[int(self._anim) % len(FRAMES)]
position[0] += self.velocity[0]
position[1] += sin(self._anim/4)
rect.topleft = (position[0] + .5, position[1] + .5)
if rect.right < 0:
#If we've gone past the left edge of the screen...
self.change_state(UFO.STATES.LEAVING)
def die(self):
'''
Vanish and release a special Block that clears lots of other Blocks.
'''
self.emitter.rect = self.rect
self.emitter.burst(30)
DEATH.play()
UFO.BLOCK_GROUP.add(get_block((self.rect.centerx, 0), special=True))
gamedata.score += 90
self.change_state(UFO.STATES.LEAVING)
def leave(self):
INVADE.stop()
self.velocity[0] = 0
self.position = list(START_POS)
self.rect.topleft = START_POS
self.change_state(UFO.STATES.IDLE)
def wait(self):
'''
Wait off-screen, and only come back with a specific probability.
'''
if uniform(0, 1) < self.odds:
#With a certain probability...
self.odds = expovariate(AVG_WAIT)
self.change_state(UFO.STATES.APPEARING)
actions = {
STATES.IDLE : 'wait' ,
STATES.APPEARING: 'appear',
STATES.ACTIVE : 'move' ,
STATES.DYING : 'die' ,
STATES.LEAVING : 'leave' ,
STATES.GAMEOVER : None ,
} | gpl-3.0 | -1,058,903,097,034,391,700 | 32.700935 | 88 | 0.530929 | false |
rdo-infra/ci-config | ci-scripts/dlrnapi_promoter/qcow_client.py | 1 | 10534 | """
This file contains classes and functions to interact with qcow image servers
"""
import copy
import logging
import os
import paramiko
from common import PromotionError
class QcowConnectionClient(object):
"""
Proxy class for client connection
"""
_log = logging.getLogger("promoter")
def __init__(self, server_conf):
self._host = server_conf['host']
self._user = server_conf['user']
self._client_type = server_conf['client']
self._keypath = server_conf['keypath']
self._client = os
if self._client_type == "sftp":
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
keypath = os.path.expanduser(self._keypath)
self.key = paramiko.RSAKey.from_private_key_file(filename=keypath)
self.kwargs = {}
if self._user is not None:
self.kwargs['username'] = self._user
else:
self.kwargs['username'] = os.environ.get("USER")
self._log.debug("Connecting to %s as user %s", self._host,
self._user)
self.ssh_client = client
def connect(self):
if hasattr(self, 'ssh_client'):
self.ssh_client.connect(self._host, pkey=self.key, **self.kwargs)
self._client = self.ssh_client.open_sftp()
def __getattr__(self, item):
return getattr(self._client, item)
def close(self):
if self._client_type == "sftp":
self._client.close()
class QcowClient(object):
"""
This class interacts with qcow images servers
"""
log = logging.getLogger("promoter")
def __init__(self, config):
self.config = config
self.git_root = self.config.git_root
self.promote_script = os.path.join(self.git_root,
'ci-scripts', 'promote-images.sh')
self.distro_name = self.config.distro_name
self.distro_version = self.config.distro_version
self.rollback_links = {}
server_conf = self.config.overcloud_images.get('qcow_servers')
qcow_server = self.config.default_qcow_server
self.user = server_conf[qcow_server]['user']
self.root = server_conf[qcow_server]['root']
self.host = server_conf[qcow_server]['host']
self.client = QcowConnectionClient(server_conf[qcow_server])
self.images_dir = os.path.join(
os.path.join(config.stage_root, self.root),
config.distro, config.release, "rdo_trunk")
def validate_qcows(self, dlrn_hash, name=None, assume_valid=False):
"""
Check we have the images dir in the server
if name is specified, verify that name points to the hash
- maybe qcow ran and failed
Check at which point of qcow promotion we stopped
1) did we create a new symlink ?
2) did we create the previous symlink ?
3) are all the images uploaded correctly ?
:param dlrn_hash: The hash to check
:param name: The promotion name
:param assume_valid: report everything worked unconditionally
:return: A dict with result of the validation
"""
try:
self.client.listdir(self.images_dir)
self.client.chdir(self.images_dir)
except EnvironmentError as ex:
self.log.error("Qcow-client: Image root dir %s does not exist "
"in the server, or is not accessible")
self.log.exception(ex)
raise
results = {
"hash_valid": False,
"promotion_valid": False,
"qcow_valid": False,
"missing_qcows": copy.copy(
self.config.overcloud_images['qcow_images']),
"present_qcows": [],
}
stat = None
images = None
images_path = os.path.join(self.images_dir, dlrn_hash.full_hash)
try:
stat = self.client.stat(images_path)
images = sorted(self.client.listdir(images_path))
except EnvironmentError:
self.log.error("Images path for hash %s not present or "
"accessible", dlrn_hash)
if not images:
self.log.error("No images found")
if stat and images:
results['hash_valid'] = True
results['present_qcows'] = images
results['missing_qcows'] = \
list(set(self.config.overcloud_images[
'qcow_images']).difference(
images))
if images == self.config.overcloud_images['qcow_images']:
results['qcow_valid'] = True
if name is not None:
try:
link = self.client.readlink(name)
if link == dlrn_hash.full_hash:
results['promotion_valid'] = True
except EnvironmentError:
self.log.error("%s was not promoted to %s",
dlrn_hash.full_hash, name)
return results
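    # Illustrative sketch (not part of the original class): a caller might use
    # the validation result to decide whether promotion work remains, e.g.
    #     results = client.validate_qcows(dlrn_hash, name="current-tripleo")
    #     if results["hash_valid"] and not results["promotion_valid"]:
    #         client.promote(dlrn_hash, "current-tripleo")
    # The label "current-tripleo" is only an example value.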
def rollback(self):
"""
Rolls back the link to the initial status
Rollback is guaranteed to work only for caught exceptions, and it may
not be really useful. We have a rollback only if a remove or a symlink
fails.
- If a remove fails, it means that we don't need to rollback
- If a symlink fails, then it will probably fail on rollback too.
:return: None
"""
for name, target in self.rollback_links.items():
self.client.remove(name)
self.client.symlink(target, name)
self.rollback_links = {}
def promote(self, candidate_hash, target_label, candidate_label=None,
create_previous=True, validation=True):
"""
Effective promotion of the images. This method will handle symbolic
links to the dir containing images from the candidate hash,
optionally saving the current link as previous
:param candidate_hash: The dlrn hash to promote
:param target_label: The name of the link to create
:param candidate_label: Currently unused
:param create_previous: A bool to determine if previous link is created
:param validation: A bool to determine if qcow validation should be done
:return: None
"""
self.client.connect()
if validation:
self.validate_qcows(candidate_hash)
self.client.chdir(self.images_dir)
log_header = "Qcow promote '{}' to {}:".format(candidate_hash,
target_label)
self.log.info("%s Attempting promotion", log_header)
# Check if candidate_hash dir is present
try:
self.client.stat(candidate_hash.full_hash)
except EnvironmentError as ex:
self.log.error("%s images dir for hash %s not present or not "
"accessible", log_header, candidate_hash)
self.log.exception(ex)
self.client.close()
raise PromotionError("{} No images dir for hash {}"
"".format(log_header, candidate_hash))
# Check if the target label exists and points to a hash dir
current_hash = None
try:
current_hash = self.client.readlink(target_label)
except EnvironmentError:
self.log.debug("%s No link named %s exists", log_header,
target_label)
# If this exists Check if we can remove the symlink
if current_hash:
self.rollback_links['target_label'] = current_hash
try:
self.client.remove(target_label)
except EnvironmentError as ex:
self.log.debug("Unable to remove the target_label: %s",
target_label)
self.log.exception(ex)
self.client.close()
raise
# Check if a previous link exists and points to an hash-dir
previous_label = "previous-{}".format(target_label)
previous_hash = None
try:
previous_hash = self.client.readlink(previous_label)
except EnvironmentError:
self.log.debug("%s No previous-link named %s exists",
log_header,
previous_label)
self.log.debug("Previous hash %s", previous_hash)
# If it exists and we are handling it, check if we can remove and
# reassign it
if current_hash and previous_hash and create_previous:
self.rollback_links[previous_label] = previous_hash
try:
self.client.remove(previous_label)
except EnvironmentError as ex:
self.log.debug("Unable to remove the target_label: %s",
target_label)
self.log.exception(ex)
self.client.close()
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
raise
try:
self.client.symlink(current_hash, previous_label)
except EnvironmentError as ex:
self.log.error("%s failed to link %s to %s", log_header,
previous_label, current_hash)
self.log.exception(ex)
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
self.client.close()
raise
# Finally the effective promotion
try:
c_hash = os.path.join(self.images_dir, candidate_hash.full_hash)
self.client.symlink(c_hash, target_label)
self.log.debug("Created link {} -> {}".format(
candidate_hash.full_hash, target_label))
except EnvironmentError as ex:
self.log.error("%s failed to link %s to %s", log_header,
target_label, candidate_hash.full_hash)
self.log.exception(ex)
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
finally:
self.client.close()
self.log.info("%s Successful promotion", log_header)
| apache-2.0 | -8,655,361,982,532,262,000 | 37.445255 | 80 | 0.56123 | false |
parksandwildlife/wastd | occurrence/migrations/0006_auto_20181129_1812.py | 1 | 1084 | # Generated by Django 2.0.8 on 2018-11-29 10:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('occurrence', '0005_auto_20181025_1720'),
]
operations = [
migrations.AlterField(
model_name='areaencounter',
name='source',
field=models.PositiveIntegerField(choices=[(0, 'Direct entry'), (1, 'Manual entry from paper datasheet'), (2, 'Digital data capture (ODK)'), (10, 'Threatened Fauna'), (11, 'Threatened Flora'), (12, 'Threatened Communities'), (13, 'Threatened Communities Boundaries'), (14, 'Threatened Communities Buffers'), (15, 'Threatened Communities Sites'), (20, 'Turtle Tagging Database WAMTRAM2'), (21, 'Ningaloo Turtle Program'), (22, 'Broome Turtle Program'), (23, 'Pt Hedland Turtle Program'), (24, 'Gnaraloo Turtle Program'), (25, 'Eco Beach Turtle Program'), (30, 'Cetacean Strandings Database'), (31, 'Pinniped Strandings Database')], default=0, help_text='Where was this record captured initially?', verbose_name='Data Source'),
),
]
| mit | -6,071,923,042,693,717,000 | 59.222222 | 738 | 0.671587 | false |
csdms/dakota | dakotathon/tests/test_plugin_hydrotrend_run.py | 1 | 3466 | #!/usr/bin/env python
#
# Test running the dakota.plugin.hydrotrend module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper ([email protected])
import os
import shutil
# import filecmp
import glob
from nose.tools import with_setup, assert_true
from dakotathon.dakota import Dakota
from dakotathon.plugins.hydrotrend import is_installed as is_hydrotrend_installed
from dakotathon.utils import is_dakota_installed
from . import start_dir, data_dir
# Global variables -----------------------------------------------------
run_dir = os.getcwd()
config_file = os.path.join(run_dir, "dakota.yaml")
known_config_file = os.path.join(data_dir, "dakota.yaml")
# known_dat_file = os.path.join(data_dir, 'dakota.dat')
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print("\n*** " + __name__)
def setup():
"""Called at start of any test using it @with_setup()"""
pass
def teardown():
"""Called at end of any test using it @with_setup()"""
if os.path.exists(config_file):
os.remove(config_file)
if os.path.exists("dakota.in"):
os.remove("dakota.in")
if os.path.exists("run.log"):
os.remove("run.log")
if os.path.exists("stderr.log"):
os.remove("stderr.log")
if is_hydrotrend_installed():
for dname in glob.glob("HYDRO_*"):
shutil.rmtree(dname)
if is_dakota_installed():
for dname in glob.glob("run.*"):
shutil.rmtree(dname)
for fname in ["dakota." + ext for ext in ["dat", "out", "rst"]]:
if os.path.exists(fname):
os.remove(fname)
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@with_setup(setup, teardown)
def test_run_by_setting_attributes():
"""Test running a HydroTrend simulation."""
d = Dakota(method="vector_parameter_study", plugin="hydrotrend")
d.template_file = os.path.join(data_dir, "HYDRO.IN.dtmpl")
d.auxiliary_files = os.path.join(data_dir, "HYDRO0.HYPS")
d.variables.descriptors = [
"starting_mean_annual_temperature",
"total_annual_precipitation",
]
d.variables.initial_point = [10.0, 1.5]
d.method.final_point = [20.0, 2.5]
d.method.n_steps = 5
d.responses.response_descriptors = ["Qs_median", "Q_mean"]
d.responses.response_files = ["HYDROASCII.QS", "HYDROASCII.Q"]
d.responses.response_statistics = ["median", "mean"]
d.setup()
assert_true(os.path.exists(d.input_file))
if is_dakota_installed() and is_hydrotrend_installed():
d.run()
assert_true(os.path.exists(d.output_file))
# assert_true(filecmp.cmp(known_dat_file, d.environment.data_file))
@with_setup(setup, teardown)
def test_run_from_config_file():
"""Test running a HydroTrend simulation from a config file."""
d = Dakota.from_file_like(known_config_file)
d.run_directory = run_dir
d.template_file = os.path.join(data_dir, "HYDRO.IN.dtmpl")
d.auxiliary_files = os.path.join(data_dir, "HYDRO0.HYPS")
d.serialize(config_file)
d.write_input_file()
assert_true(os.path.exists(d.input_file))
if is_dakota_installed() and is_hydrotrend_installed():
d.run()
assert_true(os.path.exists(d.output_file))
# assert_true(filecmp.cmp(known_dat_file, d.environment.data_file))
| mit | -8,400,592,787,912,912,000 | 31.092593 | 81 | 0.617426 | false |
kysolvik/reservoir-id | reservoir-id/classifier_train.py | 1 | 6974 | #!/usr/bin/env python
"""
Train random forest classifier
Inputs: CSV from build_att_table, small area cutoff
Outputs: Packaged up Random Forest model
@authors: Kylen Solvik
Date Create: 3/17/17
"""
# Load libraries
import pandas as pd
from sklearn import model_selection
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import *
import numpy as np
import sys
import argparse
import os
import xgboost as xgb
# Parse arguments
parser = argparse.ArgumentParser(description='Train Random Forest classifier.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('prop_csv',
help='Path to attribute table (from build_att_table.py).',
type=str)
parser.add_argument('xgb_pkl',
help='Path to save random forest model as .pkl.',
type=str)
parser.add_argument('--area_lowbound',
help='Lower area bound. All regions <= in size will be ignored',
default=2,
type=int)
parser.add_argument('--path_prefix',
help='To be placed at beginnings of all other path args',
type=str,default='')
args = parser.parse_args()
def select_training_obs(full_csv_path):
"""Takes full csv and selects only the training observations.
Writes out to csv for further use"""
training_csv_path = full_csv_path.replace('.csv','_trainonly.csv')
if not os.path.isfile(training_csv_path):
dataset = pd.read_csv(full_csv_path,header=0)
training_dataset = dataset.loc[dataset['class'] > 0]
training_dataset.to_csv(training_csv_path,header=True,index=False)
return(training_csv_path)
def main():
# Set any attributes to exclude for this run
exclude_att_patterns = []
# Load dataset
training_csv = select_training_obs(args.path_prefix + args.prop_csv)
dataset = pd.read_csv(training_csv,header=0)
dataset_acut = dataset.loc[dataset['area'] > args.area_lowbound]
# Exclude attributes matching user input patterns, or if they are all nans
exclude_atts = []
for pattern in exclude_att_patterns:
col_list = [col for col in dataset_acut.columns if pattern in col]
exclude_atts.extend(col_list)
for att in dataset.columns[1:]:
if sum(np.isfinite(dataset[att])) == 0:
exclude_atts.append(att)
for att in list(set(exclude_atts)):
del dataset_acut[att]
(ds_y,ds_x) = dataset_acut.shape
print(ds_y,ds_x)
# Convert dataset to array
feature_names = dataset_acut.columns[2:]
array = dataset_acut.values
X = array[:,2:ds_x].astype(float)
Y = array[:,1].astype(int)
Y = Y-1 # Convert from 1s and 2s to 0-1
# Set nans to 0
X = np.nan_to_num(X)
# Separate test data
test_size = 0.2
seed = 5
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(
X, Y, test_size=test_size,
random_state=seed)
# Convert data to xgboost matrices
d_train = xgb.DMatrix(X_train,label=Y_train)
# d_test = xgb.DMatrix(X_test,label=Y_test)
#----------------------------------------------------------------------
# Paramater tuning
# Step 1: Find approximate n_estimators to use
early_stop_rounds = 40
n_folds = 5
xgb_model = xgb.XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
seed=27)
xgb_params = xgb_model.get_xgb_params()
cvresult = xgb.cv(xgb_params, d_train,
num_boost_round=xgb_params['n_estimators'], nfold=n_folds,
metrics='auc', early_stopping_rounds=early_stop_rounds,
)
n_est_best = (cvresult.shape[0] - early_stop_rounds)
print('Best number of rounds = {}'.format(n_est_best))
# Step 2: Tune hyperparameters
xgb_model = xgb.XGBClassifier()
params = {'max_depth': range(5,10,2),
'learning_rate': [0.1],
'gamma':[0,0.5,1],
'silent': [1],
'objective': ['binary:logistic'],
'n_estimators' : [n_est_best],
'subsample' : [0.7, 0.8,1],
'min_child_weight' : range(1,4,2),
'colsample_bytree':[0.7,0.8,1],
}
clf = GridSearchCV(xgb_model,params,n_jobs = 1,
cv = StratifiedKFold(Y_train,
n_folds=5, shuffle=True),
scoring = 'roc_auc',
verbose = 2,
refit = True)
clf.fit(X_train,Y_train)
best_parameters,score,_ = max(clf.grid_scores_,key=lambda x: x[1])
print('Raw AUC score:',score)
for param_name in sorted(best_parameters.keys()):
print("%s: %r" % (param_name, best_parameters[param_name]))
# Step 3: Decrease learning rate and up the # of trees
#xgb_finalcv = XGBClassifier()
tuned_params = clf.best_params_
tuned_params['n_estimators'] = 10000
tuned_params['learning_rate'] = 0.01
cvresult = xgb.cv(tuned_params, d_train,
num_boost_round=tuned_params['n_estimators'], nfold=n_folds,
metrics='auc', early_stopping_rounds=early_stop_rounds,
)
# Train model with cv results and predict on test set For test accuracy
n_est_final = int((cvresult.shape[0] - early_stop_rounds) / (1 - 1 / n_folds))
tuned_params['n_estimators'] = n_est_final
print(tuned_params)
xgb_train = xgb.XGBClassifier()
xgb_train.set_params(**tuned_params)
xgb_train.fit(X_train,Y_train)
bst_preds = xgb_train.predict(X_test)
print("Xgboost Test acc = " + str(accuracy_score(Y_test, bst_preds)))
print(confusion_matrix(Y_test, bst_preds))
print(classification_report(Y_test, bst_preds))
# Export cv classifier
joblib.dump(cvresult, args.path_prefix + args.xgb_pkl + 'cv')
# Export classifier trained on full data set
xgb_full = xgb.XGBClassifier()
xgb_full.set_params(**tuned_params)
xgb_full.fit(X,Y)
joblib.dump(xgb_full, args.path_prefix + args.xgb_pkl)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,813,554,001,576,761,000 | 37.10929 | 88 | 0.57571 | false |
ambitioninc/django-user-guide | user_guide/templatetags/user_guide_tags.py | 1 | 2767 | """
Template tag for displaying user guides.
"""
import re
from django import template
from django.conf import settings
from django.template import loader
from django.template.defaulttags import CsrfTokenNode
from user_guide.models import GuideInfo
register = template.Library()
# The maximum number of guides to show per page
USER_GUIDE_SHOW_MAX = getattr(settings, 'USER_GUIDE_SHOW_MAX', 10)
# Use cookies to determine if guides should be shown
USER_GUIDE_USE_COOKIES = getattr(settings, 'USER_GUIDE_USE_COOKIES', False)
# The url to any custom CSS
USER_GUIDE_CSS_URL = getattr(
settings,
'USER_GUIDE_CSS_URL',
None
)
# The url to any custom JS
USER_GUIDE_JS_URL = getattr(
settings,
'USER_GUIDE_JS_URL',
None
)
@register.simple_tag(takes_context=True)
def user_guide(context, *args, **kwargs):
"""
Creates html items for all appropriate user guides.
Kwargs:
guide_name: A string name of a specific guide.
guide_tags: An array of string guide tags.
        limit: An integer maximum number of guides to show at a single time.
Returns:
An html string containing the user guide scaffolding and any guide html.
"""
user = context['request'].user if 'request' in context and hasattr(context['request'], 'user') else None
    if user and user.is_authenticated():  # Only show guides when a user is logged in
limit = kwargs.get('limit', USER_GUIDE_SHOW_MAX)
filters = {
'user': user,
'is_finished': False
}
# Handle special filters
if kwargs.get('guide_name'):
filters['guide__guide_name'] = kwargs.get('guide_name')
if kwargs.get('guide_tags'):
filters['guide__guide_tag__in'] = kwargs.get('guide_tags')
# Set the html
html = ''.join((
'<div data-guide="{0}" class="django-user-guide-item">{1}</div>'.format(
guide_info.id,
guide_info.guide.html
) for guide_info in GuideInfo.objects.select_related('guide').filter(**filters).only('guide')[:limit]
))
# Return the rendered template with the guide html
return loader.render_to_string('user_guide/window.html', {
'html': re.sub(r'\{\s*static\s*\}', settings.STATIC_URL, html),
'css_href': '{0}user_guide/build/django-user-guide.css'.format(settings.STATIC_URL),
'js_src': '{0}user_guide/build/django-user-guide.js'.format(settings.STATIC_URL),
'custom_css_href': USER_GUIDE_CSS_URL,
'custom_js_src': USER_GUIDE_JS_URL,
'use_cookies': str(USER_GUIDE_USE_COOKIES).lower(),
'csrf_node': CsrfTokenNode().render(context)
})
else:
return ''
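# Illustrative usage sketch (not part of the original module): in a Django
# template the tag might be used as
#     {% load user_guide_tags %}
#     {% user_guide guide_tags=guide_tags limit=5 %}
# where "guide_tags" is assumed to come from the view's template context.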
| mit | -4,151,106,416,245,228,000 | 31.940476 | 113 | 0.62378 | false |
praekelt/django-ultracache | bin/cache-purge-consumer.py | 1 | 3973 | """Subscribe to RabbitMQ and listen for purge instructions continuously. Manage
this script through eg. supervisor."""
import json
import traceback
from multiprocessing.pool import ThreadPool
from optparse import OptionParser
from time import sleep
import pika
import requests
import yaml
class Consumer:
channel = None
connection = None
def __init__(self):
self.pool = ThreadPool()
parser = OptionParser()
parser.add_option("-c", "--config", dest="config",
help="Configuration file", metavar="FILE")
(options, args) = parser.parse_args()
config_file = options.config
self.config = {}
if config_file:
            self.config = yaml.safe_load(open(config_file)) or {}
def log(self, msg):
name = self.config.get("logfile", None)
if not name:
return
if name == "stdout":
print(msg)
return
fp = open(name, "a")
try:
fp.write(msg + "\n")
finally:
fp.close()
def connect(self):
parameters = pika.URLParameters(
self.config.get(
"rabbit-url",
"amqp://guest:[email protected]:5672/%2F"
)
)
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.exchange_declare(
exchange="purgatory", exchange_type="fanout"
)
queue = self.channel.queue_declare(exclusive=True)
queue_name = queue.method.queue
self.channel.queue_bind(exchange="purgatory", queue=queue_name)
self.channel.basic_qos(prefetch_count=1)
self.channel.basic_consume(
self.on_message, queue=queue_name, no_ack=False, exclusive=True
)
def on_message(self, channel, method_frame, header_frame, body):
self.pool.apply_async(self.handle_message, (body,))
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def handle_message(self, body):
if body:
try:
di = json.loads(body)
except ValueError:
path = body
headers = {}
else:
path = di["path"]
headers = di["headers"]
self.log("Purging %s with headers %s" % (path, str(headers)))
host = self.config.get("host", None)
try:
if host:
final_headers = {"Host": host}
final_headers.update(headers)
response = requests.request(
"PURGE", "http://" \
+ self.config.get("proxy-address", "127.0.0.1") + path,
headers=final_headers,
timeout=10
)
else:
response = requests.request(
"PURGE", "http://" \
+ self.config.get("proxy-address", "127.0.0.1") + path,
timeout=10,
headers=headers
)
except Exception as exception:
msg = traceback.format_exc()
self.log("Error purging %s: %s" % (path, msg))
else:
content = response.content
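    # Illustrative note (not part of the original script): purge messages are
    # expected either as a JSON object, e.g.
    #     {"path": "/some/page/", "headers": {"X-Example": "1"}}
    # or, in the legacy form handled above, as a bare path string such as
    #     "/some/page/"
    # The header name "X-Example" is only a placeholder.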
def consume(self):
loop = True
while loop:
try:
if self.channel is None:
raise pika.exceptions.ConnectionClosed()
self.channel.start_consuming()
except KeyboardInterrupt:
loop = False
self.channel.stop_consuming()
except pika.exceptions.ConnectionClosed:
try:
self.connect()
except pika.exceptions.ConnectionClosed:
sleep(1)
self.connection.close()
consumer = Consumer()
consumer.consume()
| bsd-3-clause | -856,625,678,671,990,400 | 31.300813 | 83 | 0.511956 | false |
adviti/melange | tests/app/soc/logic/test_dicts.py | 1 | 13831 | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests related to soc.logic.dicts.
"""
import unittest
from google.appengine.ext import db
from soc.logic import dicts
class TestDicts(unittest.TestCase):
"""Tests functions in dicts module.
"""
def setUp(self):
self.dummy_dict = {'a': '1', 'b': '2', 'c': '3', 'd': '1',
'e': '1', 'f': '3', 'g': '7'}
def testFilterKeysToFilterValid(self):
"""Tests if a dict is filtered correctly if some keys are given.
"""
keys_to_filter = ['a', 'b', 'c', 'd']
expected_dict = {'a': '1', 'b': '2', 'c': '3', 'd': '1'}
self.assertEqual(
dicts.filter(self.dummy_dict, keys_to_filter), expected_dict)
def testFilterNoKeysToFilter(self):
"""Tests that nothing is filtered if no keys are given.
"""
keys_to_filter = []
expected_dict = {}
self.assertEqual(
dicts.filter(self.dummy_dict, keys_to_filter), expected_dict)
def testFilterKeysToFilterNotInDict(self):
"""Tests that nothing is filtered if keys are not in dict.
"""
keys_to_filter = ['foo8']
expected_dict = {}
self.assertEqual(
dicts.filter(self.dummy_dict, keys_to_filter), expected_dict)
def testMergeTargetHasNoKeysInUpdates(self):
"""Tests if a dictionary is updated correctly.
"""
target = self.dummy_dict
updates = {'h': '8', 'i': {'a': '3', 'b': '11'}}
temp = target.copy()
temp.update(updates)
expected_dict = temp
self.assertEqual(dicts.merge(target, updates), expected_dict)
def testMergeTargetHasAnyKeyInUpdates(self):
"""Tests if a dictionary is updated correctly.
"""
target = self.dummy_dict
updates = {'a': '2', 'b': '3', 'c': '4', 'd': '5', 'e': '6', 'f': '7',
'g': '8', 'h': '9', 'i': {'a': '3', 'b': '11'}}
temp = target.copy()
temp_updates = dict((('h', updates['h']), ('i', updates['i'])))
temp.update(temp_updates)
expected_dict = temp
self.assertEqual(dicts.merge(target, updates), expected_dict)
def testMergeUpdatesEmpty(self):
"""Tests that nothing is updated if no updates.
"""
target = self.dummy_dict
updates = {}
expected_dict = target
self.assertEqual(dicts.merge(target, updates), expected_dict)
def testMergeTargetEmptyUpdatesNotEmpty(self):
"""Tests that an empty target is updated.
"""
target = {}
updates = {'a': '1'}
expected = updates
self.assertEqual(dicts.merge(target, updates), expected)
def testMergeTargetEmptyUpdatesEmpty(self):
"""Tests that an empty dict is returned when no target and updates.
"""
target = {}
updates = {}
expected_dict = updates
self.assertEqual(dicts.merge(target, updates), expected_dict)
def testMergeSubMergeTrueMergeSubDictsPresent(self):
"""Tests if sub-dicts are merged if sub_merge=True.
"""
#merge sub dict present in both target and updates
target = self.dummy_dict.copy()
new_key = 'foo'
new_value = {'k1': 'v1', 'k2': 'v2'}
target.update({new_key: new_value })
updates = {'h': '8', 'foo': {'a': '3', 'b': '11'}}
temp = self.dummy_dict.copy()
from copy import deepcopy
updates_copy = deepcopy(updates)
temp.update(updates_copy)
temp[new_key].update(new_value)
expected_dict = temp
self.assertEqual(dicts.merge(target, updates, sub_merge=True), expected_dict)
def testMergeSubMergeTrueSubListsPresent(self):
"""Tests if two lists are merged if sub_merge=True.
"""
#merge sub lists present in both target and lists
target = self.dummy_dict.copy()
target.update({'foo': ['value1', 'value2']})
updates = {'h': '8', 'foo': ['value3', 'value4']}
temp = target.copy()
temp['foo'] = temp['foo'] + updates['foo']
temp.update(h='8')
expected_dict = temp
self.assertEqual(dicts.merge(target, updates, sub_merge=True), expected_dict)
def testMergeWhenBothSubListsAndDictsArePresent(self):
"""Tests that lists and dicts can not be merged.
"""
#do not merge lists and dicts and sub_merge is True
target = self.dummy_dict.copy()
target.update({'foo': {'alpha': 1, 'beta': 2}})
updates = {'foo':['gamma', 'delta']}
expected = target
self.assertEqual(dicts.merge(target, updates), expected)
target = self.dummy_dict.copy()
target.update({'foo':['gamma', 'delta']})
updates = {'foo': {'alpha': 1, 'beta': 2}}
expected = target
self.assertEqual(dicts.merge(target, updates), expected)
def testMergeSubMergeFalseSubDictsPresent(self):
"""Tests if sub-dicts are not merged if sub_merge=False.
"""
#merge sub dict present in both target and updates
target = self.dummy_dict.copy()
target.update({'foo': {'k1': 'v1', 'k2': 'v2'}})
updates = {'foo': {'a': '3', 'b': '11'}}
expected_dict = target
self.assertEqual(dicts.merge(target, updates, sub_merge=False), expected_dict)
def testMergeRecursiveFalse(self):
"""Tests if dicts are updated.
"""
target = {'a':1, 'b': {'c': {"d": { "e": 2}}}}
updates = {'f': 3, 'b': {'c' :{"d": {"g": 5}}}}
temp = target.copy()
temp.update({'f': 3})
expected = temp
self.assertEqual(dicts.merge(target, updates,
sub_merge=True, recursive=False), expected)
def testMergeRecursiveTrue(self):
"""Tests if dicts are updated correctly when recursive is set True.
"""
target = {'a':1, 'b': {'c': {"d": { "e": 2}}}}
updates = {'f': 3, 'b': {'c' :{"d": {"g": 5}}}}
expected = {'a':1, 'f':3, 'b': {'c': {"d": { "e": 2, 'g':5}}}}
self.assertEqual(dicts.merge(target, updates,
sub_merge=True, recursive=True), expected)
def testMergeRecursiveTrueSubMergeFalse(self):
"""Tests if dicts are updated correctly when recursive True, sub_merge False.
"""
target = {'a':1, 'b': {'c': {"d": { "e": 2}}}}
updates = {'f': 3, 'b': {'c' :{"d": {"g": 5}}}}
expected = {'a':1, 'f':3, 'b': {'c': {"d": { "e": 2}}}}
self.assertEqual(dicts.merge(target, updates,
sub_merge=False, recursive=True), expected)
def testZip(self):
"""Test that keys and values are zipped as desired.
"""
#equal keys and values
keys = ['a', 'b', 'c']
values = ['1', '2', '3']
expected_dict = dict(zip(keys, values))
self.assertEqual(dicts.zip(keys, values), expected_dict)
#extra key
keys.append('d')
expected_dict.update({'d': None})
self.assertEqual(dicts.zip(keys, values), expected_dict)
#extra values
values.extend(['4', '5'])
expected_dict = dict(zip(keys, values))
self.assertEqual(dicts.zip(keys, values), expected_dict)
def testUnzip(self):
"""Tests if desired values are unzipped from a dictionary.
"""
target = self.dummy_dict
order = ['a', 'b', 'c']
expected_list = ['1', '2', '3']
gen = dicts.unzip(target, order)
result = list(gen)
self.assertEqual(result, expected_list)
target = self.dummy_dict
order = ['x', 'y', 'z']
expected_list = []
gen = dicts.unzip(target, order)
self.assertRaises(KeyError, list, gen)
order = []
expected_list = []
gen = dicts.unzip(target, order)
result = list(gen)
self.assertEqual(result, expected_list)
def testRename(self):
"""Tests that keys in the target dict are renamed with value of the same key
in another dict.
"""
target = {'wan': 1, 'too': 2, 'tree': 3}
keys = {'wan': 'one', 'too': 'two', 'tree': 'three'}
expected_dict = {'one': 1, 'two': 2, 'three': 3}
self.assertEqual(dicts.rename(target, keys), expected_dict)
target = {}
expected_dict = {}
self.assertEqual(dicts.rename(target, keys), expected_dict)
target = {'wan': 1, 'too': 2, 'tree': 3}
keys = {}
expected_dict = {}
self.assertEqual(dicts.rename(target, keys), expected_dict)
target = {'wan': 1, 'too': 2, 'tree': 3}
keys = {'for': 4}
expected_dict = {}
self.assertEqual(dicts.rename(target, keys), expected_dict)
def testSplit(self):
"""Tests that a dict is split into single-valued pairs.
"""
target = {}
expected = [{}]
self.assertEqual(dicts.split(target), expected)
target = {'foo': 'bar'}
expected = [{'foo': 'bar'}]
self.assertEqual(dicts.split(target), expected)
target = {'foo': 'bar', 'bar': 'baz'}
expected = [{'foo': 'bar', 'bar': 'baz'}]
self.assertEqual(dicts.split(target), expected)
target = {'foo': 'bar', 'bar': ['one', 'two']}
expected = [
{'foo': 'bar', 'bar': 'one'}, {'foo': 'bar', 'bar': 'two'}]
self.assertEqual(dicts.split(target), expected)
target = {'foo': 'bar', 'bar': ['one', 'two'], 'baz': ['three', 'four']}
expected = [{'bar': 'one', 'foo': 'bar', 'baz': 'three'},
{'bar': 'two', 'foo': 'bar', 'baz': 'three'},
{'bar': 'one', 'foo': 'bar', 'baz': 'four'},
{'bar': 'two', 'foo': 'bar', 'baz': 'four'}]
self.assertEqual(dicts.split(target), expected)
def testGroupDictBy(self):
"""Not tested because dicts.groupDictBy is not used in the code base
presently.
"""
pass
def testIdentity(self):
"""Tests if a dict with values equal to keys is returned
"""
target = {'wan': 1, 'too': 2, 'tree': 3}
expected_dict = {'wan': 'wan' , 'too': 'too', 'tree': 'tree'}
self.assertEqual(dicts.identity(target), expected_dict)
target = {}
expected_dict = {}
self.assertEqual(dicts.identity(target), expected_dict)
def testFormat(self):
"""Not tested because dicts.format is not used in the code base presently.
"""
pass
def testGroupby(self):
"""Tests if a list of dictionaries is grouped by a group_key.
"""
target = [{'a':1, 'b': 2}, {'a':3, 'b': 4}, {'a':1, 'c': 4}]
group_key = 'a'
expected = {1: [{'a':1, 'b': 2}, {'a':1, 'c': 4}], 3: [{'a':3, 'b': 4}]}
self.assertEqual(dicts.groupby(target, group_key), expected)
group_key = ''
expected = {}
self.assertRaises(KeyError, dicts.groupby, target, group_key)
group_key = 'f'
self.assertRaises(KeyError, dicts.groupby, target, group_key)
target = []
group_key = ''
expected = {}
self.assertEqual(dicts.groupby(target, group_key), expected)
target = []
group_key = 'a'
expected = {}
self.assertEqual(dicts.groupby(target, group_key), expected)
def testContainsAll(self):
"""Tests if a correct boolean value is returned.
"""
target = {'wan': 1, 'too': 2, 'tree': 3}
keys = ['wan', 'too']
self.assertTrue(dicts.containsAll(target, keys))
keys = ['wan', 'fore']
self.assertFalse(dicts.containsAll(target, keys))
keys = []
self.assertTrue(dicts.containsAll(target, keys))
def testToDict(self):
"""Tests if a dict with desired entity properties is returned.
"""
class Books(db.Model):
item_freq = db.StringProperty()
freq = db.IntegerProperty()
details = db.TextProperty()
released = bool
entity = Books()
entity.item_freq = '5'
entity.freq = 4
entity.details = 'Test Entity'
entity.released = True
entity.put()
expected_dict = {'freq': 4, 'item_freq': '5'}
self.assertEqual(dicts.toDict(entity), expected_dict)
field_names = ['item_freq', 'details', 'released']
expected_dict = {'released': True,
'details': 'Test Entity',
'item_freq': '5'}
self.assertEqual(dicts.toDict(entity, field_names), expected_dict)
field_names = []
expected_dict = {'freq': 4, 'item_freq': '5'}
self.assertEqual(dicts.toDict(entity, field_names), expected_dict)
#field names not in the entity
field_names = ['other_data']
expected_dict = {}
self.assertEqual(dicts.toDict(entity, field_names), expected_dict)
def testCleanDict(self):
"""Tests if the fields in the dict is HTML escaped as desired.
"""
target = {
'name': 'test', 'param1':'>1000', 'param2':'<1000', 'param3': 'a&b'}
filter_fields = ['param1', 'param2', 'param3']
expected_dict = {
'param3': u'a&b', 'name': 'test', 'param1': u'>1000',
'param2': u'<1000'
}
self.assertEqual(dicts.cleanDict(target, filter_fields), expected_dict)
filter_fields = []
expected_dict = {'param3': 'a&b', 'name': 'test', 'param1': '>1000',
'param2': '<1000'}
self.assertEqual(dicts.cleanDict(target, filter_fields), expected_dict)
#parameter not present in target
filter_fields = ['other_param']
self.assertRaises(KeyError, dicts.cleanDict, target, filter_fields)
from django.utils.safestring import mark_safe
target['param1'] = mark_safe(target['param1'])
expected_dict = {
'param3': u'a&b', 'name': 'test', 'param1': '>1000',
'param2': u'<1000'}
filter_fields = ['param1', 'param2', 'param3']
self.assertEqual(dicts.cleanDict(target, filter_fields), expected_dict)
expected_dict = {
'param3': u'a&b', 'name': 'test', 'param1': u'>1000',
'param2': u'<1000'}
self.assertEqual(
dicts.cleanDict(target, filter_fields, escape_safe=True), expected_dict)
| apache-2.0 | -8,010,192,646,561,666,000 | 32.327711 | 82 | 0.598583 | false |
myriasofo/CLRS_exercises | algos/testSuite.py | 1 | 6292 | ''' WHAT: Simple test framework for checking algorithms
TASK:
*Handle output that's an object, eg. bst that gets modified
*option3: optional param - Class (accept input/output as arrays and TURN INTO object)
(option1: optional param - comparison function (instead of simple "!=")
(option2: optional param - Class (automatically deconstruct objects in arrays)
'''
import copy
def init(*args, **kwargs):
return TestSuite(*args, **kwargs)
class TestSuite:
def __init__(self, tests, dataStructures=None):
self.tests = tests
self.converter = DataStructureConverter(dataStructures) if dataStructures is not None else None
def test(self, function):
print('FUNCTION: {}'.format(function.__name__))
tests = copy.deepcopy(self.tests)
for i, test in enumerate(tests):
params, expected = test
try:
actual = self.runFunction(function, params)
if actual != expected:
self.printError(i+1, params, expected, actual)
return
except Exception as error:
self.printError(i+1, params, expected, 'ERROR')
raise error
def printError(self, iteration, params, expected, actual):
print()
print('ERROR: Iteration {}'.format(iteration))
print()
stringifiedParams = ', '.join([str(param) for param in params])
print('input: {}'.format(stringifiedParams))
print('ouptut expected: {}'.format(expected))
print('output actual: {}'.format(actual))
print()
def runFunction(self, function, params):
if self.converter is not None:
params = self.converter.convertInput(params)
params = copy.deepcopy(params)
actual = function(*params)
if self.converter is not None:
actual = self.converter.convertOuptut(actual)
return actual
class DataStructureConverter:
def __init__(self, config):
self.config = config
self.arrayToDs = {
'SinglyLinkedList': self.createSinglyLinkedList,
#'DoublyLinkedList': createSinglyLinkedList,
#'BinaryTree': createBinaryTree,
#'Queue': createQueue,
#'Stack': createStack,
}
self.dsToArray = {
'SinglyLinkedList': self.createArrayFromSinglyLinkedList,
#'DoublyLinkedList': createSinglyLinkedList,
#'BinaryTree': createBinaryTree,
#'Queue': createQueue,
#'Stack': createStack,
}
def convertInput(self, params):
if isinstance(self.config, str):
converted = []
for param in params:
ds = self.convertArrayToDs(param, self.config)
converted.append(ds)
return converted
elif isinstance(self.config, dict):
converted = []
for param, dsName in zip(params, self.config['input']):
if not isinstance(dsName, str):
converted.append(param)
else:
ds = self.convertArrayToDs(param, dsName)
converted.append(ds)
return converted
else:
raise Exception('ERROR: This is not the right format for dataStructure: {}'.format(self.config))
def convertOuptut(self, output):
if isinstance(self.config, str):
return self.convertDsToArray(output, self.config)
elif isinstance(self.config, dict):
return self.convertDsToArray(output, self.config['output'])
else:
raise Exception('ERROR: This is not the right format for dataStructure: {}'.format(self.ds))
def convertArrayToDs(self, array, dsName):
if dsName not in self.arrayToDs:
raise Exception('ERROR: Name of dataStructure not supported: {}'.format(dsName))
dsConstructor = self.arrayToDs[dsName]
ds = dsConstructor(array)
return ds
def convertDsToArray(self, ds, dsName):
if dsName not in self.dsToArray:
raise Exception('ERROR: Name of dataStructure not supported: {}'.format(dsName))
arrayConstructor = self.dsToArray[dsName]
array = arrayConstructor(ds)
return array
class Node:
# spec determined by leetcode
def __init__(self, val):
self.val = val
self.next = None
def createSinglyLinkedList(self, array, storeInArray=False):
if storeInArray:
container = []
head = None
curr = None
for elem in array:
node = self.Node(elem)
if storeInArray:
container.append(node)
if head is None:
head = node
curr = node
continue
curr.next = node
curr = node
if storeInArray:
return container
return head
def createArrayFromSinglyLinkedList(self, head):
array = []
while head is not None:
array.append(head.val)
head = head.next
return array
# custom
def createIntersectingLinkedLists(self, nA, nB, nIntersection):
headA = self.createSinglyLinkedList(range(nA))
headB = self.createSinglyLinkedList(range(nA, nA+nB))
if nIntersection is None or nIntersection == 0:
return headA, headB, None
headI = self.createSinglyLinkedList(range(nA+nB, nA+nB+nIntersection))
if headA is None:
headA = headI
else:
self.getEndofList(headA).next = headI
if headB is None:
headB = headI
else:
self.getEndofList(headB).next = headI
return headA, headB, headI
def getEndofList(self, head):
while head is not None and head.next is not None:
head = head.next
return head
### Example usage
def main():
import sys
sys.path.append('/Users/Abe/my/codingPractice/algos')
import testSuite
tests = [
([[3,2,1,5,6,4], 2], 5),
([[3,2,3,1,2,4,5,5,6], 4], 4)
]
t = testSuite.init(tests)
t.test(v1)
t.test(v2)
t.test(v3)
| mit | -4,231,904,569,753,463,300 | 30.148515 | 108 | 0.578512 | false |
tis-intern-apparel/ApparelStrategy | server/dialogue_system/module/database.py | 1 | 5633 | # -*- coding: utf-8 -*-
import os
import codecs
class Personal:
point_id = ''
user_name = ''
user_pronoun = ''
sex = ''
phone = ''
email = ''
address = ''
class Cloth:
cloth_name = ''
color_code = ''
small_type = ''
price = ''
image_url = ''
big_type = ''
cloth_code = ''
cloth_describe = ''
class Evaluate:
clothes = []
osyaredo = 0
class DataBaseManager:
def __init__(self,data_dir):
self.data_dir = data_dir
self.clothes_path = os.path.join(data_dir,'clothes.csv')
self.evaluate_path = os.path.join(data_dir,'evaluate.csv')
self.personal_path = os.path.join(data_dir,'personal.csv')
def __split_csvline(self,line):
return line.replace('\n','').replace('"','').split(',')
def __struct_personal(self,line):
cols = self.__split_csvline(line)
personal = Personal()
personal.point_id = cols[0]
personal.user_name = cols[1]
personal.user_pronoun = cols[2]
personal.sex = cols[3]
personal.phone = cols[4]
personal.email = cols[5]
personal.address = cols[6]
personal.age = cols[7]
return personal
def __struct_cloth(self,line):
cols = self.__split_csvline(line)
cloth = Cloth()
cloth.cloth_name = cols[0]
cloth.color_code = cols[1]
cloth.small_type = cols[2]
cloth.price = cols[3]
cloth.image_url = cols[4]
cloth.big_type = cols[5]
cloth.cloth_code = cols[6]
cloth.cloth_describe = cols[7]
return cloth
def __struct_evaluate(self,line):
cols = self.__split_csvline(line)
osyare = Evaluate()
osyare.clothes = []
for c in cols:
if c == 'null':
break
else:
osyare.clothes.append(c)
osyare.osyaredo = cols[3]
return osyare
def get_personal_from_id(self,point_id):
"""
read personal data from point id
:param point_id: search point id
:return: personal object
"""
with codecs.open(self.personal_path,'r','utf-8') as f:
for line in f:
personal = self.__struct_personal(line)
if personal.point_id == point_id:
return personal
return None
def get_clothes_from_code(self, cloth_code):
"""
read cloth data from cloth_code
:param cloth_code: cloth code for searching
:return: cloth object
"""
with codecs.open(self.clothes_path, 'r', 'utf-8') as f:
for line in f:
cloth = self.__struct_cloth(line)
if cloth.cloth_code == cloth_code:
return cloth
return None
def get_evaluate_from_code(self, cloth_code):
"""
read evaluate(osyaredo) from cloth code
:param cloth_code: cloth code for searching evaluate
:return: evaluate object list
"""
result = []
with codecs.open(self.evaluate_path, 'r', 'utf-8') as f:
for line in f:
ev = self.__struct_evaluate(line)
if ev.clothes.count(cloth_code) > 0:
result.append(ev)
if len(result) > 0:
return result
else:
return None
def get_evaluate_from_codelist(self, cloth_codelist):
"""
read evaluate(osyaredo) from cloth code
:param cloth_code: cloth code for searching evaluate
:return: evaluate object list
"""
result = []
with codecs.open(self.evaluate_path, 'r', 'utf-8') as f:
for line in f:
ev = self.__struct_evaluate(line)
isContain = True
for cloth in cloth_codelist:
if not cloth.cloth_code in ev.clothes:
isContain = False
break
if isContain:
result.append(ev)
if len(result) > 0:
return result
else:
return None
def get_clothes_from_name(self, contains_name):
"""
read cloth data from keyword that contains cloth name
:param contains_name: key contains cloth name
:return: cloth object list
"""
result = []
with codecs.open(self.clothes_path, 'r', 'utf-8') as f:
for line in f:
cloth = self.__struct_cloth(line)
if cloth.cloth_name.count(contains_name) > 0:
result.append(cloth)
if len(result) > 0:
return result
else:
return None
def get_clothes_from_keys(self, season,price = None):
"""
read cloth data from keyword that contains cloth name
:param contains_name: key contains cloth name
:return: cloth object list
"""
result = []
with codecs.open(self.clothes_path, 'r', 'utf-8') as f:
for line in f:
cloth = self.__struct_cloth(line)
if cloth.cloth_describe.count(season) > 0 or cloth.cloth_name.count(season) > 0:
result.append(cloth)
if len(result) > 0:
return result
else:
return None
if __name__ == '__main__':
script_dir = os.path.dirname(__file__)
data_path = os.path.join(script_dir,'../../data')
manager = DataBaseManager(data_path)
personal = manager.get_clothes_from_name('ズボン')
for p in personal:
print(p.cloth_name)
| mit | 3,080,065,227,675,096,000 | 28.005155 | 96 | 0.52568 | false |
django-notifications/django-notifications | notifications/views.py | 1 | 7702 | # -*- coding: utf-8 -*-
''' Django Notifications example views '''
from distutils.version import \
StrictVersion # pylint: disable=no-name-in-module,import-error
from django import get_version
from django.contrib.auth.decorators import login_required
from django.forms import model_to_dict
from django.shortcuts import get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic import ListView
from notifications import settings
from notifications.settings import get_config
from notifications.utils import id2slug, slug2id
from swapper import load_model
Notification = load_model('notifications', 'Notification')
if StrictVersion(get_version()) >= StrictVersion('1.7.0'):
from django.http import JsonResponse # noqa
else:
# Django 1.6 doesn't have a proper JsonResponse
import json
from django.http import HttpResponse # noqa
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def JsonResponse(data): # noqa
return HttpResponse(
json.dumps(data, default=date_handler),
content_type="application/json")
class NotificationViewList(ListView):
template_name = 'notifications/list.html'
context_object_name = 'notifications'
paginate_by = settings.get_config()['PAGINATE_BY']
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(NotificationViewList, self).dispatch(
request, *args, **kwargs)
class AllNotificationsList(NotificationViewList):
"""
Index page for authenticated user
"""
def get_queryset(self):
if settings.get_config()['SOFT_DELETE']:
qset = self.request.user.notifications.active()
else:
qset = self.request.user.notifications.all()
return qset
class UnreadNotificationsList(NotificationViewList):
def get_queryset(self):
return self.request.user.notifications.unread()
@login_required
def mark_all_as_read(request):
request.user.notifications.mark_all_as_read()
_next = request.GET.get('next')
if _next:
return redirect(_next)
return redirect('notifications:unread')
@login_required
def mark_as_read(request, slug=None):
notification_id = slug2id(slug)
notification = get_object_or_404(
Notification, recipient=request.user, id=notification_id)
notification.mark_as_read()
_next = request.GET.get('next')
if _next:
return redirect(_next)
return redirect('notifications:unread')
@login_required
def mark_as_unread(request, slug=None):
notification_id = slug2id(slug)
notification = get_object_or_404(
Notification, recipient=request.user, id=notification_id)
notification.mark_as_unread()
_next = request.GET.get('next')
if _next:
return redirect(_next)
return redirect('notifications:unread')
@login_required
def delete(request, slug=None):
notification_id = slug2id(slug)
notification = get_object_or_404(
Notification, recipient=request.user, id=notification_id)
if settings.get_config()['SOFT_DELETE']:
notification.deleted = True
notification.save()
else:
notification.delete()
_next = request.GET.get('next')
if _next:
return redirect(_next)
return redirect('notifications:all')
@never_cache
def live_unread_notification_count(request):
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'unread_count': 0
}
else:
data = {
'unread_count': request.user.notifications.unread().count(),
}
return JsonResponse(data)
@never_cache
def live_unread_notification_list(request):
''' Return a json with a unread notification list '''
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'unread_count': 0,
'unread_list': []
}
return JsonResponse(data)
default_num_to_fetch = get_config()['NUM_TO_FETCH']
try:
# If they don't specify, make it 5.
num_to_fetch = request.GET.get('max', default_num_to_fetch)
num_to_fetch = int(num_to_fetch)
if not (1 <= num_to_fetch <= 100):
num_to_fetch = default_num_to_fetch
except ValueError: # If casting to an int fails.
num_to_fetch = default_num_to_fetch
unread_list = []
for notification in request.user.notifications.unread()[0:num_to_fetch]:
struct = model_to_dict(notification)
struct['slug'] = id2slug(notification.id)
if notification.actor:
struct['actor'] = str(notification.actor)
if notification.target:
struct['target'] = str(notification.target)
if notification.action_object:
struct['action_object'] = str(notification.action_object)
if notification.data:
struct['data'] = notification.data
unread_list.append(struct)
if request.GET.get('mark_as_read'):
notification.mark_as_read()
data = {
'unread_count': request.user.notifications.unread().count(),
'unread_list': unread_list
}
return JsonResponse(data)
@never_cache
def live_all_notification_list(request):
''' Return a json with a unread notification list '''
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'all_count': 0,
'all_list': []
}
return JsonResponse(data)
default_num_to_fetch = get_config()['NUM_TO_FETCH']
try:
# If they don't specify, make it 5.
num_to_fetch = request.GET.get('max', default_num_to_fetch)
num_to_fetch = int(num_to_fetch)
if not (1 <= num_to_fetch <= 100):
num_to_fetch = default_num_to_fetch
except ValueError: # If casting to an int fails.
num_to_fetch = default_num_to_fetch
all_list = []
for notification in request.user.notifications.all()[0:num_to_fetch]:
struct = model_to_dict(notification)
struct['slug'] = id2slug(notification.id)
if notification.actor:
struct['actor'] = str(notification.actor)
if notification.target:
struct['target'] = str(notification.target)
if notification.action_object:
struct['action_object'] = str(notification.action_object)
if notification.data:
struct['data'] = notification.data
all_list.append(struct)
if request.GET.get('mark_as_read'):
notification.mark_as_read()
data = {
'all_count': request.user.notifications.count(),
'all_list': all_list
}
return JsonResponse(data)
def live_all_notification_count(request):
try:
user_is_authenticated = request.user.is_authenticated()
except TypeError: # Django >= 1.11
user_is_authenticated = request.user.is_authenticated
if not user_is_authenticated:
data = {
'all_count': 0
}
else:
data = {
'all_count': request.user.notifications.count(),
}
return JsonResponse(data)
| bsd-3-clause | -8,903,661,375,527,200,000 | 28.852713 | 76 | 0.64321 | false |
agoose77/hivesystem | tutorial/layers/layer17/layers.py | 1 | 2334 | from __future__ import print_function
# import the main and action components
from maincomponent import maincomponent
from action1 import action1component
from action2 import action2component
from action3 import action3component
#import manager components
from action3components import animationmanager
from action3components import soundmanager
#keyboard mainloop
from keycodes import ascii_to_keycode
from getch import getch, kbhit, change_termios, restore_termios
def mainloop(keyfunc=None):
change_termios()
while True:
while not kbhit(): continue
key = getch()
if isinstance(key, bytes) and bytes != str: key = key.decode()
if key not in ascii_to_keycode: continue
keycode = ascii_to_keycode[key]
if keycode == "ESCAPE": break
if keyfunc is not None: keyfunc(keycode)
restore_termios()
#define a generic pseudo-hive class
import libcontext
class pseudohive(object):
components = {}
def __init__(self):
for componentname, componentclass in self.components.items():
component = componentclass()
setattr(self, componentname, component)
def build(self, contextname):
self._contextname = contextname
self._context = libcontext.context(self._contextname)
def place(self):
libcontext.push(self._contextname)
for componentname, componentclass in self.components.items():
component = getattr(self, componentname)
component.place()
libcontext.pop()
def close(self):
self._context.close()
#define the main (pseudo-)hive
class mainhive(pseudohive):
components = {
#action3 manager components
"animationmanager": animationmanager,
"soundmanager": soundmanager,
#main component and action components
"maincomponent": maincomponent,
"action1": action1component,
"action2": action2component,
"action3": action3component,
}
#Set up the main hive and run it
#Give us a new mainhive instance
main = mainhive()
#Build a context named "main"
main.build("main")
#Declare sockets and plugins
main.place()
#Build all connections, and validate the connection network
main.close()
#Run the main loop
main.maincomponent.start()
mainloop(main.maincomponent.keypress)
| bsd-2-clause | 2,937,745,632,804,940,000 | 25.522727 | 70 | 0.696658 | false |
cackharot/fbeazt | src/foodbeazt/resources/order.py | 1 | 13147 | import time
from datetime import datetime
from bson import ObjectId, json_util
from collections import defaultdict
from flask import g, request
from flask_mail import Message
from flask_restful import Resource
from service.OrderService import OrderService, DuplicateOrderException
from service.PushNotificationService import PushNotificationService
from service.ProductService import ProductService
from service.PincodeService import PincodeService
from service.StoreService import StoreService
from service.StoreOrderService import StoreOrderService
from service.SmsService import SmsService
from libs.order_helper import OrderHelper
from foodbeazt.fapp import mongo, app, mail, admin_permission
from resources.coupon import ValidateCouponApi
import logging
from gcm import *
order_created_template = app.jinja_env.get_template('email/order_created.html')
order_created_sms_template = app.jinja_env.get_template(
'sms/order_created.txt')
order_otp_sms_template = app.jinja_env.get_template('sms/otp.txt')
class TrackOrderApi(Resource):
def get(self, order_no):
return OrderApi().get(order_no)
class OrderApi(Resource):
def __init__(self):
self.MAX_ORDER_PER_PHONE = 3
self.log = logging.getLogger(__name__)
self.service = OrderService(mongo.db)
self.storeOrderService = StoreOrderService(mongo.db)
self.storeService = StoreService(mongo.db)
self.productService = ProductService(mongo.db)
self.pincodeService = PincodeService(mongo.db)
self.pushNotifyService = PushNotificationService(
mongo.db, app.config['GCM_API_KEY'])
self.smsService = SmsService(
mongo.db, app.config['SMS_USER'], app.config['SMS_API_KEY'])
self.helper = OrderHelper(self.productService)
self.validateCouponService = ValidateCouponApi()
self.admin_emails = app.config['ADMIN_EMAILS'].split(',')
def update_store_data(self, order):
store_ids = set([str(x['store_id']) for x in order['items']])
stores = {str(x['_id']): x for x in self.storeService.search_by_ids(store_ids=store_ids)}
for item in order['items']:
item['store'] = stores[str(item['store_id'])]
def update_store_status(self, order):
order_dict = {}
order_dict[str(order['_id'])] = order
store_statuses = defaultdict(dict)
for x in self.storeOrderService.get_by_order_ids(order_dict.keys()):
oid = str(x['order_id'])
store_id = str(x['store_id'])
store_statuses[oid][store_id] = dict(no=x['store_order_no'], status_timings=x.get('status_timings',{}), status=x['status'])
order_dict[oid]['store_delivery_status'] = store_statuses[oid]
def get(self, _id):
if _id == "-1":
return None, 404
try:
order = None
if len(_id) <= 9:
order = self.service.get_by_number(_id)
else:
order = self.service.get_by_id(_id)
if order:
self.update_store_data(order)
if admin_permission.can():
self.update_store_status(order)
return order, 200
else:
return None, 404
except Exception as e:
self.log.exception(e)
return {"status": "error", "message": "Error on get order with id %s" % _id}, 421
def put(self, _id):
data = json_util.loads(request.data.decode('utf-8'))
cmd = data.get('cmd', None)
if cmd is None:
return dict(status='error', message="Invalid command"), 423
if cmd == "VERIFY_OTP":
return self.verify_otp(data)
elif cmd == "RESEND_OTP":
return self.resend_otp(data)
else:
return dict(status='error', message="Invalid command"), 423
def resend_otp(self, data):
order_id = data.get("order_id", None)
new_number = data.get("number", None)
try:
order = self.service.get_by_id(order_id)
if order is None or order['status'] == 'DELIVERED':
return dict(status='error', message="Invalid Order id given. Order not found/delivered"), 425
if new_number is not None and len(new_number) != 0:
if len(new_number) != 10:
return dict(status='error', message="Invalid phone number!"), 426
else:
order['delivery_details']['phone'] = new_number
self.service.save(order)
order['otp_status'] = self.send_otp(order)
self.service.save(order)
return dict(status='success'), 200
except Exception as e:
self.log.exception(e)
return dict(status="error", message="Error while sending OTP. Try again later!"), 400
def verify_otp(self, data):
order_id = data.get("order_id", None)
otp = data.get("otp", None)
if otp is None or len(otp) < 3 or len(otp) > 10:
return dict(status='error', message="Invalid OTP given"), 424
try:
order = self.service.get_by_id(order_id)
if order is None or order['status'] == 'DELIVERED':
return dict(status='error', message="Invalid Order id given. Order not found/delivered"), 425
number = order['delivery_details'].get('phone')
if self.smsService.update_otp(number, otp):
order['otp_status'] = 'VERIFIED'
self.service.save(order)
self.send_email(order)
self.send_sms(order)
self.notify_new_order(order)
self.log.info(
"%s order success after OTP verified!", order.get('order_no'))
return dict(status='success'), 200
else:
return dict(status='error', message="Invalid OTP given"), 424
except Exception as e:
self.log.exception(e)
return dict(status="error", message="Unable to verify the OTP. Please try again later!"), 427
def post(self, _id):
order = json_util.loads(request.data.decode('utf-8'))
self.log.debug("RECEIVED ORDER %s" % order)
validation_error, sanitized_items = self.helper.validate_line_items(order)
if validation_error is not None:
return dict(status='error', type='validation', message=validation_error), 421
delivery_validation, delivery_details = self.helper.validate_delivery_details(order)
if delivery_validation is not None:
return dict(status='error', type='validation', message=delivery_validation), 422
payment_type = order.get('payment_type', 'cod')
if payment_type not in ['cod', 'payumoney', 'paytm']:
return dict(status='error', type='validation', message="Invalid Payment choosen"), 422
tenant_id = g.user.tenant_id
user_id = g.user.id
valid_order = {
'tenant_id': ObjectId(tenant_id),
'user_id': ObjectId(user_id),
'items': sanitized_items,
'delivery_details': delivery_details,
'payment_type': payment_type
}
valid_order['delivery_charges'] = self.service.get_delivery_charges(
valid_order)
valid_order['total'] = self.service.get_order_total(valid_order)
coupon_code = order.get('coupon_code', None)
coupon_discount = 0.0
if coupon_code:
coupon_data = self.validateCouponService.fetch_coupon_data(
coupon_code)
if coupon_data is None:
return dict(status='error', type='validation', message="Invalid coupon data!"), 422
if not self.validateCouponService.valid_coupon(coupon_data):
return dict(status='error', type='validation', message="Coupon code was expired!"), 422
coupon_discount = self.validateCouponService.calculate_discount(
valid_order, coupon_data)
if coupon_discount <= 0.0:
self.log.info(
'Coupon code does not meet the conditions! %s', coupon_code)
return {'status': 'error', 'type': 'validation', 'message': 'Coupon code does not meet the conditions!'}, 472
valid_order['coupon_code'] = coupon_code
valid_order['coupon_discount'] = -coupon_discount
if payment_type in ['cod', 'paytm']:
valid_order['payment_status'] = 'success'
_id = None
try:
pincode = valid_order['delivery_details']['pincode']
if not self.pincodeService.check_pincode(pincode):
return {"status": "error", "message": "Delivery not available for %s pincode!" % (pincode)}, 422
self.check_spam_order(valid_order)
valid_order['otp_status'] = self.send_otp(valid_order)
_id = self.service.save(valid_order)
self.save_order_metadata(_id, order.get('metadata', None))
except DuplicateOrderException as e:
self.log.exception(e)
return dict(status="error", message="We identified frequent placement of order. \
Please wait 15 minutes before placing any other order."), 429
except Exception as e:
self.log.exception(e)
return dict(status="error",
message="Oops! Error while trying to save order details! Please try again later"), 420
if valid_order['otp_status'] == 'VERIFIED' and payment_type in ['cod', 'paytm']:
self.send_email(valid_order)
self.send_sms(valid_order)
self.notify_new_order(valid_order)
self.log.info("%s order success!", valid_order.get('order_no'))
return {"status": "success", "location": "/api/order/" + str(_id), "data": valid_order}
def notify_new_order(self, order):
email = order['delivery_details']['email']
address = order['delivery_details']['address']
pincode = order['delivery_details']['pincode']
total = order['total']
data = {
'message': "Yay! New order from %s for Rs.%.2f. Delivery to %s - %s" % (email, total, address, pincode),
'order_id': order['_id'],
'order_no': order['order_no'],
'order_date': order['created_at'],
'total': total,
'title': 'New Order'
}
try:
for admin_email in self.admin_emails:
self.pushNotifyService.send_to_device(data, email=admin_email)
except Exception as e:
self.log.exception(e)
def delete(self, _id):
return None, 204
def save_order_metadata(self, order_id, metadata):
if metadata is None or order_id is None:
return
entity = {
'order_id': order_id,
'metadata': metadata
}
try:
self.service.save_order_metadata(entity)
except Exception as e:
self.log.exception(e)
def send_otp(self, order):
return 'VERIFIED'
# if order['payment_type'] in ['cod', 'paytm']:
# return 'VERIFIED'
# if app.config['SEND_OTP'] is False:
# return 'VERIFIED'
# number = order['delivery_details'].get('phone')
# if not self.smsService.verified_number(number):
# otp = self.smsService.generate_otp()
# message = order_otp_sms_template.render(order=order, otp=otp)
# return self.smsService.send_otp(number, otp, message)
# else:
# return 'VERIFIED'
def send_sms(self, order):
number = order['delivery_details'].get('phone')
track_link = app.config['ORDER_TRACK_URL'] % (order['order_no'])
message = order_created_sms_template.render(
order=order, track_link=track_link)
try:
self.smsService.send(number, message)
except Exception as e:
self.log.exception(e)
def send_email(self, order):
email = order['delivery_details']['email']
subject = "Order confirmation <%s>" % (order['order_no'])
msg = Message(subject=subject,
reply_to=app.config['MAIL_REPLY_TO'],
charset='utf-8',
sender=(app.config['MAIL_SENDER_NAME'],
app.config['MAIL_SENDER']),
recipients=[email])
msg.html = order_created_template.render(order=order)
self.log.info("Sending email [%s] to %s" % (subject, email))
if app.config['SEND_MAIL'] is False:
return
try:
mail.send(msg)
except Exception as e:
self.log.exception(e)
def check_spam_order(self, order):
number = order['delivery_details']['phone']
email = order['delivery_details']['email']
order_count = self.smsService.get_order_count(
number=number, email=email, minutes=15)
if order_count > self.MAX_ORDER_PER_PHONE:
raise DuplicateOrderException()
| apache-2.0 | 4,485,631,413,601,322,000 | 41.546926 | 134 | 0.587663 | false |
ssssam/calliope | calliope/stat/__init__.py | 1 | 1520 | #!/usr/bin/env python3
# Calliope
# Copyright (C) 2016 Sam Thursfield <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
import argparse
import os
import sys
import urllib.parse
import warnings
import calliope
def measure_size(playlist):
'''Measure the total size of the files listed.'''
def measure_one(item):
path = urllib.parse.unquote(urllib.parse.urlsplit(item['location']).path)
try:
return os.stat(path).st_size
except FileNotFoundError as e:
warnings.warn("Did not find file %s" % path)
return 0
size = 0
for item in playlist:
if 'location' in item:
size += measure_one(item)
elif 'tracks' in item:
for track in item['tracks']:
if 'location' in track:
size += measure_one(track)
print("Total size: %i MB" % (size / 1024 / 1024.0))
| gpl-2.0 | 4,141,134,429,499,727,000 | 30.666667 | 81 | 0.667763 | false |
DrSkippy/Gravitational-Three-Body-Symmetric | sim_pendulum.py | 1 | 1975 | #!/usr/bin/env python
import csv
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# arg 1 = w init
# arg 2 = n periods
# arg 3 = n ratio
# time step
dt = np.float64(0.00010)
# constants
L_0 = np.float64(1.0) # unstretched length
g = np.float64(9.81) # gravitation
n = np.float64(sys.argv[3])
K_over_M = (n*n - 1)*g/L_0
# initial conditions
theta = np.float64(0)
L = L_0 + g/K_over_M # equilibrium length with gravity
# 2mgl = 1/2 m l^2 w^2
w_sep = np.sqrt(4.*g/L)
w_0 = np.float64(sys.argv[1])
w = w_0
#
v_l_0 = 0
v_l = v_l_0
# periods
T_p = 2.*np.pi/np.sqrt(g/L)
T_k = 2.*np.pi/np.sqrt(K_over_M)
# record some stuff
print "Tp = {} T/dt = {}".format(T_p, T_p/dt)
print "Tk = {} T/dt = {}".format(T_k, T_k/dt)
print "Tk/Tp = {}".format(T_k/T_p)
print "w_esc = {}".format(w_sep)
t = np.float64(0.0)
theta_last = theta
# keep some records
data = []
t_s = []
theta += w*dt/2.
L += v_l*dt/2.
for i in range(int(sys.argv[2])*int(T_p/dt)):
w += -dt*g*np.sin(theta)/L
v_l += -K_over_M*(L-L_0) + g*np.cos(theta) + w*w*L
theta += w*dt
theta = np.fmod(theta, 2.*np.pi)
L += v_l*dt
t += dt
data.append([t, theta, w, L, v_l])
if theta_last < 0 and theta > 0:
t_s.append(t)
theta_last = theta
# periods by measure
t_s = [t_s[i] - t_s[i-1] for i in range(1,len(t_s)) ]
print "avg period = {} std periods = {}".format(np.average(t_s), np.std(t_s))
# plots
df = pd.DataFrame().from_records(data)
df.columns = ["t", "theta", "omega", "l", "v_l"]
df.set_index("t")
ax = df.plot(kind="scatter", x="theta", y="omega", marker=".")
fig = ax.get_figure()
fig.savefig("phase1.png")
ax = df.plot(kind="scatter", x="l", y="v_l", marker=".")
fig = ax.get_figure()
fig.savefig("phase2.png")
# config space
df["y_c"] = -df["l"]
df["x_c"] = df["l"] * np.sin(df["theta"])
ax = df.plot(kind="scatter", x="x_c", y="y_c", marker=".")
fig = ax.get_figure()
fig.savefig("config.png")
| cc0-1.0 | 3,132,915,243,086,640,600 | 20.944444 | 77 | 0.578734 | false |
metpy/SHARPpy | sharppy/version.py | 1 | 1931 | import os.path
import subprocess
release = False
__version__ = '0.2'
_repository_path = os.path.split(__file__)[0]
_git_file_path = os.path.join(_repository_path, '__git_version__.py')
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
def get_git_hash():
'''
Gets the last GIT commit hash and date for the repository, using the
path to this file.
'''
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except:
GIT_REVISION = None
return GIT_REVISION
def get_git_revision():
hash = get_git_hash()
if hash :
rev = '.dev.' + hash[:7]
try:
cmd = ['git', 'show', '%s' % (hash), '--date=short',
'--format="(%ad)"']
date = _minimal_ext_cmd(cmd).split('"')[1]
rev += date
except:
pass
else:
rev = ".dev.Unknown"
return rev
def write_git_version():
'Write the GIT revision to a file.'
rev = get_git_revision()
if rev == ".dev.Unknown":
if os.path.isfile(_git_file_path):
return
gitfile = open(_git_file_path, 'w')
gitfile.write('rev = "%s"\n' % rev)
gitfile.close()
def get_version():
'''
Get the version of the package, including the GIT revision if this
is an actual release.
'''
version = __version__
if not release:
try:
import __git_version__
version += __git_version__.rev
except ImportError:
version += get_git_revision()
return version
| bsd-3-clause | 6,243,977,662,548,874,000 | 23.75641 | 81 | 0.539617 | false |
brian-o/CS-CourseWork | CS491/Program2/testForks.py | 1 | 2677 | ############################################################
'''
testForks.py
Written by: Brian O'Dell, Spetember 2017
A program to run each program a 500 times per thread count.
Then uses the data collected to make graphs and tables that
are useful to evaluate the programs running time.
'''
############################################################
from subprocess import *
from numba import jit
import numpy as np
import csv as csv
import pandas as pd
from pandas.plotting import table
import matplotlib.pyplot as plt
'''
Call the C program multiple times with variable arguments to gather data
The name of the executable should exist before running
'''
@jit
def doCount(name):
j = 0
while (j < 1025):
for i in range(0,501):
call([name,"-t",str(j), "-w"])
if (j == 0):
j = 1
else:
j = 2*j;
'''
Turn the data into something meaningful.
Takes all the data gets the average and standard deviation for each
number of threads. Then plots a graph based on it. Also, makes
a csv with the avg and stddev
'''
@jit
def exportData(name):
DF = pd.read_csv("data/"+name+".csv")
f = {'ExecTime':['mean','std']}
#group by the number of threads in the csv and
#apply the mean and standard deviation functions to the groups
avgDF = DF.groupby('NumThreads').agg(f)
avgTable = DF.groupby('NumThreads', as_index=False).agg(f)
#When the data csv was saved we used 0 to indicate serial execution
#this was so the rows would be in numerical order instead of Alphabetical
#Now rename index 0 to Serial to be an accurate representation
indexList = avgDF.index.tolist()
indexList[0] = 'Serial'
avgDF.index = indexList
#make the bar chart and set the axes
avgPlot = avgDF.plot(kind='bar',
title=('Run Times Using '+ name), legend='False', figsize=(15,8))
avgPlot.set_xlabel("Number of Forks")
avgPlot.set_ylabel("Run Time (seconds)")
#put the data values on top of the bars for clarity
avgPlot.legend(['mean','std deviation'])
for p in avgPlot.patches:
avgPlot.annotate((str(p.get_height())[:6]),
(p.get_x()-.01, p.get_height()), fontsize=9)
#save the files we need
plt.savefig('data/'+name+'Graph.png')
avgTable.to_csv('data/'+name+'Table.csv', index=False, encoding='utf-8')
def main():
doCount("./forkedSemaphor")
doCount("./forkedPrivateCount")
doCount("./forkedPrivateCount32")
exportData("forkedSemaphor")
exportData("forkedPrivateCount")
exportData("forkedPrivateCount32")
if __name__ == '__main__':
main()
| gpl-3.0 | 5,887,714,014,429,846,000 | 30.494118 | 82 | 0.623086 | false |
PennyDreadfulMTG/Penny-Dreadful-Tools | modo_bugs/fetcher.py | 1 | 4118 | import os
import sys
from typing import Dict, List, Optional, Tuple
from bs4 import BeautifulSoup
from bs4.element import Tag
from shared import fetch_tools, lazy
def search_scryfall(query: str) -> Tuple[int, List[str], List[str]]:
"""Returns a tuple. First member is an integer indicating how many cards match the query total,
second member is a list of card names up to the maximum that could be fetched in a timely fashion."""
if query == '':
return 0, [], []
print(f'Searching scryfall for `{query}`')
result_json = fetch_tools.fetch_json('https://api.scryfall.com/cards/search?q=' + fetch_tools.escape(query), character_encoding='utf-8')
if 'code' in result_json.keys(): # The API returned an error
if result_json['status'] == 404: # No cards found
return 0, [], []
print('Error fetching scryfall data:\n', result_json)
return 0, [], []
for warning in result_json.get('warnings', []): # scryfall-provided human-readable warnings
print(warning)
result_data = result_json['data']
result_data.sort(key=lambda x: x['legalities']['penny'])
def get_frontside(scr_card: Dict) -> str:
"""If card is transform, returns first name. Otherwise, returns name.
This is to make sure cards are later found in the database"""
# not sure how to handle meld cards
if scr_card['layout'] in ['transform', 'flip', 'modal_dfc']:
return scr_card['card_faces'][0]['name']
return scr_card['name']
result_cardnames = [get_frontside(obj) for obj in result_data]
return result_json['total_cards'], result_cardnames, result_json.get('warnings', [])
def catalog_cardnames() -> List[str]:
result_json = fetch_tools.fetch_json('https://api.scryfall.com/catalog/card-names')
names: List[str] = result_json['data']
for n in names:
if ' // ' in n:
names.extend(n.split(' // '))
return names
def update_redirect(file: str, title: str, redirect: str, **kwargs: str) -> bool:
text = '---\ntitle: {title}\nredirect_to:\n - {url}\n'.format(title=title, url=redirect)
for key, value in kwargs.items():
text += f'{key}: {value}\n'
text = text + '---\n'
fname = f'{file}.md'
if not os.path.exists(fname):
bb_jekyl = open(fname, mode='w')
bb_jekyl.write('')
bb_jekyl.close()
bb_jekyl = open(fname, mode='r')
orig = bb_jekyl.read()
bb_jekyl.close()
if orig != text:
print(f'New {file} update!')
bb_jekyl = open(fname, mode='w')
bb_jekyl.write(text)
bb_jekyl.close()
return True
if 'always-scrape' in sys.argv:
return True
return False
def find_bug_blog() -> Tuple[Optional[str], bool]:
bug_blogs = [a for a in get_article_archive() if str(a[0].string).startswith('Magic Online Bug Blog')]
if not bug_blogs:
return (None, False)
(title, link) = bug_blogs[0]
print('Found: {0} ({1})'.format(title, link))
new = update_redirect('bug_blog', title.text, link)
return (link, new)
def find_announcements() -> Tuple[str, bool]:
articles = [a for a in get_article_archive() if str(a[0].string).startswith('Magic Online Announcements')]
(title, link) = articles[0]
print('Found: {0} ({1})'.format(title, link))
bn = 'Build Notes' in fetch_tools.fetch(link)
new = update_redirect('announcements', title.text, link, has_build_notes=str(bn))
return (link, new)
def parse_article_item_extended(a: Tag) -> Tuple[Tag, str]:
title = a.find_all('h3')[0]
link = 'http://magic.wizards.com' + a.find_all('a')[0]['href']
return (title, link)
@lazy.lazy_property
def get_article_archive() -> List[Tuple[Tag, str]]:
try:
html = fetch_tools.fetch('http://magic.wizards.com/en/articles/archive/184956')
except fetch_tools.FetchException:
html = fetch_tools.fetch('http://magic.wizards.com/en/articles/archive/')
soup = BeautifulSoup(html, 'html.parser')
return [parse_article_item_extended(a) for a in soup.find_all('div', class_='article-item-extended')]
| gpl-3.0 | -1,526,794,542,128,501,000 | 41.020408 | 140 | 0.629917 | false |
heldergg/webpymail | webpymail/sabapp/models.py | 1 | 2844 | # -*- coding: utf-8 -*-
# sabapp - Simple Address Book Application
# Copyright (C) 2008 Helder Guerreiro
# This file is part of sabapp.
#
# sabapp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sabapp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sabapp. If not, see <http://www.gnu.org/licenses/>.
#
# Helder Guerreiro <[email protected]>
#
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
# Models:
ADDRESSBOOKTYPE = (
(1, _('User address book')),
(2, _('Server address book')),
(3, _('Site address book')),
)
class AddressManager(models.Manager):
def for_request(self, request):
'''Addresses available for request'''
host = request.session['host']
return super(AddressManager, self).get_queryset().filter(
Q(user__exact=request.user, imap_server__exact=host,
ab_type__exact=1) |
Q(imap_server__exact=host, ab_type__exact=2) |
Q(ab_type__exact=3))
def have_addr(self, request, addr):
address = self.for_request(request).filter(email__iexact=addr)
return bool(address)
class Address(models.Model):
user = models.ForeignKey(User, null=True)
imap_server = models.CharField(_('IMAP server'), max_length=128)
nickname = models.CharField(max_length=64, blank=True)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=64, blank=True)
email = models.EmailField(_('e-mail address'))
additional_info = models.CharField(_('aditional information'),
max_length=128, blank=True)
ab_type = models.IntegerField(choices=ADDRESSBOOKTYPE)
objects = AddressManager()
class Meta:
verbose_name = _('Address')
verbose_name_plural = _('Addresses')
db_table = 'address_book'
ordering = ['first_name', 'last_name', 'email']
def full_name(self):
return ('%s %s' % (self.first_name, self.last_name)).strip()
def mail_addr(self):
name = ('%s %s' % (self.first_name, self.last_name)).strip()
if name:
return '"%s" <%s>' % (name, self.email)
else:
return self.email
def __str__(self):
return self.mail_addr()
| gpl-3.0 | -5,009,104,855,560,248,000 | 31.318182 | 77 | 0.645921 | false |
silentfuzzle/calibre | src/calibre/devices/kobo/driver.py | 1 | 147621 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import division
__license__ = 'GPL v3'
__copyright__ = '2010-2012, Timothy Legge <[email protected]>, Kovid Goyal <[email protected]> and David Forrester <[email protected]>'
__docformat__ = 'restructuredtext en'
'''
Driver for Kobo ereaders. Supports all e-ink devices.
Originally developed by Timothy Legge <[email protected]>.
Extended to support Touch firmware 2.0.0 and later and newer devices by David Forrester <[email protected]>
'''
import os, time, shutil
from contextlib import closing
from calibre.devices.usbms.books import BookList
from calibre.devices.usbms.books import CollectionsBookList
from calibre.devices.kobo.books import KTCollectionsBookList
from calibre.devices.kobo.books import Book
from calibre.devices.kobo.books import ImageWrapper
from calibre.devices.mime import mime_type_ext
from calibre.devices.usbms.driver import USBMS, debug_print
from calibre import prints, fsync
from calibre.ptempfile import PersistentTemporaryFile
from calibre.constants import DEBUG
from calibre.utils.config_base import prefs
EPUB_EXT = '.epub'
KEPUB_EXT = '.kepub'
# Implementation of Qt's qHash for strings. This does not seem to be exposed by the Python Qt bindings.
def qhash(inputstr):
instr = b""
if isinstance(inputstr, bytes):
instr = inputstr
elif isinstance(inputstr, unicode):
instr = inputstr.encode("utf8")
else:
return -1
h = 0x00000000
for x in bytearray(instr):
h = (h << 4) + x
h ^= (h & 0xf0000000) >> 23
h &= 0x0fffffff
return h
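# Illustrative usage (a sketch, not part of the original driver): byte strings
# and unicode strings that encode to the same UTF-8 bytes hash identically,
# and unsupported input types yield -1, e.g.
#
#   qhash(b"some-content-id") == qhash(u"some-content-id")   # True
#   qhash(42)                                                 # -1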
class DummyCSSPreProcessor(object):
def __call__(self, data, add_namespace=False):
return data
class KOBO(USBMS):
name = 'Kobo Reader Device Interface'
gui_name = 'Kobo Reader'
description = _('Communicate with the Kobo Reader')
author = 'Timothy Legge and David Forrester'
version = (2, 1, 8)
dbversion = 0
fwversion = 0
supported_dbversion = 120
has_kepubs = False
supported_platforms = ['windows', 'osx', 'linux']
booklist_class = CollectionsBookList
book_class = Book
# Ordered list of supported formats
FORMATS = ['kepub', 'epub', 'pdf', 'txt', 'cbz', 'cbr']
CAN_SET_METADATA = ['collections']
VENDOR_ID = [0x2237]
BCD = [0x0110, 0x0323, 0x0326]
ORIGINAL_PRODUCT_ID = [0x4165]
WIFI_PRODUCT_ID = [0x4161, 0x4162]
PRODUCT_ID = ORIGINAL_PRODUCT_ID + WIFI_PRODUCT_ID
VENDOR_NAME = ['KOBO_INC', 'KOBO']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['.KOBOEREADER', 'EREADER']
EBOOK_DIR_MAIN = ''
SUPPORTS_SUB_DIRS = True
SUPPORTS_ANNOTATIONS = True
# "kepubs" do not have an extension. The name looks like a GUID. Using an empty string seems to work.
VIRTUAL_BOOK_EXTENSIONS = frozenset(['kobo', ''])
EXTRA_CUSTOMIZATION_MESSAGE = [
_('The Kobo supports several collections including ')+
'Read, Closed, Im_Reading. ' +
_('Create tags for automatic management'),
_('Upload covers for books (newer readers)') +
':::'+_('Normally, the KOBO readers get the cover image from the'
' ebook file itself. With this option, calibre will send a '
'separate cover image to the reader, useful if you '
'have modified the cover.'),
_('Upload Black and White Covers'),
_('Show expired books') +
        ':::'+_('A bug in an earlier version left non-kepub book records'
            ' in the database. With this option calibre will show the '
            'expired records and allow you to delete them with '
            'the new delete logic.'),
_('Show Previews') +
        ':::'+_('Kobo previews are included on the Touch and some other versions.'
            ' By default they are no longer displayed as there is no good reason to '
            'see them. Enable if you wish to see/delete them.'),
_('Show Recommendations') +
':::'+_('Kobo now shows recommendations on the device. In some cases these have '
'files but in other cases they are just pointers to the web site to buy. '
'Enable if you wish to see/delete them.'),
_('Attempt to support newer firmware') +
':::'+_('Kobo routinely updates the firmware and the '
'database version. With this option calibre will attempt '
'to perform full read-write functionality - Here be Dragons!! '
            'Enable only if you are comfortable with restoring your Kobo '
'to factory defaults and testing software'),
]
EXTRA_CUSTOMIZATION_DEFAULT = [
', '.join(['tags']),
True,
True,
True,
False,
False,
False
]
OPT_COLLECTIONS = 0
OPT_UPLOAD_COVERS = 1
OPT_UPLOAD_GRAYSCALE_COVERS = 2
OPT_SHOW_EXPIRED_BOOK_RECORDS = 3
OPT_SHOW_PREVIEWS = 4
OPT_SHOW_RECOMMENDATIONS = 5
OPT_SUPPORT_NEWER_FIRMWARE = 6
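    # The OPT_* values above are indices into opts.extra_customization, which
    # is populated in the same order as EXTRA_CUSTOMIZATION_MESSAGE and
    # EXTRA_CUSTOMIZATION_DEFAULT.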
def initialize(self):
USBMS.initialize(self)
self.dbversion = 7
def device_database_path(self):
return self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite')
def books(self, oncard=None, end_session=True):
from calibre.ebooks.metadata.meta import path_to_ext
dummy_bl = BookList(None, None, None)
if oncard == 'carda' and not self._card_a_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
elif oncard == 'cardb' and not self._card_b_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
elif oncard and oncard != 'carda' and oncard != 'cardb':
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
prefix = self._card_a_prefix if oncard == 'carda' else \
self._card_b_prefix if oncard == 'cardb' \
else self._main_prefix
# Determine the firmware version
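        # The .kobo/version file is a single comma-separated line; the third
        # field is taken as the firmware version (layout assumed from the
        # split below, not independently verified).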
try:
with open(self.normalize_path(self._main_prefix + '.kobo/version'),
'rb') as f:
self.fwversion = f.readline().split(',')[2]
except:
self.fwversion = 'unknown'
if self.fwversion != '1.0' and self.fwversion != '1.4':
self.has_kepubs = True
debug_print('Version of driver: ', self.version, 'Has kepubs:', self.has_kepubs)
debug_print('Version of firmware: ', self.fwversion, 'Has kepubs:', self.has_kepubs)
self.booklist_class.rebuild_collections = self.rebuild_collections
# get the metadata cache
bl = self.booklist_class(oncard, prefix, self.settings)
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
# make a dict cache of paths so the lookup in the loop below is faster.
bl_cache = {}
for idx,b in enumerate(bl):
bl_cache[b.lpath] = idx
def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility):
changed = False
try:
lpath = path.partition(self.normalize_path(prefix))[2]
if lpath.startswith(os.sep):
lpath = lpath[len(os.sep):]
lpath = lpath.replace('\\', '/')
# debug_print("LPATH: ", lpath, " - Title: " , title)
playlist_map = {}
if lpath not in playlist_map:
playlist_map[lpath] = []
if readstatus == 1:
playlist_map[lpath].append('Im_Reading')
elif readstatus == 2:
playlist_map[lpath].append('Read')
elif readstatus == 3:
playlist_map[lpath].append('Closed')
                # Related to a bug in the Kobo firmware that leaves an expired row for deleted books,
                # this shows an Expired collection so the user can decide to delete the book.
if expired == 3:
playlist_map[lpath].append('Expired')
# A SHORTLIST is supported on the touch but the data field is there on most earlier models
if favouritesindex == 1:
playlist_map[lpath].append('Shortlist')
# Label Previews
if accessibility == 6:
playlist_map[lpath].append('Preview')
elif accessibility == 4:
playlist_map[lpath].append('Recommendation')
path = self.normalize_path(path)
# print "Normalized FileName: " + path
idx = bl_cache.get(lpath, None)
if idx is not None:
bl_cache[lpath] = None
if ImageID is not None:
imagename = self.normalize_path(self._main_prefix + '.kobo/images/' + ImageID + ' - NickelBookCover.parsed')
if not os.path.exists(imagename):
# Try the Touch version if the image does not exist
imagename = self.normalize_path(self._main_prefix + '.kobo/images/' + ImageID + ' - N3_LIBRARY_FULL.parsed')
# print "Image name Normalized: " + imagename
if not os.path.exists(imagename):
debug_print("Strange - The image name does not exist - title: ", title)
if imagename is not None:
bl[idx].thumbnail = ImageWrapper(imagename)
if (ContentType != '6' and MimeType != 'Shortcover'):
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
if self.update_metadata_item(bl[idx]):
# print 'update_metadata_item returned true'
changed = True
else:
                            debug_print("    Strange:  The file: ", prefix, lpath, " does not exist!")
if lpath in playlist_map and \
playlist_map[lpath] not in bl[idx].device_collections:
bl[idx].device_collections = playlist_map.get(lpath,[])
else:
if ContentType == '6' and MimeType == 'Shortcover':
book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
else:
try:
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
else:
                                debug_print("    Strange:  The file: ", prefix, lpath, " does not exist!")
title = "FILE MISSING: " + title
book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
except:
debug_print("prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors,
"mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID)
raise
# print 'Update booklist'
book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else []
if bl.add_book(book, replace_metadata=False):
changed = True
except: # Probably a path encoding error
import traceback
traceback.print_exc()
return changed
import sqlite3 as sqlite
with closing(sqlite.connect(
self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
# return bytestrings if the content cannot the decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
cursor.execute('select version from dbversion')
result = cursor.fetchone()
self.dbversion = result[0]
debug_print("Database Version: ", self.dbversion)
opts = self.settings()
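            # Build the query to match the reader's schema version: newer
            # databases expose ___ExpirationStatus, FavouritesIndex,
            # Accessibility and IsDownloaded, while older ones are given
            # constant placeholder columns so every row has the same shape.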
if self.dbversion >= 33:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, IsDownloaded from content where '
'BookID is Null %(previews)s %(recomendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')',
previews=' and Accessibility <> 6'
if opts.extra_customization[self.OPT_SHOW_PREVIEWS] == False else '',
recomendations=' and IsDownloaded in (\'true\', 1)'
if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] == False else '')
elif self.dbversion >= 16 and self.dbversion < 33:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, "1" as IsDownloaded from content where '
'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
elif self.dbversion < 16 and self.dbversion >= 14:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where '
'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
elif self.dbversion < 14 and self.dbversion >= 8:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where '
'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
else:
query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where BookID is Null'
try:
cursor.execute(query)
except Exception as e:
err = str(e)
if not ('___ExpirationStatus' in err or 'FavouritesIndex' in err or
'Accessibility' in err or 'IsDownloaded' in err):
raise
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as '
'FavouritesIndex, "-1" as Accessibility from content where '
'BookID is Null')
cursor.execute(query)
changed = False
for i, row in enumerate(cursor):
# self.report_progress((i+1) / float(numrows), _('Getting list of books on device...'))
if not hasattr(row[3], 'startswith') or row[3].startswith("file:///usr/local/Kobo/help/"):
# These are internal to the Kobo device and do not exist
continue
path = self.path_from_contentid(row[3], row[5], row[4], oncard)
mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/epub+zip'
# debug_print("mime:", mime)
if oncard != 'carda' and oncard != 'cardb' and not row[3].startswith("file:///mnt/sd/"):
changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10])
# print "shortbook: " + path
elif oncard == 'carda' and row[3].startswith("file:///mnt/sd/"):
changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10])
if changed:
need_sync = True
cursor.close()
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
# print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \
# (len(bl_cache), len(bl), need_sync)
if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
if oncard == 'cardb':
self.sync_booklists((None, None, bl))
elif oncard == 'carda':
self.sync_booklists((None, bl, None))
else:
self.sync_booklists((bl, None, None))
self.report_progress(1.0, _('Getting list of books on device...'))
return bl
def filename_callback(self, path, mi):
# debug_print("Kobo:filename_callback:Path - {0}".format(path))
idx = path.rfind('.')
ext = path[idx:]
if ext == KEPUB_EXT:
path = path + EPUB_EXT
# debug_print("Kobo:filename_callback:New path - {0}".format(path))
return path
def delete_via_sql(self, ContentID, ContentType):
        # Delete Order:
        #    1) shortcover_page
        #    2) volume_shortcovers
        #    3) content
import sqlite3 as sqlite
debug_print('delete_via_sql: ContentID: ', ContentID, 'ContentType: ', ContentType)
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
            # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select ImageID from content where ContentID = ?', t)
ImageID = None
for row in cursor:
# First get the ImageID to delete the images
ImageID = row[0]
cursor.close()
cursor = connection.cursor()
if ContentType == 6 and self.dbversion < 8:
# Delete the shortcover_pages first
cursor.execute('delete from shortcover_page where shortcoverid in (select ContentID from content where BookID = ?)', t)
# Delete the volume_shortcovers second
cursor.execute('delete from volume_shortcovers where volumeid = ?', t)
# Delete the rows from content_keys
if self.dbversion >= 8:
cursor.execute('delete from content_keys where volumeid = ?', t)
# Delete the chapters associated with the book next
t = (ContentID,)
# Kobo does not delete the Book row (ie the row where the BookID is Null)
# The next server sync should remove the row
cursor.execute('delete from content where BookID = ?', t)
if ContentType == 6:
try:
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 '
'where BookID is Null and ContentID =?',t)
except Exception as e:
if 'no such column' not in str(e):
raise
try:
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0 '
'where BookID is Null and ContentID =?',t)
except Exception as e:
if 'no such column' not in str(e):
raise
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\' '
'where BookID is Null and ContentID =?',t)
else:
cursor.execute('delete from content where BookID is Null and ContentID =?',t)
connection.commit()
cursor.close()
if ImageID == None:
print "Error condition ImageID was not found"
print "You likely tried to delete a book that the kobo has not yet added to the database"
# If all this succeeds we need to delete the images files via the ImageID
return ImageID
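    # The ImageID returned by delete_via_sql() is passed to delete_images() by
    # delete_books() so the cached ' - *.parsed' cover variants can be removed too.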
def delete_images(self, ImageID, book_path):
if ImageID != None:
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
file_endings = (' - iPhoneThumbnail.parsed', ' - bbMediumGridList.parsed', ' - NickelBookCover.parsed', ' - N3_LIBRARY_FULL.parsed',
' - N3_LIBRARY_GRID.parsed', ' - N3_LIBRARY_LIST.parsed', ' - N3_SOCIAL_CURRENTREAD.parsed', ' - N3_FULL.parsed',)
for ending in file_endings:
fpath = path + ending
fpath = self.normalize_path(fpath)
if os.path.exists(fpath):
# print 'Image File Exists: ' + fpath
os.unlink(fpath)
def delete_books(self, paths, end_session=True):
if self.modify_database_check("delete_books") == False:
return
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
path = self.normalize_path(path)
# print "Delete file normalized path: " + path
extension = os.path.splitext(path)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(path)
ContentID = self.contentid_from_path(path, ContentType)
ImageID = self.delete_via_sql(ContentID, ContentType)
# print " We would now delete the Images for" + ImageID
self.delete_images(ImageID, path)
if os.path.exists(path):
# Delete the ebook
# print "Delete the ebook: " + path
os.unlink(path)
filepath = os.path.splitext(path)[0]
for ext in self.DELETE_EXTS:
if os.path.exists(filepath + ext):
# print "Filename: " + filename
os.unlink(filepath + ext)
if os.path.exists(path + ext):
# print "Filename: " + filename
os.unlink(path + ext)
if self.SUPPORTS_SUB_DIRS:
try:
# print "removed"
os.removedirs(os.path.dirname(path))
except:
pass
self.report_progress(1.0, _('Removing books from device...'))
def remove_books_from_metadata(self, paths, booklists):
if self.modify_database_check("remove_books_from_metatata") == False:
return
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
# print "Book Path: " + book.path
if path.endswith(book.path):
# print " Remove: " + book.path
bl.remove_book(book)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
def add_books_to_metadata(self, locations, metadata, booklists):
metadata = iter(metadata)
for i, location in enumerate(locations):
self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
info = metadata.next()
blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0
# Extract the correct prefix from the pathname. To do this correctly,
# we must ensure that both the prefix and the path are normalized
# so that the comparison will work. Book's __init__ will fix up
# lpath, so we don't need to worry about that here.
path = self.normalize_path(location[0])
if self._main_prefix:
prefix = self._main_prefix if \
path.startswith(self.normalize_path(self._main_prefix)) else None
if not prefix and self._card_a_prefix:
prefix = self._card_a_prefix if \
path.startswith(self.normalize_path(self._card_a_prefix)) else None
if not prefix and self._card_b_prefix:
prefix = self._card_b_prefix if \
path.startswith(self.normalize_path(self._card_b_prefix)) else None
if prefix is None:
prints('in add_books_to_metadata. Prefix is None!', path,
self._main_prefix)
continue
# print "Add book to metatdata: "
# print "prefix: " + prefix
lpath = path.partition(prefix)[2]
if lpath.startswith('/') or lpath.startswith('\\'):
lpath = lpath[1:]
# print "path: " + lpath
book = self.book_class(prefix, lpath, other=info)
if book.size is None or book.size == 0:
book.size = os.stat(self.normalize_path(path)).st_size
b = booklists[blist].add_book(book, replace_metadata=True)
if b:
b._new_book = True
self.report_progress(1.0, _('Adding books to device metadata listing...'))
def contentid_from_path(self, path, ContentType):
if ContentType == 6:
extension = os.path.splitext(path)[1]
if extension == '.kobo':
ContentID = os.path.splitext(path)[0]
# Remove the prefix on the file. it could be either
ContentID = ContentID.replace(self._main_prefix, '')
else:
ContentID = path
ContentID = ContentID.replace(self._main_prefix + self.normalize_path('.kobo/kepub/'), '')
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, '')
elif ContentType == 999: # HTML Files
ContentID = path
ContentID = ContentID.replace(self._main_prefix, "/mnt/onboard/")
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, "/mnt/sd/")
else: # ContentType = 16
ContentID = path
ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
ContentID = ContentID.replace("\\", '/')
return ContentID
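    # Illustrative examples of the path -> ContentID mapping above (paths are
    # hypothetical): a sideloaded '<main prefix>/Books/book.epub' becomes
    # 'file:///mnt/onboard/Books/book.epub' (ContentType 16), an HTML file becomes
    # '/mnt/onboard/...' (ContentType 999), and a kepub under '.kobo/kepub/' is
    # reduced to its bare ContentID (ContentType 6).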
def get_content_type_from_path(self, path):
# Strictly speaking the ContentType could be 6 or 10
# however newspapers have the same storage format
if path.find('kepub') >= 0:
ContentType = 6
return ContentType
def get_content_type_from_extension(self, extension):
if extension == '.kobo':
# Kobo books do not have book files. They do have some images though
# print "kobo book"
ContentType = 6
elif extension == '.pdf' or extension == '.epub':
# print "ePub or pdf"
ContentType = 16
elif extension == '.rtf' or extension == '.txt' or extension == '.htm' or extension == '.html':
# print "txt"
if self.fwversion == '1.0' or self.fwversion == '1.4' or self.fwversion == '1.7.4':
ContentType = 999
else:
ContentType = 901
else: # if extension == '.html' or extension == '.txt':
ContentType = 901 # Yet another hack: to get around Kobo changing how ContentID is stored
return ContentType
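    # Summary of the extension -> ContentType mapping above: '.kobo' -> 6,
    # '.pdf'/'.epub' -> 16, text formats -> 999 on firmware 1.0/1.4/1.7.4 and
    # 901 otherwise, anything else -> 901.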
def path_from_contentid(self, ContentID, ContentType, MimeType, oncard):
path = ContentID
if oncard == 'cardb':
print 'path from_contentid cardb'
elif oncard == 'carda':
path = path.replace("file:///mnt/sd/", self._card_a_prefix)
# print "SD Card: " + path
else:
if ContentType == "6" and MimeType == 'Shortcover':
# This is a hack as the kobo files do not exist
# but the path is required to make a unique id
# for calibre's reference
path = self._main_prefix + path + '.kobo'
# print "Path: " + path
elif (ContentType == "6" or ContentType == "10") and MimeType == 'application/x-kobo-epub+zip':
if path.startswith("file:///mnt/onboard/"):
path = self._main_prefix + path.replace("file:///mnt/onboard/", '')
else:
path = self._main_prefix + '.kobo/kepub/' + path
# print "Internal: " + path
else:
# if path.startswith("file:///mnt/onboard/"):
path = path.replace("file:///mnt/onboard/", self._main_prefix)
path = path.replace("/mnt/onboard/", self._main_prefix)
# print "Internal: " + path
return path
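    # Illustrative examples of the reverse ContentID -> path mapping (paths are
    # hypothetical): 'file:///mnt/onboard/book.epub' -> '<main prefix>/book.epub',
    # 'file:///mnt/sd/book.epub' -> '<card A prefix>/book.epub', and a Shortcover
    # row gets a placeholder '<main prefix><ContentID>.kobo' path so calibre has a
    # unique id for it.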
def modify_database_check(self, function):
# Checks to see whether the database version is supported
# and whether the user has chosen to support the firmware version
if self.dbversion > self.supported_dbversion:
# Unsupported database
opts = self.settings()
if not opts.extra_customization[self.OPT_SUPPORT_NEWER_FIRMWARE]:
debug_print('The database has been upgraded past supported version')
self.report_progress(1.0, _('Removing books from device...'))
from calibre.devices.errors import UserFeedback
raise UserFeedback(_("Kobo database version unsupported - See details"),
_('Your Kobo is running an updated firmware/database version.'
' As calibre does not know about this updated firmware,'
' database editing is disabled, to prevent corruption.'
' You can still send books to your Kobo with calibre, '
' but deleting books and managing collections is disabled.'
' If you are willing to experiment and know how to reset'
' your Kobo to Factory defaults, you can override this'
' check by right clicking the device icon in calibre and'
' selecting "Configure this device" and then the '
' "Attempt to support newer firmware" option.'
' Doing so may require you to perform a factory reset of'
' your Kobo.') + ((
'\nDevice database version: %s.'
'\nDevice firmware version: %s') % (self.dbversion, self.fwversion))
, UserFeedback.WARN)
return False
else:
# The user chose to edit the database anyway
return True
else:
# Supported database version
return True
def get_file(self, path, *args, **kwargs):
tpath = self.munge_path(path)
extension = os.path.splitext(tpath)[1]
if extension == '.kobo':
from calibre.devices.errors import UserFeedback
raise UserFeedback(_("Not Implemented"),
_('".kobo" files do not exist on the device as books; '
'instead they are rows in the sqlite database. '
'Currently they cannot be exported or viewed.'),
UserFeedback.WARN)
return USBMS.get_file(self, path, *args, **kwargs)
@classmethod
def book_from_path(cls, prefix, lpath, title, authors, mime, date, ContentType, ImageID):
# debug_print("KOBO:book_from_path - title=%s"%title)
from calibre.ebooks.metadata import MetaInformation
if cls.settings().read_metadata or cls.MUST_READ_METADATA:
mi = cls.metadata_from_path(cls.normalize_path(os.path.join(prefix, lpath)))
else:
from calibre.ebooks.metadata.meta import metadata_from_filename
mi = metadata_from_filename(cls.normalize_path(os.path.basename(lpath)),
cls.build_template_regexp())
if mi is None:
mi = MetaInformation(os.path.splitext(os.path.basename(lpath))[0],
[_('Unknown')])
size = os.stat(cls.normalize_path(os.path.join(prefix, lpath))).st_size
book = cls.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=size, other=mi)
return book
def get_device_paths(self):
paths = {}
for prefix, path, source_id in [
('main', 'metadata.calibre', 0),
('card_a', 'metadata.calibre', 1),
('card_b', 'metadata.calibre', 2)
]:
prefix = getattr(self, '_%s_prefix'%prefix)
if prefix is not None and os.path.exists(prefix):
paths[source_id] = os.path.join(prefix, *(path.split('/')))
return paths
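    # get_device_paths() maps a storage id (0 = main memory, 1 = card A, 2 = card B)
    # to the metadata.calibre cache file on that storage, e.g.
    # {0: '<main prefix>/metadata.calibre'} (the prefix shown is illustrative).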
def reset_readstatus(self, connection, oncard):
cursor = connection.cursor()
# Reset Im_Reading list in the database
if oncard == 'carda':
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
elif oncard != 'carda' and oncard != 'cardb':
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
try:
cursor.execute(query)
except:
debug_print(' Database Exception: Unable to reset ReadStatus list')
raise
else:
connection.commit()
# debug_print(' Commit: Reset ReadStatus list')
cursor.close()
def set_readstatus(self, connection, ContentID, ReadStatus):
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select DateLastRead from Content where BookID is Null and ContentID = ?', t)
result = cursor.fetchone()
if result is None:
datelastread = '1970-01-01T00:00:00'
else:
datelastread = result[0] if result[0] is not None else '1970-01-01T00:00:00'
t = (ReadStatus,datelastread,ContentID,)
try:
cursor.execute('update content set ReadStatus=?,FirstTimeReading=\'false\',DateLastRead=? where BookID is Null and ContentID = ?', t)
except:
            debug_print('    Database Exception:  Unable to update ReadStatus')
raise
else:
connection.commit()
# debug_print(' Commit: Setting ReadStatus List')
cursor.close()
def reset_favouritesindex(self, connection, oncard):
# Reset FavouritesIndex list in the database
if oncard == 'carda':
query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\''
elif oncard != 'carda' and oncard != 'cardb':
query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
cursor = connection.cursor()
try:
cursor.execute(query)
except Exception as e:
debug_print(' Database Exception: Unable to reset Shortlist list')
if 'no such column' not in str(e):
raise
else:
connection.commit()
# debug_print(' Commit: Reset FavouritesIndex list')
def set_favouritesindex(self, connection, ContentID):
cursor = connection.cursor()
t = (ContentID,)
try:
cursor.execute('update content set FavouritesIndex=1 where BookID is Null and ContentID = ?', t)
except Exception as e:
            debug_print('        Database Exception:  Unable to set book as Shortlist')
if 'no such column' not in str(e):
raise
else:
connection.commit()
# debug_print(' Commit: Set FavouritesIndex')
def update_device_database_collections(self, booklists, collections_attributes, oncard):
debug_print("Kobo:update_device_database_collections - oncard='%s'"%oncard)
if self.modify_database_check("update_device_database_collections") == False:
return
# Only process categories in this list
supportedcategories = {
"Im_Reading":1,
"Read":2,
"Closed":3,
"Shortlist":4,
# "Preview":99, # Unsupported as we don't want to change it
}
# Define lists for the ReadStatus
readstatuslist = {
"Im_Reading":1,
"Read":2,
"Closed":3,
}
accessibilitylist = {
"Preview":6,
"Recommendation":4,
}
# debug_print('Starting update_device_database_collections', collections_attributes)
# Force collections_attributes to be 'tags' as no other is currently supported
# debug_print('KOBO: overriding the provided collections_attributes:', collections_attributes)
collections_attributes = ['tags']
collections = booklists.get_collections(collections_attributes)
# debug_print('Kobo:update_device_database_collections - Collections:', collections)
# Create a connection to the sqlite database
        # This is done outside the books-in-collection loops: when the last book
        # is removed from a collection the list of books is empty, and the reset
        # for that collection would otherwise never occur.
import sqlite3 as sqlite
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
            # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
if collections:
# Need to reset the collections outside the particular loops
# otherwise the last item will not be removed
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14:
self.reset_favouritesindex(connection, oncard)
# Process any collections that exist
for category, books in collections.items():
if category in supportedcategories:
# debug_print("Category: ", category, " id = ", readstatuslist.get(category))
for book in books:
# debug_print(' Title:', book.title, 'category: ', category)
if category not in book.device_collections:
book.device_collections.append(category)
extension = os.path.splitext(book.path)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
ContentID = self.contentid_from_path(book.path, ContentType)
if category in readstatuslist.keys():
# Manage ReadStatus
self.set_readstatus(connection, ContentID, readstatuslist.get(category))
elif category == 'Shortlist' and self.dbversion >= 14:
# Manage FavouritesIndex/Shortlist
self.set_favouritesindex(connection, ContentID)
elif category in accessibilitylist.keys():
# Do not manage the Accessibility List
pass
else: # No collections
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
debug_print("No Collections - reseting ReadStatus")
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14:
debug_print("No Collections - reseting FavouritesIndex")
self.reset_favouritesindex(connection, oncard)
# debug_print('Finished update_device_database_collections', collections_attributes)
def get_collections_attributes(self):
collections = []
opts = self.settings()
if opts.extra_customization and len(opts.extra_customization[self.OPT_COLLECTIONS]) > 0:
collections = [x.lower().strip() for x in opts.extra_customization[self.OPT_COLLECTIONS].split(',')]
return collections
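    # Example (option value illustrative): a collections option of 'Tags, #myshelves'
    # yields ['tags', '#myshelves'].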
def sync_booklists(self, booklists, end_session=True):
debug_print('KOBO:sync_booklists - start')
paths = self.get_device_paths()
blists = {}
for i in paths:
try:
if booklists[i] is not None:
#debug_print('Booklist: ', i)
blists[i] = booklists[i]
except IndexError:
pass
collections = self.get_collections_attributes()
#debug_print('KOBO: collection fields:', collections)
for i, blist in blists.items():
if i == 0:
oncard = 'main'
else:
oncard = 'carda'
self.update_device_database_collections(blist, collections, oncard)
USBMS.sync_booklists(self, booklists, end_session=end_session)
debug_print('KOBO:sync_booklists - end')
def rebuild_collections(self, booklist, oncard):
collections_attributes = []
self.update_device_database_collections(booklist, collections_attributes, oncard)
def upload_cover(self, path, filename, metadata, filepath):
'''
        Upload book cover to the device.
:param path: The full path to the directory where the associated book is located.
:param filename: The name of the book file without the extension.
:param metadata: metadata belonging to the book. Use metadata.thumbnail
for cover
:param filepath: The full path to the ebook file
'''
opts = self.settings()
if not opts.extra_customization[self.OPT_UPLOAD_COVERS]:
            # Cover uploading is disabled in the driver options
debug_print('KOBO: not uploading cover')
return
if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
uploadgrayscale = False
else:
uploadgrayscale = True
debug_print('KOBO: uploading cover')
try:
self._upload_cover(path, filename, metadata, filepath, uploadgrayscale)
except:
debug_print('FAILED to upload cover', filepath)
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale):
from calibre.utils.magick.draw import save_cover_data_to
if metadata.cover:
cover = self.normalize_path(metadata.cover.replace('/', os.sep))
if os.path.exists(cover):
# Get ContentID for Selected Book
extension = os.path.splitext(filepath)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(filepath)
ContentID = self.contentid_from_path(filepath, ContentType)
import sqlite3 as sqlite
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
                    # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t)
result = cursor.fetchone()
if result is None:
debug_print("No rows exist in the database - cannot upload")
return
else:
ImageID = result[0]
# debug_print("ImageId: ", result[0])
cursor.close()
if ImageID != None:
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
file_endings = {' - iPhoneThumbnail.parsed':(103,150),
' - bbMediumGridList.parsed':(93,135),
' - NickelBookCover.parsed':(500,725),
' - N3_LIBRARY_FULL.parsed':(355,530),
' - N3_LIBRARY_GRID.parsed':(149,233),
' - N3_LIBRARY_LIST.parsed':(60,90),
' - N3_FULL.parsed':(600,800),
' - N3_SOCIAL_CURRENTREAD.parsed':(120,186)}
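                        # Each ending maps to the (width, height) that the
                        # corresponding cached cover variant is resized to below.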
for ending, resize in file_endings.items():
fpath = path + ending
fpath = self.normalize_path(fpath.replace('/', os.sep))
if os.path.exists(fpath):
with open(cover, 'rb') as f:
data = f.read()
# Return the data resized and in Grayscale if
# required
data = save_cover_data_to(data, 'dummy.jpg',
grayscale=uploadgrayscale,
resize_to=resize, return_data=True)
with open(fpath, 'wb') as f:
f.write(data)
fsync(f)
else:
debug_print("ImageID could not be retreived from the database")
def prepare_addable_books(self, paths):
'''
        The Kobo supports an encrypted epub referred to as a kepub.
        Unfortunately Kobo decided to put the files on the device
        with no file extension. I just hope that decision causes
        them as much grief as it does me :-)
        This has to make a temporary copy of the book files with an
        epub extension to allow Calibre's normal processing to
        deal with the file appropriately.
'''
for idx, path in enumerate(paths):
if path.find('kepub') >= 0:
with closing(open(path, 'rb')) as r:
tf = PersistentTemporaryFile(suffix='.epub')
shutil.copyfileobj(r, tf)
# tf.write(r.read())
paths[idx] = tf.name
return paths
def create_annotations_path(self, mdata, device_path=None):
if device_path:
return device_path
return USBMS.create_annotations_path(self, mdata)
def get_annotations(self, path_map):
from calibre.devices.kobo.bookmark import Bookmark
EPUB_FORMATS = [u'epub']
epub_formats = set(EPUB_FORMATS)
def get_storage():
storage = []
if self._main_prefix:
storage.append(os.path.join(self._main_prefix, self.EBOOK_DIR_MAIN))
if self._card_a_prefix:
storage.append(os.path.join(self._card_a_prefix, self.EBOOK_DIR_CARD_A))
if self._card_b_prefix:
storage.append(os.path.join(self._card_b_prefix, self.EBOOK_DIR_CARD_B))
return storage
def resolve_bookmark_paths(storage, path_map):
pop_list = []
book_ext = {}
for id in path_map:
file_fmts = set()
for fmt in path_map[id]['fmts']:
file_fmts.add(fmt)
bookmark_extension = None
if file_fmts.intersection(epub_formats):
book_extension = list(file_fmts.intersection(epub_formats))[0]
bookmark_extension = 'epub'
if bookmark_extension:
for vol in storage:
                        bkmk_path = path_map[id]['path']
if os.path.exists(bkmk_path):
path_map[id] = bkmk_path
book_ext[id] = book_extension
break
else:
pop_list.append(id)
else:
pop_list.append(id)
# Remove non-existent bookmark templates
for id in pop_list:
path_map.pop(id)
return path_map, book_ext
storage = get_storage()
path_map, book_ext = resolve_bookmark_paths(storage, path_map)
bookmarked_books = {}
for id in path_map:
extension = os.path.splitext(path_map[id])[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(path_map[id])
ContentID = self.contentid_from_path(path_map[id], ContentType)
debug_print("get_annotations - ContentID: ", ContentID, "ContentType: ", ContentType)
bookmark_ext = extension
db_path = self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite')
myBookmark = Bookmark(db_path, ContentID, path_map[id], id, book_ext[id], bookmark_ext)
bookmarked_books[id] = self.UserAnnotation(type='kobo_bookmark', value=myBookmark)
# This returns as job.result in gui2.ui.annotations_fetched(self,job)
return bookmarked_books
def generate_annotation_html(self, bookmark):
import calendar
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString
# Returns <div class="user_annotations"> ... </div>
#last_read_location = bookmark.last_read_location
#timestamp = bookmark.timestamp
percent_read = bookmark.percent_read
debug_print("Date: ", bookmark.last_read)
if bookmark.last_read is not None:
try:
last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S"))))
except:
try:
last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S.%f"))))
except:
last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%SZ"))))
else:
#self.datetime = time.gmtime()
last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
# debug_print("Percent read: ", percent_read)
ka_soup = BeautifulSoup()
dtc = 0
divTag = Tag(ka_soup,'div')
divTag['class'] = 'user_annotations'
# Add the last-read location
spanTag = Tag(ka_soup, 'span')
spanTag['style'] = 'font-weight:normal'
if bookmark.book_format == 'epub':
spanTag.insert(0,NavigableString(
_("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") %
dict(time=last_read,
# loc=last_read_location,
pr=percent_read)))
else:
spanTag.insert(0,NavigableString(
_("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") %
dict(time=last_read,
# loc=last_read_location,
pr=percent_read)))
divTag.insert(dtc, spanTag)
dtc += 1
divTag.insert(dtc, Tag(ka_soup,'br'))
dtc += 1
if bookmark.user_notes:
user_notes = bookmark.user_notes
annotations = []
# Add the annotations sorted by location
for location in sorted(user_notes):
if user_notes[location]['type'] == 'Bookmark':
annotations.append(
_('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br />%(annotation)s<br /><hr />') %
dict(chapter=user_notes[location]['chapter'],
dl=user_notes[location]['displayed_location'],
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
annotation=user_notes[location]['annotation'] if user_notes[location]['annotation'] is not None else ""))
elif user_notes[location]['type'] == 'Highlight':
annotations.append(
_('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><hr />') %
dict(chapter=user_notes[location]['chapter'],
dl=user_notes[location]['displayed_location'],
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
text=user_notes[location]['text']))
elif user_notes[location]['type'] == 'Annotation':
annotations.append(
_('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') %
dict(chapter=user_notes[location]['chapter'],
dl=user_notes[location]['displayed_location'],
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
text=user_notes[location]['text'],
annotation=user_notes[location]['annotation']))
else:
annotations.append(
_('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') %
dict(chapter=user_notes[location]['chapter'],
dl=user_notes[location]['displayed_location'],
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
text=user_notes[location]['text'],
annotation=user_notes[location]['annotation']))
for annotation in annotations:
divTag.insert(dtc, annotation)
dtc += 1
ka_soup.insert(0,divTag)
return ka_soup
def add_annotation_to_library(self, db, db_id, annotation):
from calibre.ebooks.BeautifulSoup import Tag
bm = annotation
ignore_tags = set(['Catalog', 'Clippings'])
if bm.type == 'kobo_bookmark':
mi = db.get_metadata(db_id, index_is_id=True)
debug_print("KOBO:add_annotation_to_library - Title: ", mi.title)
user_notes_soup = self.generate_annotation_html(bm.value)
if mi.comments:
a_offset = mi.comments.find('<div class="user_annotations">')
ad_offset = mi.comments.find('<hr class="annotations_divider" />')
if a_offset >= 0:
mi.comments = mi.comments[:a_offset]
if ad_offset >= 0:
mi.comments = mi.comments[:ad_offset]
if set(mi.tags).intersection(ignore_tags):
return
if mi.comments:
hrTag = Tag(user_notes_soup,'hr')
hrTag['class'] = 'annotations_divider'
user_notes_soup.insert(0, hrTag)
mi.comments += unicode(user_notes_soup.prettify())
else:
mi.comments = unicode(user_notes_soup.prettify())
# Update library comments
db.set_comment(db_id, mi.comments)
# Add bookmark file to db_id
# NOTE: As it is, this copied the book from the device back to the library. That meant it replaced the
            # existing file. Taking this out for that reason, but some books have an ANNOT file that could be
# copied.
# db.add_format_with_hooks(db_id, bm.value.bookmark_extension,
# bm.value.path, index_is_id=True)
class KOBOTOUCH(KOBO):
name = 'KoboTouch'
gui_name = 'Kobo Touch/Glo/Mini/Aura HD'
author = 'David Forrester'
description = 'Communicate with the Kobo Touch, Glo, Mini and Aura HD ereaders. Based on the existing Kobo driver by %s.' % (KOBO.author)
# icon = I('devices/kobotouch.jpg')
supported_dbversion = 120
min_supported_dbversion = 53
min_dbversion_series = 65
min_dbversion_externalid = 65
min_dbversion_archive = 71
min_dbversion_images_on_sdcard = 77
min_dbversion_activity = 77
min_dbversion_keywords = 82
max_supported_fwversion = (3, 15, 1)
    # The following document the firmware versions at which new functions or devices were added.
    # Not all are used, but this feels like a good place to record it.
min_fwversion_shelves = (2, 0, 0)
min_fwversion_images_on_sdcard = (2, 4, 1)
min_fwversion_images_tree = (2, 9, 0) # Cover images stored in tree under .kobo-images
min_aurah2o_fwversion = (3, 7, 0)
min_reviews_fwversion = (3, 12, 0)
min_glohd_fwversion = (3, 14, 0)
has_kepubs = True
booklist_class = KTCollectionsBookList
book_class = Book
MAX_PATH_LEN = 185 # 250 - (len(" - N3_LIBRARY_SHELF.parsed") + len("F:\.kobo\images\"))
KOBO_EXTRA_CSSFILE = 'kobo_extra.css'
EXTRA_CUSTOMIZATION_MESSAGE = [
_('The Kobo from firmware V2.0.0 supports bookshelves.'
' These are created on the Kobo. ' +
'Specify a tags type column for automatic management.'),
_('Create Bookshelves') +
':::'+_('Create new bookshelves on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.'),
_('Delete Empty Bookshelves') +
':::'+_('Delete any empty bookshelves from the Kobo when syncing is finished. This is only for firmware V2.0.0 or later.'),
_('Upload covers for books') +
':::'+_('Upload cover images from the calibre library when sending books to the device.'),
_('Upload Black and White Covers'),
_('Keep cover aspect ratio') +
':::'+_('When uploading covers, do not change the aspect ratio when resizing for the device.'
' This is for firmware versions 2.3.1 and later.'),
_('Show archived books') +
':::'+_('Archived books are listed on the device but need to be downloaded to read.'
' Use this option to show these books and match them with books in the calibre library.'),
_('Show Previews') +
            ':::'+_('Kobo previews are included on the Touch and some other versions.'
                    ' By default they are no longer displayed as there is no good reason to '
                    'see them. Enable if you wish to see/delete them.'),
_('Show Recommendations') +
':::'+_('Kobo shows recommendations on the device. In some cases these have '
'files but in other cases they are just pointers to the web site to buy. '
'Enable if you wish to see/delete them.'),
_('Set Series information') +
':::'+_('The book lists on the Kobo devices can display series information. '
'This is not read by the device from the sideloaded books. '
'Series information can only be added to the device after the book has been processed by the device. '
'Enable if you wish to set series information.'),
_('Modify CSS') +
':::'+_('This allows addition of user CSS rules and removal of some CSS. '
'When sending a book, the driver adds the contents of {0} to all stylesheets in the ePub. '
'This file is searched for in the root directory of the main memory of the device. '
'As well as this, if the file contains settings for the "orphans" or "widows", '
'these are removed for all styles in the original stylesheet.').format(KOBO_EXTRA_CSSFILE),
_('Attempt to support newer firmware') +
':::'+_('Kobo routinely updates the firmware and the '
'database version. With this option Calibre will attempt '
'to perform full read-write functionality - Here be Dragons!! '
'Enable only if you are comfortable with restoring your kobo '
'to factory defaults and testing software. '
'This driver supports firmware V2.x.x and DBVersion up to ') + unicode(supported_dbversion),
_('Title to test when debugging') +
':::'+_('Part of title of a book that can be used when doing some tests for debugging. '
'The test is to see if the string is contained in the title of a book. '
'The better the match, the less extraneous output.'),
]
EXTRA_CUSTOMIZATION_DEFAULT = [
u'',
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
u''
]
OPT_COLLECTIONS = 0
OPT_CREATE_BOOKSHELVES = 1
OPT_DELETE_BOOKSHELVES = 2
OPT_UPLOAD_COVERS = 3
OPT_UPLOAD_GRAYSCALE_COVERS = 4
OPT_KEEP_COVER_ASPECT_RATIO = 5
OPT_SHOW_ARCHIVED_BOOK_RECORDS = 6
OPT_SHOW_PREVIEWS = 7
OPT_SHOW_RECOMMENDATIONS = 8
OPT_UPDATE_SERIES_DETAILS = 9
OPT_MODIFY_CSS = 10
OPT_SUPPORT_NEWER_FIRMWARE = 11
OPT_DEBUGGING_TITLE = 12
opts = None
TIMESTAMP_STRING = "%Y-%m-%dT%H:%M:%SZ"
AURA_PRODUCT_ID = [0x4203]
AURA_HD_PRODUCT_ID = [0x4193]
AURA_H2O_PRODUCT_ID = [0x4213]
GLO_PRODUCT_ID = [0x4173]
GLO_HD_PRODUCT_ID = [0x4223]
MINI_PRODUCT_ID = [0x4183]
TOUCH_PRODUCT_ID = [0x4163]
PRODUCT_ID = AURA_PRODUCT_ID + AURA_HD_PRODUCT_ID + AURA_H2O_PRODUCT_ID + \
GLO_PRODUCT_ID + GLO_HD_PRODUCT_ID + \
MINI_PRODUCT_ID + TOUCH_PRODUCT_ID
BCD = [0x0110, 0x0326]
# Image file name endings. Made up of: image size, min_dbversion, max_dbversion, isFullSize,
# Note: "200" has been used just as a much larger number than the current versions. It is just a lazy
# way of making it open ended.
COVER_FILE_ENDINGS = {
' - N3_FULL.parsed':[(600,800),0, 200,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355,473),0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen
' - N3_LIBRARY_GRID.parsed':[(149,198),0, 200,False,], # Used for library lists
' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,False,],
' - AndroidBookLoadTablet_Aspect.parsed':[(355,473), 82, 200,False,], # Used for Details screen from FW2.8.1
# ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
}
GLO_COVER_FILE_ENDINGS = { # Glo and Aura share resolution, so the image sizes should be the same.
' - N3_FULL.parsed':[(758,1024),0, 200,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355,479),0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen
' - N3_LIBRARY_GRID.parsed':[(149,201),0, 200,False,], # Used for library lists
' - AndroidBookLoadTablet_Aspect.parsed':[(355,479), 88, 200,False,], # Used for Details screen from FW2.8.1
}
AURA_HD_COVER_FILE_ENDINGS = {
' - N3_FULL.parsed': [(1080,1440), 0, 200,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355, 471), 0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen
' - N3_LIBRARY_GRID.parsed':[(149, 198), 0, 200,False,], # Used for library lists
' - AndroidBookLoadTablet_Aspect.parsed':[(355, 471), 88, 200,False,], # Used for Details screen from FW2.8.1
}
# Following are the sizes used with pre2.1.4 firmware
# COVER_FILE_ENDINGS = {
# ' - N3_LIBRARY_FULL.parsed':[(355,530),0, 99,], # Used for Details screen
# ' - N3_LIBRARY_FULL.parsed':[(600,800),0, 99,],
# ' - N3_LIBRARY_GRID.parsed':[(149,233),0, 99,], # Used for library lists
# ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,],
# ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
# ' - N3_FULL.parsed':[(600,800),0, 99,], # Used for screensaver if "Full screen" is checked.
# }
def initialize(self):
super(KOBOTOUCH, self).initialize()
self.bookshelvelist = []
def get_device_information(self, end_session=True):
self.set_device_name()
return super(KOBOTOUCH, self).get_device_information(end_session)
def books(self, oncard=None, end_session=True):
debug_print("KoboTouch:books - oncard='%s'"%oncard)
from calibre.ebooks.metadata.meta import path_to_ext
dummy_bl = self.booklist_class(None, None, None)
if oncard == 'carda' and not self._card_a_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - Asked to process 'carda', but do not have one!")
return dummy_bl
elif oncard == 'cardb' and not self._card_b_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - Asked to process 'cardb', but do not have one!")
return dummy_bl
elif oncard and oncard != 'carda' and oncard != 'cardb':
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - unknown card")
return dummy_bl
prefix = self._card_a_prefix if oncard == 'carda' else \
self._card_b_prefix if oncard == 'cardb' \
else self._main_prefix
debug_print("KoboTouch:books - oncard='%s', prefix='%s'"%(oncard, prefix))
# Determine the firmware version
try:
with open(self.normalize_path(self._main_prefix + '.kobo/version'), 'rb') as f:
self.fwversion = f.readline().split(',')[2]
self.fwversion = tuple((int(x) for x in self.fwversion.split('.')))
except:
self.fwversion = (0,0,0)
debug_print('Kobo device: %s' % self.gui_name)
debug_print('Version of driver:', self.version, 'Has kepubs:', self.has_kepubs)
debug_print('Version of firmware:', self.fwversion, 'Has kepubs:', self.has_kepubs)
debug_print('Firmware supports cover image tree:', self.fwversion >= self.min_fwversion_images_tree)
self.booklist_class.rebuild_collections = self.rebuild_collections
# get the metadata cache
bl = self.booklist_class(oncard, prefix, self.settings)
opts = self.settings()
debug_print("KoboTouch:books - opts.extra_customization=", opts.extra_customization)
debug_print("KoboTouch:books - prefs['manage_device_metadata']=", prefs['manage_device_metadata'])
if opts.extra_customization:
debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE]
debug_print("KoboTouch:books - set_debugging_title to '%s'" % debugging_title)
bl.set_debugging_title(debugging_title)
debug_print("KoboTouch:books - length bl=%d"%len(bl))
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
debug_print("KoboTouch:books - length bl after sync=%d"%len(bl))
# make a dict cache of paths so the lookup in the loop below is faster.
bl_cache = {}
for idx,b in enumerate(bl):
bl_cache[b.lpath] = idx
def update_booklist(prefix, path, title, authors, mime, date, ContentID, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded, series, seriesnumber, userid, bookshelves):
show_debug = self.is_debugging_title(title)
# show_debug = authors == 'L. Frank Baum'
if show_debug:
debug_print("KoboTouch:update_booklist - title='%s'"%title, "ContentType=%s"%ContentType, "isdownloaded=", isdownloaded)
debug_print(
" prefix=%s, mime=%s, date=%s, readstatus=%d, MimeType=%s, expired=%d, favouritesindex=%d, accessibility=%d, isdownloaded=%s"%
(prefix, mime, date, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded,))
changed = False
try:
lpath = path.partition(self.normalize_path(prefix))[2]
if lpath.startswith(os.sep):
lpath = lpath[len(os.sep):]
lpath = lpath.replace('\\', '/')
# debug_print("LPATH: ", lpath, " - Title: " , title)
playlist_map = {}
if lpath not in playlist_map:
playlist_map[lpath] = []
allow_shelves = True
if readstatus == 1:
playlist_map[lpath].append('Im_Reading')
elif readstatus == 2:
playlist_map[lpath].append('Read')
elif readstatus == 3:
playlist_map[lpath].append('Closed')
# Related to a bug in the Kobo firmware that leaves an expired row for deleted books
# this shows an expired Collection so the user can decide to delete the book
if expired == 3:
playlist_map[lpath].append('Expired')
allow_shelves = False
# A SHORTLIST is supported on the touch but the data field is there on most earlier models
if favouritesindex == 1:
playlist_map[lpath].append('Shortlist')
                # The following is in flux:
# - FW2.0.0, DBVersion 53,55 accessibility == 1
# - FW2.1.2 beta, DBVersion == 56, accessibility == -1:
# So, the following should be OK
if isdownloaded == 'false':
if self.dbversion < 56 and accessibility <= 1 or self.dbversion >= 56 and accessibility == -1:
playlist_map[lpath].append('Deleted')
allow_shelves = False
if show_debug:
debug_print("KoboTouch:update_booklist - have a deleted book")
elif self.supports_kobo_archive() and (accessibility == 1 or accessibility == 2):
playlist_map[lpath].append('Archived')
allow_shelves = True
# Label Previews and Recommendations
if accessibility == 6:
if userid == '':
playlist_map[lpath].append('Recommendation')
allow_shelves = False
else:
playlist_map[lpath].append('Preview')
allow_shelves = False
elif accessibility == 4: # Pre 2.x.x firmware
playlist_map[lpath].append('Recommendation')
allow_shelves = False
kobo_collections = playlist_map[lpath][:]
if allow_shelves:
# debug_print('KoboTouch:update_booklist - allowing shelves - title=%s' % title)
if len(bookshelves) > 0:
playlist_map[lpath].extend(bookshelves)
if show_debug:
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
path = self.normalize_path(path)
# print "Normalized FileName: " + path
idx = bl_cache.get(lpath, None)
if idx is not None: # and not (accessibility == 1 and isdownloaded == 'false'):
if show_debug:
self.debug_index = idx
debug_print("KoboTouch:update_booklist - idx=%d"%idx)
debug_print("KoboTouch:update_booklist - lpath=%s"%lpath)
debug_print('KoboTouch:update_booklist - bl[idx].device_collections=', bl[idx].device_collections)
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
debug_print('KoboTouch:update_booklist - bookshelves=', bookshelves)
debug_print('KoboTouch:update_booklist - kobo_collections=', kobo_collections)
debug_print('KoboTouch:update_booklist - series="%s"' % bl[idx].series)
debug_print('KoboTouch:update_booklist - the book=', bl[idx])
debug_print('KoboTouch:update_booklist - the authors=', bl[idx].authors)
debug_print('KoboTouch:update_booklist - application_id=', bl[idx].application_id)
bl_cache[lpath] = None
if ImageID is not None:
imagename = self.imagefilename_from_imageID(prefix, ImageID)
if imagename is not None:
bl[idx].thumbnail = ImageWrapper(imagename)
if (ContentType == '6' and MimeType != 'application/x-kobo-epub+zip'):
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
if self.update_metadata_item(bl[idx]):
# print 'update_metadata_item returned true'
changed = True
else:
debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
debug_print("KoboTouch:update_booklist - book size=", bl[idx].size)
if show_debug:
debug_print("KoboTouch:update_booklist - ContentID='%s'"%ContentID)
bl[idx].contentID = ContentID
bl[idx].kobo_series = series
bl[idx].kobo_series_number = seriesnumber
bl[idx].can_put_on_shelves = allow_shelves
if lpath in playlist_map:
bl[idx].device_collections = playlist_map.get(lpath,[])
bl[idx].current_shelves = bookshelves
bl[idx].kobo_collections = kobo_collections
if show_debug:
debug_print('KoboTouch:update_booklist - updated bl[idx].device_collections=', bl[idx].device_collections)
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map, 'changed=', changed)
# debug_print('KoboTouch:update_booklist - book=', bl[idx])
debug_print("KoboTouch:update_booklist - book class=%s"%bl[idx].__class__)
debug_print("KoboTouch:update_booklist - book title=%s"%bl[idx].title)
else:
if show_debug:
debug_print('KoboTouch:update_booklist - idx is none')
try:
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
else:
if isdownloaded == 'true': # A recommendation or preview is OK to not have a file
debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
title = "FILE MISSING: " + title
book = self.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=0)
if show_debug:
debug_print('KoboTouch:update_booklist - book file does not exist. ContentID="%s"'%ContentID)
except Exception as e:
debug_print("KoboTouch:update_booklist - exception creating book: '%s'"%str(e))
debug_print(" prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors,
"mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID)
raise
if show_debug:
debug_print('KoboTouch:update_booklist - class:', book.__class__)
# debug_print(' resolution:', book.__class__.__mro__)
debug_print(" contentid: '%s'"%book.contentID)
debug_print(" title:'%s'"%book.title)
debug_print(" the book:", book)
debug_print(" author_sort:'%s'"%book.author_sort)
debug_print(" bookshelves:", bookshelves)
debug_print(" kobo_collections:", kobo_collections)
# print 'Update booklist'
book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else []
book.current_shelves = bookshelves
book.kobo_collections = kobo_collections
book.contentID = ContentID
book.kobo_series = series
book.kobo_series_number = seriesnumber
book.can_put_on_shelves = allow_shelves
# debug_print('KoboTouch:update_booklist - title=', title, 'book.device_collections', book.device_collections)
if bl.add_book(book, replace_metadata=False):
changed = True
if show_debug:
debug_print(' book.device_collections', book.device_collections)
debug_print(' book.title', book.title)
except: # Probably a path encoding error
import traceback
traceback.print_exc()
return changed
def get_bookshelvesforbook(connection, ContentID):
# debug_print("KoboTouch:get_bookshelvesforbook - " + ContentID)
bookshelves = []
if not self.supports_bookshelves():
return bookshelves
cursor = connection.cursor()
query = "select ShelfName " \
"from ShelfContent " \
"where ContentId = ? " \
"and _IsDeleted = 'false' " \
"and ShelfName is not null" # This should never be nulll, but it is protection against an error cause by a sync to the Kobo server
values = (ContentID, )
cursor.execute(query, values)
for i, row in enumerate(cursor):
bookshelves.append(row[0])
cursor.close()
# debug_print("KoboTouch:get_bookshelvesforbook - count bookshelves=" + unicode(count_bookshelves))
return bookshelves
self.debug_index = 0
import sqlite3 as sqlite
with closing(sqlite.connect(self.device_database_path())) as connection:
debug_print("KoboTouch:books - reading device database")
            # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
cursor.execute('select version from dbversion')
result = cursor.fetchone()
self.dbversion = result[0]
debug_print("Database Version=%d"%self.dbversion)
self.bookshelvelist = self.get_bookshelflist(connection)
debug_print("KoboTouch:books - shelf list:", self.bookshelvelist)
opts = self.settings()
columns = 'Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ImageID, ReadStatus'
if self.dbversion >= 16:
columns += ', ___ExpirationStatus, FavouritesIndex, Accessibility'
else:
columns += ', "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility'
if self.dbversion >= 33:
columns += ', IsDownloaded'
else:
columns += ', "1" as IsDownloaded'
if self.supports_series():
columns += ", Series, SeriesNumber, ___UserID, ExternalId"
else:
columns += ', null as Series, null as SeriesNumber, ___UserID, null as ExternalId'
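            # Build the WHERE clause: filter on accessibility/expiry according to the
            # schema version and the user's archive/preview/recommendation options.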
where_clause = ''
if self.supports_kobo_archive():
where_clause = (" where BookID is Null "
" and ((Accessibility = -1 and IsDownloaded in ('true', 1 )) or (Accessibility in (1,2) %(expiry)s) "
" %(previews)s %(recomendations)s )"
" and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) and ContentType = 6)") % \
dict(
expiry="" if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else "and IsDownloaded in ('true', 1)",
previews=" or (Accessibility in (6) and ___UserID <> '')" if opts.extra_customization[self.OPT_SHOW_PREVIEWS] else "",
recomendations=" or (Accessibility in (-1, 4, 6) and ___UserId = '')" if opts.extra_customization[
self.OPT_SHOW_RECOMMENDATIONS] else ""
)
elif self.supports_series():
where_clause = (" where BookID is Null "
" and ((Accessibility = -1 and IsDownloaded in ('true', 1)) or (Accessibility in (1,2)) %(previews)s %(recomendations)s )"
" and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)") % \
dict(
expiry=" and ContentType = 6" if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else "",
previews=" or (Accessibility in (6) and ___UserID <> '')" if opts.extra_customization[self.OPT_SHOW_PREVIEWS] else "",
recomendations=" or (Accessibility in (-1, 4, 6) and ___UserId = '')" if opts.extra_customization[
self.OPT_SHOW_RECOMMENDATIONS] else ""
)
elif self.dbversion >= 33:
where_clause = (' where BookID is Null %(previews)s %(recomendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)') % \
dict(
expiry=' and ContentType = 6' if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else '',
previews=' and Accessibility <> 6' if opts.extra_customization[self.OPT_SHOW_PREVIEWS] == False else '',
recomendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] == False else ''
)
elif self.dbversion >= 16:
where_clause = (' where BookID is Null '
'and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)') % \
dict(expiry=' and ContentType = 6' if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else '')
else:
where_clause = ' where BookID is Null'
# Note: The card condition should not need the contentId test for the SD
# card. But the ExternalId does not get set for sideloaded kepubs on the
# SD card.
card_condition = ''
if self.has_externalid():
card_condition = " AND (externalId IS NOT NULL AND externalId <> '' OR contentId LIKE 'file:///mnt/sd/%')" if oncard == 'carda' else " AND (externalId IS NULL OR externalId = '') AND contentId NOT LIKE 'file:///mnt/sd/%'"
else:
card_condition = " AND contentId LIKE 'file:///mnt/sd/%'" if oncard == 'carda' else " AND contentId NOT LIKE'file:///mnt/sd/%'"
query = 'SELECT ' + columns + ' FROM content ' + where_clause + card_condition
debug_print("KoboTouch:books - query=", query)
try:
cursor.execute(query)
except Exception as e:
err = str(e)
if not ('___ExpirationStatus' in err
or 'FavouritesIndex' in err
or 'Accessibility' in err
or 'IsDownloaded' in err
or 'Series' in err
or 'ExternalId' in err
):
raise
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as '
'FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded, null as Series, null as SeriesNumber'
' from content where BookID is Null')
cursor.execute(query)
changed = False
for i, row in enumerate(cursor):
# self.report_progress((i+1) / float(numrows), _('Getting list of books on device...'))
show_debug = self.is_debugging_title(row[0])
if show_debug:
debug_print("KoboTouch:books - looping on database - row=%d" % i)
debug_print("KoboTouch:books - title='%s'"%row[0], "authors=", row[1])
debug_print("KoboTouch:books - row=", row)
if not hasattr(row[3], 'startswith') or row[3].lower().startswith("file:///usr/local/kobo/help/") or row[3].lower().startswith("/usr/local/kobo/help/"):
# These are internal to the Kobo device and do not exist
continue
externalId = None if row[15] and len(row[15]) == 0 else row[15]
path = self.path_from_contentid(row[3], row[5], row[4], oncard, externalId)
mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/x-kobo-epub+zip'
# debug_print("mime:", mime)
if show_debug:
debug_print("KoboTouch:books - path='%s'"%path, " ContentID='%s'"%row[3], " externalId=%s" % externalId)
bookshelves = get_bookshelvesforbook(connection, row[3])
prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix
changed = update_booklist(prefix, path, row[0], row[1], mime, row[2], row[3], row[5],
row[6], row[7], row[4], row[8], row[9], row[10], row[11],
row[12], row[13], row[14], bookshelves)
if changed:
need_sync = True
cursor.close()
if not prefs['manage_device_metadata'] == 'on_connect':
self.dump_bookshelves(connection)
else:
debug_print("KoboTouch:books - automatically managing metadata")
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
if idx is not None:
if not os.path.exists(self.normalize_path(os.path.join(prefix, bl[idx].lpath))):
need_sync = True
del bl[idx]
# else:
# debug_print("KoboTouch:books - Book in mtadata.calibre, on file system but not database - bl[idx].title:'%s'"%bl[idx].title)
# print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \
# (len(bl_cache), len(bl), need_sync)
# Bypassing the KOBO sync_booklists as that does things we don't need to do
# Also forcing sync to see if this solves issues with updating shelves and matching books.
if need_sync or True: # self.count_found_in_bl != len(bl) or need_sync:
debug_print("KoboTouch:books - about to sync_booklists")
if oncard == 'cardb':
USBMS.sync_booklists(self, (None, None, bl))
elif oncard == 'carda':
USBMS.sync_booklists(self, (None, bl, None))
else:
USBMS.sync_booklists(self, (bl, None, None))
debug_print("KoboTouch:books - have done sync_booklists")
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - end - oncard='%s'"%oncard)
return bl
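    # Map a Kobo database ContentID (a file:///mnt/... URI or a bare kepub id)
    # back to a filesystem path, choosing the main-memory or SD card prefix.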
def path_from_contentid(self, ContentID, ContentType, MimeType, oncard, externalId):
path = ContentID
if not externalId:
return super(KOBOTOUCH, self).path_from_contentid(ContentID, ContentType, MimeType, oncard)
if oncard == 'cardb':
print 'path from_contentid cardb'
else:
if (ContentType == "6" or ContentType == "10"): # and MimeType == 'application/x-kobo-epub+zip':
if path.startswith("file:///mnt/onboard/"):
path = self._main_prefix + path.replace("file:///mnt/onboard/", '')
elif path.startswith("file:///mnt/sd/"):
path = self._card_a_prefix + path.replace("file:///mnt/sd/", '')
elif externalId:
path = self._card_a_prefix + 'koboExtStorage/kepub/' + path
else:
path = self._main_prefix + '.kobo/kepub/' + path
else: # Should never get here, but, just in case...
# if path.startswith("file:///mnt/onboard/"):
path = path.replace("file:///mnt/onboard/", self._main_prefix)
path = path.replace("file:///mnt/sd/", self._card_a_prefix)
path = path.replace("/mnt/onboard/", self._main_prefix)
# print "Internal: " + path
return path
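    # Probe each supported cover-file ending for this ImageID and return the
    # first cover image that exists on the device, or None if no cover is found.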
def imagefilename_from_imageID(self, prefix, ImageID):
show_debug = self.is_debugging_title(ImageID)
path = self.images_path(prefix, ImageID)
# path = self.normalize_path(path.replace('/', os.sep))
for ending, cover_options in self.cover_file_endings().items():
fpath = path + ending
if os.path.exists(fpath):
if show_debug:
debug_print("KoboTouch:imagefilename_from_imageID - have cover image fpath=%s" % (fpath))
return fpath
if show_debug:
debug_print("KoboTouch:imagefilename_from_imageID - no cover image found - ImageID=%s" % (ImageID))
return None
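    # Load and parse the user's extra CSS file (KOBO_EXTRA_CSSFILE) from the
    # device's main memory when CSS modification is enabled; returns the parsed
    # stylesheet, or None when there is nothing usable to append.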
def get_extra_css(self):
extra_sheet = None
if self.modifying_css():
extra_css_path = os.path.join(self._main_prefix, self.KOBO_EXTRA_CSSFILE)
if os.path.exists(extra_css_path):
from cssutils import parseFile as cssparseFile
try:
extra_sheet = cssparseFile(extra_css_path)
debug_print("KoboTouch:get_extra_css: Using extra CSS in {0} ({1} rules)".format(extra_css_path, len(extra_sheet.cssRules)))
if len(extra_sheet.cssRules) ==0:
debug_print("KoboTouch:get_extra_css: Extra CSS file has no valid rules. CSS will not be modified.")
extra_sheet = None
except Exception as e:
debug_print("KoboTouch:get_extra_css: Problem parsing extra CSS file {0}".format(extra_css_path))
debug_print("KoboTouch:get_extra_css: Exception {0}".format(e))
return extra_sheet
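    # Before handing off to the standard USBMS upload, optionally rewrite each
    # EPUB with the extra CSS; afterwards tidy the device database (drop stale
    # "deleted" rows, record file sizes, remove covers if they are not copied).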
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
debug_print('KoboTouch:upload_books - %d books'%(len(files)))
debug_print('KoboTouch:upload_books - files=', files)
if self.modifying_epub():
self.extra_sheet = self.get_extra_css()
i = 0
for file, n, mi in zip(files, names, metadata):
debug_print("KoboTouch:upload_books: Processing book: {0} by {1}".format(mi.title, " and ".join(mi.authors)))
debug_print("KoboTouch:upload_books: file=%s, name=%s" % (file, n))
self.report_progress(i / float(len(files)), "Processing book: {0} by {1}".format(mi.title, " and ".join(mi.authors)))
mi.kte_calibre_name = n
self._modify_epub(file, mi)
i += 1
self.report_progress(0, 'Working...')
result = super(KOBOTOUCH, self).upload_books(files, names, on_card, end_session, metadata)
# debug_print('KoboTouch:upload_books - result=', result)
if self.dbversion >= 53:
import sqlite3 as sqlite
try:
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
cleanup_query = "DELETE FROM content WHERE ContentID = ? AND Accessibility = 1 AND IsDownloaded = 'false'"
for fname, cycle in result:
show_debug = self.is_debugging_title(fname)
contentID = self.contentid_from_path(fname, 6)
if show_debug:
debug_print('KoboTouch:upload_books: fname=', fname)
debug_print('KoboTouch:upload_books: contentID=', contentID)
cleanup_values = (contentID,)
# debug_print('KoboTouch:upload_books: Delete record left if deleted on Touch')
cursor.execute(cleanup_query, cleanup_values)
self.set_filesize_in_device_database(connection, contentID, fname)
if not self.copying_covers():
imageID = self.imageid_from_contentid(contentID)
self.delete_images(imageID, fname)
connection.commit()
cursor.close()
except Exception as e:
debug_print('KoboTouch:upload_books - Exception: %s'%str(e))
return result
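    # Append the extra CSS rules to every stylesheet in the EPUB, first removing
    # conflicting @page and widows/orphans rules from the book's own CSS, and
    # commit the container back over the original file when it was opened here.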
def _modify_epub(self, file, metadata, container=None):
debug_print("KoboTouch:_modify_epub:Processing {0} - {1}".format(metadata.author_sort, metadata.title))
# Currently only modifying CSS, so if no stylesheet, don't do anything
if not self.extra_sheet:
debug_print("KoboTouch:_modify_epub: no CSS file")
return True
commit_container = False
if not container:
commit_container = True
try:
from calibre.ebooks.oeb.polish.container import get_container
debug_print("KoboTouch:_modify_epub: creating container")
container = get_container(file)
container.css_preprocessor = DummyCSSPreProcessor()
except Exception as e:
debug_print("KoboTouch:_modify_epub: exception from get_container {0} - {1}".format(metadata.author_sort, metadata.title))
debug_print("KoboTouch:_modify_epub: exception is: {0}".format(e))
return False
else:
debug_print("KoboTouch:_modify_epub: received container")
from calibre.ebooks.oeb.base import OEB_STYLES
for cssname, mt in container.mime_map.iteritems():
if mt in OEB_STYLES:
newsheet = container.parsed(cssname)
oldrules = len(newsheet.cssRules)
# remove any existing @page rules in epub css
# if css to be appended contains an @page rule
if self.extra_sheet and len([r for r in self.extra_sheet if r.type == r.PAGE_RULE]):
page_rules = [r for r in newsheet if r.type == r.PAGE_RULE]
if len(page_rules) > 0:
debug_print("KoboTouch:_modify_epub:Removing existing @page rules")
for rule in page_rules:
rule.style = ''
# remove any existing widow/orphan settings in epub css
                # if css to be appended contains a widow/orphan rule or there is no extra CSS file
if (len([r for r in self.extra_sheet if r.type == r.STYLE_RULE
and (r.style['widows'] or r.style['orphans'])]) > 0):
widow_orphan_rules = [r for r in newsheet if r.type == r.STYLE_RULE
and (r.style['widows'] or r.style['orphans'])]
if len(widow_orphan_rules) > 0:
debug_print("KoboTouch:_modify_epub:Removing existing widows/orphans attribs")
for rule in widow_orphan_rules:
rule.style.removeProperty('widows')
rule.style.removeProperty('orphans')
# append all rules from kobo extra css stylesheet
for addrule in [r for r in self.extra_sheet.cssRules]:
newsheet.insertRule(addrule, len(newsheet.cssRules))
debug_print("KoboTouch:_modify_epub:CSS rules {0} -> {1} ({2})".format(oldrules, len(newsheet.cssRules), cssname))
container.dirty(cssname)
if commit_container:
debug_print("KoboTouch:_modify_epub: committing container.")
os.unlink(file)
container.commit(file)
return True
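    # On top of the base class deletion, scrub the book's bookmarks, shelf
    # entries, content/settings/ratings rows and home screen Activity tiles
    # from the device database (firmware with dbversion >= 53).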
def delete_via_sql(self, ContentID, ContentType):
imageId = super(KOBOTOUCH, self).delete_via_sql(ContentID, ContentType)
if self.dbversion >= 53:
import sqlite3 as sqlite
debug_print('KoboTouch:delete_via_sql: ContentID="%s"'%ContentID, 'ContentType="%s"'%ContentType)
try:
with closing(sqlite.connect(self.device_database_path())) as connection:
debug_print('KoboTouch:delete_via_sql: have database connection')
                    # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
debug_print('KoboTouch:delete_via_sql: have cursor')
t = (ContentID,)
# Delete the Bookmarks
debug_print('KoboTouch:delete_via_sql: Delete from Bookmark')
cursor.execute('DELETE FROM Bookmark WHERE VolumeID = ?', t)
# Delete from the Bookshelf
debug_print('KoboTouch:delete_via_sql: Delete from the Bookshelf')
cursor.execute('delete from ShelfContent where ContentID = ?', t)
# ContentType 6 is now for all books.
debug_print('KoboTouch:delete_via_sql: BookID is Null')
cursor.execute('delete from content where BookID is Null and ContentID =?',t)
# Remove the content_settings entry
debug_print('KoboTouch:delete_via_sql: delete from content_settings')
cursor.execute('delete from content_settings where ContentID =?',t)
# Remove the ratings entry
debug_print('KoboTouch:delete_via_sql: delete from ratings')
cursor.execute('delete from ratings where ContentID =?',t)
# Remove any entries for the Activity table - removes tile from new home page
if self.has_activity_table():
debug_print('KoboTouch:delete_via_sql: delete from Activity')
cursor.execute('delete from Activity where Id =?', t)
connection.commit()
cursor.close()
debug_print('KoboTouch:delete_via_sql: finished SQL')
debug_print('KoboTouch:delete_via_sql: After SQL, no exception')
except Exception as e:
debug_print('KoboTouch:delete_via_sql - Database Exception: %s'%str(e))
debug_print('KoboTouch:delete_via_sql: imageId="%s"'%imageId)
if imageId is None:
imageId = self.imageid_from_contentid(ContentID)
return imageId
def delete_images(self, ImageID, book_path):
debug_print("KoboTouch:delete_images - ImageID=", ImageID)
if ImageID != None:
path = self.images_path(book_path, ImageID)
debug_print("KoboTouch:delete_images - path=%s" % path)
for ending in self.cover_file_endings().keys():
fpath = path + ending
fpath = self.normalize_path(fpath)
debug_print("KoboTouch:delete_images - fpath=%s" % fpath)
if os.path.exists(fpath):
debug_print("KoboTouch:delete_images - Image File Exists")
os.unlink(fpath)
try:
os.removedirs(os.path.dirname(path))
except:
pass
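    # Inverse of path_from_contentid: derive the database ContentID from a
    # filesystem path, mapping device prefixes back to file:///mnt/... URIs.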
def contentid_from_path(self, path, ContentType):
show_debug = self.is_debugging_title(path) and True
if show_debug:
debug_print("KoboTouch:contentid_from_path - path='%s'"%path, "ContentType='%s'"%ContentType)
debug_print("KoboTouch:contentid_from_path - self._main_prefix='%s'"%self._main_prefix, "self._card_a_prefix='%s'"%self._card_a_prefix)
if ContentType == 6:
extension = os.path.splitext(path)[1]
if extension == '.kobo':
ContentID = os.path.splitext(path)[0]
# Remove the prefix on the file. it could be either
ContentID = ContentID.replace(self._main_prefix, '')
elif extension == '':
ContentID = path
ContentID = ContentID.replace(self._main_prefix + self.normalize_path('.kobo/kepub/'), '')
else:
ContentID = path
ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
if show_debug:
debug_print("KoboTouch:contentid_from_path - 1 ContentID='%s'"%ContentID)
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
else: # ContentType = 16
debug_print("KoboTouch:contentid_from_path ContentType other than 6 - ContentType='%d'"%ContentType, "path='%s'"%path)
ContentID = path
ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
ContentID = ContentID.replace("\\", '/')
if show_debug:
debug_print("KoboTouch:contentid_from_path - end - ContentID='%s'"%ContentID)
return ContentID
def get_content_type_from_extension(self, extension):
debug_print("KoboTouch:get_content_type_from_extension - start")
# With new firmware, ContentType appears to be 6 for all types of sideloaded books.
if self.fwversion >= (1,9,17) or extension == '.kobo' or extension == '.mobi':
debug_print("KoboTouch:get_content_type_from_extension - V2 firmware")
ContentType = 6
# For older firmware, it depends on the type of file.
elif extension == '.kobo' or extension == '.mobi':
ContentType = 6
else:
ContentType = 901
return ContentType
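    # Push calibre collections to the device: read status and (on older
    # firmware) the FavouritesIndex/Shortlist, plus bookshelves and series
    # details where the firmware supports them.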
def update_device_database_collections(self, booklists, collections_attributes, oncard):
debug_print("KoboTouch:update_device_database_collections - oncard='%s'"%oncard)
if self.modify_database_check("update_device_database_collections") == False:
return
# Only process categories in this list
supportedcategories = {
"Im_Reading": 1,
"Read": 2,
"Closed": 3,
"Shortlist": 4,
"Archived": 5,
# "Preview":99, # Unsupported as we don't want to change it
}
# Define lists for the ReadStatus
readstatuslist = {
"Im_Reading":1,
"Read":2,
"Closed":3,
}
accessibilitylist = {
"Preview":6,
"Recommendation":4,
"Deleted":1,
}
# specialshelveslist = {
# "Shortlist":1,
# "Wishlist":2,
# }
# debug_print('KoboTouch:update_device_database_collections - collections_attributes=', collections_attributes)
opts = self.settings()
if opts.extra_customization:
create_bookshelves = opts.extra_customization[self.OPT_CREATE_BOOKSHELVES] and self.supports_bookshelves()
delete_empty_shelves = opts.extra_customization[self.OPT_DELETE_BOOKSHELVES] and self.supports_bookshelves()
update_series_details = opts.extra_customization[self.OPT_UPDATE_SERIES_DETAILS] and self.supports_series()
debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE]
debug_print("KoboTouch:update_device_database_collections - set_debugging_title to '%s'" % debugging_title)
booklists.set_debugging_title(debugging_title)
else:
delete_empty_shelves = False
create_bookshelves = False
update_series_details = False
opts = self.settings()
if opts.extra_customization:
create_bookshelves = opts.extra_customization[self.OPT_CREATE_BOOKSHELVES] and self.supports_bookshelves()
delete_empty_shelves = opts.extra_customization[self.OPT_DELETE_BOOKSHELVES] and self.supports_bookshelves()
else:
delete_empty_shelves = False
bookshelf_attribute = len(collections_attributes)
collections = booklists.get_collections(collections_attributes) if bookshelf_attribute else None
# debug_print('KoboTouch:update_device_database_collections - Collections:', collections)
# Create a connection to the sqlite database
        # Needs to be outside the books collection loop: when the last book is
        # removed from a collection, the list of books is empty and the removal
        # would never be processed inside the loop.
import sqlite3 as sqlite
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
            # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
if collections:
# debug_print("KoboTouch:update_device_database_collections - length collections=" + unicode(len(collections)))
# Need to reset the collections outside the particular loops
# otherwise the last item will not be removed
if self.dbversion < 53:
debug_print("KoboTouch:update_device_database_collections - calling reset_readstatus")
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
debug_print("KoboTouch:update_device_database_collections - calling reset_favouritesindex")
self.reset_favouritesindex(connection, oncard)
# debug_print("KoboTouch:update_device_database_collections - length collections=", len(collections))
# debug_print("KoboTouch:update_device_database_collections - self.bookshelvelist=", self.bookshelvelist)
# Process any collections that exist
for category, books in collections.items():
debug_print("KoboTouch:update_device_database_collections - category='%s' books=%d"%(category, len(books)))
if create_bookshelves and not (category in supportedcategories or category in readstatuslist or category in accessibilitylist):
self.check_for_bookshelf(connection, category)
# if category in self.bookshelvelist:
# debug_print("Category: ", category, " id = ", readstatuslist.get(category))
for book in books:
# debug_print(' Title:', book.title, 'category: ', category)
show_debug = self.is_debugging_title(book.title)
if show_debug:
debug_print(' Title="%s"'%book.title, 'category="%s"'%category)
# debug_print(book)
debug_print(' class=%s'%book.__class__)
debug_print(' book.contentID="%s"'%book.contentID)
debug_print(' book.application_id="%s"'%book.application_id)
if book.application_id is None:
continue
category_added = False
if book.contentID is None:
debug_print(' Do not know ContentID - Title="%s"'%book.title)
extension = os.path.splitext(book.path)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
book.contentID = self.contentid_from_path(book.path, ContentType)
if category in self.bookshelvelist and self.supports_bookshelves():
if show_debug:
debug_print(' length book.device_collections=%d'%len(book.device_collections))
if category not in book.device_collections:
if show_debug:
debug_print(' Setting bookshelf on device')
self.set_bookshelf(connection, book, category)
category_added = True
elif category in readstatuslist.keys():
# Manage ReadStatus
self.set_readstatus(connection, book.contentID, readstatuslist.get(category))
category_added = True
elif category == 'Shortlist' and self.dbversion >= 14:
if show_debug:
debug_print(' Have an older version shortlist - %s'%book.title)
# Manage FavouritesIndex/Shortlist
if not self.supports_bookshelves():
if show_debug:
debug_print(' and about to set it - %s'%book.title)
self.set_favouritesindex(connection, book.contentID)
category_added = True
elif category in accessibilitylist.keys():
# Do not manage the Accessibility List
pass
if category_added and category not in book.device_collections:
if show_debug:
debug_print(' adding category to book.device_collections', book.device_collections)
book.device_collections.append(category)
else:
if show_debug:
debug_print(' category not added to book.device_collections', book.device_collections)
debug_print("KoboTouch:update_device_database_collections - end for category='%s'"%category)
elif bookshelf_attribute: # No collections but have set the shelf option
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
debug_print("No Collections - reseting ReadStatus")
if self.dbversion < 53:
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
debug_print("No Collections - resetting FavouritesIndex")
self.reset_favouritesindex(connection, oncard)
# Set the series info and cleanup the bookshelves only if the firmware supports them and the user has set the options.
if (self.supports_bookshelves() or self.supports_series()) and (bookshelf_attribute or update_series_details):
debug_print("KoboTouch:update_device_database_collections - managing bookshelves and series.")
self.series_set = 0
books_in_library = 0
for book in booklists:
if book.application_id is not None:
books_in_library += 1
show_debug = self.is_debugging_title(book.title)
if show_debug:
debug_print("KoboTouch:update_device_database_collections - book.title=%s" % book.title)
if update_series_details:
self.set_series(connection, book)
if bookshelf_attribute:
if show_debug:
debug_print("KoboTouch:update_device_database_collections - about to remove a book from shelves book.title=%s" % book.title)
self.remove_book_from_device_bookshelves(connection, book)
book.device_collections.extend(book.kobo_collections)
if not prefs['manage_device_metadata'] == 'manual' and delete_empty_shelves:
debug_print("KoboTouch:update_device_database_collections - about to clear empty bookshelves")
self.delete_empty_bookshelves(connection)
debug_print("KoboTouch:update_device_database_collections - Number of series set=%d Number of books=%d" % (self.series_set, books_in_library))
self.dump_bookshelves(connection)
debug_print('KoboTouch:update_device_database_collections - Finished ')
def rebuild_collections(self, booklist, oncard):
debug_print("KoboTouch:rebuild_collections")
collections_attributes = self.get_collections_attributes()
debug_print('KoboTouch:rebuild_collections: collection fields:', collections_attributes)
self.update_device_database_collections(booklist, collections_attributes, oncard)
def upload_cover(self, path, filename, metadata, filepath):
'''
Upload book cover to the device. Default implementation does nothing.
:param path: The full path to the directory where the associated book is located.
:param filename: The name of the book file without the extension.
:param metadata: metadata belonging to the book. Use metadata.thumbnail
for cover
:param filepath: The full path to the ebook file
'''
debug_print("KoboTouch:upload_cover - path='%s' filename='%s' "%(path, filename))
debug_print(" filepath='%s' "%(filepath))
opts = self.settings()
if not self.copying_covers():
# Building thumbnails disabled
# debug_print('KoboTouch: not uploading cover')
return
# Only upload covers to SD card if that is supported
if self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and not self.supports_covers_on_sdcard():
return
if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
uploadgrayscale = False
else:
uploadgrayscale = True
# debug_print('KoboTouch: uploading cover')
try:
self._upload_cover(path, filename, metadata, filepath, uploadgrayscale, self.keep_cover_aspect())
except Exception as e:
debug_print('KoboTouch: FAILED to upload cover=%s Exception=%s'%(filepath, str(e)))
def imageid_from_contentid(self, ContentID):
ImageID = ContentID.replace('/', '_')
ImageID = ImageID.replace(' ', '_')
ImageID = ImageID.replace(':', '_')
ImageID = ImageID.replace('.', '_')
return ImageID
def images_path(self, path, imageId=None):
if self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and self.supports_covers_on_sdcard():
path_prefix = 'koboExtStorage/images-cache/' if self.supports_images_tree() else 'koboExtStorage/images/'
path = os.path.join(self._card_a_prefix, path_prefix)
else:
path_prefix = '.kobo-images/' if self.supports_images_tree() else '.kobo/images/'
path = os.path.join(self._main_prefix, path_prefix)
if self.supports_images_tree() and imageId:
hash1 = qhash(imageId)
dir1 = hash1 & (0xff * 1)
dir2 = (hash1 & (0xff00 * 1)) >> 8
path = os.path.join(path, "%s" % dir1, "%s" % dir2)
if imageId:
path = os.path.join(path, imageId)
return path
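    # Look up (or derive) the book's ImageID, then write one cover file per
    # supported ending, optionally resized, aspect-preserved and grayscaled.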
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, keep_cover_aspect=False):
from calibre.utils.magick.draw import save_cover_data_to, identify_data
debug_print("KoboTouch:_upload_cover - filename='%s' uploadgrayscale='%s' "%(filename, uploadgrayscale))
if metadata.cover:
show_debug = self.is_debugging_title(filename)
if show_debug:
debug_print("KoboTouch:_upload_cover - path='%s'"%path, "filename='%s'"%filename)
debug_print(" filepath='%s'"%filepath)
cover = self.normalize_path(metadata.cover.replace('/', os.sep))
if os.path.exists(cover):
# Get ContentID for Selected Book
extension = os.path.splitext(filepath)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(filepath)
ContentID = self.contentid_from_path(filepath, ContentType)
try:
import sqlite3 as sqlite
with closing(sqlite.connect(self.device_database_path())) as connection:
                        # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t)
result = cursor.fetchone()
if result is None:
ImageID = self.imageid_from_contentid(ContentID)
debug_print("KoboTouch:_upload_cover - No rows exist in the database - generated ImageID='%s'" % ImageID)
else:
ImageID = result[0]
# debug_print("ImageId: ", result[0])
cursor.close()
if ImageID != None:
path = self.images_path(path, ImageID)
if show_debug:
debug_print("KoboTouch:_upload_cover - About to loop over cover endings")
image_dir = os.path.dirname(os.path.abspath(path))
if not os.path.exists(image_dir):
debug_print("KoboTouch:_upload_cover - Image directory does not exust. Creating path='%s'" % (image_dir))
os.makedirs(image_dir)
for ending, cover_options in self.cover_file_endings().items():
resize, min_dbversion, max_dbversion, isFullsize = cover_options
if show_debug:
debug_print("KoboTouch:_upload_cover - resize=%s min_dbversion=%d max_dbversion=%d" % (resize, min_dbversion, max_dbversion))
if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion:
if show_debug:
debug_print("KoboTouch:_upload_cover - creating cover for ending='%s'"%ending) # , "resize'%s'"%resize)
fpath = path + ending
fpath = self.normalize_path(fpath.replace('/', os.sep))
with open(cover, 'rb') as f:
data = f.read()
if keep_cover_aspect:
if isFullsize:
resize = None
else:
width, height, fmt = identify_data(data)
cover_aspect = width / height
if cover_aspect > 1:
resize = (resize[0], int(resize[0] / cover_aspect))
elif cover_aspect < 1:
resize = (int(cover_aspect * resize[1]), resize[1])
# Return the data resized and in Grayscale if
# required
data = save_cover_data_to(data, 'dummy.jpg',
grayscale=uploadgrayscale,
resize_to=resize, return_data=True)
with open(fpath, 'wb') as f:
f.write(data)
fsync(f)
except Exception as e:
err = str(e)
debug_print("KoboTouch:_upload_cover - Exception string: %s"%err)
raise
else:
debug_print("KoboTouch:_upload_cover - ImageID could not be retrieved from the database")
def remove_book_from_device_bookshelves(self, connection, book):
show_debug = self.is_debugging_title(book.title) # or True
remove_shelf_list = set(book.current_shelves) - set(book.device_collections)
if show_debug:
debug_print('KoboTouch:remove_book_from_device_bookshelves - book.application_id="%s"'%book.application_id)
debug_print('KoboTouch:remove_book_from_device_bookshelves - book.contentID="%s"'%book.contentID)
debug_print('KoboTouch:remove_book_from_device_bookshelves - book.device_collections=', book.device_collections)
debug_print('KoboTouch:remove_book_from_device_bookshelves - book.current_shelves=', book.current_shelves)
debug_print('KoboTouch:remove_book_from_device_bookshelves - remove_shelf_list=', remove_shelf_list)
if len(remove_shelf_list) == 0:
return
query = 'DELETE FROM ShelfContent WHERE ContentId = ?'
values = [book.contentID,]
if book.device_collections:
placeholder = '?'
placeholders = ','.join(placeholder for unused in book.device_collections)
query += ' and ShelfName not in (%s)' % placeholders
values.extend(book.device_collections)
if show_debug:
debug_print('KoboTouch:remove_book_from_device_bookshelves query="%s"'%query)
debug_print('KoboTouch:remove_book_from_device_bookshelves values="%s"'%values)
cursor = connection.cursor()
cursor.execute(query, values)
connection.commit()
cursor.close()
def set_filesize_in_device_database(self, connection, contentID, fpath):
show_debug = self.is_debugging_title(fpath)
if show_debug:
debug_print('KoboTouch:set_filesize_in_device_database contentID="%s"'%contentID)
test_query = 'SELECT ___FileSize ' \
'FROM content ' \
'WHERE ContentID = ? ' \
' AND ContentType = 6'
test_values = (contentID, )
updatequery = 'UPDATE content ' \
'SET ___FileSize = ? ' \
'WHERE ContentId = ? ' \
'AND ContentType = 6'
cursor = connection.cursor()
cursor.execute(test_query, test_values)
result = cursor.fetchone()
if result is None:
if show_debug:
debug_print(' Did not find a record - new book on device')
elif os.path.exists(fpath):
file_size = os.stat(self.normalize_path(fpath)).st_size
if show_debug:
debug_print(' Found a record - will update - ___FileSize=', result[0], ' file_size=', file_size)
if file_size != int(result[0]):
update_values = (file_size, contentID, )
cursor.execute(updatequery, update_values)
if show_debug:
debug_print(' Size updated.')
connection.commit()
cursor.close()
# debug_print("KoboTouch:set_filesize_in_device_database - end")
def delete_empty_bookshelves(self, connection):
debug_print("KoboTouch:delete_empty_bookshelves - start")
delete_query = ("DELETE FROM Shelf "
"WHERE Shelf._IsSynced = 'false' "
"AND Shelf.InternalName not in ('Shortlist', 'Wishlist') "
"AND NOT EXISTS "
"(SELECT 1 FROM ShelfContent c "
"WHERE Shelf.Name = C.ShelfName "
"AND c._IsDeleted <> 'true')")
update_query = ("UPDATE Shelf "
"SET _IsDeleted = 'true' "
"WHERE Shelf._IsSynced = 'true' "
"AND Shelf.InternalName not in ('Shortlist', 'Wishlist') "
"AND NOT EXISTS "
"(SELECT 1 FROM ShelfContent C "
"WHERE Shelf.Name = C.ShelfName "
"AND c._IsDeleted <> 'true')")
delete_activity_query = ("DELETE FROM Activity "
"WHERE Type = 'Shelf' "
"AND NOT EXISTS "
"(SELECT 1 FROM Shelf "
"WHERE Shelf.Name = Activity.Id "
"AND Shelf._IsDeleted = 'false')"
)
cursor = connection.cursor()
cursor.execute(delete_query)
cursor.execute(update_query)
if self.has_activity_table():
cursor.execute(delete_activity_query)
connection.commit()
cursor.close()
debug_print("KoboTouch:delete_empty_bookshelves - end")
def get_bookshelflist(self, connection):
        # Retrieve the list of bookshelves
# debug_print('KoboTouch:get_bookshelflist')
bookshelves = []
if not self.supports_bookshelves():
return bookshelves
query = 'SELECT Name FROM Shelf WHERE _IsDeleted = "false"'
cursor = connection.cursor()
cursor.execute(query)
# count_bookshelves = 0
for i, row in enumerate(cursor):
bookshelves.append(row[0])
# count_bookshelves = i + 1
cursor.close()
# debug_print("KoboTouch:get_bookshelflist - count bookshelves=" + unicode(count_bookshelves))
return bookshelves
def set_bookshelf(self, connection, book, shelfName):
show_debug = self.is_debugging_title(book.title)
if show_debug:
debug_print('KoboTouch:set_bookshelf book.ContentID="%s"'%book.contentID)
debug_print('KoboTouch:set_bookshelf book.current_shelves="%s"'%book.current_shelves)
if shelfName in book.current_shelves:
if show_debug:
debug_print(' book already on shelf.')
return
test_query = 'SELECT _IsDeleted FROM ShelfContent WHERE ShelfName = ? and ContentId = ?'
test_values = (shelfName, book.contentID, )
addquery = 'INSERT INTO ShelfContent ("ShelfName","ContentId","DateModified","_IsDeleted","_IsSynced") VALUES (?, ?, ?, "false", "false")'
add_values = (shelfName, book.contentID, time.strftime(self.TIMESTAMP_STRING, time.gmtime()), )
updatequery = 'UPDATE ShelfContent SET _IsDeleted = "false" WHERE ShelfName = ? and ContentId = ?'
update_values = (shelfName, book.contentID, )
cursor = connection.cursor()
cursor.execute(test_query, test_values)
result = cursor.fetchone()
if result is None:
if show_debug:
debug_print(' Did not find a record - adding')
cursor.execute(addquery, add_values)
elif result[0] == 'true':
if show_debug:
debug_print(' Found a record - updating - result=', result)
cursor.execute(updatequery, update_values)
connection.commit()
cursor.close()
# debug_print("KoboTouch:set_bookshelf - end")
def check_for_bookshelf(self, connection, bookshelf_name):
show_debug = self.is_debugging_title(bookshelf_name)
if show_debug:
debug_print('KoboTouch:check_for_bookshelf bookshelf_name="%s"'%bookshelf_name)
test_query = 'SELECT InternalName, Name, _IsDeleted FROM Shelf WHERE Name = ?'
test_values = (bookshelf_name, )
addquery = 'INSERT INTO "main"."Shelf"'
add_values = (time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
bookshelf_name,
time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
bookshelf_name,
"false",
"true",
"false",
)
if self.dbversion < 64:
addquery += ' ("CreationDate","InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced")'\
' VALUES (?, ?, ?, ?, ?, ?, ?)'
else:
addquery += ' ("CreationDate", "InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced", "Id")'\
' VALUES (?, ?, ?, ?, ?, ?, ?, ?)'
add_values = add_values +(bookshelf_name,)
if show_debug:
debug_print('KoboTouch:check_for_bookshelf addquery=', addquery)
debug_print('KoboTouch:check_for_bookshelf add_values=', add_values)
updatequery = 'UPDATE Shelf SET _IsDeleted = "false" WHERE Name = ?'
cursor = connection.cursor()
cursor.execute(test_query, test_values)
result = cursor.fetchone()
if result is None:
if show_debug:
debug_print(' Did not find a record - adding shelf "%s"' % bookshelf_name)
cursor.execute(addquery, add_values)
elif result[2] == 'true':
debug_print('KoboTouch:check_for_bookshelf - Shelf "%s" is deleted - undeleting. result[2]="%s"' % (bookshelf_name, unicode(result[2])))
cursor.execute(updatequery, test_values)
connection.commit()
cursor.close()
# Update the bookshelf list.
self.bookshelvelist = self.get_bookshelflist(connection)
# debug_print("KoboTouch:set_bookshelf - end")
def remove_from_bookshelves(self, connection, oncard, ContentID=None, bookshelves=None):
debug_print('KoboTouch:remove_from_bookshelf ContentID=', ContentID)
if not self.supports_bookshelves():
return
query = 'DELETE FROM ShelfContent'
values = []
if ContentID is not None:
query += ' WHERE ContentId = ?'
values.append(ContentID)
else:
if oncard == 'carda':
query += ' WHERE ContentID like \'file:///mnt/sd/%\''
elif oncard != 'carda' and oncard != 'cardb':
query += ' WHERE ContentID not like \'file:///mnt/sd/%\''
if bookshelves:
placeholder = '?'
placeholders = ','.join(placeholder for unused in bookshelves)
query += ' and ShelfName in (%s)' % placeholders
values.append(bookshelves)
debug_print('KoboTouch:remove_from_bookshelf query=', query)
debug_print('KoboTouch:remove_from_bookshelf values=', values)
cursor = connection.cursor()
cursor.execute(query, values)
connection.commit()
cursor.close()
debug_print("KoboTouch:remove_from_bookshelf - end")
def set_series(self, connection, book):
show_debug = self.is_debugging_title(book.title)
if show_debug:
debug_print('KoboTouch:set_series book.kobo_series="%s"'%book.kobo_series)
debug_print('KoboTouch:set_series book.series="%s"'%book.series)
debug_print('KoboTouch:set_series book.series_index=', book.series_index)
if book.series == book.kobo_series:
kobo_series_number = None
if book.kobo_series_number is not None:
try:
kobo_series_number = float(book.kobo_series_number)
except:
kobo_series_number = None
if kobo_series_number == book.series_index:
if show_debug:
debug_print('KoboTouch:set_series - series info the same - not changing')
return
update_query = 'UPDATE content SET Series=?, SeriesNumber==? where BookID is Null and ContentID = ?'
if book.series is None:
update_values = (None, None, book.contentID, )
elif book.series_index is None: # This should never happen, but...
update_values = (book.series, None, book.contentID, )
else:
update_values = (book.series, "%g"%book.series_index, book.contentID, )
cursor = connection.cursor()
try:
if show_debug:
debug_print('KoboTouch:set_series - about to set - parameters:', update_values)
cursor.execute(update_query, update_values)
self.series_set += 1
except:
debug_print(' Database Exception: Unable to set series info')
raise
else:
connection.commit()
cursor.close()
if show_debug:
debug_print("KoboTouch:set_series - end")
@classmethod
def settings(cls):
opts = cls._config().parse()
if isinstance(cls.EXTRA_CUSTOMIZATION_DEFAULT, list):
if opts.extra_customization is None:
opts.extra_customization = []
if not isinstance(opts.extra_customization, list):
opts.extra_customization = [opts.extra_customization]
if len(cls.EXTRA_CUSTOMIZATION_DEFAULT) > len(opts.extra_customization):
extra_options_offset = 0
extra_customization = []
for i,d in enumerate(cls.EXTRA_CUSTOMIZATION_DEFAULT):
if i >= len(opts.extra_customization) + extra_options_offset:
extra_customization.append(d)
elif d.__class__ != opts.extra_customization[i - extra_options_offset].__class__:
extra_options_offset += 1
extra_customization.append(d)
else:
extra_customization.append(opts.extra_customization[i - extra_options_offset])
opts.extra_customization = extra_customization
return opts
def isAura(self):
return self.detected_device.idProduct in self.AURA_PRODUCT_ID
def isAuraHD(self):
return self.detected_device.idProduct in self.AURA_HD_PRODUCT_ID
def isAuraH2O(self):
return self.detected_device.idProduct in self.AURA_H2O_PRODUCT_ID
def isGlo(self):
return self.detected_device.idProduct in self.GLO_PRODUCT_ID
def isGloHD(self):
return self.detected_device.idProduct in self.GLO_HD_PRODUCT_ID
def isMini(self):
return self.detected_device.idProduct in self.MINI_PRODUCT_ID
def isTouch(self):
return self.detected_device.idProduct in self.TOUCH_PRODUCT_ID
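    # Pick the cover size table that matches the detected device model.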
def cover_file_endings(self):
return self.GLO_COVER_FILE_ENDINGS if self.isGlo() or self.isAura() \
else self.AURA_HD_COVER_FILE_ENDINGS if self.isAuraHD() or self.isAuraH2O() or self.isGloHD() \
else self.COVER_FILE_ENDINGS
def set_device_name(self):
device_name = self.gui_name
if self.isAura():
device_name = 'Kobo Aura'
elif self.isAuraHD():
device_name = 'Kobo Aura HD'
elif self.isAuraH2O():
device_name = 'Kobo Aura H2O'
elif self.isGlo():
device_name = 'Kobo Glo'
elif self.isGloHD():
device_name = 'Kobo Glo HD'
elif self.isMini():
device_name = 'Kobo Mini'
elif self.isTouch():
device_name = 'Kobo Touch'
self.__class__.gui_name = device_name
return device_name
def copying_covers(self):
opts = self.settings()
return opts.extra_customization[self.OPT_UPLOAD_COVERS] or opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO]
def keep_cover_aspect(self):
opts = self.settings()
return opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO]
def modifying_epub(self):
return self.modifying_css()
def modifying_css(self):
opts = self.settings()
return opts.extra_customization[self.OPT_MODIFY_CSS]
def supports_bookshelves(self):
return self.dbversion >= self.min_supported_dbversion
def supports_series(self):
return self.dbversion >= self.min_dbversion_series
def supports_kobo_archive(self):
return self.dbversion >= self.min_dbversion_archive
def supports_covers_on_sdcard(self):
return self.dbversion >= self.min_dbversion_images_on_sdcard and self.fwversion >= self.min_fwversion_images_on_sdcard
def supports_images_tree(self):
return self.fwversion >= self.min_fwversion_images_tree
def has_externalid(self):
return self.dbversion >= self.min_dbversion_externalid
def has_activity_table(self):
return self.dbversion >= self.min_dbversion_activity
def modify_database_check(self, function):
# Checks to see whether the database version is supported
# and whether the user has chosen to support the firmware version
# debug_print("KoboTouch:modify_database_check - self.fwversion > self.max_supported_fwversion=", self.fwversion > self.max_supported_fwversion)
if self.dbversion > self.supported_dbversion or self.fwversion > self.max_supported_fwversion:
# Unsupported database
opts = self.settings()
if not opts.extra_customization[self.OPT_SUPPORT_NEWER_FIRMWARE]:
debug_print('The database has been upgraded past supported version')
self.report_progress(1.0, _('Removing books from device...'))
from calibre.devices.errors import UserFeedback
raise UserFeedback(_("Kobo database version unsupported - See details"),
_('Your Kobo is running an updated firmware/database version.'
' As calibre does not know about this updated firmware,'
' database editing is disabled, to prevent corruption.'
' You can still send books to your Kobo with calibre, '
' but deleting books and managing collections is disabled.'
' If you are willing to experiment and know how to reset'
' your Kobo to Factory defaults, you can override this'
' check by right clicking the device icon in calibre and'
' selecting "Configure this device" and then the '
' "Attempt to support newer firmware" option.'
' Doing so may require you to perform a factory reset of'
' your Kobo.') + (
'\nDevice database version: %s.'
'\nDevice firmware version: %s'
) % (self.dbversion, self.fwversion),
UserFeedback.WARN)
return False
else:
# The user chose to edit the database anyway
return True
else:
# Supported database version
return True
@classmethod
def is_debugging_title(cls, title):
if not DEBUG:
return False
# debug_print("KoboTouch:is_debugging - title=", title)
is_debugging = False
opts = cls.settings()
if opts.extra_customization:
debugging_title = opts.extra_customization[cls.OPT_DEBUGGING_TITLE]
is_debugging = len(debugging_title) > 0 and title.lower().find(debugging_title.lower()) >= 0 or len(title) == 0
return is_debugging
def dump_bookshelves(self, connection):
if not (DEBUG and self.supports_bookshelves() and False):
return
debug_print('KoboTouch:dump_bookshelves - start')
shelf_query = 'SELECT * FROM Shelf'
shelfcontent_query = 'SELECT * FROM ShelfContent'
placeholder = '%s'
cursor = connection.cursor()
prints('\nBookshelves on device:')
cursor.execute(shelf_query)
i = 0
for row in cursor:
placeholders = ', '.join(placeholder for unused in row)
prints(placeholders%row)
i += 1
if i == 0:
prints("No shelves found!!")
else:
prints("Number of shelves=%d"%i)
prints('\nBooks on shelves on device:')
cursor.execute(shelfcontent_query)
i = 0
for row in cursor:
placeholders = ', '.join(placeholder for unused in row)
prints(placeholders%row)
i += 1
if i == 0:
prints("No books are on any shelves!!")
else:
prints("Number of shelved books=%d"%i)
cursor.close()
debug_print('KoboTouch:dump_bookshelves - end')
| gpl-3.0 | 6,730,758,367,339,175,000 | 49.074966 | 237 | 0.549732 | false |
ghostshellgnome/clamwin | py/ClamTray.py | 1 | 38009 | #-----------------------------------------------------------------------------
# Name: Tray.py
# Product: ClamWin Free Antivirus
#
# Author: alch [alch at users dot sourceforge dot net]
#
# Created: 2004/19/03
# Copyright: Copyright alch (c) 2005
# Licence:
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#-----------------------------------------------------------------------------
# this code is based on win32gui_taskbar.py demo from Mark Hammond's
# win32 extensions.
import SetUnicode
import RedirectStd
import sys, os, time, tempfile, locale, re, random, types
import win32api, win32gui, win32con, win32event
import win32process, win32event
import Scheduler
import Config
import Process
import EmailAlert
import threading
import Utils, wxDialogScheduledScan
import version
class MainWindow:
MENU_OPEN_CLAM, MENU_UPDATE_DB, MENU_CHECK_UPDATE, MENU_CLAMWIN_WEB, MENU_CONFIGURE, MENU_SHOWSCANLOG, \
MENU_SHOWUPDATELOG, MENU_EXIT, MENU_CONFIGURESCHEDULER,\
MENU_TERMINATESCHEDULE, MENU_RUNSCHEDULE = range(1023, 1023 + 11)
ACTIVE_MUTEX='ClamWinTrayMutex01'
WM_TASKBAR_NOTIFY=win32con.WM_USER+20
WM_CONFIG_UPDATED=win32con.WM_USER+21
WM_SHOW_BALLOON=win32con.WM_USER+22
WM_CHECK_VERSION=win32con.WM_USER+23
def __init__(self, config, logon):
self._config = config
self._schedulers = []
self._scheduledScans = []
self._processes = []
self._balloon_info = None
self._balloonThreadLock = threading.Lock()
self._checkversionattempts = 0
self._dbupdateattempts = 0
self._nomenu = False
self._reschedule_delay = 300
self._scheduleCount = 0
msg_TaskbarRestart = win32gui.RegisterWindowMessage("TaskbarCreated");
message_map = {
msg_TaskbarRestart: self.OnRestart,
win32con.WM_DESTROY: self.OnDestroy,
win32con.WM_COMMAND: self.OnCommand,
MainWindow.WM_TASKBAR_NOTIFY: self.OnTaskbarNotify,
MainWindow.WM_CONFIG_UPDATED : self.OnConfigUpdated,
MainWindow.WM_SHOW_BALLOON : self.OnShowBalloon,
MainWindow.WM_CHECK_VERSION: self.OnCheckVersion,
}
# Register the Window class.
wc = win32gui.WNDCLASS()
hinst = wc.hInstance = win32api.GetModuleHandle(None)
wc.lpszClassName = "ClamWinTrayWindow"
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW;
wc.hCursor = win32gui.LoadCursor( 0, win32con.IDC_ARROW )
wc.hbrBackground = win32con.COLOR_WINDOW
wc.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = win32gui.RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = win32gui.CreateWindow( classAtom, "ClamWin", style, \
0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
0, 0, hinst, None)
win32gui.UpdateWindow(self.hwnd)
# create mutex to prevent further instances
self._hActiveMutex = win32event.CreateMutex(None, True, self.ACTIVE_MUTEX)
self._DoCreateIcons()
self._InitSchedulers(logon)
# start config monitor thread
self._configMonitor = MonitorConfig(self.NotifyConfig, (self.hwnd,))
self._configMonitor.start()
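    # Poll the process with a short (or zero) timeout; a WAIT_TIMEOUT error
    # from the process proxy means it is still running.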
def _IsProcessRunning(self, proc, wait=False):
if wait:
timeout = 5
else:
timeout = 0
try:
proc.wait(timeout)
except Exception, e:
if isinstance(e, Process.ProcessError):
if e.errno == Process.ProcessProxy.WAIT_TIMEOUT:
return True
else:
return False
return False
def _StopProcesses(self):
# check if process is still running
for proc in self._processes:
if self._IsProcessRunning(proc):
# running - kill
proc.kill()
#wait to finish
if self._IsProcessRunning(proc, True):
#still running - complain and terminate
win32gui.MessageBox(self.hwnd, 'Unable to stop scheduled process, terminating', 'ClamWin Free Antivirus', win32con.MB_OK | win32con.MB_ICONSTOP)
os._exit(0)
proc.close()
self._processes = []
def _FindSchedule(self, label):
for scheduler in self._schedulers:
try:
if scheduler.label() == label:
return scheduler
except Exception, e:
                print 'An error occurred whilst finding scheduler label: %i. %s' % (label, str(e))
return None
def _TerminateSchedules(self):
self._StopProcesses()
for scheduler in self._schedulers:
try:
scheduler.stop()
# wait for completion
scheduler.join(2)
except Exception, e:
                print 'An error occurred whilst terminating scheduler thread. Error: %s' % str(e)
self._schedulers = []
def _CreateScheduleLabel(self):
# ensure we return no more that 32 bit signed integer otherwise SendMessage API method complains
if self._scheduleCount >= sys.maxint:
self._scheduleCount = 0
self._scheduleCount = self._scheduleCount + 1
return self._scheduleCount
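    # (Re)create all scheduler threads: an immediate update when "Update on
    # Logon" is set, the periodic database update, one thread per active
    # scheduled scan, and a daily program version check.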
def _InitSchedulers(self, logon=False):
# close all running schedules
self._TerminateSchedules()
# load persistent scheduler
self._scheduledScans = wxDialogScheduledScan.LoadPersistentScheduledScans(
os.path.join(Utils.GetScheduleShelvePath(self._config), 'ScheduledScans'))
# create an update schedule to run now if 'Update on Logon' is selected
if logon and self._config.Get('Updates', 'UpdateOnLogon') == '1':
# set C locale, otherwise python and wxpython complain
locale.setlocale(locale.LC_ALL, 'C')
start_time = time.localtime(time.time() + 120)
weekday = int(time.strftime('%w', start_time))
if weekday: weekday -= 1
else: weekday = 6
scheduler = Scheduler.Scheduler('Once',
time.strftime('%H:%M:%S', start_time),
weekday,
False,
win32gui.SendMessage, (self.hwnd, win32con.WM_COMMAND, self.MENU_UPDATE_DB, 1),
('ClamWin_Scheduler_Info', 'ClamWin_Upadte_Time'))
scheduler.start()
self._schedulers.append(scheduler)
# create a scheduler thread for DB updates
if self._config.Get('Updates', 'Enable') == '1':
label = self._CreateScheduleLabel()
scheduler = Scheduler.Scheduler(self._config.Get('Updates', 'Frequency'),
self._config.Get('Updates', 'Time'),
int(self._config.Get('Updates', 'WeekDay')),
True,
win32gui.SendMessage, (self.hwnd, win32con.WM_COMMAND, self.MENU_UPDATE_DB, label),
('ClamWin_Scheduler_Info', 'ClamWin_Upadte_Time'),
0.5, label)
scheduler.start()
self._schedulers.append(scheduler)
# create scheduler threads for all scheduled scans
for scan in self._scheduledScans:
if scan.Active:
scheduler = Scheduler.Scheduler(scan.Frequency,
scan.Time,
int(scan.WeekDay),
False,
self.ScanPath, (self, scan.Path, scan.Description, scan.ScanMemory))
scheduler.start()
self._schedulers.append(scheduler)
# create scheduler thread for program version check
if self._config.Get('Updates', 'CheckVersion') == '1':
checkvertime = None
try:
f = file(os.path.join(tempfile.gettempdir(), 'ClamWin_CheckVer_Time'), 'r')
t = f.read()
f.close()
if time.time() >= float(t):
checkvertime = time.strftime('%H:%M:%S', time.localtime(float(t)))
except Exception, e:
                print 'An error occurred whilst reading last scheduled run from %s. Error: %s' % ('ClamWin_CheckVer_Time', str(e))
if checkvertime is None:
# 5 minutes to 1 hour after start
checkvertime = time.strftime('%H:%M:%S', time.localtime(time.time() + random.randint(300, 3600)))
print "using random checkversion time %s" % checkvertime
label = self._CreateScheduleLabel()
curDir = Utils.GetCurrentDir(True)
scheduler = Scheduler.Scheduler('Daily', # check once a day
checkvertime,
1, # unused
True,
win32gui.SendMessage, (self.hwnd, MainWindow.WM_CHECK_VERSION, 0, label),
('ClamWin_CheckVer_Info', 'ClamWin_CheckVer_Time'), 0.5, label)
scheduler.start()
self._schedulers.append(scheduler)
def _Terminate(self):
# terminate running threads
self._TerminateSchedules()
if self._configMonitor is not None:
self._configMonitor.stop()
self._configMonitor.join(2)
self._configMonitor = None
def _DoCreateIcons(self):
# Try and find a custom icon
hinst = win32api.GetModuleHandle(None)
iconPathName = os.path.abspath(os.path.join(os.path.split(sys.executable)[0],"img/TrayIcon.ico"))
if not os.path.isfile(iconPathName):
# Look in the current folder tree.
iconPathName = "img/TrayIcon.ico"
if os.path.isfile(iconPathName):
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
hicon = win32gui.LoadImage(hinst, iconPathName, win32con.IMAGE_ICON, 0, 0, icon_flags)
else:
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP
nid = (self.hwnd, 0, flags, MainWindow.WM_TASKBAR_NOTIFY, hicon, "ClamWin Free Antivirus")
win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
def OnRestart(self, hwnd, msg, wparam, lparam):
self._DoCreateIcons()
def OnDestroy(self, hwnd, msg, wparam, lparam):
nid = (self.hwnd, 0)
win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
self._Terminate()
win32event.ReleaseMutex(self._hActiveMutex)
win32api.CloseHandle(self._hActiveMutex)
# Terminate the app.
win32gui.PostQuitMessage(0)
def OnTaskbarNotify(self, hwnd, msg, wparam, lparam):
if lparam==win32con.WM_LBUTTONUP:
pass
elif lparam==win32con.WM_LBUTTONDBLCLK:
self.OnCommand(hwnd, win32con.WM_COMMAND, self.MENU_OPEN_CLAM, 0)
elif lparam==win32con.WM_RBUTTONUP:
if self._nomenu:
hwnd = win32gui.FindWindow('#32770', 'ClamWin Update')
if hwnd:
try:
win32gui.ShowWindow(hwnd, win32con.SW_SHOW)
win32gui.SetForegroundWindow(hwnd)
win32gui.SetFocus(hwnd)
except Exception, e:
print 'ShowWindow Error: %s' % str(e)
return 1
# create scheduler menu
scheduler_popup = win32gui.CreatePopupMenu()
win32gui.AppendMenu(scheduler_popup, win32con.MF_STRING,
self.MENU_CONFIGURESCHEDULER, "&Configure Scheduler")
if not self._processes:
flags = win32con.MF_GRAYED
else:
flags = 0
# create scheduled tasks menu
tasks_popup = win32gui.CreatePopupMenu()
i = 0
for scan in self._scheduledScans:
win32gui.AppendMenu(tasks_popup, win32con.MF_STRING,
self.MENU_RUNSCHEDULE + i, scan.Description)
i+=1
if not i:
flags2 = win32con.MF_GRAYED
else:
flags2 = 0
win32gui.InsertMenu(scheduler_popup, self.MENU_CONFIGURESCHEDULER,
win32con.MF_BYCOMMAND | win32con.MF_POPUP | flags2,
tasks_popup, "&Run Scheduled Scan")
win32gui.InsertMenu(scheduler_popup, flags,
win32con.MF_BYCOMMAND | win32con.MF_STRING | flags,
self.MENU_TERMINATESCHEDULE, "&Stop All Running Tasks Now")
# create reports menu
reports_popup = win32gui.CreatePopupMenu()
if not len(self._config.Get('ClamAV', 'LogFile')):
flags = win32con.MF_GRAYED
else:
flags = 0
win32gui.InsertMenu( reports_popup, 0,
win32con.MF_BYCOMMAND | win32con.MF_STRING | flags,
self.MENU_SHOWSCANLOG, "&Virus Scan Report")
if not len(self._config.Get('Updates', 'DBUpdateLogFile')):
flags = win32con.MF_GRAYED
else:
flags = 0
win32gui.InsertMenu( reports_popup, self.MENU_SHOWSCANLOG,
win32con.MF_BYCOMMAND | win32con.MF_STRING | flags,
self.MENU_SHOWUPDATELOG, "&Virus Database Update Report")
# create main menu
menu = win32gui.CreatePopupMenu()
win32gui.AppendMenu( menu, win32con.MF_STRING, self.MENU_OPEN_CLAM, "&Open ClamWin")
win32gui.AppendMenu( menu, win32con.MF_STRING, self.MENU_UPDATE_DB, "&Download Virus Database Update")
win32gui.AppendMenu( menu, win32con.MF_STRING, self.MENU_CONFIGURE, "&Configure ClamWin")
win32gui.AppendMenu( menu, win32con.MF_POPUP, scheduler_popup, "&Scheduler")
win32gui.AppendMenu( menu, win32con.MF_POPUP, reports_popup, "Display &Reports")
win32gui.AppendMenu( menu, win32con.MF_SEPARATOR, 0, "" )
win32gui.AppendMenu( menu, win32con.MF_STRING, self.MENU_CHECK_UPDATE, "Check &Latest Version")
win32gui.AppendMenu( menu, win32con.MF_STRING, self.MENU_CLAMWIN_WEB, "&Visit ClamWin Website")
win32gui.AppendMenu( menu, win32con.MF_SEPARATOR, 0, "" )
win32gui.AppendMenu( menu, win32con.MF_STRING, self.MENU_EXIT, "&Exit" )
pos = win32gui.GetCursorPos()
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
try:
win32gui.SetForegroundWindow(self.hwnd)
except:
pass
try:
win32gui.SetMenuDefaultItem(menu, 0, 1)
except NameError:
pass
win32gui.TrackPopupMenu(menu, win32con.TPM_LEFTALIGN, pos[0], pos[1], 0, self.hwnd, None)
win32gui.SendMessage(self.hwnd, win32con.WM_NULL, 0, 0)
return 1
def OnCommand(self, hwnd, msg, wparam, lparam):
if self._nomenu:
return
id = win32api.LOWORD(wparam)
if id == self.MENU_OPEN_CLAM:
self._ShowClamWin()
elif id == self.MENU_UPDATE_DB:
self._UpdateDB(lparam)
elif id == self.MENU_CHECK_UPDATE:
self._OpenWebPage('http://www.clamwin.com/index.php?option=content&task=view&id=40&Itemid=60&version='+version.clamwin_version)
elif id == self.MENU_CLAMWIN_WEB:
self._OpenWebPage('http://www.clamwin.com')
elif id == self.MENU_CONFIGURE:
self._ShowConfigure()
elif id == self.MENU_SHOWSCANLOG:
self._ShowLog(self._config.Get('ClamAV', 'LogFile'))
elif id == self.MENU_SHOWUPDATELOG:
self._ShowLog(self._config.Get('Updates', 'DBUpdateLogFile'))
elif id == self.MENU_EXIT:
self.OnExit()
elif id == self.MENU_CONFIGURESCHEDULER:
self._ShowConfigure(True)
elif id == self.MENU_TERMINATESCHEDULE:
self._TerminateSchedules()
self._InitSchedulers()
elif (id >= self.MENU_RUNSCHEDULE) and \
(id < self.MENU_RUNSCHEDULE + len(self._scheduledScans)):
try:
path = self._scheduledScans[id - self.MENU_RUNSCHEDULE].Path
if path[len(path)-1] == '\\':
path = path[:len(path)-1]
self._ShowClamWin(path)
except Exception, e:
win32gui.MessageBox(self.hwnd,
'Could not launch ClamWin Scanner. Error: %s' % str(e),
'ClamWin Free Antivirus', win32con.MB_OK | win32con.MB_ICONERROR)
def OnConfigUpdated(self, hwnd, msg, wparam, lparam):
self._config.Read()
self._InitSchedulers()
def OnShowBalloon(self, hwnd, msg, wparam, lparam):
if self._balloon_info is not None:
try:
Utils.ShowBalloon(wparam, self._balloon_info, self.hwnd)
except Exception, e:
print 'Could not display balloon tooltip. Error: %s' % str(e)
def OnCheckVersion(self, hwnd, msg, wparam, lparam):
current_sched = self._FindSchedule(lparam)
ok = False
online = Utils.IsOnline()
if online:
try:
curDir = Utils.GetCurrentDir(True)
params = (' --mode=checkversion', )
ok = Utils.SpawnPyOrExe(True, os.path.join(curDir, 'ClamWin'), *params) == 0
except Exception, e:
print 'checkversion error: %s' % str(e)
self._nomenu = False
if not ok:
if self._checkversionattempts < 9 or not online:
# reschedule version check in 3 minutes
locale.setlocale(locale.LC_ALL, 'C')
start_time = time.localtime(time.time() + self._reschedule_delay)
weekday = int(time.strftime('%w', start_time))
if weekday: weekday -= 1
else: weekday = 6
print "rescheduling version check at %s" % time.strftime('%H:%M:%S', start_time)
rescheduled = Scheduler.Scheduler('Once',
time.strftime('%H:%M:%S', start_time),
weekday,
False,
win32gui.SendMessage, (self.hwnd, MainWindow.WM_CHECK_VERSION, 0, lparam),
('ClamWin_Scheduler_Info', 'ClamWin_Upadte_Time'))
if current_sched is not None:
current_sched.pause()
rescheduled.start()
self._schedulers.append(rescheduled)
self._checkversionattempts = self._checkversionattempts + 1
else:
# show balloon
if self._config.Get('UI', 'TrayNotify') == '1':
tray_notify_params = (('Unable to get online version. Most likely it\'s a temporary connectivity error and you don\'t have to do anything.\nIf you see this error often then allow clamwin.exe in your firewall and check proxy settings.\n', 0,
win32gui.NIIF_WARNING, 30000), None)
Utils.ShowBalloon(0, tray_notify_params, None, True)
self._checkversionattempts = 0
if current_sched is not None:
current_sched.resume()
else:
self._checkversionattempts = 0
def OnExit(self):
win32gui.DestroyWindow(self.hwnd)
def _ShowLog(self, logfile):
try:
curDir = Utils.GetCurrentDir(True)
params = (' --mode=viewlog', '--path="%s"' % logfile)
Utils.SpawnPyOrExe(False, os.path.join(curDir, 'ClamWin'), *params)
except Exception, e:
            win32gui.MessageBox(self.hwnd, 'An error occurred while displaying log file %s.\nError: %s' % (logfile, str(e)),
'ClamWin Free Antivirus', win32con.MB_OK | win32con.MB_ICONERROR)
def _ShowClamWin(self, path=''):
try:
if path:
params = (' --mode=scanner', ' --path=\"%s\"' % path)
else:
params = (' --mode=main',)
Utils.SpawnPyOrExe(False, os.path.join(Utils.GetCurrentDir(True), 'ClamWin'), *params)
except Exception, e:
            win32gui.MessageBox(self.hwnd, 'An error occurred while starting ClamWin Free Antivirus scanner.\n' + str(e), 'ClamWin Free Antivirus', win32con.MB_OK | win32con.MB_ICONERROR)
def _UpdateDB(self, schedule_label):
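        # An empty schedule_label means the update was requested interactively from
        # the tray menu, so launch the normal update GUI. A non-empty label means we
        # were invoked by the scheduler: run freshclam silently and log to a temp file.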
if not schedule_label:
try:
params = (' --mode=update', ' --config_file="%s"' % self._config.GetFilename(),)
Utils.SpawnPyOrExe(False, os.path.join(Utils.GetCurrentDir(True), 'ClamWin'), *params)
except Exception, e:
                win32gui.MessageBox(self.hwnd, 'An error occurred while starting ClamWin Free Antivirus Update.\n' + str(e), 'ClamWin Free Antivirus', win32con.MB_OK | win32con.MB_ICONERROR)
else: # update virus db silently
if Utils.IsOnline():
freshclam_conf = Utils.SaveFreshClamConf(self._config)
try:
if not len(freshclam_conf):
win32gui.MessageBox(self.hwnd, 'Unable to create freshclam configuration file. Please check there is enough space on the disk', 'Error', win32con.MB_OK | win32con.MB_ICONSTOP)
return
# create database folder before downloading
dbdir = self._config.Get('ClamAV', 'Database')
if dbdir and not os.path.exists(dbdir):
try:
os.makedirs(dbdir)
except:
pass
updatelog = tempfile.mktemp()
cmd = '--stdout --datadir="' + dbdir + '"' + \
' --config-file="%s" --log="%s"' % (freshclam_conf, updatelog)
cmd = '"%s" %s' % (self._config.Get('ClamAV', 'FreshClam'), cmd)
try:
if self._config.Get('UI', 'TrayNotify') == '1':
balloon = (('Virus database has been updated.', 0,
win32gui.NIIF_INFO, 10000),
                            ('An error occurred during Scheduled Virus Database Update. Please review the update report.', 1,
win32gui.NIIF_WARNING, 30000))
else:
balloon = None
proc = self._SpawnProcess(cmd,
'n',
self.DBUpdateProcessFinished,
(self._config.Get('ClamAV', 'Database'),
self._config.Get('Updates', 'DBUpdateLogFile'),
updatelog, False,
balloon, schedule_label))
self._processes.append(proc)
except Process.ProcessError, e:
print 'Unable to spawn scheduled process.\nCommand line: %s\nError: %s' % (cmd , str(e))
try:
os.remove(freshclam_conf)
os.remove(updatelog)
except:
pass
return
# wait 2 seconds for the process to start, then delete
# temp file
try:
proc.wait(2)
except:
pass
os.remove(freshclam_conf)
except Exception, e:
print 'Error performing Scheduled Update.', str(e)
os.remove(freshclam_conf)
else:
self.RescheduleDBUpdate(schedule_label)
def _OpenWebPage(self, url):
try:
import webbrowser
webbrowser.open(url)
except ImportError:
win32gui.MessageBox(self.hwnd, 'Please point your browser at: %s' % url, 'ClamWin Free Antivirus', win32con.MB_OK | win32con.MB_ICONINFORMATION)
def _ShowConfigure(self, switchToSchedule = False):
try:
curDir = Utils.GetCurrentDir(True)
if switchToSchedule:
mode = 'configure_schedule'
else:
mode = 'configure'
params = (' --mode=%s' % mode,
' --config_file="%s"' % self._config.GetFilename(),)
Utils.SpawnPyOrExe(False, os.path.join(curDir, 'ClamWin'), *params)
except Exception, e:
            win32gui.MessageBox(self.hwnd, 'An error occurred while starting ClamWin Free Antivirus Preferences.\n' + str(e), 'ClamWin Free Antivirus', win32con.MB_OK | win32con.MB_ICONERROR)
# returns process and stdout buffer
def _SpawnProcess(self, cmd, proc_priority, finished_func, finished_params):
# initialise environment var TMPDIR
# for clamav
try:
if os.getenv('TMPDIR') is None:
os.putenv('TMPDIR', tempfile.gettempdir())
#Utils.SetCygwinTemp()
except Exception, e:
print str(e)
# check that we got the command line
if cmd is None:
raise Process.ProcessError('Could not start process. No Command Line specified')
# start our process
try:
# check if the file exists first
executable = cmd.split('" ' ,1)[0].lstrip('"')
if not os.path.exists(executable):
raise Process.ProcessError('Could not start process.\n%s\nFile does not exist.' % executable)
out = OutBuffer(self, finished_func, finished_params)
proc = Process.ProcessProxy(cmd, stdout=out, priority=proc_priority)
out.AttachProcess(proc)
proc.wait(0)
except Exception, e:
if isinstance(e, Process.ProcessError):
if e.errno != Process.ProcessProxy.WAIT_TIMEOUT:
raise Process.ProcessError('Could not start process:\n%s\nError: %s' % (cmd, str(e)))
else:
raise Process.ProcessError('Could not start process:\n%s\nError: %s' % (cmd, str(e)))
return proc
def ScanPath(self, path, description, scanMemory):
scanlog = tempfile.mktemp()
path = '"%s"' % path.rstrip('\\').strip('"')
cmd = Utils.GetScanCmd(self._config, path, scanlog, True)
if scanMemory:
cmd += " --memory"
print cmd
try:
if self._config.Get('UI', 'TrayNotify') == '1':
balloon = (('Virus has been detected during scheduled scan! Please review the scan report.', 1,
win32gui.NIIF_ERROR, 30000),
                       ('An error occurred during scheduled scan. Please review the scan report.', 0,
win32gui.NIIF_WARNING, 30000))
else:
balloon = None
try:
priority = self._config.Get('ClamAV', 'Priority')[:1].lower()
except:
priority = 'n'
# clamav stopped writing start time of the scan to the log file
try:
file(scanlog, 'wt').write('\nScan Started %s' % time.ctime(time.time()))
except:
pass
proc = self._SpawnProcess(cmd,
priority,
self.ProcessFinished,
(self._config.Get('ClamAV', 'LogFile'),
scanlog,
self._config.Get('EmailAlerts', 'Enable') == '1',
balloon
))
self._processes.append(proc)
result = 0
except Process.ProcessError, e:
result = -1
try:
os.remove(scanlog)
except:
pass
print str(e)
if self._config.Get('UI', 'TrayNotify') == '1':
balloon_info = (('Running Scheduled Task:\n'+description, 0,
win32gui.NIIF_INFO, 10000),
                    ('An error occurred whilst running Scheduled Task '+description, 1,
win32gui.NIIF_WARNING, 30000))
self.ShowBalloon(result, balloon_info)
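    # ScanPath keeps an explicit first argument even though it is wrapped as a
    # staticmethod below, so callers must pass the MainWindow instance themselves.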
ScanPath = staticmethod(ScanPath)
def NotifyConfig(hwnd):
win32api.SendMessage(hwnd, MainWindow.WM_CONFIG_UPDATED, 0, 0)
NotifyConfig = staticmethod(NotifyConfig)
def RescheduleDBUpdate(self, schedule_label):
current_sched = self._FindSchedule(schedule_label)
        # reschedule the database update after self._reschedule_delay seconds
locale.setlocale(locale.LC_ALL, 'C')
start_time = time.localtime(time.time() + self._reschedule_delay)
weekday = int(time.strftime('%w', start_time))
if weekday: weekday -= 1
else: weekday = 6
print 'rescheduling db update attempt %i at %s' % (self._dbupdateattempts, time.strftime('%H:%M:%S', start_time))
rescheduled = Scheduler.Scheduler('Once',
time.strftime('%H:%M:%S', start_time),
weekday,
False,
win32gui.SendMessage, (self.hwnd, win32con.WM_COMMAND, self.MENU_UPDATE_DB, schedule_label),
('ClamWin_Scheduler_Info', 'ClamWin_Upadte_Time'))
if current_sched is not None:
current_sched.pause()
rescheduled.start()
self._schedulers.append(rescheduled)
self._dbupdateattempts = self._dbupdateattempts + 1
def DBUpdateProcessFinished(self, process, dbpath, log, appendlog, email_alert, balloon_info, schedule_label):
current_sched = self._FindSchedule(schedule_label)
if (not process.isKilled()) and (process.wait() not in (0, 1)):
if self._dbupdateattempts < 9:
self.ProcessFinished(self, process, log, appendlog, None, None)
self.RescheduleDBUpdate(schedule_label)
else:
                print 'self._dbupdateattempts >= 9; displaying an error balloon, ', str(balloon_info)
if current_sched is not None:
current_sched.resume()
self._dbupdateattempts = 0
self.ProcessFinished(self, process, log, appendlog, None, balloon_info)
else:
if current_sched is not None:
current_sched.resume()
self.ProcessFinished(self, process, log, appendlog, None, balloon_info)
DBUpdateProcessFinished = staticmethod(DBUpdateProcessFinished)
def ProcessFinished(self, process, log, appendlog, email_alert, balloon_info):
# send the notification alert if we need to
if email_alert:
try:
if process.wait() == 1 and not process.isKilled():
msg = EmailAlert.ConfigVirusAlertMsg(self._config, (appendlog,))
msg.Send()
except Exception, e:
print 'Could not send email alert. Error: %s' % str(e)
print "Exit Code: ", process.wait()
if (not process.isKilled()) and (balloon_info is not None) and (process.wait() not in (54, 56)):
# show balloon
self.ShowBalloon(process.wait(), balloon_info)
# find and remove our process
try:
self._processes.remove(process)
except ValueError:
# ignore "not in list" errors
pass
time.sleep(1)
maxsize = int(self._config.Get('ClamAV', 'MaxLogSize'))*1048576
Utils.AppendLogFile(log, appendlog, maxsize)
try:
os.remove(appendlog)
except Exception, e:
print 'could not remove file: %s. Error: %s' % (appendlog, str(e))
Utils.CleanupTemp(process.getpid())
ProcessFinished = staticmethod(ProcessFinished)
# send message to the main window thread to display balloon notification
# we need to enclose the call to SendMessage within Lock().acquire()/Lock.release()
# to ensure that correct self._balloon_info is used when 2 threads want to
# display balloons simultaneously
def ShowBalloon(self, result, balloon_info):
self._balloonThreadLock.acquire()
try:
self._balloon_info = balloon_info
win32api.SendMessage(self.hwnd, MainWindow.WM_SHOW_BALLOON, result, 0)
finally:
self._balloon_info = None
self._balloonThreadLock.release()
# stdout buffer used by ProcessProxy to notify main thread
# when execution is complete
class OutBuffer(Process.IOBuffer):
def __init__(self, caller, notify, params):
Process.IOBuffer.__init__(self)
self.notify = notify
self._caller = caller
self._params = params
self._proc = None
# we don't need any input or output here
def _doWrite(self, s):
return
def _doRead(self, n):
return
def write(self, s):
return
def writelines(self, list):
return
def read(self, n=-1):
return
def readline(self, n=None):
return
def readlines(self):
return
def _doClose(self):
if self._proc:
self.notify(self._caller, self._proc, *self._params)
del self._proc
self._proc = None
Process.IOBuffer._doClose(self)
def AttachProcess(self, proc):
self._proc = proc
# this thread monitors changes to config files
# and notifies tray to reload if a change occurs
class MonitorConfig(threading.Thread):
def __init__(self, notify, args):
self.notify = notify
self.args = args
self._terminate = False
threading.Thread.__init__(self)
def __del__(self):
self.stop()
def run(self):
self._terminate = False
try:
hEvent = win32event.CreateEvent(None, True, False, Utils.CONFIG_EVENT)
except win32api.error:
return
while not self._terminate:
wait = win32event.WaitForSingleObject(hEvent, 1000);
if wait != win32event.WAIT_TIMEOUT:
self.notify(*self.args)
def stop(self):
if not self.isAlive():
return
self._terminate = True
def main():
# set C locale, otherwise python and wxpython complain
locale.setlocale(locale.LC_ALL, 'C')
    # get the directory of our executable file
# when running as pyexe built module
currentDir = Utils.GetCurrentDir(True)
os.chdir(currentDir)
Utils.CreateProfile()
# see if we are already running and exit if so
try:
# try to acquire our active mutex
hMutex = win32event.OpenMutex(win32con.SYNCHRONIZE, False, MainWindow.ACTIVE_MUTEX)
# could open it - most likely another window is active
# just to be sure wait for it to see if it is claimed
if win32event.WaitForSingleObject(hMutex, 0) == win32event.WAIT_TIMEOUT:
# mutex is claimed, another window is already running - terminate
return
win32api.CloseHandle(hMutex)
except win32api.error:
pass
conf_file = None
for arg in sys.argv[1:]:
if arg.find('--config_file=') == 0:
conf_file = Utils.SafeExpandEnvironmentStrings(arg[len('--config_file='):])
if conf_file is None:
conf_file = os.path.join(Utils.GetProfileDir(True),'ClamWin.conf')
if not os.path.isfile(conf_file):
conf_file = 'ClamWin.conf'
config = Config.Settings(conf_file)
config.Read()
logon = False
for arg in sys.argv[1:]:
if arg == '--logon':
logon = True
w=MainWindow(config, logon)
win32gui.PumpMessages()
if __name__=='__main__':
main()
| gpl-2.0 | 3,308,064,944,882,480,000 | 41.839677 | 260 | 0.545765 | false |
gfyoung/pandas | pandas/tests/io/pytables/test_complex.py | 1 | 6374 | from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.tests.io.pytables.common import ensure_clean_path, ensure_clean_store
from pandas.io.pytables import read_hdf
# TODO(ArrayManager) HDFStore relies on accessing the blocks
pytestmark = td.skip_array_manager_not_yet_implemented
def test_complex_fixed(setup_path):
df = DataFrame(
np.random.rand(4, 5).astype(np.complex64),
index=list("abcd"),
columns=list("ABCDE"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
df = DataFrame(
np.random.rand(4, 5).astype(np.complex128),
index=list("abcd"),
columns=list("ABCDE"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
def test_complex_table(setup_path):
df = DataFrame(
np.random.rand(4, 5).astype(np.complex64),
index=list("abcd"),
columns=list("ABCDE"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
df = DataFrame(
np.random.rand(4, 5).astype(np.complex128),
index=list("abcd"),
columns=list("ABCDE"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", mode="w")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
def test_complex_mixed_fixed(setup_path):
complex64 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
)
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
)
df = DataFrame(
{
"A": [1, 2, 3, 4],
"B": ["a", "b", "c", "d"],
"C": complex64,
"D": complex128,
"E": [1.0, 2.0, 3.0, 4.0],
},
index=list("abcd"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
def test_complex_mixed_table(setup_path):
complex64 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
)
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
)
df = DataFrame(
{
"A": [1, 2, 3, 4],
"B": ["a", "b", "c", "d"],
"C": complex64,
"D": complex128,
"E": [1.0, 2.0, 3.0, 4.0],
},
index=list("abcd"),
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["A", "B"])
result = store.select("df", where="A>2")
tm.assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
def test_complex_across_dimensions_fixed(setup_path):
with catch_warnings(record=True):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
df = DataFrame({"A": s, "B": s})
objs = [s, df]
comps = [tm.assert_series_equal, tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(setup_path) as path:
obj.to_hdf(path, "obj", format="fixed")
reread = read_hdf(path, "obj")
comp(obj, reread)
def test_complex_across_dimensions(setup_path):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
df = DataFrame({"A": s, "B": s})
with catch_warnings(record=True):
objs = [df]
comps = [tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(setup_path) as path:
obj.to_hdf(path, "obj", format="table")
reread = read_hdf(path, "obj")
comp(obj, reread)
def test_complex_indexing_error(setup_path):
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
)
df = DataFrame(
{"A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex128},
index=list("abcd"),
)
msg = (
"Columns containing complex values can be stored "
"but cannot be indexed when using table format. "
"Either use fixed format, set index=False, "
"or do not include the columns containing complex "
"values to data_columns when initializing the table."
)
with ensure_clean_store(setup_path) as store:
with pytest.raises(TypeError, match=msg):
store.append("df", df, data_columns=["C"])
def test_complex_series_error(setup_path):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
msg = (
"Columns containing complex values can be stored "
"but cannot be indexed when using table format. "
"Either use fixed format, set index=False, "
"or do not include the columns containing complex "
"values to data_columns when initializing the table."
)
with ensure_clean_path(setup_path) as path:
with pytest.raises(TypeError, match=msg):
s.to_hdf(path, "obj", format="t")
with ensure_clean_path(setup_path) as path:
s.to_hdf(path, "obj", format="t", index=False)
reread = read_hdf(path, "obj")
tm.assert_series_equal(s, reread)
def test_complex_append(setup_path):
df = DataFrame(
{"a": np.random.randn(100).astype(np.complex128), "b": np.random.randn(100)}
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["b"])
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(pd.concat([df, df], 0), result)
| bsd-3-clause | -5,007,679,795,284,820,000 | 30.554455 | 84 | 0.562441 | false |
SelfDrivUTT/selfdrivutt | robot/raspberry/controls.py | 1 | 3292 | import socket
import sys
import os
import curses
from threading import Thread
class RemoteControlServer(object):
"""docstring for Curses_control"""
def __init__(self):
        super(RemoteControlServer, self).__init__()
self.data = ''
self.stopped = False
self.HOST = os.environ.get('COMMAND_HOST', 'localhost')
self.PORT = os.environ.get('COMMAND_PORT', 9089)
def start(self):
self.socket_server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print('Socket created')
self.socket_server.bind((self.HOST, self.PORT))
print('Socket bind complete')
self.socket_server.listen(10)
print('Socket now listening')
self.conn, self.addr = self.socket_server.accept() # Accept the connection once (for starter)
print('Connected with ' + self.addr[0] + ':' + str(self.addr[1]))
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
try:
self.data = self.conn.recv(1024)
self.conn.send(self.data)
print(self.data)
if self.data == 27:
self.stop()
return
except socket.error as e:
print(e)
self.stop()
return
else:
if len(self.data) == 0:
                    print('orderly shutdown on server end')
self.stop()
else:
print(self.data)
def read(self):
return self.data
def stop(self):
self.stopped = True
self.conn.close()
self.socket_server.close()
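# Rough usage sketch (assumes a client connects to COMMAND_HOST:COMMAND_PORT and
# sends plain-text commands; handle() below is illustrative, not part of this module):
#
#   remote = RemoteControlServer().start()   # blocks until a client connects
#   while not remote.stopped:
#       handle(remote.read())                # latest payload received from the client
#   remote.stop()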
class CursesControl(object):
"""docstring for Curses_control"""
def __init__(self):
super(CursesControl, self).__init__()
# self.screen.nodelay()
self.event = 'unload'
self.stopped = False
def start(self):
self.screen = curses.initscr()
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
try:
curses.noecho()
curses.curs_set(0)
self.screen.keypad(1)
self.screen.addstr("Press a key, " + str(self.event))
self.event = self.screen.getch()
finally:
curses.endwin()
if self.stopped or self.event == 27:
return
def read(self):
if self.event == curses.KEY_LEFT:
command = 'left'
elif self.event == curses.KEY_RIGHT:
command = 'right'
elif self.event == curses.KEY_UP:
command = 'up'
elif self.event == curses.KEY_DOWN:
command = 'down'
elif self.event == 32: # SPACE
command = 'stop'
elif self.event == 27: # ESC key
command = 'quit'
elif self.event == ord('p'): # P key
command = 'auto_logic_based'
elif self.event == ord('o'): # O key
command = 'stream'
elif self.event == ord('m'): # O key
command = 'auto_neural_network'
else:
command = '?'
return command
def stop(self):
self.stopped = True
| mit | 2,659,452,182,422,635,000 | 28.132743 | 101 | 0.512151 | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/434fb0f05794_add_ignore_and_dev_note_to_genomics_.py | 1 | 1673 | """add ignore and dev note to genomics models.
Revision ID: 434fb0f05794
Revises: 994dfe6e53ee
Create Date: 2020-09-30 14:39:16.244636
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '434fb0f05794'
down_revision = '994dfe6e53ee'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('genomic_gc_validation_metrics', sa.Column('dev_note', sa.String(length=255), nullable=True))
op.add_column('genomic_gc_validation_metrics', sa.Column('ignore_flag', sa.SmallInteger(), nullable=True))
op.add_column('genomic_set_member', sa.Column('dev_note', sa.String(length=255), nullable=True))
op.add_column('genomic_set_member_history', sa.Column('dev_note', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('genomic_set_member', 'dev_note')
op.drop_column('genomic_set_member_history', 'dev_note')
op.drop_column('genomic_gc_validation_metrics', 'ignore_flag')
op.drop_column('genomic_gc_validation_metrics', 'dev_note')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| bsd-3-clause | -7,922,045,653,497,436,000 | 28.875 | 111 | 0.679617 | false |
dannyroberts/eulxml | eulxml/xmlmap/premis.py | 1 | 5516 | # file eulxml/xmlmap/premis.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:mod:`eulxml.xmlmap` classes for dealing with the `PREMIS
<http://www.loc.gov/standards/premis/>`_ metadata format for
preservation metadata.
-----
'''
from eulxml import xmlmap
PREMIS_NAMESPACE = 'info:lc/xmlns/premis-v2'
'authoritative namespace for PREMIS'
PREMIS_SCHEMA = 'http://www.loc.gov/standards/premis/v2/premis-v2-1.xsd'
'authoritative schema location for PREMIS'
class BasePremis(xmlmap.XmlObject):
'''Base PREMIS class with namespace declaration common to all PREMIS
XmlObjects.
.. Note::
This class is intended mostly for internal use, but could be
useful when extending or adding additional PREMIS
:class:`~eulxml.xmlmap.XmlObject` classes. The
:attr:`PREMIS_NAMESPACE` is mapped to the prefix **p**.
'''
ROOT_NS = PREMIS_NAMESPACE
ROOT_NAMESPACES = {
'p': PREMIS_NAMESPACE,
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
class PremisRoot(BasePremis):
'''Base class with a schema declaration for any of the
root/stand-alone PREMIS elements:
* ``<premis>`` - :class:`Premis`
* ``<object>`` - :class:`Object`
* ``<event>`` - :class:`Event`
* ``<agent>``
* ``<rights>``
'''
XSD_SCHEMA = PREMIS_SCHEMA
class Object(PremisRoot):
'''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS
object.
    Currently only includes the minimal required fields.
'''
ROOT_NAME = 'object'
type = xmlmap.StringField('@xsi:type') # file, representation, bitstream
'''type of object (e.g., file, representation, bitstream).
.. Note::
To be schema valid, object types must be in the PREMIS namespace, e.g.::
from eulxml.xmlmap import premis
obj = premis.Object()
obj.type = "p:file"
'''
id_type = xmlmap.StringField('p:objectIdentifier/p:objectIdentifierType')
'identifier type (`objectIdentifier/objectIdentifierType`)'
id = xmlmap.StringField('p:objectIdentifier/p:objectIdentifierValue')
'identifier value (`objectIdentifier/objectIdentifierValue`)'
class Event(PremisRoot):
'''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS
event.
.. Note::
The PREMIS schema requires that elements occur in a specified
order, which :mod:`eulxml` does not currently handle or manage.
As a work-around, when creating a new :class:`Event` from
scratch, you should set the following required fields in this
        order: identifier (:attr:`id` and :attr:`id_type`), then :attr:`type` and :attr:`date`.
'''
ROOT_NAME = 'event'
type = xmlmap.StringField('p:eventType')
'event type (``eventType``)'
id_type = xmlmap.StringField('p:eventIdentifier/p:eventIdentifierType')
'identifier type (`eventIdentifier/eventIdentifierType`)'
id = xmlmap.StringField('p:eventIdentifier/p:eventIdentifierValue')
'identifier value (`eventIdentifier/eventIdentifierValue`)'
date = xmlmap.StringField('p:eventDateTime')
'date/time for the event (`eventDateTime`)'
detail = xmlmap.StringField('p:eventDetail', required=False)
'event detail (`eventDetail`)'
outcome = xmlmap.StringField('p:eventOutcomeInformation/p:eventOutcome', required=False)
'''outcome of the event (`eventOutcomeInformation/eventOutcome`).
.. Note::
In this preliminary implementation, the outcome detail fields
are not mapped.
'''
# leaving out outcome detail for now...
# agent (optional, could be repeated)
agent_type = xmlmap.StringField('p:linkingAgentIdentifier/p:linkingAgentIdentifierType')
agent_id = xmlmap.StringField('p:linkingAgentIdentifier/p:linkingAgentIdentifierValue')
# object (optional, could be repeated)
object_type = xmlmap.StringField('p:linkingObjectIdentifier/p:linkingObjectIdentifierType')
object_id = xmlmap.StringField('p:linkingObjectIdentifier/p:linkingObjectIdentifierValue')
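# Minimal usage sketch (identifier values are illustrative; field keyword arguments
# and serialize() come from the underlying :class:`~eulxml.xmlmap.XmlObject`):
#
#   event = Event(id_type='local', id='event:1', type='ingestion',
#                 date='2011-01-01T00:00:00')
#   xml = event.serialize(pretty=True)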
class Premis(PremisRoot):
'''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS
container element that can contain any of the other top-level
PREMIS elements.
    Currently only includes mappings for a single object and list of
events.
'''
ROOT_NAME = 'premis'
version = xmlmap.StringField('@version')
'''Version of PREMIS in use; by default, new instances of
:class:`Premis` will be initialized with a version of 2.1'''
object = xmlmap.NodeField('p:object', Object)
'a single PREMIS :class:`object`'
events = xmlmap.NodeListField('p:event', Event)
'list of PREMIS events, as instances of :class:`Event`'
def __init__(self, *args, **kwargs):
# version is required for schema-validity; don't override a
# user-supplied version, but otherwise default to 2.1
if 'version' not in kwargs:
kwargs['version'] = '2.1'
super(Premis, self).__init__(*args, **kwargs)
| apache-2.0 | 2,107,150,789,629,381,400 | 36.020134 | 95 | 0.688724 | false |
mightbejosh/dj-braintree | djbraintree/admin.py | 1 | 6860 | # -*- coding: utf-8 -*-
"""
Note: Django 1.4 support was dropped in #107
https://github.com/pydanny/dj-braintree/pull/107
"""
from django.contrib import admin
from .models import Transaction
from .models import Customer
class CustomerHasCardListFilter(admin.SimpleListFilter):
title = "card presence"
parameter_name = "has_card"
def lookups(self, request, model_admin):
return [
["yes", "Has Card"],
["no", "Does Not Have a Card"]
]
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.exclude(card_fingerprint="")
if self.value() == "no":
return queryset.filter(card_fingerprint="")
class InvoiceCustomerHasCardListFilter(admin.SimpleListFilter):
title = "card presence"
parameter_name = "has_card"
def lookups(self, request, model_admin):
return [
["yes", "Has Card"],
["no", "Does Not Have a Card"]
]
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.exclude(customer__card_fingerprint="")
if self.value() == "no":
return queryset.filter(customer__card_fingerprint="")
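# A minimal registration sketch in the same style as the commented-out blocks below
# (assumes Customer still exposes the card_fingerprint field the filter relies on):
#
# admin.site.register(
#     Customer,
#     list_filter=[CustomerHasCardListFilter],
# )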
#
# class CustomerSubscriptionStatusListFilter(admin.SimpleListFilter):
# title = "subscription status"
# parameter_name = "sub_status"
#
# def lookups(self, request, model_admin):
# statuses = [
# [x, x.replace("_", " ").title()]
# for x in CurrentSubscription.objects.all().values_list(
# "status",
# flat=True
# ).distinct()
# ]
# statuses.append(["none", "No Subscription"])
# return statuses
#
# def queryset(self, request, queryset):
# if self.value() is None:
# return queryset.all()
# else:
# return queryset.filter(current_subscription__status=self.value())
#
#
# def send_charge_receipt(modeladmin, request, queryset):
# """
# Function for sending receipts from the admin if a receipt is not sent for
# a specific charge.
# """
# for charge in queryset:
# charge.send_receipt()
#
#
# admin.site.register(
# Charge,
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "customer",
# "amount",
# "description",
# "paid",
# "disputed",
# "refunded",
# "fee",
# "receipt_sent",
# "created"
# ],
# search_fields=[
# "braintree_id",
# "customer__braintree_id",
# "card_last_4",
# "invoice__braintree_id"
# ],
# list_filter=[
# "paid",
# "disputed",
# "refunded",
# "card_kind",
# "created"
# ],
# raw_id_fields=[
# "customer",
# "invoice"
# ],
# actions=(send_charge_receipt,),
# )
#
# admin.site.register(
# EventProcessingException,
# readonly_fields=('created',),
# list_display=[
# "message",
# "event",
# "created"
# ],
# search_fields=[
# "message",
# "traceback",
# "data"
# ],
# )
#
# admin.site.register(
# Event,
# raw_id_fields=["customer"],
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "kind",
# "livemode",
# "valid",
# "processed",
# "created"
# ],
# list_filter=[
# "kind",
# "created",
# "valid",
# "processed"
# ],
# search_fields=[
# "braintree_id",
# "customer__braintree_id",
# "validated_message"
# ],
# )
#
#
# class CurrentSubscriptionInline(admin.TabularInline):
# model = CurrentSubscription
#
#
# def subscription_status(obj):
# return obj.current_subscription.status
# subscription_status.short_description = "Subscription Status"
#
#
# admin.site.register(
# Customer,
# raw_id_fields=["subscriber"],
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "subscriber",
# "card_kind",
# "card_last_4",
# subscription_status,
# "created"
# ],
# list_filter=[
# "card_kind",
# CustomerHasCardListFilter,
# CustomerSubscriptionStatusListFilter
# ],
# search_fields=[
# "braintree_id"
# ],
# inlines=[CurrentSubscriptionInline]
# )
#
#
# class InvoiceItemInline(admin.TabularInline):
# model = InvoiceItem
#
#
# def customer_has_card(obj):
# """ Returns True if the customer has a card attached to its account."""
# return obj.customer.card_fingerprint != ""
# customer_has_card.short_description = "Customer Has Card"
#
#
# def customer_email(obj):
# """ Returns a string representation of the customer's email."""
# return str(obj.customer.subscriber.email)
# customer_email.short_description = "Customer"
#
#
# admin.site.register(
# Invoice,
# raw_id_fields=["customer"],
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "paid",
# "closed",
# customer_email,
# customer_has_card,
# "period_start",
# "period_end",
# "subtotal",
# "total",
# "created"
# ],
# search_fields=[
# "braintree_id",
# "customer__braintree_id"
# ],
# list_filter=[
# InvoiceCustomerHasCardListFilter,
# "paid",
# "closed",
# "attempted",
# "attempts",
# "created",
# "date",
# "period_end",
# "total"
# ],
# inlines=[InvoiceItemInline]
# )
#
#
# admin.site.register(
# Transfer,
# raw_id_fields=["event"],
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "amount",
# "status",
# "date",
# "description",
# "created"
# ],
# search_fields=[
# "braintree_id",
# "event__braintree_id"
# ]
# )
#
#
# class PlanAdmin(admin.ModelAdmin):
#
# def save_model(self, request, obj, form, change):
# """Update or create objects using our custom methods that
# sync with Braintree."""
#
# if change:
# obj.update_name()
#
# else:
# Plan.get_or_create(**form.cleaned_data)
#
# def get_readonly_fields(self, request, obj=None):
# readonly_fields = list(self.readonly_fields)
# if obj:
# readonly_fields.extend([
# 'braintree_id',
# 'amount',
# 'currency',
# 'interval',
# 'interval_count',
# 'trial_period_days'])
#
# return readonly_fields
#
# admin.site.register(Plan, PlanAdmin)
| bsd-3-clause | 6,652,920,508,881,095,000 | 23.326241 | 79 | 0.525364 | false |
ericchill/gnofract4d | fract4d/fc.py | 1 | 16906 | #!/usr/bin/env python
# A compiler from UltraFractal or Fractint formula files to C code
# The UltraFractal manual is the best current description of the file
# format. You can download it from http://www.ultrafractal.com/uf3-manual.zip
# The implementation is based on the outline in "Modern Compiler
# Implementation in ML: basic techniques" (Appel 1997, Cambridge)
# Overall structure:
# fractlexer.py and fractparser.py are the lexer and parser, respectively.
# They use the PLY package to do lexing and SLR parsing, and produce as
# output an abstract syntax tree (defined in the Absyn module).
# The Translate module type-checks the code, maintains the symbol
# table (symbol.py) and converts it into an intermediate form (ir.py)
# Canon performs several simplifying passes on the IR to make it easier
# to deal with, then codegen converts it into a linear sequence of
# simple C instructions
# Finally we invoke the C compiler to convert to a native code shared library
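# A rough interactive sketch of that pipeline using the helpers defined below
# (the formula file and formula name are placeholders, not shipped examples):
#
#   fc = Compiler()
#   fc.add_func_path("formulas")
#   form = fc.get_formula("example.frm", "Mandelbrot")
#   cg = fc.compile(form)
#   sofile = fc.generate_code(form, cg)   # writes the C code and builds the .so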
import getopt
import sys
import commands
import os.path
import stat
import random
import hashlib
import re
import copy
import fractconfig
import fractparser
import fractlexer
import translate
import codegen
import fracttypes
import absyn
import preprocessor
import cache
import gradient
class FormulaTypes:
FRACTAL = 0
COLORFUNC = 1
TRANSFORM = 2
GRADIENT = 3
NTYPES = 4
GRAD_UGR=0
GRAD_MAP=1
GRAD_GGR=2
GRAD_CS=3
matches = [
re.compile(r'(\.frm\Z)|(\.ufm\Z)', re.IGNORECASE),
re.compile(r'(\.cfrm\Z)|(\.ucl\Z)', re.IGNORECASE),
re.compile(r'\.uxf\Z', re.IGNORECASE),
re.compile(r'(\.ugr\Z)|(\.map\Z)|(\.ggr\Z)|(\.cs\Z)|(\.pal\Z)', re.IGNORECASE)
]
# indexed by FormulaTypes above
extensions = [ "frm", "cfrm", "uxf", "ggr", "pal"]
@staticmethod
def extension_from_type(t):
return FormulaTypes.extensions[t]
@staticmethod
def guess_type_from_filename(filename):
if FormulaTypes.matches[FormulaTypes.FRACTAL].search(filename):
return translate.T
elif FormulaTypes.matches[FormulaTypes.COLORFUNC].search(filename):
return translate.ColorFunc
elif FormulaTypes.matches[FormulaTypes.TRANSFORM].search(filename):
return translate.Transform
elif FormulaTypes.matches[FormulaTypes.GRADIENT].search(filename):
return translate.GradientFunc
@staticmethod
def guess_formula_type_from_filename(filename):
for i in xrange(FormulaTypes.NTYPES):
if FormulaTypes.matches[i].search(filename):
return i
raise ValueError("Unknown file type for '%s'" % filename)
@staticmethod
def guess_gradient_subtype_from_filename(filename):
filename = filename.lower()
if filename.endswith(".ugr"):
return FormulaTypes.GRAD_UGR
if filename.endswith(".map") or filename.endswith(".pal"):
return FormulaTypes.GRAD_MAP
if filename.endswith(".ggr"):
return FormulaTypes.GRAD_GGR
if filename.endswith(".cs"):
return FormulaTypes.GRAD_CS
raise ValueError("Unknown gradient type for '%s'" % filename)
@staticmethod
def isFormula(filename):
for matcher in FormulaTypes.matches:
if matcher.search(filename):
return True
return False
class FormulaFile:
def __init__(self, formulas, contents,mtime,filename):
self.formulas = formulas
self.contents = contents
self.mtime = mtime
self.filename = filename
self.file_backed = True
def out_of_date(self):
return self.file_backed and \
os.stat(self.filename)[stat.ST_MTIME] > self.mtime
def get_formula(self,formula):
return self.formulas.get(formula)
def get_formula_names(self, skip_type=None):
'''return all the coloring funcs except those marked as only suitable
for the OTHER kind (inside vs outside)'''
names = []
for name in self.formulas.keys():
sym = self.formulas[name].symmetry
if sym == None or sym == "BOTH" or sym != skip_type:
names.append(name)
return names
class Compiler:
def __init__(self):
self.parser = fractparser.parser
self.lexer = fractlexer.lexer
self.c_code = ""
self.path_lists = [ [], [], [], [] ]
self.cache = cache.T()
self.cache_dir = os.path.expanduser("~/.gnofract4d-cache/")
self.init_cache()
if 'win' != sys.platform[:3]:
self.compiler_name = "gcc"
self.flags = "-fPIC -DPIC -g -O3 -shared"
self.output_flag = "-o "
self.libs = "-lm"
else:
self.compiler_name = "cl"
self.flags = "/EHsc /Gd /nologo /W3 /LD /MT /TP /DWIN32 /DWINDOWS /D_USE_MATH_DEFINES"
self.output_flag = "/Fe"
self.libs = "/link /LIBPATH:\"%s/fract4d\" fract4d_stdlib.lib" % sys.path[0] # /DELAYLOAD:fract4d_stdlib.pyd DelayImp.lib
self.tree_cache = {}
self.leave_dirty = False
self.next_inline_number = 0
def _get_files(self):
return self.cache.files
files = property(_get_files)
def update_from_prefs(self,prefs):
self.compiler_name = prefs.get("compiler","name")
self.flags = prefs.get("compiler","options")
self.set_func_path_list(prefs.get_list("formula_path"))
self.path_lists[FormulaTypes.GRADIENT] = copy.copy(
prefs.get_list("map_path"))
def set_flags(self,flags):
self.flags = flags
def add_path(self,path,type):
self.path_lists[type].append(path)
def add_func_path(self,path):
self.path_lists[FormulaTypes.FRACTAL].append(path)
self.path_lists[FormulaTypes.COLORFUNC].append(path)
self.path_lists[FormulaTypes.TRANSFORM].append(path)
def set_func_path_list(self,list):
self.path_lists[FormulaTypes.FRACTAL] = copy.copy(list)
self.path_lists[FormulaTypes.COLORFUNC] = copy.copy(list)
self.path_lists[FormulaTypes.TRANSFORM] = copy.copy(list)
def init_cache(self):
self.cache.init()
def find_files(self,type):
files = {}
for dir in self.path_lists[type]:
if not os.path.isdir(dir):
continue
for file in os.listdir(dir):
if os.path.isfile(os.path.join(dir,file)):
files[file] = 1
return files.keys()
def find_files_of_type(self,type):
matcher = FormulaTypes.matches[type]
return [file for file in self.find_files(type)
if matcher.search(file)]
def find_formula_files(self):
return self.find_files_of_type(FormulaTypes.FRACTAL)
def find_colorfunc_files(self):
return self.find_files_of_type(FormulaTypes.COLORFUNC)
def find_transform_files(self):
return self.find_files_of_type(FormulaTypes.TRANSFORM)
def get_text(self,fname):
file = self.files.get(fname)
if not file:
self.load_formula_file(fname)
return self.files[fname].contents
def nextInlineFile(self,type):
self.next_inline_number += 1
ext = FormulaTypes.extension_from_type(type)
return "__inline__%d.%s" % (self.next_inline_number, ext)
def add_inline_formula(self,formbody, formtype):
# formbody contains a string containing the contents of a formula
formulas = self.parse_file(formbody)
fname = self.nextInlineFile(formtype)
ff = FormulaFile(formulas,formbody,0,fname)
ff.file_backed = False
self.files[fname] = ff
names = ff.get_formula_names()
if len(names) == 0:
formName = "error"
else:
formName = names[0]
return (fname, formName)
def last_chance(self,filename):
'''does nothing here, but can be overridden by GUI to prompt user.'''
raise IOError("Can't find formula file %s in formula search path" % \
filename)
def compile_one(self,formula):
self.compile(formula)
t = translate.T(absyn.Formula("",[],-1))
cg = self.compile(t)
t.merge(formula,"")
outputfile = os.path.abspath(self.generate_code(t, cg))
return outputfile
def compile_all(self,formula,cf0,cf1,transforms,options={}):
self.compile(formula,options)
self.compile(cf0,options)
self.compile(cf1,options)
for transform in transforms:
self.compile(transform,options)
# create temp empty formula and merge everything into that
t = translate.T(absyn.Formula("",[],-1))
cg = self.compile(t,options)
t.merge(formula,"")
t.merge(cf0,"cf0_")
t.merge(cf1,"cf1_")
for transform in transforms:
t.merge(transform,"t_")
outputfile = os.path.abspath(self.generate_code(t, cg))
return outputfile
def find_file(self,filename,type):
if os.path.exists(filename):
dir = os.path.dirname(filename)
if self.path_lists[type].count(dir) == 0:
# add directory to search path
self.path_lists[type].append(dir)
return filename
filename = os.path.basename(filename)
for path in self.path_lists[type]:
f = os.path.join(path,filename)
if os.path.exists(f):
return f
return self.last_chance(filename)
def add_endlines(self,result,final_line):
"Add info on which is the final source line of each formula"
if None == result:
return
l = len(result.children)
for i in xrange(l):
if i == l - 1:
result.children[i].last_line = final_line
else:
result.children[i].last_line = result.children[i+1].pos-1
def parse_file(self,s):
self.lexer.lineno = 1
result = None
try:
pp = preprocessor.T(s)
result = self.parser.parse(pp.out())
except preprocessor.Error, err:
# create an Error formula listing the problem
result = self.parser.parse('error {\n}\n')
result.children[0].children[0] = \
absyn.PreprocessorError(str(err), -1)
#print result.pretty()
self.add_endlines(result,self.lexer.lineno)
formulas = {}
for formula in result.children:
formulas[formula.leaf] = formula
return formulas
def load_formula_file(self, filename):
try:
type = FormulaTypes.guess_formula_type_from_filename(filename)
filename = self.find_file(filename,type)
s = open(filename,"r").read() # read in a whole file
basefile = os.path.basename(filename)
mtime = os.stat(filename)[stat.ST_MTIME]
if type == FormulaTypes.GRADIENT:
# don't try and parse gradient files apart from UGRs
subtype = FormulaTypes.guess_gradient_subtype_from_filename(filename)
if subtype == FormulaTypes.GRAD_UGR:
formulas = self.parse_file(s)
else:
formulas = {}
else:
formulas = self.parse_file(s)
ff = FormulaFile(formulas,s,mtime,filename)
self.files[basefile] = ff
return ff
except Exception, err:
#print "Error parsing '%s' : %s" % (filename, err)
raise
def out_of_date(self,filename):
basefile = os.path.basename(filename)
ff = self.files.get(basefile)
if not ff:
self.load_formula_file(filename)
ff = self.files.get(basefile)
return ff.out_of_date()
def get_file(self,filename):
basefile = os.path.basename(filename)
ff = self.files.get(basefile)
if not ff:
self.load_formula_file(filename)
ff = self.files.get(basefile)
elif ff.out_of_date():
self.load_formula_file(filename)
ff = self.files.get(basefile)
return ff
def get_formula_text(self,filename,formname):
ff = self.get_file(filename)
form = ff.get_formula(formname)
start_line = form.pos-1
last_line = form.last_line
lines = ff.contents.splitlines()
return "\n".join(lines[start_line:last_line])
def is_inline(self,filename, formname):
return not self.files[filename].file_backed
def compile(self,ir,options={}):
cg = codegen.T(ir.symbols,options)
cg.output_all(ir)
return cg
def hashcode(self,c_code):
hash = hashlib.md5()
hash.update(c_code)
hash.update(self.compiler_name)
hash.update(self.flags)
hash.update(self.libs)
return hash.hexdigest()
def generate_code(self,ir, cg, outputfile=None,cfile=None):
cg.output_decls(ir)
self.c_code = cg.output_c(ir)
hash = self.hashcode(self.c_code)
if outputfile == None:
outputfile = self.cache.makefilename(hash,".so")
if os.path.exists(outputfile):
# skip compilation - we already have this code
return outputfile
if cfile == None:
cfile = self.cache.makefilename(hash,".c")
        if 'win' == sys.platform[:3]:
objfile = self.cache.makefilename(hash, ".obj")
open(cfile,"w").write(self.c_code)
# -march=i686 for 10% speed gain
cmd = "%s \"%s\" %s %s\"%s\"" % \
(self.compiler_name, cfile, self.flags, self.output_flag, outputfile)
if 'win' == sys.platform[:3]:
cmd += " /Fo\"%s\"" % objfile
cmd += " %s" % self.libs
#print "cmd: %s" % cmd
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
raise fracttypes.TranslationError(
"Error reported by C compiler:%s" % output)
return outputfile
def get_parsetree(self,filename,formname):
ff = self.get_file(filename)
if ff == None : return None
return ff.get_formula(formname)
def guess_type_from_filename(self,filename):
return FormulaTypes.guess_type_from_filename(filename)
def get_formula(self, filename, formname,prefix=""):
type = self.guess_type_from_filename(filename)
f = self.get_parsetree(filename,formname)
if f != None:
f = type(f,prefix)
return f
def get_gradient(self, filename, formname):
g = gradient.Gradient()
if formname == None:
g.load(open(self.find_file(filename, 3))) # FIXME
else:
compiled_gradient = self.get_formula(filename,formname)
g.load_ugr(compiled_gradient)
return g
def get_random_gradient(self):
return self.get_random_formula(3) # FIXME
def get_random_formula(self,type):
files = self.find_files_of_type(type)
file = random.choice(files)
if gradient.FileType.guess(file) == gradient.FileType.UGR:
ff = self.get_file(file)
formulas = ff.formulas.keys()
formula = random.choice(formulas)
else:
formula = None
return (file,formula)
def clear_cache(self):
self.cache.clear()
def __del__(self):
if not self.leave_dirty:
self.clear_cache()
instance = Compiler()
instance.update_from_prefs(fractconfig.instance)
def usage():
print "FC : a compiler from Fractint .frm files to C code"
print "fc.py -o [outfile] -f [formula] infile"
sys.exit(1)
def generate(fc,formulafile, formula, outputfile, cfile):
# find the function we want
ir = fc.get_formula(formulafile,formula)
if ir == None:
raise Exception("Can't find formula %s in %s" % \
(formula, formulafile))
if ir.errors != []:
print "Errors during translation"
for e in ir.errors:
print e
raise Exception("Errors during translation")
cg = fc.compile(ir)
fc.generate_code(ir, cg, outputfile,cfile)
def main(args):
fc = Compiler()
fc.leave_dirty = True
for arg in args:
ff = fc.load_formula_file(arg)
for name in ff.get_formula_names():
print name
form = fc.get_formula(arg,name)
cg = fc.compile(form)
if __name__ == '__main__':
main(sys.argv[1:])
| bsd-3-clause | -7,829,546,766,683,979,000 | 31.827184 | 133 | 0.588726 | false |
devilry/devilry-django | devilry/devilry_group/tests/test_feedbackfeed/mixins/mixin_feedbackfeed_admin.py | 1 | 30195 | # -*- coding: utf-8 -*-
import mock
from django import http
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from model_bakery import baker
from devilry.apps.core import models as core_models
from devilry.devilry_account import models as account_models
from devilry.devilry_account.models import PeriodPermissionGroup
from devilry.devilry_group import devilry_group_baker_factories as group_baker
from devilry.devilry_group import models as group_models
from devilry.devilry_group.cradmin_instances import crinstance_admin
from devilry.devilry_group.tests.test_feedbackfeed.mixins import mixin_feedbackfeed_common
class MixinTestFeedbackfeedAdmin(mixin_feedbackfeed_common.MixinTestFeedbackFeed):
"""
Mixin testclass for admin feedbackfeed tests.
    Adds tests for functionality and UI that all admin views share.
"""
viewclass = None
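    # Concrete test classes are expected to plug in the view under test, e.g.
    # (sketch only; the actual admin feedbackfeed view lives elsewhere in devilry_group):
    #
    #   class TestFeedbackfeedAdmin(MixinTestFeedbackfeedAdmin, test.TestCase):
    #       viewclass = feedbackfeed_admin.AdminFeedbackFeedView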
def __mock_cradmin_instance(self):
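        """Build a mock cradmin instance that always reports the 'admin' devilryrole."""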
mockinstance = mock.MagicMock()
mockinstance.get_devilryrole_for_requestuser.return_value = 'admin'
return mockinstance
def test_get(self):
candidate = baker.make('core.Candidate',
relatedstudent=baker.make('core.RelatedStudent'))
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=candidate.assignment_group,
requestuser=candidate.relatedstudent.user)
self.assertEqual(mockresponse.selector.one('title').alltext_normalized,
candidate.assignment_group.assignment.get_path())
def test_move_deadline_button_rendered_if_deadline_expired_and_feedbackset_is_not_graded(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
deadline_datetime = timezone.now() - timezone.timedelta(days=1)
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
test_feedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup,
deadline_datetime=deadline_datetime)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=test_feedbackset.group,
requestuser=testuser,
cradmin_instance=self.__mock_cradmin_instance()
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-event__grade-move-deadline-button'))
def test_move_deadline_button_rendered_if_deadline_expired_and_feedbackset_is_graded(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
deadline_datetime = timezone.now() - timezone.timedelta(days=1)
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
test_feedbackset = group_baker.feedbackset_first_attempt_published(
group=testgroup, deadline_datetime=deadline_datetime, grading_published_datetime=deadline_datetime)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=test_feedbackset.group,
requestuser=testuser,
cradmin_instance=self.__mock_cradmin_instance()
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-event__grade-move-deadline-button'))
def test_new_attempt_button_rendered_if_deadline_expired_and_feedbackset_is_graded(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
deadline_datetime = timezone.now() - timezone.timedelta(days=1)
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
test_feedbackset = group_baker.feedbackset_first_attempt_published(
group=testgroup, deadline_datetime=deadline_datetime, grading_published_datetime=deadline_datetime)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=test_feedbackset.group,
requestuser=testuser,
cradmin_instance=self.__mock_cradmin_instance()
)
self.assertTrue(mockresponse.selector.exists('.devilry-group-event__grade-last-new-attempt-button'))
def test_new_attempt_button_not_rendered_if_deadline_expired_and_feedbackset_not_graded(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
deadline_datetime = timezone.now() - timezone.timedelta(days=1)
testgroup = baker.make('core.AssignmentGroup',
parentnode__parentnode=baker.make_recipe('devilry.apps.core.period_active'))
test_feedbackset = group_baker.feedbackset_first_attempt_unpublished(
group=testgroup, deadline_datetime=deadline_datetime)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=test_feedbackset.group,
requestuser=testuser,
cradmin_instance=self.__mock_cradmin_instance()
)
self.assertFalse(mockresponse.selector.exists('.devilry-group-event__grade-last-new-attempt-button'))
def test_assignment_deadline_hard_expired_comment_form_rendered(self):
testuser = baker.make(settings.AUTH_USER_MODEL)
deadline_datetime = timezone.now() - timezone.timedelta(days=1)
test_feedbackset = baker.make('devilry_group.FeedbackSet',
deadline_datetime=deadline_datetime,
group__parentnode__deadline_handling=core_models.Assignment.DEADLINEHANDLING_HARD,
group__parentnode__parentnode=baker.make_recipe(
'devilry.apps.core.period_active'))
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=test_feedbackset.group,
requestuser=testuser,
cradmin_instance=self.__mock_cradmin_instance()
)
self.assertTrue(mockresponse.selector.exists('.cradmin-legacy-form-wrapper'))
self.assertFalse(mockresponse.selector.exists('.devilry-feedbackfeed-form-disabled'))
def test_get_examiner_discuss_tab_buttons(self):
testgroup = baker.make('core.AssignmentGroup')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup)
self.assertEqual(2, mockresponse.selector.count('.devilry-group-feedbackfeed-discuss-button'))
def test_get_feedbackfeed_event_delivery_passed(self):
assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
max_points=10,
passing_grade_min_points=5)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
feedbackset = group_baker.feedbackset_first_attempt_published(
group=testgroup,
grading_points=7)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=feedbackset.group)
self.assertTrue(mockresponse.selector.exists('.devilry-core-grade-passed'))
self.assertFalse(mockresponse.selector.exists('.devilry-core-grade-failed'))
def test_get_feedbackfeed_event_delivery_failed(self):
assignment = baker.make_recipe('devilry.apps.core.assignment_activeperiod_start',
max_points=10,
passing_grade_min_points=5)
testgroup = baker.make('core.AssignmentGroup', parentnode=assignment)
feedbackset = group_baker.feedbackset_first_attempt_published(
group=testgroup,
grading_points=0)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=feedbackset.group)
self.assertTrue(mockresponse.selector.exists('.devilry-core-grade-failed'))
self.assertFalse(mockresponse.selector.exists('.devilry-core-grade-passed'))
def test_get_feedbackfeed_periodadmin(self):
period = baker.make('core.Period')
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=period)
admin = baker.make(settings.AUTH_USER_MODEL)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
baker.make('devilry_account.PermissionGroupUser',
user=admin,
permissiongroup=baker.make(
'devilry_account.PeriodPermissionGroup',
permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_PERIODADMIN,
period=period).permissiongroup)
comment = baker.make('devilry_group.GroupComment',
user_role='admin',
user=admin,
text='Hello, is it me you\'re looking for?',
feedback_set=testfeedbackset,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=comment.feedback_set.group)
self.assertEqual(
'periodadmin',
PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period(
period=period, user=admin))
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-comment-admin'))
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_feedbackfeed_comment_admin(self):
admin = baker.make('devilry_account.User', shortname='periodadmin', fullname='Thor the norse god')
period = baker.make_recipe('devilry.apps.core.period_active',
admins=[admin],
parentnode__admins=[baker.make('devilry_account.User', shortname='subjectadmin')],
parentnode__parentnode__admins=[baker.make('devilry_account.User',
shortname='nodeadmin')])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=period)
admin = baker.make(settings.AUTH_USER_MODEL)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
comment = baker.make('devilry_group.GroupComment',
user_role='admin',
user=admin,
feedback_set=testfeedbackset,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=comment.feedback_set.group)
self.assertTrue(mockresponse.selector.exists('.devilry-group-feedbackfeed-comment-admin'))
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_feedbackfeed_periodadmin_raise_404_semi_anonymous(self):
        # Mock the return value of the crinstance's get_devilryrole_for_requestuser
        # to return the role directly. This is easier to read than creating a real
        # permission group (that crinstance function with permission groups is tested
        # separately for the instance).
testperiod = baker.make('core.Period')
testassignment = baker.make('core.Assignment',
parentnode=testperiod,
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testuser = baker.make(settings.AUTH_USER_MODEL, shortname='thor', fullname='Thor Thunder God')
mockrequest = mock.MagicMock()
mockrequest.cradmin_instance.get_devilryrole_for_requestuser.return_value = 'periodadmin'
with self.assertRaisesMessage(http.Http404, ''):
self.mock_getrequest(requestuser=testuser, cradmin_role=testgroup,
cradmin_instance=mockrequest.cradmin_instance)
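        # The group still has exactly one FeedbackSet: the first one is created together
        # with the AssignmentGroup, and the 404 above must not have added another.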
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_feedbackfeed_periodadmin_raise_404_fully_anonymous(self):
        # Mock the return value of the crinstance's get_devilryrole_for_requestuser
        # to return the role directly. This is easier to read than creating a real
        # permission group (that crinstance function with permission groups is tested
        # separately for the instance).
testperiod = baker.make('core.Period')
testassignment = baker.make('core.Assignment',
parentnode=testperiod,
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testuser = baker.make(settings.AUTH_USER_MODEL, shortname='thor', fullname='Thor Thunder God')
mockrequest = mock.MagicMock()
mockrequest.cradmin_instance.get_devilryrole_for_requestuser.return_value = 'periodadmin'
with self.assertRaisesMessage(http.Http404, ''):
self.mock_getrequest(requestuser=testuser, cradmin_role=testgroup,
cradmin_instance=mockrequest.cradmin_instance)
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_feedbackfeed_subjectadmin_can_see_student_name_semi_anonymous(self):
# Mocks the return value of the crinstance's get_devilry_role_for_requestuser to return the user role.
# It's easier to read if we mock the return value rather than creating a
# permission group (this crinstance-function with permission groups is tested separately for the instance)
testassignment = baker.make('core.Assignment',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_SEMI_ANONYMOUS)
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
candidate = baker.make('core.Candidate',
assignment_group=testgroup,
relatedstudent__user__shortname='teststudent')
baker.make('devilry_group.GroupComment',
user=candidate.relatedstudent.user,
user_role='student',
feedback_set=testfeedbackset)
mockrequest = mock.MagicMock()
mockrequest.cradmin_instance.get_devilryrole_for_requestuser.return_value = 'subjectadmin'
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup,
cradmin_instance=mockrequest.cradmin_instance)
self.assertFalse(mockresponse.selector.exists('.devilry-core-candidate-anonymous-name'))
self.assertTrue(mockresponse.selector.exists('.devilry-user-verbose-inline'))
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_feedbackfeed_subjectadmin_raise_404_fully_anonymous(self):
# Mocks the return value of the crinstance's get_devilry_role_for_requestuser to return the user role.
# It's easier to read if we mock the return value rather than creating a
# permission group (this crinstance-function with permission groups is tested separately for the instance)
testassignment = baker.make('core.Assignment',
anonymizationmode=core_models.Assignment.ANONYMIZATIONMODE_FULLY_ANONYMOUS)
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testuser = baker.make(settings.AUTH_USER_MODEL, shortname='thor', fullname='Thor Thunder God')
mockrequest = mock.MagicMock()
mockrequest.cradmin_instance.get_devilryrole_for_requestuser.return_value = 'subjectadmin'
with self.assertRaisesMessage(http.Http404, ''):
self.mock_getrequest(requestuser=testuser, cradmin_role=testgroup,
cradmin_instance=mockrequest.cradmin_instance)
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_periodadmin_no_access(self):
# Periodadmin does not have access to view when the user is not periodadmin for that period.
period1 = baker.make('core.Period')
period2 = baker.make('core.Period')
admin = baker.make(settings.AUTH_USER_MODEL)
permissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_PERIODADMIN,
period=period2)
baker.make('devilry_account.PermissionGroupUser',
user=admin,
permissiongroup=permissiongroup.permissiongroup)
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=period1)
mockrequest = mock.MagicMock()
mockrequest.user = admin
mockrequest.cradmin_role = testgroup
crinstance = crinstance_admin.AdminCrInstance(request=mockrequest)
with self.assertRaises(Http404):
self.mock_getrequest(cradmin_role=testgroup, cradmin_instance=crinstance)
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_subjectadmin_no_access(self):
# Subjectadmin does not have access to view when the user is not subjectadmin for that period.
subject1 = baker.make('core.Subject')
subject2 = baker.make('core.Subject')
admin = baker.make(settings.AUTH_USER_MODEL)
permissiongroup = baker.make('devilry_account.SubjectPermissionGroup',
permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_SUBJECTADMIN,
subject=subject2)
baker.make('devilry_account.PermissionGroupUser',
user=admin,
permissiongroup=permissiongroup.permissiongroup)
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode__parentnode=subject1)
mockrequest = mock.MagicMock()
mockrequest.user = admin
mockrequest.cradmin_role = testgroup
crinstance = crinstance_admin.AdminCrInstance(request=mockrequest)
with self.assertRaises(Http404):
self.mock_getrequest(cradmin_role=testgroup, cradmin_instance=crinstance)
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_feedbackfeed_download_visible_public_commentfiles_exist(self):
testassignment = baker.make('core.Assignment')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testuser = baker.make(settings.AUTH_USER_MODEL)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
candidate = baker.make('core.Candidate', assignment_group=testgroup)
group_comment = baker.make('devilry_group.GroupComment',
user=candidate.relatedstudent.user,
feedback_set=testfeedbackset)
baker.make('devilry_comment.CommentFile', comment=group_comment)
mock_cradmininstance = mock.MagicMock()
mock_cradmininstance.get_devilryrole_for_requestuser.return_value = 'periodadmin'
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
cradmin_instance=mock_cradmininstance,
requestuser=testuser
)
self.assertTrue(
'Download:' in mockresponse.selector.one('.devilry-group-feedbackfeed-buttonbar').alltext_normalized)
def test_get_feedbackfeed_download_not_visible_private_commentfile_exist(self):
testassignment = baker.make('core.Assignment')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
testuser = baker.make(settings.AUTH_USER_MODEL)
group_comment = baker.make('devilry_group.GroupComment',
feedback_set=testfeedbackset,
visibility=group_models.GroupComment.VISIBILITY_PRIVATE)
baker.make('devilry_comment.CommentFile', comment=group_comment)
mock_cradmininstance = mock.MagicMock()
mock_cradmininstance.get_devilryrole_for_requestuser.return_value = 'periodadmin'
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
cradmin_instance=mock_cradmininstance,
requestuser=testuser
)
self.assertFalse(
'Download:' in mockresponse.selector.one('.devilry-group-feedbackfeed-buttonbar').alltext_normalized)
def test_get_feedbackfeed_download_not_visible_part_of_grading_not_published(self):
testassignment = baker.make('core.Assignment')
testgroup = baker.make('core.AssignmentGroup', parentnode=testassignment)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
testuser = baker.make(settings.AUTH_USER_MODEL)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
group_comment = baker.make('devilry_group.GroupComment',
feedback_set=testfeedbackset,
user=examiner.relatedexaminer.user,
user_role='examiner',
part_of_grading=True)
baker.make('devilry_comment.CommentFile', comment=group_comment)
mock_cradmininstance = mock.MagicMock()
mock_cradmininstance.get_devilryrole_for_requestuser.return_value = 'periodadmin'
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testgroup,
cradmin_instance=mock_cradmininstance,
requestuser=testuser
)
self.assertFalse(
'Download:' in mockresponse.selector.one('.devilry-group-feedbackfeed-buttonbar').alltext_normalized)
def test_get_no_edit_link_for_other_users_comments(self):
admin = baker.make('devilry_account.User', shortname='periodadmin', fullname='Thor')
period = baker.make_recipe('devilry.apps.core.period_active',
admins=[admin])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=period)
feedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
baker.make('devilry_group.GroupComment',
user_role='examiner',
feedback_set=feedbackset)
baker.make('devilry_group.GroupComment',
user_role='student',
feedback_set=feedbackset)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup, requestuser=admin)
self.assertFalse(mockresponse.selector.exists('.devilry-group-comment-edit-link'))
self.assertFalse(mockresponse.selector.exists('.devilry-group-comment-edit-link__admin'))
self.assertFalse(mockresponse.selector.exists('.devilry-group-comment-edit-link__student'))
self.assertFalse(mockresponse.selector.exists('.devilry-group-comment-edit-link__examiner'))
def test_get_edit_link(self):
admin = baker.make('devilry_account.User', shortname='periodadmin', fullname='Thor')
period = baker.make_recipe('devilry.apps.core.period_active',
admins=[admin])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=period)
feedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
baker.make('devilry_group.GroupComment',
user=admin,
user_role='admin',
feedback_set=feedbackset)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup,
requestuser=admin)
self.assertTrue(mockresponse.selector.exists('.devilry-group-comment-edit-link__admin'))
self.assertEqual('Edit',
mockresponse.selector.one('.devilry-group-comment-edit-link__admin').alltext_normalized)
def test_get_edit_link_url(self):
admin = baker.make('devilry_account.User', shortname='periodadmin', fullname='Thor')
period = baker.make_recipe('devilry.apps.core.period_active',
admins=[admin])
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=period)
feedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
groupcomment = baker.make('devilry_group.GroupComment',
user=admin,
user_role='admin',
feedback_set=feedbackset)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testgroup,
requestuser=admin)
self.assertTrue(mockresponse.selector.exists('.devilry-group-comment-edit-link__admin'))
self.assertEqual(mockresponse.selector.one('.devilry-group-comment-edit-link__admin').get('href'),
'/devilry_group/admin/{}/feedbackfeed/groupcomment-edit/{}'.format(
testgroup.id, groupcomment.id))
def test_get_num_queries(self):
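# Builds an unusually large group (50 extra candidates, 50 extra examiners, 20 student
# and 20 examiner comments) so that the assertNumQueries(18) below shows the view does
# not issue one query per candidate/examiner/comment.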
period = baker.make('core.Period')
admin = baker.make(settings.AUTH_USER_MODEL, shortname='thor', fullname='Thor Thunder God')
baker.make('devilry_account.PermissionGroupUser',
user=admin,
permissiongroup=baker.make(
'devilry_account.PeriodPermissionGroup',
permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_PERIODADMIN,
period=period).permissiongroup)
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=period)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
baker.make('core.Candidate', assignment_group=testgroup, _quantity=50)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
baker.make('core.Examiner', assignmentgroup=testgroup, _quantity=50)
candidate = baker.make('core.Candidate', assignment_group=testgroup)
baker.make('devilry_group.GroupComment',
user=candidate.relatedstudent.user,
user_role='student',
feedback_set=testfeedbackset,
_quantity=20)
baker.make('devilry_group.GroupComment',
user=examiner.relatedexaminer.user,
user_role='examiner',
feedback_set=testfeedbackset,
_quantity=20)
mock_cradmininstance = mock.MagicMock()
mock_cradmininstance.get_devilryrole_for_requestuser.return_value = 'periodadmin'
with self.assertNumQueries(18):
self.mock_http200_getrequest_htmls(cradmin_role=testgroup,
requestuser=admin,
cradmin_instance=mock_cradmininstance)
self.assertEqual(1, group_models.FeedbackSet.objects.count())
def test_get_num_queries_with_commentfiles(self):
"""
NOTE: (works as it should)
Checking that no more queries are executed even though the
:func:`devilry.devilry_group.feedbackfeed_builder.FeedbackFeedTimelineBuilder.__get_feedbackset_queryset`
duplicates comment_file query.
"""
period = baker.make('core.Period')
admin = baker.make(settings.AUTH_USER_MODEL, shortname='thor', fullname='Thor Thunder God')
baker.make('devilry_account.PermissionGroupUser',
user=admin,
permissiongroup=baker.make(
'devilry_account.PeriodPermissionGroup',
permissiongroup__grouptype=account_models.PermissionGroup.GROUPTYPE_PERIODADMIN,
period=period).permissiongroup)
testgroup = baker.make('core.AssignmentGroup', parentnode__parentnode=period)
testfeedbackset = group_baker.feedbackset_first_attempt_unpublished(group=testgroup)
baker.make('core.Candidate', assignment_group=testgroup, _quantity=50)
examiner = baker.make('core.Examiner', assignmentgroup=testgroup)
baker.make('core.Examiner', assignmentgroup=testgroup, _quantity=50)
candidate = baker.make('core.Candidate', assignment_group=testgroup)
comment = baker.make('devilry_group.GroupComment',
user=candidate.relatedstudent.user,
user_role='student',
feedback_set=testfeedbackset)
comment2 = baker.make('devilry_group.GroupComment',
user=examiner.relatedexaminer.user,
user_role='examiner',
feedback_set=testfeedbackset)
baker.make('devilry_comment.CommentFile',
filename='test.py',
comment=comment,
_quantity=20)
baker.make('devilry_comment.CommentFile',
filename='test2.py',
comment=comment2,
_quantity=20)
mock_cradmininstance = mock.MagicMock()
mock_cradmininstance.get_devilryrole_for_requestuser.return_value = 'periodadmin'
with self.assertNumQueries(18):
self.mock_http200_getrequest_htmls(cradmin_role=testgroup,
requestuser=admin,
cradmin_instance=mock_cradmininstance)
self.assertEqual(1, group_models.FeedbackSet.objects.count()) | bsd-3-clause | -2,731,235,804,573,510,700 | 59.879032 | 120 | 0.646465 | false |
codilime/cloudify-manager | tests/workflow_tests/test_retrieve_resource_rendering.py | 1 | 2274 | ########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
from testenv import TestCase
from testenv.utils import get_resource as resource
from testenv.utils import deploy_application as deploy
class RetriveResourceRenderingTest(TestCase):
dsl_path = resource('dsl/test-retrieve-resource-template.yaml')
template_path = 'jinja_rendering/for_template_rendering_tests.conf'
rendered_template_path = \
resource('dsl/jinja_rendering/rendered_template.conf')
def _get_expected_template(self):
with open(self.rendered_template_path, 'r') as f:
return f.read()
def _retrieve_resource_test(self, mode):
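# 'mode' is forwarded as a blueprint input; judging by the test names below it
# presumably selects between get_resource and download_resource in the deployed
# operation (a hedged guess - the blueprint itself is not shown here).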
blueprint_id = 'blueprint-' + str(uuid.uuid4())
deployment, _ = deploy(
self.dsl_path,
blueprint_id=blueprint_id,
timeout_seconds=15,
inputs={
'rendering_tests_demo_conf': self.template_path,
'mode': mode
}
)
rendered_resource = \
self.get_plugin_data('testmockoperations',
deployment.id)['rendered_resource']
expected = self._get_expected_template()
return expected, rendered_resource
def test_get_resource_template(self):
expected, rendered_resource = self._retrieve_resource_test('get')
self.assertEqual(expected, rendered_resource)
def test_download_resource_template(self):
expected, rendered_resource_path = \
self._retrieve_resource_test('download')
with open(rendered_resource_path, 'r') as f:
rendered_resource = f.read()
self.assertEqual(expected, rendered_resource)
| apache-2.0 | -4,245,692,931,354,031,000 | 37.542373 | 79 | 0.663149 | false |
sileht/pifpaf | pifpaf/drivers/zookeeper.py | 1 | 2019 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pifpaf import drivers
class ZooKeeperDriver(drivers.Driver):
DEFAULT_PORT = 2181
PATH = ["/usr/share/zookeeper/bin",
"/usr/local/opt/zookeeper/libexec/bin"]
def __init__(self, port=DEFAULT_PORT, **kwargs):
"""Create a new ZooKeeper server."""
super(ZooKeeperDriver, self).__init__(**kwargs)
self.port = port
@classmethod
def get_options(cls):
return [
{"param_decls": ["--port"],
"type": int,
"default": cls.DEFAULT_PORT,
"help": "port to use for ZooKeeper"},
]
def _setUp(self):
super(ZooKeeperDriver, self)._setUp()
cfgfile = os.path.join(self.tempdir, "zoo.cfg")
with open(cfgfile, "w") as f:
f.write("""dataDir=%s
clientPort=%s""" % (self.tempdir, self.port))
logdir = os.path.join(self.tempdir, "log")
os.mkdir(logdir)
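# zkServer.sh reads these environment variables to find the config file and log
# directory created above, so each pifpaf instance stays self-contained.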
self.putenv("ZOOCFGDIR", self.tempdir, True)
self.putenv("ZOOCFG", cfgfile, True)
self.putenv("ZOO_LOG_DIR", logdir, True)
c, _ = self._exec(
["zkServer.sh", "start", cfgfile],
wait_for_line="STARTED",
path=self.PATH)
self.addCleanup(self._exec,
["zkServer.sh", "stop", cfgfile],
path=self.PATH)
self.putenv("ZOOKEEPER_PORT", str(self.port))
self.putenv("URL", "zookeeper://localhost:%d" % self.port)
| apache-2.0 | 3,448,701,531,717,726,000 | 30.061538 | 69 | 0.600792 | false |
sam-m888/gramps | gramps/gui/plug/export/_exportassistant.py | 1 | 24356 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Contribution 2009 by Brad Crittenden <brad [AT] bradcrittenden.net>
# Copyright (C) 2008 Benny Malengier
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by B.Malengier
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
import sys
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
log = logging.getLogger(".ExportAssistant")
#-------------------------------------------------------------------------
#
# Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import ICON, SPLASH, GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.config import config
from ...pluginmanager import GuiPluginManager
from gramps.gen.utils.file import (find_folder, get_new_filename)
from ...managedwindow import ManagedWindow
from ...dialog import ErrorDialog
from ...user import User
#-------------------------------------------------------------------------
#
# ExportAssistant
#
#-------------------------------------------------------------------------
_ExportAssistant_pages = {
'intro' : 0,
'exporttypes' : 1,
'options' : 2,
'fileselect' : 3,
'confirm' : 4,
'summary' : 5,
}
class ExportAssistant(ManagedWindow, Gtk.Assistant):
"""
This class creates a GTK assistant to guide the user through the various
Save as/Export options.
The overall goal is to keep things simple by presenting few choice options
on each assistant page.
The export formats and options are obtained from the plugins.
"""
#override predefined do_xxx signal handlers
__gsignals__ = {"apply": "override", "cancel": "override",
"close": "override", "prepare": "override"}
def __init__(self,dbstate,uistate):
"""
Set up the assistant, and build all the possible assistant pages.
Some page elements are left empty, since their contents depends
on the user choices and on the success of the attempted save.
"""
self.dbstate = dbstate
self.uistate = uistate
self.writestarted = False
self.confirm = None
# set export mode and busy mode to avoid all other operations
self.uistate.set_export_mode(True)
#set up Assistant
Gtk.Assistant.__init__(self)
#set up ManagedWindow
self.top_title = _("Export Assistant")
ManagedWindow.__init__(self, uistate, [], self.__class__, modal=True)
#set_window is present in both parent classes
self.set_window(self, None, self.top_title, isWindow=True)
self.setup_configs('interface.exportassistant', 760, 500)
#set up callback method for the export plugins
self.callback = self.pulse_progressbar
person_handle = self.uistate.get_active('Person')
if person_handle:
self.person = self.dbstate.db.get_person_from_handle(person_handle)
if not self.person:
self.person = self.dbstate.db.find_initial_person()
else:
self.person = None
pmgr = GuiPluginManager.get_instance()
self.__exporters = pmgr.get_export_plugins()
self.map_exporters = {}
self.__previous_page = -1
#create the assistant pages
self.create_page_intro()
self.create_page_exporttypes()
self.create_page_options()
self.create_page_fileselect()
self.create_page_confirm()
#no progress page, looks ugly, and user needs to hit forward at end!
self.create_page_summary()
self.option_box_instance = None
#we need our own forward function as options page must not always be shown
self.set_forward_page_func(self.forward_func, None)
#ManagedWindow show method
self.show()
def build_menu_names(self, obj):
"""Override ManagedWindow method."""
return (self.top_title, self.top_title)
def create_page_intro(self):
"""Create the introduction page."""
label = Gtk.Label(label=self.get_intro_text())
label.set_line_wrap(True)
label.set_use_markup(True)
label.set_max_width_chars(60)
image = Gtk.Image()
image.set_from_file(SPLASH)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
box.pack_start(image, False, False, 5)
box.pack_start(label, False, False, 5)
page = box
page.show_all()
self.append_page(page)
self.set_page_title(page, _('Saving your data'))
self.set_page_complete(page, True)
self.set_page_type(page, Gtk.AssistantPageType.INTRO)
def create_page_exporttypes(self):
"""Create the export type page.
A Title label.
A grid of format radio buttons and their descriptions.
"""
self.format_buttons = []
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
box.set_border_width(12)
box.set_spacing(12)
grid = Gtk.Grid()
grid.set_row_spacing(6)
grid.set_column_spacing(6)
button = None
recent_type = config.get('behavior.recent-export-type')
exporters = [(x.get_name().replace("_", ""), x) for x in self.__exporters]
exporters.sort()
ix = 0
for sort_title, exporter in exporters:
title = exporter.get_name()
description = exporter.get_description()
self.map_exporters[ix] = exporter
button = Gtk.RadioButton.new_with_mnemonic_from_widget(button, title)
button.set_tooltip_text(description)
self.format_buttons.append(button)
grid.attach(button, 0, 2*ix, 2, 1)
if ix == recent_type:
button.set_active(True)
ix += 1
box.pack_start(grid, False, False, 0)
page = box
page.show_all()
self.append_page(page)
self.set_page_title(page, _('Choose the output format'))
self.set_page_type(page, Gtk.AssistantPageType.CONTENT)
def create_page_options(self):
# as we do not know yet what to show, we create an empty page
page = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
page.set_border_width(0)
page.set_spacing(12)
page.show_all()
self.append_page(page)
self.set_page_title(page, _('Export options'))
self.set_page_complete(page, False)
self.set_page_type(page, Gtk.AssistantPageType.CONTENT)
def forward_func(self, pagenumber, data):
"""This function is called on forward press.
Normally we go to the next page; before the options page, however,
we decide whether the options page needs to be shown at all
"""
if pagenumber == _ExportAssistant_pages['exporttypes'] :
#decide if options need to be shown:
self.option_box_instance = None
ix = self.get_selected_format_index()
if not self.map_exporters[ix].get_config():
# no options needed
return pagenumber + 2
elif pagenumber == _ExportAssistant_pages['options']:
# need to check to see if we should show file selection
if (self.option_box_instance and
hasattr(self.option_box_instance, "no_fileselect")):
# don't show fileselect, but mark it ok
return pagenumber + 2
return pagenumber + 1
def create_options(self):
"""This method gets the option page, and fills it with the options."""
option = self.get_selected_format_index()
vbox = self.get_nth_page(_ExportAssistant_pages['options'])
(config_title, config_box_class) = self.map_exporters[option].get_config()
#self.set_page_title(vbox, config_title)
# remove present content of the vbox
list(map(vbox.remove, vbox.get_children()))
# add new content
if config_box_class:
self.option_box_instance = config_box_class(
self.person, self.dbstate, self.uistate, track=self.track,
window=self.window)
box = self.option_box_instance.get_option_box()
vbox.add(box)
else:
self.option_box_instance = None
vbox.show_all()
# We silently assume all options lead to accepted behavior
self.set_page_complete(vbox, True)
def create_page_fileselect(self):
self.chooser = Gtk.FileChooserWidget(action=Gtk.FileChooserAction.SAVE)
self.chooser.set_homogeneous(False) # Fix for bug #8350.
#add border
self.chooser.set_border_width(12)
#global files, ask before overwrite
self.chooser.set_local_only(False)
self.chooser.set_do_overwrite_confirmation(True)
#created, folder and name not set
self.folder_is_set = False
#connect changes in filechooser with check to mark page complete
self.chooser.connect("selection-changed", self.check_fileselect)
self.chooser.connect("key-release-event", self.check_fileselect)
#first selection does not give a selection-changed event, grab the button
self.chooser.connect("button-release-event", self.check_fileselect)
#Note, we can induce an exotic error, delete filename,
# do not release button, click forward. We expect user not to do this
# In case he does, recheck on confirmation page!
self.chooser.show_all()
page = self.chooser
self.append_page(page)
self.set_page_title(page, _('Select save file'))
self.set_page_type(page, Gtk.AssistantPageType.CONTENT)
def check_fileselect(self, filechooser, event=None, show=True):
"""Given a filechooser, determine if it can be marked complete in
the Assistant.
Used as normal callback and event callback. For callback, we will have
show=True
"""
filename = filechooser.get_filename()
if not filename:
self.set_page_complete(filechooser, False)
else:
folder = filechooser.get_current_folder()
if not folder:
folder = find_folder(filename)
else:
folder = find_folder(folder)
#the file must be valid, not a folder, and folder must be valid
if (filename and os.path.basename(filename.strip()) and folder):
#this page of the assistant is complete
self.set_page_complete(filechooser, True)
else :
self.set_page_complete(filechooser, False)
def create_page_confirm(self):
# Construct confirm page
self.confirm = Gtk.Label()
self.confirm.set_line_wrap(True)
self.confirm.set_use_markup(True)
self.confirm.show()
image = Gtk.Image()
image.set_from_file(SPLASH)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
box.set_border_width(12)
box.set_spacing(6)
box.pack_start(image, False, False, 5)
box.pack_start(self.confirm, False, False, 5)
self.progressbar = Gtk.ProgressBar()
box.pack_start(self.progressbar, False, False, 0)
page = box
self.append_page(page)
self.set_page_title(page, _('Final confirmation'))
self.set_page_type(page, Gtk.AssistantPageType.CONFIRM)
self.set_page_complete(page, True)
def create_page_summary(self):
# Construct summary page
# As this is the last page, it needs to be of page_type
# Gtk.AssistantPageType.CONFIRM or Gtk.AssistantPageType.SUMMARY
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
vbox.set_border_width(12)
vbox.set_spacing(6)
image = Gtk.Image()
image.set_from_file(SPLASH)
vbox.pack_start(image, False, False, 5)
self.labelsum = Gtk.Label()
self.labelsum.set_line_wrap(True)
self.labelsum.set_use_markup(True)
vbox.pack_start(self.labelsum, False, False, 0)
page = vbox
page.show_all()
self.append_page(page)
self.set_page_title(page, _('Summary'))
self.set_page_complete(page, False)
self.set_page_type(page, Gtk.AssistantPageType.SUMMARY)
def do_apply(self):
pass
def do_close(self):
self.uistate.set_export_mode(False)
if self.writestarted :
pass
else :
self.close()
def do_cancel(self):
self.do_close()
def do_prepare(self, page):
"""
The "prepare" signal is emitted when a new page is set as the
assistant's current page, but before making the new page visible.
:param page: the new page to prepare for display.
"""
#determine if we go backward or forward
page_number = self.get_current_page()
assert page == self.get_nth_page(page_number)
if page_number <= self.__previous_page :
back = True
else :
back = False
if back :
#when moving backward, show page as it was,
#page we come from is set incomplete so as to disallow user jumping
# to last page after backward move
self.set_page_complete(self.get_nth_page(self.__previous_page),
False)
elif page_number == _ExportAssistant_pages['options']:
self.create_options()
self.set_page_complete(page, True)
elif page == self.chooser :
# next page is the file chooser, reset filename, keep folder where user was
folder, name = self.suggest_filename()
page.set_action(Gtk.FileChooserAction.SAVE)
if self.folder_is_set:
page.set_current_name(name)
else :
page.set_current_name(name)
page.set_current_folder(folder)
self.folder_is_set = True
# see if page is complete with above
self.check_fileselect(page, show=True)
elif self.get_page_type(page) == Gtk.AssistantPageType.CONFIRM:
# The confirm page with apply button
# Present user with what will happen
ix = self.get_selected_format_index()
format = self.map_exporters[ix].get_name()
page_complete = False
# If no file select:
if (self.option_box_instance and
hasattr(self.option_box_instance, "no_fileselect")):
# No file selection
filename = ''
confirm_text = _(
'The data will be exported as follows:\n\n'
'Format:\t%s\n\n'
'Press Apply to proceed, Back to revisit '
'your options, or Cancel to abort') % (format.replace("_",""), )
page_complete = True
else:
#Allow for exotic error: file is still not correct
self.check_fileselect(self.chooser, show=False)
if self.get_page_complete(self.chooser):
filename = self.chooser.get_filename()
name = os.path.split(filename)[1]
folder = os.path.split(filename)[0]
confirm_text = _(
'The data will be saved as follows:\n\n'
'Format:\t%(format)s\nName:\t%(name)s\nFolder:\t%(folder)s\n\n'
'Press Apply to proceed, Go Back to revisit '
'your options, or Cancel to abort') % {
'format': format.replace("_",""),
'name': name,
'folder': folder}
page_complete = True
else :
confirm_text = _(
'The selected file and folder to save to '
'cannot be created or found.\n\n'
'Press Back to return and select a valid filename.'
)
page_complete = False
# Set the page_complete status
self.set_page_complete(page, page_complete)
# If it is ok, then look for alternate confirm_text
if (page_complete and
self.option_box_instance and
hasattr(self.option_box_instance, "confirm_text")):
# Override message
confirm_text = self.option_box_instance.confirm_text
self.confirm.set_label(confirm_text)
self.progressbar.hide()
elif self.get_page_type(page) == Gtk.AssistantPageType.SUMMARY :
# The summary page
# Lock page, show progress bar
self.pre_save(page)
# save
success = self.save()
# Unlock page
self.post_save()
#update the label and title
if success:
conclusion_title = _('Your data has been saved')
conclusion_text = _(
'The copy of your data has been '
'successfully saved. You may press Close button '
'now to continue.\n\n'
'Note: the database currently opened in your Gramps '
'window is NOT the file you have just saved. '
'Future editing of the currently opened database will '
'not alter the copy you have just made. ')
#add test, what is dir
conclusion_text += '\n\n' + _('Filename: %s') % self.chooser.get_filename()
else:
conclusion_title = _('Saving failed')
conclusion_text = _(
'There was an error while saving your data. '
'You may try starting the export again.\n\n'
'Note: your currently opened database is safe. '
'It was only '
'a copy of your data that failed to save.')
self.labelsum.set_label(conclusion_text)
self.set_page_title(page, conclusion_title)
self.set_page_complete(page, True)
else :
#whatever other page, if we show it, it is complete too
self.set_page_complete(page, True)
#remember previous page for next time
self.__previous_page = page_number
def get_intro_text(self):
return _('Under normal circumstances, Gramps does not require you '
'to directly save your changes. All changes you make are '
'immediately saved to the database.\n\n'
'This process will help you save a copy of your data '
'in any of the several formats supported by Gramps. '
'This can be used to make a copy of your data, backup '
'your data, or convert it to a format that will allow '
'you to transfer it to a different program.\n\n'
'If you change your mind during this process, you '
'can safely press the Cancel button at any time and your '
'present database will still be intact.')
def get_selected_format_index(self):
"""
Query the format radiobuttons and return the index number of the
selected one.
"""
for ix in range(len(self.format_buttons)):
button = self.format_buttons[ix]
if button.get_active():
return ix
return 0
def suggest_filename(self):
"""Prepare suggested filename and set it in the file chooser."""
ix = self.get_selected_format_index()
ext = self.map_exporters[ix].get_extension()
default_dir = config.get('paths.recent-export-dir')
if ext == 'gramps':
new_filename = os.path.join(default_dir,'data.gramps')
elif ext == 'burn':
new_filename = os.path.basename(self.dbstate.db.get_save_path())
else:
new_filename = get_new_filename(ext,default_dir)
return (default_dir, os.path.split(new_filename)[1])
def save(self):
"""
Perform the actual Save As/Export operation.
Depending on the success status, set the text for the final page.
"""
success = False
try:
if (self.option_box_instance and
hasattr(self.option_box_instance, "no_fileselect")):
filename = ""
else:
filename = self.chooser.get_filename()
config.set('paths.recent-export-dir', os.path.split(filename)[0])
ix = self.get_selected_format_index()
config.set('behavior.recent-export-type', ix)
export_function = self.map_exporters[ix].get_export_function()
success = export_function(self.dbstate.db,
filename,
User(error=ErrorDialog, parent=self.uistate.window,
callback=self.callback),
self.option_box_instance)
except:
# an error not caught in the export_function itself
success = False
log.error(_("Error exporting your Family Tree"), exc_info=True)
return success
def pre_save(self, page):
''' Since we are in 'prepare', the next page is not yet shown, so
modify the 'confirm' page text and show the progress bar
'''
self.confirm.set_label(
_("Please wait while your data is selected and exported"))
self.writestarted = True
self.progressbar.show()
self.show_all()
self.set_busy_cursor(1)
def post_save(self):
self.set_busy_cursor(0)
self.progressbar.hide()
self.writestarted = False
def set_busy_cursor(self,value):
"""
Set or unset the busy cursor while saving data.
"""
BUSY_CURSOR = Gdk.Cursor.new_for_display(Gdk.Display.get_default(),
Gdk.CursorType.WATCH)
if value:
Gtk.Assistant.get_window(self).set_cursor(BUSY_CURSOR)
#self.set_sensitive(0)
else:
Gtk.Assistant.get_window(self).set_cursor(None)
#self.set_sensitive(1)
while Gtk.events_pending():
Gtk.main_iteration()
def pulse_progressbar(self, value, text=None):
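# Progress callback handed to the export plugins (via self.callback / User):
# 'value' is a percentage in the range 0-100, 'text' an optional status message.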
self.progressbar.set_fraction(min(value/100.0, 1.0))
if text:
self.progressbar.set_text("%s: %d%%" % (text, value))
self.confirm.set_label(
_("Please wait while your data is selected and exported") +
"\n" + text)
else:
self.progressbar.set_text("%d%%" % value)
while Gtk.events_pending():
Gtk.main_iteration()
| gpl-2.0 | 2,831,607,764,847,831,600 | 36.819876 | 90 | 0.565282 | false |
allo-/django-bingo | bingo/times.py | 1 | 3716 | from django.utils import timezone
from django.conf import settings
from . import config
from datetime import datetime, timedelta
def now():
return timezone.localtime()
def get_times(site):
time_now = now()
start_time_begin = config.get("start_time_begin", site=site)
if start_time_begin is not None:
start_time_begin = timezone.make_aware(
datetime.combine(time_now, start_time_begin))
start_time_end = config.get("start_time_end", site=site)
if start_time_end is not None:
start_time_end = timezone.make_aware(datetime.combine(
time_now, start_time_end))
end_time = config.get("end_time", site=site)
if end_time is not None:
end_time = timezone.make_aware(datetime.combine(time_now, end_time))
vote_start_time = config.get("vote_start_time", site=site)
if vote_start_time is not None:
vote_start_time = timezone.make_aware(datetime.combine(
time_now, vote_start_time))
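# The blocks below handle day rollover. Example (illustrative): with start time
# 20:00-21:00, vote start 22:00 and end time 02:00, end_time (and, if needed,
# vote_start_time) is pushed to the next day so that start < vote < end holds.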
if start_time_begin is not None and start_time_end is not None:
# when the end of start time is "before" the start of start time,
# the end of start time is tomorrow
if start_time_end < start_time_begin:
start_time_end += timezone.timedelta(1, 0)
if end_time is not None:
# when the end time is "before" the end of start_time_end,
# the game ends tomorrow
if start_time_begin is not None and end_time < start_time_begin:
end_time = end_time + timezone.timedelta(1, 0)
if vote_start_time is not None:
# The vote time must come after the start of starttime.
# If it comes before, it must be tomorrow
if start_time_begin is not None and vote_start_time < start_time_begin:
vote_start_time = vote_start_time + timezone.timedelta(1, 0)
# When end time is now before the vote start time, end time needs to
# be adjusted to be tomorrow as well
if end_time < vote_start_time:
end_time = end_time + timezone.timedelta(1, 0)
# some sanity checks
if start_time_begin and start_time_end and vote_start_time:
assert start_time_begin < vote_start_time
if end_time and vote_start_time:
assert end_time > vote_start_time
if start_time_begin and start_time_end and end_time:
assert end_time > start_time_end
return {
'now': time_now,
'start_time_begin': start_time_begin,
'start_time_end': start_time_end,
'end_time': end_time,
'vote_start_time': vote_start_time,
}
def get_endtime(site):
""" returns the (static) game end time """
return get_times(site)['end_time']
def is_starttime(site):
"""
returns True, if no start times are set, or the current time
lies inside the starttime.
"""
if not config.get("start_time_begin", site=site) or \
not config.get("start_time_end", site=site):
return True
else:
times = get_times(site)
return times['start_time_begin'] \
< now() \
< times['start_time_end']
def is_after_votetime_start(site):
"""
returns True, if no vote_start_time is set,
or the current time is after the start of vote
time
"""
if not config.get("vote_start_time", site=site):
return True
else:
times = get_times(site)
return times['vote_start_time'] <= times['now']
def is_after_endtime(site):
end_time = config.get("end_time", site=site)
if end_time is None or is_starttime(site):
return False
else:
times = get_times(site)
return times['end_time'] < times['now']
| agpl-3.0 | 6,890,961,281,847,106,000 | 32.781818 | 80 | 0.619214 | false |
patrickhoefler/lwd | lwd.py | 1 | 7546 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Functions for turning the Wikidata dump into Linked Data
import codecs
import glob
import gzip
import json
import math
import os
import sys
import time
import xml.etree.cElementTree as ET
import settings
def process_dump():
# Print some status info
print 'Processing ' + settings.dump_filename
# Make sure the output folders exist
if not os.path.exists('output'):
os.mkdir('output')
if not os.path.exists('output/' + settings.output_folder):
os.mkdir('output/' + settings.output_folder)
if not os.path.exists('output/' + settings.output_folder + '/ttl'):
os.mkdir('output/' + settings.output_folder + '/ttl')
# Delete all old files
for f in glob.glob('output/' + settings.output_folder + '/ttl/*.ttl'):
os.remove(f)
# Initiate variables
entity_counter = 0
element_id = ''
# Start the clock
start_time = time.time()
# Load the dump file and create the iterator
context = ET.iterparse(settings.dump_filename, events=('start', 'end'))
context = iter(context)
event, root = context.next()
# Iterate over the dump file
for event, element in context:
# Check if we have reached the max number of processed entities
if settings.max_processed_entities > 0 and entity_counter == settings.max_processed_entities:
break
# Get the ID of the current entity
if event == 'end' and element.tag == '{http://www.mediawiki.org/xml/export-0.8/}title':
if element.text.find('Q') == 0:
element_id = element.text
elif element.text.find('Property:P') == 0:
element_id = element.text.split(':')[1]
# Get the data of the current entity
if element_id and event == 'end' and element.tag == '{http://www.mediawiki.org/xml/export-0.8/}text':
if element.text:
triples = get_nt_for_entity(element_id, element.text)
batch_id = str(int(math.floor(int(element_id[1:]) / settings.batchsize)) * settings.batchsize).zfill(8)
batchfile_ttl_name = 'output/' + settings.output_folder + '/ttl/' + element_id[0] + '_Batch_' + batch_id + '.ttl'
# If ttl file doesn't exist, create it and add the prefixes
if not os.path.isfile(batchfile_ttl_name):
prefixes = '# Extracted from ' + settings.dump_filename + ' with LWD (http://github.com/patrickhoefler/lwd)'
prefixes += """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix wd: <http://www.wikidata.org/entity/> .
""".replace(' ', '')
with codecs.open(batchfile_ttl_name, 'a', 'utf-8') as batchfile_ttl:
batchfile_ttl.write(prefixes)
# Write the triples to the batchfile
with codecs.open(batchfile_ttl_name, 'a', 'utf-8') as batchfile_ttl:
batchfile_ttl.write(triples)
# One more entity
entity_counter += 1
# Print some progress
if entity_counter % 1000 == 0:
sys.stdout.write('.')
sys.stdout.flush()
# Print some statistics
if entity_counter % 10000 == 0:
lap_time = time.time()
print '\nProcessed ' + str(entity_counter) + ' entities in ' + str(lap_time - start_time) + ' seconds, on average ' + str(entity_counter / (lap_time - start_time)) + ' per second'
# Reset the element ID in preparation for the next iteration
element_id = ''
# Save the memory, save the world
root.clear()
# Stop the clock and print some final statistics
end_time = time.time()
print('\nProcessed ' + str(entity_counter) + ' entities in ' + str(end_time - start_time) + ' seconds, on average ' + str(entity_counter / (end_time - start_time)) + ' per second')
number_of_files = len(os.listdir('output/' + settings.output_folder + '/ttl'))
if number_of_files != 1:
plural = 's'
else:
plural = ''
print('Created ' + str(number_of_files) + ' .ttl file' + plural + ' in ./' + 'output/' + settings.output_folder + '/ttl')
def get_nt_for_entity(element_id, element_data):
# Turn the data JSON string into an object
data = json.loads(element_data)
entity_uri = 'wd:' + element_id
triples = ''
# Get the label in English
try:
triples = triples + entity_uri + ' rdfs:label ' + '"' + data['label']['en'].replace('\\', '\\\\').replace('"', '\\"') + '"@en .\n'
except:
# print 'No label for ' + element_id
pass
# Get the description in English
try:
triples = triples + entity_uri + ' rdfs:comment ' + '"' + data['description']['en'].replace('\\', '\\\\').replace('"', '\\"') + '"@en .\n'
except:
# print 'No description for ' + element_id
pass
# Are there any claims in the current element?
if data.get('claims'):
# Iterate over all claims
for claim in data['claims']:
predicate_id = 'P' + str(claim['m'][1])
predicate_uri = 'wd:' + predicate_id
if len(claim['m']) > 2:
# Is it an object property?
if claim['m'][2] == 'wikibase-entityid':
object_id = 'Q' + str(claim['m'][3]['numeric-id'])
object_uri = 'wd:' + object_id
triples = triples + entity_uri + ' ' + predicate_uri + ' ' + object_uri + ' .\n'
# Add RDF type
if predicate_id == 'P31':
triples = triples + entity_uri + ' rdf:type ' + object_uri + ' .\n'
# Is it a string value property?
if claim['m'][2] == 'string':
triples = triples + entity_uri + ' ' + predicate_uri + ' "' + claim['m'][3].replace('\\', '\\\\').replace('"', '\\"') + '" .\n'
return triples
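# Illustrative output of get_nt_for_entity for an entity like Q42 (not taken from a
# real dump):
# wd:Q42 rdfs:label "Douglas Adams"@en .
# wd:Q42 wd:P31 wd:Q5 .
# wd:Q42 rdf:type wd:Q5 .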
def compress_ttl_files():
# Print some status info
print 'Compressing'
# Make sure the output folders exist
if not os.path.exists('output'):
os.mkdir('output')
if not os.path.exists('output/' + settings.output_folder):
os.mkdir('output/' + settings.output_folder)
if not os.path.exists('output/' + settings.output_folder + '/gz'):
os.mkdir('output/' + settings.output_folder + '/gz')
# Delete all old files
for f in glob.glob('output/' + settings.output_folder + '/gz/*.gz'):
os.remove(f)
# Compress all files
for input_file_name in glob.glob('output/' + settings.output_folder + '/ttl/*.ttl'):
with open(input_file_name, 'rb') as input_file:
with gzip.open('output/' + settings.output_folder + '/gz/' + input_file_name.split('/')[-1] + '.gz', 'wb') as output_file:
output_file.writelines(input_file)
# Print some progress
sys.stdout.write('.')
sys.stdout.flush()
# Print some final statistics
number_of_files = len(os.listdir('output/' + settings.output_folder + '/gz'))
if number_of_files != 1:
plural = 's'
else:
plural = ''
print('\nCreated ' + str(number_of_files) + ' .gz file' + plural + ' in ./' + 'output/' + settings.output_folder + '/gz')
| mit | -7,322,916,570,662,439,000 | 37.111111 | 199 | 0.551816 | false |
johnbachman/belpy | indra/sources/sofia/api.py | 1 | 5047 | import json
import time
import openpyxl
import requests
from indra.config import get_config
from .processor import SofiaJsonProcessor, SofiaExcelProcessor
def process_table(fname):
"""Return processor by processing a given sheet of a spreadsheet file.
Parameters
----------
fname : str
The name of the Excel file (typically .xlsx extension) to process
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute.
"""
book = openpyxl.load_workbook(fname, read_only=True)
try:
rel_sheet = book['Relations']
except Exception as e:
rel_sheet = book['Causal']
event_sheet = book['Events']
entities_sheet = book['Entities']
sp = SofiaExcelProcessor(rel_sheet.rows, event_sheet.rows,
entities_sheet.rows)
sp.extract_relations(rel_sheet.rows)
sp.extract_events(event_sheet.rows, rel_sheet.rows)
return sp
def process_text(text, out_file='sofia_output.json', auth=None):
"""Return processor by processing text given as a string.
Parameters
----------
text : str
A string containing the text to be processed with Sofia.
out_file : Optional[str]
The path to a file to save the reader's output into.
Default: sofia_output.json
auth : Optional[list]
A username/password pair for the Sofia web service. If not given,
the SOFIA_USERNAME and SOFIA_PASSWORD values are loaded from either
the INDRA config or the environment.
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute. If the API did not process
the text, None is returned.
"""
text_json = {'text': text}
if not auth:
user, password = _get_sofia_auth()
else:
user, password = auth
if not user or not password:
raise ValueError('Could not use SOFIA web service since'
' authentication information is missing. Please'
' set SOFIA_USERNAME and SOFIA_PASSWORD in the'
' INDRA configuration file or as environmental'
' variables.')
json_response, status_code, process_status = \
_text_processing(text_json=text_json, user=user, password=password)
# Check response status
if process_status != 'Done' or status_code != 200:
return None
# Cache reading output
if out_file:
with open(out_file, 'w') as fh:
json.dump(json_response, fh, indent=1)
return process_json(json_response)
def process_json(json_obj):
"""Return processor by processing a JSON object returned by Sofia.
Parameters
----------
json_obj : json
A JSON object containing extractions from Sofia.
Returns
-------
sp : indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute.
"""
sp = SofiaJsonProcessor(json_obj)
sp.extract_relations(json_obj)
sp.extract_events(json_obj)
return sp
def process_json_file(fname):
"""Return processor by processing a JSON file produced by Sofia.
Parameters
----------
fname : str
The name of the JSON file to process
Returns
-------
indra.sources.sofia.processor.SofiaProcessor
A SofiaProcessor object which has a list of extracted INDRA
Statements as its statements attribute.
"""
with open(fname, 'r') as fh:
jd = json.load(fh)
return process_json(jd)
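# Minimal usage sketch (illustrative):
# sp = process_json_file('sofia_output.json')
# stmts = sp.statements # the extracted INDRA Statements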
def _get_sofia_auth():
sofia_username = get_config('SOFIA_USERNAME')
sofia_password = get_config('SOFIA_PASSWORD')
return sofia_username, sofia_password
def _sofia_api_post(api, option, json, auth):
return requests.post(url=api + option, json=json, auth=auth)
def _text_processing(text_json, user, password):
assert len(text_json) > 0
sofia_api = 'https://sofia.worldmodelers.com'
auth = (user, password)
# Initialize process
resp = _sofia_api_post(api=sofia_api, option='/process_text',
json=text_json, auth=auth)
res_json = resp.json()
# Get status
status = _sofia_api_post(api=sofia_api, option='/status',
json=res_json, auth=auth)
# Check status every two seconds
while status.json()['Status'] == 'Processing':
time.sleep(2.0)
status = _sofia_api_post(api=sofia_api, option='/status',
json=res_json, auth=auth)
results = _sofia_api_post(api=sofia_api, option='/results',
json=res_json, auth=auth)
status_code = results.status_code
process_status = status.json()['Status']
return results.json(), status_code, process_status
| mit | -2,170,171,834,669,639,000 | 30.54375 | 75 | 0.632059 | false |
calebmadrigal/algorithms-in-python | heap.py | 1 | 3668 | """heap.py - implementation of a heap priority queue. """
__author__ = "Caleb Madrigal"
__date__ = "2015-02-17"
import math
from enum import Enum
from autoresizelist import AutoResizeList
class HeapType(Enum):
maxheap = 1
minheap = 2
class Heap:
def __init__(self, initial_data=None, heap_type=HeapType.maxheap):
self.heap_type = heap_type
if heap_type == HeapType.maxheap:
self.comparator = lambda x, y: x > y
else:
self.comparator = lambda x, y: x < y
self.data = AutoResizeList()
if initial_data is not None:
self.build_heap(initial_data)
self._size = len(self.data)
def _left_child(self, index):
return 2*index + 1
def _right_child(self, index):
return 2*index + 2
def _parent(self, index):
return math.floor((index - 1) / 2.0)
def _is_root(self, index):
return index == 0
def _swap(self, i1, i2):
self.data[i1], self.data[i2] = self.data[i2], self.data[i1]
def build_heap(self, initial_data):
for i in initial_data:
self.data.prepend(i)
self.heap_down(0)
def heap_up(self, index):
# If we are at the root, return - we are done
if self._is_root(index):
return
# Else, compare the current node with the parent node, and if this node should be higher
# than the parent node, then swap and recursively call on the parent index
parent_index = self._parent(index)
if self.comparator(self.data[index], self.data[parent_index]):
self._swap(index, parent_index)
self.heap_up(parent_index)
def heap_down(self, index):
left_index = self._left_child(index)
right_index = self._right_child(index)
try:
left = self.data[left_index]
except IndexError:
left = None
try:
right = self.data[right_index]
except IndexError:
right = None
# Find the highest-priority child (the largest for a max-heap, the smallest for a min-heap)
largest_child = left
largest_child_index = left_index
if left is not None and right is not None:
if self.comparator(right, left):
largest_child = right
largest_child_index = right_index
elif right is not None:
largest_child = right
largest_child_index = right_index
# If the largest child is not None and is higher priority than the current, then swap
# and recursively call on on the child index
if largest_child is not None and self.comparator(largest_child, self.data[index]):
self._swap(index, largest_child_index)
self.heap_down(largest_child_index)
def push(self, item):
insert_index = self._size # Insert at the end
self._size += 1
self.data[insert_index] = item
self.heap_up(insert_index)
return self
def peek(self):
return self.data[0]
def pop(self):
if len(self.data) < 1 or self.data[0] is None:
return None
# Take item from the root
item = self.data[0]
# Move the bottom-most, right-most item to the root
self.data[0] = self.data[self._size-1]
self.data[self._size-1] = None
self._size -= 1
self.heap_down(0)
return item
def size(self):
return self._size
def __repr__(self):
return str(self.data)
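# Usage sketch (illustrative, relies on AutoResizeList from this repo):
# h = Heap([5, 1, 9], heap_type=HeapType.maxheap)
# h.push(7)
# h.peek() # -> 9
# h.pop() # -> 9, then 7 on the next pop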
if __name__ == "__main__":
import unittest
testsuite = unittest.TestLoader().discover('test', pattern="*heap*")
unittest.TextTestRunner(verbosity=1).run(testsuite)
| mit | -855,685,561,109,325,200 | 27.65625 | 96 | 0.581516 | false |
eugenesan/postman | postman/FlickrWorker.py | 1 | 2057 | from BaseWorker import *
class FlickrWorker(BaseWorker):
key = '391fb6763fe0b5011cf52638067e0fed'
secret = '369f46a112452186'
def __init__(self, parent = None):
super(FlickrWorker, self).__init__(parent)
def run(self):
self.progressSignal.emit(self.stampConfig)
if not self.filesModel.count():
return
progressStep = 1.0 / self.filesModel.count()
flickrInstance = self.stampConfig['flickrInst']
for i in range(self.filesModel.count()):
self.status = 'Uploading: %s' % self.filesModel.filesList[i].filePath
self.statusSignal.emit(self.stampConfig)
imageFilename = self.filesModel.filesList[i].filePath.encode('UTF-8','ignore')
imageTitle = self.filesModel.filesList[i].title.encode('UTF-8','ignore')
imageDescription = self.filesModel.filesList[i].description.encode('UTF-8','ignore')
imageTags = self.filesModel.filesList[i].tags.encode('UTF-8','ignore')
# build space delimited tags string
tagList = ['"%s"' % tag.strip() for tag in imageTags.split(',')]
tagsString = ' '.join(tagList)
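# Retry the upload a few times before giving up; only when every attempt for a
# file fails do we mark the whole job as failed and stop.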
for r in range(self.retry):
try:
flickrInstance.upload(filename=imageFilename, title=imageTitle, description=imageDescription, tags=tagsString)
break
except Exception:
if r == self.retry - 1:
self.status = 'Failed'
self.statusSignal.emit(self.stampConfig)
self.doneSignal.emit(self.stampConfig)
return
self.progress += progressStep
self.progressSignal.emit(self.stampConfig)
self.status = 'Done'
self.result = True
self.statusSignal.emit(self.stampConfig)
self.doneSignal.emit(self.stampConfig)
| gpl-3.0 | -6,879,265,459,719,833,000 | 36.12963 | 130 | 0.557122 | false |
emmericp/dpdk | app/test/autotest_runner.py | 1 | 14419 | # SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
# The main logic behind running autotests in parallel
from __future__ import print_function
import StringIO
import csv
from multiprocessing import Pool, Queue
import pexpect
import re
import subprocess
import sys
import time
import glob
import os
# wait for prompt
def wait_prompt(child):
try:
child.sendline()
result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
timeout=120)
except:
return False
if result == 0:
return True
else:
return False
# get all valid NUMA nodes
def get_numa_nodes():
return [
int(
re.match(r"node(\d+)", os.path.basename(node))
.group(1)
)
for node in glob.glob("/sys/devices/system/node/node*")
]
# find first (or any, really) CPU on a particular node, will be used to spread
# processes around NUMA nodes to avoid exhausting memory on particular node
def first_cpu_on_node(node_nr):
cpu_path = glob.glob("/sys/devices/system/node/node%d/cpu*" % node_nr)[0]
cpu_name = os.path.basename(cpu_path)
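# e.g. cpu_name == 'cpu8' -> the function returns 8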
m = re.match(r"cpu(\d+)", cpu_name)
return int(m.group(1))
pool_child = None # per-process child
# we initialize each worker with a queue because we need per-pool unique
# command-line arguments, but we cannot do different arguments in an initializer
# because the API doesn't allow per-worker initializer arguments. so, instead,
# we will initialize with a shared queue, and dequeue command-line arguments
# from this queue
def pool_init(queue, result_queue):
global pool_child
cmdline, prefix = queue.get()
start_time = time.time()
name = ("Start %s" % prefix) if prefix != "" else "Start"
# use default prefix if no prefix was specified
prefix_cmdline = "--file-prefix=%s" % prefix if prefix != "" else ""
# append prefix to cmdline
cmdline = "%s %s" % (cmdline, prefix_cmdline)
# prepare logging of init
startuplog = StringIO.StringIO()
# run test app
try:
print("\n%s %s\n" % ("=" * 20, prefix), file=startuplog)
print("\ncmdline=%s" % cmdline, file=startuplog)
pool_child = pexpect.spawn(cmdline, logfile=startuplog)
# wait for target to boot
if not wait_prompt(pool_child):
pool_child.close()
result = tuple((-1,
"Fail [No prompt]",
name,
time.time() - start_time,
startuplog.getvalue(),
None))
pool_child = None
else:
result = tuple((0,
"Success",
name,
time.time() - start_time,
startuplog.getvalue(),
None))
except:
result = tuple((-1,
"Fail [Can't run]",
name,
time.time() - start_time,
startuplog.getvalue(),
None))
pool_child = None
result_queue.put(result)
# run a test
# each result tuple in results list consists of:
# result value (0 or -1)
# result string
# test name
# total test run time (double)
# raw test log
# test report (if not available, should be None)
#
# this function needs to be outside AutotestRunner class because otherwise Pool
# won't work (or rather it will require quite a bit of effort to make it work).
def run_test(target, test):
global pool_child
if pool_child is None:
return -1, "Fail [No test process]", test["Name"], 0, "", None
# create log buffer for each test
# in multiprocessing environment, the logging would be
# interleaved and will create a mess, hence the buffering
logfile = StringIO.StringIO()
pool_child.logfile = logfile
# make a note when the test started
start_time = time.time()
try:
# print test name to log buffer
print("\n%s %s\n" % ("-" * 20, test["Name"]), file=logfile)
# run test function associated with the test
result = test["Func"](pool_child, test["Command"])
# make a note when the test was finished
end_time = time.time()
log = logfile.getvalue()
# append test data to the result tuple
result += (test["Name"], end_time - start_time, log)
# call report function, if any defined, and supply it with
# target and complete log for test run
if test["Report"]:
report = test["Report"](target, log)
# append report to results tuple
result += (report,)
else:
# report is None
result += (None,)
except:
# make a note when the test crashed
end_time = time.time()
# mark test as failed
result = (-1, "Fail [Crash]", test["Name"],
end_time - start_time, logfile.getvalue(), None)
# return test results
return result
# class representing an instance of autotests run
class AutotestRunner:
cmdline = ""
parallel_test_groups = []
non_parallel_test_groups = []
logfile = None
csvwriter = None
target = ""
start = None
n_tests = 0
fails = 0
log_buffers = []
blacklist = []
whitelist = []
def __init__(self, cmdline, target, blacklist, whitelist, n_processes):
self.cmdline = cmdline
self.target = target
self.blacklist = blacklist
self.whitelist = whitelist
self.skipped = []
self.parallel_tests = []
self.non_parallel_tests = []
self.n_processes = n_processes
self.active_processes = 0
# parse the binary for available test commands
binary = cmdline.split()[0]
stripped = 'not stripped' not in \
subprocess.check_output(['file', binary])
if not stripped:
symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
self.avail_cmds = re.findall('test_register_(\w+)', symbols)
else:
self.avail_cmds = None
# log file filename
logfile = "%s.log" % target
csvfile = "%s.csv" % target
self.logfile = open(logfile, "w")
csvfile = open(csvfile, "w")
self.csvwriter = csv.writer(csvfile)
# prepare results table
self.csvwriter.writerow(["test_name", "test_result", "result_str"])
# set up cmdline string
def __get_cmdline(self, cpu_nr):
cmdline = ("taskset -c %i " % cpu_nr) + self.cmdline
return cmdline
def __process_result(self, result):
# unpack result tuple
test_result, result_str, test_name, \
test_time, log, report = result
# get total run time
cur_time = time.time()
total_time = int(cur_time - self.start)
# print results, test run time and total time since start
result = ("%s:" % test_name).ljust(30)
result += result_str.ljust(29)
result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
# don't print out total time every line, it's the same anyway
print(result + "[%02dm %02ds]" % (total_time / 60, total_time % 60))
# if test failed and it wasn't a "start" test
if test_result < 0:
self.fails += 1
# collect logs
self.log_buffers.append(log)
# create report if it exists
if report:
try:
f = open("%s_%s_report.rst" %
(self.target, test_name), "w")
except IOError:
print("Report for %s could not be created!" % test_name)
else:
with f:
f.write(report)
# write test result to CSV file
self.csvwriter.writerow([test_name, test_result, result_str])
    # this function checks an individual test and decides if it should be in
    # the group by comparing it against the whitelist/blacklist. it also checks
    # if the test is compiled into the binary, and marks it as skipped if necessary
def __filter_test(self, test):
test_cmd = test["Command"]
test_id = test_cmd
# dump tests are specified in full e.g. "Dump_mempool"
if "_autotest" in test_id:
test_id = test_id[:-len("_autotest")]
# filter out blacklisted/whitelisted tests
if self.blacklist and test_id in self.blacklist:
return False
if self.whitelist and test_id not in self.whitelist:
return False
# if test wasn't compiled in, remove it as well
if self.avail_cmds and test_cmd not in self.avail_cmds:
result = 0, "Skipped [Not compiled]", test_id, 0, "", None
self.skipped.append(tuple(result))
return False
return True
def __run_test_group(self, test_group, worker_cmdlines):
group_queue = Queue()
init_result_queue = Queue()
for proc, cmdline in enumerate(worker_cmdlines):
prefix = "test%i" % proc if len(worker_cmdlines) > 1 else ""
group_queue.put(tuple((cmdline, prefix)))
# create a pool of worker threads
# we will initialize child in the initializer, and we don't need to
# close the child because when the pool worker gets destroyed, child
# closes the process
pool = Pool(processes=len(worker_cmdlines),
initializer=pool_init,
initargs=(group_queue, init_result_queue))
results = []
# process all initialization results
for _ in range(len(worker_cmdlines)):
self.__process_result(init_result_queue.get())
# run all tests asynchronously
for test in test_group:
result = pool.apply_async(run_test, (self.target, test))
results.append(result)
# tell the pool to stop all processes once done
pool.close()
# iterate while we have group execution results to get
while len(results) > 0:
# iterate over a copy to be able to safely delete results
# this iterates over a list of group results
for async_result in results[:]:
# if the thread hasn't finished yet, continue
if not async_result.ready():
continue
res = async_result.get()
self.__process_result(res)
# remove result from results list once we're done with it
results.remove(async_result)
# iterate over test groups and run tests associated with them
def run_all_tests(self):
# filter groups
self.parallel_tests = list(
filter(self.__filter_test,
self.parallel_tests)
)
self.non_parallel_tests = list(
filter(self.__filter_test,
self.non_parallel_tests)
)
parallel_cmdlines = []
# FreeBSD doesn't have NUMA support
numa_nodes = get_numa_nodes()
if len(numa_nodes) > 0:
for proc in range(self.n_processes):
# spread cpu affinity between NUMA nodes to have less chance of
# running out of memory while running multiple test apps in
# parallel. to do that, alternate between NUMA nodes in a round
# robin fashion, and pick an arbitrary CPU from that node to
# taskset our execution to
numa_node = numa_nodes[self.active_processes % len(numa_nodes)]
cpu_nr = first_cpu_on_node(numa_node)
parallel_cmdlines += [self.__get_cmdline(cpu_nr)]
# increase number of active processes so that the next cmdline
# gets a different NUMA node
self.active_processes += 1
else:
parallel_cmdlines = [self.cmdline] * self.n_processes
print("Running tests with %d workers" % self.n_processes)
# create table header
print("")
print("Test name".ljust(30) + "Test result".ljust(29) +
"Test".center(9) + "Total".center(9))
print("=" * 80)
if len(self.skipped):
print("Skipped autotests:")
# print out any skipped tests
for result in self.skipped:
# unpack result tuple
test_result, result_str, test_name, _, _, _ = result
self.csvwriter.writerow([test_name, test_result, result_str])
t = ("%s:" % test_name).ljust(30)
t += result_str.ljust(29)
t += "[00m 00s]"
print(t)
# make a note of tests start time
self.start = time.time()
# whatever happens, try to save as much logs as possible
try:
if len(self.parallel_tests) > 0:
print("Parallel autotests:")
self.__run_test_group(self.parallel_tests, parallel_cmdlines)
if len(self.non_parallel_tests) > 0:
print("Non-parallel autotests:")
self.__run_test_group(self.non_parallel_tests, [self.cmdline])
# get total run time
cur_time = time.time()
total_time = int(cur_time - self.start)
# print out summary
print("=" * 80)
print("Total run time: %02dm %02ds" % (total_time / 60,
total_time % 60))
if self.fails != 0:
print("Number of failed tests: %s" % str(self.fails))
# write summary to logfile
self.logfile.write("Summary\n")
self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
self.logfile.write("Failed tests: ".ljust(
15) + "%i\n" % self.fails)
except:
print("Exception occurred")
print(sys.exc_info())
self.fails = 1
# drop logs from all executions to a logfile
for buf in self.log_buffers:
self.logfile.write(buf.replace("\r", ""))
return self.fails
| gpl-2.0 | -3,175,455,672,675,276,000 | 32.377315 | 80 | 0.559956 | false |
unkyulee/elastic-cms | src/web/modules/post/services/config.py | 1 | 2826 | import web.util.tools as tools
import os
from web import app
import lib.es as es
def get(p):
# get host and index from the global config
h = tools.get_conf(p['host'], p['navigation']['id'], 'host', 'http://localhost:9200')
n = tools.get_conf(p['host'], p['navigation']['id'], 'index', '')
return {
'name': get_conf(h, n, 'name', ''),
'description': get_conf(h, n, 'description', ''),
'host': h,
'index': n,
'upload_dir':
get_conf(h, n, 'upload_dir',
os.path.join( app.config.get('BASE_DIR'), 'uploads' )
),
'allowed_exts': get_conf(h, n, 'allowed_exts',''),
'page_size': get_conf(h, n, 'page_size', '10'),
'query': get_conf(h, n, 'query', '*'),
'sort_field': get_conf(h, n, 'sort_field', '_score'),
'sort_dir': get_conf(h, n, 'sort_dir', 'desc'),
'top': get_conf(h, n, 'top', ''),
'footer': get_conf(h, n, 'footer', ''),
'side': get_conf(h, n, 'side', ''),
'content_header': get_conf(h, n, 'content_header', ''),
'content_footer': get_conf(h, n, 'content_footer', ''),
'intro': get_conf(h, n, 'intro', ''),
'search_query': get_conf(h, n, 'search_query', ''),
'search_item_template': get_conf(h, n, 'search_item_template', ''),
'keep_history': get_conf(h, n, 'keep_history', 'Yes'),
}
def set(p):
# get host, index
host = p['c']['host']
if not host:
host = tools.get('host')
index = p['c']['index']
if not index:
index = tools.get('index')
# get host and index from the global config
tools.set_conf(p['host'], p['navigation']['id'], 'host', host)
tools.set_conf(p['host'], p['navigation']['id'], 'index', index)
# set local config
if p['c']['index']: # save local config only when index is already created
set_conf(host, index, 'name', tools.get('name'))
set_conf(host, index, 'description', tools.get('description'))
set_conf(host, index, 'upload_dir', tools.get('upload_dir'))
set_conf(host, index, 'allowed_exts', tools.get('allowed_exts'))
set_conf(host, index, 'page_size', tools.get('page_size'))
set_conf(host, index, 'query', tools.get('query'))
set_conf(host, index, 'sort_field', tools.get('sort_field'))
set_conf(host, index, 'sort_dir', tools.get('sort_dir'))
set_conf(host, index, 'keep_history', tools.get('keep_history'))
def get_conf(host, index, name, default):
ret = es.get(host, index, "config", name)
return ret.get('value') if ret and ret.get('value') else default
def set_conf(host, index, name, value):
config = {
'name': name,
'value': value
}
es.update(host, index, "config", name, config)
es.flush(host, index)
| mit | -7,525,611,961,569,722,000 | 37.189189 | 89 | 0.54954 | false |
GutenkunstLab/SloppyCell | test/test_FixedPoints.py | 1 | 3610 | import unittest
import scipy
from SloppyCell.ReactionNetworks import *
lorenz = Network('lorenz')
lorenz.add_compartment('basic')
lorenz.add_species('x', 'basic', 0.5)
lorenz.add_species('y', 'basic', 0.5)
lorenz.add_species('z', 'basic', 0.5)
lorenz.add_parameter('sigma', 1.0)
lorenz.add_parameter('r', 2.0)
lorenz.add_parameter('b', 2.0)
lorenz.add_rate_rule('x', 'sigma*(y-x)')
lorenz.add_rate_rule('y', 'r*x - y - x*z')
lorenz.add_rate_rule('z', 'x*y - b*z')
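# With sigma=1, r=2, b=2 the Lorenz system has fixed points at
# (+/-sqrt(b*(r-1)), +/-sqrt(b*(r-1)), r-1) = (+/-sqrt(2), +/-sqrt(2), 1) and at the
# origin; the tests below check that the fixed-point finder converges to these values.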
class test_fixedpoints(unittest.TestCase):
def test_basic(self):
""" Test basic fixed-point finding """
net = lorenz.copy('test')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=False)
# This should find the fixed-point [sqrt(2), sqrt(2), 1]
self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on basic 1,0.')
self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on basic 1,1.')
self.assertAlmostEqual(fp[2], 1, 6, 'Failed on basic 1,2.')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[-0.1,-0.1,-0.1],
with_logs=False)
# This should find the fixed-point [0, 0, 0]
self.assertAlmostEqual(fp[0], 0, 6, 'Failed on basic 2,0.')
self.assertAlmostEqual(fp[1], 0, 6, 'Failed on basic 2,1.')
self.assertAlmostEqual(fp[2], 0, 6, 'Failed on basic 2,2.')
def test_withlogs(self):
""" Test fixed-point finding with logs """
net = lorenz.copy('test')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=True)
# This should find the fixed-point [sqrt(2), sqrt(2), 1]
self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on logs 1,0.')
self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on logs 1,1.')
self.assertAlmostEqual(fp[2], 1, 6, 'Failed on logs 1,2.')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1],
with_logs=True)
# This should find the fixed-point [0, 0, 0]
self.assertAlmostEqual(fp[0], 0, 6, 'Failed on logs 2,0.')
self.assertAlmostEqual(fp[1], 0, 6, 'Failed on logs 2,1.')
self.assertAlmostEqual(fp[2], 0, 6, 'Failed on logs 2,2.')
def test_stability(self):
net = lorenz.copy('test')
# The sqrt(b*(r-1)), sqrt(b*(r-1)), r-1 fixed point is stable for r < rH
# Strogatz, Nonlinear Dynamics and Chaos (p. 316)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1],
stability=True)
self.assertEqual(stable, -1, 'Failed to classify stable fixed point')
# (0,0,0) is a saddle here
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.01,0.01,0.01],
stability=True)
self.assertEqual(stable, 0, 'Failed to classify saddle')
# (0,0,0) is a stable node here
net.set_var_ic('r', 0.5)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1],
stability=True)
self.assertEqual(stable, -1, 'Failed to classify stable fixed point')
# Now make the far fixed point a saddle...
net.set_var_ic('sigma', 6.0)
net.set_var_ic('r', 25)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[10,10,10],
stability=True)
self.assertEqual(stable, 0, 'Failed to classify saddle')
suite = unittest.makeSuite(test_fixedpoints)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,611,129,762,351,684,600 | 43.567901 | 80 | 0.565097 | false |
tcarmelveilleux/IcarusAltimeter | Analysis/altitude_analysis.py | 1 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 19:34:31 2015
@author: Tennessee
"""
import numpy as np
import matplotlib.pyplot as plt
def altitude(atm_hpa, sea_level_hpa):
return 44330 * (1.0 - np.power(atm_hpa / sea_level_hpa, 0.1903))
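# Worked example of the barometric formula above (approximate, assuming a
# standard atmosphere): altitude(898.75, 1013.25) is roughly 1000 m, since
# 44330 * (1.0 - (898.75 / 1013.25) ** 0.1903) evaluates to about 1000.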
def plot_alt():
default_msl = 101300.0
pressure = np.linspace(97772.58 / 100.0, 79495.0 / 100.0, 2000)
alt_nominal = altitude(pressure, default_msl) - altitude(97772.58 / 100.0, default_msl)
alt_too_high = altitude(pressure, default_msl + (1000 / 100.0)) - altitude(97772.58 / 100.0, default_msl + (1000 / 100.0))
alt_too_low = altitude(pressure, default_msl - (1000 / 100.0)) - altitude(97772.58 / 100.0, default_msl - (1000 / 100.0))
f1 = plt.figure()
ax = f1.gca()
ax.plot(pressure, alt_nominal, "b-", label="nom")
ax.plot(pressure, alt_too_high, "r-", label="high")
ax.plot(pressure, alt_too_low, "g-", label="low")
ax.legend()
f1.show()
f2 = plt.figure()
ax = f2.gca()
ax.plot(pressure, alt_too_high - alt_nominal, "r-", label="high")
ax.plot(pressure, alt_too_low - alt_nominal, "g-", label="low")
ax.legend()
f2.show()
| mit | 4,210,792,572,743,000,600 | 26.953488 | 126 | 0.58985 | false |
aESeguridad/GERE | venv/lib/python2.7/site-packages/flask_weasyprint/__init__.py | 1 | 7726 | # coding: utf8
"""
flask_weasyprint
~~~~~~~~~~~~~~~~
Flask-WeasyPrint: Make PDF in your Flask app with WeasyPrint.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
import weasyprint
from flask import request, current_app
from werkzeug.test import Client, ClientRedirectError
from werkzeug.wrappers import Response
try:
import urlparse
except ImportError: # Python 3
from urllib import parse as urlparse
try:
unicode
except NameError: # Python 3
unicode = str
VERSION = '0.5'
__all__ = ['VERSION', 'make_flask_url_dispatcher', 'make_url_fetcher',
'HTML', 'CSS', 'render_pdf']
DEFAULT_PORTS = frozenset([('http', 80), ('https', 443)])
def make_flask_url_dispatcher():
"""Return an URL dispatcher based on the current :ref:`request context
<flask:request-context>`.
You generally don’t need to call this directly.
The context is used when the dispatcher is first created but not
afterwards. It is not required after this function has returned.
Dispatch to the context’s app URLs below the context’s root URL.
If the app has a ``SERVER_NAME`` :ref:`config <flask:config>`, also
accept URLs that have that domain name or a subdomain thereof.
"""
def parse_netloc(netloc):
"""Return (hostname, port)."""
parsed = urlparse.urlsplit('http://' + netloc)
return parsed.hostname, parsed.port
app = current_app._get_current_object()
root_path = request.script_root
server_name = app.config.get('SERVER_NAME')
if server_name:
hostname, port = parse_netloc(server_name)
def accept(url):
"""Accept any URL scheme; also accept subdomains."""
return url.hostname is not None and (
url.hostname == hostname or
url.hostname.endswith('.' + hostname))
else:
scheme = request.scheme
hostname, port = parse_netloc(request.host)
if (scheme, port) in DEFAULT_PORTS:
port = None
def accept(url):
"""Do not accept subdomains."""
return (url.scheme, url.hostname) == (scheme, hostname)
def dispatch(url_string):
if isinstance(url_string, bytes):
url_string = url_string.decode('utf8')
url = urlparse.urlsplit(url_string)
url_port = url.port
if (url.scheme, url_port) in DEFAULT_PORTS:
url_port = None
if accept(url) and url_port == port and url.path.startswith(root_path):
netloc = url.netloc
if url.port and not url_port:
netloc = netloc.rsplit(':', 1)[0] # remove default port
base_url = '%s://%s%s' % (url.scheme, netloc, root_path)
path = url.path[len(root_path):]
if url.query:
path += '?' + url.query
# Ignore url.fragment
return app, base_url, path
return dispatch
def make_url_fetcher(dispatcher=None,
next_fetcher=weasyprint.default_url_fetcher):
"""Return an function suitable as a ``url_fetcher`` in WeasyPrint.
You generally don’t need to call this directly.
If ``dispatcher`` is not provided, :func:`make_flask_url_dispatcher`
is called to get one. This requires a request context.
    Otherwise, it must be a callable that takes a URL and returns either
    ``None`` or a ``(wsgi_callable, base_url, path)`` tuple. For ``None``,
    ``next_fetcher`` is used. (By default, fetch normally over the network.)
    For a tuple the request is made at the WSGI level.
``wsgi_callable`` must be a Flask application or another WSGI callable.
``base_url`` is the root URL for the application while ``path``
is the path within the application.
Typically ``base_url + path`` is equal or equivalent to the passed URL.
"""
if dispatcher is None:
dispatcher = make_flask_url_dispatcher()
def flask_url_fetcher(url):
redirect_chain = set()
while 1:
result = dispatcher(url)
if result is None:
return next_fetcher(url)
app, base_url, path = result
client = Client(app, response_wrapper=Response)
if isinstance(path, unicode):
                # TODO: double-check this. Apparently Werkzeug %-unquotes bytes
# but not Unicode URLs. (IRI vs. URI or something.)
path = path.encode('utf8')
response = client.get(path, base_url=base_url)
if response.status_code == 200:
return dict(
string=response.data,
mime_type=response.mimetype,
encoding=response.charset,
redirected_url=url)
# The test client can follow redirects, but do it ourselves
# to get access to the redirected URL.
elif response.status_code in (301, 302, 303, 305, 307):
redirect_chain.add(url)
url = response.location
if url in redirect_chain:
raise ClientRedirectError('loop detected')
else:
raise ValueError('Flask-WeasyPrint got HTTP status %s for %s%s'
% (response.status, base_url, path))
return flask_url_fetcher
def _wrapper(class_, *args, **kwargs):
if args:
guess = args[0]
args = args[1:]
else:
guess = kwargs.pop('guess', None)
if guess is not None and not hasattr(guess, 'read'):
# Assume a (possibly relative) URL
guess = urlparse.urljoin(request.url, guess)
if 'string' in kwargs and 'base_url' not in kwargs:
# Strings do not have an "intrinsic" base URL, use the request context.
kwargs['base_url'] = request.url
kwargs['url_fetcher'] = make_url_fetcher()
return class_(guess, *args, **kwargs)
def HTML(*args, **kwargs):
"""Like `weasyprint.HTML()
<http://weasyprint.org/using/#the-weasyprint-html-class>`_ but:
* :func:`make_url_fetcher` is used to create an ``url_fetcher``
* If ``guess`` is not a file object, it is an URL relative to the current
request context.
This means that you can just pass a result from :func:`flask.url_for`.
* If ``string`` is passed, ``base_url`` defaults to the current
request’s URL.
This requires a Flask request context.
"""
return _wrapper(weasyprint.HTML, *args, **kwargs)
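# Minimal usage sketch for HTML() inside a request context (the '/hello/' route
# and the inline string below are assumptions, not part of this module):
#     HTML('/hello/')              # relative URL resolved against the current request
#     HTML(string='<p>Hi</p>')     # base_url defaults to the current request's URL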
def CSS(*args, **kwargs):
return _wrapper(weasyprint.CSS, *args, **kwargs)
CSS.__doc__ = HTML.__doc__.replace('HTML', 'CSS').replace('html', 'css')
def render_pdf(html, stylesheets=None, download_filename=None):
"""Render a PDF to a response with the correct ``Content-Type`` header.
:param html:
Either a :class:`weasyprint.HTML` object or an URL to be passed
to :func:`flask_weasyprint.HTML`. The latter case requires
a request context.
:param stylesheets:
A list of user stylesheets, passed to
:meth:`~weasyprint.HTML.write_pdf`
:param download_filename:
        If provided, the ``Content-Disposition`` header is set so that most
        web browsers will show the "Save as…" dialog with the value as the
default filename.
:returns: a :class:`flask.Response` object.
"""
if not hasattr(html, 'write_pdf'):
html = HTML(html)
pdf = html.write_pdf(stylesheets=stylesheets)
response = current_app.response_class(pdf, mimetype='application/pdf')
if download_filename:
response.headers.add('Content-Disposition', 'attachment',
filename=download_filename)
return response
| gpl-3.0 | 6,181,784,262,939,823,000 | 35.046729 | 79 | 0.612004 | false |
ThomasHabets/python-pyhsm | examples/yhsm-monitor-exit.py | 1 | 1480 | #!/usr/bin/env python
#
# Copyright (c) 2011, Yubico AB
# All rights reserved.
#
# Utility to send a MONITOR_EXIT command to a YubiHSM.
#
# MONITOR_EXIT only works if the YubiHSM is in debug mode. It would
# be a security problem to allow remote reconfiguration of a production
# YubiHSM.
#
# If your YubiHSM is not in debug mode, enter configuration mode by
# pressing the small button while inserting the YubiHSM in the USB port.
#
import sys
sys.path.append('Lib');
import pyhsm
device = "/dev/ttyACM0"
# simplified arguments parsing
d_argv = dict.fromkeys(sys.argv)
debug = d_argv.has_key('-v')
raw = d_argv.has_key('-R')
if d_argv.has_key('-h'):
sys.stderr.write("Syntax: %s [-v] [-R]\n" % (sys.argv[0]))
sys.stderr.write("\nOptions :\n")
sys.stderr.write(" -v verbose\n")
sys.stderr.write(" -R raw MONITOR_EXIT command\n")
sys.exit(0)
res = 0
try:
s = pyhsm.base.YHSM(device=device, debug = debug)
if raw:
# No initialization
s.write('\x7f\xef\xbe\xad\xba\x10\x41\x52\x45')
else:
print "Version: %s" % s.info()
s.monitor_exit()
print "Exited monitor-mode (maybe)"
if raw:
print "s.stick == %s" % s.stick
print "s.stick.ser == %s" % s.stick.ser
for _ in xrange(3):
s.stick.ser.write("\n")
line = s.stick.ser.readline()
print "%s" % (line)
except pyhsm.exception.YHSM_Error, e:
print "ERROR: %s" % e
res = 1
sys.exit(res)
| bsd-2-clause | 5,347,888,154,988,822,000 | 24.084746 | 72 | 0.618243 | false |
darvin/qtdjango | src/qtdjango/settings.py | 1 | 5503 | # -*- coding: utf-8 -*-
from qtdjango.helpers import test_connection
__author__ = 'darvin'
from qtdjango.connection import *
__author__ = 'darvin'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class BooleanEdit(QCheckBox):
def text(self):
return QVariant(self.checkState()).toString()
def setText(self, text):
self.setChecked(QVariant(text).toBool())
class SettingsDialog(QDialog):
widgets_table = [
# (name, caption, widget object, default value),
("address", u"Адрес сервера", QLineEdit, "http://127.0.0.1:8000"),
("api_path", u"Путь к api сервера", QLineEdit, "/api/"),
("server_package", u"Название пакета сервера", QLineEdit, "none"),
("login", u"Ваш логин", QLineEdit, ""),
("password", u"Ваш пароль", QLineEdit, ""),
("open_links_in_external_browser", \
u"Открывать ссылки из окна информации во внешнем браузере", BooleanEdit, True),
]
def __init__(self, parent=None, error_message=None, models_manager=None):
super(SettingsDialog, self).__init__(parent)
self.setWindowTitle(u"Настройки")
self.setModal(True)
self.formlayout = QFormLayout()
self.models_manager = models_manager
self.settings = QSettings()
self.message_widget = QLabel()
self.__widgets = []
for name, caption, widget_class, default in self.widgets_table:
self.__widgets.append((name, caption, widget_class(), default))
for name, caption, widget, default in self.__widgets:
self.formlayout.addRow(caption, widget)
widget.setText(self.settings.value(name, default).toString())
self.formlayout.addRow(self.message_widget)
if error_message is not None:
self.message(**error_message)
buttonBox = QDialogButtonBox(QDialogButtonBox.Save\
| QDialogButtonBox.Cancel |QDialogButtonBox.RestoreDefaults)
testButton = QPushButton(u"Тестировать соединение")
buttonBox.addButton(testButton, QDialogButtonBox.ActionRole)
testButton.clicked.connect(self.test)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
buttonBox.button(QDialogButtonBox.RestoreDefaults).clicked.connect(self.restore)
self.formlayout.addRow(buttonBox)
self.setLayout(self.formlayout)
def accept(self):
if self.test():
for name, caption, widget, default in self.__widgets:
self.settings.setValue(name, widget.text())
self.models_manager.set_connection_params(\
self.get_value("address"), \
self.get_value("api_path"), \
self.get_value("login"),\
self.get_value("password"))
QDialog.accept(self)
def restore(self):
for name, caption, widget, default in self.__widgets:
widget.setText(default)
def message(self, text, error=False, works=False, fields=[]):
self.message_widget.setText(text)
if error:
color = "red"
elif works:
color = "green"
else:
color = "black"
css = "QLabel { color : %s; }" % color
self.message_widget.setStyleSheet(css)
for name, caption, widget, default in self.__widgets:
self.formlayout.labelForField(widget).setStyleSheet("")
if name in fields:
self.formlayout.labelForField(widget).setStyleSheet(css)
def get_value(self, name):
return unicode(self.settings.value(name).toString())
def test(self):
s = {}
for name, caption, widget, default in self.__widgets:
s[name] = unicode(widget.text())
try:
remote_version = test_connection(s["address"],s["api_path"],s["login"],s["password"])
import qtdjango
if qtdjango.__version__==remote_version:
self.message(text=u"Удаленный сервер настроен правильно!", works=True)
return True
elif remote_version is not None:
self.message(u"Версия системы на удаленном сервере отличается от\
версии системы на клиенте")
return True
except SocketError:
self.message(text=u"Ошибка при подключении к удаленному серверу", error=True, fields=\
("address",))
except ServerNotFoundError:
self.message(text=u"Удаленный сервер недоступен", error=True, fields=\
("address",))
except NotQtDjangoResponceError:
self.message(text=u"Не правильно настроен путь на удаленном сервере или \
удаленный сервер не является сервером системы", error=True, fields=\
("address","api_path"))
except AuthError:
self.message(text=u"Неверное имя пользователя или пароль", error=True, fields=\
("login","password"))
return False
| gpl-2.0 | 3,427,938,179,775,995,000 | 34.326389 | 98 | 0.599568 | false |
barbarahui/nuxeo-calisphere | s3stash/nxstash_mediajson.py | 1 | 4444 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
from s3stash.nxstashref import NuxeoStashRef
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
from deepharvest.mediajson import MediaJson
from dplaingestion.mappers.ucldc_nuxeo_mapper import UCLDCNuxeoMapper
import json
import s3stash.s3tools
FILENAME_FORMAT = "{}-media.json"
class NuxeoStashMediaJson(NuxeoStashRef):
''' create and stash media.json file for a nuxeo object '''
def __init__(self,
path,
bucket,
region,
pynuxrc='~/.pynuxrc',
replace=True,
**kwargs):
super(NuxeoStashMediaJson, self).__init__(path, bucket, region,
pynuxrc, replace, **kwargs)
self.dh = DeepHarvestNuxeo(
self.path, self.bucket, pynuxrc=self.pynuxrc)
self.mj = MediaJson()
self.filename = FILENAME_FORMAT.format(self.uid)
self.filepath = os.path.join(self.tmp_dir, self.filename)
self._update_report('filename', self.filename)
self._update_report('filepath', self.filepath)
def nxstashref(self):
return self.nxstash_mediajson()
def nxstash_mediajson(self):
''' create media.json file for object and stash on s3 '''
self._update_report('stashed', False)
# extract and transform metadata for parent obj and any components
parent_md = self._get_parent_metadata(self.metadata)
component_md = [
self._get_component_metadata(c)
for c in self.dh.fetch_components(self.metadata)
]
# create media.json file
media_json = self.mj.create_media_json(parent_md, component_md)
self._write_file(media_json, self.filepath)
# stash media.json file on s3
stashed, s3_report = s3stash.s3tools.s3stash(
self.filepath, self.bucket, self.filename, self.region,
'application/json', self.replace)
self._update_report('s3_stash', s3_report)
self._update_report('stashed', stashed)
self._remove_tmp()
return self.report
def _get_parent_metadata(self, obj):
''' assemble top-level (parent) object metadata '''
metadata = {}
metadata['label'] = obj['title']
# only provide id, href, format if Nuxeo Document has file attached
full_metadata = self.nx.get_metadata(uid=obj['uid'])
if self.dh.has_file(full_metadata):
metadata['id'] = obj['uid']
metadata['href'] = self.dh.get_object_download_url(full_metadata)
metadata['format'] = self.dh.get_calisphere_object_type(obj[
'type'])
if metadata['format'] == 'video':
metadata['dimensions'] = self.dh.get_video_dimensions(
full_metadata)
return metadata
def _get_component_metadata(self, obj):
''' assemble component object metadata '''
metadata = {}
full_metadata = self.nx.get_metadata(uid=obj['uid'])
metadata['label'] = obj['title']
metadata['id'] = obj['uid']
metadata['href'] = self.dh.get_object_download_url(full_metadata)
# extract additional ucldc metadata from 'properties' element
ucldc_md = self._get_ucldc_schema_properties(full_metadata)
for key, value in ucldc_md.iteritems():
metadata[key] = value
# map 'type'
metadata['format'] = self.dh.get_calisphere_object_type(obj['type'])
return metadata
def _get_ucldc_schema_properties(self, metadata):
''' get additional metadata as mapped by harvester '''
properties = {}
mapper = UCLDCNuxeoMapper(metadata)
mapper.map_original_record()
mapper.map_source_resource()
properties = mapper.mapped_data['sourceResource']
properties.update(mapper.mapped_data['originalRecord'])
return properties
def _write_file(self, content_dict, filepath):
""" convert dict to json and write to file """
content_json = json.dumps(
content_dict, indent=4, separators=(',', ': '), sort_keys=False)
with open(filepath, 'wb') as f:
f.write(content_json)
f.flush()
def main(argv=None):
pass
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | -6,770,037,951,671,929,000 | 32.413534 | 77 | 0.602835 | false |
lalitkumarj/NEXT-psych | gui/base/app_manager/PoolBasedTripletMDS/PoolBasedTripletMDS.py | 1 | 9004 | import csv, json, os, requests, sys
from wtforms import Form, FieldList, FloatField, FormField, TextField, IntegerField, SelectField, validators, RadioField
from jinja2 import Environment, FileSystemLoader
import copy
from base.models import Experiment
from flask import render_template
from base.settings import Config
from base.app_manager.app_resource_prototype import AppResourcePrototype
config = Config()
TEMPLATES_DIRECTORY = os.path.dirname(__file__)
loader = FileSystemLoader(TEMPLATES_DIRECTORY)
env = Environment(loader=loader)
class PoolBasedTripletMDS(AppResourcePrototype):
"""
PoolBased TripletMDS
Author: Lalit Jain
App resource for PoolBasedTripletMDS.
"""
def get_experiment_params(self, args=None):
"""
Return a form with specific params for the new experiment form.
"""
alg_list = ['RandomSampling', 'UncertaintySampling','CrowdKernel']
# Alg row: follow this post: http://stackoverflow.com/questions/11402627/how-to-get-a-build-a-form-with-repeated-elements-well
class AlgorithmDefinitionRowForm(Form):
alg_label = TextField('Algorithm Label')
alg_id = SelectField('Algorithm Id', choices=[(algorithm, algorithm) for algorithm in alg_list])
alg_proportion = FloatField('Algorithm Proportion')
class PoolBasedTripletMDSParamsForm(Form):
d = IntegerField('Dimension', validators=[validators.required()], default=2)
failure_probability = FloatField('Confidence Level', validators=[validators.required()], default=0.95)
algorithm_management = RadioField('Algorithm Management',
choices=[('fixed_proportions','Fixed Proportions'),
('pure_exploration','Pure Exploration'),
('explore_exploit','Explore Exploit')],
default='fixed_proportions')
participant_to_algorithm_management = RadioField('Participant to Algorithm Management',
choices=[('one_to_many','One-to-many'),
('one_to_one','One-to-one')],
default='one_to_many')
# process the experiment parameters
def process_experiment_params(self):
return True
# List field of the rows of algorithm labels and alg id's
alg_rows = FieldList(FormField(AlgorithmDefinitionRowForm))
template = env.get_template("new_experiment_params.html")
return template, PoolBasedTripletMDSParamsForm
def get_experiment_dashboard(self, args=None):
"""
Return template with embedded widgets/plots/data for the dashboard.
"""
template = env.get_template("experiment_dashboard.html")
html = render_template(template)
return html
def get_formatted_participant_data(self, current_experiment, args=None):
"""
Return formatted participant logs that are app specific.
"""
# Use frontend base local url
url = "http://"+config.NEXT_BACKEND_HOST+":"+config.NEXT_BACKEND_PORT+"/api/experiment/"+current_experiment.exp_uid+"/"+current_experiment.exp_key+"/participants"
# Make a request to next_backend for the responses
try:
response = requests.get(url)
response_dict = eval(response.text)
except (requests.HTTPError, requests.ConnectionError) as e:
print "excepted e", e
raise
print response_dict
# Parse them into a csv
# The rows are participant_id, timestamp, center, left, right, target winner, alg_label
participant_responses = []
participant_responses.append(",".join(["Participant ID", "Timestamp","Center", "Left", "Right", "Answer", "Alg Label"]))
for participant_id, response_list in response_dict['participant_responses'].iteritems():
for response in response_list:
line = [participant_id, response['timestamp_query_generated']]
targets = {}
# This index is not a backend index! It is just one of the target_indices
for index in response['target_indices']:
targets[index['label']] = index
# Check for the index winner in this response
# Shouldn't we check for target_winner?
if 'index_winner' in response.keys() and response["index_winner"] == index['index']:
target_winner = index
# Append the center, left, right targets
line.extend([targets['center']['target']['target_id'], targets['left']['target']['target_id'], targets['right']['target']['target_id']])
# Append the target winner
line.append(target_winner['target']['target_id'])
# Append the alg_label
line.append(response['alg_label'])
participant_responses.append(",".join(line))
return participant_responses
def run_experiment(self, current_experiment, args=None):
"""
Run an initExp call on the frontend base level.
"""
# Set up request dictionary for api initExp call
initExp_dict = {}
initExp_dict['app_id'] = current_experiment.app_id
initExp_dict['site_id'] = config.SITE_ID
initExp_dict['site_key'] = config.SITE_KEY
initExp_dict['args'] = {}
# Set up args for api initExp call
initExp_dict['args']['instructions'] = current_experiment.instructions
initExp_dict['args']['debrief'] = current_experiment.debrief
initExp_dict['args']['d'] = current_experiment.params['d']
initExp_dict['args']['n'] = len(current_experiment.target_set.targets)
initExp_dict['args']['failure_probability'] = current_experiment.params['failure_probability']
initExp_dict['args']['participant_to_algorithm_management'] = current_experiment.params['participant_to_algorithm_management']
initExp_dict['args']['algorithm_management_settings'] = {}
initExp_dict['args']['algorithm_management_settings']['mode'] = current_experiment.params['algorithm_management']
initExp_dict['args']['algorithm_management_settings']['params'] = {}
initExp_dict['args']['algorithm_management_settings']['params']['proportions'] = []
initExp_dict['args']['alg_list'] = []
for alg in current_experiment.params['alg_rows']:
params_dict = copy.deepcopy(alg)
params_dict['params'] = {}
params_dict['test_alg_label'] = 'Test'
del params_dict['alg_proportion']
initExp_dict['args']['alg_list'].append(params_dict)
proportions_dict = {}
proportions_dict['alg_label'] = alg['alg_label']
proportions_dict['proportion'] = alg['alg_proportion']
initExp_dict['args']['algorithm_management_settings']['params']['proportions'].append(proportions_dict)
# Make request for initExp
try:
url = "http://"+config.NEXT_BACKEND_HOST+":"+config.NEXT_BACKEND_PORT+"/api/experiment"
response = requests.post(url,
json.dumps(initExp_dict),
headers={'content-type':'application/json'})
response_dict = eval(response.text)
except:
exc_class, exc, tb = sys.exc_info()
new_exc = Exception("%s. Error connecting to backend."%(exc or exc_class))
raise new_exc.__class__,new_exc, tb
return response_dict
def get_query(self, app_id, exp_uid, widget_key, args=None):
"""
Render custom query for app type
"""
# Make this more flexible
next_backend_url = "http://"+config.NEXT_BACKEND_GLOBAL_HOST+":"+config.NEXT_BACKEND_GLOBAL_PORT
# pass in cookie dependent data
requested_experiment = Experiment.objects(exp_uid=exp_uid)[0]
query_tries = requested_experiment.query_tries
debrief = requested_experiment.debrief
instructions = requested_experiment.instructions
template = env.get_template("query.html")
return render_template(template,
app_id=app_id,
exp_uid = exp_uid,
widget_key = widget_key,
next_backend_url=next_backend_url,
query_tries = query_tries,
debrief = debrief,
instructions = instructions)
| apache-2.0 | -5,100,060,686,821,481,000 | 48.745856 | 170 | 0.584407 | false |
anjel-ershova/python_training | fixture/fixture_group.py | 1 | 4896 | from model.model_group import Group
import random
class GroupHelper:
def __init__(self, app):
self.app = app
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def select_first_group(self):
wd = self.app.wd
self.select_group_by_index(0)
def edit_if_not_none(self, field, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field).click()
wd.find_element_by_name(field).clear()
wd.find_element_by_name(field).send_keys(text)
else:
pass
def fill_group_form(self, group):
wd = self.app.wd
self.edit_if_not_none("group_name", group.name)
self.edit_if_not_none("group_header", group.header)
self.edit_if_not_none("group_footer", group.footer)
def create(self, group):
wd = self.app.wd
# open_groups_page
wd.find_element_by_link_text("groups").click()
# init group creation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
self.group_cache = None
def edit_first_group(self):
wd = self.app.wd
self.edit_group_by_index(0)
def edit_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_index(index)
# click edit button
wd.find_element_by_name("edit").click()
self.fill_group_form(new_group_data)
# submit edition
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def edit_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_id(id)
# click edit button
wd.find_element_by_name("edit").click()
self.fill_group_form(new_group_data)
# submit edition
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def add_selected_contact_to_selected_group_by_id(self, target_group):
wd = self.app.wd
        # open the drop-down list
to_group = wd.find_element_by_name("to_group")
to_group.click()
        # select an arbitrary group by its value
to_group.find_element_by_css_selector("[value='%s']" % target_group.id).click()
wd.find_element_by_name("add").click()
def select_some_group_to_view(self, target_group):
wd = self.app.wd
        # open the drop-down list
view_group = wd.find_element_by_name("group")
view_group.click()
        # select an arbitrary group by its value
view_group.find_element_by_css_selector("[value='%s']" % target_group.id).click()
# def click_add_contact_to_group_button(self):
# wd = self.app.wd
# wd.find_element_by_name("add").click()
# self.app.navigation.open_home_page()
def delete_first_group(self):
wd = self.app.wd
self.delete_group_by_index(0)
def delete_group_by_index(self, index):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_index(index)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def delete_group_by_id(self, id):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_id(id)
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def count(self):
wd = self.app.wd
self.app.navigation.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.app.navigation.open_groups_page()
self.group_cache = []
wd.find_elements_by_css_selector("span.group")
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = int(element.find_element_by_name("selected[]").get_attribute("value"))
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
| apache-2.0 | 4,500,193,398,506,461,700 | 33.285714 | 91 | 0.592708 | false |
bb111189/Arky2 | boilerplate/external/pycountry/__init__.py | 1 | 3459 | # vim:fileencoding=utf-8
# Copyright (c) 2008-2011 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
"""pycountry"""
import os.path
import pycountry.db
LOCALES_DIR = os.path.join(os.path.dirname(__file__), 'locales')
DATABASE_DIR = os.path.join(os.path.dirname(__file__), 'databases')
class Countries(pycountry.db.Database):
"""Provides access to an ISO 3166 database (Countries)."""
field_map = dict(alpha_2_code='alpha2',
alpha_3_code='alpha3',
numeric_code='numeric',
name='name',
official_name='official_name',
common_name='common_name')
data_class_name = 'Country'
xml_tag = 'iso_3166_entry'
class Scripts(pycountry.db.Database):
"""Providess access to an ISO 15924 database (Scripts)."""
field_map = dict(alpha_4_code='alpha4',
numeric_code='numeric',
name='name')
data_class_name = 'Script'
xml_tag = 'iso_15924_entry'
class Currencies(pycountry.db.Database):
"""Providess access to an ISO 4217 database (Currencies)."""
field_map = dict(letter_code='letter',
numeric_code='numeric',
currency_name='name')
data_class_name = 'Currency'
xml_tag = 'iso_4217_entry'
class Languages(pycountry.db.Database):
"""Providess access to an ISO 639-1/2 database (Languages)."""
field_map = dict(iso_639_2B_code='bibliographic',
iso_639_2T_code='terminology',
iso_639_1_code='alpha2',
common_name='common_name',
name='name')
data_class_name = 'Language'
xml_tag = 'iso_639_entry'
class Subdivision(pycountry.db.Data):
parent_code = None
def __init__(self, element, **kw):
super(Subdivision, self).__init__(element, **kw)
self.type = element.parentNode.attributes.get('type').value
self.country_code = self.code.split('-')[0]
if self.parent_code is not None:
self.parent_code = '%s-%s' % (self.country_code, self.parent_code)
@property
def country(self):
return countries.get(alpha2=self.country_code)
@property
def parent(self):
return subdivisions.get(code=self.parent_code)
class Subdivisions(pycountry.db.Database):
# Note: subdivisions can be hierarchical to other subdivisions. The
# parent_code attribute is related to other subdivisons, *not*
# the country!
xml_tag = 'iso_3166_2_entry'
data_class_base = Subdivision
data_class_name = 'Subdivision'
field_map = dict(code='code',
name='name',
parent='parent_code')
no_index = ['name', 'parent_code']
def __init__(self, *args, **kw):
super(Subdivisions, self).__init__(*args, **kw)
# Add index for the country code.
self.indices['country_code'] = {}
for subdivision in self:
divs = self.indices['country_code'].setdefault(
subdivision.country_code, set())
divs.add(subdivision)
countries = Countries(os.path.join(DATABASE_DIR, 'iso3166.xml'))
scripts = Scripts(os.path.join(DATABASE_DIR, 'iso15924.xml'))
currencies = Currencies(os.path.join(DATABASE_DIR, 'iso4217.xml'))
languages = Languages(os.path.join(DATABASE_DIR, 'iso639.xml'))
subdivisions = Subdivisions(os.path.join(DATABASE_DIR, 'iso3166_2.xml'))
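# Example lookups (illustrative; exact results depend on the bundled ISO data):
#     countries.get(alpha2='DE').name        # 'Germany'
#     currencies.get(letter='EUR').name      # 'Euro'
#     subdivisions.get(code='US-CA').name    # 'California'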
| lgpl-3.0 | 8,592,734,702,324,915,000 | 30.445455 | 78 | 0.601619 | false |
abilian/abilian-core | src/abilian/core/models/tests/test_blob.py | 1 | 3462 | """"""
import uuid
from io import StringIO
from flask import Flask
from abilian.core.models.blob import Blob
from abilian.core.sqlalchemy import SQLAlchemy
from abilian.services import repository_service as repository
from abilian.services import session_repository_service as session_repository
#
# Unit tests
#
def test_auto_uuid() -> None:
blob = Blob()
assert blob.uuid is not None
assert isinstance(blob.uuid, uuid.UUID)
# test provided uuid is not replaced by a new one
u = uuid.UUID("4f80f02f-52e3-4fe2-b9f2-2c3e99449ce9")
blob = Blob(uuid=u)
assert isinstance(blob.uuid, uuid.UUID)
assert blob.uuid, u
def test_meta() -> None:
blob = Blob()
assert blob.meta == {}
#
# Integration tests
#
def test_md5(app: Flask, db: SQLAlchemy) -> None:
blob = Blob("test md5")
assert "md5" in blob.meta
assert blob.meta["md5"] == "0e4e3b2681e8931c067a23c583c878d5"
def test_size(app: Flask, db: SQLAlchemy) -> None:
blob = Blob("test")
assert blob.size == 4
def test_filename(app: Flask, db: SQLAlchemy) -> None:
content = StringIO("test")
content.filename = "test.txt"
blob = Blob(content)
assert "filename" in blob.meta
assert blob.meta["filename"] == "test.txt"
def test_mimetype(app: Flask, db: SQLAlchemy) -> None:
content = StringIO("test")
content.content_type = "text/plain"
blob = Blob(content)
assert "mimetype" in blob.meta
assert blob.meta["mimetype"] == "text/plain"
def test_nonzero(app: Flask, db: SQLAlchemy) -> None:
blob = Blob("test md5")
assert bool(blob)
# change uuid: repository will return None for blob.file
blob.uuid = uuid.uuid4()
assert not bool(blob)
# def test_query(app, db):
# session = db.session
# content = b"content"
# b = Blob(content)
# session.add(b)
# session.flush()
#
# assert Blob.query.by_uuid(b.uuid) is b
# assert Blob.query.by_uuid(str(b.uuid)) is b
#
# u = uuid.uuid4()
# assert Blob.query.by_uuid(u) is None
def test_value(app: Flask, db: SQLAlchemy) -> None:
session = db.session
content = b"content"
blob = Blob(content)
tr = session.begin(nested=True)
session.add(blob)
tr.commit()
assert repository.get(blob.uuid) is None
assert session_repository.get(blob, blob.uuid).open("rb").read() == content
assert blob.value == content
session.commit()
assert repository.get(blob.uuid).open("rb").read() == content
assert blob.value == content
session.begin(nested=True) # match session.rollback
with session.begin(nested=True):
session.delete(blob)
# object marked for deletion, but instance attribute should still be
# readable
fd = session_repository.get(blob, blob.uuid).open("rb")
assert fd.read() == content
# commit in transaction: session_repository has no content, 'physical'
# repository still has content
assert session_repository.get(blob, blob.uuid) is None
assert repository.get(blob.uuid).open("rb").read() == content
# rollback: session_repository has content again
session.rollback()
assert session_repository.get(blob, blob.uuid).open("rb").read() == content
session.delete(blob)
session.flush()
assert session_repository.get(blob, blob.uuid) is None
assert repository.get(blob.uuid).open("rb").read() == content
session.commit()
assert repository.get(blob.uuid) is None
| lgpl-2.1 | -4,471,897,591,690,982,400 | 26.259843 | 79 | 0.666667 | false |
Phonemetra/TurboCoin | test/functional/rpc_getblockstats.py | 1 | 6826 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import TurbocoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(TurbocoinTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(101)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure not valid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
| mit | 1,836,108,626,867,167,700 | 40.621951 | 121 | 0.598447 | false |
savioabuga/django-geonames-field | docs/conf.py | 1 | 8231 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import geonames_field
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-geonames-field'
copyright = u'2015, Savio Abuga'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = geonames_field.__version__
# The full version, including alpha/beta/rc tags.
release = geonames_field.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-geonames-fielddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-geonames-field.tex', u'django-geonames-field Documentation',
u'Savio Abuga', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-geonames-field', u'django-geonames-field Documentation',
[u'Savio Abuga'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-geonames-field', u'django-geonames-field Documentation',
u'Savio Abuga', 'django-geonames-field', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause | -5,976,811,117,962,675,000 | 31.405512 | 80 | 0.708176 | false |
ecell/libmoleculizer | python-src/bngparser/src/moleculizer/moleculeinterpreter.py | 1 | 6341 | ###############################################################################
# BNGMZRConverter - A utility program for converting bngl input files to mzr
# input files.
# Copyright (C) 2007, 2008, 2009 The Molecular Sciences Institute
#
# Moleculizer is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Moleculizer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Moleculizer; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Original Author:
# Nathan Addy, Scientific Programmer Email: [email protected]
# The Molecular Sciences Institute Email: [email protected]
#
#
###############################################################################
from moleculizermol import MoleculizerMol, MoleculizerSmallMol, MoleculizerModMol, isSmallMol, isModMol
from util import DataUnifier
from xmlobject import XmlObject
import pdb
# This class parses a Mols Block into a list of small-mols and big mols.
# It also manages the list of modifications.
class MoleculeDictionary:
class DuplicateMolDefinitionException(Exception): pass
class BadMolDefinitionException(Exception): pass
listOfNullModifications = ["none"]
def __init__(self, moleculeBlock, paramDict):
self.rawMoleculeDefinitions = moleculeBlock[:]
self.paramDict = paramDict
self.registeredMoleculesDictionary = {}
self.smallMolsDictionary = {}
self.modMolsDictionary = {}
self.initialize()
def initialize(self):
self.rawMoleculeDefinitions = DataUnifier( self.rawMoleculeDefinitions )
for line in self.rawMoleculeDefinitions:
if isSmallMol(line):
print "SM: %s" % line
MoleculizerSmallMol(line)
elif isModMol(line):
print "MM: %s" % line
MoleculizerModMol(line)
else:
print "'%s' is neither a ModMol nor a SmallMol, according to the isSmallMol and isModMol functions." % line
raise "Hello"
def parseMoleculeTypesLine(self, moleculeTypesLine):
parsedMol = MoleculizerMol(moleculeTypesLine)
parsedMolName = parsedMol.getName()
if parsedMolName in self.registeredMoleculesDictionary.keys():
            raise self.DuplicateMolDefinitionException("Error, molecule %s already defined in the MoleculeInterpreter" % parsedMolName)
self.registeredMoleculesDictionary[parsedMolName] = parsedMol
def getUniversalModificationList(self):
return MoleculizerMol.modificationStates[:]
def addModifications(self, parentElmt):
for modification in self.getUniversalModificationList():
modificationTypeElmt = XmlObject("modification")
modificationTypeElmt.addAttribute("name", modification)
modificationTypeElmt.attachToParent(parentElmt)
weightElmt = XmlObject("weight-delta")
weightElmt.attachToParent(modificationTypeElmt)
if self.representsNullModification(modification):
weightDelta = 0.0
else:
weightDelta = 1.0
weightElmt.addAttribute("daltons", weightDelta)
def addMols(self, parentElmt):
for molName in self.registeredMoleculesDictionary.keys():
self.addModMolElmtToMolsElmt(parentElmt, self.registeredMoleculesDictionary[molName])
def addModMolElmt(self, parentElmt):
pass
def addModMolElmtToMolsElmt(self, xmlObject, moleculizerMolObject):
assert(isinstance(xmlObject, XmlObject))
assert(isinstance(moleculizerMolObject, MoleculizerMol))
newModMol = XmlObject("mod-mol")
newModMol.addAttribute("name", moleculizerMolObject.getName())
weightElement = XmlObject("weight")
# Obviously this is one of the big deficiencies of this thing. What shall
# we set the (mandatory) weights to? For now, let's just put in something
        # arbitrary. But this is a big issue that ought to be fixed as soon as all
# the basic facilities of the code have been built in.
if moleculizerMolObject.getName() == "Pheromone":
weightElement.addAttribute("daltons", 10.0)
else:
weightElement.addAttribute("daltons", 100.0)
newModMol.addSubElement(weightElement)
for binding in moleculizerMolObject.bindingSites:
self.addBindingSiteElmtToModMolElmt(binding, moleculizerMolObject, newModMol)
for modification in moleculizerMolObject.modificationSites:
modSite, defaultModState = modification
modSiteElmt = XmlObject("mod-site")
modSiteElmt.addAttribute("name", modSite)
defModRefElmt = XmlObject("default-mod-ref")
defModRefElmt.addAttribute("name", defaultModState)
defModRefElmt.attachToParent(modSiteElmt).attachToParent(newModMol)
xmlObject.addSubElement(newModMol)
return
def addBindingSiteElmtToModMolElmt(self, bindingName, moleculizerMol, xmlObject):
newBindingElmt = XmlObject("binding-site")
newBindingElmt.addAttribute("name", bindingName)
defaultShape = XmlObject("default-shape-ref")
defaultShape.addAttribute("name", "default")
defaultShape.attachToParent(newBindingElmt)
for shapeName in moleculizerMol.bindingSites[bindingName]:
siteShapeElmt = XmlObject("site-shape")
siteShapeElmt.addAttribute("name", shapeName)
siteShapeElmt.attachToParent(newBindingElmt)
xmlObject.addSubElement(newBindingElmt)
return
def representsNullModification(self, modificationType):
return modificationType.lower() in MoleculeDictionary.listOfNullModifications
| gpl-2.0 | 4,373,819,553,800,396,000 | 37.664634 | 130 | 0.674972 | false |
GunnerJnr/_CodeInstitute | Stream-2/Back-End-Development/3.Testing/3.Safe-Refactoring/challenge-solution/test_vending_machine.py | 1 | 1027 | # import the unit test module
import unittest
from vending_machine import give_change
from vending_machine import give_change_decimal
# define a class (inherits from unittest)
class TestVendingMachine(unittest.TestCase):
    # define our method (the name must begin with test_, otherwise the test will not run)
def test_return_change(self):
        # we're imagining there is a method called give_change which returns a list of coins
        # given an amount in pence
self.assertEqual(give_change(.17), [.10, .05, .02], 'wrong change given')
self.assertEqual(give_change(18), [.10, .05, .02, .01], 'wrong change given')
# decimal test
self.assertEqual(give_change_decimal(.17), [.10, .05, .02], 'wrong change given')
self.assertEqual(give_change_decimal(18), [.10, .05, .02, .01], 'wrong change given')
def test_multiple_same_coins(self):
self.assertEqual(give_change(.04), [.02, .02])
# decimal test
self.assertEqual(give_change_decimal(.04), [.02, .02])
| mit | -7,425,424,723,672,473,000 | 41.791667 | 93 | 0.667965 | false |
stackimpact/stackimpact-python | tests/message_queue_test.py | 1 | 1716 | import unittest
import sys
import json
import stackimpact
from stackimpact.utils import timestamp
from test_server import TestServer
class MessageQueueTest(unittest.TestCase):
def test_flush(self):
server = TestServer(5005)
server.start()
stackimpact._agent = None
agent = stackimpact.start(
dashboard_address = 'http://localhost:5005',
agent_key = 'key1',
app_name = 'TestPythonApp',
debug = True
)
m = {
'm1': 1
}
agent.message_queue.add('t1', m)
m = {
'm2': 2
}
agent.message_queue.add('t1', m)
agent.message_queue.queue[0]['added_at'] = timestamp() - 20 * 60
agent.message_queue.flush()
data = json.loads(server.get_request_data())
self.assertEqual(data['payload']['messages'][0]['content']['m2'], 2)
agent.destroy()
server.join()
def test_flush_fail(self):
server = TestServer(5006)
server.set_response_data("unparsablejson")
server.start()
stackimpact._agent = None
agent = stackimpact.start(
dashboard_address = 'http://localhost:5006',
agent_key = 'key1',
app_name = 'TestPythonApp',
debug = True
)
m = {
'm1': 1
}
agent.message_queue.add('t1', m)
m = {
'm2': 2
}
agent.message_queue.add('t1', m)
agent.message_queue.flush()
self.assertEqual(len(agent.message_queue.queue), 2)
agent.destroy()
server.join()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 476,884,992,317,026,370 | 21 | 76 | 0.521562 | false |
mpi-sws-rse/antevents-python | examples/event_library_comparison/event.py | 1 | 8071 | """This version uses a traditional event-driven version,
using continuation passing style. Each method call is passed
a completion callback and an error callback
"""
from statistics import median
import json
import asyncio
import random
import time
import hbmqtt.client
from collections import deque
from antevents.base import SensorEvent
URL = "mqtt://localhost:1883"
class RandomSensor:
def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
self.sensor_id = sensor_id
self.mean = mean
self.stddev = stddev
self.stop_after_events = stop_after_events
if stop_after_events is not None:
def generator():
for i in range(stop_after_events):
yield round(random.gauss(mean, stddev), 1)
else: # go on forever
def generator():
while True:
yield round(random.gauss(mean, stddev), 1)
self.generator = generator()
def sample(self):
return self.generator.__next__()
def __repr__(self):
if self.stop_after_events is None:
return 'RandomSensor(%s, mean=%s, stddev=%s)' % \
(self.sensor_id, self.mean, self.stddev)
else:
return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \
(self.sensor_id, self.mean, self.stddev, self.stop_after_events)
class PeriodicMedianTransducer:
"""Emit an event once every ``period`` input events.
The value is the median of the inputs received since the last
emission.
"""
def __init__(self, period=5):
self.period = period
self.samples = [None for i in range(period)]
self.events_since_last = 0
self.last_event = None # this is used in emitting the last event
def step(self, v):
self.samples[self.events_since_last] = v.val
self.events_since_last += 1
if self.events_since_last==self.period:
val = median(self.samples)
event = SensorEvent(sensor_id=v.sensor_id, ts=v.ts, val=val)
self.events_since_last = 0
return event
else:
self.last_event = v # save in case we complete before completing a period
return None
def complete(self):
if self.events_since_last>0:
# if we have some partial state, we emit one final event that
            # takes the median of whatever we saw since the last emission.
return SensorEvent(sensor_id=self.last_event.sensor_id,
ts=self.last_event.ts,
val=median(self.samples[0:self.events_since_last]))
def csv_writer(evt):
print("csv_writer(%s)" % repr(evt))
class MqttWriter:
"""All the processing is asynchronous. We ensure that a given send has
completed and the callbacks called before we process the next one.
"""
def __init__(self, url, topic, event_loop):
self.url = url
self.topic = topic
self.client = hbmqtt.client.MQTTClient(loop=event_loop)
self.event_loop = event_loop
self.connected = False
self.pending_task = None
self.request_queue = deque()
def _to_message(self, msg):
return bytes(json.dumps((msg.sensor_id, msg.ts, msg.val),), encoding='utf-8')
def _request_done(self, f, completion_cb, error_cb):
assert f==self.pending_task
self.pending_task = None
exc = f.exception()
if exc:
self.event_loop.call_soon(error_cb, exc)
else:
self.event_loop.call_soon(completion_cb)
if len(self.request_queue)>0:
self.event_loop.call_soon(self._process_queue)
def _process_queue(self):
assert self.pending_task == None
assert len(self.request_queue)>0
(msg, completion_cb, error_cb) = self.request_queue.popleft()
if msg is not None:
print("send from queue: %s" % msg)
self.pending_task = self.event_loop.create_task(
self.client.publish(self.topic, msg)
)
else: # None means that we wanted a disconnect
print("disconnect")
self.pending_task = self.event_loop.create_task(
self.client.disconnect()
)
self.pending_task.add_done_callback(lambda f:
self._request_done(f, completion_cb,
error_cb))
def send(self, msg, completion_cb, error_cb):
if not self.connected:
print("attempting connection")
self.request_queue.append((self._to_message(msg),
completion_cb, error_cb),)
self.connected = True
self.pending_task = self.event_loop.create_task(self.client.connect(self.url))
def connect_done(f):
assert f==self.pending_task
print("connected")
self.pending_task = None
self.event_loop.call_soon(self._process_queue)
self.pending_task.add_done_callback(connect_done)
elif self.pending_task:
self.request_queue.append((self._to_message(msg), completion_cb,
error_cb),)
else:
print("sending %s" % self._to_message(msg))
self.pending_task = self.event_loop.create_task(
self.client.publish(self.topic, self._to_message(msg))
)
self.pending_task.add_done_callback(lambda f:
self._request_done(f, completion_cb,
error_cb))
def disconnect(self, completion_cb, error_cb, drop_queue=False):
if not self.connected:
return
if len(self.request_queue)>0 and drop_queue: # for error situations
self.request_queue = deque()
if self.pending_task:
self.request_queue.append((None, completion_cb, error_cb),)
else:
print("disconnecting")
self.pending_task = self.event_loop.create_task(
self.client.disconnect()
)
self.pending_task.add_done_callback(lambda f:
self._request_done(f, completion_cb,
error_cb))
def sample_and_process(sensor, mqtt_writer, xducer, completion_cb, error_cb):
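    # One pipeline step: sample the sensor, log the raw event via csv_writer, fold it
    # into the median transducer, and publish a median event over MQTT whenever the
    # transducer emits one; StopIteration from an exhausted sensor triggers the final
    # flush and MQTT disconnect before signalling completion_cb(False).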
try:
sample = sensor.sample()
except StopIteration:
final_event = xducer.complete()
if final_event:
mqtt_writer.send(final_event,
lambda: mqtt_writer.disconnect(lambda: completion_cb(False), error_cb),
error_cb)
else:
mqtt_writer.disconnect(lambda: completion_cb(False), error_cb)
return
event = SensorEvent(sensor_id=sensor.sensor_id, ts=time.time(), val=sample)
csv_writer(event)
median_event = xducer.step(event)
if median_event:
mqtt_writer.send(median_event,
lambda: completion_cb(True), error_cb)
else:
completion_cb(True)
sensor = RandomSensor('sensor-2', stop_after_events=12)
transducer = PeriodicMedianTransducer(5)
event_loop = asyncio.get_event_loop()
writer = MqttWriter(URL, sensor.sensor_id, event_loop)
def loop():
def completion_cb(more):
if more:
event_loop.call_later(0.5, loop)
else:
print("all done, no more callbacks to schedule")
event_loop.stop()
def error_cb(e):
print("Got error: %s" % e)
event_loop.stop()
event_loop.call_soon(
lambda: sample_and_process(sensor, writer, transducer,
completion_cb, error_cb)
)
event_loop.call_soon(loop)
event_loop.run_forever()
print("that's all folks")
| apache-2.0 | 4,810,702,886,399,499,000 | 37.251185 | 100 | 0.563127 | false |
saikrishnar/vsm_preparer | scripts/quincontext.py | 1 | 1778 | #! /usr/bin/python
import os
def main(folder):
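    # Intended transform (inferred from the code below): each .dur file holds
    # "<phone> <duration>" lines; for every phone with two neighbours on each side
    # we write "<duration> <p-2> <p-1> <p> <p+1> <p+2>" to a matching .quindur file,
    # i.e. a quinphone context for duration modelling.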
#vectors = []
#f = open('../dictionary/vectors.txt')
#for line in f:
# representation = line.strip('\n')
# vectors.append(representation)
#f.close()
for d, ds, fs in os.walk(folder):
for fname in fs:
if fname[-4:] != '.dur':
continue
fullfname = d + '/' + fname
phone_array = []
dur_array = []
fr = open(fullfname)
for line in fr:
if line.split('\n')[0] == '#':
continue
[phone, dur] = line.rstrip('\n').split()
phone_array.append(phone)
dur_array.append(dur)
fw = open(fullfname[:-4] + '.quindur', 'w')
for i in range(2, len(dur_array) - 2 ):
phoneme_2p = phone_array[i-2]
#phoneme_2p_index = uniquephone_list.index(phoneme_2p)
phoneme_1p = phone_array[i-1]
#phoneme_1p_index = uniquephone_list.index(phoneme_1p)
phoneme = phone_array[i]
#phoneme_index = uniquephone_list.index(phoneme)
phoneme_1n = phone_array[i+1]
#phoneme_1n_index = uniquephone_list.index(phoneme_1n)
phoneme_2n = phone_array[i+2]
#phoneme_2n_index = uniquephone_list.index(phoneme_2n)
duration = dur_array[i]
fw.write( str(float(duration)) + ' ' + phoneme_2p + ' ' + phoneme_1p + ' ' + phoneme + ' ' + phoneme_1n + ' ' + phoneme_2n + '\n')
fw.close()
fr.close()
if __name__ == '__main__':
folder = '../lab'
main(folder)
| gpl-2.0 | 4,492,283,820,192,578,000 | 34.56 | 151 | 0.45838 | false |
tomato42/tlsfuzzer | scripts/test-openssl-3712.py | 1 | 9502 | # Author: Hubert Kario, (c) 2015
# Released under Gnu GPL v2.0, see LICENSE file for details
from __future__ import print_function
import traceback
import sys
import getopt
from itertools import chain
from random import sample
from tlsfuzzer.runner import Runner
from tlsfuzzer.messages import Connect, ClientHelloGenerator, \
ClientKeyExchangeGenerator, ChangeCipherSpecGenerator, \
FinishedGenerator, ApplicationDataGenerator, AlertGenerator, \
ResetHandshakeHashes
from tlsfuzzer.expect import ExpectServerHello, ExpectCertificate, \
ExpectServerHelloDone, ExpectChangeCipherSpec, ExpectFinished, \
ExpectAlert, ExpectApplicationData, ExpectClose
from tlslite.constants import CipherSuite, AlertLevel, AlertDescription, \
ExtensionType
from tlsfuzzer.utils.lists import natural_sort_keys
version = 2
def help_msg():
print("Usage: <script-name> [-h hostname] [-p port] [[probe-name] ...]")
print(" -h hostname name of the host to run the test against")
print(" localhost by default")
print(" -p port port number to use for connection, 4433 by default")
print(" probe-name if present, will run only the probes with given")
print(" names and not all of them, e.g \"sanity\"")
print(" -e probe-name exclude the probe from the list of the ones run")
print(" may be specified multiple times")
print(" -x probe-name expect the probe to fail. When such probe passes despite being marked like this")
print(" it will be reported in the test summary and the whole script will fail.")
print(" May be specified multiple times.")
print(" -X message expect the `message` substring in exception raised during")
print(" execution of preceding expected failure probe")
print(" usage: [-x probe-name] [-X exception], order is compulsory!")
print(" -n num run 'num' or all(if 0) tests instead of default(all)")
print(" (excluding \"sanity\" tests)")
print(" --help this message")
def main():
#
# Test interleaving of Application Data with handshake messages,
# requires server to support client initiated renegotiation
#
host = "localhost"
port = 4433
num_limit = None
run_exclude = set()
expected_failures = {}
last_exp_tmp = None
argv = sys.argv[1:]
opts, args = getopt.getopt(argv, "h:p:e:x:X:n:", ["help"])
for opt, arg in opts:
if opt == '-h':
host = arg
elif opt == '-p':
port = int(arg)
elif opt == '-e':
run_exclude.add(arg)
elif opt == '-x':
expected_failures[arg] = None
last_exp_tmp = str(arg)
elif opt == '-X':
if not last_exp_tmp:
raise ValueError("-x has to be specified before -X")
expected_failures[last_exp_tmp] = str(arg)
elif opt == '-n':
num_limit = int(arg)
elif opt == '--help':
help_msg()
sys.exit(0)
else:
raise ValueError("Unknown option: {0}".format(opt))
if args:
run_only = set(args)
else:
run_only = None
conversations = {}
conversation = Connect(host, port)
node = conversation
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
node = node.add_child(ClientHelloGenerator(ciphers))
node = node.add_child(ExpectServerHello())
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(
bytearray(b"GET / HTTP/1.0\n\n")))
node = node.add_child(ExpectApplicationData())
node = node.add_child(AlertGenerator(AlertLevel.warning,
AlertDescription.close_notify))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
conversations["sanity"] = conversation
conversation = Connect(host, port)
node = conversation
#ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
# CipherSuite.TLS_EMPTY_RENEGOTIATION_INFO_SCSV]
ciphers = [CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA]
node = node.add_child(ClientHelloGenerator(ciphers,
extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
# 2nd handshake
node = node.add_child(ResetHandshakeHashes())
node = node.add_child(ClientHelloGenerator([CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA],
session_id=bytearray(0),
extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectServerHello(extensions={ExtensionType.renegotiation_info:None}))
node = node.add_child(ExpectCertificate())
node = node.add_child(ExpectServerHelloDone())
node = node.add_child(ApplicationDataGenerator(bytearray(b"hello server!\n")))
node = node.add_child(ClientKeyExchangeGenerator())
node = node.add_child(ChangeCipherSpecGenerator())
node = node.add_child(FinishedGenerator())
node = node.add_child(ExpectChangeCipherSpec())
node = node.add_child(ExpectFinished())
node = node.add_child(ApplicationDataGenerator(bytearray(b"hello server!\n")))
#node = node.add_child(ExpectApplicationData(bytearray(b"hello client!\n")))
node = node.add_child(AlertGenerator(AlertLevel.warning,
AlertDescription.close_notify))
node = node.add_child(ExpectAlert())
node.next_sibling = ExpectClose()
node.next_sibling.next_sibling = ExpectApplicationData()
conversations["weaved app data and handshake proto"] = conversation
# run the conversation
good = 0
bad = 0
xfail = 0
xpass = 0
failed = []
xpassed = []
if not num_limit:
num_limit = len(conversations)
# make sure that sanity test is run first and last
# to verify that server was running and kept running throughout
sanity_tests = [('sanity', conversations['sanity'])]
if run_only:
if num_limit > len(run_only):
num_limit = len(run_only)
regular_tests = [(k, v) for k, v in conversations.items() if
k in run_only]
else:
regular_tests = [(k, v) for k, v in conversations.items() if
(k != 'sanity') and k not in run_exclude]
sampled_tests = sample(regular_tests, min(num_limit, len(regular_tests)))
ordered_tests = chain(sanity_tests, sampled_tests, sanity_tests)
for c_name, c_test in ordered_tests:
if run_only and c_name not in run_only or c_name in run_exclude:
continue
print("{0} ...".format(c_name))
runner = Runner(c_test)
res = True
exception = None
try:
runner.run()
except Exception as exp:
exception = exp
print("Error while processing")
print(traceback.format_exc())
res = False
if c_name in expected_failures:
if res:
xpass += 1
xpassed.append(c_name)
print("XPASS-expected failure but test passed\n")
else:
if expected_failures[c_name] is not None and \
expected_failures[c_name] not in str(exception):
bad += 1
failed.append(c_name)
print("Expected error message: {0}\n"
.format(expected_failures[c_name]))
else:
xfail += 1
print("OK-expected failure\n")
else:
if res:
good += 1
print("OK\n")
else:
bad += 1
failed.append(c_name)
print("Test end")
print(20 * '=')
print("version: {0}".format(version))
print(20 * '=')
print("TOTAL: {0}".format(len(sampled_tests) + 2*len(sanity_tests)))
print("SKIP: {0}".format(len(run_exclude.intersection(conversations.keys()))))
print("PASS: {0}".format(good))
print("XFAIL: {0}".format(xfail))
print("FAIL: {0}".format(bad))
print("XPASS: {0}".format(xpass))
print(20 * '=')
    sort = sorted(xpassed, key=natural_sort_keys)
if len(sort):
print("XPASSED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
sort = sorted(failed, key=natural_sort_keys)
if len(sort):
print("FAILED:\n\t{0}".format('\n\t'.join(repr(i) for i in sort)))
if bad > 0:
sys.exit(1)
if __name__ == "__main__":
main()
| gpl-2.0 | -734,074,567,831,562,600 | 39.434043 | 108 | 0.603452 | false |
sumeetsk/NEXT-1 | apps/DuelingBanditsPureExploration/tests/test_api.py | 1 | 5422 | import numpy
import numpy as np
import numpy.random
import random
import json
import time
from datetime import datetime
import requests
from scipy.linalg import norm
import time
from multiprocessing import Pool
import os
import sys
try:
import next.apps.test_utils as test_utils
except:
sys.path.append('../../../next/apps')
import test_utils
def test_validation_params():
params = [{'num_tries': 5},
{'query_list': [[0, 1], [1, 2], [3, 4]]}]
for param in params:
print(param)
test_api(params=param)
def test_api(assert_200=True, num_arms=5, num_clients=8, delta=0.05,
total_pulls_per_client=5, num_experiments=1,
params={'num_tries': 5}):
app_id = 'DuelingBanditsPureExploration'
true_means = numpy.array(range(num_arms)[::-1])/float(num_arms)
pool = Pool(processes=num_clients)
supported_alg_ids = ['BR_LilUCB', 'BR_Random', 'ValidationSampling']
alg_list = []
for i, alg_id in enumerate(supported_alg_ids):
alg_item = {}
if alg_id == 'ValidationSampling':
alg_item['params'] = params
alg_item['alg_id'] = alg_id
alg_item['alg_label'] = alg_id+'_'+str(i)
alg_list.append(alg_item)
params = []
for algorithm in alg_list:
params.append({'alg_label': algorithm['alg_label'], 'proportion':1./len(alg_list)})
algorithm_management_settings = {}
algorithm_management_settings['mode'] = 'fixed_proportions'
algorithm_management_settings['params'] = params
print algorithm_management_settings
#################################################
# Test POST Experiment
#################################################
initExp_args_dict = {}
initExp_args_dict['args'] = {'alg_list': alg_list,
'algorithm_management_settings': algorithm_management_settings,
'context': 'Context for Dueling Bandits',
'context_type': 'text',
'debrief': 'Test debried.',
'failure_probability': 0.05,
'instructions': 'Test instructions.',
'participant_to_algorithm_management': 'one_to_many',
'targets': {'n': num_arms}}
initExp_args_dict['app_id'] = app_id
initExp_args_dict['site_id'] = 'replace this with working site id'
initExp_args_dict['site_key'] = 'replace this with working site key'
exp_info = []
for ell in range(num_experiments):
exp_info += [test_utils.initExp(initExp_args_dict)[1]]
# Generate participants
participants = []
pool_args = []
for i in range(num_clients):
participant_uid = '%030x' % random.randrange(16**30)
participants.append(participant_uid)
experiment = numpy.random.choice(exp_info)
exp_uid = experiment['exp_uid']
pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
true_means,assert_200))
results = pool.map(simulate_one_client, pool_args)
for result in results:
result
test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
def simulate_one_client(input_args):
exp_uid,participant_uid,total_pulls,true_means,assert_200 = input_args
getQuery_times = []
processAnswer_times = []
for t in range(total_pulls):
print " Participant {} had {} total pulls: ".format(participant_uid, t)
# test POST getQuery #
# return a widget 1/5 of the time (normally, use HTML)
widget = random.choice([True] + 4*[False])
getQuery_args_dict = {'args': {'participant_uid': participant_uid,
'widget': widget},
'exp_uid': exp_uid}
query_dict, dt = test_utils.getQuery(getQuery_args_dict)
getQuery_times.append(dt)
if widget:
query_dict = query_dict['args']
query_uid = query_dict['query_uid']
targets = query_dict['target_indices']
left = targets[0]['target']
right = targets[1]['target']
# sleep for a bit to simulate response time
ts = test_utils.response_delay()
# print left
reward_left = true_means[left['target_id']] + numpy.random.randn()*0.5
reward_right = true_means[right['target_id']] + numpy.random.randn()*0.5
if reward_left > reward_right:
target_winner = left
else:
target_winner = right
response_time = time.time() - ts
# test POST processAnswer
processAnswer_args_dict = {'args': {'query_uid': query_uid,
'response_time': response_time,
'target_winner': target_winner["target_id"]},
'exp_uid': exp_uid}
processAnswer_json_response, dt = test_utils.processAnswer(processAnswer_args_dict)
processAnswer_times += [dt]
r = test_utils.format_times(getQuery_times, processAnswer_times, total_pulls,
participant_uid)
return r
if __name__ == '__main__':
test_api()
# test_api(assert_200=True, num_arms=5, num_clients=10, delta=0.05,
# total_pulls_per_client=10, num_experiments=1)
| apache-2.0 | 3,044,284,908,923,959,000 | 34.907285 | 96 | 0.562339 | false |
Eksmo/calibre | src/calibre/gui2/viewer/position.py | 1 | 1815 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import json
class PagePosition(object):
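    # Remembers and restores the viewer's reading position: it prefers a CFI reported
    # by the in-page paged_display javascript and falls back to the document's scroll
    # fraction when no CFI is available (see current_pos/to_pos below).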
def __init__(self, document):
self.document = document
@property
def viewport_cfi(self):
ans = None
res = self.document.mainFrame().evaluateJavaScript('''
ans = 'undefined';
if (window.paged_display) {
ans = window.paged_display.current_cfi();
if (!ans) ans = 'undefined';
}
ans;
''')
if res.isValid() and not res.isNull() and res.type() == res.String:
c = unicode(res.toString())
if c != 'undefined':
ans = c
return ans
def scroll_to_cfi(self, cfi):
if cfi:
cfi = json.dumps(cfi)
self.document.mainFrame().evaluateJavaScript(
'paged_display.jump_to_cfi(%s)'%cfi)
@property
def current_pos(self):
ans = self.viewport_cfi
if not ans:
ans = self.document.scroll_fraction
return ans
def __enter__(self):
self.save()
def __exit__(self, *args):
self.restore()
def save(self):
self._cpos = self.current_pos
def restore(self):
if self._cpos is None: return
self.to_pos(self._cpos)
self._cpos = None
def to_pos(self, pos):
if isinstance(pos, (int, float)):
self.document.scroll_fraction = pos
else:
self.scroll_to_cfi(pos)
def set_pos(self, pos):
self._cpos = pos
| gpl-3.0 | -6,051,221,796,672,716,000 | 24.928571 | 75 | 0.534986 | false |
openego/oeplatform | modelview/migrations/0022_auto_20160303_2233.py | 1 | 1468 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-03 21:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("modelview", "0021_auto_20160303_2233")]
operations = [
migrations.AlterField(
model_name="energymodel",
name="model_file_format",
field=models.CharField(
choices=[
(".exe", ".exe"),
(".gms", ".gms"),
(".py", ".py"),
(".xls", ".xls"),
("other", "other"),
],
default="other",
help_text="In which format is the model saved?",
max_length=5,
verbose_name="Model file format",
),
),
migrations.AlterField(
model_name="energymodel",
name="model_input",
field=models.CharField(
choices=[
(".csv", ".csv"),
(".py", ".py"),
("text", "text"),
(".xls", ".xls"),
("other", "other"),
],
default="other",
help_text="Of which file format are the input and output data?",
max_length=5,
verbose_name="Input/output data file format",
),
),
]
| agpl-3.0 | -647,503,568,038,892,000 | 30.234043 | 80 | 0.419619 | false |
AjabWorld/ajabsacco | ajabsacco/core/facades/loans/transactions.py | 1 | 8484 | from ajabsacco.core.models import (
LoanTransactionEntry,
LoanProduct,
LoanAccount,
Message as SMSMessage,
)
from decimal import Decimal as D
from django.db.models import Q, Sum, F
from django.utils import timezone
from django.db import transaction as db_transaction
from ajabsacco.core import codes
from ajabsacco.core.sms import templates
from ajabsacco.core.utils import record_log, month_delta
from ajabsacco.core.exceptions import *
import ledger as ledger_facades
from ajabsacco.core.facades import transactions as transaction_facades
from ajabsacco.core.facades.loans import validations as validation_facades
import logging
logger = logging.getLogger('core.ajabsacco.loans')
import uuid
def allocate_repayment(loan_account, amount, *args, **kwargs):
with db_transaction.atomic():
fee_accrued = ledger_facades.loan_account_fees_due(loan_account)
penalties_accrued = ledger_facades.loan_account_penalties_due(loan_account)
interest_accrued = ledger_facades.loan_account_interest_due(loan_account)
principal_accrued = ledger_facades.loan_account_principal_due(loan_account)
#1. Align the order we will deduct the accruals
accruals = {
LoanProduct.ALLOCATION_CHOICE_FEE: (fee_accrued or D('0.0'), post_loan_fee),
LoanProduct.ALLOCATION_CHOICE_PENALTY: (penalties_accrued or D('0.0'), post_penalty_fee),
LoanProduct.ALLOCATION_CHOICE_INTEREST: (interest_accrued or D('0.0'), post_loan_interest),
}
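        # Worked example (hypothetical figures): if 100 in fees, 50 in penalties and
        # 200 in interest have accrued and a 500 repayment arrives, the loop below
        # clears each accrual in whatever sequence LoanProduct.ALLOCATION_ORDER
        # dictates, and the remaining 150 is then posted against the principal.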
amount = (amount or 0)
#get a sum of all accruals
total_accruals = sum(i[0] for i in accruals.values())
#Ensure we have sane values
if (amount > 0) and (principal_accrued > 0):
transaction_id = uuid.uuid4()
            #setup the allocation balance, to help us track how much of the repayment is left to allocate
allocation_balance = amount
if total_accruals > 0:
items_allocated = 0
#Loop through the allocation order
for allocation_item in LoanProduct.ALLOCATION_ORDER:
#Loop through all the accruals we are expecting to collect
for accrued_item, allocation_tuple in accruals.iteritems():
                        #put aside the variables from the tuple
accrued_amount, transaction_func = allocation_tuple
                        #if the allocation item matches the accrued item code, the accrued amount is more than zero,
                        #and we still have repayment balance left to allocate (so we never go negative)
if (allocation_item == accrued_item) and (accrued_amount > 0) and (allocation_balance > 0):
#if amount accrued is sizable, deduct
transaction_func(loan_account, accrued_amount, transaction_id=transaction_id)
#stamp new allocation
items_allocated += 1
#deduct amount posted from balance
allocation_balance -= accrued_amount
post_loan_principal(
loan_account,
allocation_balance,
transaction_id=transaction_id
)
loan_account.last_repayment_date = timezone.now()
loan_account.save()
else:
logger.debug("amount: %s and total_accruals %s" % (amount, total_accruals))
raise AmountNotSufficientException()
def apply_accruals(loan_account, approved_by=None):
with db_transaction.atomic():
date_disbursed = loan_account.date_disbursed
if date_disbursed is None:
raise ActionOnUndisbursedLoanException(
"You cannot apply accruals on Un-disbursed Loan %s" % loan_account.account_number)
date_today = timezone.now()
month_diff = month_delta(date_disbursed, date_today, days_of_the_month=30)
grace_period = loan_account.grace_period
grace_period_type = loan_account.product.grace_period_type
within_grace_period = ((month_diff - grace_period) < 1)
within_repayment_period = ((month_diff - loan_account.repayment_period) < 1)
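        # Grace-period semantics (as implemented below): a FULL grace period accrues
        # nothing while it lasts, a PRINCIPAL-only grace period accrues interest on
        # the full disbursed amount, and outside any grace period interest accrues on
        # the outstanding principal until the repayment period has elapsed.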
        # initialize so the check below never references an unbound interest_due when no branch runs
        interest_due = D('0.0')
        if (not grace_period == 0):
if (within_grace_period):
if (grace_period_type == LoanProduct.FULL_GRACE_PERIOD):
#No need to proceed, we don't want to accrue anything
interest_due = 0
elif (grace_period_type == LoanProduct.PRINCIPAL_GRACE_PERIOD):
principal_due = loan_account.amount
interest_due = (
(principal_due * loan_account.interest_rate) /
loan_account.repayment_period
)
else:
if within_repayment_period:
principal_due = ledger_facades.loan_account_principal_due(loan_account)
interest_due = (
(principal_due * (loan_account.interest_rate / D('100.0'))) /
loan_account.repayment_period
)
if interest_due > 0:
apply_interest_to_account(loan_account, interest_due)
def disburse_loan(loan_account, *args, **kwargs):
with db_transaction.atomic():
validation_facades.validate_disbursement(loan_account)
debit_entry, credit_entry = transaction_facades.create_transaction(
LoanTransactionEntry, loan_account, loan_account.amount,
transaction_type=codes.TRANSACTION_TYPE_LOAN_DISBURSAL, *args, **kwargs
)
loan_account.status = LoanAccount.ACTIVE
loan_account.date_disbursed = timezone.now()
loan_account.save()
return (debit_entry, credit_entry)
def apply_interest_to_account(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_INTEREST_APPLY, *args, **kwargs)
def apply_fee_to_account(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_FEE_APPLY, *args, **kwargs)
def apply_penalty_to_account(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PENALTY_APPLY, *args, **kwargs)
def write_off_loan_principal(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PRINCIPAL_WRITE_OFF, *args, **kwargs)
def write_off_loan_interest(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_INTEREST_WRITE_OFF, *args, **kwargs)
def write_off_loan_penalty(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PENALTY_WRITE_OFF, *args, **kwargs)
def write_off_loan_fee(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_FEE_WRITE_OFF, *args, **kwargs)
def post_loan_principal(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PRINCIPAL_POSTING, *args, **kwargs)
def post_loan_interest(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_INTEREST_POSTING, *args, **kwargs)
def post_loan_fee(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_FEE_POSTING, *args, **kwargs)
def post_penalty_fee(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PENALTY_POSTING, *args, **kwargs)
| apache-2.0 | 3,779,830,831,397,354,500 | 45.360656 | 115 | 0.658298 | false |
ArcherSys/ArcherSys | Lib/test/reperf.py | 1 | 1754 | import re
import time
def main():
    s = "\13hello\14 \13world\14 " * 1000
    p = re.compile(r"([\13\14])")
    timefunc(10, p.sub, "", s)
    timefunc(10, p.split, s)
    timefunc(10, p.findall, s)
def timefunc(n, func, *args, **kw):
    t0 = time.perf_counter()
    try:
        for i in range(n):
            result = func(*args, **kw)
        return result
    finally:
        t1 = time.perf_counter()
        if n > 1:
            print(n, "times", end=' ')
        print(func.__name__, "%.3f" % (t1-t0), "CPU seconds")
main()
| mit | -53,206,256,789,101,470 | 22.386667 | 61 | 0.513683 | false |
Erotemic/ibeis | dev/_scripts/installers.py | 1 | 16608 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
On Mac this needs to be run with sudo
Testing:
python %HOME%/code/ibeis/_installers/ibeis_pyinstaller_data_helper.py --test-get_data_list
python ~/code/ibeis/_installers/ibeis_pyinstaller_data_helper.py --test-get_data_list
SeeAlso:
_installers/ibeis_pyinstaller_data_helper.py
_installers/pyinstaller-ibeis.spec
WindowsNew:
python installers --build
python installers --inno
python installers --test
References:
https://groups.google.com/forum/#!topic/pyinstaller/178I9ANuk14
This script is often flaky. here are workarounds
CommonIssues:
Is the correct opencv being found?
Is 3.0 being built? I think we are on 2.4.8
InstallPyinstaller:
pip install pyinstaller
pip install pyinstaller --upgrade
Win32CommandLine:
# Uninstallation
python installers.py --clean
# Build Installer
pyinstaller --runtime-hook rthook_pyqt4.py _installers/pyinstaller-ibeis.spec -y
"C:\Program Files (x86)\Inno Setup 5\ISCC.exe" _installers\win_installer_script.iss
# Install
dist\ibeis-win32-setup.exe
# Test
"C:\Program Files (x86)\IBEIS\IBEISApp.exe"
"""
from __future__ import absolute_import, division, print_function
from os.path import dirname, realpath, join, exists, normpath
#import six
import utool as ut
import sys
import importlib
from os.path import join # NOQA
def use_development_pyinstaller():
"""
sudo pip uninstall pyinstaller
pip uninstall pyinstaller
code
git clone https://github.com/pyinstaller/pyinstaller.git
cd pyinstaller
sudo python setup.py develop
sudo python setup.py install
ib
which pyinstaller
export PATH=$PATH:/opt/local/Library/Frameworks/Python.framework/Versions/2.7/bin
had to uninstall sphinx
sudo pip uninstall sphinx
sudo pip uninstall sphinx
"""
def fix_pyinstaller_sip_api():
"""
Hack to get the correct version of SIP for win32
References:
http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
"""
import PyInstaller
from os.path import dirname, join # NOQA
hook_fpath = join(dirname(PyInstaller.__file__), 'loader', 'rthooks', 'pyi_rth_qt4plugins.py')
patch_code = ut.codeblock(
'''
try:
import sip
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
sip.setapi('QVariant', 2)
sip.setapi('QString', 2)
sip.setapi('QTextStream', 2)
sip.setapi('QTime', 2)
sip.setapi('QUrl', 2)
sip.setapi('QDate', 2)
sip.setapi('QDateTime', 2)
if hasattr(sip, 'setdestroyonexit'):
sip.setdestroyonexit(False) # This prevents a crash on windows
except ValueError as ex:
print('Warning: Value Error: %s' % str(ex))
pass
''')
fpath = hook_fpath
# Patch the hook file
tag = 'SIP_API_2'
ut.inject_python_code(fpath, patch_code, tag)
#ut.editfile(hook_fpath)
pass
def get_setup_dpath():
assert exists('setup.py'), 'must be run in ibeis directory'
#assert exists('main.py'), 'must be run in ibeis directory'
assert exists('../ibeis/ibeis'), 'must be run in ibeis directory'
cwd = normpath(realpath(dirname(__file__)))
return cwd
def clean_pyinstaller():
print('[installer] +--- CLEAN_PYINSTALLER ---')
cwd = get_setup_dpath()
ut.remove_files_in_dir(cwd, 'IBEISApp.pkg', recursive=False)
ut.remove_files_in_dir(cwd, 'qt_menu.nib', recursive=False)
ut.remove_files_in_dir(cwd, 'qt_menu.nib', recursive=False)
ut.delete(join(cwd, 'dist/ibeis'))
ut.delete(join(cwd, 'ibeis-win32-setup.exe'))
ut.delete(join(cwd, 'build'))
#ut.delete(join(cwd, 'pyrf'))
#ut.delete(join(cwd, 'pyhesaff'))
print('[installer] L___ FINSHED CLEAN_PYINSTALLER ___')
def build_pyinstaller():
"""
build_pyinstaller creates build/ibeis/* and dist/ibeis/*
"""
print('[installer] +--- BUILD_PYINSTALLER ---')
# 1) RUN: PYINSTALLER
# Run the pyinstaller command (does all the work)
utool_python_path = dirname(dirname(ut.__file__))
#import os
#os.environ['PYTHONPATH'] = os.pathsep.join([utool_python_path] + os.environ['PYTHONPATH'].strip(os.pathsep).split(os.pathsep))
import os
sys.path.insert(1, utool_python_path)
if not ut.WIN32:
pathcmd = 'export PYTHONPATH=%s%s$PYTHONPATH && ' % (utool_python_path, os.pathsep)
else:
pathcmd = ''
installcmd = '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/bin/pyinstaller --runtime-hook _installers/rthook_pyqt4.py _installers/pyinstaller-ibeis.spec -y'
output, err, ret = ut.cmd(pathcmd + installcmd)
if ret != 0:
raise AssertionError('Pyinstalled failed with return code = %r' % (ret,))
#ut.cmd(installcmd)
#ut.cmd('pyinstaller --runtime-hook rthook_pyqt4.py _installers/pyinstaller-ibeis.spec -y')
#else:
#ut.cmd('pyinstaller', '_installers/pyinstaller-ibeis.spec', '-y')
#ut.cmd('pyinstaller', '--runtime-hook rthook_pyqt4.py', '_installers/pyinstaller-ibeis.spec')
# 2) POST: PROCESSING
# Perform some post processing steps on the mac
if sys.platform == 'darwin' and exists('dist/IBEIS.app/Contents/'):
copy_list = [
('ibsicon.icns', 'Resources/icon-windowed.icns'),
('Info.plist', 'Info.plist'),
]
srcdir = '_installers'
dstdir = 'dist/IBEIS.app/Contents/'
for srcname, dstname in copy_list:
src = join(srcdir, srcname)
dst = join(dstdir, dstname)
ut.copy(src, dst)
# TODO: make this take arguments instead of defaulting to ~/code/ibeis/build
#print("RUN: sudo ./_installers/mac_dmg_builder.sh")
app_fpath = get_dist_app_fpath()
print('[installer] app_fpath = %s' % (app_fpath,))
print('[installer] L___ FINISH BUILD_PYINSTALLER ___')
# ut.cmd('./_scripts/mac_dmg_builder.sh')
def ensure_inno_isinstalled():
""" Ensures that the current machine has INNO installed. returns path to the
executable """
assert ut.WIN32, 'Can only build INNO on windows'
inno_fpath = ut.search_in_dirs('Inno Setup 5\ISCC.exe', ut.get_install_dirs())
# Make sure INNO is installed
if inno_fpath is None:
print('WARNING: cannot find inno_fpath')
AUTO_FIXIT = ut.WIN32
print('Inno seems to not be installed. AUTO_FIXIT=%r' % AUTO_FIXIT)
if AUTO_FIXIT:
            print('Automatically trying to download and install INNO')
# Download INNO Installer
inno_installer_url = 'http://www.jrsoftware.org/download.php/ispack.exe'
inno_installer_fpath = ut.download_url(inno_installer_url)
            print('Automatically trying to install INNO')
# Install INNO Installer
ut.cmd(inno_installer_fpath)
else:
inno_homepage_url = 'http://www.jrsoftware.org/isdl.php'
ut.open_url_in_browser(inno_homepage_url)
raise AssertionError('Cannot find INNO and AUTOFIX it is false')
# Ensure that it has now been installed
inno_fpath = ut.search_in_dirs('Inno Setup 5\ISCC.exe', ut.get_install_dirs())
assert ut.checkpath(inno_fpath, verbose=True, info=True), 'inno installer is still not installed!'
return inno_fpath
def ensure_inno_script():
""" writes inno script to disk for win32 installer build """
cwd = get_setup_dpath()
iss_script_fpath = join(cwd, '_installers', 'win_installer_script.iss')
# THE ISS USES {} AS SYNTAX. CAREFUL
#app_publisher = 'Rensselaer Polytechnic Institute'
#app_name = 'IBEIS'
import ibeis
iss_script_code = ut.codeblock(
r'''
; Script generated by the Inno Setup Script Wizard.
; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!
; http://www.jrsoftware.org/isdl.php
[Setup]
; NOTE: The value of AppId uniquely identifies this application.
; Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
; Also it seems like the off-balanced curly brace is necessary
AppId={{47BE3DA2-261D-4672-9849-18BB2EB382FC}
AppName=IBEIS
AppVersion=''' + str(ibeis.__version__) + '''
;AppVerName=IBEIS 1
AppPublisher=Rensselaer Polytechnic Institute
AppPublisherURL=ibeis.org ;www.rpi.edu/~crallj/
AppSupportURL=ibeis.org ;ww.rpi.edu/~crallj/
AppUpdatesURL=ibeis.org ;www.rpi.edu/~crallj/
DefaultDirName={pf}\IBEIS
DefaultGroupName=IBEIS
OutputBaseFilename=ibeis-win32-setup
SetupIconFile=ibsicon.ico
Compression=lzma
SolidCompression=yes
[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"
[Tasks]
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
[Files]
Source: "..\dist\ibeis\IBEISApp.exe"; DestDir: "{app}"; Flags: ignoreversion
Source: "..\dist\ibeis\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
; NOTE: Don't use "Flags: ignoreversion" on any shared system files
[Icons]
Name: "{group}\ibeis"; Filename: "{app}\IBEISApp.exe"
Name: "{commondesktop}\ibeis"; Filename: "{app}\IBEISApp.exe"; Tasks: desktopicon
[Run]
Filename: "{app}\IBEISApp.exe"; Description: "{cm:LaunchProgram,IBEIS}"; Flags: nowait postinstall skipifsilent
'''
)
ut.write_to(iss_script_fpath, iss_script_code, onlyifdiff=True)
assert ut.checkpath(iss_script_fpath, verbose=True, info=True), 'cannot find iss_script_fpath'
return iss_script_fpath
def build_win32_inno_installer():
""" win32 self-executable package """
print('[installer] +--- BUILD_WIN32_INNO_INSTALLER ---')
assert ut.WIN32, 'Can only build INNO on windows'
# Get inno executable
inno_fpath = ensure_inno_isinstalled()
# Get IBEIS inno script
iss_script_fpath = ensure_inno_script()
print('Trying to run ' + ' '.join(['"' + inno_fpath + '"', '"' + iss_script_fpath + '"']))
try:
command_args = ' '.join((inno_fpath, iss_script_fpath))
ut.cmd(command_args)
except Exception as ex:
ut.printex(ex, 'error running script')
raise
# Move the installer into dist and make a timestamped version
# Uninstall exe in case we need to cleanup
#uninstall_ibeis_exe = 'unins000.exe'
cwd = get_setup_dpath()
installer_fpath = join(cwd, '_installers', 'Output', 'ibeis-win32-setup.exe')
print('[installer] L___ BUILD_WIN32_INNO_INSTALLER ___')
return installer_fpath
def build_osx_dmg_installer():
# outputs dmg to
ut.cmd('./_installers/mac_dmg_builder.sh', sudo=True)
cwd = get_setup_dpath()
installer_fpath = join(cwd, 'dist', 'IBEIS.dmg')
return installer_fpath
def build_linux_zip_binaries():
fpath_list = ut.ls('dist/ibeis')
archive_fpath = 'dist/ibeis-linux-binary.zip'
ut.archive_files(archive_fpath, fpath_list)
return archive_fpath
def package_installer():
"""
system dependent post pyinstaller step
"""
print('[installer] +--- PACKAGE_INSTALLER ---')
#build_win32_inno_installer()
cwd = get_setup_dpath()
# Build the os-appropriate package
if sys.platform.startswith('win32'):
installer_src = build_win32_inno_installer()
installer_fname_fmt = 'ibeis-win32-install-{timestamp}.exe'
elif sys.platform.startswith('darwin'):
installer_src = build_osx_dmg_installer()
installer_fname_fmt = 'ibeis-osx-install-{timestamp}.dmg'
elif sys.platform.startswith('linux'):
installer_src = build_linux_zip_binaries()
installer_fname_fmt = 'ibeis-linux-binary-{timestamp}.zip'
#try:
# raise NotImplementedError('no linux packager (rpm or deb) supported. try running with --build')
#except Exception as ex:
# ut.printex(ex)
#pass
# timestamp the installer name
installer_fname = installer_fname_fmt.format(timestamp=ut.get_timestamp())
installer_dst = join(cwd, 'dist', installer_fname)
try:
ut.move(installer_src, installer_dst)
except Exception as ex:
ut.printex(ex, 'error moving setups', iswarning=True)
print('[installer] L___ FINISH PACKAGE_INSTALLER ___')
def fix_importlib_hook():
""" IMPORTLIB FIX
References:
http://stackoverflow.com/questions/18596410/importerror-no-module-named-mpl-toolkits-with-maptlotlib-1-3-0-and-py2exe
"""
try:
dpath_ = importlib.import_module('mpl_toolkits').__path__
if isinstance(dpath_, (list, tuple)):
for dpath in dpath_:
fpath = join(dpath, '__init__.py')
break
        else:
            dpath = dpath_
            fpath = join(dpath, '__init__.py')
        if not ut.checkpath(fpath, verbose=True, info=True):
            ut.touch(fpath)
except ImportError as ex:
ut.printex(ex, 'pip install mpl_toolkits?')
def get_dist_app_fpath():
app_fpath = ut.unixpath('dist/ibeis/IBEISApp')
if ut.DARWIN:
app_fpath = ut.unixpath('dist/IBEIS.app/Contents/MacOS/IBEISApp')
if ut.WIN32:
app_fpath += '.exe'
return app_fpath
def run_suite_test():
app_fpath = get_dist_app_fpath()
ut.assert_exists(app_fpath, 'app fpath must exist', info=True, verbose=True)
ut.cmd(app_fpath + ' --run-utool-tests')
#ut.cmd(app_fpath + ' --run-vtool_ibeis-tests')
#ut.cmd(app_fpath + ' --run-ibeis-tests')
def run_app_test():
"""
Execute the installed app
"""
print('[installer] +--- TEST_APP ---')
app_fpath = get_dist_app_fpath()
ut.assert_exists(app_fpath, 'app fpath must exist', info=True, verbose=True)
if ut.DARWIN:
#ut.cmd('open ' + ut.unixpath('dist/IBEIS.app'))
"""
rm -rf ~/Desktop/IBEIS.app
rm -rf /Applications/IBEIS.app
ls /Applications/IBEIS.app
cd /Volumes/IBEIS
ib
cd dist
# Install to /Applications
hdiutil attach ~/code/ibeis/dist/IBEIS.dmg
cp -R /Volumes/IBEIS/IBEIS.app /Applications/IBEIS.app
hdiutil unmount /Volumes/IBEIS
open -a /Applications/IBEIS.app
chmod +x /Applications/IBEIS.app/Contents/MacOS/IBEISApp
cp -R /Volumes/IBEIS/IBEIS.app ~/Desktop
open -a ~/Desktop/IBEIS.app
chmod +x ~/code/ibeis/dist/IBEIS.app/Contents/MacOS/IBEISApp
open -a ~/code/ibeis/dist/IBEIS.app
open ~/code/ibeis/dist/IBEIS.app/Contents/MacOS/IBEISApp
open ~/Desktop/IBEIS.app
./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-tests
"""
ut.cmd(app_fpath)
else:
ut.cmd(app_fpath)
print('[installer] L___ FINISH TEST_APP ___')
#ut.cmd(ut.unixpath('dist/ibeis/ibeis-win32-setup.exe'))
def main():
"""
CommandLine:
python installers.py --clean
python installers.py --all
python installers.py --inno
# For linux
python installers.py --clean
python installers.py --build
python installers.py --test
python installers.py --clean --build --test
python installers.py --build --test
"""
print('For a full run use: python installers.py --all')
print('[installer] +--- MAIN ---')
import functools
get_argflag = functools.partial(ut.get_argflag, need_prefix=False)
BUILD_APP = get_argflag(('--build'))
BUILD_INSTALLER = get_argflag(('--inno', '--package', '--pkg'))
TEST_RUN = get_argflag(('--run'))
TEST_CODE = get_argflag(('--test'))
CLEAN_BUILD = get_argflag(('--clean'))
ALL = get_argflag('--all')
fix_importlib_hook()
# default behavior is full build
DEFAULT_RUN = len(sys.argv) == 1
#or not (CLEAN_BUILD or BUILD_APP or BUILD_INSTALLER or TEST_APP)
# 1) SETUP: CLEAN UP
if CLEAN_BUILD or ALL:
clean_pyinstaller()
if BUILD_APP or ALL or DEFAULT_RUN:
build_pyinstaller()
if BUILD_INSTALLER or ALL:
package_installer()
if TEST_CODE or ALL:
run_suite_test()
if TEST_RUN or ALL:
run_app_test()
print('[installer] L___ FINISH MAIN ___')
if __name__ == '__main__':
main()
'''
dist\ibeis-win32-setup.exe
dist\ibeis\IBEISApp.exe
'''
| apache-2.0 | 5,563,224,776,023,611,000 | 34.037975 | 175 | 0.635718 | false |
rr-/ida-images | rgb-ida.py | 1 | 3099 | import idaapi
import librgb
from librgb.qt_shims import QtGui # important for PySide legacy IDA
from librgb.qt_shims import QtWidgets
try:
MAJOR, MINOR = map(int, idaapi.get_kernel_version().split("."))
except AttributeError:
MAJOR, MINOR = 6, 6
USING_IDA7API = MAJOR > 6
USING_PYQT5 = USING_IDA7API or (MAJOR == 6 and MINOR >= 9)
class DockableShim(object):
def __init__(self, title):
self._title = title
# IDA 7+ Widgets
if USING_IDA7API:
import sip
self._form = idaapi.create_empty_widget(self._title)
self.widget = sip.wrapinstance(long(self._form), QtWidgets.QWidget)
# legacy IDA PluginForm's
else:
self._form = idaapi.create_tform(self._title, None)
if USING_PYQT5:
self.widget = idaapi.PluginForm.FormToPyQtWidget(self._form)
else:
self.widget = idaapi.PluginForm.FormToPySideWidget(self._form)
def show(self):
if USING_IDA7API:
flags = (
idaapi.PluginForm.WOPN_TAB
| idaapi.PluginForm.WOPN_MENU
| idaapi.PluginForm.WOPN_RESTORE
| idaapi.PluginForm.WOPN_PERSIST
)
idaapi.display_widget(self._form, flags)
# legacy IDA PluginForm's
else:
flags = (
idaapi.PluginForm.FORM_TAB
| idaapi.PluginForm.FORM_MENU
| idaapi.PluginForm.FORM_RESTORE
| idaapi.PluginForm.FORM_PERSIST
| 0x80
) # idaapi.PluginForm.FORM_QWIDGET
idaapi.open_tform(self._form, flags)
class ImagePreviewPlugin(idaapi.plugin_t):
flags = 0
wanted_name = "Image previewer"
wanted_hotkey = "Alt + I"
comment = "Preview memory as image"
help = "https://github.com/rr-/ida-images"
def init(self):
return idaapi.PLUGIN_OK
def term(self):
pass
def run(self, arg):
class IdaWindowAdapter(librgb.GenericWindowAdapter):
def ask_address(self, address):
return AskAddr(address, "Please enter an address")
def ask_file(self):
return AskFile(1, "*.png", "Save the image as...")
image_preview_form = DockableShim("Image preview")
params = librgb.RendererParams()
params.readers = [librgb.MemoryReader()]
params.format = librgb.PixelFormats.GRAY8
params.width = 800
params.height = 600
params.flip = False
params.brightness = 50
adapter = IdaWindowAdapter(params)
shortcut_manager = librgb.ShortcutManager(adapter, params)
for shortcut, func in shortcut_manager.shortcuts.items():
adapter.define_shortcut(shortcut, image_preview_form.widget, func)
layout = adapter.create_layout()
image_preview_form.widget.setLayout(layout)
adapter.draw()
image_preview_form.show()
def PLUGIN_ENTRY():
return ImagePreviewPlugin()
if __name__ == "__main__":
ImagePreviewPlugin().run(0)
| mit | -2,430,317,480,103,620,600 | 29.683168 | 79 | 0.595999 | false |
bukun/pycsw | pycsw/server.py | 1 | 35618 | # -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
# Angelos Tzotsos <[email protected]>
#
# Copyright (c) 2016 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
# Copyright (c) 2016 James Dickens
# Copyright (c) 2016 Ricardo Silva
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import os
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import splitquery
from six.moves.urllib.parse import urlparse
from six import StringIO
from six.moves.configparser import SafeConfigParser
import sys
from time import time
import wsgiref.util
from pycsw.core.etree import etree
from pycsw import oaipmh, opensearch, sru
from pycsw.plugins.profiles import profile as pprofile
import pycsw.plugins.outputschemas
from pycsw.core import config, log, util
from pycsw.ogc.csw import csw2, csw3
LOGGER = logging.getLogger(__name__)
class Csw(object):
""" Base CSW server """
def __init__(self, rtconfig=None, env=None, version='3.0.0'):
""" Initialize CSW """
if not env:
self.environ = os.environ
else:
self.environ = env
self.context = config.StaticContext()
# Lazy load this when needed
# (it will permanently update global cfg namespaces)
self.sruobj = None
self.opensearchobj = None
self.oaipmhobj = None
# init kvp
self.kvp = {}
self.mode = 'csw'
self.asynchronous = False
self.soap = False
self.request = None
self.exception = False
self.status = 'OK'
self.profiles = None
self.manager = False
self.outputschemas = {}
self.mimetype = 'application/xml; charset=UTF-8'
self.encoding = 'UTF-8'
self.pretty_print = 0
self.domainquerytype = 'list'
self.orm = 'django'
self.language = {'639_code': 'en', 'text': 'english'}
self.process_time_start = time()
# define CSW implementation object (default CSW3)
self.iface = csw3.Csw3(server_csw=self)
self.request_version = version
if self.request_version == '2.0.2':
self.iface = csw2.Csw2(server_csw=self)
self.context.set_model('csw')
# load user configuration
try:
LOGGER.info('Loading user configuration')
if isinstance(rtconfig, SafeConfigParser): # serialized already
self.config = rtconfig
else:
self.config = SafeConfigParser()
if isinstance(rtconfig, dict): # dictionary
for section, options in rtconfig.items():
self.config.add_section(section)
for k, v in options.items():
self.config.set(section, k, v)
else: # configuration file
import codecs
with codecs.open(rtconfig, encoding='utf-8') as scp:
self.config.readfp(scp)
except Exception as err:
msg = 'Could not load configuration'
LOGGER.exception('%s %s: %s', msg, rtconfig, err)
self.response = self.iface.exceptionreport(
'NoApplicableCode', 'service', msg)
return
# set server.home safely
# TODO: make this more abstract
self.config.set(
'server', 'home',
os.path.dirname(os.path.join(os.path.dirname(__file__), '..'))
)
self.context.pycsw_home = self.config.get('server', 'home')
self.context.url = self.config.get('server', 'url')
log.setup_logger(self.config)
LOGGER.info('running configuration %s', rtconfig)
LOGGER.debug('QUERY_STRING: %s', self.environ['QUERY_STRING'])
# set OGC schemas location
if not self.config.has_option('server', 'ogc_schemas_base'):
self.config.set('server', 'ogc_schemas_base',
self.context.ogc_schemas_base)
# set mimetype
if self.config.has_option('server', 'mimetype'):
self.mimetype = self.config.get('server', 'mimetype').encode()
# set encoding
if self.config.has_option('server', 'encoding'):
self.encoding = self.config.get('server', 'encoding')
# set domainquerytype
if self.config.has_option('server', 'domainquerytype'):
self.domainquerytype = self.config.get('server', 'domainquerytype')
# set XML pretty print
if (self.config.has_option('server', 'pretty_print') and
self.config.get('server', 'pretty_print') == 'true'):
self.pretty_print = 1
# set Spatial Ranking option
if (self.config.has_option('server', 'spatial_ranking') and
self.config.get('server', 'spatial_ranking') == 'true'):
util.ranking_enabled = True
# set language default
if self.config.has_option('server', 'language'):
try:
LOGGER.info('Setting language')
lang_code = self.config.get('server', 'language').split('-')[0]
self.language['639_code'] = lang_code
self.language['text'] = self.context.languages[lang_code]
except Exception as err:
LOGGER.exception('Could not set language: %s', err)
pass
LOGGER.debug('Configuration: %s.', self.config)
LOGGER.debug('Model: %s.', self.context.model)
# load user-defined mappings if they exist
if self.config.has_option('repository', 'mappings'):
# override default repository mappings
try:
import imp
module = self.config.get('repository', 'mappings')
if '/' in module: # filepath
modulename = '%s' % os.path.splitext(module)[0].replace(
os.sep, '.')
mappings = imp.load_source(modulename, module)
else: # dotted name
mappings = __import__(module, fromlist=[''])
LOGGER.info('Loading custom repository mappings '
'from %s', module)
self.context.md_core_model = mappings.MD_CORE_MODEL
self.context.refresh_dc(mappings.MD_CORE_MODEL)
except Exception as err:
LOGGER.exception('Could not load custom mappings: %s', err)
self.response = self.iface.exceptionreport(
'NoApplicableCode', 'service',
'Could not load repository.mappings')
# load outputschemas
LOGGER.info('Loading outputschemas')
for osch in pycsw.plugins.outputschemas.__all__:
output_schema_module = __import__(
'pycsw.plugins.outputschemas.%s' % osch)
mod = getattr(output_schema_module.plugins.outputschemas, osch)
self.outputschemas[mod.NAMESPACE] = mod
LOGGER.debug('Outputschemas loaded: %s.', self.outputschemas)
LOGGER.debug('Namespaces: %s', self.context.namespaces)
def expand_path(self, path):
""" return safe path for WSGI environments """
if 'local.app_root' in self.environ and not os.path.isabs(path):
return os.path.join(self.environ['local.app_root'], path)
else:
return path
def dispatch_wsgi(self):
""" WSGI handler """
if hasattr(self, 'response'):
return self._write_response()
LOGGER.debug('WSGI mode detected')
if self.environ['REQUEST_METHOD'] == 'POST':
try:
request_body_size = int(self.environ.get('CONTENT_LENGTH', 0))
except (ValueError):
request_body_size = 0
self.requesttype = 'POST'
self.request = self.environ['wsgi.input'].read(request_body_size)
LOGGER.debug('Request type: POST. Request:\n%s\n', self.request)
else: # it's a GET request
self.requesttype = 'GET'
self.request = wsgiref.util.request_uri(self.environ)
try:
query_part = splitquery(self.request)[-1]
self.kvp = dict(parse_qsl(query_part, keep_blank_values=True))
except AttributeError as err:
LOGGER.exception('Could not parse query string')
self.kvp = {}
LOGGER.debug('Request type: GET. Request:\n%s\n', self.request)
return self.dispatch()
def opensearch(self):
""" enable OpenSearch """
if not self.opensearchobj:
self.opensearchobj = opensearch.OpenSearch(self.context)
return self.opensearchobj
def sru(self):
""" enable SRU """
if not self.sruobj:
self.sruobj = sru.Sru(self.context)
return self.sruobj
def oaipmh(self):
""" enable OAI-PMH """
if not self.oaipmhobj:
self.oaipmhobj = oaipmh.OAIPMH(self.context, self.config)
return self.oaipmhobj
def dispatch(self, writer=sys.stdout, write_headers=True):
""" Handle incoming HTTP request """
error = 0
if self.requesttype == 'GET':
self.kvp = self.normalize_kvp(self.kvp)
version_202 = ('version' in self.kvp and
self.kvp['version'] == '2.0.2')
accept_version_202 = ('acceptversions' in self.kvp and
'2.0.2' in self.kvp['acceptversions'])
if version_202 or accept_version_202:
self.request_version = '2.0.2'
elif self.requesttype == 'POST':
if self.request.find(b'cat/csw/2.0.2') != -1:
self.request_version = '2.0.2'
elif self.request.find(b'cat/csw/3.0') != -1:
self.request_version = '3.0.0'
if (not isinstance(self.kvp, str) and 'mode' in self.kvp and
self.kvp['mode'] == 'sru'):
self.mode = 'sru'
self.request_version = '2.0.2'
LOGGER.info('SRU mode detected; processing request')
self.kvp = self.sru().request_sru2csw(self.kvp)
if (not isinstance(self.kvp, str) and 'mode' in self.kvp and
self.kvp['mode'] == 'oaipmh'):
self.mode = 'oaipmh'
self.request_version = '2.0.2'
LOGGER.info('OAI-PMH mode detected; processing request.')
self.oaiargs = dict((k, v) for k, v in self.kvp.items() if k)
self.kvp = self.oaipmh().request(self.kvp)
if self.request_version == '2.0.2':
self.iface = csw2.Csw2(server_csw=self)
self.context.set_model('csw')
# configure transaction support, if specified in config
self._gen_manager()
namespaces = self.context.namespaces
ops = self.context.model['operations']
constraints = self.context.model['constraints']
# generate domain model
# NOTE: We should probably avoid this sort of mutable state for WSGI
if 'GetDomain' not in ops:
ops['GetDomain'] = self.context.gen_domains()
# generate distributed search model, if specified in config
if self.config.has_option('server', 'federatedcatalogues'):
LOGGER.info('Configuring distributed search')
constraints['FederatedCatalogues'] = {'values': []}
for fedcat in self.config.get('server',
'federatedcatalogues').split(','):
LOGGER.debug('federated catalogue: %s', fedcat)
constraints['FederatedCatalogues']['values'].append(fedcat)
for key, value in self.outputschemas.items():
get_records_params = ops['GetRecords']['parameters']
get_records_params['outputSchema']['values'].append(
value.NAMESPACE)
get_records_by_id_params = ops['GetRecordById']['parameters']
get_records_by_id_params['outputSchema']['values'].append(
value.NAMESPACE)
if 'Harvest' in ops:
harvest_params = ops['Harvest']['parameters']
harvest_params['ResourceType']['values'].append(
value.NAMESPACE)
LOGGER.info('Setting MaxRecordDefault')
if self.config.has_option('server', 'maxrecords'):
constraints['MaxRecordDefault']['values'] = [
self.config.get('server', 'maxrecords')]
# load profiles
if self.config.has_option('server', 'profiles'):
self.profiles = pprofile.load_profiles(
os.path.join('pycsw', 'plugins', 'profiles'),
pprofile.Profile,
self.config.get('server', 'profiles')
)
for prof in self.profiles['plugins'].keys():
tmp = self.profiles['plugins'][prof](self.context.model,
namespaces,
self.context)
key = tmp.outputschema # to ref by outputschema
self.profiles['loaded'][key] = tmp
self.profiles['loaded'][key].extend_core(self.context.model,
namespaces,
self.config)
LOGGER.debug('Profiles loaded: %s' % list(self.profiles['loaded'].keys()))
# init repository
# look for tablename, set 'records' as default
if not self.config.has_option('repository', 'table'):
self.config.set('repository', 'table', 'records')
repo_filter = None
if self.config.has_option('repository', 'filter'):
repo_filter = self.config.get('repository', 'filter')
if self.config.has_option('repository', 'source'): # load custom repository
rs = self.config.get('repository', 'source')
rs_modname, rs_clsname = rs.rsplit('.', 1)
rs_mod = __import__(rs_modname, globals(), locals(), [rs_clsname])
rs_cls = getattr(rs_mod, rs_clsname)
try:
self.repository = rs_cls(self.context, repo_filter)
LOGGER.debug('Custom repository %s loaded (%s)', rs, self.repository.dbtype)
except Exception as err:
msg = 'Could not load custom repository %s: %s' % (rs, err)
LOGGER.exception(msg)
error = 1
code = 'NoApplicableCode'
locator = 'service'
text = 'Could not initialize repository. Check server logs'
else: # load default repository
self.orm = 'sqlalchemy'
from pycsw.core import repository
try:
LOGGER.info('Loading default repository')
self.repository = repository.Repository(
self.config.get('repository', 'database'),
self.context,
self.environ.get('local.app_root', None),
self.config.get('repository', 'table'),
repo_filter
)
LOGGER.debug(
'Repository loaded (local): %s.' % self.repository.dbtype)
except Exception as err:
msg = 'Could not load repository (local): %s' % err
LOGGER.exception(msg)
error = 1
code = 'NoApplicableCode'
locator = 'service'
text = 'Could not initialize repository. Check server logs'
if self.requesttype == 'POST':
LOGGER.debug('HTTP POST request')
LOGGER.debug('CSW version: %s', self.iface.version)
self.kvp = self.iface.parse_postdata(self.request)
if isinstance(self.kvp, str): # it's an exception
error = 1
locator = 'service'
text = self.kvp
if (self.kvp.find('the document is not valid') != -1 or
self.kvp.find('document not well-formed') != -1):
code = 'NoApplicableCode'
else:
code = 'InvalidParameterValue'
LOGGER.debug('HTTP Headers:\n%s.', self.environ)
LOGGER.debug('Parsed request parameters: %s', self.kvp)
if (not isinstance(self.kvp, str) and 'mode' in self.kvp and
self.kvp['mode'] == 'opensearch'):
self.mode = 'opensearch'
LOGGER.info('OpenSearch mode detected; processing request.')
self.kvp['outputschema'] = 'http://www.w3.org/2005/Atom'
if ((len(self.kvp) == 0 and self.request_version == '3.0.0') or
(len(self.kvp) == 1 and 'config' in self.kvp)):
LOGGER.info('Turning on default csw30:Capabilities for base URL')
self.kvp = {
'service': 'CSW',
'acceptversions': '3.0.0',
'request': 'GetCapabilities'
}
http_accept = self.environ.get('HTTP_ACCEPT', '')
if 'application/opensearchdescription+xml' in http_accept:
self.mode = 'opensearch'
self.kvp['outputschema'] = 'http://www.w3.org/2005/Atom'
if error == 0:
# test for the basic keyword values (service, version, request)
basic_options = ['service', 'request']
request = self.kvp.get('request', '')
own_version_integer = util.get_version_integer(
self.request_version)
if self.request_version == '2.0.2':
basic_options.append('version')
if self.request_version == '3.0.0' and 'version' not in self.kvp and self.requesttype == 'POST':
if 'service' not in self.kvp:
self.kvp['service'] = 'CSW'
basic_options.append('service')
self.kvp['version'] = self.request_version
basic_options.append('version')
for k in basic_options:
if k not in self.kvp:
if (k in ['version', 'acceptversions'] and
request == 'GetCapabilities'):
pass
else:
error = 1
locator = k
code = 'MissingParameterValue'
text = 'Missing keyword: %s' % k
break
# test each of the basic keyword values
if error == 0:
# test service
if self.kvp['service'] != 'CSW':
error = 1
locator = 'service'
code = 'InvalidParameterValue'
text = 'Invalid value for service: %s.\
Value MUST be CSW' % self.kvp['service']
# test version
kvp_version = self.kvp.get('version', '')
try:
kvp_version_integer = util.get_version_integer(kvp_version)
except Exception as err:
kvp_version_integer = 'invalid_value'
if (request != 'GetCapabilities' and
kvp_version_integer != own_version_integer):
error = 1
locator = 'version'
code = 'InvalidParameterValue'
text = ('Invalid value for version: %s. Value MUST be '
'2.0.2 or 3.0.0' % kvp_version)
# check for GetCapabilities acceptversions
if 'acceptversions' in self.kvp:
for vers in self.kvp['acceptversions'].split(','):
vers_integer = util.get_version_integer(vers)
if vers_integer == own_version_integer:
break
else:
error = 1
locator = 'acceptversions'
code = 'VersionNegotiationFailed'
text = ('Invalid parameter value in '
'acceptversions: %s. Value MUST be '
'2.0.2 or 3.0.0' %
self.kvp['acceptversions'])
# test request
if self.kvp['request'] not in \
self.context.model['operations']:
error = 1
locator = 'request'
if request in ['Transaction', 'Harvest']:
code = 'OperationNotSupported'
text = '%s operations are not supported' % request
else:
code = 'InvalidParameterValue'
text = 'Invalid value for request: %s' % request
if error == 1: # return an ExceptionReport
LOGGER.error('basic service options error: %s, %s, %s', code, locator, text)
self.response = self.iface.exceptionreport(code, locator, text)
else: # process per the request value
if 'responsehandler' in self.kvp:
# set flag to process asynchronously
import threading
self.asynchronous = True
request_id = self.kvp.get('requestid', None)
if request_id is None:
import uuid
self.kvp['requestid'] = str(uuid.uuid4())
if self.kvp['request'] == 'GetCapabilities':
self.response = self.iface.getcapabilities()
elif self.kvp['request'] == 'DescribeRecord':
self.response = self.iface.describerecord()
elif self.kvp['request'] == 'GetDomain':
self.response = self.iface.getdomain()
elif self.kvp['request'] == 'GetRecords':
if self.asynchronous: # process asynchronously
threading.Thread(target=self.iface.getrecords).start()
self.response = self.iface._write_acknowledgement()
else:
self.response = self.iface.getrecords()
elif self.kvp['request'] == 'GetRecordById':
self.response = self.iface.getrecordbyid()
elif self.kvp['request'] == 'GetRepositoryItem':
self.response = self.iface.getrepositoryitem()
elif self.kvp['request'] == 'Transaction':
self.response = self.iface.transaction()
elif self.kvp['request'] == 'Harvest':
if self.asynchronous: # process asynchronously
threading.Thread(target=self.iface.harvest).start()
self.response = self.iface._write_acknowledgement()
else:
self.response = self.iface.harvest()
else:
self.response = self.iface.exceptionreport(
'InvalidParameterValue', 'request',
'Invalid request parameter: %s' % self.kvp['request']
)
LOGGER.info('Request processed')
if self.mode == 'sru':
LOGGER.info('SRU mode detected; processing response.')
self.response = self.sru().response_csw2sru(self.response,
self.environ)
elif self.mode == 'opensearch':
LOGGER.info('OpenSearch mode detected; processing response.')
self.response = self.opensearch().response_csw2opensearch(
self.response, self.config)
elif self.mode == 'oaipmh':
LOGGER.info('OAI-PMH mode detected; processing response.')
self.response = self.oaipmh().response(
self.response, self.oaiargs, self.repository,
self.config.get('server', 'url')
)
return self._write_response()
def getcapabilities(self):
""" Handle GetCapabilities request """
return self.iface.getcapabilities()
def describerecord(self):
""" Handle DescribeRecord request """
return self.iface.describerecord()
def getdomain(self):
""" Handle GetDomain request """
return self.iface.getdomain()
def getrecords(self):
""" Handle GetRecords request """
return self.iface.getrecords()
def getrecordbyid(self, raw=False):
""" Handle GetRecordById request """
return self.iface.getrecordbyid(raw)
def getrepositoryitem(self):
""" Handle GetRepositoryItem request """
return self.iface.getrepositoryitem()
def transaction(self):
""" Handle Transaction request """
return self.iface.transaction()
def harvest(self):
""" Handle Harvest request """
return self.iface.harvest()
def _write_response(self):
""" Generate response """
# set HTTP response headers and XML declaration
xmldecl = ''
appinfo = ''
LOGGER.info('Writing response.')
if hasattr(self, 'soap') and self.soap:
self._gen_soap_wrapper()
if etree.__version__ >= '3.5.0': # remove superfluous namespaces
etree.cleanup_namespaces(self.response,
keep_ns_prefixes=self.context.keep_ns_prefixes)
response = etree.tostring(self.response,
pretty_print=self.pretty_print,
encoding='unicode')
if (isinstance(self.kvp, dict) and 'outputformat' in self.kvp and
self.kvp['outputformat'] == 'application/json'):
self.contenttype = self.kvp['outputformat']
from pycsw.core.formats import fmt_json
response = fmt_json.xml2json(response,
self.context.namespaces,
self.pretty_print)
else: # it's XML
if 'outputformat' in self.kvp:
self.contenttype = self.kvp['outputformat']
else:
self.contenttype = self.mimetype
xmldecl = ('<?xml version="1.0" encoding="%s" standalone="no"?>'
'\n' % self.encoding)
appinfo = '<!-- pycsw %s -->\n' % self.context.version
if isinstance(self.contenttype, bytes):
self.contenttype = self.contenttype.decode()
s = (u'%s%s%s' % (xmldecl, appinfo, response)).encode(self.encoding)
LOGGER.debug('Response code: %s',
self.context.response_codes[self.status])
LOGGER.debug('Response:\n%s', s)
return [self.context.response_codes[self.status], s]
def _gen_soap_wrapper(self):
""" Generate SOAP wrapper """
LOGGER.info('Writing SOAP wrapper.')
node = etree.Element(
util.nspath_eval('soapenv:Envelope', self.context.namespaces),
nsmap=self.context.namespaces
)
schema_location_ns = util.nspath_eval('xsi:schemaLocation',
self.context.namespaces)
node.attrib[schema_location_ns] = '%s %s' % (
self.context.namespaces['soapenv'],
self.context.namespaces['soapenv']
)
node2 = etree.SubElement(
node, util.nspath_eval('soapenv:Body', self.context.namespaces))
if self.exception:
node3 = etree.SubElement(
node2,
util.nspath_eval('soapenv:Fault', self.context.namespaces)
)
node4 = etree.SubElement(
node3,
util.nspath_eval('soapenv:Code', self.context.namespaces)
)
etree.SubElement(
node4,
util.nspath_eval('soapenv:Value', self.context.namespaces)
).text = 'soap:Server'
node4 = etree.SubElement(
node3,
util.nspath_eval('soapenv:Reason', self.context.namespaces)
)
etree.SubElement(
node4,
util.nspath_eval('soapenv:Text', self.context.namespaces)
).text = 'A server exception was encountered.'
node4 = etree.SubElement(
node3,
util.nspath_eval('soapenv:Detail', self.context.namespaces)
)
node4.append(self.response)
else:
node2.append(self.response)
self.response = node
def _gen_manager(self):
""" Update self.context.model with CSW-T advertising """
if (self.config.has_option('manager', 'transactions') and
self.config.get('manager', 'transactions') == 'true'):
self.manager = True
self.context.model['operations_order'].append('Transaction')
self.context.model['operations']['Transaction'] = {
'methods': {'get': False, 'post': True},
'parameters': {}
}
schema_values = [
'http://www.opengis.net/cat/csw/2.0.2',
'http://www.opengis.net/cat/csw/3.0',
'http://www.opengis.net/wms',
'http://www.opengis.net/wmts/1.0',
'http://www.opengis.net/wfs',
'http://www.opengis.net/wfs/2.0',
'http://www.opengis.net/wcs',
'http://www.opengis.net/wps/1.0.0',
'http://www.opengis.net/sos/1.0',
'http://www.opengis.net/sos/2.0',
'http://www.isotc211.org/2005/gmi',
'urn:geoss:waf',
]
self.context.model['operations_order'].append('Harvest')
self.context.model['operations']['Harvest'] = {
'methods': {'get': False, 'post': True},
'parameters': {
'ResourceType': {'values': schema_values}
}
}
self.context.model['operations']['Transaction'] = {
'methods': {'get': False, 'post': True},
'parameters': {
'TransactionSchemas': {'values': sorted(schema_values)}
}
}
self.csw_harvest_pagesize = 10
if self.config.has_option('manager', 'csw_harvest_pagesize'):
self.csw_harvest_pagesize = int(
self.config.get('manager', 'csw_harvest_pagesize'))
def _test_manager(self):
""" Verify that transactions are allowed """
if self.config.get('manager', 'transactions') != 'true':
raise RuntimeError('CSW-T interface is disabled')
""" get the client first forwarded ip """
if 'HTTP_X_FORWARDED_FOR' in self.environ:
ipaddress = self.environ['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
else:
ipaddress = self.environ['REMOTE_ADDR']
if not self.config.has_option('manager', 'allowed_ips') or \
(self.config.has_option('manager', 'allowed_ips') and not
util.ipaddress_in_whitelist(ipaddress,
self.config.get('manager', 'allowed_ips').split(','))):
raise RuntimeError(
'CSW-T operations not allowed for this IP address: %s' % ipaddress)
def _cql_update_queryables_mappings(self, cql, mappings):
""" Transform CQL query's properties to underlying DB columns """
LOGGER.debug('Raw CQL text = %s', cql)
LOGGER.debug(str(list(mappings.keys())))
if cql is not None:
for key in mappings.keys():
try:
cql = cql.replace(key, mappings[key]['dbcol'])
except:
cql = cql.replace(key, mappings[key])
LOGGER.debug('Interpolated CQL text = %s.', cql)
return cql
def _process_responsehandler(self, xml):
""" Process response handler """
if self.kvp['responsehandler'] is not None:
LOGGER.info('Processing responsehandler %s' %
self.kvp['responsehandler'])
uprh = urlparse(self.kvp['responsehandler'])
if uprh.scheme == 'mailto': # email
import smtplib
LOGGER.debug('Email detected')
smtp_host = 'localhost'
if self.config.has_option('server', 'smtp_host'):
smtp_host = self.config.get('server', 'smtp_host')
body = ('Subject: pycsw %s results\n\n%s' %
(self.kvp['request'], xml))
try:
LOGGER.info('Sending email')
msg = smtplib.SMTP(smtp_host)
msg.sendmail(
self.config.get('metadata:main', 'contact_email'),
uprh.path, body
)
msg.quit()
LOGGER.debug('Email sent successfully.')
except Exception as err:
LOGGER.exception('Error processing email')
elif uprh.scheme == 'ftp':
import ftplib
LOGGER.debug('FTP detected.')
try:
LOGGER.info('Sending to FTP server.')
ftp = ftplib.FTP(uprh.hostname)
if uprh.username is not None:
ftp.login(uprh.username, uprh.password)
ftp.storbinary('STOR %s' % uprh.path[1:], StringIO(xml))
ftp.quit()
LOGGER.debug('FTP sent successfully.')
except Exception as err:
LOGGER.exception('Error processing FTP')
@staticmethod
def normalize_kvp(kvp):
"""Normalize Key Value Pairs.
This method will transform all keys to lowercase and leave values
unchanged, as specified in the CSW standard (see for example note
C on Table 62 - KVP Encoding for DescribeRecord operation request
of the CSW standard version 2.0.2)
:arg kvp: a mapping with Key Value Pairs
:type kvp: dict
:returns: A new dictionary with normalized parameters
"""
result = dict()
for name, value in kvp.items():
result[name.lower()] = value
return result
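
# Illustrative WSGI usage sketch (not part of pycsw itself): the config path
# and environ values below are assumptions for the example only.
#
#     from pycsw.server import Csw
#
#     def application(environ, start_response):
#         environ.setdefault('local.app_root', '/var/www/pycsw')
#         csw = Csw(rtconfig='default.cfg', env=environ, version='3.0.0')
#         http_status, contents = csw.dispatch_wsgi()
#         start_response(http_status, [('Content-Type', csw.contenttype)])
#         return [contents]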
| mit | 5,338,149,996,846,063,000 | 39.659817 | 108 | 0.534758 | false |
zencore-dobetter/zencore-utils | src/zencore/utils/redis.py | 1 | 3995 | import uuid
import math
import time
import logging
import redis as engine
from zencore.errors import WrongParameterTypeError
from .types import smart_force_to_string
logger = logging.getLogger(__name__)
class RedisLock(object):
def __init__(self, url, name=None, app_name=None, expire=None, prefix="zencore:lock:", tick=5, **kwargs):
self.url = url
self.connection = engine.Redis.from_url(url, **kwargs)
self.app_name = app_name or str(uuid.uuid4())
self.prefix = prefix
self.expire = expire
self.tick = tick
if name:
self.setup(name)
def setup(self, name):
self.lock_name = ":".join([self.prefix, name])
self.signal_name = ":".join([self.prefix, name, "signal"])
def acquire(self, blocking=True, timeout=-1):
stime = time.clock()
while True:
result = self.acquire_nowait()
if result:
return True
if not blocking:
return False
if timeout == 0:
return False
if timeout > 0:
delta = math.ceil(timeout - (time.clock() - stime))
if delta < 0:
return False
if delta > self.tick:
delta = self.tick
else:
delta = self.tick
event = self.connection.blpop(self.signal_name, timeout=delta)
if event is None:
return False
def acquire_nowait(self):
result = self.connection.setnx(self.lock_name, self.app_name)
if result:
if self.expire:
self.connection.expire(self.lock_name, self.expire)
self.connection.delete(self.signal_name)
return True
return False
def release(self):
if self.is_lock_owner():
self.connection.delete(self.lock_name)
self.connection.rpush(self.signal_name, 1)
def force_clean(self):
self.connection.delete(self.lock_name)
self.connection.rpush(self.signal_name, 1)
def get_current_lock_owner(self, ):
return smart_force_to_string(self.connection.get(self.lock_name))
def is_lock_owner(self):
return self.get_current_lock_owner() == self.app_name
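
# Illustrative usage of RedisLock (the URL is an example value and assumes a
# reachable Redis server; it is not configured by this module):
#
#     lock = RedisLock("redis://localhost:6379/0", name="reports", expire=60)
#     if lock.acquire(blocking=True, timeout=10):
#         try:
#             ...  # critical section
#         finally:
#             lock.release()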
class Counter(object):
def __init__(self, connection, namespace):
self.connection = connection
self.namespace = namespace
def incr(self, name):
key = self.make_counter_key(name)
self.connection.incr(key)
def get(self, name):
key = self.make_counter_key(name)
return int(self.connection.get(key))
def getall(self):
keys = self.connection.keys(self.make_counter_pattern())
if not keys:
return {}
keys = [key.decode("utf-8") for key in keys]
values = [int(value) for value in self.connection.mget(*keys)]
return dict(zip(keys, values))
def make_counter_key(self, name):
return "{}:{}".format(self.namespace, name)
def make_counter_pattern(self):
return "{}:*".format(self.namespace)
def get_redis(config):
"""
    Get a redis connection object from a configuration value (URL string, dict, or Redis instance).
"""
if isinstance(config, engine.StrictRedis):
return config
if isinstance(config, str):
return engine.Redis.from_url(config)
if isinstance(config, dict):
url = config.get("url")
host = config.get("host")
if url:
db = config.get("db", None)
options = config.get("options", {})
return engine.Redis.from_url(url, db, **options)
if host:
return engine.Redis(**config)
logger.error("get_redis got wrong parameter type error.")
raise WrongParameterTypeError()
# ###########################################################################
# Duplicate / deprecated aliases
# ###########################################################################
make_redis_instance = get_redis
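
if __name__ == "__main__":  # pragma: no cover
    # Minimal smoke-test sketch. The URL is an assumption and requires a
    # locally reachable Redis instance.
    demo_conn = get_redis("redis://localhost:6379/0")
    demo_counter = Counter(demo_conn, "zencore:demo:counter")
    demo_counter.incr("hits")
    print(demo_counter.getall())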
| mit | 3,164,183,319,776,613,000 | 29.929688 | 109 | 0.558474 | false |
p/webracer | tests/request_via_form_test.py | 1 | 1451 | import webracer
import nose.plugins.attrib
from . import utils
from .apps import form_app
utils.app_runner_setup(__name__, form_app.app, 8059)
@nose.plugins.attrib.attr('client')
@webracer.config(host='localhost', port=8059)
class RequestViaFormTest(webracer.WebTestCase):
def test_get_form_as_url(self):
self.get('/method_check_form')
self.assert_status(200)
form = self.response.form()
self.get(form)
self.assertEqual('GET', self.response.body)
def test_post_form_as_url(self):
self.get('/textarea')
self.assert_status(200)
form = self.response.form()
self.post(form)
self.assertEqual('{}', self.response.body)
def test_post_form_with_elements(self):
self.get('/textarea')
self.assert_status(200)
form = self.response.form()
elements = form.elements
self.post(form, elements)
json = self.response.json
self.assertEqual(dict(field='hello world'), json)
def test_post_form_with_mutated_elements(self):
self.get('/textarea')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
elements.set_value('field', 'changed')
self.post(form, elements)
json = self.response.json
self.assertEqual(dict(field='changed'), json)
| bsd-2-clause | 4,729,082,334,174,765,000 | 28.02 | 57 | 0.598897 | false |
Ichag/django-timelinejs3 | timeline/migrations/0009_auto_20150819_0648.py | 1 | 3020 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('timeline', '0008_auto_20150818_2241'),
]
operations = [
migrations.AlterField(
model_name='options',
name='duration',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='height',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='marker_height_min',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='marker_padding',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='marker_width_min',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='menubar_height',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='optimal_tick_width',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='scale_factor',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='skinny_size',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='slide_default_fade',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='slide_padding_lr',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='start_at_slide',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='timenav_height',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='timenav_height_min',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='timenav_height_percentage',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='width',
field=models.IntegerField(null=True, blank=True),
),
]
| bsd-3-clause | 4,725,792,827,878,784,000 | 31.12766 | 61 | 0.54404 | false |
do-mpc/do-mpc | testing/test_oscillating_masses_discrete_dae.py | 1 | 3206 | #
# This file is part of do-mpc
#
# do-mpc: An environment for the easy, modular and efficient implementation of
# robust nonlinear model predictive control
#
# Copyright (c) 2014-2019 Sergio Lucia, Alexandru Tatulea-Codrean
# TU Dortmund. All rights reserved
#
# do-mpc is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# do-mpc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with do-mpc. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
from casadi import *
from casadi.tools import *
import pdb
import sys
import unittest
sys.path.append('../')
import do_mpc
sys.path.pop(-1)
sys.path.append('../examples/oscillating_masses_discrete_dae/')
from template_model import template_model
from template_mpc import template_mpc
from template_simulator import template_simulator
sys.path.pop(-1)
class TestOscillatingMassesDiscrete(unittest.TestCase):
def test_oscillating_masses_discrete(self):
"""
Get configured do-mpc modules:
"""
model = template_model()
mpc = template_mpc(model)
simulator = template_simulator(model)
estimator = do_mpc.estimator.StateFeedback(model)
"""
Set initial state
"""
np.random.seed(99)
x0 = np.random.rand(model.n_x)-0.5
mpc.x0 = x0
simulator.x0 = x0
estimator.x0 = x0
# Use initial state to set the initial guess.
mpc.set_initial_guess()
# This is only meaningful for DAE systems.
simulator.set_initial_guess()
"""
Run some steps:
"""
for k in range(5):
u0 = mpc.make_step(x0)
y_next = simulator.make_step(u0)
x0 = estimator.make_step(y_next)
"""
Store results (from reference run):
"""
#do_mpc.data.save_results([mpc, simulator, estimator], 'results_oscillatingMasses_dae', overwrite=True)
"""
Compare results to reference run:
"""
ref = do_mpc.data.load_results('./results/results_oscillatingMasses_dae.pkl')
test = ['_x', '_u', '_aux', '_time', '_z']
for test_i in test:
# Check MPC
check = np.allclose(mpc.data.__dict__[test_i], ref['mpc'].__dict__[test_i])
self.assertTrue(check)
# Check Simulator
check = np.allclose(simulator.data.__dict__[test_i], ref['simulator'].__dict__[test_i])
self.assertTrue(check)
# Estimator
check = np.allclose(estimator.data.__dict__[test_i], ref['estimator'].__dict__[test_i])
self.assertTrue(check)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | 573,126,971,548,186,750 | 28.962617 | 111 | 0.622895 | false |
scottgigante/picopore | test/test.py | 1 | 2274 | from __future__ import print_function
import subprocess
import os
import errno
import shutil
import time
import signal
__test_files__ = ["sample_data/albacore_1d_original.fast5", "sample_data/metrichor_2d_original.fast5"]
__test_runs__ = ["lossless", "deep-lossless"]
__prefix__ = "picopore.test"
__raw_runs__ = [["--fastq","--summary"],["--summary","--no-fastq"],["--fastq","--no-summary"],["--no-fastq","--no-summary"], ["--manual", "Analyses"]]
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def call(additionalArgs, prefix=None):
args=["python","-m","picopore","-y","--print-every","1"]
if prefix is not None:
args.extend(["--prefix",prefix])
args.extend(additionalArgs)
print(" ".join(args))
p = subprocess.call(args)
if prefix is not None:
filename = args[-1]
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
os.remove(os.path.join(dirname,".".join([prefix,basename])))
return p
def testFile(filename):
result = 0
for run in __test_runs__:
result += call(["--test","--mode",run, filename])
result += call(["--mode",run, filename], prefix=__prefix__)
for run in __raw_runs__:
result += call(["--mode","raw"] + run + [filename], prefix=__prefix__)
return result
def testRealtime(mode, additionalArgs=None, directory="realtime"):
__waittime = 10
mkdir(directory)
args = ["python","-m","picopore","-y","--realtime","--print-every","1"]
if additionalArgs is not None:
args.extend(additionalArgs)
args.extend(["--mode",mode,directory])
print(" ".join(args))
p = subprocess.Popen(args)
time.sleep(__waittime)
for filename in __test_files__:
shutil.copy(filename, directory)
time.sleep(__waittime)
p.send_signal(signal.SIGINT)
p.wait()
shutil.rmtree(directory)
return p.returncode
exitcode = 0
for filename in __test_files__:
exitcode += testFile(filename)
for mode in __test_runs__:
exitcode += testRealtime(mode)
for mode in __raw_runs__:
exitcode += testRealtime("raw", additionalArgs=mode)
exit(exitcode)
| gpl-3.0 | -8,761,686,030,981,263,000 | 30.583333 | 150 | 0.616535 | false |
whiteshield/EHScripter | EHScripter/netsparker.py | 1 | 6143 | ##!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import re
import string
from io import StringIO
from lxml import etree
try:
from .util import *
except Exception as e:
from util import *
class NetsparkerToMarkdown:
def __init__(self, options):
self.options=options
self.template=string.Template(self.options['template'])
if self.options['merge']:
self.template=string.Template(self.options['merge_template'])
self.merge_findinglist_template=string.Template(self.options['merge_findinglist_template'])
self.process()
def process(self):
if not os.path.exists(self.options['output_dir']):
os.makedirs(self.options['output_dir'])
filelist=[]
if os.path.isfile(self.options['load_file']):
filelist.append(self.options['load_file'])
elif os.path.isdir(self.options['load_file']):
for name in os.listdir(self.options["load_file"]):
if os.path.isfile(self.options['load_file']+'/'+name) and len(name)>11 and name[-11:]==".netsparker":
filelist.append(self.options["load_file"]+'/'+name)
counter=1
findings={}
for processfile in filelist:
content=open(processfile).read()
fileparts=content.split('<!-- Vulnerability Details -->')
vulns=fileparts[1].split('<h1')
fullparser=etree.HTMLParser()
fullhtml=etree.parse(StringIO(content), fullparser)
Target=self.attrib(fullhtml.xpath("//span[@class='dashboard-url']/a"),'href','N/A')
for vuln in vulns[1:]:
vuln='<h1'+vuln
parser=etree.HTMLParser()
vulnobj=etree.parse(StringIO(vuln), parser)
h1=self.value(vulnobj.xpath('//h1//text()'),'N/A')
Vulnerability=re.sub(r'\d+\\\. ','',h1)
Risk=self.value(vulnobj.xpath("//div[@class='vuln-block']/div[2]//text()"),'N/A').title()
VulnDesc=self.value(vulnobj.xpath("//div[@class='vulndesc']//text()"),'N/A')
if Risk=='Information':
Risk='Info'
if Risk=='Important':
Risk='High'
VulnDetails=vulnobj.xpath("//div[@class='vulnerability-detail']")
for VulnDetail in VulnDetails:
h2=self.value(VulnDetail.xpath('./div/h2//text()'),'N/A')
SubVulnerability=re.sub(r'\d+\.\d+\. ','',h2)
Link=self.attrib(VulnDetail.xpath('./div/div[2]/a'),'href','N/A')
ParamTableRows=VulnDetail.xpath('./div/table//tr')
lines=0;
ParamTable=''
for ParamTableRow in ParamTableRows:
ParamTableCells=ParamTableRow.xpath('./td')
cells=0
for ParamTableCell in ParamTableCells:
cell=self.value(ParamTableCell.xpath('.//text()'),'N/A').strip()
ParamTable+='| %s '%cell
cells+=1
ParamTable='%s|\n'%ParamTable
if lines==0:
sepstr=''
for i in range(0,cells):
sepstr+='| ------- '
sepstr='%s|\n'%sepstr
ParamTable+=sepstr
lines+=1
d={'Target':Target, 'Vulnerability':Vulnerability, 'Risk':Risk, 'VulnDesc':VulnDesc, 'SubVulnerability':SubVulnerability, 'Link':Link, 'ParamTable':ParamTable,'findinglist':''}
if not self.options['merge']:
dirname=slugify('%s-%s-%s-%04d-netsparker'%(Risk, Target, Vulnerability, counter))
if not os.path.exists(self.options['output_dir']+'/'+dirname):
os.makedirs(self.options['output_dir']+'/'+dirname)
counter+=1
temp=self.template
text=temp.substitute(d)
if self.options['result_overwrite'] or (not os.path.exists(self.options['output_dir']+'/'+dirname+'/document.md')):
tmpfile = open(self.options['output_dir']+'/'+dirname+'/document.md', 'w');
tmpfile.write(text)
tmpfile.close()
else :
slug=slugify('%s-%s-netsparker'%(Risk, Vulnerability))
if not findings.get(slug):
findings[slug]=[]
findings[slug].append(d)
for key, values in findings.items():
findinglist = ''
for d in values:
d['VulnDesc']=d['VulnDesc'].replace('$','$$')
d['ParamTable']=d['ParamTable'].replace('$','$$')
d['Link']=d['Link'].replace('$','$$')
temp=self.merge_findinglist_template
text=temp.substitute(d)
findinglist+=text+"\n\n"
d['findinglist']=findinglist
filename=key+".md";
temp=self.template
text=temp.substitute(d)
if self.options['result_overwrite'] or (not os.path.exists(self.options['output_dir']+'/'+filename)):
tmpfile = open(self.options['output_dir']+'/'+filename, 'w');
tmpfile.write(text)
tmpfile.close()
def value(self, x, default):
try:
#ret=x[0].strip()
ret="\n".join([html2markdown(html2markdown(y.strip(), True)) for y in x])
except Exception as e:
try:
ret=x.strip()
except Exception as ee:
ret=default
return ret
def attrib(self, x, attr, default):
try:
ret=x[0].attrib[attr]
except Exception as e:
try:
ret=x.attrib[attr]
except Exception as ee:
ret=default
return ret
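
# Illustrative invocation (all option values below are assumptions; real
# templates and defaults come from the calling EHScripter configuration):
#
#     options = {
#         'load_file': 'scan-report.netsparker',
#         'output_dir': 'findings',
#         'result_overwrite': False,
#         'merge': False,
#         'template': '$Risk - $Vulnerability ($Target)\n\n$VulnDesc\n\n$Link\n\n$ParamTable',
#         'merge_template': '$findinglist',
#         'merge_findinglist_template': '### $Target $Link\n\n$ParamTable',
#     }
#     NetsparkerToMarkdown(options)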
| gpl-2.0 | 4,605,033,301,372,981,000 | 44.503704 | 196 | 0.496988 | false |
swcurran/tfrs | backend/api/models/UserViewModel.py | 1 | 1507 | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
class UserViewModel(models.Model):
given_name = models.CharField(max_length=255, blank=True, null=True)
surname = models.CharField(max_length=255, blank=True, null=True)
email = models.CharField(max_length=255, blank=True, null=True)
active = models.BooleanField()
sm_authorization_id = models.CharField(max_length=255, blank=True, null=True)
user_roles = models.ManyToManyField('UserRole',
related_name='UserViewModeluser_roles',
blank=True)
class Meta:
abstract = True
| apache-2.0 | 9,088,370,143,756,785,000 | 40.861111 | 208 | 0.704048 | false |
vanesa/kid-o | kido/admin/utils.py | 1 | 3921 | # -*- coding: utf-8 -*-
""" Flask-Admin utilities."""
from flask import abort, redirect, request, url_for
from flask_admin import AdminIndexView, expose
from flask_admin.base import MenuLink
from flask_admin.contrib.sqla import ModelView
from flask_login import current_user
from functools import wraps
from kido import app
from kido.constants import PERMISSION_ADMIN
def admin_required(f):
@wraps(f)
def decorated(*args, **kwargs):
if not current_user.is_authenticated:
return redirect(url_for("views.general.login", next=request.url))
users_permissions = current_user.permissions
if PERMISSION_ADMIN not in users_permissions:
app.logger.debug("Not an admin")
abort(404)
return f(*args, **kwargs)
return decorated
def permission_required(permissions):
if not isinstance(permissions, (list, set, tuple)):
permissions = [permissions]
permissions = [x.upper() for x in permissions]
def decorator(method):
@wraps(method)
def f(*args, **kwargs):
if not current_user.is_authenticated:
return redirect(url_for("views.general.login", next=request.url))
users_permissions = current_user.permissions
if PERMISSION_ADMIN not in users_permissions:
for permission in permissions:
if permission not in users_permissions:
app.logger.debug("Missing permission: {0}".format(permission))
abort(404)
return method(*args, **kwargs)
return f
return decorator
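
# Illustrative use of the decorators above on a Flask view (route and
# permission name are example values):
#
#     @app.route("/admin/reports")
#     @permission_required("REPORTS_VIEW")
#     def reports_view():
#         return "visible to admins and users holding REPORTS_VIEW"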
class AuthenticatedMenuLink(MenuLink):
def is_accessible(self):
return current_user.is_authenticated
class CustomAdminIndexView(AdminIndexView):
extra_css = None
extra_js = None
@expose("/")
@admin_required
def index(self):
if not current_user.is_authenticated:
return redirect(url_for("views.general.login", next=request.url))
return super(CustomAdminIndexView, self).index()
@expose("/login/")
def login_view(self):
return redirect(url_for("views.general.login", next=request.url))
@expose("/logout/")
def logout_view(self):
return redirect("/logout")
class CustomModelView(ModelView):
page_size = 50
extra_css = None
extra_js = None
action_template = "admin/action.html"
edit_template = "admin/model/edit.html"
create_template = "admin/model/create.html"
list_template = "admin/model/custom_list.html"
_include = None
class_attributes = [
"page_size",
"can_create",
"can_edit",
"can_delete",
"column_searchable_list",
"column_filters",
"column_exclude_list",
"column_default_sort",
]
def __init__(self, *args, **kwargs):
if "exclude" in kwargs:
self.form_excluded_columns = kwargs["exclude"]
del kwargs["exclude"]
if "include" in kwargs:
self._include = kwargs["include"]
del kwargs["include"]
for item in self.class_attributes:
if item in kwargs:
setattr(self, item, kwargs[item])
del kwargs[item]
super(CustomModelView, self).__init__(*args, **kwargs)
def get_list_columns(self):
if self._include:
return self.get_column_names(
only_columns=self.scaffold_list_columns() + self._include,
excluded_columns=self.column_exclude_list,
)
return super(CustomModelView, self).get_list_columns()
def is_accessible(self):
if not current_user.is_authenticated:
return False
users_permissions = current_user.permissions
return PERMISSION_ADMIN in users_permissions
def inaccessible_callback(self, name, **kwargs):
return abort(404)
| bsd-3-clause | -352,002,768,738,183,740 | 28.931298 | 86 | 0.61872 | false |
eepgwde/pyeg0 | gmus/GMus0.py | 1 | 1699 | ## @file GMus0.py
# @brief Application support class for the Unofficial Google Music API.
# @author weaves
#
# @details
# This class uses @c gmusicapi.
#
# @note
# An application support class is one that uses a set of driver classes
# to provide a set of higher-level application specific methods.
#
# @see
# https://github.com/simon-weber/Unofficial-Google-Music-API
# http://unofficial-google-music-api.readthedocs.org/en/latest/
from __future__ import print_function
from GMus00 import GMus00
import logging
import ConfigParser, os, logging
import pandas as pd
import json
from gmusicapi import Mobileclient
## Set of file paths for the configuration file.
paths = ['site.cfg', os.path.expanduser('~/share/site/.safe/gmusic.cfg')]
## Google Music API login, search and result cache.
#
# The class needs to a configuration file with these contents. (The
# values of the keys must be a valid Google Play account.)
#
# <pre>
# [credentials]
# username=username\@gmail.com
# password=SomePassword9
# </pre>
class GMus0(GMus00):
## Ad-hoc method to find the indices of duplicated entries.
def duplicated(self):
# self._df = self._df.sort(['album', 'title', 'creationTimestamp'],
# ascending=[1, 1, 0])
df = self.df[list(['title', 'album', 'creationTimestamp'])]
df['n0'] = df['title'] + '|' + df['album']
df = df.sort(['n0','creationTimestamp'], ascending=[1, 0])
# Only rely on counts of 2.
s0 = pd.Series(df.n0)
s1 = s0.value_counts()
s2 = set( (s1[s1.values >= 2]).index )
df1 = df[df.n0.isin(s2)]
df1['d'] = df1.duplicated('n0')
s3 = list(df1[df1.d].index)
return s3
| gpl-3.0 | 6,076,076,360,128,342,000 | 30.462963 | 74 | 0.656857 | false |
mdmueller/ascii-profiling | parallel.py | 1 | 4245 | import timeit
import time
from astropy.io import ascii
import pandas
import numpy as np
from astropy.table import Table, Column
from tempfile import NamedTemporaryFile
import random
import string
import matplotlib.pyplot as plt
import webbrowser
def make_table(table, size=10000, n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
if str_val is None:
str_val = "abcde12345"
cols = []
for i in xrange(n_floats):
dat = np.random.uniform(low=1, high=10, size=size)
cols.append(Column(dat, name='f{}'.format(i)))
for i in xrange(n_ints):
dat = np.random.randint(low=-9999999, high=9999999, size=size)
cols.append(Column(dat, name='i{}'.format(i)))
for i in xrange(n_strs):
if str_val == 'random':
dat = np.array([''.join([random.choice(string.letters) for j in range(10)]) for k in range(size)])
else:
dat = np.repeat(str_val, size)
cols.append(Column(dat, name='s{}'.format(i)))
t = Table(cols)
if float_format is not None:
for col in t.columns.values():
if col.name.startswith('f'):
col.format = float_format
t.write(table.name, format='ascii')
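
# Example of generating a small throwaway table for a one-off read (sizes are
# illustrative and smaller than the benchmark values used below):
#
#     tmp = NamedTemporaryFile()
#     make_table(tmp, size=1000, n_floats=5, n_ints=2, n_strs=1)
#     data = ascii.read(tmp.name, format='basic', guess=False)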
output_text = []
def plot_case(n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
global table1, output_text
n_rows = (10000, 20000, 50000, 100000, 200000) # include 200000 for publish run
numbers = (1, 1, 1, 1, 1)
repeats = (3, 2, 1, 1, 1)
times_fast = []
times_fast_parallel = []
times_pandas = []
for n_row, number, repeat in zip(n_rows, numbers, repeats):
table1 = NamedTemporaryFile()
make_table(table1, n_row, n_floats, n_ints, n_strs, float_format, str_val)
t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, use_fast_converter=True)",
setup='from __main__ import ascii, table1', number=number, repeat=repeat)
times_fast.append(min(t) / number)
t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, parallel=True, use_fast_converter=True)",
setup='from __main__ import ascii, table1', number=number, repeat=repeat)
times_fast_parallel.append(min(t) / number)
t = timeit.repeat("pandas.read_csv(table1.name, sep=' ', header=0)",
setup='from __main__ import table1, pandas', number=number, repeat=repeat)
times_pandas.append(min(t) / number)
plt.loglog(n_rows, times_fast, '-or', label='io.ascii Fast-c')
plt.loglog(n_rows, times_fast_parallel, '-og', label='io.ascii Parallel Fast-c')
plt.loglog(n_rows, times_pandas, '-oc', label='Pandas')
plt.grid()
plt.legend(loc='best')
plt.title('n_floats={} n_ints={} n_strs={} float_format={} str_val={}'.format(
n_floats, n_ints, n_strs, float_format, str_val))
plt.xlabel('Number of rows')
plt.ylabel('Time (sec)')
img_file = 'graph{}.png'.format(len(output_text) + 1)
plt.savefig(img_file)
plt.clf()
text = 'Pandas to io.ascii Fast-C speed ratio: {:.2f} : 1<br/>'.format(times_fast[-1] / times_pandas[-1])
text += 'io.ascii parallel to Pandas speed ratio: {:.2f} : 1'.format(times_pandas[-1] / times_fast_parallel[-1])
output_text.append((img_file, text))
plot_case(n_floats=10, n_ints=0, n_strs=0)
plot_case(n_floats=10, n_ints=10, n_strs=10)
plot_case(n_floats=10, n_ints=10, n_strs=10, float_format='%.4f')
plot_case(n_floats=10, n_ints=0, n_strs=0, float_format='%.4f')
plot_case(n_floats=0, n_ints=0, n_strs=10)
plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="'asdf asdfa'")
plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="random")
plot_case(n_floats=0, n_ints=10, n_strs=0)
html_file = open('out.html', 'w')
html_file.write('<html><head><meta charset="utf-8"/><meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>')
html_file.write('</head><body><h1 style="text-align:center;">Profile of io.ascii</h1>')
for img, descr in output_text:
html_file.write('<img src="{}"><p style="font-weight:bold;">{}</p><hr>'.format(img, descr))
html_file.write('</body></html>')
html_file.close()
webbrowser.open('out.html')
| mit | -9,088,434,750,835,889,000 | 45.141304 | 122 | 0.623793 | false |
patriciohc/carga-de-xls-a-MySQL | Choose_file.py | 1 | 3639 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Choose_file.ui'
#
# Created: Sat Oct 17 15:55:19 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(524, 146)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 501, 81))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.txtFile = QtGui.QLineEdit(self.verticalLayoutWidget)
self.txtFile.setObjectName(_fromUtf8("txtFile"))
self.horizontalLayout_2.addWidget(self.txtFile)
self.btChooseFile = QtGui.QPushButton(self.verticalLayoutWidget)
self.btChooseFile.setObjectName(_fromUtf8("btChooseFile"))
self.horizontalLayout_2.addWidget(self.btChooseFile)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btClose = QtGui.QPushButton(self.verticalLayoutWidget)
self.btClose.setObjectName(_fromUtf8("btClose"))
self.horizontalLayout.addWidget(self.btClose)
self.btLoadFile = QtGui.QPushButton(self.verticalLayoutWidget)
self.btLoadFile.setObjectName(_fromUtf8("btLoadFile"))
self.horizontalLayout.addWidget(self.btLoadFile)
self.verticalLayout.addLayout(self.horizontalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 524, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.label.setText(_translate("MainWindow", "File", None))
self.btChooseFile.setText(_translate("MainWindow", "Choose", None))
self.btClose.setText(_translate("MainWindow", "Cerrar", None))
self.btLoadFile.setText(_translate("MainWindow", "Cargar Archivo", None))
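# Illustrative usage sketch (not produced by pyuic4): wiring the generated
# Ui_MainWindow onto a QMainWindow. Button signal/slot hookup is assumed to
# live in the application code, so it is omitted here.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())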
| apache-2.0 | -8,237,645,389,629,048,000 | 46.25974 | 82 | 0.718054 | false |
NeuralProsthesisLab/unlock | unlock/analysis/test/test_data_bank.py | 1 | 3421 | # Copyright (c) James Percent and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
__author__ = 'jpercent'
from .. import switch
import threading
import time
import random
import unittest
class AttrTest(object):
def __init__(self):
super(AttrTest, self).__init__()
self.a = 0
self.b = 1
self.c = 2
def d(self):
self.d_value = True
def e(self, e, e1):
self.e_value = e
self.e1_value = e1
class MiscTests(unittest.TestCase):
def testSwitch(self):
correct = False
incorrect = False
val = 'v'
for case in switch(val):
if case('v'):
correct = True
break
if case('d'):
incorrect = True
break
if case ():
incorrect = True
break
self.assertTrue(correct and not incorrect)
correct = False
incorrect = False
val = 'd'
for case in switch(val):
if case('v'):
incorrect = True
break
if case('d'):
correct = True
break
if case ():
incorrect = True
break
self.assertTrue(correct and not incorrect)
correct = False
incorrect = False
val = ['efg', 'v']
for case in switch(val):
if case('v'):
incorrect = True
break
if case('d'):
incorrect = True
break
if case (['efg', 'v']):
correct = True
break
if case ():
incorrect = True
break
self.assertTrue(correct and not incorrect)
def getSuite():
return unittest.makeSuite(MiscTests,'test')
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -8,389,552,375,796,463,000 | 30.1 | 82 | 0.599532 | false |
c0cky/mediathread | mediathread/djangosherd/api.py | 1 | 4549 | # pylint: disable-msg=R0904
from tastypie import fields
from tastypie.resources import ModelResource
from mediathread.api import UserResource, TagResource
from mediathread.assetmgr.models import Asset
from mediathread.djangosherd.models import SherdNote, DiscussionIndex
from mediathread.projects.models import ProjectNote
from mediathread.taxonomy.api import TermResource
from mediathread.taxonomy.models import TermRelationship
class SherdNoteResource(ModelResource):
author = fields.ForeignKey(UserResource, 'author',
full=True, null=True, blank=True)
class Meta:
queryset = SherdNote.objects.select_related('asset').order_by("id")
excludes = ['tags', 'body', 'added', 'modified']
list_allowed_methods = []
detail_allowed_methods = []
def dehydrate(self, bundle):
try:
bundle.data['is_global_annotation'] = \
bundle.obj.is_global_annotation()
bundle.data['asset_id'] = str(bundle.obj.asset.id)
bundle.data['is_null'] = bundle.obj.is_null()
bundle.data['annotation'] = bundle.obj.annotation()
bundle.data['url'] = bundle.obj.get_absolute_url()
modified = bundle.obj.modified.strftime("%m/%d/%y %I:%M %p") \
if bundle.obj.modified else ''
bundle.data['metadata'] = {
'tags': TagResource().render_list(bundle.request,
bundle.obj.tags_split()),
'body': bundle.obj.body.strip() if bundle.obj.body else '',
'primary_type': bundle.obj.asset.primary.label,
'modified': modified,
'timecode': bundle.obj.range_as_timecode(),
'title': bundle.obj.title
}
editable = (bundle.request.user.id ==
getattr(bundle.obj, 'author_id', -1))
citable = bundle.request.GET.get('citable', '') == 'true'
# assumed: there is only one ProjectNote per annotation
reference = ProjectNote.objects.filter(
annotation__id=bundle.obj.id).first()
if reference:
# notes in a submitted response are not editable
editable = editable and not reference.project.is_submitted()
if citable:
# this is a heavy operation. don't call it unless needed
citable = reference.project.can_cite(bundle.request.course,
bundle.request.user)
bundle.data['editable'] = editable
bundle.data['citable'] = citable
termResource = TermResource()
vocabulary = {}
related = TermRelationship.objects.get_for_object(
bundle.obj).prefetch_related('term__vocabulary')
for rel in related:
if rel.term.vocabulary.id not in vocabulary:
vocabulary[rel.term.vocabulary.id] = {
'id': rel.term.vocabulary.id,
'display_name': rel.term.vocabulary.display_name,
'terms': []
}
vocabulary[rel.term.vocabulary.id]['terms'].append(
termResource.render_one(bundle.request, rel.term))
bundle.data['vocabulary'] = vocabulary.values()
except Asset.DoesNotExist:
bundle.data['asset_id'] = ''
bundle.data['metadata'] = {'title': 'Item Deleted'}
return bundle
def render_one(self, request, selection, asset_key):
# assumes user is allowed to see this note
bundle = self.build_bundle(obj=selection, request=request)
dehydrated = self.full_dehydrate(bundle)
bundle.data['asset_key'] = '%s_%s' % (asset_key,
bundle.data['asset_id'])
return self._meta.serializer.to_simple(dehydrated, None)
class DiscussionIndexResource(object):
def render_list(self, request, indicies):
collaborations = DiscussionIndex.with_permission(request, indicies)
ctx = {
'references': [{
'id': obj.collaboration.object_pk,
'title': obj.collaboration.title,
'type': obj.get_type_label(),
'url': obj.get_absolute_url(),
'modified': obj.modified.strftime("%m/%d/%y %I:%M %p")}
for obj in collaborations]}
return ctx
| gpl-2.0 | -7,704,276,080,529,471,000 | 41.915094 | 79 | 0.56386 | false |
fdslight/fdslight | freenet/handlers/tundev.py | 1 | 5566 | #!/usr/bin/env python3
import os, sys
import pywind.evtframework.handlers.handler as handler
import freenet.lib.fn_utils as fn_utils
import freenet.lib.simple_qos as simple_qos
try:
import fcntl
except ImportError:
pass
class tun_base(handler.handler):
__creator_fd = None
    # IP packets waiting to be written to the tun device
___ip_packets_for_write = []
    # maximum number of IP packets allowed in the tun write queue
__MAX_WRITE_QUEUE_SIZE = 1024
    # current number of IP packets queued for writing to the tun device
__current_write_queue_n = 0
__BLOCK_SIZE = 16 * 1024
__qos = None
def __create_tun_dev(self, name):
"""创建tun 设备
:param name:
:return fd:
"""
tun_fd = fn_utils.tuntap_create(name, fn_utils.IFF_TUN | fn_utils.IFF_NO_PI)
fn_utils.interface_up(name)
if tun_fd < 0:
raise SystemError("can not create tun device,please check your root")
return tun_fd
@property
def creator(self):
return self.__creator_fd
def init_func(self, creator_fd, tun_dev_name, *args, **kwargs):
"""
:param creator_fd:
        :param tun_dev_name: name of the tun device
        :param subnet: only required when running as the server side
"""
tun_fd = self.__create_tun_dev(tun_dev_name)
if tun_fd < 3:
print("error:create tun device failed:%s" % tun_dev_name)
sys.exit(-1)
self.__creator_fd = creator_fd
self.__qos = simple_qos.qos(simple_qos.QTYPE_DST)
self.set_fileno(tun_fd)
fcntl.fcntl(tun_fd, fcntl.F_SETFL, os.O_NONBLOCK)
self.dev_init(tun_dev_name, *args, **kwargs)
return tun_fd
def dev_init(self, dev_name, *args, **kwargs):
pass
def evt_read(self):
for i in range(32):
try:
ip_packet = os.read(self.fileno, self.__BLOCK_SIZE)
except BlockingIOError:
break
self.__qos.add_to_queue(ip_packet)
self.__qos_from_tundev()
def task_loop(self):
self.__qos_from_tundev()
def __qos_from_tundev(self):
results = self.__qos.get_queue()
for ip_packet in results:
self.handle_ip_packet_from_read(ip_packet)
if not results:
self.del_loop_task(self.fileno)
else:
self.add_to_loop_task(self.fileno)
def evt_write(self):
try:
ip_packet = self.___ip_packets_for_write.pop(0)
except IndexError:
self.remove_evt_write(self.fileno)
return
self.__current_write_queue_n -= 1
try:
os.write(self.fileno, ip_packet)
except BlockingIOError:
self.__current_write_queue_n += 1
self.___ip_packets_for_write.insert(0, ip_packet)
return
''''''
def handle_ip_packet_from_read(self, ip_packet):
"""处理读取过来的IP包,重写这个方法
:param ip_packet:
:return None:
"""
pass
def handle_ip_packet_for_write(self, ip_packet):
"""处理要写入的IP包,重写这个方法
:param ip_packet:
:return new_ip_packet:
"""
pass
def error(self):
self.dev_error()
def dev_error(self):
"""重写这个方法
:return:
"""
pass
def timeout(self):
self.dev_timeout()
def dev_timeout(self):
"""重写这个方法
:return:
"""
pass
def delete(self):
self.dev_delete()
def dev_delete(self):
"""重写这个方法
:return:
"""
pass
def add_to_sent_queue(self, ip_packet):
        # drop packets that exceed the limit to avoid excessive memory use
n_ip_message = self.handle_ip_packet_for_write(ip_packet)
if not n_ip_message: return
if self.__current_write_queue_n == self.__MAX_WRITE_QUEUE_SIZE:
            # remove the oldest packet so the queue does not grow too large
self.__current_write_queue_n -= 1
self.___ip_packets_for_write.pop(0)
return
self.__current_write_queue_n += 1
self.___ip_packets_for_write.append(n_ip_message)
class tundevs(tun_base):
"""服务端的tun数据处理
"""
def dev_init(self, dev_name):
self.register(self.fileno)
self.add_evt_read(self.fileno)
def handle_ip_packet_from_read(self, ip_packet):
self.dispatcher.send_msg_to_tunnel_from_tun(ip_packet)
def handle_ip_packet_for_write(self, ip_packet):
return ip_packet
def dev_delete(self):
self.unregister(self.fileno)
os.close(self.fileno)
def dev_error(self):
self.delete_handler(self.fileno)
def dev_timeout(self):
pass
def handle_msg_from_tunnel(self, message):
self.add_to_sent_queue(message)
self.add_evt_write(self.fileno)
class tundevc(tun_base):
def dev_init(self, dev_name):
self.register(self.fileno)
self.add_evt_read(self.fileno)
def handle_ip_packet_from_read(self, ip_packet):
self.dispatcher.handle_msg_from_tundev(ip_packet)
def handle_ip_packet_for_write(self, ip_packet):
return ip_packet
def dev_delete(self):
self.unregister(self.fileno)
os.close(self.fileno)
def dev_error(self):
self.delete_handler(self.fileno)
def dev_timeout(self):
pass
def msg_from_tunnel(self, message):
self.add_to_sent_queue(message)
self.add_evt_write(self.fileno)
| bsd-2-clause | 794,198,108,318,362,000 | 23.490741 | 84 | 0.567864 | false |
nuodb/nuodb-django | test/auth/tests/tokens.py | 1 | 2747 | import sys
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase
from django.utils import unittest
from django.test.testcases import skipIfDBFeature
@skipIfCustomUser
class TokenGeneratorTest(TestCase):
def test_make_token(self):
"""
Ensure that we can make a token and that it is valid
"""
user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertTrue(p0.check_token(user, tk1))
@skipIfDBFeature('supports_transactions')
def test_10265(self):
"""
Ensure that the token generated for a user created in the same request
will work correctly.
"""
# See ticket #10265
user = User.objects.create_user('comebackkid', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
reload = User.objects.get(username='comebackkid')
tk2 = p0.make_token(reload)
self.assertEqual(tk1, tk2)
def test_timeout(self):
"""
Ensure we can use the token after n days, but no greater.
"""
# Uses a mocked version of PasswordResetTokenGenerator so we can change
# the value of 'today'
class Mocked(PasswordResetTokenGenerator):
def __init__(self, today):
self._today_val = today
def _today(self):
return self._today_val
user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
self.assertTrue(p1.check_token(user, tk1))
p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
self.assertFalse(p2.check_token(user, tk1))
@unittest.skipIf(sys.version_info[:2] >= (3, 0), "Unnecessary test with Python 3")
def test_date_length(self):
"""
Make sure we don't allow overly long dates, causing a potential DoS.
"""
user = User.objects.create_user('ima1337h4x0r', '[email protected]', 'p4ssw0rd')
p0 = PasswordResetTokenGenerator()
# This will put a 14-digit base36 timestamp into the token, which is too large.
self.assertRaises(ValueError,
p0._make_token_with_timestamp,
user, 175455491841851871349)
| bsd-3-clause | 3,380,484,588,206,988,000 | 38.242857 | 88 | 0.646159 | false |
durden/dayonetools | dayonetools/services/habit_list.py | 1 | 10350 | """
This module provides a way to import data from the Habit List iPhone
application (http://habitlist.com/) into Day One Journal
(http://dayoneapp.com/) entries.
To use this module you must first do a manual export of your data from Habit
list. This can be done by the following:
- Open Habit List iPhone app
- Click the 'gear' icon for settings at the bottom of the main 'Today' view
- Choose the 'Export Data' option
- E-mail the data to yourself
- Copy and paste the e-mail contents into a file of your choosing
- Remove the 'sent from iPhone' line at the end of your e-mail. This
will cause the script to NOT process the JSON data.
- DO NOT REMOVE THE LAST TWO EMPTY LINES OF THE E-MAIL. WE CURRENTLY
HAVE A BUG THAT EXPECTS THESE LINES.
- You can choose to optionally remove the first few lines of the e-mail
that are not JSON data, everything up to the first '[' character.
- Again, this is optional because this module will attempt to ignore
any non-JSON data at the START of a file.
At this point, you are ready to do the actual conversion from JSON to Day One
entries. So, you should check all the 'settings' in this module for things you
would like to change:
- HEADER_FOR_DAY_ONE_ENTRIES
- DAYONE_ENTRIES
- ENTRY_TEMPLATE
- TIMEZONE
- Make sure to choose the timezone of your iPhone because the Habit
List app stores all timezones in UTC and you'll want to convert this
to the timezone your iPhone used at the time you completed the habit.
This will ensure your Day One entries match the time you completed
the task and also prevent a habit from showing up more than once per
day which can happen with UTC time if you complete a habit late in
one day and early in the next, etc.
- You can find a list of available timezone strings here:
- http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
Next, you can run this module with your exported JSON data as an argument like
so:
- python services/habit_list.py -f habit_list_data.json -t
Also, it's encouraged to run this with the '-t' option first so that all your
Day One entries will be created in a local directory called 'test.' This will
allow you to inspect the conversion. You can manually copy a few select
entries into your Day One 'entries/' folder to ensure you approve of the
formatting and can easily make any formatting adjustments. Then, you can run
this module again without the '-t' to fully import Habit List entries into Day
One.
"""
import argparse
import collections
from datetime import datetime
import json
import os
import re
import uuid
from dateutil import tz
from dayonetools.services import convert_to_dayone_date_string
DAYONE_ENTRIES = '/Users/durden/Dropbox/Apps/Day One/Journal.dayone/entries/'
# This text will be inserted into the first line of all entries created, set to
# '' to remove this completely.
HEADER_FOR_DAYONE_ENTRIES = 'Habit List entry'
# Note the strange lack of indentation on the {entry_text} b/c day one will
# display special formatting to text that is indented, which we want to avoid.
ENTRY_TEMPLATE = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Creation Date</key>
<date>{date}</date>
<key>Entry Text</key>
<string> {entry_title}
<![CDATA[
{habits}]]>
#habits #habit_list
</string>
<key>Starred</key>
<false/>
<key>Tags</key>
<array>
<string>habits</string>
<string>habit_list</string>
</array>
<key>UUID</key>
<string>{uuid_str}</string>
</dict>
</plist>
"""
TIMEZONE = 'America/Chicago'
def _parse_args():
"""Parse sys.argv arguments"""
parser = argparse.ArgumentParser(
description='Export Habit List data to Day One')
parser.add_argument('-f', '--file', action='store',
dest='input_file', required=True,
help='JSON file to import from')
parser.add_argument('-v', '--verbose', default=False, action='store_true',
dest='verbose', required=False,
help='Verbose debugging information')
parser.add_argument('-t', '--test', default=False, action='store_true',
dest='test', required=False,
help=('Test import by creating Day one files in local '
'directory for inspect'))
def _datetime(str_):
"""Convert date string in YYYY-MM-DD format to datetime object"""
if not str_:
return None
try:
date = datetime.strptime(str_, '%Y-%m-%d')
except ValueError:
msg = 'Invalid date format, should be YYYY-MM-DD'
raise argparse.ArgumentTypeError(msg)
return date.replace(tzinfo=_user_time_zone())
parser.add_argument('-s', '--since', type=_datetime,
help=('Only process entries starting with YYYY-MM-DD '
'and newer'))
return vars(parser.parse_args())
def _user_time_zone():
"""Get default timezone for user"""
try:
return tz.gettz(TIMEZONE)
except Exception as err:
print 'Failed getting timezone, check your TIMEZONE variable'
raise
def _user_time_zone_date(dt, user_time_zone, utc_time_zone):
"""
Convert given datetime string into a yyyy-mm-dd string taking into
account the user time zone
Keep in mind that this conversion might change the actual day if the
habit was entered 'early' or 'late' in the day. This is correct because
the user entered the habit in their own timezone, but the app stores this
internally (and exports) in utc. So, here we are effectively converting
the time back to when the user actually entered it, based on the timezone
the user claims they were in.
"""
# We know habit list stores in UTC so don't need the timezone info
dt = dt.split('+')[0].strip()
dtime_obj = datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')
# Tell native datetime object we are using UTC, then we need to convert
# that UTC time into the user's timezone BEFORE stripping off the time
# to make sure the year, month, and date take into account timezone
# differences.
utc = dtime_obj.replace(tzinfo=utc_time_zone)
return utc.astimezone(user_time_zone)
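# Worked example (illustrative): with TIMEZONE = 'America/Chicago' (UTC-5 in
# summer), the exported timestamp '2013-07-07 03:30:00 +0000' converts to
# 2013-07-06 22:30 local time, so that habit is filed under July 6, not July 7.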
def _habits_to_markdown(habits):
"""Create markdown list of habits"""
# FIXME: This is inefficient but not sure of a good way to use join since
    # we want to add a character to the beginning and end of each string in list.
markdown = ''
for habit, dt_obj in habits:
markdown += '- [%02d:%02d] %s\n' % (dt_obj.hour, dt_obj.minute, habit)
return markdown
def create_habitlist_entry(directory, day_str, habits, verbose):
"""Create day one file entry for given habits, date pair"""
# Create unique uuid without any specific machine information
    # (uuid4 rather than uuid1) and strip any '-' characters to be
# consistent with dayone format.
uuid_str = re.sub('-', '', str(uuid.uuid4()))
file_name = '%s.doentry' % (uuid_str)
full_file_name = os.path.join(directory, file_name)
date = convert_to_dayone_date_string(day_str)
habits = _habits_to_markdown(habits)
entry = {'entry_title': HEADER_FOR_DAYONE_ENTRIES,
'habits': habits,'date': date, 'uuid_str': uuid_str}
with open(full_file_name, 'w') as file_obj:
text = ENTRY_TEMPLATE.format(**entry)
file_obj.write(text)
if verbose:
print 'Created entry for %s: %s' % (date, file_name)
def parse_habits_file(filename, start_date=None):
"""
Parse habits json file and return dict of data organized by day
start_date can be a datetime object used only to return habits that were
started on or after start_date
"""
with open(filename, 'r') as file_obj:
# FIXME: This expects 3 lines of junk at the beginning of the file, but
# we could just read until we find '[' and ignore up until that point.
junk = file_obj.readline()
junk = file_obj.readline()
junk = file_obj.readline()
# FIXME: For my sample this is about 27kb of memory
_json = file_obj.read()
# FIXME: Downside here is that we assume the user was in the same timezone
# for every habit. However, it's possible that some of the habits were
# entered while the user was traveling in a different timezone, etc.
iphone_time_zone = _user_time_zone()
utc_time_zone = tz.gettz('UTC')
# Use a set b/c we can only do each habit once a day
habits = collections.defaultdict(set)
# FIXME: Maybe optimize this to not hold it all in memory
# We have to parse all json and return it b/c the data is organized by
# habit and we need it organized by date. So, we can't use a generator or
# anything to yield values as they come b/c we won't know if we've parsed
# the entire day until all JSON is parsed.
# FIXME: Should have something to catch ValueError exceptions around this
# so we can show the line with the error if something is wrong.
for habit in json.loads(_json):
name = habit['name']
for dt in habit['completed']:
dt_obj = _user_time_zone_date(dt, iphone_time_zone, utc_time_zone)
if start_date is None or dt_obj >= start_date:
# Habits will be organized by day then each one will have it's
# own time.
day_str = dt_obj.strftime('%Y-%m-%d')
habits[day_str].add((name, dt_obj))
return habits
def main():
args = _parse_args()
if args['test']:
directory = './test'
try:
os.mkdir(directory)
except OSError as err:
print 'Warning: %s' % (err)
else:
directory = DAYONE_ENTRIES
habits = parse_habits_file(args['input_file'], args['since'])
for day_str, days_habits in habits.iteritems():
create_habitlist_entry(directory, day_str, days_habits, args['verbose'])
if __name__ == '__main__':
main()
| mit | -1,088,409,671,153,115,600 | 35.315789 | 102 | 0.648792 | false |
gtcasl/eiger | Eiger.py | 1 | 20400 | #!/usr/bin/python
#
# \file Eiger.py
# \author Eric Anger <[email protected]>
# \date July 6, 2012
#
# \brief Command line interface into Eiger modeling framework
#
# \changes Added more plot functionality; Benjamin Allan, SNL 5/2013
#
import argparse
import matplotlib.pyplot as plt
import numpy as np
import math
import tempfile
import shutil
import os
from ast import literal_eval
import json
import sys
from collections import namedtuple
from tabulate import tabulate
from sklearn.cluster import KMeans
from eiger import database, PCA, LinearRegression
Model = namedtuple('Model', ['metric_names', 'means', 'stdevs',
'rotation_matrix', 'kmeans', 'models'])
def import_model(args):
database.addModelFromFile(args.database, args.file, args.source_name, args.description)
def export_model(args):
database.dumpModelToFile(args.database, args.file, args.id)
def list_models(args):
all_models = database.getModels(args.database)
print tabulate(all_models, headers=['ID', 'Description', 'Created', 'Source'])
def trainModel(args):
print "Training the model..."
training_DC = database.DataCollection(args.training_dc, args.database)
try:
performance_metric_id = [m[0] for m in training_DC.metrics].index(args.target)
except ValueError:
print "Unable to find target metric '%s', " \
"please specify a valid one: " % (args.target,)
for (my_name,my_desc,my_type) in training_DC.metrics:
print "\t%s" % (my_name,)
return
training_performance = training_DC.profile[:,performance_metric_id]
metric_names = [m[0] for m in training_DC.metrics if m[0] != args.target]
if args.predictor_metrics != None:
metric_names = filter(lambda x: x in args.predictor_metrics, metric_names)
metric_ids = [[m[0] for m in training_DC.metrics].index(n) for n in metric_names]
if not metric_ids:
print "Unable to make model for empty data collection. Aborting..."
return
training_profile = training_DC.profile[:,metric_ids]
#pca
training_pca = PCA.PCA(training_profile)
nonzero_components = training_pca.nonzeroComponents()
rotation_matrix = training_pca.components[:,nonzero_components]
rotated_training_profile = np.dot(training_profile, rotation_matrix)
#kmeans
n_clusters = args.clusters
kmeans = KMeans(n_clusters)
means = np.mean(rotated_training_profile, axis=0)
stdevs = np.std(rotated_training_profile - means, axis=0, ddof=1)
stdevs[stdevs==0.0] = 1.0
clusters = kmeans.fit_predict((rotated_training_profile - means)/stdevs)
# reserve a vector for each model created per cluster
models = [0] * len(clusters)
print "Modeling..."
for i in range(n_clusters):
cluster_profile = rotated_training_profile[clusters==i,:]
cluster_performance = training_performance[clusters==i]
regression = LinearRegression.LinearRegression(cluster_profile,
cluster_performance)
pool = [LinearRegression.identityFunction()]
for col in range(cluster_profile.shape[1]):
if('inv_quadratic' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, -2))
if('inv_linear' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, -1))
if('inv_sqrt' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, -.5))
if('sqrt' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, .5))
if('linear' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, 1))
if('quadratic' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, 2))
if('log' in args.regressor_functions):
pool.append(LinearRegression.logFunction(col))
if('cross' in args.regressor_functions):
for xcol in range(col, cluster_profile.shape[1]):
pool.append(LinearRegression.crossFunction(col, xcol))
if('div' in args.regressor_functions):
for xcol in range(col, cluster_profile.shape[1]):
pool.append(LinearRegression.divFunction(col,xcol))
pool.append(LinearRegression.divFunction(xcol,col))
(models[i], r_squared, r_squared_adj) = regression.select(pool,
threshold=args.threshold,
folds=args.nfolds)
print "Index\tMetric Name"
print '\n'.join("%s\t%s" % metric for metric in enumerate(metric_names))
print "PCA matrix:"
print rotation_matrix
print "Model:\n" + str(models[i])
print "Finished modeling cluster %s:" % (i,)
print "r squared = %s" % (r_squared,)
print "adjusted r squared = %s" % (r_squared_adj,)
model = Model(metric_names, means, stdevs, rotation_matrix, kmeans, models)
# if we want to save the model file, copy it now
outfilename = training_DC.name + '.model' if args.output == None else args.output
if args.json == True:
writeToFileJSON(model, outfilename)
else:
writeToFile(model, outfilename)
if args.test_fit:
args.experiment_dc = args.training_dc
args.model = outfilename
testModel(args)
def dumpCSV(args):
training_DC = database.DataCollection(args.training_dc, args.database)
names = [met[0] for met in training_DC.metrics]
if args.metrics != None:
names = args.metrics
header = ','.join(names)
idxs = training_DC.metricIndexByName(names)
profile = training_DC.profile[:,idxs]
outfile = sys.stdout if args.output == None else args.output
np.savetxt(outfile, profile, delimiter=',',
header=header, comments='')
def testModel(args):
print "Testing the model fit..."
test_DC = database.DataCollection(args.experiment_dc, args.database)
model = readFile(args.model)
_runExperiment(model.kmeans, model.means, model.stdevs, model.models,
model.rotation_matrix, test_DC,
args, model.metric_names)
def readFile(infile):
with open(infile, 'r') as modelfile:
first_char = modelfile.readline()[0]
if first_char == '{':
return readJSONFile(infile)
else:
return readBespokeFile(infile)
def plotModel(args):
print "Plotting model..."
model = readFile(args.model)
if args.plot_pcs_per_metric:
        PCA.PlotPCsPerMetric(model.rotation_matrix, model.metric_names,
title="PCs Per Metric")
if args.plot_metrics_per_pc:
        PCA.PlotMetricsPerPC(model.rotation_matrix, model.metric_names,
title="Metrics Per PC")
def _stringToArray(string):
"""
Parse string of form [len](number,number,number,...) to a numpy array.
"""
length = string[:string.find('(')]
values = string[string.find('('):]
arr = np.array(literal_eval(values))
return np.reshape(arr, literal_eval(length))
def _runExperiment(kmeans, means, stdevs, models, rotation_matrix,
experiment_DC, args, metric_names):
unordered_metric_ids = experiment_DC.metricIndexByType('deterministic',
'nondeterministic')
unordered_metric_names = [experiment_DC.metrics[mid][0] for mid in unordered_metric_ids]
# make sure all metric_names are in experiment_DC.metrics[:][0]
have_metrics = [x in unordered_metric_names for x in metric_names]
if not all(have_metrics):
print("Experiment DC does not have matching metrics. Aborting...")
return
# set the correct ordering
expr_metric_ids = [unordered_metric_ids[unordered_metric_names.index(name)]
for name in metric_names]
for idx,metric in enumerate(experiment_DC.metrics):
if(metric[0] == args.target):
performance_metric_id = idx
performance = experiment_DC.profile[:,performance_metric_id]
profile = experiment_DC.profile[:,expr_metric_ids]
rotated_profile = np.dot(profile, rotation_matrix)
means = np.mean(rotated_profile, axis=0)
stdevs = np.std(rotated_profile - means, axis=0, ddof=1)
stdevs = np.nan_to_num(stdevs)
stdevs[stdevs==0.0] = 1.0
clusters = kmeans.predict((rotated_profile - means)/stdevs)
prediction = np.empty_like(performance)
for i in range(len(kmeans.cluster_centers_)):
prediction[clusters==i] = abs(models[i].poll(rotated_profile[clusters==i]))
if args.show_prediction:
print "Actual\t\tPredicted"
print '\n'.join("%s\t%s" % x for x in zip(performance,prediction))
mse = sum([(a-p)**2 for a,p in
zip(performance, prediction)]) / len(performance)
rmse = math.sqrt(mse)
mape = 100 * sum([abs((a-p)/a) for a,p in
zip(performance,prediction)]) / len(performance)
print "Number of experiment trials: %s" % len(performance)
print "Mean Average Percent Error: %s" % mape
print "Mean Squared Error: %s" % mse
print "Root Mean Squared Error: %s" % rmse
def writeToFileJSON(model, outfile):
# Let's assume model has all the attributes we care about
json_root = {}
json_root["metric_names"] = [name for name in model.metric_names]
json_root["means"] = [mean for mean in model.means.tolist()]
json_root["std_devs"] = [stdev for stdev in model.stdevs.tolist()]
json_root["rotation_matrix"] = [[elem for elem in row] for row in model.rotation_matrix.tolist()]
json_root["clusters"] = []
for i in range(len(model.kmeans.cluster_centers_)):
json_cluster = {}
json_cluster["center"] = [center for center in model.kmeans.cluster_centers_[i].tolist()]
# get models in json format
json_cluster["regressors"] = model.models[i].toJSONObject()
json_root["clusters"].append(json_cluster)
with open(outfile, 'w') as out:
json.dump(json_root, out, indent=4)
def readJSONFile(infile):
with open(infile, 'r') as modelfile:
json_root = json.load(modelfile)
metric_names = json_root['metric_names']
means = np.array(json_root['means'])
stdevs = np.array(json_root['std_devs'])
rotation_matrix = np.array(json_root['rotation_matrix'])
empty_kmeans = KMeans(n_clusters=len(json_root['clusters']), n_init=1)
centers = []
models = []
for cluster in json_root['clusters']:
centers.append(np.array(cluster['center']))
models.append(LinearRegression.Model.fromJSONObject(cluster['regressors']))
kmeans = empty_kmeans.fit(centers)
return Model(metric_names, means, stdevs, rotation_matrix, kmeans, models)
def writeToFile(model, outfile):
with open(outfile, 'w') as modelfile:
# For printing the original model file encoding
modelfile.write("%s\n%s\n" % (len(model.metric_names), '\n'.join(model.metric_names)))
modelfile.write("[%s](%s)\n" %
(len(model.means), ','.join([str(mean) for mean in model.means.tolist()])))
modelfile.write("[%s](%s)\n" %
(len(model.stdevs), ','.join([str(stdev) for stdev in model.stdevs.tolist()])))
modelfile.write("[%s,%s]" % model.rotation_matrix.shape)
modelfile.write("(%s)\n" %
','.join(["(%s)" %
','.join([str(elem) for elem in row])
for row in model.rotation_matrix.tolist()]))
for i in range(len(model.kmeans.cluster_centers_)):
modelfile.write('Model %s\n' % i)
modelfile.write("[%s](%s)\n" % (model.rotation_matrix.shape[1],
','.join([str(center) for center in
model.kmeans.cluster_centers_[i].tolist()])))
modelfile.write(repr(model.models[i]))
modelfile.write('\n') # need a trailing newline
def readBespokeFile(infile):
"""Returns a Model namedtuple with all the model parts"""
with open(infile, 'r') as modelfile:
lines = iter(modelfile.read().splitlines())
n_params = int(lines.next())
metric_names = [lines.next() for i in range(n_params)]
means = _stringToArray(lines.next())
stdevs = _stringToArray(lines.next())
rotation_matrix = _stringToArray(lines.next())
models = []
centroids = []
try:
while True:
name = lines.next() # kill a line
centroids.append(_stringToArray(lines.next()))
weights = _stringToArray(lines.next())
functions = [LinearRegression.stringToFunction(lines.next())
for i in range(weights.shape[0])]
models.append(LinearRegression.Model(functions, weights))
except StopIteration:
pass
kmeans = KMeans(len(centroids))
kmeans.cluster_centers_ = np.array(centroids)
return Model(metric_names, means, stdevs, rotation_matrix, kmeans, models)
def convert(args):
print "Converting model..."
with open(args.input, 'r') as modelfile:
first_char = modelfile.readline()[0]
if first_char == '{':
model = readJSONFile(args.input)
writeToFile(model, args.output)
else:
model = readBespokeFile(args.input)
writeToFileJSON(model, args.output)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = \
'Command line interface into Eiger performance modeling framework \
for all model generation, polling, and serialization tasks.',
argument_default=None,
fromfile_prefix_chars='@')
subparsers = parser.add_subparsers(title='subcommands')
train_parser = subparsers.add_parser('train',
help='train a model with data from the database',
description='Train a model with data from the database')
train_parser.set_defaults(func=trainModel)
dump_parser = subparsers.add_parser('dump',
help='dump data collection to CSV',
description='Dump data collection as CSV')
dump_parser.set_defaults(func=dumpCSV)
test_parser = subparsers.add_parser('test',
help='test how well a model predicts a data collection',
description='Test how well a model predicts a data collection')
test_parser.set_defaults(func=testModel)
plot_parser = subparsers.add_parser('plot',
help='plot the behavior of a model',
description='Plot the behavior of a model')
plot_parser.set_defaults(func=plotModel)
convert_parser = subparsers.add_parser('convert',
help='transform a model into a different file format',
description='Transform a model into a different file format')
convert_parser.set_defaults(func=convert)
list_model_parser = subparsers.add_parser('list',
help='list available models in the Eiger DB',
description='List available models in the Eiger DB')
list_model_parser.set_defaults(func=list_models)
import_model_parser = subparsers.add_parser('import',
help='import model file into the Eiger DB',
description='Import model file into the Eiger DB')
import_model_parser.set_defaults(func=import_model)
export_model_parser = subparsers.add_parser('export',
help='export model from Eiger DB to file',
description='Export model from Eiger DB to file')
export_model_parser.set_defaults(func=export_model)
"""TRAINING ARGUMENTS"""
train_parser.add_argument('database', type=str, help='Name of the database file')
train_parser.add_argument('training_dc', type=str,
help='Name of the training data collection')
train_parser.add_argument('target', type=str,
help='Name of the target metric to predict')
train_parser.add_argument('--test-fit', action='store_true', default=False,
help='If set will test the model fit against the training data.')
train_parser.add_argument('--show-prediction', action='store_true',
default=False,
help='If set, send the actual and predicted values to stdout.')
train_parser.add_argument('--predictor-metrics', nargs='*',
help='Only use these metrics when building a model.')
train_parser.add_argument('--output', type=str,
help='Filename to output file to, otherwise use "<training_dc>.model"')
train_parser.add_argument('--clusters', '-k', type=int, default=1,
help='Number of clusters for kmeans')
train_parser.add_argument('--threshold', type=float,
help='Cutoff threshold of increase in adjusted R-squared value when'
' adding new predictors to the model')
train_parser.add_argument('--nfolds', type=int,
help='Number of folds to use in k-fold cross validation.')
train_parser.add_argument('--regressor-functions', nargs='*',
default=['inv_quadratic', 'inv_linear', 'inv_sqrt', 'sqrt',
'linear', 'quadratic', 'log', 'cross', 'div'],
help='Regressor functions to use. Options are linear, quadratic, '
'sqrt, inv_linear, inv_quadratic, inv_sqrt, log, cross, and div. '
'Defaults to all.')
train_parser.add_argument('--json', action='store_true', default=False,
help='Output model in JSON format, rather than bespoke')
"""DUMP CSV ARGUMENTS"""
dump_parser.add_argument('database', type=str, help='Name of the database file')
dump_parser.add_argument('training_dc', type=str,
help='Name of the data collection to dump')
dump_parser.add_argument('--metrics', nargs='*',
help='Only dump these metrics.')
dump_parser.add_argument('--output', type=str, help='Name of file to dump CSV to')
"""TEST ARGUMENTS"""
test_parser.add_argument('database', type=str, help='Name of the database file')
test_parser.add_argument('experiment_dc', type=str,
help='Name of the data collection to experiment on')
test_parser.add_argument('model', type=str,
help='Name of the model to use')
test_parser.add_argument('target', type=str,
help='Name of the target metric to predict')
test_parser.add_argument('--show-prediction', action='store_true',
default=False,
help='If set, send the actual and predicted values to stdout.')
"""PLOT ARGUMENTS"""
plot_parser.add_argument('model', type=str,
help='Name of the model to use')
plot_parser.add_argument('--plot-pcs-per-metric', action='store_true',
default=False,
help='If set, plots the breakdown of principal components per metric.')
plot_parser.add_argument('--plot-metrics-per-pc',
action='store_true',
default=False,
help='If set, plots the breakdown of metrics per principal component.')
"""CONVERT ARGUMENTS"""
convert_parser.add_argument('input', type=str,
help='Name of input model to convert from')
convert_parser.add_argument('output', type=str,
help='Name of output model to convert to')
"""LIST ARGUMENTS"""
list_model_parser.add_argument('database', type=str, help='Name of the database file')
"""IMPORT ARGUMENTS"""
import_model_parser.add_argument('database', type=str,
help='Name of the database file')
import_model_parser.add_argument('file', type=str,
help='Name of the model file to import')
import_model_parser.add_argument('source_name', type=str,
help='Name of the source of the model (ie Eiger)')
import_model_parser.add_argument('--description', type=str,
default='',
help='String to describe the model')
"""EXPORT ARGUMENTS"""
export_model_parser.add_argument('database', type=str,
help='Name of the database file')
export_model_parser.add_argument('id', type=int,
help='ID number identifying which model in the database to export ')
export_model_parser.add_argument('file', type=str,
help='Name of the file to export into')
args = parser.parse_args()
args.func(args)
print "Done."
| bsd-3-clause | 8,259,570,660,788,744,000 | 44.033113 | 101 | 0.634265 | false |
vesellov/bitdust.devel | customer/data_sender.py | 1 | 14665 | #!/usr/bin/python
# data_sender.py
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (data_sender.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at [email protected]
#
#
#
#
"""
.. module:: data_sender.
.. raw:: html
<a href="https://bitdust.io/automats/data_sender/data_sender.png" target="_blank">
<img src="https://bitdust.io/automats/data_sender/data_sender.png" style="max-width:100%;">
</a>
A state machine to manage the data sending process; it acts very simply:
1) when new local data is created it tries to send it to the correct supplier
2) wait while ``p2p.io_throttle`` is doing some data transmission to remote suppliers
3) calls ``p2p.backup_matrix.ScanBlocksToSend()`` to get a list of pieces that need to be sent
4) this machine is restarted every minute to check if some more data needs to be sent
5) also can be restarted at any time when it is needed
EVENTS:
* :red:`block-acked`
* :red:`block-failed`
* :red:`init`
* :red:`new-data`
* :red:`restart`
* :red:`scan-done`
* :red:`timer-1min`
* :red:`timer-1sec`
"""
#------------------------------------------------------------------------------
from __future__ import absolute_import
from io import open
#------------------------------------------------------------------------------
_Debug = True
_DebugLevel = 12
#------------------------------------------------------------------------------
import os
import time
#------------------------------------------------------------------------------
from logs import lg
from automats import automat
from automats import global_state
from lib import misc
from lib import packetid
from contacts import contactsdb
from userid import my_id
from main import settings
from p2p import contact_status
from . import io_throttle
#------------------------------------------------------------------------------
_DataSender = None
_ShutdownFlag = False
#------------------------------------------------------------------------------
def A(event=None, arg=None):
"""
Access method to interact with the state machine.
"""
global _DataSender
if _DataSender is None:
_DataSender = DataSender(
name='data_sender',
state='READY',
debug_level=_DebugLevel,
log_events=_Debug,
log_transitions=_Debug,
)
if event is not None:
_DataSender.automat(event, arg)
return _DataSender
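# Illustrative only: how other BitDust code typically drives this automat
# (event names come from the module docstring; nothing here is executed):
#
#   A('init')        # create the machine in the READY state
#   A('new-data')    # ask it to scan and queue freshly created local data
#   A('restart')     # force a new scan/send cycle at any time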
def Destroy():
"""
Destroy the state machine and remove the instance from memory.
"""
global _DataSender
if _DataSender is None:
return
_DataSender.destroy()
del _DataSender
_DataSender = None
class DataSender(automat.Automat):
"""
A class to manage process of sending data packets to remote suppliers.
"""
timers = {
        'timer-1min': (60, ['READY']),
'timer-1sec': (1.0, ['SENDING']),
}
statistic = {}
def state_changed(self, oldstate, newstate, event, arg):
global_state.set_global_state('DATASEND ' + newstate)
def A(self, event, arg):
#---READY---
if self.state == 'READY':
if event == 'new-data' or event == 'timer-1min' or event == 'restart':
self.state = 'SCAN_BLOCKS'
self.doScanAndQueue(arg)
elif event == 'init':
pass
#---SCAN_BLOCKS---
elif self.state == 'SCAN_BLOCKS':
if event == 'scan-done' and self.isQueueEmpty(arg):
self.state = 'READY'
self.doRemoveUnusedFiles(arg)
elif event == 'scan-done' and not self.isQueueEmpty(arg):
self.state = 'SENDING'
#---SENDING---
elif self.state == 'SENDING':
if event == 'restart' or ( ( event == 'timer-1sec' or event == 'block-acked' or event == 'block-failed' or event == 'new-data' ) and self.isQueueEmpty(arg) ):
self.state = 'SCAN_BLOCKS'
self.doScanAndQueue(arg)
return None
def isQueueEmpty(self, arg):
if not arg:
return io_throttle.IsSendingQueueEmpty()
remoteID, _ = arg
return io_throttle.OkToSend(remoteID)
def doScanAndQueue(self, arg):
global _ShutdownFlag
if _Debug:
lg.out(_DebugLevel, 'data_sender.doScanAndQueue _ShutdownFlag=%r' % _ShutdownFlag)
if _Debug:
log = open(os.path.join(settings.LogsDir(), 'data_sender.log'), 'w')
log.write(u'doScanAndQueue %s\n' % time.asctime()) # .decode('utf-8')
if _ShutdownFlag:
if _Debug:
log.write(u'doScanAndQueue _ShutdownFlag is True\n')
self.automat('scan-done')
if _Debug:
log.flush()
log.close()
return
for customer_idurl in contactsdb.known_customers():
if '' not in contactsdb.suppliers(customer_idurl):
from storage import backup_matrix
for backupID in misc.sorted_backup_ids(
list(backup_matrix.local_files().keys()), True):
this_customer_idurl = packetid.CustomerIDURL(backupID)
if this_customer_idurl != customer_idurl:
continue
packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID)
if _Debug:
log.write(u'%s\n' % packetsBySupplier)
for supplierNum in packetsBySupplier.keys():
supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl)
if not supplier_idurl:
lg.warn('unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%s' % (
supplierNum, backupID, customer_idurl))
continue
for packetID in packetsBySupplier[supplierNum]:
backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(packetID)
if backupID_ != backupID:
lg.warn('unexpected backupID supplierNum=%s for %s, customer_idurl=%s' % (
packetID, backupID, customer_idurl))
continue
if supplierNum_ != supplierNum:
lg.warn('unexpected supplierNum %s for %s, customer_idurl=%s' % (
packetID, backupID, customer_idurl))
continue
if io_throttle.HasPacketInSendQueue(
supplier_idurl, packetID):
if _Debug:
log.write(u'%s already in sending queue for %s\n' % (packetID, supplier_idurl))
continue
if not io_throttle.OkToSend(supplier_idurl):
if _Debug:
log.write(u'skip, not ok to send %s\n' % supplier_idurl)
continue
customerGlobalID, pathID = packetid.SplitPacketID(packetID)
# tranByID = gate.transfers_out_by_idurl().get(supplier_idurl, [])
# if len(tranByID) > 3:
# log.write(u'transfers by %s: %d\n' % (supplier_idurl, len(tranByID)))
# continue
filename = os.path.join(
settings.getLocalBackupsDir(),
customerGlobalID,
pathID,
)
if not os.path.isfile(filename):
if _Debug:
log.write(u'%s is not a file\n' % filename)
continue
if io_throttle.QueueSendFile(
filename,
packetID,
supplier_idurl,
my_id.getLocalID(),
self._packetAcked,
self._packetFailed,
):
if _Debug:
log.write(u'io_throttle.QueueSendFile %s\n' % packetID)
else:
if _Debug:
log.write(u'io_throttle.QueueSendFile FAILED %s\n' % packetID)
# lg.out(6, ' %s for %s' % (packetID, backupID))
# DEBUG
# break
self.automat('scan-done')
if _Debug:
log.flush()
log.close()
# def doPrintStats(self, arg):
# """
# """
# if lg.is_debug(18):
# transfers = transport_control.current_transfers()
# bytes_stats = transport_control.current_bytes_transferred()
# s = ''
# for info in transfers:
# s += '%s ' % (diskspace.MakeStringFromBytes(bytes_stats[info.transfer_id]).replace(' ', '').replace('bytes', 'b'))
# lg.out(0, 'transfers: ' + s[:120])
def doRemoveUnusedFiles(self, arg):
# we want to remove files for this block
# because we only need them during rebuilding
if settings.getBackupsKeepLocalCopies() is True:
# if user set this in settings - he want to keep the local files
return
# ... user do not want to keep local backups
if settings.getGeneralWaitSuppliers() is True:
from customer import fire_hire
# but he want to be sure - all suppliers are green for a long time
if len(contact_status.listOfflineSuppliers()) > 0 or time.time(
) - fire_hire.GetLastFireTime() < 24 * 60 * 60:
# some people are not there or we do not have stable team yet
# do not remove the files because we need it to rebuild
return
count = 0
from storage import backup_matrix
from storage import restore_monitor
from storage import backup_rebuilder
if _Debug:
lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles')
for backupID in misc.sorted_backup_ids(
list(backup_matrix.local_files().keys())):
if restore_monitor.IsWorking(backupID):
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because restoring' %
backupID)
continue
if backup_rebuilder.IsBackupNeedsWork(backupID):
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because needs rebuilding' %
backupID)
continue
if not backup_rebuilder.ReadStoppedFlag():
if backup_rebuilder.A().currentBackupID is not None:
if backup_rebuilder.A().currentBackupID == backupID:
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because rebuilding is in process' %
backupID)
continue
packets = backup_matrix.ScanBlocksToRemove(
backupID, settings.getGeneralWaitSuppliers())
for packetID in packets:
customer, pathID = packetid.SplitPacketID(packetID)
filename = os.path.join(settings.getLocalBackupsDir(), customer, pathID)
if os.path.isfile(filename):
try:
os.remove(filename)
# lg.out(6, ' ' + os.path.basename(filename))
except:
lg.exc()
continue
count += 1
if _Debug:
lg.out(_DebugLevel, ' %d files were removed' % count)
backup_matrix.ReadLocalFiles()
def _packetAcked(self, packet, ownerID, packetID):
from storage import backup_matrix
backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(packetID)
backup_matrix.RemoteFileReport(
backupID, blockNum, supplierNum, dataORparity, True)
if ownerID not in self.statistic:
self.statistic[ownerID] = [0, 0]
self.statistic[ownerID][0] += 1
self.automat('block-acked', (ownerID, packetID))
def _packetFailed(self, remoteID, packetID, why):
from storage import backup_matrix
backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(
packetID)
backup_matrix.RemoteFileReport(
backupID, blockNum, supplierNum, dataORparity, False)
if remoteID not in self.statistic:
self.statistic[remoteID] = [0, 0]
self.statistic[remoteID][1] += 1
self.automat('block-failed', (remoteID, packetID))
def statistic():
"""
The ``data_sender()`` keeps track of sending results with every supplier.
    This is used by ``fire_hire()`` to decide how reliable a given
    supplier is.
"""
global _DataSender
if _DataSender is None:
return {}
return _DataSender.statistic
def SetShutdownFlag():
"""
Set flag to indicate that no need to send anything anymore.
"""
global _ShutdownFlag
_ShutdownFlag = True
| agpl-3.0 | 4,224,043,803,797,804,000 | 38.422043 | 170 | 0.512104 | false |
agacek/camkes-tool | camkes/internal/version.py | 1 | 1813 | #
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''Versioning functionality. This computes a version identifier based on the
current source code state. It was decided this was more reliable while the tool
is under active development. Note that any extraneous files in your source
directory that match the version filters will be accumulated in the version
computation.'''
from memoization import memoized
import hashlib, os, re
@memoized
def version():
# Files to consider relevant. Each entry should be a pair of (path, filter)
# where 'path' is relative to the directory of this file and 'filter' is a
# regex describing which filenames to match under the given path.
SOURCES = [
('../', r'^.*\.py$'), # Python sources
('../templates', r'.*'), # Templates
]
my_path = os.path.dirname(os.path.abspath(__file__))
sources = set()
# Accumulate all relevant source files.
for s in SOURCES:
path = os.path.join(my_path, s[0])
regex = re.compile(s[1])
for root, _, files in os.walk(path):
for f in files:
if regex.match(f):
sources.add(os.path.abspath(os.path.join(root, f)))
# Hash each file and hash a concatenation of these hashes. Note, hashing a
# hash is not good practice for cryptography, but it's fine for this
# purpose.
hfinal = hashlib.sha1() #pylint: disable=E1101
for s in sources:
with open(s, 'r') as f:
h = hashlib.sha1(f.read()).hexdigest() #pylint: disable=E1101
hfinal.update('%s|' % h) #pylint: disable=E1101
return hfinal.hexdigest()
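# Editor's sketch (added comment, not in the original file): the value returned
# above is a 40-character hexadecimal SHA-1 digest, e.g. of the form
#     'da39a3ee5e6b4b0d3255bfef95601890afd80709'
# (that particular value is just the SHA-1 of an empty string, shown only to
# illustrate the format); it changes whenever any matched source file changes.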
| bsd-2-clause | -41,992,440,657,990,110 | 35.26 | 79 | 0.656922 | false |
ibrica/universe-server | play.py | 1 | 1073 | from multiprocessing import Process
import time
import gym
import universe
from universe.spaces.vnc_event import keycode
from envs import create_env
def start_game(model, env_name):
"""regular Python process, not using torch"""
p = Process(target=play_game, args=(model,env_name))
p.start()
# Don't wait with join, respond to user request
def play_game(model, env_name):
"""Play game with saved model if ther's no model play random"""
env = create_env(env_name, client_id="play1",remotes=1) # Local docker container
max_game_length = 10000
state = env.reset()
reward_sum = 0
start_time = time.time()
for step in range(max_game_length ):
state, reward, done, _ = env.step(['up' for i in range(60)]) # no saved model for now; keep pressing 'up', 60 times a minute
reward_sum += reward
print("Time {}, game reward {}, game length {}".format(
time.strftime("%Hh %Mm %Ss"),
reward_sum,
time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - start_time))))
if done:
break | mit | -6,093,060,618,181,687,000 | 33.645161 | 130 | 0.630941 | false |
droundy/deft | talks/colloquium/figs/plot-walls.py | 1 | 3242 | #!/usr/bin/python
# We need the following two lines in order for matplotlib to work
# without access to an X server.
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import pylab, numpy, sys
xmax = 2.5
xmin = -0.4
def plotit(dftdata, mcdata):
dft_len = len(dftdata[:,0])
dft_dr = dftdata[2,0] - dftdata[1,0]
mcdata = numpy.insert(mcdata,0,0,0)
mcdata[0,0]=-10
mcoffset = 10/2
offset = -3/2
n0 = dftdata[:,6]
nA = dftdata[:,8]
nAmc = mcdata[:,11]
n0mc = mcdata[:,10]
pylab.figure(figsize=(6, 6))
pylab.subplots_adjust(hspace=0.001)
n_plt = pylab.subplot(3,1,3)
n_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,1]*4*numpy.pi/3,"b-",label='$n$ Monte Carlo')
n_plt.plot(dftdata[:,0]/2+offset,dftdata[:,1]*4*numpy.pi/3,"b--",label='$n$ DFT')
n_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
n_plt.yaxis.set_major_locator(pylab.MaxNLocator(6,steps=[1,5,10],prune='upper'))
pylab.ylim(ymin=0)
pylab.xlim(xmin, xmax)
pylab.xlabel("$z/\sigma$")
pylab.ylabel("$n(\mathbf{r})$")
n_plt.axvline(x=0, color='k', linestyle=':')
n = len(mcdata[:,0])
#pylab.twinx()
dftr = dftdata[:,0]/2+offset
thiswork = dftdata[:,5]
gross = dftdata[:,7]
stop_here = int(dft_len - 1/dft_dr)
print stop_here
start_here = int(2.5/dft_dr)
off = 1
me = 40
A_plt = pylab.subplot(3,1,1)
A_plt.axvline(x=0, color='k', linestyle=':')
A_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,2+2*off]/nAmc,"r-",label="$g_\sigma^A$ Monte Carlo")
A_plt.plot(dftr[dftr>=0],thiswork[dftr>=0],"ro",markevery=me*.8,label="$g_\sigma^A$ this work")
A_plt.plot(dftr[dftr>=0],gross[dftr>=0],"rx",markevery=me,label="Gross",
markerfacecolor='none',markeredgecolor='red', markeredgewidth=1)
A_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
A_plt.yaxis.set_major_locator(pylab.MaxNLocator(integer=True,prune='upper'))
pylab.ylim(ymin=0)
pylab.ylabel("$g_\sigma^A$")
pylab.xlim(xmin, xmax)
n0mc[0]=1
mcdata[0,10]=1
S_plt = pylab.subplot(3,1,2)
S_plt.axvline(x=0, color='k', linestyle=':')
S_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,3+2*off]/n0mc,"g-",label="$g_\sigma^S$ Monte Carlo")
S_plt.plot(dftdata[:,0]/2+offset,dftdata[:,4],"gx",markevery=me/2,label="Yu and Wu")
S_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
#pylab.ylim(ymax=12)
S_plt.yaxis.set_major_locator(pylab.MaxNLocator(5,integer=True,prune='upper'))
pylab.xlim(xmin, xmax)
pylab.ylim(ymin=0)
pylab.ylabel("$g_\sigma^S$")
xticklabels = A_plt.get_xticklabels() + S_plt.get_xticklabels()
pylab.setp(xticklabels, visible=False)
mcdata10 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-196.dat')
dftdata10 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.10.dat')
mcdata40 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-817.dat')
dftdata40 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.40.dat')
plotit(dftdata10, mcdata10)
pylab.savefig('figs/walls-10.pdf', transparent=True)
plotit(dftdata40, mcdata40)
pylab.savefig('figs/walls-40.pdf', transparent=True)
| gpl-2.0 | -4,687,526,299,663,627,000 | 33.489362 | 99 | 0.637569 | false |
AprilBrother/esptool | esptool.py | 1 | 28432 | #!/usr/bin/env python
#
# ESP8266 ROM Bootloader Utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2014 Fredrik Ahlberg
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import struct
import serial
import time
import argparse
import os
import subprocess
import tempfile
class ESPROM:
# These are the currently known commands supported by the ROM
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_MEM_BEGIN = 0x05
ESP_MEM_END = 0x06
ESP_MEM_DATA = 0x07
ESP_SYNC = 0x08
ESP_WRITE_REG = 0x09
ESP_READ_REG = 0x0a
# Maximum block sized for RAM and Flash writes, respectively.
ESP_RAM_BLOCK = 0x1800
ESP_FLASH_BLOCK = 0x100
# Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
ESP_ROM_BAUD = 115200
# First byte of the application image
ESP_IMAGE_MAGIC = 0xe9
# Initial state for the checksum routine
ESP_CHECKSUM_MAGIC = 0xef
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3ff00050
ESP_OTP_MAC1 = 0x3ff00054
# Sflash stub: an assembly routine to read from spi flash and send to host
SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \
"\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \
"\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \
"\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00"
def __init__(self, port=0, baud=ESP_ROM_BAUD):
self._port = serial.Serial(port)
# setting baud rate in a separate step is a workaround for
# CH341 driver on some Linux versions (this opens at 9600 then
# sets), shouldn't matter for other platforms/drivers. See
# https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446
self._port.baudrate = baud
""" Read bytes from the serial port while performing SLIP unescaping """
def read(self, length=1):
b = ''
while len(b) < length:
c = self._port.read(1)
if c == '\xdb':
c = self._port.read(1)
if c == '\xdc':
b = b + '\xc0'
elif c == '\xdd':
b = b + '\xdb'
else:
raise FatalError('Invalid SLIP escape')
else:
b = b + c
return b
""" Write bytes to the serial port while performing SLIP escaping """
def write(self, packet):
buf = '\xc0' \
+ (packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc')) \
+ '\xc0'
self._port.write(buf)
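# Editor's illustrative sketch (added comment, not part of upstream esptool):
# SLIP framing as implemented by read()/write() above - a payload of
#     '\x01\xc0\x02'
# is sent on the wire as
#     '\xc0' + '\x01' + '\xdb\xdc' + '\x02' + '\xc0'
# and read() reverses the '\xdb\xdc' -> '\xc0' and '\xdb\xdd' -> '\xdb'
# substitutions (the leading/trailing '\xc0' framing bytes are checked
# separately by receive_response()).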
""" Calculate checksum of a blob, as it is defined by the ROM """
@staticmethod
def checksum(data, state=ESP_CHECKSUM_MAGIC):
for b in data:
state ^= ord(b)
return state
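# Worked example (editor's note): with the initial state 0xef defined above,
#     ESPROM.checksum('\x01') == 0xee
#     ESPROM.checksum('\x01\x02\x03') == 0xef ^ 0x01 ^ 0x02 ^ 0x03 == 0xef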
""" Send a request and read the response """
def command(self, op=None, data=None, chk=0):
if op:
pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data
self.write(pkt)
# tries to get a response until that response has the
# same operation as the request or a retries limit has
# exceeded. This is needed for some esp8266s that
# reply with more sync responses than expected.
retries = 100
while retries > 0:
(op_ret, val, body) = self.receive_response()
if op is None or op_ret == op:
return val, body # valid response received
retries = retries - 1
raise FatalError("Response doesn't match request")
""" Receive a response to a command """
def receive_response(self):
# Read header of response and parse
if self._port.read(1) != '\xc0':
raise FatalError('Invalid head of packet')
hdr = self.read(8)
(resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr)
if resp != 0x01:
raise FatalError('Invalid response 0x%02x to command' % resp)
# The variable-length body
body = self.read(len_ret)
# Terminating byte
if self._port.read(1) != chr(0xc0):
raise FatalError('Invalid end of packet')
return op_ret, val, body
""" Perform a connection test """
def sync(self):
self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20' + 32 * '\x55')
for i in xrange(7):
self.command()
""" Try connecting repeatedly until successful, or giving up """
def connect(self):
print 'Connecting...'
for _ in xrange(4):
# worst-case latency timer should be 255ms (probably <20ms)
self._port.timeout = 0.3
for _ in xrange(4):
try:
self._port.flushInput()
self._port.flushOutput()
self.sync()
self._port.timeout = 5
return
except:
time.sleep(0.05)
raise FatalError('Failed to connect to ESP8266')
""" Read memory address in target """
def read_reg(self, addr):
res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr))
if res[1] != "\0\0":
raise FatalError('Failed to read target memory')
return res[0]
""" Write to memory address in target """
def write_reg(self, addr, value, mask, delay_us=0):
if self.command(ESPROM.ESP_WRITE_REG,
struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0":
raise FatalError('Failed to write target memory')
""" Start downloading an application image to RAM """
def mem_begin(self, size, blocks, blocksize, offset):
if self.command(ESPROM.ESP_MEM_BEGIN,
struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0":
raise FatalError('Failed to enter RAM download mode')
""" Send a block of an image to RAM """
def mem_block(self, data, seq):
if self.command(ESPROM.ESP_MEM_DATA,
struct.pack('<IIII', len(data), seq, 0, 0) + data,
ESPROM.checksum(data))[1] != "\0\0":
raise FatalError('Failed to write to target RAM')
""" Leave download mode and run the application """
def mem_finish(self, entrypoint=0):
if self.command(ESPROM.ESP_MEM_END,
struct.pack('<II', int(entrypoint == 0), entrypoint))[1] != "\0\0":
raise FatalError('Failed to leave RAM download mode')
""" Start downloading to Flash (performs an erase) """
def flash_begin(self, size, offset):
old_tmo = self._port.timeout
num_blocks = (size + ESPROM.ESP_FLASH_BLOCK - 1) / ESPROM.ESP_FLASH_BLOCK
sectors_per_block = 16
sector_size = 4096
num_sectors = (size + sector_size - 1) / sector_size
start_sector = offset / sector_size
head_sectors = sectors_per_block - (start_sector % sectors_per_block)
if num_sectors < head_sectors:
head_sectors = num_sectors
if num_sectors < 2 * head_sectors:
erase_size = (num_sectors + 1) / 2 * sector_size
else:
erase_size = (num_sectors - head_sectors) * sector_size
self._port.timeout = 10
result = self.command(ESPROM.ESP_FLASH_BEGIN,
struct.pack('<IIII', erase_size, num_blocks, ESPROM.ESP_FLASH_BLOCK, offset))[1]
if result != "\0\0":
raise FatalError.WithResult('Failed to enter Flash download mode (result "%s")', result)
self._port.timeout = old_tmo
""" Write block to flash """
def flash_block(self, data, seq):
result = self.command(ESPROM.ESP_FLASH_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + data, ESPROM.checksum(data))[1]
if result != "\0\0":
raise FatalError.WithResult('Failed to write to target Flash after seq %d (got result %%s)' % seq, result)
""" Leave flash mode and run/reboot """
def flash_finish(self, reboot=False):
pkt = struct.pack('<I', int(not reboot))
if self.command(ESPROM.ESP_FLASH_END, pkt)[1] != "\0\0":
raise FatalError('Failed to leave Flash mode')
""" Run application code in flash """
def run(self, reboot=False):
# Fake flash begin immediately followed by flash end
self.flash_begin(0, 0)
self.flash_finish(reboot)
""" Read MAC from OTP ROM """
def read_mac(self):
mac0 = self.read_reg(self.ESP_OTP_MAC0)
mac1 = self.read_reg(self.ESP_OTP_MAC1)
if ((mac1 >> 16) & 0xff) == 0:
oui = (0x18, 0xfe, 0x34)
elif ((mac1 >> 16) & 0xff) == 1:
oui = (0xac, 0xd0, 0x74)
else:
raise FatalError("Unknown OUI")
return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff)
""" Read SPI flash manufacturer and device id """
def flash_id(self):
self.flash_begin(0, 0)
self.write_reg(0x60000240, 0x0, 0xffffffff)
self.write_reg(0x60000200, 0x10000000, 0xffffffff)
flash_id = self.read_reg(0x60000240)
self.flash_finish(False)
return flash_id
""" Read SPI flash """
def flash_read(self, offset, size, count=1):
# Create a custom stub
stub = struct.pack('<III', offset, size, count) + self.SFLASH_STUB
# Trick ROM to initialize SFlash
self.flash_begin(0, 0)
# Download stub
self.mem_begin(len(stub), 1, len(stub), 0x40100000)
self.mem_block(stub, 0)
self.mem_finish(0x4010001c)
# Fetch the data
data = ''
for _ in xrange(count):
if self._port.read(1) != '\xc0':
raise FatalError('Invalid head of packet (sflash read)')
data += self.read(size)
if self._port.read(1) != chr(0xc0):
raise FatalError('Invalid end of packet (sflash read)')
return data
""" Abuse the loader protocol to force flash to be left in write mode """
def flash_unlock_dio(self):
# Enable flash write mode
self.flash_begin(0, 0)
# Reset the chip rather than call flash_finish(), which would have
# write protected the chip again (why oh why does it do that?!)
self.mem_begin(0,0,0,0x40100000)
self.mem_finish(0x40000080)
""" Perform a chip erase of SPI flash """
def flash_erase(self):
# Trick ROM to initialize SFlash
self.flash_begin(0, 0)
# This is hacky: we don't have a custom stub, instead we trick
# the bootloader to jump to the SPIEraseChip() routine and then halt/crash
# when it tries to boot an unconfigured system.
self.mem_begin(0,0,0,0x40100000)
self.mem_finish(0x40004984)
# Yup - there's no good way to detect if we succeeded.
# It is, on the other hand, unlikely to fail.
class ESPFirmwareImage:
def __init__(self, filename=None):
self.segments = []
self.entrypoint = 0
self.flash_mode = 0
self.flash_size_freq = 0
if filename is not None:
f = file(filename, 'rb')
(magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', f.read(8))
# some sanity check
if magic != ESPROM.ESP_IMAGE_MAGIC or segments > 16:
raise FatalError('Invalid firmware image')
for i in xrange(segments):
(offset, size) = struct.unpack('<II', f.read(8))
if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536:
raise FatalError('Suspicious segment 0x%x, length %d' % (offset, size))
segment_data = f.read(size)
if len(segment_data) < size:
raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data)))
self.segments.append((offset, size, segment_data))
# Skip the padding. The checksum is stored in the last byte so that the
# file is a multiple of 16 bytes.
align = 15 - (f.tell() % 16)
f.seek(align, 1)
self.checksum = ord(f.read(1))
def add_segment(self, addr, data):
# Data should be aligned on word boundary
l = len(data)
if l % 4:
data += b"\x00" * (4 - l % 4)
if l > 0:
self.segments.append((addr, len(data), data))
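# Editor's illustrative example (added comment): a 5-byte segment is padded
# with three zero bytes so the stored length is word aligned, e.g.
#     image.add_segment(0x3ffe8000, b'\x01\x02\x03\x04\x05')
# appends (0x3ffe8000, 8, b'\x01\x02\x03\x04\x05\x00\x00\x00');
# the address 0x3ffe8000 is only an example value, not taken from this file.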
def save(self, filename):
f = file(filename, 'wb')
f.write(struct.pack('<BBBBI', ESPROM.ESP_IMAGE_MAGIC, len(self.segments),
self.flash_mode, self.flash_size_freq, self.entrypoint))
checksum = ESPROM.ESP_CHECKSUM_MAGIC
for (offset, size, data) in self.segments:
f.write(struct.pack('<II', offset, size))
f.write(data)
checksum = ESPROM.checksum(data, checksum)
align = 15 - (f.tell() % 16)
f.seek(align, 1)
f.write(struct.pack('B', checksum))
class ELFFile:
def __init__(self, name):
self.name = name
self.symbols = None
def _fetch_symbols(self):
if self.symbols is not None:
return
self.symbols = {}
try:
tool_nm = "xtensa-lx106-elf-nm"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_nm = "xt-nm"
proc = subprocess.Popen([tool_nm, self.name], stdout=subprocess.PIPE)
except OSError:
print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_nm
sys.exit(1)
for l in proc.stdout:
fields = l.strip().split()
try:
if fields[0] == "U":
print "Warning: ELF binary has undefined symbol %s" % fields[1]
continue
self.symbols[fields[2]] = int(fields[0], 16)
except ValueError:
raise FatalError("Failed to strip symbol output from nm: %s" % fields)
def get_symbol_addr(self, sym):
self._fetch_symbols()
return self.symbols[sym]
def get_entry_point(self):
tool_readelf = "xtensa-lx106-elf-readelf"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_readelf = "xt-readelf"
try:
proc = subprocess.Popen([tool_readelf, "-h", self.name], stdout=subprocess.PIPE)
except OSError:
print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_readelf
sys.exit(1)
for l in proc.stdout:
fields = l.strip().split()
if fields[0] == "Entry":
return int(fields[3], 0)
def load_section(self, section):
tool_objcopy = "xtensa-lx106-elf-objcopy"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_objcopy = "xt-objcopy"
tmpsection = tempfile.mktemp(suffix=".section")
try:
subprocess.check_call([tool_objcopy, "--only-section", section, "-Obinary", self.name, tmpsection])
with open(tmpsection, "rb") as f:
data = f.read()
finally:
os.remove(tmpsection)
return data
def arg_auto_int(x):
return int(x, 0)
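# Example (editor's note): int(x, 0) auto-detects the numeric base, so
#     arg_auto_int('0x40000') == arg_auto_int('262144') == 262144
# which is why addresses and sizes can be given in hex or decimal on the
# command line.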
def div_roundup(a, b):
""" Return a/b rounded up to nearest integer,
equivalent result to int(math.ceil(float(int(a)) / float(int(b))), only
without possible floating point accuracy errors.
"""
return (int(a) + int(b) - 1) / int(b)
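# Worked example (editor's note, assuming Python 2 integer division as used in
# this file): div_roundup(10, 4) == 3 and div_roundup(8, 4) == 2, i.e. exact
# multiples are not rounded up any further.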
class FatalError(RuntimeError):
"""
Wrapper class for runtime errors that aren't caused by internal bugs, but by
ESP8266 responses or input content.
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
@staticmethod
def WithResult(message, result):
"""
Return a fatal error object that includes the hex values of
'result' as a string formatted argument.
"""
return FatalError(message % ", ".join(hex(ord(x)) for x in result))
def main():
parser = argparse.ArgumentParser(description='ESP8266 ROM Bootloader Utility', prog='esptool')
parser.add_argument(
'--port', '-p',
help='Serial port device',
default='/dev/ttyUSB0')
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=arg_auto_int,
default=ESPROM.ESP_ROM_BAUD)
subparsers = parser.add_subparsers(
dest='operation',
help='Run esptool {command} -h for additional help')
parser_load_ram = subparsers.add_parser(
'load_ram',
help='Download an image to RAM and execute')
parser_load_ram.add_argument('filename', help='Firmware image')
parser_dump_mem = subparsers.add_parser(
'dump_mem',
help='Dump arbitrary memory to disk')
parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int)
parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_dump_mem.add_argument('filename', help='Name of binary dump')
parser_read_mem = subparsers.add_parser(
'read_mem',
help='Read arbitrary memory location')
parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int)
parser_write_mem = subparsers.add_parser(
'write_mem',
help='Read-modify-write to arbitrary memory location')
parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int)
parser_write_mem.add_argument('value', help='Value', type=arg_auto_int)
parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int)
parser_write_flash = subparsers.add_parser(
'write_flash',
help='Write a binary blob to flash')
parser_write_flash.add_argument('addr_filename', nargs='+', help='Address and binary file to write there, separated by space')
parser_write_flash.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
choices=['40m', '26m', '20m', '80m'], default='40m')
parser_write_flash.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
choices=['qio', 'qout', 'dio', 'dout'], default='qio')
parser_write_flash.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit',
choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m')
subparsers.add_parser(
'run',
help='Run application code in flash')
parser_image_info = subparsers.add_parser(
'image_info',
help='Dump headers from an application image')
parser_image_info.add_argument('filename', help='Image file to parse')
parser_make_image = subparsers.add_parser(
'make_image',
help='Create an application image from binary files')
parser_make_image.add_argument('output', help='Output image file')
parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file')
parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int)
parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0)
parser_elf2image = subparsers.add_parser(
'elf2image',
help='Create an application image from ELF file')
parser_elf2image.add_argument('input', help='Input ELF file')
parser_elf2image.add_argument('--output', '-o', help='Output filename prefix', type=str)
parser_elf2image.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
choices=['40m', '26m', '20m', '80m'], default='40m')
parser_elf2image.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
choices=['qio', 'qout', 'dio', 'dout'], default='qio')
parser_elf2image.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit',
choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m')
subparsers.add_parser(
'read_mac',
help='Read MAC address from OTP ROM')
subparsers.add_parser(
'flash_id',
help='Read SPI flash manufacturer and device ID')
parser_read_flash = subparsers.add_parser(
'read_flash',
help='Read SPI flash content')
parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int)
parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_read_flash.add_argument('filename', help='Name of binary dump')
subparsers.add_parser(
'erase_flash',
help='Perform Chip Erase on SPI flash')
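# Editor's note - a hypothetical invocation (port and file name are
# placeholders, not taken from this file):
#     ./esptool.py --port /dev/ttyUSB0 write_flash 0x00000 firmware.bin
# where each address/file pair maps onto the 'addr_filename' arguments
# consumed by the write_flash branch below.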
args = parser.parse_args()
# Create the ESPROM connection object, if needed
esp = None
if args.operation not in ('image_info','make_image','elf2image'):
esp = ESPROM(args.port, args.baud)
esp.connect()
# Do the actual work. Should probably be split into separate functions.
if args.operation == 'load_ram':
image = ESPFirmwareImage(args.filename)
print 'RAM boot...'
for (offset, size, data) in image.segments:
print 'Downloading %d bytes at %08x...' % (size, offset),
sys.stdout.flush()
esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, offset)
seq = 0
while len(data) > 0:
esp.mem_block(data[0:esp.ESP_RAM_BLOCK], seq)
data = data[esp.ESP_RAM_BLOCK:]
seq += 1
print 'done!'
print 'All segments done, executing at %08x' % image.entrypoint
esp.mem_finish(image.entrypoint)
elif args.operation == 'read_mem':
print '0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address))
elif args.operation == 'write_mem':
esp.write_reg(args.address, args.value, args.mask, 0)
print 'Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address)
elif args.operation == 'dump_mem':
f = file(args.filename, 'wb')
for i in xrange(args.size / 4):
d = esp.read_reg(args.address + (i * 4))
f.write(struct.pack('<I', d))
if f.tell() % 1024 == 0:
print '\r%d bytes read... (%d %%)' % (f.tell(),
f.tell() * 100 / args.size),
sys.stdout.flush()
print 'Done!'
elif args.operation == 'write_flash':
assert len(args.addr_filename) % 2 == 0
flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
flash_info = struct.pack('BB', flash_mode, flash_size_freq)
while args.addr_filename:
address = int(args.addr_filename[0], 0)
filename = args.addr_filename[1]
args.addr_filename = args.addr_filename[2:]
image = file(filename, 'rb').read()
print 'Erasing flash...'
blocks = div_roundup(len(image), esp.ESP_FLASH_BLOCK)
esp.flash_begin(blocks * esp.ESP_FLASH_BLOCK, address)
seq = 0
written = 0
t = time.time()
while len(image) > 0:
print '\rWriting at 0x%08x... (%d %%)' % (address + seq * esp.ESP_FLASH_BLOCK, 100 * (seq + 1) / blocks),
sys.stdout.flush()
block = image[0:esp.ESP_FLASH_BLOCK]
# Fix sflash config data
if address == 0 and seq == 0 and block[0] == '\xe9':
block = block[0:2] + flash_info + block[4:]
# Pad the last block
block = block + '\xff' * (esp.ESP_FLASH_BLOCK - len(block))
esp.flash_block(block, seq)
image = image[esp.ESP_FLASH_BLOCK:]
seq += 1
written += len(block)
t = time.time() - t
print '\rWrote %d bytes at 0x%08x in %.1f seconds (%.1f kbit/s)...' % (written, address, t, written / t * 8 / 1000)
print '\nLeaving...'
if args.flash_mode == 'dio':
esp.flash_unlock_dio()
else:
esp.flash_begin(0, 0)
esp.flash_finish(False)
elif args.operation == 'run':
esp.run()
elif args.operation == 'image_info':
image = ESPFirmwareImage(args.filename)
print ('Entry point: %08x' % image.entrypoint) if image.entrypoint != 0 else 'Entry point not set'
print '%d segments' % len(image.segments)
print
checksum = ESPROM.ESP_CHECKSUM_MAGIC
for (idx, (offset, size, data)) in enumerate(image.segments):
print 'Segment %d: %5d bytes at %08x' % (idx + 1, size, offset)
checksum = ESPROM.checksum(data, checksum)
print
print 'Checksum: %02x (%s)' % (image.checksum, 'valid' if image.checksum == checksum else 'invalid!')
elif args.operation == 'make_image':
image = ESPFirmwareImage()
if len(args.segfile) == 0:
raise FatalError('No segments specified')
if len(args.segfile) != len(args.segaddr):
raise FatalError('Number of specified files does not match number of specified addresses')
for (seg, addr) in zip(args.segfile, args.segaddr):
data = file(seg, 'rb').read()
image.add_segment(addr, data)
image.entrypoint = args.entrypoint
image.save(args.output)
elif args.operation == 'elf2image':
if args.output is None:
args.output = args.input + '-'
e = ELFFile(args.input)
image = ESPFirmwareImage()
image.entrypoint = e.get_entry_point()
for section, start in ((".text", "_text_start"), (".data", "_data_start"), (".rodata", "_rodata_start")):
data = e.load_section(section)
image.add_segment(e.get_symbol_addr(start), data)
image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
image.flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
image.save(args.output + "0x00000.bin")
data = e.load_section(".irom0.text")
off = e.get_symbol_addr("_irom0_text_start") - 0x40200000
assert off >= 0
f = open(args.output + "0x%05x.bin" % off, "wb")
f.write(data)
f.close()
elif args.operation == 'read_mac':
mac = esp.read_mac()
print 'MAC: %s' % ':'.join(map(lambda x: '%02x' % x, mac))
elif args.operation == 'flash_id':
flash_id = esp.flash_id()
print 'Manufacturer: %02x' % (flash_id & 0xff)
print 'Device: %02x%02x' % ((flash_id >> 8) & 0xff, (flash_id >> 16) & 0xff)
elif args.operation == 'read_flash':
print 'Please wait...'
file(args.filename, 'wb').write(esp.flash_read(args.address, 1024, div_roundup(args.size, 1024))[:args.size])
elif args.operation == 'erase_flash':
esp.flash_erase()
if __name__ == '__main__':
try:
main()
except FatalError as e:
print '\nA fatal error occurred: %s' % e
sys.exit(2)
| gpl-2.0 | 1,992,661,783,218,690,600 | 38.709497 | 152 | 0.573579 | false |
eharney/cinder | cinder/api/v3/attachments.py | 1 | 11362 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes attachments API."""
from oslo_log import log as logging
import webob
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v3.views import attachments as attachment_views
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
from cinder.volume import api as volume_api
LOG = logging.getLogger(__name__)
class AttachmentsController(wsgi.Controller):
"""The Attachments API controller for the OpenStack API."""
_view_builder_class = attachment_views.ViewBuilder
allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'}
def __init__(self, ext_mgr=None):
"""Initialize controller class."""
self.volume_api = volume_api.API()
self.ext_mgr = ext_mgr
super(AttachmentsController, self).__init__()
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def show(self, req, id):
"""Return data about the given attachment."""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
return attachment_views.ViewBuilder.detail(attachment)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def index(self, req):
"""Return a summary list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def detail(self, req):
"""Return a detailed list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments, detail=True)
@common.process_general_filtering('attachment')
def _process_attachment_filtering(self, context=None, filters=None,
req_version=None):
utils.remove_invalid_filter_options(context, filters,
self.allowed_filters)
def _items(self, req):
"""Return a list of attachments, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
# Pop out non search_opts and create local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
self._process_attachment_filtering(context=context,
filters=search_opts,
req_version=req_version)
if search_opts.get('instance_id', None):
search_opts['instance_uuid'] = search_opts.pop('instance_id', None)
if context.is_admin and 'all_tenants' in search_opts:
del search_opts['all_tenants']
return objects.VolumeAttachmentList.get_all(
context, search_opts=search_opts, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs)
else:
return objects.VolumeAttachmentList.get_all_by_project(
context, context.project_id, search_opts=search_opts,
marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
sort_direction=sort_dirs)
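# Editor's illustrative note (added comment): a request along the lines of
#     GET .../attachments/detail?instance_id=<uuid>&limit=1
# is normalized above so the DB query filters on 'instance_uuid'; the URL
# shown is only an approximation of the route, not taken from this module.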
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@wsgi.response(202)
def create(self, req, body):
"""Create an attachment.
This method can be used to create an empty attachment (reserve) or to
create and initialize a volume attachment based on the provided input
parameters.
If the caller does not yet have the connector information but needs to
reserve an attachment for the volume (ie Nova BootFromVolume) the
create can be called with just the volume-uuid and the server
identifier. This will reserve an attachment, mark the volume as
reserved and prevent any new attachment_create calls from being made
until the attachment is updated (completed).
The alternative is that the connection can be reserved and initialized
all at once with a single call if the caller has all of the required
information (connector data) at the time of the call.
NOTE: In Nova terms server == instance; the server_id parameter
referenced below is the UUID of the Instance. For non-Nova consumers
this can be a server UUID or some other arbitrary unique identifier.
Expected format of the input parameter 'body':
.. code-block:: json
{
"attachment":
{
"volume_uuid": "volume-uuid",
"instance_uuid": "nova-server-uuid",
"connector": "null|<connector-object>"
}
}
Example connector:
.. code-block:: json
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "null|rw|ro"
}
}
NOTE: all that's required for a reserve is a volume_uuid
and an instance_uuid.
returns: A summary view of the attachment object
"""
context = req.environ['cinder.context']
instance_uuid = body['attachment'].get('instance_uuid', None)
if not instance_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'instance_uuid' "
"to create attachment."))
volume_uuid = body['attachment'].get('volume_uuid', None)
if not volume_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'volume_uuid' "
"to create attachment."))
volume_ref = objects.Volume.get_by_id(
context,
volume_uuid)
connector = body['attachment'].get('connector', None)
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_create(context,
volume_ref,
instance_uuid,
connector=connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = _(
"Unable to create attachment for volume (%s).") % ex.msg
LOG.exception(err_msg)
except Exception as ex:
err_msg = _("Unable to create attachment for volume.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def update(self, req, id, body):
"""Update an attachment record.
Update a reserved attachment record with connector information and set
up the appropriate connection_info from the driver.
Expected format of the input parameter 'body':
.. code:: json
{
"attachment":
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False,
"mountpoint": "/dev/vdb",
"mode": None|"rw"|"ro",
}
}
}
"""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
connector = body['attachment'].get('connector', None)
if not connector:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'connector' "
"to update attachment."))
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_update(context,
attachment_ref,
connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = (
_("Unable to update attachment.(%s).") % ex.msg)
LOG.exception(err_msg)
except Exception:
err_msg = _("Unable to update the attachment.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
# TODO(jdg): Test this out some more, do we want to return an object
# or a dict?
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def delete(self, req, id):
"""Delete an attachment.
Disconnects/deletes the specified attachment and returns a list of any
known shared attachment IDs for the affected backend device.
returns: A summary list of any attachments sharing this connection
"""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
attachments = self.volume_api.attachment_delete(context, attachment)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.response(202)
@wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION)
@wsgi.action('os-complete')
def complete(self, req, id, body):
"""Mark a volume attachment process as completed (in-use)."""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
volume_ref = objects.Volume.get_by_id(
context,
attachment_ref.volume_id)
attachment_ref.update({'attach_status': 'attached'})
attachment_ref.save()
volume_ref.update({'status': 'in-use', 'attach_status': 'attached'})
volume_ref.save()
def create_resource(ext_mgr):
"""Create the wsgi resource for this controller."""
return wsgi.Resource(AttachmentsController(ext_mgr))
| apache-2.0 | -5,867,306,877,933,996,000 | 38.451389 | 79 | 0.58106 | false |
turbokongen/home-assistant | homeassistant/components/plex/config_flow.py | 1 | 15991 | """Config flow for Plex."""
import copy
import logging
from aiohttp import web_response
import plexapi.exceptions
from plexapi.gdm import GDM
from plexauth import PlexAuth
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from .const import ( # pylint: disable=unused-import
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
from .server import PlexServer
_LOGGER = logging.getLogger(__package__)
@callback
def configured_servers(hass):
"""Return a set of the configured Plex servers."""
return {
entry.data[CONF_SERVER_IDENTIFIER]
for entry in hass.config_entries.async_entries(DOMAIN)
}
async def async_discover(hass):
"""Scan for available Plex servers."""
gdm = GDM()
await hass.async_add_executor_job(gdm.scan)
for server_data in gdm.entries:
await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: config_entries.SOURCE_INTEGRATION_DISCOVERY},
data=server_data,
)
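# Editor's sketch (added comment, not part of the original integration): each
# GDM entry handed to the discovery flow is expected to look roughly like
#     {"data": {"Resource-Identifier": "<machine id>", "Port": "32400",
#               "Name": "<server name>"},
#      "from": ("192.168.1.10", 32414)}
# which matches the keys consumed by async_step_integration_discovery() below;
# all concrete values here are placeholders.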
class PlexFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Plex config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PlexOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the Plex flow."""
self.current_login = {}
self.available_servers = None
self.plexauth = None
self.token = None
self.client_id = None
self._manual = False
async def async_step_user(
self, user_input=None, errors=None
): # pylint: disable=arguments-differ
"""Handle a flow initialized by the user."""
if user_input is not None:
return await self.async_step_plex_website_auth()
if self.show_advanced_options:
return await self.async_step_user_advanced(errors=errors)
return self.async_show_form(step_id="user", errors=errors)
async def async_step_user_advanced(self, user_input=None, errors=None):
"""Handle an advanced mode flow initialized by the user."""
if user_input is not None:
if user_input.get("setup_method") == MANUAL_SETUP_STRING:
self._manual = True
return await self.async_step_manual_setup()
return await self.async_step_plex_website_auth()
data_schema = vol.Schema(
{
vol.Required("setup_method", default=AUTOMATIC_SETUP_STRING): vol.In(
[AUTOMATIC_SETUP_STRING, MANUAL_SETUP_STRING]
)
}
)
return self.async_show_form(
step_id="user_advanced", data_schema=data_schema, errors=errors
)
async def async_step_manual_setup(self, user_input=None, errors=None):
"""Begin manual configuration."""
if user_input is not None and errors is None:
user_input.pop(CONF_URL, None)
host = user_input.get(CONF_HOST)
if host:
port = user_input[CONF_PORT]
prefix = "https" if user_input.get(CONF_SSL) else "http"
user_input[CONF_URL] = f"{prefix}://{host}:{port}"
elif CONF_TOKEN not in user_input:
return await self.async_step_manual_setup(
user_input=user_input, errors={"base": "host_or_token"}
)
return await self.async_step_server_validate(user_input)
previous_input = user_input or {}
data_schema = vol.Schema(
{
vol.Optional(
CONF_HOST,
description={"suggested_value": previous_input.get(CONF_HOST)},
): str,
vol.Required(
CONF_PORT, default=previous_input.get(CONF_PORT, DEFAULT_PORT)
): int,
vol.Required(
CONF_SSL, default=previous_input.get(CONF_SSL, DEFAULT_SSL)
): bool,
vol.Required(
CONF_VERIFY_SSL,
default=previous_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
vol.Optional(
CONF_TOKEN,
description={"suggested_value": previous_input.get(CONF_TOKEN)},
): str,
}
)
return self.async_show_form(
step_id="manual_setup", data_schema=data_schema, errors=errors
)
async def async_step_server_validate(self, server_config):
"""Validate a provided configuration."""
errors = {}
self.current_login = server_config
plex_server = PlexServer(self.hass, server_config)
try:
await self.hass.async_add_executor_job(plex_server.connect)
except NoServersFound:
_LOGGER.error("No servers linked to Plex account")
errors["base"] = "no_servers"
except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
_LOGGER.error("Invalid credentials provided, config not created")
errors[CONF_TOKEN] = "faulty_credentials"
except requests.exceptions.SSLError as error:
_LOGGER.error("SSL certificate error: [%s]", error)
errors["base"] = "ssl_error"
except (plexapi.exceptions.NotFound, requests.exceptions.ConnectionError):
server_identifier = (
server_config.get(CONF_URL) or plex_server.server_choice or "Unknown"
)
_LOGGER.error("Plex server could not be reached: %s", server_identifier)
errors[CONF_HOST] = "not_found"
except ServerNotSpecified as available_servers:
self.available_servers = available_servers.args[0]
return await self.async_step_select_server()
except Exception as error: # pylint: disable=broad-except
_LOGGER.exception("Unknown error connecting to Plex server: %s", error)
return self.async_abort(reason="unknown")
if errors:
if self._manual:
return await self.async_step_manual_setup(
user_input=server_config, errors=errors
)
return await self.async_step_user(errors=errors)
server_id = plex_server.machine_identifier
url = plex_server.url_in_use
token = server_config.get(CONF_TOKEN)
entry_config = {CONF_URL: url}
if self.client_id:
entry_config[CONF_CLIENT_ID] = self.client_id
if token:
entry_config[CONF_TOKEN] = token
if url.startswith("https"):
entry_config[CONF_VERIFY_SSL] = server_config.get(
CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL
)
data = {
CONF_SERVER: plex_server.friendly_name,
CONF_SERVER_IDENTIFIER: server_id,
PLEX_SERVER_CONFIG: entry_config,
}
entry = await self.async_set_unique_id(server_id)
if self.context[CONF_SOURCE] == config_entries.SOURCE_REAUTH:
self.hass.config_entries.async_update_entry(entry, data=data)
_LOGGER.debug("Updated config entry for %s", plex_server.friendly_name)
await self.hass.config_entries.async_reload(entry.entry_id)
return self.async_abort(reason="reauth_successful")
self._abort_if_unique_id_configured()
_LOGGER.debug("Valid config created for %s", plex_server.friendly_name)
return self.async_create_entry(title=plex_server.friendly_name, data=data)
async def async_step_select_server(self, user_input=None):
"""Use selected Plex server."""
config = dict(self.current_login)
if user_input is not None:
config[CONF_SERVER] = user_input[CONF_SERVER]
return await self.async_step_server_validate(config)
configured = configured_servers(self.hass)
available_servers = [
name
for (name, server_id) in self.available_servers
if server_id not in configured
]
if not available_servers:
return self.async_abort(reason="all_configured")
if len(available_servers) == 1:
config[CONF_SERVER] = available_servers[0]
return await self.async_step_server_validate(config)
return self.async_show_form(
step_id="select_server",
data_schema=vol.Schema(
{vol.Required(CONF_SERVER): vol.In(available_servers)}
),
errors={},
)
async def async_step_integration_discovery(self, discovery_info):
"""Handle GDM discovery."""
machine_identifier = discovery_info["data"]["Resource-Identifier"]
await self.async_set_unique_id(machine_identifier)
self._abort_if_unique_id_configured()
host = f"{discovery_info['from'][0]}:{discovery_info['data']['Port']}"
name = discovery_info["data"]["Name"]
self.context["title_placeholders"] = {
"host": host,
"name": name,
}
return await self.async_step_user()
async def async_step_plex_website_auth(self):
"""Begin external auth flow on Plex website."""
self.hass.http.register_view(PlexAuthorizationCallbackView)
hass_url = get_url(self.hass)
headers = {"Origin": hass_url}
payload = {
"X-Plex-Device-Name": X_PLEX_DEVICE_NAME,
"X-Plex-Version": X_PLEX_VERSION,
"X-Plex-Product": X_PLEX_PRODUCT,
"X-Plex-Device": self.hass.config.location_name,
"X-Plex-Platform": X_PLEX_PLATFORM,
"X-Plex-Model": "Plex OAuth",
}
session = async_get_clientsession(self.hass)
self.plexauth = PlexAuth(payload, session, headers)
await self.plexauth.initiate_auth()
forward_url = f"{hass_url}{AUTH_CALLBACK_PATH}?flow_id={self.flow_id}"
auth_url = self.plexauth.auth_url(forward_url)
return self.async_external_step(step_id="obtain_token", url=auth_url)
async def async_step_obtain_token(self, user_input=None):
"""Obtain token after external auth completed."""
token = await self.plexauth.token(10)
if not token:
return self.async_external_step_done(next_step_id="timed_out")
self.token = token
self.client_id = self.plexauth.client_identifier
return self.async_external_step_done(next_step_id="use_external_token")
async def async_step_timed_out(self, user_input=None):
"""Abort flow when time expires."""
return self.async_abort(reason="token_request_timeout")
async def async_step_use_external_token(self, user_input=None):
"""Continue server validation with external token."""
server_config = {CONF_TOKEN: self.token}
return await self.async_step_server_validate(server_config)
async def async_step_reauth(self, data):
"""Handle a reauthorization flow request."""
self.current_login = dict(data)
return await self.async_step_user()
class PlexOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Plex options."""
def __init__(self, config_entry):
"""Initialize Plex options flow."""
self.options = copy.deepcopy(dict(config_entry.options))
self.server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
async def async_step_init(self, user_input=None):
"""Manage the Plex options."""
return await self.async_step_plex_mp_settings()
async def async_step_plex_mp_settings(self, user_input=None):
"""Manage the Plex media_player options."""
plex_server = self.hass.data[DOMAIN][SERVERS][self.server_id]
if user_input is not None:
self.options[MP_DOMAIN][CONF_USE_EPISODE_ART] = user_input[
CONF_USE_EPISODE_ART
]
self.options[MP_DOMAIN][CONF_IGNORE_NEW_SHARED_USERS] = user_input[
CONF_IGNORE_NEW_SHARED_USERS
]
self.options[MP_DOMAIN][CONF_IGNORE_PLEX_WEB_CLIENTS] = user_input[
CONF_IGNORE_PLEX_WEB_CLIENTS
]
account_data = {
user: {"enabled": bool(user in user_input[CONF_MONITORED_USERS])}
for user in plex_server.accounts
}
self.options[MP_DOMAIN][CONF_MONITORED_USERS] = account_data
return self.async_create_entry(title="", data=self.options)
available_accounts = {name: name for name in plex_server.accounts}
available_accounts[plex_server.owner] += " [Owner]"
default_accounts = plex_server.accounts
known_accounts = set(plex_server.option_monitored_users)
if known_accounts:
default_accounts = {
user
for user in plex_server.option_monitored_users
if plex_server.option_monitored_users[user]["enabled"]
}
for user in plex_server.accounts:
if user not in known_accounts:
available_accounts[user] += " [New]"
if not plex_server.option_ignore_new_shared_users:
for new_user in plex_server.accounts - known_accounts:
default_accounts.add(new_user)
return self.async_show_form(
step_id="plex_mp_settings",
data_schema=vol.Schema(
{
vol.Required(
CONF_USE_EPISODE_ART,
default=plex_server.option_use_episode_art,
): bool,
vol.Optional(
CONF_MONITORED_USERS, default=default_accounts
): cv.multi_select(available_accounts),
vol.Required(
CONF_IGNORE_NEW_SHARED_USERS,
default=plex_server.option_ignore_new_shared_users,
): bool,
vol.Required(
CONF_IGNORE_PLEX_WEB_CLIENTS,
default=plex_server.option_ignore_plexweb_clients,
): bool,
}
),
)
class PlexAuthorizationCallbackView(HomeAssistantView):
"""Handle callback from external auth."""
url = AUTH_CALLBACK_PATH
name = AUTH_CALLBACK_NAME
requires_auth = False
async def get(self, request):
"""Receive authorization confirmation."""
hass = request.app["hass"]
await hass.config_entries.flow.async_configure(
flow_id=request.query["flow_id"], user_input=None
)
return web_response.Response(
headers={"content-type": "text/html"},
text="<script>window.close()</script>Success! This window can be closed",
)
| apache-2.0 | 1,489,924,755,053,045,500 | 36.36215 | 85 | 0.598837 | false |
italomaia/turtle-linux | games/Dynamite/pgu/test.py | 1 | 1624 | import pygame
from pygame.locals import *
import gui
screen = pygame.display.set_mode(
(640, 480), FULLSCREEN ) # try adding DOUBLEBUF | HWSURFACE
# pygame.mouse.set_visible(0)
app = gui.App()
c = gui.Container(width=640,height=480)
##
## dialog 1
##
t1 = gui.Table()
t1.tr()
t1.add(gui.Label("Gal Test"))
t2 = gui.Table()
t2.tr()
t2.add(gui.Label("Gui Widgets"))
t2.add(gui.Input())
t2.tr()
t2.add(gui.Label("Button"))
t2.add(gui.Button("Click Me!"))
d1 = gui.Dialog(t1, t2)
c.add(d1, 50, 150)
##
## dialog 2
##
t3 = gui.Table()
t3.tr()
t3.add(gui.Label("Another one"))
t4 = gui.Table()
t4.tr()
t4.add(gui.Label("Name"))
t4.add(gui.Input())
t4.tr()
t4.add(gui.Label("Ohh"))
b1 = gui.Button("OK")
t4.add(b1)
d2 = gui.Dialog(t3, t4)
c.add(d2, 50, 300)
##
## some labels
##
l1 = gui.Label("Suppose this is a menu", color=(255, 255, 255) )
c.add(l1, 50, 50)
l2 = gui.Label("Click <SPACE> to hide top dialog", color=(255, 255,
255) )
c.add(l2, 50, 75)
l3 = gui.Label("Opps... Did it happen?", color=(255, 255, 255) )
##
## app begins
##
app.init(widget=c,screen=screen)
FRAME_EVT = USEREVENT + 1
pygame.event.Event(FRAME_EVT)
pygame.time.set_timer(FRAME_EVT, 30)
_quit = 0
while _quit == 0:
event = pygame.event.wait()
if event.type == FRAME_EVT:
pygame.display.flip()
continue
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
_quit = 1
continue
elif event.key == K_SPACE:
d1.close()
c.add(l3, 100, 100)
app._event(event)
screen.fill((0,0,0))
app.paint(screen)
| gpl-3.0 | 4,685,452,212,621,695,000 | 14.037037 | 68 | 0.589286 | false |
ezbake/ezbake-common-python | thrift/thrift-utils/lib/ezbake/thrift/utils/ezthrifttest.py | 1 | 3296 | # Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EzThriftTest contains classes that will be useful for testing thrift services
"""
from kazoo.testing import KazooTestCase
from ezbake.discovery import ServiceDiscoveryClient
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from thrift.transport import TSocket, TTransport
from thrift.transport.TTransport import TTransportException
from ..transport.EzSSLSocket import TSSLServerSocket
from multiprocessing.process import Process
import time
import logging
logger = logging.getLogger(__name__)
class EzThriftServerTestHarness(KazooTestCase):
"""The EzThriftServerTestHarness extends KazooTestCase to provide service discovery for clients in tests
The thrift server is started using a TSimpleServer and registered with EzBake service discovery
"""
def setUp(self):
super(EzThriftServerTestHarness, self).setUp()
self.sd_client = ServiceDiscoveryClient(self.hosts)
self.server_processes = []
@staticmethod
def __thrift_server(processor, host="localhost", port=8449, use_simple_server=True,
use_ssl=False, ca_certs=None, cert=None, key=None):
if use_ssl:
transport = TSSLServerSocket(host=host, port=port,
ca_certs=ca_certs, cert=cert, key=key)
else:
transport = TSocket.TServerSocket(host=host, port=port)
t_factory = TTransport.TBufferedTransportFactory()
p_factory = TBinaryProtocol.TBinaryProtocolFactory()
if use_simple_server:
server = TServer.TSimpleServer(processor, transport, t_factory, p_factory)
else:
server = TServer.TThreadedServer(processor, transport, t_factory, p_factory)
try:
server.serve()
except (Exception, AttributeError, TTransportException) as e:
print e
logger.error("Server error: %s", e)
def add_server(self, app_name, service_name, host, port, processor, use_simple_server=True, wait=1,
use_ssl=False, ca_certs=None, cert=None, key=None):
self.sd_client.register_endpoint(app_name, service_name, host, port)
server_process = Process(target=self.__thrift_server,
args=(processor, host, port, use_simple_server, use_ssl, ca_certs, cert, key))
server_process.start()
time.sleep(wait)
self.server_processes.append(server_process)
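# Hypothetical usage sketch (editor's comment, not part of this module): a test
# case would typically subclass the harness and register its handler in setUp,
# e.g.
#     class MyServiceTest(EzThriftServerTestHarness):
#         def setUp(self):
#             super(MyServiceTest, self).setUp()
#             self.add_server('my_app', 'my_service', 'localhost', 8449,
#                             MyService.Processor(MyHandler()))
# 'MyService', 'MyHandler', the service names and the port are placeholders,
# not defined in this file.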
def tearDown(self):
super(EzThriftServerTestHarness, self).tearDown()
for server_process in self.server_processes:
if server_process.is_alive():
server_process.terminate() | apache-2.0 | -5,059,667,138,645,476,000 | 40.2125 | 111 | 0.68932 | false |
wfx/epack | epack/libarchive/ffi.py | 1 | 7623 | # This file is part of a program licensed under the terms of the GNU Lesser
# General Public License version 2 (or at your option any later version)
# as published by the Free Software Foundation: http://www.gnu.org/licenses/
from __future__ import division, print_function, unicode_literals
from ctypes import (
c_char_p, c_int, c_uint, c_longlong, c_size_t, c_void_p,
c_wchar_p, CFUNCTYPE, POINTER,
)
try:
from ctypes import c_ssize_t
except ImportError:
from ctypes import c_longlong as c_ssize_t
import ctypes
from ctypes.util import find_library
import logging
import mmap
import os
from .exception import ArchiveError
logger = logging.getLogger('libarchive')
page_size = mmap.PAGESIZE
libarchive_path = os.environ.get('LIBARCHIVE') or \
find_library('archive') or \
find_library('libarchive') or \
'libarchive.so'
libarchive = ctypes.cdll.LoadLibrary(libarchive_path)
# Constants
ARCHIVE_EOF = 1 # Found end of archive.
ARCHIVE_OK = 0 # Operation was successful.
ARCHIVE_RETRY = -10 # Retry might succeed.
ARCHIVE_WARN = -20 # Partial success.
ARCHIVE_FAILED = -25 # Current operation cannot complete.
ARCHIVE_FATAL = -30 # No more operations are possible.
AE_IFMT = 0o170000
AE_IFREG = 0o100000
AE_IFLNK = 0o120000
AE_IFSOCK = 0o140000
AE_IFCHR = 0o020000
AE_IFBLK = 0o060000
AE_IFDIR = 0o040000
AE_IFIFO = 0o010000
# Callback types
WRITE_CALLBACK = CFUNCTYPE(
c_ssize_t, c_void_p, c_void_p, POINTER(c_void_p), c_size_t
)
OPEN_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p)
CLOSE_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p)
VOID_CB = lambda *_: ARCHIVE_OK
# Type aliases, for readability
c_archive_p = c_void_p
c_archive_entry_p = c_void_p
# Helper functions
def _error_string(archive_p):
msg = error_string(archive_p)
if msg is None:
return
try:
return msg.decode('ascii')
except UnicodeDecodeError:
return msg
def archive_error(archive_p, retcode):
msg = _error_string(archive_p)
raise ArchiveError(msg, errno(archive_p), retcode, archive_p)
def check_null(ret, func, args):
if ret is None:
raise ArchiveError(func.__name__+' returned NULL')
return ret
def check_int(retcode, func, args):
if retcode >= 0:
return retcode
elif retcode == ARCHIVE_WARN:
logger.warning(_error_string(args[0]))
return retcode
else:
raise archive_error(args[0], retcode)
def ffi(name, argtypes, restype, errcheck=None):
f = getattr(libarchive, 'archive_'+name)
f.argtypes = argtypes
f.restype = restype
if errcheck:
f.errcheck = errcheck
globals()[name] = f
return f
# FFI declarations
# archive_util
errno = ffi('errno', [c_archive_p], c_int)
error_string = ffi('error_string', [c_archive_p], c_char_p)
# archive_entry
ffi('entry_new', [], c_archive_entry_p, check_null)
ffi('entry_filetype', [c_archive_entry_p], c_int)
ffi('entry_mtime', [c_archive_entry_p], c_int)
ffi('entry_perm', [c_archive_entry_p], c_int)
ffi('entry_pathname_w', [c_archive_entry_p], c_wchar_p)
ffi('entry_sourcepath', [c_archive_entry_p], c_char_p)
ffi('entry_size', [c_archive_entry_p], c_longlong)
ffi('entry_size_is_set', [c_archive_entry_p], c_int)
ffi('entry_update_pathname_utf8', [c_archive_entry_p, c_char_p], None)
ffi('entry_clear', [c_archive_entry_p], c_archive_entry_p)
ffi('entry_free', [c_archive_entry_p], None)
# archive_read
ffi('read_new', [], c_archive_p, check_null)
READ_FORMATS = set((
'7zip', 'all', 'ar', 'cab', 'cpio', 'empty', 'iso9660', 'lha', 'mtree',
'rar', 'raw', 'tar', 'xar', 'zip'
))
for f_name in list(READ_FORMATS):
try:
ffi('read_support_format_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('read format "%s" is not supported' % f_name)
READ_FORMATS.remove(f_name)
READ_FILTERS = set((
'all', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma',
'lzop', 'none', 'rpm', 'uu', 'xz'
))
for f_name in list(READ_FILTERS):
try:
ffi('read_support_filter_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('read filter "%s" is not supported' % f_name)
READ_FILTERS.remove(f_name)
ffi('read_open_fd', [c_archive_p, c_int, c_size_t], c_int, check_int)
ffi('read_open_filename_w', [c_archive_p, c_wchar_p, c_size_t],
c_int, check_int)
ffi('read_open_memory', [c_archive_p, c_void_p, c_size_t], c_int, check_int)
ffi('read_next_header', [c_archive_p, POINTER(c_void_p)], c_int, check_int)
ffi('read_next_header2', [c_archive_p, c_void_p], c_int, check_int)
ffi('read_close', [c_archive_p], c_int, check_int)
ffi('read_free', [c_archive_p], c_int, check_int)
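# Typical read sequence (sketch, illustrative only): create a reader with
# read_new(), enable filters/formats, open a source, then loop on
# read_next_header2(archive_p, entry_p) until it returns ARCHIVE_EOF, pulling
# payload bytes with read_data_block(), and finish with read_close()/read_free().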
# archive_read_disk
ffi('read_disk_new', [], c_archive_p, check_null)
ffi('read_disk_set_standard_lookup', [c_archive_p], c_int, check_int)
ffi('read_disk_open', [c_archive_p, c_char_p], c_int, check_int)
ffi('read_disk_open_w', [c_archive_p, c_wchar_p], c_int, check_int)
ffi('read_disk_descend', [c_archive_p], c_int, check_int)
# archive_read_data
ffi('read_data_block',
[c_archive_p, POINTER(c_void_p), POINTER(c_size_t), POINTER(c_longlong)],
c_int, check_int)
ffi('read_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int)
ffi('read_data_skip', [c_archive_p], c_int, check_int)
# archive_write
ffi('write_new', [], c_archive_p, check_null)
ffi('write_disk_new', [], c_archive_p, check_null)
ffi('write_disk_set_options', [c_archive_p, c_int], c_int, check_int)
WRITE_FORMATS = set((
'7zip', 'ar_bsd', 'ar_svr4', 'cpio', 'cpio_newc', 'gnutar', 'iso9660',
'mtree', 'mtree_classic', 'pax', 'pax_restricted', 'shar', 'shar_dump',
'ustar', 'v7tar', 'xar', 'zip'
))
for f_name in list(WRITE_FORMATS):
try:
ffi('write_set_format_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('write format "%s" is not supported' % f_name)
WRITE_FORMATS.remove(f_name)
WRITE_FILTERS = set((
'b64encode', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma',
'lzop', 'uuencode', 'xz'
))
for f_name in list(WRITE_FILTERS):
try:
ffi('write_add_filter_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('write filter "%s" is not supported' % f_name)
WRITE_FILTERS.remove(f_name)
ffi('write_open',
[c_archive_p, c_void_p, OPEN_CALLBACK, WRITE_CALLBACK, CLOSE_CALLBACK],
c_int, check_int)
ffi('write_open_fd', [c_archive_p, c_int], c_int, check_int)
ffi('write_open_filename', [c_archive_p, c_char_p], c_int, check_int)
ffi('write_open_filename_w', [c_archive_p, c_wchar_p], c_int, check_int)
ffi('write_open_memory',
[c_archive_p, c_void_p, c_size_t, POINTER(c_size_t)],
c_int, check_int)
ffi('write_get_bytes_in_last_block', [c_archive_p], c_int, check_int)
ffi('write_get_bytes_per_block', [c_archive_p], c_int, check_int)
ffi('write_set_bytes_in_last_block', [c_archive_p, c_int], c_int, check_int)
ffi('write_set_bytes_per_block', [c_archive_p, c_int], c_int, check_int)
ffi('write_header', [c_archive_p, c_void_p], c_int, check_int)
ffi('write_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int)
ffi('write_data_block', [c_archive_p, c_void_p, c_size_t, c_longlong],
c_int, check_int)
ffi('write_finish_entry', [c_archive_p], c_int, check_int)
ffi('write_close', [c_archive_p], c_int, check_int)
ffi('write_free', [c_archive_p], c_int, check_int)
| gpl-3.0 | -946,142,099,526,545,800 | 30.114286 | 79 | 0.647908 | false |
chintak/scikit-image | skimage/feature/util.py | 1 | 4726 | import numpy as np
from skimage.util import img_as_float
class FeatureDetector(object):
def __init__(self):
self.keypoints_ = np.array([])
def detect(self, image):
"""Detect keypoints in image.
Parameters
----------
image : 2D array
Input image.
"""
raise NotImplementedError()
class DescriptorExtractor(object):
def __init__(self):
self.descriptors_ = np.array([])
def extract(self, image, keypoints):
"""Extract feature descriptors in image for given keypoints.
Parameters
----------
image : 2D array
Input image.
keypoints : (N, 2) array
Keypoint locations as ``(row, col)``.
"""
raise NotImplementedError()
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
keypoints_color='k', matches_color=None, only_matches=False):
"""Plot matched features.
Parameters
----------
ax : matplotlib.axes.Axes
Matches and image are drawn in this ax.
image1 : (N, M [, 3]) array
First grayscale or color image.
image2 : (N, M [, 3]) array
Second grayscale or color image.
keypoints1 : (K1, 2) array
First keypoint coordinates as ``(row, col)``.
keypoints2 : (K2, 2) array
Second keypoint coordinates as ``(row, col)``.
matches : (Q, 2) array
Indices of corresponding matches in first and second set of
descriptors, where ``matches[:, 0]`` denote the indices in the first
and ``matches[:, 1]`` the indices in the second set of descriptors.
keypoints_color : matplotlib color, optional
Color for keypoint locations.
matches_color : matplotlib color, optional
Color for lines which connect keypoint matches. By default the
color is chosen randomly.
only_matches : bool, optional
Whether to only plot matches and not plot the keypoint locations.
"""
image1 = img_as_float(image1)
image2 = img_as_float(image2)
new_shape1 = list(image1.shape)
new_shape2 = list(image2.shape)
if image1.shape[0] < image2.shape[0]:
new_shape1[0] = image2.shape[0]
elif image1.shape[0] > image2.shape[0]:
new_shape2[0] = image1.shape[0]
if image1.shape[1] < image2.shape[1]:
new_shape1[1] = image2.shape[1]
elif image1.shape[1] > image2.shape[1]:
new_shape2[1] = image1.shape[1]
if new_shape1 != image1.shape:
new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
new_image1[:image1.shape[0], :image1.shape[1]] = image1
image1 = new_image1
if new_shape2 != image2.shape:
new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
new_image2[:image2.shape[0], :image2.shape[1]] = image2
image2 = new_image2
image = np.concatenate([image1, image2], axis=1)
offset = image1.shape
if not only_matches:
ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.imshow(image)
ax.axis((0, 2 * offset[1], offset[0], 0))
for i in range(matches.shape[0]):
idx1 = matches[i, 0]
idx2 = matches[i, 1]
if matches_color is None:
            color = np.random.rand(3)
else:
color = matches_color
ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
(keypoints1[idx1, 0], keypoints2[idx2, 0]),
'-', color=color)
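# Example usage of plot_matches (sketch; assumes matplotlib and keypoints/matches
# already computed by a detector and matcher):
#     fig, ax = plt.subplots()
#     plot_matches(ax, image1, image2, keypoints1, keypoints2, matches)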
def _prepare_grayscale_input_2D(image):
image = np.squeeze(image)
if image.ndim != 2:
raise ValueError("Only 2-D gray-scale images supported.")
return img_as_float(image)
def _mask_border_keypoints(image_shape, keypoints, distance):
"""Mask coordinates that are within certain distance from the image border.
Parameters
----------
image_shape : (2, ) array_like
Shape of the image as ``(rows, cols)``.
keypoints : (N, 2) array
Keypoint coordinates as ``(rows, cols)``.
distance : int
Image border distance.
Returns
-------
mask : (N, ) bool array
Mask indicating if pixels are within the image (``True``) or in the
border region of the image (``False``).
"""
rows = image_shape[0]
cols = image_shape[1]
mask = (((distance - 1) < keypoints[:, 0])
& (keypoints[:, 0] < (rows - distance + 1))
& ((distance - 1) < keypoints[:, 1])
& (keypoints[:, 1] < (cols - distance + 1)))
return mask
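# Example (sketch): keep only keypoints at least 16 pixels away from the border:
#     keypoints = keypoints[_mask_border_keypoints(image.shape, keypoints, 16)]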
| bsd-3-clause | 973,310,001,294,730,000 | 28.354037 | 79 | 0.585485 | false |
mbrukman/delayed-replay | tests/proxy_test.py | 1 | 1244 | #!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Tests for functionality in the proxy.py file.
import proxy
import unittest
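# Judging by the cases below, GetTargetUrl recovers the original target URL from
# a proxy request path by extracting and unquoting the `q` query parameter
# (e.g. '/?q=/home/%7Euser' -> '/home/~user').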
class GetTargetUrlTest(unittest.TestCase):
def testSimple(self):
cases = [
('foo/bar', '/?q=foo/bar'),
('/home/~user', '/?q=/home/%7Euser')
]
for expected, path in cases:
actual = proxy.GetTargetUrl(path)
if expected != actual:
        print('Failed conversion for %s' % path)
        print('expected: %s' % expected)
        print('  actual: %s' % actual)
        self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -9,002,905,825,614,061,000 | 29.341463 | 80 | 0.634244 | false |