| repo_name (string, 5–92 chars) | path (string, 4–232 chars) | copies (string, 19 classes) | size (string, 4–7 chars) | content (string, 721–1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
bcstack/btsdk | shell/shell.py | 1 | 3000 |

import sys, atexit
import pickle
import bluetooth as bt
from cmd import Cmd
from service import SerialPortService
import socket

banner = '''Welcome to btsdk shell.
\tType 'help()' for help
'''


class Shell(Cmd):
    prompt = 'btsdk> '
    intro = "btsdk shell"

    def __init__(self):
        Cmd.__init__(self)
        self.device = None
        self.port = None
        self.service = None
        socket.setdefaulttimeout(0.1)
        atexit.register(self.on_exit)
        print 'loading saved data',
        try:
            f = open('save.p', 'rb')
            saved = pickle.load(f)
            self.device = saved['device']
            print 'OK'
        except (IOError):
            print 'FAILED'
        if self.device:
            print 'DUT:', self.device
        else:
            print 'No DUT. please scan and select.'

    def on_exit(self):
        saved = {'device': self.device}
        pickle.dump( saved, open( "save.p", "wb" ) )

    def do_scan(self, arg):
        'scan for bluetooth devices'
        self.devices = bt.discover_devices(lookup_names = True)
        print '%s\t%s\t\t\t%s' %('Index', 'Address', 'Name')
        print
        for i in range(len(self.devices)):
            d = self.devices[i]
            print '%d\t%s\t%s' %(i, d[0], d[1])
        print 'please select one as DUT'

    def do_select(self, line):
        '''select [index]
        select the device'''
        if line == '':
            print 'missing parameter'
            return
        i = int(line)
        if i >= len(self.devices):
            print 'Index %d is out of range 0..%d' %(i, len(self.devices) - 1)
            return
        d = self.devices[i]
        self.device = d[0]
        print 'selected <%d> %s %s' %(i,d[0], d[1])

    def do_conn(self, line):
        'connect to DUT'
        if self.port:
            print 'already connected'
        else:
            print 'connecting ...'
            records = bt.find_service(uuid=bt.SERIAL_PORT_CLASS,
                                      address=self.device)
            if len(records) == 0:
                print "port not found"
                return
            portnum = records[0]['port']
            print 'SPP port is', portnum
            self.port = bt.BluetoothSocket(bt.RFCOMM)
            self.port.connect((self.device, portnum))
            self.service = SerialPortService(self.port)
            self.service.start()
            print 'done'

    def do_disc(self, line):
        'disconnect'
        if self.port:
            print 'disconnecting ...'
            self.service.end()
            self.port = None
            print 'done'
        else:
            print 'not connected'

    def do_led(self, line):
        'set led color r,g,b,w'
        self.service.send("1234")

    def do_q(self, line):
        'quit'
        print 'bye'
        return True

    def do_EOF(self, line):
        'quit the system'
        print 'bye'
        return True


shell = Shell()
shell.cmdloop()
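
# Example session (illustrative only; the device index, address and name below
# are placeholders, not taken from this repository). The intended workflow is
# scan -> select -> conn -> led/disc -> q:
#
#   btsdk> scan
#   Index   Address             Name
#   0       00:11:22:33:44:55   ExampleDevice
#   please select one as DUT
#   btsdk> select 0
#   btsdk> conn
#   btsdk> led
#   btsdk> disc
#   btsdk> q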
| apache-2.0 | 7,593,908,226,832,237,000 | 26.027027 | 78 | 0.507 | false |
kinsights/django-contrib-comments | tests/testapp/tests/test_templatetags.py | 1 | 7995 |

from __future__ import absolute_import

from django.contrib.contenttypes.models import ContentType
from django.template import Template, Context

from django_comments.forms import CommentForm
from django_comments.models import Comment

from testapp.models import Article, Author

from . import CommentTestCase


class CommentTemplateTagTests(CommentTestCase):

    def render(self, t, **c):
        ctx = Context(c)
        out = Template(t).render(ctx)
        return ctx, out

    def testCommentFormTarget(self):
        ctx, out = self.render("{% load comments %}{% comment_form_target %}")
        self.assertEqual(out, "/post/")

    def testGetCommentForm(self, tag=None):
        t = "{% load comments %}" + (tag or "{% get_comment_form for testapp.article a.id as form %}")
        ctx, out = self.render(t, a=Article.objects.get(pk=1))
        self.assertEqual(out, "")
        self.assertTrue(isinstance(ctx["form"], CommentForm))

    def testGetCommentFormFromLiteral(self):
        self.testGetCommentForm("{% get_comment_form for testapp.article 1 as form %}")

    def testGetCommentFormFromObject(self):
        self.testGetCommentForm("{% get_comment_form for a as form %}")

    def testWhitespaceInGetCommentFormTag(self):
        self.testGetCommentForm("{% load comment_testtags %}{% get_comment_form for a|noop:'x y' as form %}")

    def testRenderCommentForm(self, tag=None):
        t = "{% load comments %}" + (tag or "{% render_comment_form for testapp.article a.id %}")
        ctx, out = self.render(t, a=Article.objects.get(pk=1))
        self.assertTrue(out.strip().startswith("<form action="))
        self.assertTrue(out.strip().endswith("</form>"))

    def testRenderCommentFormFromLiteral(self):
        self.testRenderCommentForm("{% render_comment_form for testapp.article 1 %}")

    def testRenderCommentFormFromObject(self):
        self.testRenderCommentForm("{% render_comment_form for a %}")

    def testWhitespaceInRenderCommentFormTag(self):
        self.testRenderCommentForm("{% load comment_testtags %}{% render_comment_form for a|noop:'x y' %}")

    def testRenderCommentFormFromObjectWithQueryCount(self):
        with self.assertNumQueries(1):
            self.testRenderCommentFormFromObject()

    def verifyGetCommentCount(self, tag=None):
        t = "{% load comments %}" + (tag or "{% get_comment_count for testapp.article a.id as cc %}") + "{{ cc }}"
        ctx, out = self.render(t, a=Article.objects.get(pk=1))
        self.assertEqual(out, "2")

    def testGetCommentCount(self):
        self.createSomeComments()
        self.verifyGetCommentCount("{% get_comment_count for testapp.article a.id as cc %}")

    def testGetCommentCountFromLiteral(self):
        self.createSomeComments()
        self.verifyGetCommentCount("{% get_comment_count for testapp.article 1 as cc %}")

    def testGetCommentCountFromObject(self):
        self.createSomeComments()
        self.verifyGetCommentCount("{% get_comment_count for a as cc %}")

    def testWhitespaceInGetCommentCountTag(self):
        self.createSomeComments()
        self.verifyGetCommentCount("{% load comment_testtags %}{% get_comment_count for a|noop:'x y' as cc %}")

    def verifyGetCommentList(self, tag=None):
        c1, c2, c3, c4 = Comment.objects.all()[:4]
        t = "{% load comments %}" + (tag or "{% get_comment_list for testapp.author a.id as cl %}")
        ctx, out = self.render(t, a=Author.objects.get(pk=1))
        self.assertEqual(out, "")
        self.assertEqual(list(ctx["cl"]), [c2])

    def testGetCommentList(self):
        self.createSomeComments()
        self.verifyGetCommentList("{% get_comment_list for testapp.author a.id as cl %}")

    def testGetCommentListFromLiteral(self):
        self.createSomeComments()
        self.verifyGetCommentList("{% get_comment_list for testapp.author 1 as cl %}")

    def testGetCommentListFromObject(self):
        self.createSomeComments()
        self.verifyGetCommentList("{% get_comment_list for a as cl %}")

    def testWhitespaceInGetCommentListTag(self):
        self.createSomeComments()
        self.verifyGetCommentList("{% load comment_testtags %}{% get_comment_list for a|noop:'x y' as cl %}")

    def testGetCommentPermalink(self):
        c1, c2, c3, c4 = self.createSomeComments()
        t = "{% load comments %}{% get_comment_list for testapp.author author.id as cl %}"
        t += "{% get_comment_permalink cl.0 %}"
        ct = ContentType.objects.get_for_model(Author)
        author = Author.objects.get(pk=1)
        ctx, out = self.render(t, author=author)
        self.assertEqual(out, "/cr/%s/%s/#c%s" % (ct.id, author.id, c2.id))

    def testGetCommentPermalinkFormatted(self):
        c1, c2, c3, c4 = self.createSomeComments()
        t = "{% load comments %}{% get_comment_list for testapp.author author.id as cl %}"
        t += "{% get_comment_permalink cl.0 '#c%(id)s-by-%(user_name)s' %}"
        ct = ContentType.objects.get_for_model(Author)
        author = Author.objects.get(pk=1)
        ctx, out = self.render(t, author=author)
        self.assertEqual(out, "/cr/%s/%s/#c%s-by-Joe Somebody" % (ct.id, author.id, c2.id))

    def testWhitespaceInGetCommentPermalinkTag(self):
        c1, c2, c3, c4 = self.createSomeComments()
        t = "{% load comments comment_testtags %}{% get_comment_list for testapp.author author.id as cl %}"
        t += "{% get_comment_permalink cl.0|noop:'x y' %}"
        ct = ContentType.objects.get_for_model(Author)
        author = Author.objects.get(pk=1)
        ctx, out = self.render(t, author=author)
        self.assertEqual(out, "/cr/%s/%s/#c%s" % (ct.id, author.id, c2.id))

    def testRenderCommentList(self, tag=None):
        t = "{% load comments %}" + (tag or "{% render_comment_list for testapp.article a.id %}")
        ctx, out = self.render(t, a=Article.objects.get(pk=1))
        self.assertTrue(out.strip().startswith("<dl id=\"comments\">"))
        self.assertTrue(out.strip().endswith("</dl>"))

    def testRenderCommentListFromLiteral(self):
        self.testRenderCommentList("{% render_comment_list for testapp.article 1 %}")

    def testRenderCommentListFromObject(self):
        self.testRenderCommentList("{% render_comment_list for a %}")

    def testWhitespaceInRenderCommentListTag(self):
        self.testRenderCommentList("{% load comment_testtags %}{% render_comment_list for a|noop:'x y' %}")

    def testNumberQueries(self):
        """
        Ensure that the template tags use cached content types to reduce the
        number of DB queries.
        Refs #16042.
        """
        self.createSomeComments()

        # {% render_comment_list %} -----------------

        # Clear CT cache
        ContentType.objects.clear_cache()
        with self.assertNumQueries(4):
            self.testRenderCommentListFromObject()

        # CT's should be cached
        with self.assertNumQueries(3):
            self.testRenderCommentListFromObject()

        # {% get_comment_list %} --------------------

        ContentType.objects.clear_cache()
        with self.assertNumQueries(4):
            self.verifyGetCommentList()

        with self.assertNumQueries(3):
            self.verifyGetCommentList()

        # {% render_comment_form %} -----------------

        ContentType.objects.clear_cache()
        with self.assertNumQueries(3):
            self.testRenderCommentForm()

        with self.assertNumQueries(2):
            self.testRenderCommentForm()

        # {% get_comment_form %} --------------------

        ContentType.objects.clear_cache()
        with self.assertNumQueries(3):
            self.testGetCommentForm()

        with self.assertNumQueries(2):
            self.testGetCommentForm()

        # {% get_comment_count %} -------------------

        ContentType.objects.clear_cache()
        with self.assertNumQueries(3):
            self.verifyGetCommentCount()

        with self.assertNumQueries(2):
            self.verifyGetCommentCount()
| bsd-3-clause | -1,461,776,287,184,920,800 | 39.378788 | 114 | 0.637398 | false |
colinfong/AICombat | AICombat/battle.py | 1 | 1488 |

# global imports
import pygame

# local imports
import resource
from definitions import terrain, action
from square import Square
from realbot import Realbot
from dumbbot import Dumbbot


class Battle():

    def __init__(self):

        # Load terrain tiles
        self.terrainImages = {}
        self.terrainImages[terrain.EMPTY] = resource.loadImage("terrain_empty.png")

        # Initialize arena squares and image
        rows = 20
        cols = 20
        self.arena = []
        for row in xrange(rows):
            self.arena.append([])
            for col in xrange(cols):
                self.arena[-1].append(Square())
        self.arenaRect = pygame.Rect(0, 0, rows*20, cols*20)
        self.arenaImage = pygame.Surface(self.arenaRect.size).convert()
        self.arenaImage.fill((255,255,255))
        for row in xrange(rows):
            for col in xrange(cols):
                self.arenaImage.blit(self.terrainImages[self.arena[row][col].terrain][0], (row*20,col*20))

        # Initialize real bots
        # For now, hardcode in a Dumbbot
        self.bots = pygame.sprite.LayeredUpdates()
        self.bots.add(Realbot(Dumbbot()))

        # Initialize other bookkeeping variables
        self.totalElapsed = 0

    def update(self, events, elapsed):
        self.totalElapsed += elapsed
        self.bots.get_sprite(0).update(self.arena, [], elapsed)

    def draw(self, screen):
        screen.blit(self.arenaImage, self.arenaRect)
        self.bots.draw(screen)
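
# Minimal usage sketch (illustrative only; window size and frame rate are
# arbitrary choices, not taken from this repository). A pygame main loop
# driving Battle might look roughly like:
#
#   pygame.init()
#   screen = pygame.display.set_mode((400, 400))
#   battle = Battle()
#   clock = pygame.time.Clock()
#   while True:
#       elapsed = clock.tick(60)              # milliseconds since last frame
#       battle.update(pygame.event.get(), elapsed)
#       battle.draw(screen)
#       pygame.display.flip()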
| gpl-2.0 | 2,995,180,122,049,652,700 | 29.367347 | 106 | 0.623656 | false |
Zephor5/mypika | pooled_pika.py | 1 | 7002 |

# coding=utf-8
import logging
import threading
from contextlib import contextmanager

from pika.adapters.twisted_connection import TwistedChannel
from pika.adapters.twisted_connection import TwistedProtocolConnection
from pika.connection import Parameters
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator

try:
    itervalues = dict.itervalues
except AttributeError:
    itervalues = dict.values

__all__ = ["VERSION", "PooledConn"]

VERSION = "0.2.0"

logger = logging.getLogger(__name__)

__lock = threading.Lock()


@contextmanager
def _lock():
    __lock.acquire()
    try:
        yield
    finally:
        __lock.release()


class PooledConn(object):
    """
    """

    _my_params = {}
    _my_pools = ({}, {})
    _my_channels = {}
    _timeout = {}
    _max_size = {}
    _waiting = {}

    max_size = 100

    # retry once by default
    retry = True

    timeout_conn = 1

    loop = reactor

    def __new__(cls, params, timeout_conn=0, max_size=None):
        """
        :param cls:
        :param params: connection params
        :param timeout_conn: connection timeout
        :param max_size: max pool size for each connection
        :return: PoolConn instance
        """
        if isinstance(params, Parameters):
            _id = repr(params)  # take repr of params for id to identify the pool
            instance = super(PooledConn, cls).__new__(cls)
            if _id in cls._my_params:
                # always update the params instance
                cls._my_params[_id] = params
            else:
                cls._my_params[_id] = params
                cls._my_pools[0][_id] = {}
                cls._my_pools[1][_id] = {}
                cls._my_channels[_id] = {}
                cls._max_size[_id] = max_size if max_size else cls.max_size
                cls._timeout[_id] = (
                    timeout_conn if timeout_conn > 0 else cls.timeout_conn
                )
                cls._waiting[_id] = []
            # only works when first created
            instance.__params = cls._my_params[_id]
            instance.__max_size = cls._max_size[_id]
            instance.timeout_conn = cls._timeout[_id]
            instance.__idle_pool, instance.__using_pool = (
                cls._my_pools[i][_id] for i in (0, 1)
            )
            instance.__channel_pool = cls._my_channels[_id]
            instance.waiting = cls._waiting[_id]
            return instance
        else:
            raise TypeError("only accept pika Parameters type")

    def __init__(self, *args, **kwargs):
        """
        :param args: to keep with the __new__
        :param kwargs: to keep with the __new__
        :return:
        """
        # self.__params
        # self.__max_size
        # self.timeout_conn
        # self.__idle_pool
        # self.__using_pool
        # self.__channel_pool

    def __connect(self, retrying=False):
        params = self.__params
        cc = ClientCreator(self.loop, TwistedProtocolConnection, params)
        _d = cc.connectTCP(params.host, params.port, timeout=self.timeout_conn)

        def conn_ready(c):
            c.ready.addCallback(lambda _: c)
            return c.ready

        _d.addCallback(conn_ready)
        _d.addCallbacks(
            self._in_pool,
            lambda err: err if retrying or not self.retry else self.__connect(True),
        )  # retry once when err
        return _d

    def _in_pool(self, conn):
        assert isinstance(
            conn, TwistedProtocolConnection
        ), "conn must be TwistedProtocolConnection"
        logger.debug("in pool : %s" % conn)
        _id = id(conn)
        if self.size < self.__max_size:
            # add hook to clear the bad connection object in the pool
            conn.ready = defer.Deferred()
            conn.ready.addErrback(
                self._clear,
                self.__idle_pool,
                self.__using_pool,
                self.__channel_pool,
                _id,
            )
            # add new conn in using pool
            self.__using_pool[_id] = conn
        else:
            raise RuntimeError("_in_pool, unexpected reach")
        return conn

    @staticmethod
    def _clear(reason, idle_pool, using_pool, channel_pool, conn_id):
        """
        clear the bad connection
        :param reason:
        :param idle_pool:
        :param using_pool:
        :param channel_pool:
        :param conn_id:
        :return:
        """
        with _lock():
            try:
                idle_pool.pop(conn_id)
                logger.info("a connection lost when not using")
            except KeyError:
                if using_pool.pop(conn_id, None):
                    logger.warn("connection lost when using, should be handled later")
                return reason
            finally:
                channel_pool.pop(conn_id, None)

    def _get_channel(self, conn):
        _id = id(conn)
        d = None
        p = self.__channel_pool
        if _id in p:
            channel = p[_id]
            if channel.is_open:
                d = defer.Deferred()
                d.callback(p[_id])
        if d is None:
            d = conn.channel()
            d.addCallback(
                lambda ch: p.update({_id: ch}) or setattr(ch, "pool_id_", _id) or ch
            )

            def _h_err(ch, _conn):
                _conn.ready.addErrback(lambda _, _c: p.pop(id(_c), None), _conn)
                return ch

            d.addCallback(_h_err, conn)
        return d

    def acquire(self, channel=False):
        d = defer.Deferred()
        if channel:
            d.addCallback(self._get_channel)
        with _lock():
            while self.__idle_pool:
                _id, conn = self.__idle_pool.popitem()
                if conn._impl.is_open:
                    self.__using_pool[_id] = conn
                    d.callback(conn)
                    return d
        if self.size >= self.__max_size:
            self.waiting.append(d)
        else:
            self.__connect().chainDeferred(d)
        return d

    def release(self, c):
        if isinstance(c, TwistedProtocolConnection):
            _id = id(c)
        elif isinstance(c, TwistedChannel):
            _id = c.pool_id_
            c = self.__using_pool.get(_id, None)
        else:
            return c
        if _id in self.__using_pool:
            # clear each hook add to ready when using
            c.ready.callbacks = c.ready.callbacks[:1]
            if self.waiting:
                self.waiting.pop(0).callback(c)
            else:
                with _lock():
                    # put the conn back to idle
                    self.__idle_pool[_id] = self.__using_pool.pop(_id)

    @property
    def size(self):
        with _lock():
            return len(self.__idle_pool) + len(self.__using_pool)

    def clear(self):
        with _lock():
            for c in itervalues(self.__idle_pool):
                c.close()
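
# Usage sketch (illustrative only; host, queue name and message body are
# placeholders, not part of this module): acquire a pooled channel, publish,
# then hand the connection back to the pool.
#
#   from pika import ConnectionParameters
#
#   pool = PooledConn(ConnectionParameters(host='127.0.0.1'), max_size=10)
#
#   def _publish(ch):
#       d = ch.basic_publish(exchange='', routing_key='test_q', body='hello')
#       d.addBoth(lambda _: pool.release(ch))
#       return d
#
#   pool.acquire(channel=True).addCallback(_publish)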
| bsd-3-clause | -3,714,928,351,200,007,700 | 28.795745 | 86 | 0.517138 | false |
lavende/pottery | pottery/consts.py | 1 | 3215 |

protocol_version = 'HTTP/1.0'

STATUS_CODE = {
    100: ('Continue', 'Request received, please continue'),
    101: ('Switching Protocols',
          'Switching to new protocol; obey Upgrade header'),

    200: ('OK', 'Request fulfilled, document follows'),
    201: ('Created', 'Document created, URL follows'),
    202: ('Accepted',
          'Request accepted, processing continues off-line'),
    203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
    204: ('No Content', 'Request fulfilled, nothing follows'),
    205: ('Reset Content', 'Clear input form for further input.'),
    206: ('Partial Content', 'Partial content follows.'),

    300: ('Multiple Choices',
          'Object has several resources -- see URI list'),
    301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
    302: ('Found', 'Object moved temporarily -- see URI list'),
    303: ('See Other', 'Object moved -- see Method and URL list'),
    304: ('Not Modified',
          'Document has not changed since given time'),
    305: ('Use Proxy',
          'You must use proxy specified in Location to access this '
          'resource.'),
    307: ('Temporary Redirect',
          'Object moved temporarily -- see URI list'),

    400: ('Bad Request',
          'Bad request syntax or unsupported method'),
    401: ('Unauthorized',
          'No permission -- see authorization schemes'),
    402: ('Payment Required',
          'No payment -- see charging schemes'),
    403: ('Forbidden',
          'Request forbidden -- authorization will not help'),
    404: ('Not Found', 'Nothing matches the given URI'),
    405: ('Method Not Allowed',
          'Specified method is invalid for this resource.'),
    406: ('Not Acceptable', 'URI not available in preferred format.'),
    407: ('Proxy Authentication Required', 'You must authenticate with '
          'this proxy before proceeding.'),
    408: ('Request Timeout', 'Request timed out; try again later.'),
    409: ('Conflict', 'Request conflict.'),
    410: ('Gone',
          'URI no longer exists and has been permanently removed.'),
    411: ('Length Required', 'Client must specify Content-Length.'),
    412: ('Precondition Failed', 'Precondition in headers is false.'),
    413: ('Request Entity Too Large', 'Entity is too large.'),
    414: ('Request-URI Too Long', 'URI is too long.'),
    415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
    416: ('Requested Range Not Satisfiable',
          'Cannot satisfy request range.'),
    417: ('Expectation Failed',
          'Expect condition could not be satisfied.'),

    500: ('Internal Server Error', 'Server got itself in trouble'),
    501: ('Not Implemented',
          'Server does not support this operation'),
    502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
    503: ('Service Unavailable',
          'The server cannot process the request due to a high load'),
    504: ('Gateway Timeout',
          'The gateway server did not receive a timely response'),
    505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}

DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
| mit | -2,016,716,971,171,177,700 | 39.1875 | 73 | 0.657232 | false |
lsaffre/lino-welfare | lino_welfare/modlib/integ/models.py | 1 | 23340 | # -*- coding: UTF-8 -*-
# Copyright 2013-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
The :xfile:`models` module for the :mod:`lino_welfare.modlib.integ` app.
"""
from __future__ import unicode_literals
from builtins import str
import logging
logger = logging.getLogger(__name__)
import datetime
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.db.utils import DatabaseError
from django.conf import settings
from etgen.html import E
from lino.utils.report import Report
from lino.mixins import ObservedDateRange
from lino.modlib.users.choicelists import UserTypes
from lino.modlib.system.choicelists import PeriodEvents
from lino.api import dd, rt, gettext
from .roles import IntegUser
config = dd.plugins.integ
contacts = dd.resolve_app('contacts')
pcsw = dd.resolve_app('pcsw')
isip = dd.resolve_app('isip')
jobs = dd.resolve_app('jobs')
courses = dd.resolve_app('courses')
cv = dd.resolve_app('cv')
# properties = dd.resolve_app('properties')
from lino_xl.lib.coachings.utils import has_contracts_filter
from lino_xl.lib.clients.choicelists import ClientEvents, ObservedEvent
from lino_xl.lib.clients.choicelists import ClientStates
from lino_xl.lib.coachings.desktop import CoachingEndings
class ClientIsAvailable(ObservedEvent):
text = _("Available")
def add_filter(self, qs, pv):
period = (pv.start_date, pv.end_date)
flt = has_contracts_filter('isip_contract_set_by_client', period)
flt |= has_contracts_filter('job_contract_set_by_client', period)
if dd.is_installed('immersion'):
flt |= has_contracts_filter(
'immersion_contract_set_by_client', period)
qs = qs.exclude(flt)
return qs
ClientEvents.add_item_instance(ClientIsAvailable("available"))
class Clients(pcsw.CoachedClients):
# Black right-pointing triangle : U+25B6 ▶
# Black right-pointing pointer : U+25BA ►
help_text = """Wie Kontakte --> Klienten, aber mit \
DSBE-spezifischen Kolonnen und Filterparametern."""
#~ detail_layout = IntegClientDetail()
required_roles = dd.login_required(IntegUser)
params_panel_hidden = True
title = _("Integration Clients")
order_by = "last_name first_name id".split()
allow_create = False # see blog/2012/0922
use_as_default_table = False
column_names = "name_column:20 #active_contract:16 \
applies_from applies_until contract_company:16 \
national_id:10 gsm:10 address_column age:10 email phone:10 \
id aid_type language:10"
parameters = dict(
group=dd.ForeignKey("pcsw.PersonGroup", blank=True, null=True,
verbose_name=_("Integration phase")),
language=dd.ForeignKey('languages.Language',
verbose_name=_("Language knowledge"),
blank=True, null=True),
wanted_property=dd.ForeignKey('properties.Property',
verbose_name=_("Wanted skill"),
blank=True, null=True),
only_active=models.BooleanField(
_("Only active clients"), default=False,
help_text=_(
"Show only clients in 'active' integration phases")),
**pcsw.CoachedClients.parameters)
params_layout = """
client_state coached_by and_coached_by start_date end_date observed_event
aged_from aged_to gender nationality also_obsolete
language wanted_property group only_active only_primary
"""
@classmethod
def param_defaults(self, ar, **kw):
kw = super(Clients, self).param_defaults(ar, **kw)
kw.update(client_state=ClientStates.coached)
kw.update(coached_by=ar.get_user())
# kw.update(only_primary=True)
return kw
@classmethod
def get_request_queryset(self, ar):
#~ ar.param_values.update(client_state = ClientStates.coached)
qs = super(Clients, self).get_request_queryset(ar)
if ar.param_values.language:
qs = qs.filter(languageknowledge__language=
ar.param_values.language).distinct()
if ar.param_values.wanted_property:
qs = qs.filter(personproperty__property=
ar.param_values.wanted_property).distinct()
if ar.param_values.group:
qs = qs.filter(group=ar.param_values.group)
if ar.param_values.only_active:
qs = qs.filter(group__active=True)
return qs
@classmethod
def get_title_tags(self, ar):
for t in super(Clients, self).get_title_tags(ar):
yield t
if ar.param_values.only_active:
yield str(ar.actor.parameters['only_active'].verbose_name)
if ar.param_values.language:
yield str(
ar.actor.parameters['language'].verbose_name) + \
' ' + str(ar.param_values.language)
if ar.param_values.group:
yield str(ar.param_values.group)
class UsersWithClients(dd.VirtualTable):
"""An overview table for agents of the integration service.
"""
required_roles = dd.login_required(IntegUser)
label = _("Users with their Clients")
display_mode = 'html'
@classmethod
def get_data_rows(self, ar):
"""We only want the users who actually have at least one client.
We store the corresponding request in the user object
under the name `my_persons`.
The list displays only integration agents, i.e. users with a
nonempty `integ_level`. With one subtility: system admins
also have a nonempty `integ_level`, but normal users don't
want to see them. So we add the rule that only system admins
see other system admins.
"""
u = ar.get_user()
if u is None or not u.user_type.has_required_roles([dd.SiteAdmin]):
user_types = [p for p in UserTypes.items()
if p.has_required_roles([IntegUser])
and not p.has_required_roles([dd.SiteAdmin])]
else:
user_types = [
p for p in UserTypes.items()
if p.has_required_roles([IntegUser])]
qs = rt.models.users.User.objects.filter(user_type__in=user_types)
for user in qs.order_by('username'):
r = Clients.request(param_values=dict(coached_by=user))
if r.get_total_count():
user.my_persons = r
yield user
@dd.virtualfield('coachings.Coaching.user')
def user(self, obj, ar):
return obj
@dd.requestfield(_("Total"))
def row_total(self, obj, ar):
return obj.my_persons
@dd.requestfield(_("Primary clients"))
def primary_clients(self, obj, ar):
t = dd.today()
return Clients.request(param_values=dict(
only_primary=True, coached_by=obj, start_date=t, end_date=t))
@dd.requestfield(_("Active clients"))
def active_clients(self, obj, ar):
#~ return MyActiveClients.request(ar.ui,subst_user=obj)
t = dd.today()
return Clients.request(param_values=dict(
only_active=True, only_primary=True,
coached_by=obj, start_date=t, end_date=t))
@dd.receiver(dd.post_analyze)
def on_database_ready(sender, **kw):
"""
Builds columns dynamically from the :class:`PersonGroup` database table.
This must also be called before each test case.
"""
self = UsersWithClients
self.column_names = 'user:10'
try:
for pg in pcsw.PersonGroup.objects.exclude(
ref_name='').order_by('ref_name'):
def w(pg):
# we must evaluate `today` for each request, not only
# once on server startup
today = dd.today()
pv = dict(group=pg, start_date=today, end_date=today)
if dd.plugins.integ.only_primary:
pv.update(only_primary=True)
def func(self, obj, ar):
pv.update(coached_by=obj)
return Clients.request(param_values=pv)
return func
vf = dd.RequestField(w(pg), verbose_name=pg.name)
self.add_virtual_field('G' + pg.ref_name, vf)
self.column_names += ' ' + vf.name
except DatabaseError:
pass # happens e.g. if database isn't yet initialized
self.column_names += ' primary_clients active_clients row_total'
self.clear_handle() # avoid side effects when running multiple test cases
settings.SITE.resolve_virtual_fields()
class CompareRequestsTable(dd.VirtualTable):
"""
This is one of the tables of the :class:`ActivityReport`.
"""
label = _("Evolution générale")
auto_fit_column_widths = True
column_names = "description old_value new_value"
display_mode = 'html'
hide_sums = True
@dd.displayfield(_("Description"))
def description(self, row, ar):
return row[0]
@dd.requestfield(_("Initial value"))
def old_value(self, row, ar):
return row[1]
@dd.requestfield(_("Final value"))
def new_value(self, row, ar):
return row[2]
@classmethod
def get_data_rows(self, ar):
#~ rows = []
pv = ar.master_instance
if pv is None:
return
#~ def add(A,oe=None,**kw):
def add(A, **kw):
pva = dict(**kw)
ar = A.request(param_values=pva)
cells = [ar.get_title()]
for d in (pv.start_date, pv.end_date):
ar = A.request(
param_values=dict(pva, start_date=d, end_date=d))
#~ print 20130527, ar
cells.append(ar)
return cells
yield add(
pcsw.CoachedClients, observed_event=pcsw.ClientEvents.active)
yield add(isip.Contracts, observed_event=isip.ContractEvents.active)
#~ yield add(isip.Contracts,isip.ContractEvents.ended)
yield add(jobs.Contracts, observed_event=isip.ContractEvents.active)
#~ yield add(jobs.Contracts,isip.ContractEvents.ended)
if hasattr(courses, 'PendingCourseRequests'):
# chatelet uses `lino.modlib.courses` which doesn't have
# this table.
yield add(courses.PendingCourseRequests)
all_contracts = isip.Contracts.request(
param_values=dict(
start_date=pv.start_date,
end_date=pv.end_date)).get_data_iterator()
# DISTINCT on fields doesn't work in sqlite
study_types = set(all_contracts.values_list('study_type', flat=True))
#~ print 20130527, study_types
for st in study_types:
if st is not None:
yield add(isip.Contracts,
observed_event=isip.ContractEvents.active,
study_type=cv.StudyType.objects.get(pk=st))
class PeriodicNumbers(dd.VirtualTable):
label = _("Indicateurs d'activité")
auto_fit_column_widths = True
column_names = "description number"
display_mode = 'html'
hide_sums = True
@dd.displayfield(_("Description"))
def description(self, row, ar):
return row[0]
@dd.requestfield(_("Number"))
def number(self, row, ar):
return row[1]
@classmethod
def get_data_rows(self, ar):
mi = ar.master_instance
if mi is None:
return
DSBE = rt.models.coachings.CoachingType.objects.get(
pk=isip.COACHINGTYPE_DSBE)
def add(A, **pva):
#~ pva = dict(**kw)
ar = A.request(param_values=pva)
cells = [ar.get_title()]
ar = A.request(
param_values=dict(pva, start_date=mi.start_date, end_date=mi.end_date))
cells.append(ar)
return cells
#~ def add(A,oe):
#~ cells = ["%s %s" % (A.model._meta.verbose_name_plural,oe.text)]
#~ pv = dict(start_date=mi.start_date,end_date=mi.end_date)
#~ pv.update(observed_event=oe)
#~ ar = A.request(param_values=pv)
#~ cells.append(ar)
#~ return cells
yield add(
rt.models.coachings.Coachings,
observed_event=PeriodEvents.started, coaching_type=DSBE)
yield add(
rt.models.coachings.Coachings,
observed_event=PeriodEvents.active, coaching_type=DSBE)
yield add(
rt.models.coachings.Coachings,
observed_event=PeriodEvents.ended, coaching_type=DSBE)
yield add(pcsw.Clients, observed_event=pcsw.ClientEvents.active)
yield add(pcsw.Clients, observed_event=pcsw.ClientEvents.created)
yield add(pcsw.Clients, observed_event=pcsw.ClientEvents.modified)
for A in (isip.Contracts, jobs.Contracts):
yield add(A, observed_event=isip.ContractEvents.started)
yield add(A, observed_event=isip.ContractEvents.active)
yield add(A, observed_event=isip.ContractEvents.ended)
yield add(A, observed_event=isip.ContractEvents.decided)
yield add(A, observed_event=isip.ContractEvents.issued)
class CoachingEndingsByUser(dd.VentilatingTable, CoachingEndings):
label = _("Coaching endings by user")
hide_zero_rows = True
@classmethod
def get_ventilated_columns(self):
try:
DSBE = rt.models.coachings.CoachingType.objects.get(
pk=isip.COACHINGTYPE_DSBE)
except rt.models.coachings.CoachingType.DoesNotExist:
DSBE = None
def w(user):
def func(fld, obj, ar):
mi = ar.master_instance
if mi is None:
return None
pv = dict(start_date=mi.start_date, end_date=mi.end_date)
pv.update(observed_event=PeriodEvents.ended)
pv.update(coaching_type=DSBE)
if user is not None:
pv.update(coached_by=user)
pv.update(ending=obj)
return rt.models.coachings.Coachings.request(param_values=pv)
return func
user_types = [p for p in UserTypes.items()
if p.has_required_roles([IntegUser])]
for u in settings.SITE.user_model.objects.filter(user_type__in=user_types):
yield dd.RequestField(w(u), verbose_name=str(u.username))
yield dd.RequestField(w(None), verbose_name=_("Total"))
# not currently used
class CoachingEndingsByType(dd.VentilatingTable, CoachingEndings):
label = _("Coaching endings by type")
@classmethod
def get_ventilated_columns(self):
def w(ct):
def func(fld, obj, ar):
mi = ar.master_instance
if mi is None:
return None
pv = dict(start_date=mi.start_date, end_date=mi.end_date)
pv.update(observed_event=PeriodEvents.ended)
if ct is not None:
pv.update(coaching_type=ct)
pv.update(ending=obj)
return rt.models.coachings.Coachings.request(
param_values=pv)
return func
for ct in rt.models.coachings.CoachingType.objects.all():
yield dd.RequestField(w(ct), verbose_name=str(ct))
yield dd.RequestField(w(None), verbose_name=_("Total"))
class ContractsByType(dd.VentilatingTable):
contracts_table = isip.Contracts
contract_type_model = isip.ContractType
observed_event = isip.ContractEvents.ended
selector_key = NotImplementedError
hide_zero_rows = True
@classmethod
def get_observed_period(self, mi):
return dict(start_date=mi.start_date, end_date=mi.end_date)
@classmethod
def get_ventilated_columns(self):
def w(ct):
def func(fld, obj, ar):
mi = ar.master_instance
if mi is None:
return None
pv = self.get_observed_period(mi)
pv.update(observed_event=self.observed_event)
if ct is not None:
pv.update(type=ct)
pv[self.selector_key] = obj
return self.contracts_table.request(param_values=pv)
return func
for ct in self.contract_type_model.objects.all():
yield dd.RequestField(w(ct), verbose_name=str(ct))
yield dd.RequestField(w(None), verbose_name=_("Total"))
class ContractEndingsByType(ContractsByType, isip.ContractEndings):
label = _("Contract endings by type")
selector_key = 'ending'
class JobsContractEndingsByType(ContractEndingsByType):
contracts_table = jobs.Contracts
contract_type_model = jobs.ContractType
from lino_welfare.modlib.users.desktop import Users
class ContractsPerUserAndContractType(ContractsByType, Users):
label = _("PIIS par agent et type")
#~ filter = Q(coaching_type=isip.COACHINGTYPE_DSBE)
contracts_table = isip.Contracts
observed_event = isip.ContractEvents.active
contract_type_model = isip.ContractType
selector_key = 'user'
@classmethod
def get_observed_period(self, mi):
return dict(start_date=mi.end_date, end_date=mi.end_date)
class JobsContractsPerUserAndContractType(ContractsPerUserAndContractType):
label = _("Art60§7 par agent et type")
contracts_table = jobs.Contracts
contract_type_model = jobs.ContractType
class StudyTypesAndContracts(cv.StudyTypes, dd.VentilatingTable):
label = _("PIIS et types de formation")
help_text = _("""Nombre de PIIS actifs par
type de formation et type de contrat.""")
contracts_table = isip.Contracts
@classmethod
def get_request_queryset(cls, ar):
#~ logger.info("20120608.get_request_queryset param_values = %r",ar.param_values)
qs = super(StudyTypesAndContracts, cls).get_request_queryset(ar)
qs = qs.annotate(count=models.Count('contract'))
return qs.filter(count__gte=1)
#~ return qs
@dd.virtualfield(dd.ForeignKey(
'cv.StudyType')) # , verbose_name=_("Description")))
def description(self, obj, ar):
return obj
@classmethod
def get_ventilated_columns(self):
def w(ct):
def func(fld, obj, ar):
mi = ar.master_instance
if mi is None:
return None
pv = dict(start_date=mi.start_date, end_date=mi.end_date)
pv.update(observed_event=isip.ContractEvents.active)
if ct is not None:
pv.update(type=ct)
pv.update(study_type=obj)
return self.contracts_table.request(param_values=pv)
return func
for ct in isip.ContractType.objects.filter(needs_study_type=True):
yield dd.RequestField(w(ct), verbose_name=str(ct))
yield dd.RequestField(w(None), verbose_name=_("Total"))
class CompaniesAndContracts(contacts.Companies, dd.VentilatingTable):
label = _("Organisations externes et contrats")
help_text = _("""Nombre de PIIS actifs par
organisation externe et type de contrat.""")
contracts_table = isip.Contracts
contract_types = isip.ContractType
hide_zero_rows = True
@classmethod
def get_request_queryset(cls, ar):
qs = super(CompaniesAndContracts, cls).get_request_queryset(ar)
qs = qs.annotate(count=models.Count(
'isip_contractpartner_set_by_company'))
return qs.filter(count__gte=1)
@dd.virtualfield(dd.ForeignKey('contacts.Company'))
def description(self, obj, ar):
return obj
@classmethod
def get_ventilated_columns(self):
def w(ct):
def func(fld, obj, ar):
mi = ar.master_instance
if mi is None:
return None
pv = dict(start_date=mi.start_date, end_date=mi.end_date)
pv.update(observed_event=isip.ContractEvents.active)
if ct is not None:
pv.update(type=ct)
pv.update(company=obj)
return self.contracts_table.request(param_values=pv)
return func
for ct in self.contract_types.objects.all():
label = str(ct)
yield dd.RequestField(w(ct), verbose_name=label)
yield dd.RequestField(w(None), verbose_name=_("Total"))
class JobProvidersAndContracts(CompaniesAndContracts):
"""Ventilates number of job supplyments by provider and type."""
label = _("Job providers and contrats")
contracts_table = jobs.Contracts
contract_types = jobs.ContractType
@classmethod
def get_request_queryset(cls, ar):
#~ qs = super(CompaniesAndContracts,cls).get_request_queryset(ar)
qs = jobs.JobProvider.objects.all()
qs = qs.annotate(count=models.Count('jobs_contract_set_by_company'))
return qs.filter(count__gte=1)
class ActivityReport(Report):
"""Gives an overview about the work of the Integration Service during
a given period.
"""
required_roles = dd.login_required(IntegUser)
label = _("Activity Report")
parameters = ObservedDateRange(
# start_date=models.DateField(verbose_name=_("Period from")),
# end_date=models.DateField(verbose_name=_("until")),
include_jobs=models.BooleanField(
verbose_name=dd.plugins.jobs.verbose_name),
include_isip=models.BooleanField(verbose_name=_("ISIP")),
)
params_layout = "start_date end_date include_jobs include_isip"
#~ params_panel_hidden = True
@classmethod
def param_defaults(self, ar, **kw):
D = datetime.date
kw.update(start_date=D(dd.today().year, 1, 1))
kw.update(end_date=D(dd.today().year, 12, 31))
return kw
@classmethod
def get_story(cls, self, ar):
yield E.h2(gettext("Introduction"))
yield E.p("Ceci est un ", E.b("rapport"), """,
càd un document complet généré par Lino, contenant des
sections, des tables et du texte libre.
Dans la version écran cliquer sur un chiffre pour voir d'où
il vient.
""")
yield E.h2(str(UsersWithClients.label))
yield UsersWithClients
yield E.h2(gettext("Indicateurs généraux"))
yield CompareRequestsTable
yield E.p('.')
yield PeriodicNumbers
yield E.h2(gettext("Causes d'arrêt des interventions"))
yield CoachingEndingsByUser
#~ yield E.p('.')
#~ yield CoachingEndingsByType
yield E.h1(str(isip.Contract._meta.verbose_name_plural))
#~ yield E.p("Voici quelques tables complètes:")
for A in (ContractsPerUserAndContractType, CompaniesAndContracts,
ContractEndingsByType, StudyTypesAndContracts):
yield E.h2(str(A.label))
# if A.help_text:
# yield E.p(unicode(A.help_text))
yield A
yield E.h1(str(jobs.Contract._meta.verbose_name_plural))
for A in (JobsContractsPerUserAndContractType,
JobProvidersAndContracts, JobsContractEndingsByType):
yield E.h2(str(A.label))
# if A.help_text:
# yield E.p(unicode(A.help_text))
yield A
| agpl-3.0 | -1,453,504,115,902,986,000 | 35.333333 | 89 | 0.612664 | false |
PennBBL/utils | hopsonr/mprage_processing/xnat_bet.py | 1 | 18954 | #!/import/monstrum/Applications/epd-7.1/bin/python
from nipype.interfaces import fsl
import xnatmaster30 as xnatmaster
import argparse
import sys
import array
import subprocess
import os
import fnmatch
import shutil
import uuid
'''
By Chadtj
V1 Initial Version
V2 Uses new bbl:bet datatype and checks for existing nifti - no functional changes
'''
def slice_bet(tmpdir,niftiname):
slice = fsl.Slicer()
slice.inputs.in_file = niftiname
slice.inputs.args = '-s 1 -x 0.4 ' +tmpdir+'/1.png -x 0.5 '+tmpdir+'/2.png -x 0.6 '+tmpdir+'/3.png -y 0.4 '+tmpdir+'/4.png -y 0.5 '+tmpdir+'/5.png -y 0.6 '+tmpdir+'/6.png -z 0.4 '+tmpdir+'/7.png -z 0.5 '+tmpdir+'/8.png -z 0.6'
slice.inputs.out_file = tmpdir+'/9.png'
res = slice.run()
print "Sliced"
def do_bet(head, fracval, fourD, logpath, tmpdir, prov_list):
bet = fsl.BET()
prefile = head.split('/')[-1]
prefile_wo_ext = prefile[:prefile.find(".nii")]
if fourD == False:
bet.inputs.in_file = head
bet.inputs.out_file = tmpdir + prefile_wo_ext + version + str(fracval) + 'frac_3D.nii.gz'
bet.inputs.frac = fracval
bet.inputs.mask = True
xnatmaster.add_to_log(logpath, bet.cmdline)
prov_list = xnatmaster.track_provenance(prov_list,'/import/monstrum/Applications/fsl_4.1.7/bin/bet','v2',head+' '+tmpdir + prefile_wo_ext + version + str(fracval) + 'frac_3D.nii.gz -m')
result = bet.run()
slice_bet(tmpdir,tmpdir + prefile_wo_ext + version + str(fracval) + 'frac_3D.nii.gz')
return tmpdir + prefile_wo_ext + version + str(fracval) + 'frac_3D'
else:
bet.inputs.in_file = head
bet.inputs.mask = True
bet.inputs.out_file = tmpdir + prefile_wo_ext + version + str(fracval) + 'frac_4D.nii.gz'
bet.inputs.functional=True
bet.inputs.frac = fracval
xnatmaster.add_to_log(logpath, bet.cmdline)
prov_list = xnatmaster.track_provenance(prov_list,'/import/monstrum/Applications/fsl_4.1.7/bin/bet','v2',head+' '+tmpdir + prefile_wo_ext + version + str(fracval) + 'frac_4D.nii.gz -F')
result = bet.run()
slice_bet(tmpdir,tmpdir + prefile_wo_ext + version + str(fracval) + 'frac_4D.nii.gz')
return tmpdir + prefile_wo_ext + version + str(fracval) + 'frac_4D'
###Setup input args here
parser = argparse.ArgumentParser(description='Python Pipeline BET Action Script');
group = parser.add_argument_group('Required')
group.add_argument('-scanid', action="store", dest='scanid', required=True, help='MR Session (scan id) of the Dicoms to convert')
group.add_argument('-download',action="store", dest='download', required=True, help='Should this download the results or just put it back into XNAT? 1 to download or 0 to not download')
optgroup=parser.add_argument_group('Optional')
optgroup.add_argument('-frac', action="store", dest='frac', required=False, help='Fractional intensity value. Default .5 ', default=.5)
optgroup.add_argument('-fourD', action="store", dest='fourD', required=False, help='Is this 4D FMRI Data? Default 0 for MPRAGE', default='0')
optgroup.add_argument('-scantype',action="store", dest='scantype', required=False, help='Enter the type of scan, currently available options are MPRAGE, T2, DTI, DWI, EPI, ASL', default='')
optgroup.add_argument('-upload',action="store", dest='upload', required=False, help='Should this result be uploaded into XNAT? 1 to upload, 0 to keep locally. Default: 0', default='0')
optgroup.add_argument('-outdir',action="store", dest='outdir', required=False, help='Name of the output directory if downloading the result', default='')
optgroup.add_argument('-tmpdir',action="store", dest='tmpdir', required=False, help='Name of the temporary directory to do work', default='/import/monstrum/tmp/bet')
optgroup.add_argument('-configfile',action="store",dest='configfile',required=False, help='Enter path to your XNAT config file if desired.', default='X')
optgroup.add_argument('-seqname',action="store",dest='seqname',required=False, help='Enter a valid sequence name here, eg. DTI, MPRAGE, DWI, T2, RESTBOLD, FRAC2BACK, IDEMO, ASL, or ALL',default='-1')
optgroup.add_argument('-check_existing',action="store",dest='findexisting',required=False,help='Just download Nifti if it already exists. 1 if yes, 0 to force a new nifti to be made.', default='1')
optgroup.add_argument('-sequence_id',action="store", dest='sequence_id', required=False, help='Probably for internal XNAT pipeline use only', default='-1')
parser.add_argument('-version', action='version', version='%(prog)s 2.0')
version='_bet_v2_'
########
###Parse input args here
inputArguments = parser.parse_args()
scanid = inputArguments.scanid
download = inputArguments.download
outdir = inputArguments.outdir
scantype = inputArguments.scantype
tmpdir = inputArguments.tmpdir
upload = inputArguments.upload
configfile = inputArguments.configfile
sn = inputArguments.seqname
fourD = inputArguments.fourD
frac = inputArguments.frac
findexisting = inputArguments.findexisting
sid = inputArguments.sequence_id
########
### Done setting up inputs #####
if scantype != '' and int(sid) != -1:
print "Got both scantype and sequence_id specified; sequence_id takes priority"
scantype = scantype.upper()
if outdir == '' and download == '1':
print "Need to specify full path to the output directory with the download flag"
sys.exit(1)
if download == '0' and upload == '0':
print "Please specify either -download 1 and/or -upload 1, otherwise this script has no real purpose"
sys.exit(1)
if outdir == '':
outdir = tmpdir
scanid_array = xnatmaster.parse_scanids(scanid)
central = xnatmaster.setup_xnat_connection(configfile)
corrected_scanid_array = []
for i in range(0,len(scanid_array)):
corrected_scanid_array.append(xnatmaster.add_zeros_to_scanid(scanid_array[i],central))
print str(scanid_array[i]) + ' is valid.'
print corrected_scanid_array
tmpdir = xnatmaster.append_slash(tmpdir)
tmpuuid = uuid.uuid4()
tmpdir = tmpdir + str(tmpuuid) + '/'
if not xnatmaster.ensure_dir_exists(tmpdir) and xnatmaster.ensure_write_permissions(tmpdir):
print "Could not create tmpdir"
sys.exit(1)
if str(download) == '1':
outdir = xnatmaster.append_slash(outdir)
if not xnatmaster.ensure_dir_exists(outdir) and xnatmaster.ensure_write_permissions(outdir):
sys.exit(1)
'''
Done creating neccessary directories
'''
'''
BET specific Validation on input args
'''
if fourD=='0' or fourD.lower()=='no' or fourD.lower()=='false':
fourD=False
elif fourD=='1' or fourD.lower()=='yes' or fourD.lower()=='true':
fourD=True
else:
fourD=False
if float(frac) < 0 or float(frac) > 1:
print "Invalid frac value. Must be between 0 and 1."
sys.exit(1)
for i in corrected_scanid_array:
print "Now dealing with scanid: " + str(i) + '.'
newtmpdir = tmpdir + str(i) + '/'
# newoutdir = outdir + str(i) + '/'
newlogdir = newtmpdir + 'logs/'
# if not xnatmaster.ensure_dir_exists(newoutdir) and xnatmaster.ensure_write_permissions(newoutdir):
# sys.exit(1)
if not xnatmaster.ensure_dir_exists(newtmpdir) and xnatmaster.ensure_write_permissions(newtmpdir):
sys.exit(1)
if not xnatmaster.ensure_dir_exists(newlogdir) and xnatmaster.ensure_write_permissions(newlogdir):
sys.exit(1)
tstamp = xnatmaster.do_tstamp()
logpath = newlogdir + str(i) + str(version) + str(tstamp) + '.log'
otherparams = '-upload ' + str(upload) + ' -download ' + str(download) + ' -outdir ' + str(outdir) + ' -tmpdir ' + str(tmpdir) + ' -scantype ' + str(scantype) + ' -sequence_id ' + str(sid) + \
' -seqname ' + str(sn) + ' -configfile ' + str(configfile)
xnatmaster.print_all_settings('bet.py',version, i, tstamp, otherparams , logpath)
matched_sequences = xnatmaster.find_matched_sequences(i,scantype,sid,sn,central)
print matched_sequences
for line in matched_sequences:
try:
subj_id = line.get('subject_id')
seqname = line.get('qlux_qluxname')
sessid = line.get('session_id')
proj_name = line.get('project')
scandate = line.get('date')
seq_id = line.get('imagescan_id')
imgorient = line.get('mr_imageorientationpatient')
formname = line.get('mr_seriesdescription')
if formname == 'MoCoSeries':
formname = 'ep2d_se_pcasl_PHC_1200ms_moco'
#NewDir str begin
formname = formname.replace("(","_")
formname = formname.replace(")","_")
formname = formname.replace(" ","_")
nonzeroi = str(i).lstrip('0')
nonzerosubid = str(subj_id).lstrip('0')
newoutdir = outdir + str(nonzerosubid) + '_' + str(nonzeroi) + '/' + str(seq_id) + '_' + str(seqname)+'/bet/'
if not xnatmaster.ensure_dir_exists(newoutdir) and xnatmaster.ensure_write_permissions(newoutdir) and not xnatmaster.ensure_dir_exists(newtmpdir) and xnatmaster.ensure_write_permissions(newtmpdir):
sys.exit(1)
##New dir str end
print "Form: " + str(formname);
xnatmaster.add_to_log(logpath, "Processing sequence: " + seqname + ":" + str(seq_id))
global prov_list
prov_list = []
betfound = 0
niftifound = 0
donewithsequence = 0
niftifound = xnatmaster.existing_nifti(i,seq_id,central)
if niftifound < 1:
print "Could not find nifti for this scan. Please run dicoms2nifti before this script."
sys.exit(1)
if findexisting == '1':
xnatmaster.add_to_log(logpath, "Checking for existing BET: " + seq_id)
betfound = xnatmaster.existing_bet(i,seq_id,central)
if betfound > 0:
if download == '1':
xnatmaster.get_bet(i, seq_id, newoutdir, central, proj_name, subj_id)
xnatmaster.add_to_log(logpath, "Downloaded existing bet to : " + newoutdir + " Done with this sequence.")
donewithsequence=1
if upload == '1':
donewithsequence=1
else:
xnatmaster.add_to_log(logpath, "No existing BET: " + seq_id)
if not xnatmaster.ensure_dir_exists(tmpdir+'NIFTI') and xnatmaster.ensure_write_permissions(tmpdir+'NIFTI'):
sys.exit(0)
niftidict = xnatmaster.get_nifti(i, seq_id, tmpdir+'NIFTI/', central, proj_name, subj_id)
print niftidict
from_seq = niftidict['fromname']
starting_nifti = niftidict['niftipath']
print "Nifti to work from is in: " + str(starting_nifti)
result = do_bet(starting_nifti,frac,fourD,logpath,tmpdir,prov_list)
maskfile = result+'_mask.nii.gz'
result = result+'.nii.gz'
print prov_list
print "Resulting Nifti is at: " + result
if download == '1':
shutil.copyfile(result,newoutdir+result.split('/')[-1])
shutil.copyfile(maskfile,newoutdir+maskfile.split('/')[-1])
shutil.copyfile(logpath,newoutdir+logpath.split('/')[-1])
print "Downloaded nifti to: " + newoutdir+result.split('/')[-1]
if upload == '1':
xnatmaster.add_to_log(logpath,"Now saving into XNAT.")
#Do upload here
thetype="bbl:bet"
assname=str(sessid) + '_' + str(formname) + '_BET_SEQ0' + str(seq_id) + '_RUN01'
assname=assname.replace(".","_")
assname=assname.replace("-","_")
myproject=central.select('/projects/'+proj_name)
assessor=myproject.subject(subj_id).experiment(sessid).assessor(assname)
if assessor.exists():
print "Found original run..."
assname=xnatmaster.get_new_assessor(sessid,subj_id,formname,seq_id,proj_name,central)
myproject=central.select('/projects/'+proj_name)
assessor=myproject.subject(subj_id).experiment(sessid).assessor(assname)
assessor.create(**{'assessors':thetype,'xsi:type':thetype,thetype+'/date':str(xnatmaster.get_today()),thetype+'/imageScan_ID':str(seq_id),thetype+'/validationStatus':'unvalidated',thetype+'/status':'completed',thetype+'/source_id':str(from_seq),thetype+'/id':str(assname),thetype+'/SequenceName':formname,thetype+'/PipelineDataTypeVersion':'1.0',thetype+'/PipelineScriptVersion':'2.0'});
xnatmaster.extract_provenance(assessor,prov_list)
assessor.out_resource('LOG').file(str(sessid) + '_' + formname + '_SEQ0' + seq_id + '.log').put(logpath)
assessor.out_resource('BET').file(str(sessid) + '_' + formname + '_BET_SEQ0' + seq_id + '.nii.gz').put(result)
assessor.out_resource('BETMASK').file(str(sessid) + '_' + formname + '_BETMASK_SEQ0' + seq_id + '_mask.nii.gz').put(maskfile)
assessor.out_resource('QAIMAGE').file('1.png').put(tmpdir+'/1.png')
assessor.out_resource('QAIMAGE').file('2.png').put(tmpdir+'/2.png')
assessor.out_resource('QAIMAGE').file('3.png').put(tmpdir+'/3.png')
assessor.out_resource('QAIMAGE').file('4.png').put(tmpdir+'/4.png')
assessor.out_resource('QAIMAGE').file('5.png').put(tmpdir+'/5.png')
assessor.out_resource('QAIMAGE').file('6.png').put(tmpdir+'/6.png')
assessor.out_resource('QAIMAGE').file('7.png').put(tmpdir+'/7.png')
assessor.out_resource('QAIMAGE').file('8.png').put(tmpdir+'/8.png')
assessor.out_resource('QAIMAGE').file('9.png').put(tmpdir+'/9.png')
if findexisting == '0' and donewithsequence == 0 :
if betfound > 0:
xnatmaster.add_to_log(logpath, "Forcing the creation of a new BET: " + seq_id)
else:
xnatmaster.add_to_log(logpath, "Creating new BET: " + seq_id)
if not xnatmaster.ensure_dir_exists(tmpdir+'NIFTI') and xnatmaster.ensure_write_permissions(tmpdir+'NIFTI'):
sys.exit(0)
niftidict = xnatmaster.get_nifti(i, seq_id, tmpdir+'NIFTI/', central, proj_name, subj_id)
from_seq = niftidict['fromname']
starting_nifti = niftidict['niftipath']
print "Nifti to work from is in: " + str(starting_nifti)
result = do_bet(starting_nifti,frac,fourD,logpath,tmpdir,prov_list)
maskfile = result+'_mask.nii.gz'
result = result+'.nii.gz'
print prov_list
print "Resulting Nifti is at: " + result
if download == '1':
shutil.copyfile(result,newoutdir+result.split('/')[-1])
shutil.copyfile(maskfile,newoutdir+maskfile.split('/')[-1])
shutil.copyfile(logpath,newoutdir+logpath.split('/')[-1])
print "Downloaded nifti to: " + newoutdir+result.split('/')[-1]
if upload == '1':
xnatmaster.add_to_log(logpath,"Now saving into XNAT.")
#Do upload here
thetype="bbl:bet"
assname=str(sessid) + '_' + str(formname) + '_BET_SEQ0' + str(seq_id) + '_RUN01'
assname=assname.replace(".","_")
assname=assname.replace("-","_")
myproject=central.select('/projects/'+proj_name)
assessor=myproject.subject(subj_id).experiment(sessid).assessor(assname)
if assessor.exists():
print "Found original run..."
assname=xnatmaster.get_new_assessor(sessid,subj_id,formname,seq_id,proj_name,central)
myproject=central.select('/projects/'+proj_name)
assessor=myproject.subject(subj_id).experiment(sessid).assessor(assname)
assessor.create(**{'assessors':thetype,'xsi:type':thetype,thetype+'/date':str(xnatmaster.get_today()),thetype+'/imageScan_ID':str(seq_id),thetype+'/validationStatus':'unvalidated',thetype+'/status':'completed',thetype+'/source_id':str(from_seq),thetype+'/id':str(assname),thetype+'/SequenceName':formname,thetype+'/PipelineDataTypeVersion':'1.0',thetype+'/PipelineScriptVersion':'2.0'});
xnatmaster.extract_provenance(assessor,prov_list)
assessor.out_resource('LOG').file(str(sessid) + '_' + formname + '_SEQ0' + seq_id + '.log').put(logpath)
assessor.out_resource('BET').file(str(sessid) + '_' + formname + '_BET_SEQ0' + seq_id + '.nii.gz').put(result)
assessor.out_resource('BETMASK').file(str(sessid) + '_' + formname + '_BETMASK_SEQ0' + seq_id + '_mask.nii.gz').put(maskfile)
assessor.out_resource('QAIMAGE').file('1.png').put(tmpdir+'/1.png')
assessor.out_resource('QAIMAGE').file('2.png').put(tmpdir+'/2.png')
assessor.out_resource('QAIMAGE').file('3.png').put(tmpdir+'/3.png')
assessor.out_resource('QAIMAGE').file('4.png').put(tmpdir+'/4.png')
assessor.out_resource('QAIMAGE').file('5.png').put(tmpdir+'/5.png')
assessor.out_resource('QAIMAGE').file('6.png').put(tmpdir+'/6.png')
assessor.out_resource('QAIMAGE').file('7.png').put(tmpdir+'/7.png')
assessor.out_resource('QAIMAGE').file('8.png').put(tmpdir+'/8.png')
assessor.out_resource('QAIMAGE').file('9.png').put(tmpdir+'/9.png')
except IndexError, e:
xnatmaster.add_to_log(logpath,e)
| gpl-3.0 | -5,262,109,881,789,303,000 | 60.739414 | 428 | 0.580775 | false |
tensorflow/datasets | tensorflow_datasets/image/symmetric_solids/symmetric_solids.py | 1 | 5595 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""symmetric_solids dataset."""
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """
This is a pose estimation dataset, consisting of symmetric 3D shapes where
multiple orientations are visually indistinguishable.
The challenge is to predict all equivalent orientations when only one
orientation is paired with each image during training (as is the scenario for
most pose estimation datasets). In contrast to most pose estimation datasets,
the full set of equivalent orientations is available for evaluation.
There are eight shapes total, each rendered from 50,000 viewpoints distributed
uniformly at random over the full space of 3D rotations.
Five of the shapes are featureless -- tetrahedron, cube, icosahedron, cone, and
cylinder.
Of those, the three Platonic solids (tetrahedron, cube, icosahedron) are
annotated with their 12-, 24-, and 60-fold discrete symmetries, respectively.
The cone and cylinder are annotated with their continuous symmetries discretized
at 1 degree intervals. These symmetries are provided for evaluation; the
intended supervision is only a single rotation with each image.
The remaining three shapes are marked with a distinguishing feature.
There is a tetrahedron with one red-colored face, a cylinder with an off-center
dot, and a sphere with an X capped by a dot. Whether or not the distinguishing
feature is visible, the space of possible orientations is reduced. We do not
provide the set of equivalent rotations for these shapes.
Each example contains of
- the 224x224 RGB image
- a shape index so that the dataset may be filtered by shape.
The indices correspond to:
- 0 = tetrahedron
- 1 = cube
- 2 = icosahedron
- 3 = cone
- 4 = cylinder
- 5 = marked tetrahedron
- 6 = marked cylinder
- 7 = marked sphere
- the rotation used in the rendering process, represented as a 3x3 rotation matrix
- the set of known equivalent rotations under symmetry, for evaluation.
In the case of the three marked shapes, this is only the rendering rotation.
"""
_CITATION = """\
@inproceedings{implicitpdf2021,
title = {Implicit Representation of Probability Distributions on the Rotation
Manifold},
author = {Murphy, Kieran and Esteves, Carlos and Jampani, Varun and
Ramalingam, Srikumar and Makadia, Ameesh}
booktitle = {International Conference on Machine Learning}
year = {2021}
}
"""
_DATA_PATH = 'https://storage.googleapis.com/gresearch/implicit-pdf/symsol_dataset.zip'
_IMAGE_DIMENSIONS = (224, 224, 3)
_SHAPE_NAMES = [
'tet',
'cube',
'icosa',
'cone',
'cyl',
'tetX',
'cylO',
'sphereX',
]
class SymmetricSolids(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for symmetric_solids dataset."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image':
tfds.features.Image(shape=_IMAGE_DIMENSIONS, dtype=tf.uint8),
'label_shape':
tfds.features.ClassLabel(names=_SHAPE_NAMES),
'rotation':
tfds.features.Tensor(shape=(3, 3), dtype=tf.float32),
'rotations_equivalent':
tfds.features.Tensor(shape=(None, 3, 3), dtype=tf.float32),
}),
# These are returned if `as_supervised=True` in `builder.as_dataset`.
supervised_keys=('image', 'rotation'),
homepage='https://implicit-pdf.github.io',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
extracted_paths = dl_manager.download_and_extract(_DATA_PATH)
return {
'train':
self._generate_examples(
images_path=extracted_paths / 'train/images',
rotations_path=extracted_paths / 'train/rotations.npz'),
'test':
self._generate_examples(
images_path=extracted_paths / 'test/images',
rotations_path=extracted_paths / 'test/rotations.npz'),
}
def _generate_examples(self, images_path, rotations_path):
"""Yields examples."""
with rotations_path.open('rb') as f:
rotations = dict(np.load(f))
for key in rotations:
rotations[key] = rotations[key].astype(np.float32)
for image_path in images_path.glob('*.png'):
fname = image_path.name
shape_name, image_index = fname.split('_')
image_index = int(image_index.split('.')[0])
shape_id = _SHAPE_NAMES.index(shape_name)
yield fname, {
'image': image_path,
'label_shape': shape_id,
'rotation': rotations[shape_name][image_index, 0],
'rotations_equivalent': rotations[shape_name][image_index],
}
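# Illustrative usage sketch (not part of the original builder): loading a few
# training examples through tfds, assuming this builder is registered under
# the dataset name 'symmetric_solids'.
if __name__ == '__main__':
  ds = tfds.load('symmetric_solids', split='train', as_supervised=True)
  for image, rotation in ds.take(2):
    print(image.shape, rotation.shape)  # expected: (224, 224, 3) and (3, 3)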
| apache-2.0 | 5,088,339,265,059,656,000 | 36.3 | 87 | 0.690974 | false |
bobmyhill/burnman | burnman/data/input_raw_endmember_datasets/HGP633data_to_burnman.py | 2 | 9875 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2019 by the BurnMan team, released under the GNU
# GPL v2 or later.
# This is a standalone program that converts the Holland and Powell data format
# into the standard burnman format (printed to file)
# It only outputs properties of solid endmembers - other endmembers are
# currently ignored.
import sys
import os.path
import pprint
from collections import OrderedDict
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../../../burnman'):
sys.path.insert(1, os.path.abspath('../../..'))
import burnman
from burnman.processchemistry import dictionarize_formula, formula_mass
if not os.path.isfile('tc-ds633.txt'):
print('This code requires the data file tc-ds633.txt.')
print(
'This file is bundled with the software package THERMOCALC, which can be found here:')
print(
'https://www.esc.cam.ac.uk/research/research-groups/research-projects/tim-hollands-software-pages/thermocalc')
print('')
print('Please download the file and place it in this directory.')
exit()
# Components
components = ['Si', 'Ti', 'Al', 'Fe', 'Mg', 'Mn', 'Ca', 'Na',
'K', 'O', 'H', 'C', 'Cl', 'e-', 'Ni', 'Zr', 'S', 'Cu', 'Cr']
class Endmember:
def __init__(self, name, atoms, formula, sites, comp, H, S, V, Cp, a, k, flag, od):
if flag != -1 and flag != -2 and k[0] > 0:
formula = dictionarize_formula(formula)
self.params = OrderedDict([('name', name),
('formula', formula),
('equation_of_state', 'hp_tmt'),
('H_0', round(H * 1e3, 10)),
('S_0', round(S * 1e3, 10)),
('V_0', round(V * 1e-5, 15)),
('Cp', [round(Cp[0] * 1e3, 10),
round(Cp[1] * 1e3, 10),
round(Cp[2] * 1e3, 10),
round(Cp[3] * 1e3, 10)]),
('a_0', a),
('K_0', round(k[0] * 1e8, 10)),
('Kprime_0', k[1]),
('Kdprime_0', round(k[2] * 1e-8, 15)),
('n', sum(formula.values())),
('molar_mass', round(formula_mass(formula), 10))])
if flag == 1:
self.landau_hp = OrderedDict([('P_0', 1e5),
('T_0', 298.15),
('Tc_0', od[0]),
('S_D', round(od[1] * 1e3, 10)),
('V_D', round(od[2] * 1e-5, 10))])
if flag == 2:
self.bragg_williams = OrderedDict([('deltaH', round(od[0] * 1e3, 10)),
('deltaV', round(od[1] * 1e-5, 15)),
('Wh', round(od[2] * 1e3, 10)),
('Wv', round(od[3] * 1e-5, 15)),
('n', od[4]),
('factor', od[5])])
# Read dataset
with open('tc-ds633.txt', 'r') as f:
ds = [line.split() for line in f]
def getmbr(ds, mbr):
mbrarray = []
for i in range(0, int(ds[0][0])):
if ds[i * 4 + 3][0] == mbr:
atoms = 0.0
formula = ''
for j in range(3, len(ds[i * 4 + 3]) - 1, 2):
atoms += float(ds[i * 4 + 3][j])
formula = formula + \
components[int(ds[i * 4 + 3][j - 1]) - 1] + str(
round(float(ds[i * 4 + 3][j]), 10))
if mbr.endswith('L'):
flag = -2
od = [0]
else:
flag = int(ds[i * 4 + 6][4])
endmember = Endmember(mbr, atoms, formula, int(ds[i * 4 + 3][1]),
list(map(float, ds[i * 4 + 3][2:(len(ds[i * 4 + 3]) - 1)])),
float(ds[i * 4 + 4][0]), float(ds[i * 4 + 4][1]),
float(ds[i * 4 + 4][2]), map(float, ds[i * 4 + 5]),
float(ds[i * 4 + 6][0]), list(map(float, ds[i * 4 + 6][1:4])),
flag, list(map(float, ds[i * 4 + 6][5:])))
return endmember
with open('HGP_2018_ds633.py', 'wb') as outfile:
outfile.write('# This file is part of BurnMan - a thermoelastic and '
'thermodynamic toolkit for the Earth and Planetary Sciences\n'
'# Copyright (C) 2012 - 2019 by the BurnMan team, '
'released under the GNU \n# GPL v2 or later.\n\n\n'
'"""\n'
'HGP_2018 (ds-62)\n'
'Endmember minerals from Holland, Green and Powell (2018) and references therein\n'
'Dataset version 6.33\n'
'The values in this document are all in S.I. units,\n'
'unlike those in the original tc-ds633.txt\n'
'File autogenerated using HGP633data_to_burnman.py\n'
'"""\n\n'
'from ..mineral import Mineral\n'
'from ..processchemistry import dictionarize_formula, formula_mass\n\n')
outfile.write('"""\n'
'ENDMEMBERS\n'
'"""\n\n')
def pprint_ordered_dict(d, leading_string, extra_whitespace=0):
whitespace = ' ' * (len(leading_string)+2+extra_whitespace)
s = pprint.pformat(d)
s = s.replace('), ', ',\n{0}\''.format(whitespace))
s = s.replace('\', ', '\': ').replace(' \'(\'', '\'')
s = s.replace('OrderedDict([(', leading_string+'{').replace(')])', '}')
return s
formula = '0'
for i in range(int(ds[0][0])):
mbr = ds[i * 4 + 3][0]
M = getmbr(ds, mbr)
if mbr == 'and': # change silly abbreviation
mbr = 'andalusite'
# Print parameters
if hasattr(M, 'params'):
outfile.write('class {0} (Mineral):\n'.format(mbr)+
' def __init__(self):\n')
s = pprint_ordered_dict(M.params, leading_string = ' self.params = ')
s = s.replace('000000.0', 'e6')
outfile.write(s)
outfile.write('\n')
# Print property modifiers (if they exist)
if hasattr(M, 'landau_hp'):
outfile.write(' self.property_modifiers = [[')
s = pprint_ordered_dict(M.landau_hp, leading_string = '\'landau_hp\', ', extra_whitespace = 36)
outfile.write(s)
outfile.write(']]\n')
if hasattr(M, 'bragg_williams') and M.bragg_williams['factor'] > 0:
outfile.write(' self.property_modifiers = [[')
s = pprint_ordered_dict(M.bragg_williams, leading_string = '\'bragg_williams\', ', extra_whitespace = 36)
outfile.write(s)
outfile.write(']]\n')
outfile.write(' Mineral.__init__(self)\n\n')
outfile.write('def cov():\n'
' \"\"\"\n'
' A function which loads and returns the variance-covariance matrix of the\n'
' zero-point energies of all the endmembers in the dataset.\n\n'
' Returns\n'
' -------\n'
' cov : dictionary\n'
' Dictionary keys are:\n'
' - endmember_names: a list of endmember names, and\n'
' - covariance_matrix: a 2D variance-covariance array for the\n'
' endmember zero-point energies of formation\n'
' \"\"\"\n\n'
' from .HGP_2018_ds633_cov import cov\n'
' return cov\n\n')
# Process uncertainties
with open('HGP_2018_ds633_cov.py', 'wb') as outfile:
outfile.write('# This file is part of BurnMan - a thermoelastic and '
'thermodynamic toolkit for the Earth and Planetary Sciences\n'
'# Copyright (C) 2012 - 2019 by the BurnMan team, '
'released under the GNU \n# GPL v2 or later.\n\n\n'
'"""\n'
'HGP_2018 (ds-633) zero-point energy covariance matrix\n'
'Derived from Holland, Green and Powell (2018) and references therein\n'
'Dataset version 6.33\n'
'The values in this document are all in S.I. units,\n'
'unlike those in the original tc-ds633.txt\n'
'File autogenerated using HGP633data_to_burnman.py\n'
'"""\n\n'
'from numpy import array\n\n'
'cov = ')
import numpy as np
n_mbrs = int(ds[0][0])
names = []
for i in range(n_mbrs):
names.append(ds[i*4+3][0])
cov = []
for i in range(n_mbrs*4+4, len(ds)-2):
cov.extend(map(float, ds[i]))
i_utr = np.triu_indices(n_mbrs)
i_ltr = np.tril_indices(n_mbrs)
M = np.zeros((n_mbrs, n_mbrs))
M[i_utr] = cov[1:]
M[i_ltr] = M.T[i_ltr]
M = M*1.e6 # (kJ/mol)^2 -> (J/mol)^2
d = {'endmember_names':names,
'covariance_matrix':M}
np.set_printoptions(threshold='nan')
import pprint
pp = pprint.PrettyPrinter(indent=0, width=160, depth=3, stream=outfile)
pp.pprint(d)
outfile.write('\n')
| gpl-2.0 | 526,628,182,742,594,200 | 41.564655 | 121 | 0.458937 | false |
LibraryOfCongress/gazetteer | etl/parser/feature_type_maps/digitizer_types.py | 1 | 1508 | default_types = {
"default" : "BLDG"
}
use_types_map = {
"Residential" : "HSE",
"Worship" : "CTTR",
"Educational" : "SCH",
"Commercial": "BLDO",
"Industrial" : "MFG",
"Health": "HSP",
"Transport":"BLDG",
"Military":"INSM",
"unknown": "BLDG"
}
use_sub_types_map = {
"Apartments": "HSE",
"Houses": "HSE",
"Church" : "CH",
"Synagogue" : "CTRR",
"School": "SCH",
"Railroad System" : "RSTN",
"Bank" : "BANK",
"Hotel":"HTL",
"Library": "LIBR",
"Freight House":"RSTN",
"Hospital" : "HSP",
"Lumber Yard":"MLSW",
"Shop" : "RET",
"Gallery": "MUS",
"Pharmacy": "RET",
"Cobbler" : "RET",
"Office" : "BLDO",
"Saw Mill" : "MLSW",
"Distillery": "MFGB",
"Warehouse" : "SHSE",
"Storehouse": "SHSE",
"Gas Works": "OILT",
"Foundary": "FNDY",
"Paper Mill": "ML",
"Textile Mill": "ML",
"Locomotive Works": "RYD",
"Brewery" : "MFGB",
"Factory" : "MFG",
"Manufactory": "MFG",
"Paint Shop": "MFG",
"Rope Walk": "BLDG",
"Slaughter House": "BLDG",
"Asylum Insane" : "ASYL",
"Asylum Inebriate": "ASYL",
"Asylum Oprhan" : "BLDG",
"Almshouse" : "BLDG",
"Quarantine" : "BLDG",
"Sanatorium" : "SNTR",
"Toll House" : "BLDG",
"Toll Gate" : "BLDG",
"Railroad System" : "RSTN",
"Railroad Depot" : "RSTN",
"Subway Platform" :"MTRO",
"Docks" : "DCKY",
"Armory": "INSM",
"Battery": "INSM",
"Fortification": "FT"
}
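# Illustrative helper sketch (not part of the original module): resolving a
# feature type code from the maps above, preferring the sub-type mapping and
# falling back to the default. The function name is hypothetical.
def lookup_feature_type(use, sub_type=None):
    if sub_type and sub_type in use_sub_types_map:
        return use_sub_types_map[sub_type]
    return use_types_map.get(use, default_types["default"])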
| mit | 2,526,698,296,780,296,000 | 21.848485 | 31 | 0.500663 | false |
michaelimfeld/notipy-server | tests/unit/test_main.py | 1 | 1942 | """
`notipyserver` - User-Notification-Framework server
Provides test cases for the notipyserver main module.
:copyright: (c) by Michael Imfeld
:license: MIT, see LICENSE for details
"""
import sys
from threading import Thread
from mock import patch
from nose.tools import assert_equal
import notipyserver.__main__
def test_get_telegram_updater():
"""
Test get telegram updater
"""
with patch("notipyserver.__main__.Updater"), \
patch("notipyserver.__main__.Config"):
notipyserver.__main__.get_telegram_updater()
def test_main():
"""
Test main
"""
with patch("notipyserver.__main__.NotipyServer") as mock, \
patch.object(sys, "argv", []), \
patch.object(sys, "exit"):
notipyserver.__main__.main()
mock.return_value.start.assert_called_with()
def test_notipy_server():
"""
Test instantiation of NotipyServer
"""
with patch("notipyserver.__main__.get_telegram_updater") as mock:
notipyserver.__main__.NotipyServer("foo", 9999)
mock.assert_called_with()
def test_notipy_server_sig_handler():
"""
Test signal handler of NotipyServer
"""
with patch("notipyserver.__main__.get_telegram_updater"):
notifier = notipyserver.__main__.NotipyServer("foo", 9999)
with patch.object(notifier, "_updater") as mock:
notifier.signal_handler()
mock.stop.assert_called_with()
def test_notipy_server_start():
"""
Test NotipyServer start
"""
with patch("notipyserver.__main__.get_telegram_updater"):
notifier = notipyserver.__main__.NotipyServer("foo", 9999)
with patch.object(notifier, "_updater") as updater, \
patch.object(notifier, "_app") as app:
thr = Thread(target=notifier.start)
thr.start()
notifier.signal_handler()
updater.start_polling.assert_called_with()
app.start.assert_called_with()
| mit | -4,133,699,871,132,350,500 | 25.972222 | 69 | 0.636457 | false |
hackerkid/zulip | zerver/lib/response.py | 1 | 2912 | from typing import Any, List, Mapping, Optional
import orjson
from django.http import HttpResponse, HttpResponseNotAllowed
from django.utils.translation import ugettext as _
from zerver.lib.exceptions import JsonableError
class HttpResponseUnauthorized(HttpResponse):
status_code = 401
def __init__(self, realm: str, www_authenticate: Optional[str] = None) -> None:
HttpResponse.__init__(self)
if www_authenticate is None:
self["WWW-Authenticate"] = f'Basic realm="{realm}"'
elif www_authenticate == "session":
self["WWW-Authenticate"] = f'Session realm="{realm}"'
else:
raise AssertionError("Invalid www_authenticate value!")
def json_unauthorized(
message: Optional[str] = None, www_authenticate: Optional[str] = None
) -> HttpResponse:
if message is None:
message = _("Not logged in: API authentication or user session required")
resp = HttpResponseUnauthorized("zulip", www_authenticate=www_authenticate)
resp.content = orjson.dumps(
{"result": "error", "msg": message},
option=orjson.OPT_APPEND_NEWLINE,
)
return resp
def json_method_not_allowed(methods: List[str]) -> HttpResponseNotAllowed:
resp = HttpResponseNotAllowed(methods)
resp.content = orjson.dumps(
{"result": "error", "msg": "Method Not Allowed", "allowed_methods": methods}
)
return resp
def json_response(
res_type: str = "success", msg: str = "", data: Mapping[str, Any] = {}, status: int = 200
) -> HttpResponse:
content = {"result": res_type, "msg": msg}
content.update(data)
# Because we don't pass a default handler, OPT_PASSTHROUGH_DATETIME
# actually causes orjson to raise a TypeError on datetime objects. This
# helps us avoid relying on the particular serialization used by orjson.
return HttpResponse(
content=orjson.dumps(
content,
option=orjson.OPT_APPEND_NEWLINE | orjson.OPT_PASSTHROUGH_DATETIME,
),
content_type="application/json",
status=status,
)
def json_success(data: Mapping[str, Any] = {}) -> HttpResponse:
return json_response(data=data)
def json_response_from_error(exception: JsonableError) -> HttpResponse:
"""
This should only be needed in middleware; in app code, just raise.
When app code raises a JsonableError, the JsonErrorHandler
middleware takes care of transforming it into a response by
calling this function.
"""
response = json_response(
"error", msg=exception.msg, data=exception.data, status=exception.http_status_code
)
for header, value in exception.extra_headers.items():
response[header] = value
return response
def json_error(msg: str, data: Mapping[str, Any] = {}, status: int = 400) -> HttpResponse:
return json_response(res_type="error", msg=msg, data=data, status=status)
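# Illustrative sketch (not part of this module): a hypothetical helper showing
# how the response builders above are typically combined by a caller.
def example_view_response(ok: bool) -> HttpResponse:
    if ok:
        return json_success({"status": "done"})
    return json_error("Something went wrong", status=400)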
| apache-2.0 | 1,714,913,067,446,472,200 | 32.860465 | 93 | 0.674107 | false |
eliostvs/django-kb | kb/tests/article/tests_search_indexes.py | 1 | 1665 | from __future__ import unicode_literals
from django.core.management import call_command
from model_mommy import mommy
from kb.tests.test import SearchViewTestCase
from kb.models import Article
from kb.views import SearchView
class SearchArticleTestCase(SearchViewTestCase):
view_function = SearchView
view_name = 'search'
def setUp(self):
mommy.make_recipe('kb.tests.category_with_articles')
for article in Article.objects.all():
article.tags.add('bar')
call_command('rebuild_index', interactive=False, verbosity=0)
def test_search_title(self):
response = self.get({'q': 'published article title'})
object_list = response.context['page'].object_list
self.assertHttpOK(response)
self.assertSeqEqual([a.object for a in object_list], Article.objects.published())
def test_search_content(self):
response = self.get({'q': 'published article content'})
object_list = response.context['page'].object_list
self.assertHttpOK(response)
self.assertSeqEqual([a.object for a in object_list], Article.objects.published())
def test_search_tag(self):
response = self.get({'q': 'bar'})
object_list = response.context['page'].object_list
self.assertHttpOK(response)
self.assertSeqEqual([a.object for a in object_list], Article.objects.published())
def test_search_draf_article_should_fail(self):
response = self.get({'q': 'draft article title'})
object_list = response.context['page'].object_list
self.assertHttpOK(response)
self.assertFalse([a.object for a in object_list])
| bsd-3-clause | 6,676,508,754,264,055,000 | 32.3 | 89 | 0.679279 | false |
USGSDenverPychron/pychron | pychron/pipeline/plot/panels/ideogram_panel.py | 1 | 1951 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from numpy import Inf
from pychron.pipeline.plot.panels.figure_panel import FigurePanel
from pychron.pipeline.plot.plotter.ideogram import Ideogram
# ============= local library imports ==========================
class IdeogramPanel(FigurePanel):
_figure_klass = Ideogram
# _index_attr = 'uage'
def _get_init_xlimits(self):
po = self.plot_options
attr = po.index_attr
center = None
mi, ma = Inf, -Inf
if attr:
if po.use_static_limits:
mi, ma = po.xlow, po.xhigh
else:
xmas, xmis = zip(*[(i.max_x(attr), i.min_x(attr))
for i in self.figures])
mi, ma = min(xmis), max(xmas)
cs = [i.mean_x(attr) for i in self.figures]
center = sum(cs) / len(cs)
if po.use_centered_range:
w2 = po.centered_range / 2.0
mi, ma = center - w2, center + w2
return center, mi, ma
# ============= EOF =============================================
| apache-2.0 | -1,805,856,260,850,883,000 | 37.254902 | 81 | 0.509995 | false |
uadnan/pytrace | tests/core/debugger_tests.py | 1 | 3490 | import unittest2
import inspect
from pytrace.core.debugger import ManagedDebugger, DebuggerEvent
TEST_FUNCTION_SCRIPT = """
def hello(arg1, arg2, *args, **kwargs):
var1 = 20
return var1
hello(1, 2, 3, 4, 5, 6, x=1, y=3)
"""
class ManagedDebuggerTests(unittest2.TestCase):
debugger_events = []
def _debugger_step(self, event, *args, **kwargs):
self.debugger_events.append((event, args, kwargs))
def test_hello_world(self):
self.debugger_events = []
debugger = ManagedDebugger(self._debugger_step)
debugger.run("print ('Hello Wolrd')")
assert len(self.debugger_events) == 2
def test_syntax_error(self):
self.debugger_events = []
debugger = ManagedDebugger(self._debugger_step)
debugger.run("print 'Hello Wolrd")
assert len(self.debugger_events) == 1
assert self.debugger_events[0][0] == DebuggerEvent.SyntaxError
assert isinstance(self.debugger_events[0][2]["exception"], SyntaxError)
def test_step_line(self):
self.debugger_events = []
debugger = ManagedDebugger(self._debugger_step)
debugger.run("print ('Hello Wolrd')")
assert len(self.debugger_events) == 2
assert self.debugger_events[0][0] == DebuggerEvent.StepLine
def test_function_call(self):
self.debugger_events = []
debugger = ManagedDebugger(self._debugger_step)
debugger.run(TEST_FUNCTION_SCRIPT)
assert len(self.debugger_events) > 3
assert self.debugger_events[2][0] == DebuggerEvent.EnterBlock
def test_function_exit(self):
self.debugger_events = []
debugger = ManagedDebugger(self._debugger_step)
debugger.run(TEST_FUNCTION_SCRIPT)
assert len(self.debugger_events) > 5
assert self.debugger_events[5][0] == DebuggerEvent.ExitBlock
def test_return_value(self):
self.debugger_events = []
debugger = ManagedDebugger(self._debugger_step)
debugger.run(TEST_FUNCTION_SCRIPT)
assert len(self.debugger_events) > 5
assert self.debugger_events[5][0] == DebuggerEvent.ExitBlock
kwargs = self.debugger_events[5][2]
assert 'return_value' in kwargs
assert kwargs['return_value'] == 20
def test_function_arguments_capturing(self):
self.debugger_events = []
debugger = ManagedDebugger(self._debugger_step)
debugger.run(TEST_FUNCTION_SCRIPT)
assert len(self.debugger_events) > 3
arguments = self.debugger_events[2][2]['arguments']
assert len(arguments) == 4
assert 'arg1' in arguments
assert 'arg2' in arguments
assert 'args' in arguments
assert 'kwargs' in arguments
assert arguments['arg1'] == 1
assert arguments['arg2'] == 2
assert arguments['args'] == (3, 4, 5, 6)
assert arguments['kwargs'] == dict(x=1, y=3)
def test_exception(self):
self.debugger_events = []
debugger = ManagedDebugger(self._debugger_step)
debugger.run("raise ValueError()")
assert len(self.debugger_events) == 3
assert self.debugger_events[1][0] == DebuggerEvent.Exception
kwargs = self.debugger_events[1][2]
assert 'frame' in kwargs
assert 'ex_type' in kwargs
assert 'traceback' in kwargs
assert isinstance(kwargs['ex_type'], type)
assert inspect.istraceback(kwargs['traceback'])
assert inspect.isframe(kwargs['frame'])
| mit | 4,361,538,670,441,920,000 | 33.9 | 79 | 0.636963 | false |
Lucretiel/genetics | genetics/dna/base.py | 1 | 1479 | import abc
class DNABase(metaclass=abc.ABCMeta):
'''
DNABase is the base class for all dna. It defines the abstract methods that
all DNA should have, as well as an __lt__ method for sorting.
'''
__slots__ = ()
#TODO: Add helpers for calling type(self)(...) everywhere
@abc.abstractmethod
def mutate(self, mask):
'''
This method should return a DNA object that is the result of applying
the mutation mask to each component of this DNA. It is allowed to
return self if and only if the mask application doesn't change the dna
at all.
'''
pass
@abc.abstractmethod
def combine(self, other, mask):
'''
Return a tuple of two new DNAs that are the result of combining this
DNA with other, using the mask.
'''
pass
@classmethod
def total_length(cls):
'''
This method is provided for backwards compatibility, and returns the
total length of this DNA. For DNA with subcomponents, this is the sum
of the lengths of the subcomponents. This is now computed beforehand
and stored in a class-scope variable. Deprecated.
'''
return cls.static_length
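# Illustrative sketch (not part of the original module): a minimal concrete
# DNA type built on DNABase. The bit-tuple representation and the value of
# static_length are hypothetical.
class ExampleBitDNA(DNABase):
    __slots__ = ('bits',)
    static_length = 8
    def __init__(self, bits):
        self.bits = tuple(bits)
    def mutate(self, mask):
        # XOR each component with the corresponding mask bit.
        return ExampleBitDNA(b ^ m for b, m in zip(self.bits, mask))
    def combine(self, other, mask):
        # Produce two children by swapping components where the mask is set.
        first = ExampleBitDNA(a if m else b
                              for a, b, m in zip(self.bits, other.bits, mask))
        second = ExampleBitDNA(b if m else a
                               for a, b, m in zip(self.bits, other.bits, mask))
        return first, second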
def combine_element_pairs(pairs):
'''
    When given an iterable that yields pairs, such as
    [(1, 2), (3, 4), (5, 6)], combine them into a pair of iterables, such as
((1, 3, 5), (2, 4, 6))
'''
return tuple(zip(*pairs))
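# Example (illustrative, not part of the original module):
#   combine_element_pairs([(1, 2), (3, 4), (5, 6)]) == ((1, 3, 5), (2, 4, 6))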
| lgpl-2.1 | -28,554,161,450,181,932 | 29.8125 | 79 | 0.621366 | false |
blurstudio/cross3d | cross3d/classes/fcurve.py | 1 | 17768 | import os
import math
import hashlib
import xml.dom.minidom
from framerange import FrameRange
from valuerange import ValueRange
import cross3d
from cross3d.constants import ControllerType, TangentType, ExtrapolationType
class Key(object):
def __init__(self, **kwargs):
self.value = float(kwargs.get('value', 0.0))
self.time = float(kwargs.get('time', 0.0))
		# Tangent angles are stored in radians.
self.inTangentAngle = float(kwargs.get('inTangentAngle', 0.0))
self.outTangentAngle = float(kwargs.get('outTangentAngle', 0.0))
self.inTangentType = int(kwargs.get('inTangentType', TangentType.Automatic))
self.outTangentType = int(kwargs.get('outTangentType', TangentType.Automatic))
self.outTangentLength = float(kwargs.get('outTangentLength', 0.0))
self.inTangentLength = float(kwargs.get('inTangentLength', 0.0))
# Normalized tangents scale based on the distance to the key they are pointing to.
self.normalizedTangents = bool(kwargs.get('normalizedTangents', True))
		# A broken key allows its tangents to be manipulated individually.
self.brokenTangents = bool(kwargs.get('brokenTangents', False))
@property
def inTangentPoint(self):
x = self.inTangentLength * math.cos(self.inTangentAngle)
y = self.inTangentLength * math.sin(self.inTangentAngle)
return self.time - x, self.value + y
@property
def outTangentPoint(self):
x = self.outTangentLength * math.cos(self.outTangentAngle)
y = self.outTangentLength * math.sin(self.outTangentAngle)
return self.time + x, self.value + y
class FCurve(object):
def __init__(self, **kwargs):
self._name = unicode(kwargs.get('name', ''))
self._type = int(kwargs.get('tpe', ControllerType.BezierFloat))
self._keys = []
self._inExtrapolation = int(kwargs.get('inExtrapolation', ExtrapolationType.Constant))
self._outExtrapolation = int(kwargs.get('outExtrapolation', ExtrapolationType.Constant))
def valueAtTime(self, time):
"""Returns the value of the fcurve at the specified time
Args:
time (float): time at which to evaluate the fcurve.
Returns:
float: value of the fcurve at the specified time.
"""
# we need to ensure keys is sorted properly for this to work.
sortedKeys = sorted(self._keys, key=lambda k: k.time)
# If the time specified is out of the range of keyframes, we'll need to
# extrapolate to find the value. This will be split into its own fn since
# it gets a bit messy.
if time < sortedKeys[0].time or time > sortedKeys[-1].time:
return self.extrapolateValue(time)
i = 0
t = sortedKeys[i].time
maxI = len(sortedKeys) - 1
while t < time and i < maxI:
i += 1
t = sortedKeys[i].time
if t == time:
# time is at a key -- we can just return that key's value.
return sortedKeys[i].value
else:
# we should have two keys that our time falls between
k0 = sortedKeys[i - 1]
k1 = sortedKeys[i]
return self.bezierEvaluation(k0, k1, time)
def plot(self, startValue=None, endValue=None, resolution=1.0, plotHandles=True):
"""Uses matplotlib to generate a plot of the curve, primarily useful for debugging purposes.
Args:
startValue (float): Starting value for portion of the curve to sample.
endValue (float): Ending value for portion of the curve to sample.
resolution (float): Frequency with which to sample the curve.
"""
fullRange = self.range()
startValue = fullRange[0] if startValue is None else startValue
endValue = fullRange[1] if endValue is None else endValue
import numpy as np
import matplotlib.pyplot as plt
# plot handles, if asked
if plotHandles:
for key in self._keys:
points = zip(key.inTangentPoint, (key.time, key.value), key.outTangentPoint)
plt.plot(*points, marker='o', color='blue')
plt.plot(*points, color='black')
# plot line
x = np.arange(startValue, endValue, resolution)
f = np.vectorize(self.valueAtTime)
plt.plot(x, f(x))
plt.show()
def plotted(self, rng, step=1):
plotted = FCurve()
for value in xrange(rng[0], rng[1], step):
			plotted.addKey(time=value, value=self.valueAtTime(value))
return plotted
def offset(self, value, attr='time', rnd=False):
for key in self._keys:
v = getattr(key, attr) + float(value)
v = round(v) if rnd else v
setattr(key, attr, v)
def keys(self):
return self._keys
def scale(self, value, attr='time', pivot=0.0, rnd=False):
for key in self._keys:
# Scaling the attribute.
v = (getattr(key, attr) - pivot) * value + pivot
v = round(v) if rnd else v
setattr(key, attr, v)
# Getting the tangents time and value.
inTangentTime = math.cos(key.inTangentAngle) * key.inTangentLength
inTangentValue = math.sin(key.inTangentAngle) * key.inTangentLength
outTangentTime = math.cos(key.outTangentAngle) * key.outTangentLength
outTangentValue = math.sin(key.outTangentAngle) * key.outTangentLength
# Scaling the right tangent components.
if attr == 'time':
inTangentTime *= value
outTangentTime *= value
elif attr == 'value':
inTangentValue *= value
outTangentValue *= value
# Setting the tangent data on the keys.
key.inTangentAngle = math.atan2(inTangentValue, inTangentTime)
key.inTangentLength = math.sqrt(inTangentValue**2 + inTangentTime**2)
key.outTangentAngle = math.atan2(outTangentValue, outTangentTime)
key.outTangentLength = math.sqrt(outTangentValue**2 + outTangentTime**2)
def remap(self, rng, attr='time', rnd=False):
start = getattr(self._keys[0], attr)
end = getattr(self._keys[-1], attr)
# Difference is not the same as duration.
difference = float(end - start)
ratio = (rng[1] - rng[0]) / difference
self.scale(ratio, attr=attr, rnd=rnd, pivot=start)
self.offset(rng[0] - start, attr=attr, rnd=rnd)
def round(self, attr='time'):
for key in self._keys:
v = getattr(key, attr)
setattr(key, attr, round(v))
def invert(self, conversionRatio=1.0):
""" Inverse time and values of each key.
Args:
conversionRatio(float): The conversion ratio to go from Y to X.
			For example you might want to invert a curve where frames on X are expressed in seconds on Y.
The X values will need to be divided by a frame rate to become meaningful Y values.
On the other hand Y values will have to be multiplied by that same ratio to become meaningful X values.
"""
# Before we flip we rationalize the Y axis based on provided conversion ratio.
if conversionRatio and conversionRatio != 1.0:
self.scale(conversionRatio, attr='value')
for key in self._keys:
time = key.time
value = key.value
# Swapping time and value.
key.time = value
key.value = time
# Flipping tangents based on a 45 degrees line.
key.inTangentAngle = math.pi / 2.0 - key.inTangentAngle
key.outTangentAngle = math.pi / 2.0 - key.outTangentAngle
# We revert the scale of the Y axis.
if conversionRatio and conversionRatio != 1.0:
self.scale(1 / conversionRatio, attr='value')
def range(self, attr='time'):
		# TODO: This will only work for curves that start at their minimum and end at their maximum.
keys = self._keys
start = getattr(keys[0], attr) if len(keys) > 1 else 0
end = getattr(keys[-1], attr) if len(keys) > 1 else 0
return ValueRange(start, end)
def setExtrapolation(self, extrapolation=[None, None]):
self._inExtrapolation = extrapolation[0] or self._inExtrapolation
self._outExtrapolation = extrapolation[1] or self._outExtrapolation
def extrapolation(self):
return (self._inExtrapolation, self._outExtrapolation)
def name(self):
return self._name
def type(self):
return self._type
def setType(self, tpe):
self._type = tpe
def setName(self, name):
self._name = name
def addKey(self, **kwargs):
key = Key(**kwargs)
self._keys.append(key)
return self._keys
def __len__(self):
return len(self.keys())
def __nonzero__(self):
return bool(self.__len__())
def __eq__(self, other):
""" Allows to compare to fCurve objects.
"""
if isinstance(other, FCurve):
if cross3d.debugLevel >= cross3d.constants.DebugLevels.Mid:
with open(r'C:\temp\fCurve.xml', 'w') as fle:
fle.write(self.toXML())
with open(r'C:\temp\otherFCurve.xml', 'w') as fle:
fle.write(other.toXML())
return self.__hash__() == other.__hash__()
return False
def __hash__(self):
return hashlib.sha224(self.toXML()).hexdigest()
def __ne__(self, other):
return not self.__eq__(other)
def fromXML(self, xml):
""" Loads curve data from an XML document.
Args:
xml(string): The xml we want to load on the curve.
"""
# If the document is a path we try to load the XML from that file.
from cross3d.migrate.XML import XMLDocument
document = XMLDocument()
document.parse(xml)
# Getting the curve element.
fCurveElement = document.root()
self._name = fCurveElement.attribute('name')
self._type = ControllerType.valueByLabel(fCurveElement.attribute('type'))
self._inExtrapolation = ExtrapolationType.valueByLabel(fCurveElement.attribute('inExtrapolation'))
self._outExtrapolation = ExtrapolationType.valueByLabel(fCurveElement.attribute('outExtrapolation'))
self._keys = []
for element in fCurveElement.children():
# This guarantees that the XML is somehow valid.
if element.findChild('inTangentAngle'):
# Getting tangent types.
inTangentType = element.findChild('inTangentType').value()
outTangentType = element.findChild('outTangentType').value()
				# TODO: Remove in a few months. That's for backward compatibility.
tbc = {'custom': 'Bezier', 'linear': 'Linear', 'auto': 'Automatic', 'step': 'Stepped'}
if inTangentType in tbc:
inTangentType = tbc[inTangentType]
if outTangentType in tbc:
outTangentType = tbc[outTangentType]
kwargs = {'time': element.attribute('time'),
'value': element.attribute('value'),
'inTangentAngle': element.findChild('inTangentAngle').value(),
'outTangentAngle': element.findChild('outTangentAngle').value(),
'inTangentType': TangentType.valueByLabel(inTangentType),
'outTangentType': TangentType.valueByLabel(outTangentType),
'inTangentLength': element.findChild('inTangentLength').value(),
'outTangentLength': element.findChild('outTangentLength').value(),
'normalizedTangents': element.findChild('normalizedTangents').value() == 'True',
'brokenTangents': element.findChild('brokenTangents').value() == 'True'}
self._keys.append(Key(**kwargs))
def toXML(self):
""" Translate the curve data into a XML.
TODO: I hate the API for XML so I shove most of it here.
Returns:
str: The XML data for that curve.
"""
from cross3d.migrate.XML import XMLDocument
document = XMLDocument()
fCurveElement = document.addNode('fCurve')
fCurveElement.setAttribute('name', self._name)
fCurveElement.setAttribute('type', ControllerType.labelByValue(self._type))
fCurveElement.setAttribute('inExtrapolation', ExtrapolationType.labelByValue(self._inExtrapolation))
fCurveElement.setAttribute('outExtrapolation', ExtrapolationType.labelByValue(self._outExtrapolation))
for key in self._keys:
keyElement = fCurveElement.addNode('key')
keyElement.setAttribute('value', key.value)
keyElement.setAttribute('time', key.time)
properties = {'inTangentAngle': key.inTangentAngle,
'outTangentAngle': key.outTangentAngle,
'inTangentType': TangentType.labelByValue(key.inTangentType),
'outTangentType': TangentType.labelByValue(key.outTangentType),
'inTangentLength': key.inTangentLength,
'outTangentLength': key.outTangentLength,
'normalizedTangents': key.normalizedTangents,
'brokenTangents': key.brokenTangents}
for prop in sorted(properties.keys()):
propertyElement = keyElement.addNode(prop)
propertyElement.setValue(properties[prop])
return document.toxml()
def write(self, path):
if path and isinstance(path, basestring):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(path, 'w') as fle:
fle.write(self.toXML())
def read(self, path):
with open(path) as fle:
self.fromXML(fle.read())
return True
def extrapolateValue(self, time):
"""Returns the value at a given time outside the range of keyframes for the curve, using the
curve's extrapolation mode for values outside the keyframe range in that direction.
Args:
time (float): time at which to calculate the curve's value
Returns:
float: Extrapolated value for the curve at the specified time.
"""
sortedKeys = sorted(self._keys, key=lambda k: k.time)
if time >= sortedKeys[0].time and time <= sortedKeys[-1].time:
raise ValueError('Unable to extrapolate value for time within keyframed curve.')
t0, t1 = sortedKeys[0].time, sortedKeys[-1].time
dt = t1 - t0
dtx = 0
if time < sortedKeys[0].time:
# time is before start
mode = self._inExtrapolation
if mode == ExtrapolationType.Constant:
return sortedKeys[0].value
before = True
dtx = t0 - time
else:
# time is after end
mode = self._outExtrapolation
if mode == ExtrapolationType.Constant:
return sortedKeys[-1].value
before = False
dtx = time - t1
if mode == ExtrapolationType.Linear:
v = sortedKeys[0].value if before else sortedKeys[-1].value
tangentLength = sortedKeys[0].outTangentLength if before else sortedKeys[-1].inTangentLength
if tangentLength:
# get the angle of the opposite tangent (max doesn't store tangents)
# for the outer side in this case.
theta = sortedKeys[0].outTangentAngle if before else sortedKeys[-1].inTangentAngle
# Now get the inverse angle, since we want to move on the opposite vector
theta = math.pi - theta
# delta from the range to our unknown is our triangle's base,
# theta is the angle, and our y value is the side.
# Solve for y, and then offset by the value of the last keyframe.
return dtx * math.tan(theta) + v
else:
if len(sortedKeys) == 1:
return sortedKeys[0].value
if before:
x = sortedKeys[1].time - sortedKeys[0].time
y = sortedKeys[0].value - sortedKeys[1].value
offset = sortedKeys[0].value
else:
x = sortedKeys[-1].time - sortedKeys[-2].time
y = sortedKeys[-1].value - sortedKeys[-2].value
offset = sortedKeys[-1].value
return (y / x) * dtx + offset
elif mode == ExtrapolationType.Cycled:
# We're just looping through the existing timeline now, so we can modulus the delta of
# sample position with the delta of the start/end keyframe times
tp = dtx % dt
# If we fell off the beginning, we need to play through backwards
if before:
tp = dt - tp
# Now we can just get the value for the time
return self.valueAtTime(tp + t0)
elif mode == ExtrapolationType.CycledWithOffset:
# This is going to work the same as cycled, except we'll need to add an offset.
			# our position will be the same, but we'll also need a repetition count to multiply by
# our offset.
tp = dtx % dt
tc = math.floor(dtx / dt) + 1
offset = tc * (sortedKeys[-1].value - sortedKeys[0].value)
offset *= (-1 if before else 1)
# If we fell off the beginning, we need to play through backwards.
if before:
tp = dt - tp
# Now we can just get the value for the time and add our offset
return self.valueAtTime(tp + t0) + offset
elif mode == ExtrapolationType.PingPong:
# Again this will be similar to Cycled, however now we'll need to reverse the looping
# direction with each cycle.
tp = dtx % dt
oddRep = not bool(math.floor(dtx / dt) % 2)
# If it's an odd numbered repetition, we need to reverse it.
if (not oddRep and before) or (oddRep and not before):
tp = dt - tp
# Now we can just get the value for the time
return self.valueAtTime(tp + t0)
else:
raise ValueError('Unable to extrapolate values: invalid ExtrapolationType found.')
@staticmethod
def bezierEvaluation(key0, key1, frame):
"""Finds the point on a cubic bezier spline at time frame between two keys.
Args:
key0 (Key): Starting key for the spline
key1 (Key): Ending key for the spline
t (float): Time (as a frame) to solve for
Returns:
Tuple: Tuple of float values for the x (time) and y (value) coordinates of the resulting
point.
"""
# Implementation by Tyler Fox, modified by Will Cavanagh.
# Based on method described at
# http://edmund.birotanker.com/monotonic-bezier-curves-for-animation.html
p0x, p0y = key0.time, key0.value
p1x, p1y = key0.outTangentPoint
p2x, p2y = key1.inTangentPoint
p3x, p3y = key1.time, key1.value
totalXRecip = 1.0 / (p3x - p0x)
f = (p1x - p0x) * totalXRecip
g = (p3x - p2x) * totalXRecip
xVal = (frame - p0x) * totalXRecip
d = 3*f + 3*g - 2
n = 2*f + g - 1
r = (n*n - f*d) / (d*d)
q = ((3*f*d*n - 2*n*n*n) / (d*d*d)) - xVal/d
discriminant = q*q - 4*r*r*r
if discriminant >= 0:
pm = (discriminant**0.5)/2 #plus/minus portion of equation
# We're able to only use the + portion of the +/- and get an accurate
# outcome. Saves steps / logic.
w = (-q/2 + pm)**(1/3.0)
u = w + r/w
else:
theta = math.acos(-q / ( 2*r**(3/2.0)) )
phi = theta/3 + 4*math.pi/3
u = 2 * r**(0.5) * math.cos(phi)
t = u + n/d
t1 = 1-t
return (t1**3*p0y + 3*t1**2*t*p1y + 3*t1*t**2*p2y + t**3*p3y)
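# Illustrative sketch (not part of the original module): building a small
# two-key curve and sampling it between the keys. The times, values and
# tangent lengths below are arbitrary example numbers.
def _example_fcurve_value():
	curve = FCurve(name='example')
	curve.addKey(time=0.0, value=0.0, outTangentLength=1.0)
	curve.addKey(time=10.0, value=5.0, inTangentLength=1.0)
	# Evaluates the bezier segment halfway between the two keys.
	return curve.valueAtTime(5.0)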
| mit | -7,578,486,437,210,674,000 | 33.967611 | 106 | 0.675203 | false |
tunix/raptiye-django | raptiye/blog/widgets.py | 1 | 2592 | # raptiye
# Copyright (C) 2009 Alper Kanat <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import json
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from tagging.models import Tag
from models import Entry
__all__ = ("AutoCompleteTagInput", )
class AutoCompleteTagInput(forms.TextInput):
def __init__(self, *args, **kwargs):
if "model" in kwargs:
self.model = kwargs.get("model", Entry)
del(kwargs["model"])
super(AutoCompleteTagInput, self).__init__(*args, **kwargs)
self.startText = _(u"Enter Tag Name Here")
self.emptyText = _(u"No Results")
class Media:
css = {
"screen": ("css/autoSuggest.css",)
}
js = (
"js/jquery.autoSuggest-packed.js",
)
def render(self, name, value, attrs=None):
output = super(AutoCompleteTagInput, self).render(name, value, attrs)
tags = Tag.objects.usage_for_model(self.model)
tag_list = json.dumps([{"name": t.name, "value": t.name} for t in tags])
value = (",".join(value.split(" ")) if value.find(",") == -1 else value) if value else ""
return output + mark_safe(u'''<script type="text/javascript" charset="utf-8">
$("#id_%s").autoSuggest(%s, {
"asHtmlID": "%s",
"preFill": "%s",
"startText": "%s",
"emptyText": "%s",
"neverSubmit": true
})
$("form").submit(function() {
$("<input>").attr({
"name": "%s",
"type": "hidden"
}).val(
$("#as-values-%s").val()
).appendTo($("form"))
})
</script>''' % (name, tag_list, name, value, self.startText, self.emptyText, name, name))
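# Illustrative usage sketch (not part of the original module): attaching the
# widget to a plain form field. The form and field names are hypothetical.
class ExampleTagForm(forms.Form):
    tags = forms.CharField(widget=AutoCompleteTagInput(model=Entry))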
| gpl-3.0 | -3,215,278,082,990,815,700 | 33.506849 | 97 | 0.567515 | false |
foresthz/fusion5.1 | www/scm/viewvc/lib/viewvc.py | 1 | 127915 | # -*-python-*-
#
# Copyright (C) 1999-2006 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# viewvc: View CVS/SVN repositories via a web browser
#
# -----------------------------------------------------------------------
__version__ = '1.0.0'
# this comes from our library; measure the startup time
import debug
debug.t_start('startup')
debug.t_start('imports')
# standard modules that we know are in the path or builtin
import sys
import os
import sapi
import cgi
import string
import urllib
import mimetypes
import time
import re
import rfc822
import stat
import struct
import types
import tempfile
# these modules come from our library (the stub has set up the path)
import compat
import config
import popen
import ezt
import accept
import vclib
try:
import idiff
except (SyntaxError, ImportError):
idiff = None
debug.t_end('imports')
#########################################################################
checkout_magic_path = '*checkout*'
# According to RFC 1738 the '~' character is unsafe in URLs.
# But for compatibility with URLs bookmarked with old releases of ViewCVS:
oldstyle_checkout_magic_path = '~checkout~'
docroot_magic_path = '*docroot*'
viewcvs_mime_type = 'text/vnd.viewcvs-markup'
alt_mime_type = 'text/x-cvsweb-markup'
view_roots_magic = '*viewroots*'
# Put here the variables we need in order to hold our state - they
# will be added (with their current value) to (almost) any link/query
# string you construct.
_sticky_vars = [
'hideattic',
'sortby',
'sortdir',
'logsort',
'diff_format',
'search',
'limit_changes',
]
# for reading/writing between a couple descriptors
CHUNK_SIZE = 8192
# for rcsdiff processing of header
_RCSDIFF_IS_BINARY = 'binary-diff'
_RCSDIFF_ERROR = 'error'
# special characters that don't need to be URL encoded
_URL_SAFE_CHARS = "/*~"
class Request:
def __init__(self, server, cfg):
self.server = server
self.cfg = cfg
self.script_name = _normalize_path(server.getenv('SCRIPT_NAME', ''))
self.browser = server.getenv('HTTP_USER_AGENT', 'unknown')
    # in lynx, it is very annoying to have two links per file, so
# disable the link at the icon in this case:
self.no_file_links = string.find(self.browser, 'Lynx') != -1
# newer browsers accept gzip content encoding and state this in a
    # header (Netscape always did, but didn't state it). It has been
# reported that these braindamaged MS-Internet Explorers claim
# that they accept gzip .. but don't in fact and display garbage
# then :-/
self.may_compress = (
( string.find(server.getenv('HTTP_ACCEPT_ENCODING', ''), 'gzip') != -1
or string.find(self.browser, 'Mozilla/3') != -1)
and string.find(self.browser, 'MSIE') == -1
)
# process the Accept-Language: header
hal = server.getenv('HTTP_ACCEPT_LANGUAGE','')
self.lang_selector = accept.language(hal)
self.language = self.lang_selector.select_from(cfg.general.languages)
# load the key/value files, given the selected language
self.kv = cfg.load_kv_files(self.language)
def run_viewvc(self):
cfg = self.cfg
# global needed because "import vclib.svn" causes the
# interpreter to make vclib a local variable
global vclib
# This function first parses the query string and sets the following
# variables. Then it executes the request.
self.view_func = None # function to call to process the request
self.repos = None # object representing current repository
self.rootname = None # name of current root (as used in viewvc.conf)
self.roottype = None # current root type ('svn' or 'cvs')
self.rootpath = None # physical path to current root
self.pathtype = None # type of path, either vclib.FILE or vclib.DIR
self.where = None # path to file or directory in current root
self.query_dict = {} # validated and cleaned up query options
self.path_parts = None # for convenience, equals where.split('/')
self.pathrev = None # current path revision or tag
# redirect if we're loading from a valid but irregular URL
    # These redirects aren't necessary to make ViewVC work; it functions
# just fine without them, but they make it easier for server admins to
# implement access restrictions based on URL
needs_redirect = 0
# Process the query params
for name, values in self.server.params().items():
# patch up old queries that use 'cvsroot' to look like they used 'root'
if name == 'cvsroot':
name = 'root'
needs_redirect = 1
# same for 'only_with_tag' and 'pathrev'
if name == 'only_with_tag':
name = 'pathrev'
needs_redirect = 1
# validate the parameter
_validate_param(name, values[0])
# if we're here, then the parameter is okay
self.query_dict[name] = values[0]
# handle view parameter
self.view_func = _views.get(self.query_dict.get('view', None),
self.view_func)
# Process PATH_INFO component of query string
path_info = self.server.getenv('PATH_INFO', '')
# clean it up. this removes duplicate '/' characters and any that may
# exist at the front or end of the path.
### we might want to redirect to the cleaned up URL
path_parts = _path_parts(path_info)
if path_parts:
# handle magic path prefixes
if path_parts[0] == docroot_magic_path:
# if this is just a simple hunk of doc, then serve it up
self.where = _path_join(path_parts[1:])
return view_doc(self)
elif path_parts[0] in (checkout_magic_path,
oldstyle_checkout_magic_path):
path_parts.pop(0)
self.view_func = view_checkout
if not cfg.options.checkout_magic:
needs_redirect = 1
# handle tarball magic suffixes
if self.view_func is download_tarball:
if (self.query_dict.get('parent')):
del path_parts[-1]
elif path_parts[-1][-7:] == ".tar.gz":
path_parts[-1] = path_parts[-1][:-7]
# Figure out root name
self.rootname = self.query_dict.get('root')
if self.rootname == view_roots_magic:
del self.query_dict['root']
self.rootname = ""
needs_redirect = 1
elif self.rootname is None:
if cfg.options.root_as_url_component:
if path_parts:
self.rootname = path_parts.pop(0)
else:
self.rootname = ""
else:
self.rootname = cfg.general.default_root
elif cfg.options.root_as_url_component:
needs_redirect = 1
self.where = _path_join(path_parts)
self.path_parts = path_parts
if self.rootname:
# Create the repository object
if cfg.general.cvs_roots.has_key(self.rootname):
self.rootpath = os.path.normpath(cfg.general.cvs_roots[self.rootname])
try:
if cfg.general.use_rcsparse:
import vclib.ccvs
self.repos = vclib.ccvs.CCVSRepository(self.rootname,
self.rootpath)
else:
import vclib.bincvs
self.repos = vclib.bincvs.BinCVSRepository(self.rootname,
self.rootpath,
cfg.general)
self.roottype = 'cvs'
except vclib.ReposNotFound:
raise debug.ViewVCException(
'%s not found!\nThe wrong path for this repository was '
'configured, or the server on which the CVS tree lives may be '
'down. Please try again in a few minutes.'
% self.rootname)
# required so that spawned rcs programs correctly expand $CVSHeader$
os.environ['CVSROOT'] = self.rootpath
elif cfg.general.svn_roots.has_key(self.rootname):
self.rootpath = cfg.general.svn_roots[self.rootname]
try:
if re.match(_re_rewrite_url, self.rootpath):
# If the rootpath is a URL, we'll use the svn_ra module, but
# lie about its name.
import vclib.svn_ra
vclib.svn = vclib.svn_ra
self.repos = vclib.svn.SubversionRepository(self.rootname,
self.rootpath)
else:
self.rootpath = os.path.normpath(self.rootpath)
import vclib.svn
self.repos = vclib.svn.SubversionRepository(self.rootname,
self.rootpath,
cfg.general.svn_path)
self.roottype = 'svn'
except vclib.ReposNotFound:
raise debug.ViewVCException(
'%s not found!\nThe wrong path for this repository was '
            'configured, or the server on which the Subversion tree lives may '
'be down. Please try again in a few minutes.'
% self.rootname)
except vclib.InvalidRevision, ex:
raise debug.ViewVCException(str(ex))
else:
raise debug.ViewVCException(
'The root "%s" is unknown. If you believe the value is '
'correct, then please double-check your configuration.'
% self.rootname, "404 Repository not found")
# If this is using an old-style 'rev' parameter, redirect to new hotness.
# Subversion URLs will now use 'pathrev'; CVS ones use 'revision'.
if self.repos and self.query_dict.has_key('rev'):
if self.roottype == 'svn' \
and not self.query_dict.has_key('pathrev') \
and not self.view_func == view_revision:
self.query_dict['pathrev'] = self.query_dict['rev']
del self.query_dict['rev']
else: # elif not self.query_dict.has_key('revision'): ?
self.query_dict['revision'] = self.query_dict['rev']
del self.query_dict['rev']
needs_redirect = 1
if self.repos and self.view_func is not redirect_pathrev:
# Make sure path exists
self.pathrev = pathrev = self.query_dict.get('pathrev')
self.pathtype = _repos_pathtype(self.repos, path_parts, pathrev)
if self.pathtype is None:
# path doesn't exist, see if it could be an old-style ViewVC URL
# with a fake suffix
result = _strip_suffix('.diff', path_parts, pathrev, vclib.FILE, \
self.repos, view_diff) or \
_strip_suffix('.tar.gz', path_parts, pathrev, vclib.DIR, \
self.repos, download_tarball) or \
_strip_suffix('root.tar.gz', path_parts, pathrev, vclib.DIR, \
self.repos, download_tarball) or \
_strip_suffix(self.rootname + '-root.tar.gz', \
path_parts, pathrev, vclib.DIR, \
self.repos, download_tarball) or \
_strip_suffix('root', \
path_parts, pathrev, vclib.DIR, \
self.repos, download_tarball) or \
_strip_suffix(self.rootname + '-root', \
path_parts, pathrev, vclib.DIR, \
self.repos, download_tarball)
if result:
self.path_parts, self.pathtype, self.view_func = result
self.where = _path_join(self.path_parts)
needs_redirect = 1
else:
raise debug.ViewVCException('%s: unknown location'
% self.where, '404 Not Found')
# If we have an old ViewCVS Attic URL which is still valid, then redirect
if self.roottype == 'cvs':
attic_parts = None
if (self.pathtype == vclib.FILE and len(self.path_parts) > 1
and self.path_parts[-2] == 'Attic'):
attic_parts = self.path_parts[:-2] + self.path_parts[-1:]
elif (self.pathtype == vclib.DIR and len(self.path_parts) > 0
and self.path_parts[-1] == 'Attic'):
attic_parts = self.path_parts[:-1]
if attic_parts:
self.path_parts = attic_parts
self.where = _path_join(attic_parts)
needs_redirect = 1
# If this is a forbidden directory, stop now
if self.path_parts and self.pathtype == vclib.DIR \
and cfg.is_forbidden(self.path_parts[0]):
raise debug.ViewVCException('%s: unknown location' % path_parts[0],
'404 Not Found')
if self.view_func is None:
# view parameter is not set, try looking at pathtype and the
# other parameters
if not self.rootname:
self.view_func = view_roots
elif self.pathtype == vclib.DIR:
# ViewCVS 0.9.2 used to put ?tarball=1 at the end of tarball urls
if self.query_dict.has_key('tarball'):
self.view_func = download_tarball
else:
self.view_func = view_directory
elif self.pathtype == vclib.FILE:
if self.query_dict.has_key('r1') and self.query_dict.has_key('r2'):
self.view_func = view_diff
elif self.query_dict.has_key('annotate'):
self.view_func = view_annotate
elif self.query_dict.has_key('graph'):
if not self.query_dict.has_key('makeimage'):
self.view_func = view_cvsgraph
else:
self.view_func = view_cvsgraph_image
elif self.query_dict.has_key('revision') \
or cfg.options.default_file_view != "log":
if self.query_dict.get('content-type', None) in (viewcvs_mime_type,
alt_mime_type):
self.view_func = view_markup
else:
self.view_func = view_checkout
else:
self.view_func = view_log
# if we have a directory and the request didn't end in "/", then redirect
# so that it does.
if (self.pathtype == vclib.DIR and path_info[-1:] != '/'
and self.view_func is not view_revision
and self.view_func is not view_roots
and self.view_func is not download_tarball
and self.view_func is not redirect_pathrev):
needs_redirect = 1
# redirect now that we know the URL is valid
if needs_redirect:
self.server.redirect(self.get_url())
# Finally done parsing query string, set mime type and call view_func
self.mime_type = None
if self.pathtype == vclib.FILE:
self.mime_type = guess_mime(self.where)
# startup is done now.
debug.t_end('startup')
self.view_func(self)
def get_url(self, escape=0, partial=0, **args):
"""Constructs a link to another ViewVC page just like the get_link
function except that it returns a single URL instead of a URL
split into components"""
url, params = apply(self.get_link, (), args)
qs = compat.urlencode(params)
if qs:
result = urllib.quote(url, _URL_SAFE_CHARS) + '?' + qs
else:
result = urllib.quote(url, _URL_SAFE_CHARS)
if partial:
result = result + (qs and '&' or '?')
if escape:
result = self.server.escape(result)
return result
def get_form(self, **args):
"""Constructs a link to another ViewVC page just like the get_link
function except that it returns a base URL suitable for use as an HTML
form action and a string of HTML input type=hidden tags with the link
parameters."""
url, params = apply(self.get_link, (), args)
action = self.server.escape(urllib.quote(url, _URL_SAFE_CHARS))
hidden_values = prepare_hidden_values(params)
return action, hidden_values
def get_link(self, view_func=None, where=None, pathtype=None, params=None):
"""Constructs a link pointing to another ViewVC page. All arguments
correspond to members of the Request object. If they are set to
None they take values from the current page. Return value is a base
URL and a dictionary of parameters"""
cfg = self.cfg
if view_func is None:
view_func = self.view_func
if params is None:
params = self.query_dict.copy()
else:
params = params.copy()
# must specify both where and pathtype or neither
assert (where is None) == (pathtype is None)
# if we are asking for the revision info view, we don't need any
# path information
if (view_func is view_revision or view_func is view_roots
or view_func is redirect_pathrev):
where = pathtype = None
elif where is None:
where = self.where
pathtype = self.pathtype
# no need to add sticky variables for views with no links
sticky_vars = not (view_func is view_checkout
or view_func is download_tarball)
# The logic used to construct the URL is an inverse of the
# logic used to interpret URLs in Request.run_viewvc
url = self.script_name
    # add checkout magic if necessary
if view_func is view_checkout and cfg.options.checkout_magic:
url = url + '/' + checkout_magic_path
# add root to url
rootname = None
if view_func is not view_roots:
if cfg.options.root_as_url_component:
# remove root from parameter list if present
try:
rootname = params['root']
except KeyError:
rootname = self.rootname
else:
del params['root']
# add root path component
if rootname is not None:
url = url + '/' + rootname
else:
# add root to parameter list
try:
rootname = params['root']
except KeyError:
rootname = params['root'] = self.rootname
# no need to specify default root
if rootname == cfg.general.default_root:
del params['root']
# add 'pathrev' value to parameter list
if (self.pathrev is not None
and not params.has_key('pathrev')
and view_func is not view_revision
and rootname == self.rootname):
params['pathrev'] = self.pathrev
# add path
if where:
url = url + '/' + where
# add suffix for tarball
if view_func is download_tarball:
if not where and not cfg.options.root_as_url_component:
url = url + '/' + rootname + '-root'
params['parent'] = '1'
url = url + '.tar.gz'
# add trailing slash for a directory
elif pathtype == vclib.DIR:
url = url + '/'
# normalize top level URLs for use in Location headers and A tags
elif not url:
url = '/'
# no need to explicitly specify directory view for a directory
if view_func is view_directory and pathtype == vclib.DIR:
view_func = None
# no need to explicitly specify roots view when in root_as_url
# mode or there's no default root
if view_func is view_roots and (cfg.options.root_as_url_component
or not cfg.general.default_root):
view_func = None
# no need to explicitly specify annotate view when
# there's an annotate parameter
if view_func is view_annotate and params.get('annotate') is not None:
view_func = None
# no need to explicitly specify diff view when
# there's r1 and r2 parameters
if (view_func is view_diff and params.get('r1') is not None
and params.get('r2') is not None):
view_func = None
# no need to explicitly specify checkout view when it's the default
# view, when checkout_magic is enabled, or when "revision" is present
if view_func is view_checkout:
if ((cfg.options.default_file_view != "log" and pathtype == vclib.FILE)
or cfg.options.checkout_magic
or params.get('revision') is not None):
view_func = None
view_code = _view_codes.get(view_func)
if view_code and not (params.has_key('view') and params['view'] is None):
params['view'] = view_code
# add sticky values to parameter list
if sticky_vars:
for name in _sticky_vars:
value = self.query_dict.get(name)
if value is not None and not params.has_key(name):
params[name] = value
# remove null values from parameter list
for name, value in params.items():
if value is None:
del params[name]
return url, params
def _path_parts(path):
"""Split up a repository path into a list of path components"""
# clean it up. this removes duplicate '/' characters and any that may
# exist at the front or end of the path.
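  # e.g. _path_parts('/trunk//src/') yields ['trunk', 'src']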
return filter(None, string.split(path, '/'))
def _normalize_path(path):
"""Collapse leading slashes in the script name
You only get multiple slashes in the script name when users accidentally
type urls like http://abc.com//viewvc.cgi/, but we correct for it
because we output the script name in links and web browsers
interpret //viewvc.cgi/ as http://viewvc.cgi/
"""
i = 0
for c in path:
if c != '/':
break
i = i + 1
if i:
return path[i-1:]
return path
def _validate_param(name, value):
"""Validate whether the given value is acceptable for the param name.
If the value is not allowed, then an error response is generated, and
this function throws an exception. Otherwise, it simply returns None.
"""
try:
validator = _legal_params[name]
except KeyError:
raise debug.ViewVCException(
'An illegal parameter name ("%s") was passed.' % name,
'400 Bad Request')
if validator is None:
return
# is the validator a regex?
if hasattr(validator, 'match'):
if not validator.match(value):
raise debug.ViewVCException(
'An illegal value ("%s") was passed as a parameter.' %
value, '400 Bad Request')
return
# the validator must be a function
validator(value)
def _validate_regex(value):
# hmm. there isn't anything that we can do here.
### we need to watch the flow of these parameters through the system
### to ensure they don't hit the page unescaped. otherwise, these
  ### parameters could constitute an XSS (cross-site scripting) attack.
pass
# obvious things here. note that we don't need uppercase for alpha.
_re_validate_alpha = re.compile('^[a-z]+$')
_re_validate_number = re.compile('^[0-9]+$')
# when comparing two revs, we sometimes construct REV:SYMBOL, so ':' is needed
_re_validate_revnum = re.compile('^[-_.a-zA-Z0-9:~\\[\\]/]*$')
# it appears that RFC 2045 also says these chars are legal: !#$%&'*+^{|}~`
# but woah... I'll just leave them out for now
_re_validate_mimetype = re.compile('^[-_.a-zA-Z0-9/]+$')
# date time values
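# (e.g. matches '2004-01-31', '2004-01-31 14:30', '2004-01-31 14:30:05', or '')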
_re_validate_datetime = re.compile(r'^(\d\d\d\d-\d\d-\d\d(\s+\d\d:\d\d(:\d\d)?)?)?$')
# the legal query parameters and their validation functions
_legal_params = {
'root' : None,
'view' : None,
'search' : _validate_regex,
'p1' : None,
'p2' : None,
'hideattic' : _re_validate_number,
'limit_changes' : _re_validate_number,
'sortby' : _re_validate_alpha,
'sortdir' : _re_validate_alpha,
'logsort' : _re_validate_alpha,
'diff_format' : _re_validate_alpha,
'pathrev' : _re_validate_revnum,
'dir_pagestart' : _re_validate_number,
'log_pagestart' : _re_validate_number,
'hidecvsroot' : _re_validate_number,
'annotate' : _re_validate_revnum,
'graph' : _re_validate_revnum,
'makeimage' : _re_validate_number,
'tarball' : _re_validate_number,
'parent' : _re_validate_number,
'r1' : _re_validate_revnum,
'tr1' : _re_validate_revnum,
'r2' : _re_validate_revnum,
'tr2' : _re_validate_revnum,
'rev' : _re_validate_revnum,
'revision' : _re_validate_revnum,
'content-type' : _re_validate_mimetype,
# for query
'branch' : _validate_regex,
'branch_match' : _re_validate_alpha,
'dir' : None,
'file' : _validate_regex,
'file_match' : _re_validate_alpha,
'who' : _validate_regex,
'who_match' : _re_validate_alpha,
'querysort' : _re_validate_alpha,
'date' : _re_validate_alpha,
'hours' : _re_validate_number,
'mindate' : _re_validate_datetime,
'maxdate' : _re_validate_datetime,
'format' : _re_validate_alpha,
'limit' : _re_validate_number,
# for redirect_pathrev
'orig_path' : None,
'orig_pathtype' : None,
'orig_pathrev' : None,
'orig_view' : None,
}
def _path_join(path_parts):
return string.join(path_parts, '/')
def _strip_suffix(suffix, path_parts, rev, pathtype, repos, view_func):
"""strip the suffix from a repository path if the resulting path
is of the specified type, otherwise return None"""
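  # e.g. stripping '.diff' from a last component 'foo.c.diff' yields the path
  # ending in 'foo.c' (plus its type and view_func) when that path exists
  # with the expected type; 'foo.c' here is only an illustration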
l = len(suffix)
if path_parts[-1][-l:] == suffix:
path_parts = path_parts[:]
if len(path_parts[-1]) == l:
del path_parts[-1]
else:
path_parts[-1] = path_parts[-1][:-l]
t = _repos_pathtype(repos, path_parts, rev)
if pathtype == t:
return path_parts, t, view_func
return None
def _repos_pathtype(repos, path_parts, rev):
"""return the type of a repository path, or None if the path doesn't exist"""
try:
return repos.itemtype(path_parts, rev)
except vclib.ItemNotFound:
return None
def _orig_path(request, rev_param='revision', path_param=None):
"Get original path of requested file at old revision before copies or moves"
# The 'pathrev' variable is interpreted by nearly all ViewVC views to
# provide a browsable snapshot of a repository at some point in its history.
# 'pathrev' is a tag name for CVS repositories and a revision number for
# Subversion repositories. It's automatically propagated between pages by
# logic in the Request.get_link() function which adds it to links like a
# sticky variable. When 'pathrev' is set, directory listings only include
# entries that exist in the specified revision or tag. Similarly, log pages
# will only show revisions preceding the point in history specified by
# 'pathrev.' Markup, checkout, and annotate pages show the 'pathrev'
# revision of files by default when no other revision is specified.
#
# In Subversion repositories, paths are always considered to refer to the
# pathrev revision. For example, if there is a "circle.jpg" in revision 3,
# which is renamed and modified as "square.jpg" in revision 4, the original
# circle image is visible at the following URLs:
#
# *checkout*/circle.jpg?pathrev=3
# *checkout*/square.jpg?revision=3
# *checkout*/square.jpg?revision=3&pathrev=4
#
# Note that the following:
#
# *checkout*/circle.jpg?rev=3
#
# now gets redirected to one of the following URLs:
#
# *checkout*/circle.jpg?pathrev=3 (for Subversion)
# *checkout*/circle.jpg?revision=3 (for CVS)
#
rev = request.query_dict.get(rev_param, request.pathrev)
path = request.query_dict.get(path_param, request.where)
if rev is not None and hasattr(request.repos, '_getrev'):
pathrev = request.repos._getrev(request.pathrev)
rev = request.repos._getrev(rev)
return _path_parts(vclib.svn.get_location(request.repos, path,
pathrev, rev)), rev
return _path_parts(path), rev
def _install_path(path):
"""Get usable path for a path relative to ViewVC install directory"""
if os.path.isabs(path):
return path
return os.path.normpath(os.path.join(os.path.dirname(__file__),
os.pardir,
path))
def check_freshness(request, mtime=None, etag=None, weak=0):
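  # Compare the client's If-None-Match / If-Modified-Since headers against
  # the given etag/mtime, emit cache expiration headers, and answer with a
  # 304 Not Modified when the client's copy is still fresh.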
# See if we are supposed to disable etags (for debugging, usually)
cfg = request.cfg
if not cfg.options.generate_etags:
return 0
request_etag = request_mtime = None
if etag is not None:
if weak:
etag = 'W/"%s"' % etag
else:
etag = '"%s"' % etag
request_etag = request.server.getenv('HTTP_IF_NONE_MATCH')
if mtime is not None:
try:
request_mtime = request.server.getenv('HTTP_IF_MODIFIED_SINCE')
request_mtime = rfc822.mktime_tz(rfc822.parsedate_tz(request_mtime))
except:
request_mtime = None
# if we have an etag, use that for freshness checking.
# if not available, then we use the last-modified time.
# if not available, then the document isn't fresh.
if etag is not None:
isfresh = (request_etag == etag)
elif mtime is not None:
isfresh = (request_mtime >= mtime)
else:
isfresh = 0
  ## require revalidation after the configured expiration time ...
if cfg and cfg.options.http_expiration_time >= 0:
expiration = compat.formatdate(time.time() +
cfg.options.http_expiration_time)
request.server.addheader('Expires', expiration)
request.server.addheader('Cache-Control',
'max-age=%d' % cfg.options.http_expiration_time)
if isfresh:
request.server.header(status='304 Not Modified')
else:
if etag is not None:
request.server.addheader('ETag', etag)
if mtime is not None:
request.server.addheader('Last-Modified', compat.formatdate(mtime))
return isfresh
def get_view_template(cfg, view_name, language="en"):
# see if the configuration specifies a template for this view
tname = vars(cfg.templates).get(view_name)
# if there is no specific template definition for this view, look in
# the default location (relative to the configured template_dir)
if not tname:
tname = os.path.join(cfg.options.template_dir, view_name + ".ezt")
# allow per-language template selection
tname = string.replace(tname, '%lang%', language)
# finally, construct the whole template path.
tname = _install_path(tname)
debug.t_start('ezt-parse')
template = ezt.Template(tname)
debug.t_end('ezt-parse')
return template
def generate_page(request, view_name, data):
template = get_view_template(request.cfg, view_name, request.language)
template.generate(request.server.file(), data)
def nav_path(request):
"""Return current path as list of items with "name" and "href" members
The href members are view_directory links for directories and view_log
links for files, but are set to None when the link would point to
the current view"""
if not request.repos:
return []
is_dir = request.pathtype == vclib.DIR
# add root item
items = []
root_item = _item(name=request.server.escape(request.repos.name), href=None)
if request.path_parts or request.view_func is not view_directory:
root_item.href = request.get_url(view_func=view_directory,
where='', pathtype=vclib.DIR,
params={}, escape=1)
items.append(root_item)
# add path part items
path_parts = []
for part in request.path_parts:
path_parts.append(part)
is_last = len(path_parts) == len(request.path_parts)
item = _item(name=part, href=None)
if not is_last or (is_dir and request.view_func is not view_directory):
item.href = request.get_url(view_func=view_directory,
where=_path_join(path_parts),
pathtype=vclib.DIR,
params={}, escape=1)
elif not is_dir and request.view_func is not view_log:
item.href = request.get_url(view_func=view_log,
where=_path_join(path_parts),
pathtype=vclib.FILE,
params={}, escape=1)
items.append(item)
return items
def prep_tags(request, tags):
url, params = request.get_link(params={'pathrev': None})
params = compat.urlencode(params)
if params:
url = urllib.quote(url, _URL_SAFE_CHARS) + '?' + params + '&pathrev='
else:
url = urllib.quote(url, _URL_SAFE_CHARS) + '?pathrev='
url = request.server.escape(url)
links = [ ]
for tag in tags:
links.append(_item(name=tag.name, href=url+tag.name))
links.sort(lambda a, b: cmp(a.name, b.name))
return links
def guess_mime(filename):
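  # e.g. guess_mime('logo.png') -> 'image/png'; unknown extensions yield None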
return mimetypes.guess_type(filename)[0]
def is_viewable_image(mime_type):
return mime_type and mime_type in ('image/gif', 'image/jpeg', 'image/png')
def is_text(mime_type):
return not mime_type or mime_type[:5] == 'text/'
def is_plain_text(mime_type):
return not mime_type or mime_type == 'text/plain'
def default_view(mime_type, cfg):
"Determine whether file should be viewed through markup page or sent raw"
# If the mime type is text/anything or a supported image format we view
# through the markup page. If the mime type is something else, we send
# it directly to the browser. That way users can see things like flash
# animations, pdfs, word documents, multimedia, etc, which wouldn't be
# very useful marked up. If the mime type is totally unknown (happens when
# we encounter an unrecognized file extension) we also view it through
# the markup page since that's better than sending it text/plain.
if (cfg.options.allow_markup and
(is_viewable_image(mime_type) or is_text(mime_type))):
return view_markup
return view_checkout
def get_file_view_info(request, where, rev=None, mime_type=None, pathrev=-1):
"""Return common hrefs and a viewability flag used for various views
of FILENAME at revision REV whose MIME type is MIME_TYPE."""
rev = rev and str(rev) or None
mime_type = mime_type or request.mime_type
if pathrev == -1: # cheesy default value, since we need to preserve None
pathrev = request.pathrev
download_text_href = annotate_href = revision_href = None
view_href = request.get_url(view_func=view_markup,
where=where,
pathtype=vclib.FILE,
params={'revision': rev,
'pathrev': pathrev},
escape=1)
download_href = request.get_url(view_func=view_checkout,
where=where,
pathtype=vclib.FILE,
params={'revision': rev,
'pathrev': pathrev},
escape=1)
if not is_plain_text(mime_type):
download_text_href = request.get_url(view_func=view_checkout,
where=where,
pathtype=vclib.FILE,
params={'content-type': 'text/plain',
'revision': rev,
'pathrev': rev},
escape=1)
if request.cfg.options.allow_annotate:
annotate_href = request.get_url(view_func=view_annotate,
where=where,
pathtype=vclib.FILE,
params={'annotate': rev,
'pathrev': pathrev},
escape=1)
if request.roottype == 'svn':
revision_href = request.get_url(view_func=view_revision,
params={'revision': rev},
escape=1)
prefer_markup = default_view(mime_type, request.cfg) == view_markup
return view_href, download_href, download_text_href, \
annotate_href, revision_href, ezt.boolean(prefer_markup)
# Regular expressions for location text that looks like URLs and email
# addresses. Note that the regexps assume the text is already HTML-encoded.
_re_rewrite_url = re.compile('((http|https|ftp|file|svn|svn\+ssh)(://[-a-zA-Z0-9%.~:_/]+)((\?|\&)([-a-zA-Z0-9%.~:_]+)=([-a-zA-Z0-9%.~:_])+)*(#([-a-zA-Z0-9%.~:_]+)?)?)')
_re_rewrite_email = re.compile('([-a-zA-Z0-9_.\+]+)@(([-a-zA-Z0-9]+\.)+[A-Za-z]{2,4})')
def htmlify(html):
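  # escape HTML metacharacters and auto-link URL- and email-looking text,
  # e.g. 'see http://example.com/' becomes
  # 'see <a href="http://example.com/">http://example.com/</a>'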
html = cgi.escape(html)
html = re.sub(_re_rewrite_url, r'<a href="\1">\1</a>', html)
html = re.sub(_re_rewrite_email, r'<a href="mailto:\1@\2">\1@\2</a>', html)
return html
def format_log(log, cfg):
s = htmlify(log[:cfg.options.short_log_len])
if len(log) > cfg.options.short_log_len:
s = s + '...'
return s
_time_desc = {
1 : 'second',
60 : 'minute',
3600 : 'hour',
86400 : 'day',
604800 : 'week',
2628000 : 'month',
31536000 : 'year',
}
def get_time_text(request, interval, num):
"Get some time text, possibly internationalized."
### some languages have even harder pluralization rules. we'll have to
### deal with those on demand
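  # e.g. get_time_text(request, 3600, 2) yields "2 hours" with the default
  # (non-i18n) formatting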
if num == 0:
return ''
text = _time_desc[interval]
if num == 1:
attr = text + '_singular'
fmt = '%d ' + text
else:
attr = text + '_plural'
fmt = '%d ' + text + 's'
try:
fmt = getattr(request.kv.i18n.time, attr)
except AttributeError:
pass
return fmt % num
def little_time(request):
try:
return request.kv.i18n.time.little_time
except AttributeError:
return 'very little time'
def html_time(request, secs, extended=0):
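  # Render the age of a timestamp as rough text; e.g. an age of 300000
  # seconds yields "3 days" (or "3 days, 11 hours" when extended=1).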
secs = long(time.time()) - secs
if secs < 2:
return little_time(request)
breaks = _time_desc.keys()
breaks.sort()
i = 0
while i < len(breaks):
if secs < 2 * breaks[i]:
break
i = i + 1
value = breaks[i - 1]
s = get_time_text(request, value, secs / value)
if extended and i > 1:
secs = secs % value
value = breaks[i - 2]
ext = get_time_text(request, value, secs / value)
if ext:
### this is not i18n compatible. pass on it for now
s = s + ', ' + ext
return s
def common_template_data(request):
cfg = request.cfg
data = {
'cfg' : cfg,
'vsn' : __version__,
'kv' : request.kv,
'docroot' : cfg.options.docroot is None \
and request.script_name + '/' + docroot_magic_path \
or cfg.options.docroot,
'where' : request.server.escape(request.where),
'roottype' : request.roottype,
'rootname' : request.server.escape(request.rootname),
'pathtype' : None,
'nav_path' : nav_path(request),
'up_href' : None,
'log_href' : None,
'log_href_rev': None,
'graph_href': None,
'rss_href' : None,
'view' : _view_codes[request.view_func],
}
if request.pathtype == vclib.DIR:
data['pathtype'] = 'dir'
elif request.pathtype == vclib.FILE:
data['pathtype'] = 'file'
data['change_root_action'], data['change_root_hidden_values'] = \
request.get_form(view_func=view_directory, where='', pathtype=vclib.DIR,
params={'root': None})
# add in the roots for the selection
roots = []
allroots = list_roots(cfg)
if len(allroots):
rootnames = allroots.keys()
rootnames.sort(icmp)
for rootname in rootnames:
href = request.get_url(view_func=view_directory,
where='', pathtype=vclib.DIR,
params={'root': rootname}, escape=1)
roots.append(_item(name=request.server.escape(rootname),
type=allroots[rootname][1], href=href))
data['roots'] = roots
if request.path_parts:
dir = _path_join(request.path_parts[:-1])
data['up_href'] = request.get_url(view_func=view_directory,
where=dir, pathtype=vclib.DIR,
params={}, escape=1)
if request.pathtype == vclib.FILE:
if (request.view_func is not view_log):
data['log_href'] = request.get_url(view_func=view_log,
params={}, escape=1)
if (request.view_func is view_diff):
data['log_href_rev'] = request.query_dict.get('r2')
elif (request.view_func is view_annotate):
# if user did "view=annotate" there may not be an annotate key
if request.query_dict.has_key('annotate'):
data['log_href_rev'] = request.query_dict.get('annotate')
elif request.query_dict.has_key('revision'):
data['log_href_rev'] = request.query_dict.get('revision')
if (request.roottype == 'cvs' and cfg.options.use_cvsgraph
and request.view_func is not view_cvsgraph):
data['graph_href'] = request.get_url(view_func=view_cvsgraph,
params={}, escape=1)
elif request.pathtype == vclib.DIR:
if request.roottype == 'svn':
data['log_href'] = request.get_url(view_func=view_log,
params={}, escape=1)
if is_query_supported(request):
data['rss_href'] = request.get_url(view_func=view_query,
params={'date': 'month',
'format': 'rss'},
escape=1)
return data
def nav_header_data(request, rev, orig_path):
view_href, download_href, download_text_href, annotate_href, \
revision_href, prefer_markup \
= get_file_view_info(request, request.where, rev, request.mime_type)
data = common_template_data(request)
data.update({
'rev' : rev,
'view_href' : view_href,
'annotate_href' : annotate_href,
'download_href' : download_href,
'download_text_href' : download_text_href,
'revision_href' : revision_href,
'prefer_markup' : prefer_markup,
'orig_path' : None,
'orig_href' : None,
})
if orig_path != request.path_parts:
path = _path_join(orig_path)
data['orig_path'] = path
data['orig_href'] = request.get_url(view_func=view_log,
where=path,
pathtype=vclib.FILE,
params={'pathrev': rev},
escape=1)
return data
def retry_read(src, reqlen=CHUNK_SIZE):
while 1:
    chunk = src.read(reqlen)
if not chunk:
# need to check for eof methods because the cStringIO file objects
# returned by ccvs don't provide them
if hasattr(src, 'eof') and src.eof() is None:
time.sleep(1)
continue
return chunk
def copy_stream(src, dst=None, htmlize=0):
if dst is None:
dst = sys.stdout
while 1:
chunk = retry_read(src)
if not chunk:
break
if htmlize:
chunk = htmlify(chunk)
dst.write(chunk)
class MarkupPipeWrapper:
"""An EZT callback that outputs a filepointer, plus some optional
pre- and post- text."""
def __init__(self, fp, pretext=None, posttext=None, htmlize=1):
self.fp = fp
self.pretext = pretext
self.posttext = posttext
self.htmlize = htmlize
def __call__(self, ctx):
if self.pretext:
ctx.fp.write(self.pretext)
copy_stream(self.fp, ctx.fp, self.htmlize)
self.fp.close()
if self.posttext:
ctx.fp.write(self.posttext)
class MarkupShell:
"""A EZT callback object slamming file contents through shell tools."""
def __init__(self, fp, cmds):
self.fp = fp
self.cmds = cmds
def __call__(self, ctx):
ctx.fp.flush()
try:
pipe = popen.pipe_cmds(self.cmds, ctx.fp)
try:
if self.fp:
copy_stream(self.fp, pipe)
self.fp.close()
self.fp = None
finally:
pipe.close()
except IOError:
raise debug.ViewVCException \
('Error running external program. Command line was: "%s"'
% string.join(map(lambda args: string.join(args, ' '), self.cmds),
' | '))
def __del__(self):
self.close()
def close(self):
if self.fp:
self.fp.close()
self.fp = None
class MarkupEnscript(MarkupShell):
def __init__(self, cfg, fp, filename):
    # I've tried to pass option '-C' to enscript to generate line numbers.
    # Unfortunately, this option doesn't work with HTML output in enscript
    # version 1.6.2.
enscript_cmd = [os.path.normpath(os.path.join(cfg.options.enscript_path,
'enscript')),
'--color', '--language=html', '--pretty-print',
'-o', '-', '-']
### I'd like to also strip the <PRE> and </PRE> tags, too, but
### can't come up with a suitable sed expression. Using
### '1,/^<PRE>$/d;/<\\/PRE>/,$d;p' gets me most of the way, but
    ### will drop the last line of a non-newline-terminated file.
sed_cmd = ['sed', '-n', '/^<PRE>$/,/<\\/PRE>$/p']
MarkupShell.__init__(self, fp, [enscript_cmd, sed_cmd])
self.filename = filename
def __call__(self, ctx):
# create a temporary file with the same name as the file in
# the repository so enscript can detect file type correctly
dir = compat.mkdtemp()
try:
file = os.path.join(dir, self.filename)
try:
copy_stream(self.fp, open(file, 'wb'))
self.fp.close()
self.fp = None
self.cmds[0][-1] = file
MarkupShell.__call__(self, ctx)
finally:
os.unlink(file)
finally:
os.rmdir(dir)
class MarkupPHP(MarkupShell):
def __init__(self, php_exe_path, fp):
php_cmd = [php_exe_path, '-q', '-s', '-n']
MarkupShell.__init__(self, fp, [php_cmd])
class MarkupHighlight(MarkupShell):
def __init__(self, cfg, fp, filename):
try:
ext = filename[string.rindex(filename, ".") + 1:]
except ValueError:
ext = 'txt'
highlight_cmd = [os.path.normpath(os.path.join(cfg.options.highlight_path,
'highlight')),
'--syntax', ext, '--force',
'--anchors', '--fragment', '--xhtml']
if cfg.options.highlight_line_numbers:
highlight_cmd.extend(['--linenumbers'])
if cfg.options.highlight_convert_tabs:
highlight_cmd.extend(['--replace-tabs',
str(cfg.options.highlight_convert_tabs)])
MarkupShell.__init__(self, fp, [highlight_cmd])
def markup_stream_python(fp, cfg):
### Convert this code to use the recipe at:
### http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
### Note that the cookbook states all the code is licensed according to
### the Python license.
try:
# See if Marc-Andre Lemburg's py2html stuff is around.
# http://www.egenix.com/files/python/SoftwareDescriptions.html#py2html.py
### maybe restrict the import to *only* this directory?
sys.path.insert(0, cfg.options.py2html_path)
import py2html
import PyFontify
except ImportError:
return None
### It doesn't escape stuff quite right, nor does it munge URLs and
### mailtos as well as we do.
html = cgi.escape(fp.read())
pp = py2html.PrettyPrint(PyFontify.fontify, "rawhtml", "color")
pp.set_mode_rawhtml_color()
html = pp.fontify(html)
html = re.sub(_re_rewrite_url, r'<a href="\1">\1</a>', html)
html = re.sub(_re_rewrite_email, r'<a href="mailto:\1">\1</a>', html)
return html
def markup_stream_php(fp, cfg):
if not cfg.options.use_php:
return None
sys.stdout.flush()
# clearing the following environment variables prevents a
# "No input file specified" error from the php cgi executable
  # when ViewVC is running under a cgi environment.  When the
  # php cli executable is used, they can be left alone.
#
#os.putenv("GATEWAY_INTERFACE", "")
#os.putenv("PATH_TRANSLATED", "")
#os.putenv("REQUEST_METHOD", "")
#os.putenv("SERVER_NAME", "")
#os.putenv("SERVER_SOFTWARE", "")
return MarkupPHP(cfg.options.php_exe_path, fp)
markup_streamers = {
# '.py' : markup_stream_python,
'.php' : markup_stream_php,
'.inc' : markup_stream_php,
}
def make_time_string(date, cfg):
"""Returns formatted date string in either local time or UTC.
The passed in 'date' variable is seconds since epoch.
"""
if date is None:
return 'Unknown date'
if cfg.options.use_localtime:
localtime = time.localtime(date)
return time.asctime(localtime) + ' ' + time.tzname[localtime[8]]
else:
return time.asctime(time.gmtime(date)) + ' UTC'
def make_rss_time_string(date, cfg):
"""Returns formatted date string in UTC, formatted for RSS.
The passed in 'date' variable is seconds since epoch.
"""
if date is None:
return 'Unknown date'
return time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(date)) + ' UTC'
def view_markup(request):
cfg = request.cfg
path, rev = _orig_path(request)
fp, revision = request.repos.openfile(path, rev)
# Since the templates could be changed by the user, we can't provide
# a strong validator for this page, so we mark the etag as weak.
if check_freshness(request, None, revision, weak=1):
fp.close()
return
data = nav_header_data(request, revision, path)
data.update({
'mime_type' : request.mime_type,
'log' : None,
'date' : None,
'ago' : None,
'author' : None,
'branches' : None,
'tags' : None,
'branch_points' : None,
'changed' : None,
'size' : None,
'state' : None,
'vendor_branch' : None,
'prev' : None,
})
if cfg.options.show_log_in_markup:
options = {'svn_latest_log': 1}
revs = request.repos.itemlog(path, revision, options)
entry = revs[-1]
data.update({
'date' : make_time_string(entry.date, cfg),
'author' : entry.author,
'changed' : entry.changed,
'log' : htmlify(entry.log),
'size' : entry.size,
})
if entry.date is not None:
data['ago'] = html_time(request, entry.date, 1)
if request.roottype == 'cvs':
branch = entry.branch_number
prev = entry.prev or entry.parent
data.update({
'state' : entry.dead and 'dead',
'prev' : prev and prev.string,
'vendor_branch' : ezt.boolean(branch and branch[2] % 2 == 1),
'branches' : string.join(map(lambda x: x.name, entry.branches), ', '),
'tags' : string.join(map(lambda x: x.name, entry.tags), ', '),
'branch_points': string.join(map(lambda x: x.name,
entry.branch_points), ', ')
})
markup_fp = None
if is_viewable_image(request.mime_type):
fp.close()
url = request.get_url(view_func=view_checkout, params={'revision': rev},
escape=1)
markup_fp = '<img src="%s" alt="" /><br />' % url
else:
basename, ext = os.path.splitext(request.path_parts[-1])
streamer = markup_streamers.get(ext)
if streamer:
markup_fp = streamer(fp, cfg)
elif cfg.options.use_enscript:
markup_fp = MarkupEnscript(cfg, fp, request.path_parts[-1])
elif cfg.options.use_highlight:
markup_fp = MarkupHighlight(cfg, fp, request.path_parts[-1])
# If no one has a suitable markup handler, we'll use the default.
if not markup_fp:
markup_fp = MarkupPipeWrapper(fp)
data['markup'] = markup_fp
request.server.header()
generate_page(request, "markup", data)
def revcmp(rev1, rev2):
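  # numeric comparison of dotted revision strings,
  # e.g. revcmp('1.10', '1.9') > 0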
rev1 = map(int, string.split(rev1, '.'))
rev2 = map(int, string.split(rev2, '.'))
return cmp(rev1, rev2)
def prepare_hidden_values(params):
"""returns variables from params encoded as a invisible HTML snippet.
"""
hidden_values = []
for name, value in params.items():
hidden_values.append('<input type="hidden" name="%s" value="%s" />' %
(name, value))
return string.join(hidden_values, '')
def sort_file_data(file_data, roottype, sortdir, sortby, group_dirs):
# convert sortdir into a sign bit
s = sortdir == "down" and -1 or 1
# in cvs, revision numbers can't be compared meaningfully between
# files, so try to do the right thing and compare dates instead
if roottype == "cvs" and sortby == "rev":
sortby = "date"
def file_sort_cmp(file1, file2, sortby=sortby, group_dirs=group_dirs, s=s):
# if we're grouping directories together, sorting is pretty
# simple. a directory sorts "higher" than a non-directory, and
# two directories are sorted as normal.
if group_dirs:
if file1.kind == vclib.DIR:
if file2.kind == vclib.DIR:
# two directories, no special handling.
pass
else:
# file1 is a directory, it sorts first.
return -1
elif file2.kind == vclib.DIR:
# file2 is a directory, it sorts first.
return 1
# we should have data on these. if not, then it is because we requested
# a specific tag and that tag is not present on the file.
if file1.rev is not None and file2.rev is not None:
# sort according to sortby
if sortby == 'rev':
return s * revcmp(file1.rev, file2.rev)
elif sortby == 'date':
return s * cmp(file2.date, file1.date) # latest date is first
elif sortby == 'log':
return s * cmp(file1.log, file2.log)
elif sortby == 'author':
return s * cmp(file1.author, file2.author)
elif file1.rev is not None:
return -1
elif file2.rev is not None:
return 1
# sort by file name
return s * cmp(file1.name, file2.name)
file_data.sort(file_sort_cmp)
def icmp(x, y):
"""case insensitive comparison"""
return cmp(string.lower(x), string.lower(y))
def view_roots(request):
data = common_template_data(request)
request.server.header()
generate_page(request, "roots", data)
def view_directory(request):
# For Subversion repositories, the revision acts as a weak validator for
# the directory listing (to take into account template changes or
# revision property changes).
if request.roottype == 'svn':
rev = request.repos._getrev(request.pathrev)
tree_rev = vclib.svn.created_rev(request.repos, request.where, rev)
if check_freshness(request, None, str(tree_rev), weak=1):
return
# List current directory
cfg = request.cfg
options = {}
if request.roottype == 'cvs':
hideattic = int(request.query_dict.get('hideattic',
cfg.options.hide_attic))
options["cvs_subdirs"] = (cfg.options.show_subdir_lastmod and
cfg.options.show_logs)
file_data = request.repos.listdir(request.path_parts, request.pathrev,
options)
# Filter file list if a regex is specified
search_re = request.query_dict.get('search', '')
if cfg.options.use_re_search and search_re:
file_data = search_files(request.repos, request.path_parts, request.pathrev,
file_data, search_re)
# Retrieve log messages, authors, revision numbers, timestamps
request.repos.dirlogs(request.path_parts, request.pathrev, file_data, options)
# sort with directories first, and using the "sortby" criteria
sortby = request.query_dict.get('sortby', cfg.options.sort_by) or 'file'
sortdir = request.query_dict.get('sortdir', 'up')
sort_file_data(file_data, request.roottype, sortdir, sortby,
cfg.options.sort_group_dirs)
# loop through entries creating rows and changing these values
rows = [ ]
num_displayed = 0
num_dead = 0
# set some values to be used inside loop
where = request.where
where_prefix = where and where + '/'
for file in file_data:
row = _item(graph_href=None, author=None, log=None, log_file=None,
log_rev=None, state=None, size=None, mime_type=None,
date=None, ago=None, view_href=None, log_href=None,
revision_href=None, annotate_href=None, download_href=None,
download_text_href=None, prefer_markup=ezt.boolean(0))
row.rev = file.rev
row.author = file.author
row.state = (request.roottype == 'cvs' and file.dead) and 'dead' or ''
if file.date is not None:
row.date = make_time_string(file.date, cfg)
row.ago = html_time(request, file.date)
if cfg.options.show_logs and file.log is not None:
row.log = format_log(file.log, cfg)
row.anchor = request.server.escape(file.name)
row.name = request.server.escape(file.name)
row.pathtype = (file.kind == vclib.FILE and 'file') or \
(file.kind == vclib.DIR and 'dir')
row.errors = file.errors
if file.kind == vclib.DIR:
if (where == '') and (cfg.is_forbidden(file.name)):
continue
if (request.roottype == 'cvs' and cfg.options.hide_cvsroot
and where == '' and file.name == 'CVSROOT'):
continue
row.view_href = request.get_url(view_func=view_directory,
where=where_prefix+file.name,
pathtype=vclib.DIR,
params={},
escape=1)
if request.roottype == 'svn':
row.revision_href = request.get_url(view_func=view_revision,
params={'revision': file.rev},
escape=1)
if request.roottype == 'cvs' and file.rev is not None:
row.rev = None
if cfg.options.show_logs:
row.log_file = file.newest_file
row.log_rev = file.rev
if request.roottype == 'svn':
row.log_href = request.get_url(view_func=view_log,
where=where_prefix + file.name,
pathtype=vclib.DIR,
params={},
escape=1)
elif file.kind == vclib.FILE:
if request.roottype == 'cvs' and file.dead:
num_dead = num_dead + 1
if hideattic:
continue
num_displayed = num_displayed + 1
file_where = where_prefix + file.name
if request.roottype == 'svn':
row.size = file.size
### for Subversion, we should first try to get this from the properties
row.mime_type = guess_mime(file.name)
row.view_href, row.download_href, row.download_text_href, \
row.annotate_href, row.revision_href, \
row.prefer_markup \
= get_file_view_info(request, file_where, file.rev, row.mime_type)
row.log_href = request.get_url(view_func=view_log,
where=file_where,
pathtype=vclib.FILE,
params={},
escape=1)
if cfg.options.use_cvsgraph and request.roottype == 'cvs':
row.graph_href = request.get_url(view_func=view_cvsgraph,
where=file_where,
pathtype=vclib.FILE,
params={},
escape=1)
rows.append(row)
# prepare the data that will be passed to the template
data = common_template_data(request)
data.update({
'entries' : rows,
'sortby' : sortby,
'sortdir' : sortdir,
'tarball_href' : None,
'search_re' : search_re and htmlify(search_re) or None,
'dir_pagestart' : None,
'sortby_file_href' : request.get_url(params={'sortby': 'file',
'sortdir': None},
escape=1),
'sortby_rev_href' : request.get_url(params={'sortby': 'rev',
'sortdir': None},
escape=1),
'sortby_date_href' : request.get_url(params={'sortby': 'date',
'sortdir': None},
escape=1),
'sortby_author_href' : request.get_url(params={'sortby': 'author',
'sortdir': None},
escape=1),
'sortby_log_href' : request.get_url(params={'sortby': 'log',
'sortdir': None},
escape=1),
'files_shown' : num_displayed,
'num_dead' : num_dead,
'youngest_rev' : None,
'youngest_rev_href' : None,
'selection_form' : None,
'queryform_href' : None,
'attic_showing' : None,
'show_attic_href' : None,
'hide_attic_href' : None,
'branch_tags': None,
'plain_tags': None,
})
# clicking on sort column reverses sort order
if sortdir == 'down':
revsortdir = None # 'up'
else:
revsortdir = 'down'
if sortby in ['file', 'rev', 'date', 'log', 'author']:
data['sortby_%s_href' % sortby] = request.get_url(params={'sortdir':
revsortdir},
escape=1)
# set cvs-specific fields
if request.roottype == 'cvs':
plain_tags = options['cvs_tags']
plain_tags.sort(icmp)
plain_tags.reverse()
branch_tags = options['cvs_branches']
branch_tags.sort(icmp)
branch_tags.reverse()
data.update({
'attic_showing' : ezt.boolean(not hideattic),
'show_attic_href' : request.get_url(params={'hideattic': 0}, escape=1),
'hide_attic_href' : request.get_url(params={'hideattic': 1}, escape=1),
'branch_tags': branch_tags,
'plain_tags': plain_tags,
})
# set svn-specific fields
elif request.roottype == 'svn':
data['tree_rev'] = tree_rev
data['tree_rev_href'] = request.get_url(view_func=view_revision,
params={'revision': tree_rev},
escape=1)
data['youngest_rev'] = vclib.svn.get_youngest_revision(request.repos)
data['youngest_rev_href'] = request.get_url(view_func=view_revision,
params={},
escape=1)
if is_query_supported(request):
params = {}
if request.roottype == 'cvs' and request.pathrev:
params['branch'] = request.pathrev
data['queryform_href'] = request.get_url(view_func=view_queryform,
params=params,
escape=1)
if cfg.options.use_pagesize:
data['dir_paging_action'], data['dir_paging_hidden_values'] = \
request.get_form(params={'dir_pagestart': None})
if cfg.options.allow_tar:
data['tarball_href'] = request.get_url(view_func=download_tarball,
params={},
escape=1)
pathrev_form(request, data)
### one day, if EZT has "or" capability, we can lose this
data['search_re_form'] = ezt.boolean(cfg.options.use_re_search
and (num_displayed > 0 or search_re))
if data['search_re_form']:
data['search_re_action'], data['search_re_hidden_values'] = \
request.get_form(params={'search': None})
if cfg.options.use_pagesize:
data['dir_pagestart'] = int(request.query_dict.get('dir_pagestart',0))
data['entries'] = paging(data, 'entries', data['dir_pagestart'], 'name',
cfg.options.use_pagesize)
request.server.header()
generate_page(request, "directory", data)
def paging(data, key, pagestart, local_name, pagesize):
# Implement paging
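  # e.g. 250 entries with pagesize 100 produce picklist pages starting at
  # indices 0, 100 and 200, and only the requested slice is returned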
# Create the picklist
picklist = data['picklist'] = []
for i in range(0, len(data[key]), pagesize):
pick = _item(start=None, end=None, count=None)
pick.start = getattr(data[key][i], local_name)
pick.count = i
pick.page = (i / pagesize) + 1
try:
pick.end = getattr(data[key][i+pagesize-1], local_name)
except IndexError:
pick.end = getattr(data[key][-1], local_name)
picklist.append(pick)
data['picklist_len'] = len(picklist)
  # Need to fix:
  # pagestart can be greater than the length of data[key] if you
  # select a tag or search while on a page other than the first.
  # We should reset to the first page, but this test won't do that every
  # time that it is needed.
  # The problem might go away if we don't hide non-matching files when
  # selecting for tags or searching.
if pagestart > len(data[key]):
pagestart = 0
pageend = pagestart + pagesize
# Slice
return data[key][pagestart:pageend]
def pathrev_form(request, data):
lastrev = None
if request.roottype == 'svn':
data['pathrev_action'], data['pathrev_hidden_values'] = \
request.get_form(view_func=redirect_pathrev,
params={'pathrev': None,
'orig_path': request.where,
'orig_pathtype': request.pathtype,
'orig_pathrev': request.pathrev,
'orig_view': _view_codes.get(request.view_func)})
if request.pathrev:
youngest = vclib.svn.get_youngest_revision(request.repos)
lastrev = vclib.svn.last_rev(request.repos, request.where,
request.pathrev, youngest)[0]
if lastrev == youngest:
lastrev = None
data['pathrev'] = request.pathrev
data['lastrev'] = lastrev
action, hidden_values = request.get_form(params={'pathrev': lastrev})
if request.roottype != 'svn':
data['pathrev_action'] = action
data['pathrev_hidden_values'] = hidden_values
data['pathrev_clear_action'] = action
data['pathrev_clear_hidden_values'] = hidden_values
return lastrev
def redirect_pathrev(request):
assert request.roottype == 'svn'
new_pathrev = request.query_dict.get('pathrev') or None
path = request.query_dict.get('orig_path', '')
pathtype = request.query_dict.get('orig_pathtype')
pathrev = request.query_dict.get('orig_pathrev')
view = _views.get(request.query_dict.get('orig_view'))
youngest = vclib.svn.get_youngest_revision(request.repos)
  # accept revision numbers higher than youngest by clamping them to youngest
try:
new_pathrev = int(new_pathrev)
except ValueError:
pass
except TypeError:
pass
else:
if new_pathrev > youngest:
new_pathrev = youngest
if _repos_pathtype(request.repos, _path_parts(path), new_pathrev):
pathrev = new_pathrev
else:
pathrev, path = vclib.svn.last_rev(request.repos, path, pathrev,
new_pathrev)
# allow clearing sticky revision by submitting empty string
if new_pathrev is None and pathrev == youngest:
pathrev = None
request.server.redirect(request.get_url(view_func=view,
where=path,
pathtype=pathtype,
params={'pathrev': pathrev}))
def logsort_date_cmp(rev1, rev2):
# sort on date; secondary on revision number
return -cmp(rev1.date, rev2.date) or -cmp(rev1.number, rev2.number)
def logsort_rev_cmp(rev1, rev2):
# sort highest revision first
return -cmp(rev1.number, rev2.number)
def view_log(request):
cfg = request.cfg
diff_format = request.query_dict.get('diff_format', cfg.options.diff_format)
logsort = request.query_dict.get('logsort', cfg.options.log_sort)
pathtype = request.pathtype
if pathtype is vclib.DIR and request.roottype == 'cvs':
raise debug.ViewVCException('Unsupported feature: log view on CVS '
'directory', '400 Bad Request')
options = {}
options['svn_show_all_dir_logs'] = 1 ### someday make this optional?
options['svn_cross_copies'] = cfg.options.cross_copies
show_revs = request.repos.itemlog(request.path_parts, request.pathrev,
options)
if logsort == 'date':
show_revs.sort(logsort_date_cmp)
elif logsort == 'rev':
show_revs.sort(logsort_rev_cmp)
else:
# no sorting
pass
# selected revision
selected_rev = request.query_dict.get('r1')
entries = [ ]
name_printed = { }
cvs = request.roottype == 'cvs'
for rev in show_revs:
entry = _item()
entry.rev = rev.string
entry.state = (cvs and rev.dead and 'dead')
entry.author = rev.author
entry.changed = rev.changed
entry.date = make_time_string(rev.date, cfg)
entry.ago = None
if rev.date is not None:
entry.ago = html_time(request, rev.date, 1)
entry.log = htmlify(rev.log or "")
entry.size = rev.size
entry.branch_point = None
entry.next_main = None
entry.orig_path = None
entry.copy_path = None
entry.view_href = None
entry.download_href = None
entry.download_text_href = None
entry.annotate_href = None
entry.revision_href = None
entry.sel_for_diff_href = None
entry.diff_to_sel_href = None
entry.diff_to_prev_href = None
entry.diff_to_branch_href = None
entry.diff_to_main_href = None
if request.roottype == 'cvs':
prev = rev.prev or rev.parent
entry.prev = prev and prev.string
branch = rev.branch_number
entry.vendor_branch = ezt.boolean(branch and branch[2] % 2 == 1)
entry.branches = prep_tags(request, rev.branches)
entry.tags = prep_tags(request, rev.tags)
entry.branch_points = prep_tags(request, rev.branch_points)
entry.tag_names = map(lambda x: x.name, rev.tags)
if branch and not name_printed.has_key(branch):
entry.branch_names = map(lambda x: x.name, rev.branches)
name_printed[branch] = 1
else:
entry.branch_names = [ ]
if rev.parent and rev.parent is not prev and not entry.vendor_branch:
entry.branch_point = rev.parent.string
# if it's the last revision on a branch then diff against the
# last revision on the higher branch (e.g. change is committed and
# brought over to -stable)
if not rev.next and rev.parent and rev.parent.next:
r = rev.parent.next
while r.next:
r = r.next
entry.next_main = r.string
elif request.roottype == 'svn':
entry.prev = rev.prev and rev.prev.string
entry.branches = entry.tags = entry.branch_points = [ ]
entry.tag_names = entry.branch_names = [ ]
entry.vendor_branch = None
if rev.filename != request.where:
entry.orig_path = rev.filename
entry.copy_path = rev.copy_path
entry.copy_rev = rev.copy_rev
if entry.orig_path:
entry.orig_href = request.get_url(view_func=view_log,
where=entry.orig_path,
pathtype=vclib.FILE,
params={'pathrev': rev.string},
escape=1)
if rev.copy_path:
entry.copy_href = request.get_url(view_func=view_log,
where=rev.copy_path,
pathtype=vclib.FILE,
params={'pathrev': rev.copy_rev},
escape=1)
# view/download links
if pathtype is vclib.FILE:
entry.view_href, entry.download_href, entry.download_text_href, \
entry.annotate_href, entry.revision_href, entry.prefer_markup \
= get_file_view_info(request, request.where, rev.string,
request.mime_type)
else:
entry.revision_href = request.get_url(view_func=view_revision,
params={'revision': rev.string},
escape=1)
entry.view_href = request.get_url(view_func=view_directory,
where=rev.filename,
pathtype=vclib.DIR,
params={'pathrev': rev.string},
escape=1)
# calculate diff links
if selected_rev != entry.rev:
entry.sel_for_diff_href = \
request.get_url(view_func=view_log, params={'r1': entry.rev}, escape=1)
if entry.prev is not None:
entry.diff_to_prev_href = \
request.get_url(view_func=view_diff,
params={'r1': entry.prev,
'r2': entry.rev,
'diff_format': None},
escape=1)
if selected_rev and \
selected_rev != str(entry.rev) and \
selected_rev != str(entry.prev) and \
selected_rev != str(entry.branch_point) and \
selected_rev != str(entry.next_main):
entry.diff_to_sel_href = \
request.get_url(view_func=view_diff,
params={'r1': selected_rev,
'r2': entry.rev,
'diff_format': None},
escape=1)
if entry.next_main:
entry.diff_to_main_href = \
request.get_url(view_func=view_diff,
params={'r1': entry.next_main,
'r2': entry.rev,
'diff_format': None},
escape=1)
if entry.branch_point:
entry.diff_to_branch_href = \
request.get_url(view_func=view_diff,
params={'r1': entry.branch_point,
'r2': entry.rev,
'diff_format': None},
escape=1)
# Save our escaping until the end so stuff above works
if entry.orig_path:
entry.orig_path = request.server.escape(entry.orig_path)
if entry.copy_path:
entry.copy_path = request.server.escape(entry.copy_path)
entries.append(entry)
data = common_template_data(request)
data.update({
'default_branch' : None,
'mime_type' : request.mime_type,
'rev_selected' : selected_rev,
'diff_format' : diff_format,
'logsort' : logsort,
'human_readable' : ezt.boolean(diff_format in ('h', 'l')),
'log_pagestart' : None,
'entries': entries,
'prefer_markup' : ezt.boolean(0),
'view_href' : None,
'download_href': None,
'download_text_href': None,
'annotate_href': None,
'tag_prefer_markup' : ezt.boolean(0),
'tag_view_href' : None,
'tag_download_href': None,
'tag_download_text_href': None,
'tag_annotate_href': None,
})
lastrev = pathrev_form(request, data)
if cfg.options.use_pagesize:
data['log_paging_action'], data['log_paging_hidden_values'] = \
request.get_form(params={'log_pagestart': None})
data['diff_select_action'], data['diff_select_hidden_values'] = \
request.get_form(view_func=view_diff,
params={'r1': None, 'r2': None, 'tr1': None,
'tr2': None, 'diff_format': None})
data['logsort_action'], data['logsort_hidden_values'] = \
request.get_form(params={'logsort': None})
if pathtype is vclib.FILE:
if not request.pathrev or lastrev is None:
view_href, download_href, download_text_href, \
annotate_href, revision_href, prefer_markup \
= get_file_view_info(request, request.where, None,
request.mime_type, None)
data.update({
'view_href': view_href,
'download_href': download_href,
'download_text_href': download_text_href,
'annotate_href': annotate_href,
'prefer_markup': prefer_markup,
})
if request.pathrev and request.roottype == 'cvs':
view_href, download_href, download_text_href, \
annotate_href, revision_href, prefer_markup \
= get_file_view_info(request, request.where, None, request.mime_type)
data.update({
'tag_view_href': view_href,
'tag_download_href': download_href,
'tag_download_text_href': download_text_href,
'tag_annotate_href': annotate_href,
'tag_prefer_markup': prefer_markup,
})
else:
if not request.pathrev:
data['view_href'] = request.get_url(view_func=view_directory,
params={}, escape=1)
taginfo = options.get('cvs_tags', {})
tagitems = taginfo.items()
tagitems.sort()
tagitems.reverse()
main = taginfo.get('MAIN')
if main:
# Default branch may have multiple names so we list them
branches = []
for branch in main.aliases:
# Don't list MAIN
if branch is not main:
branches.append(branch)
data['default_branch'] = prep_tags(request, branches)
data['tags'] = tags = [ ]
data['branch_tags'] = branch_tags = []
data['plain_tags'] = plain_tags = []
for tag, rev in tagitems:
if rev.co_rev:
tags.append(_item(rev=rev.co_rev.string, name=tag))
if rev.is_branch:
branch_tags.append(tag)
else:
plain_tags.append(tag)
if cfg.options.use_pagesize:
data['log_pagestart'] = int(request.query_dict.get('log_pagestart',0))
data['entries'] = paging(data, 'entries', data['log_pagestart'],
'revision', cfg.options.use_pagesize)
request.server.header()
generate_page(request, "log", data)
def view_checkout(request):
path, rev = _orig_path(request)
fp, revision = request.repos.openfile(path, rev)
# The revision number acts as a strong validator.
if not check_freshness(request, None, revision):
request.server.header(request.query_dict.get('content-type')
or request.mime_type or 'text/plain')
copy_stream(fp)
fp.close()
def view_annotate(request):
if not request.cfg.options.allow_annotate:
raise debug.ViewVCException('Annotation view is disabled',
'403 Forbidden')
path, rev = _orig_path(request, 'annotate')
### be nice to hook this into the template...
import blame
diff_url = request.get_url(view_func=view_diff,
params={'r1': None, 'r2': None},
escape=1, partial=1)
include_url = request.get_url(view_func=view_log, where='/WHERE/',
pathtype=vclib.FILE, params={}, escape=1)
source, revision = blame.blame(request.repos, path,
diff_url, include_url, rev)
data = nav_header_data(request, revision, path)
data['lines'] = source
request.server.header()
generate_page(request, "annotate", data)
def view_cvsgraph_image(request):
"output the image rendered by cvsgraph"
# this function is derived from cgi/cvsgraphmkimg.cgi
cfg = request.cfg
if not cfg.options.use_cvsgraph:
raise debug.ViewVCException('Graph view is disabled', '403 Forbidden')
request.server.header('image/png')
rcsfile = request.repos.rcsfile(request.path_parts)
fp = popen.popen(os.path.normpath(os.path.join(cfg.options.cvsgraph_path,
'cvsgraph')),
("-c", _install_path(cfg.options.cvsgraph_conf),
"-r", request.repos.rootpath,
rcsfile), 'rb', 0)
copy_stream(fp)
fp.close()
def view_cvsgraph(request):
"output a page containing an image rendered by cvsgraph"
cfg = request.cfg
if not cfg.options.use_cvsgraph:
raise debug.ViewVCException('Graph view is disabled', '403 Forbidden')
data = common_template_data(request)
  # Required only if cvsgraph needs to find its supporting libraries.
# Uncomment and set accordingly if required.
#os.environ['LD_LIBRARY_PATH'] = '/usr/lib:/usr/local/lib'
imagesrc = request.get_url(view_func=view_cvsgraph_image, escape=1)
view = default_view(request.mime_type, cfg)
up_where = _path_join(request.path_parts[:-1])
# Create an image map
rcsfile = request.repos.rcsfile(request.path_parts)
fp = popen.popen(os.path.join(cfg.options.cvsgraph_path, 'cvsgraph'),
("-i",
"-c", _install_path(cfg.options.cvsgraph_conf),
"-r", request.repos.rootpath,
"-x", "x",
"-3", request.get_url(view_func=view_log, params={},
escape=1),
"-4", request.get_url(view_func=view,
params={'revision': None},
escape=1, partial=1),
"-5", request.get_url(view_func=view_diff,
params={'r1': None, 'r2': None},
escape=1, partial=1),
"-6", request.get_url(view_func=view_directory,
where=up_where,
pathtype=vclib.DIR,
params={'pathrev': None},
escape=1, partial=1),
rcsfile), 'rb', 0)
data.update({
'imagemap' : fp,
'imagesrc' : imagesrc,
})
request.server.header()
generate_page(request, "graph", data)
def search_files(repos, path_parts, rev, files, search_re):
""" Search files in a directory for a regular expression.
Does a check-out of each file in the directory. Only checks for
the first match.
"""
# Pass in search regular expression. We check out
# each file and look for the regular expression. We then return the data
# for all files that match the regex.
# Compile to make sure we do this as fast as possible.
searchstr = re.compile(search_re)
# Will become list of files that have at least one match.
# new_file_list also includes directories.
new_file_list = [ ]
# Loop on every file (and directory)
for file in files:
# Is this a directory? If so, append name to new_file_list
# and move to next file.
if file.kind != vclib.FILE:
new_file_list.append(file)
continue
# Only files at this point
# Shouldn't search binary files, or should we?
# Should allow all text mime types to pass.
if not is_text(guess_mime(file.name)):
continue
# Only text files at this point
# Assign contents of checked out file to fp.
fp = repos.openfile(path_parts + [file.name], rev)[0]
# Read in each line, use re.search to search line.
# If successful, add file to new_file_list and break.
while 1:
line = fp.readline()
if not line:
break
if searchstr.search(line):
new_file_list.append(file)
# close down the pipe (and wait for the child to terminate)
fp.close()
break
return new_file_list
def view_doc(request):
"""Serve ViewVC static content locally.
Using this avoids the need for modifying the setup of the web server.
"""
document = request.where
filename = _install_path(os.path.join(request.cfg.options.template_dir,
"docroot", document))
# Stat the file to get content length and last-modified date.
try:
info = os.stat(filename)
except OSError, v:
raise debug.ViewVCException('Static file "%s" not available\n(%s)'
% (document, str(v)), '404 Not Found')
content_length = str(info[stat.ST_SIZE])
last_modified = info[stat.ST_MTIME]
# content_length + mtime makes a pretty good etag.
if check_freshness(request, last_modified,
"%s-%s" % (content_length, last_modified)):
return
try:
fp = open(filename, "rb")
except IOError, v:
raise debug.ViewVCException('Static file "%s" not available\n(%s)'
% (document, str(v)), '404 Not Found')
request.server.addheader('Content-Length', content_length)
if document[-3:] == 'png':
request.server.header('image/png')
elif document[-3:] == 'jpg':
request.server.header('image/jpeg')
elif document[-3:] == 'gif':
request.server.header('image/gif')
elif document[-3:] == 'css':
request.server.header('text/css')
else: # assume HTML:
request.server.header()
copy_stream(fp)
fp.close()
def rcsdiff_date_reformat(date_str, cfg):
if date_str is None:
return None
try:
date = compat.cvs_strptime(date_str)
except ValueError:
return date_str
return make_time_string(compat.timegm(date), cfg)
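# regexes for parsing rcsdiff output: _re_extract_rev matches file header
# lines such as '--- foo.c<TAB>2004/01/01 00:00:00<TAB>1.5' (filename here is
# illustrative), and _re_extract_info matches hunk headers such as
# '@@ -10,7 +10,8 @@'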
_re_extract_rev = re.compile(r'^[-+*]{3} [^\t]+\t([^\t]+)\t((\d+\.)*\d+)$')
_re_extract_info = re.compile(r'@@ \-([0-9]+).*\+([0-9]+).*@@(.*)')
def spaced_html_text(text, cfg):
text = string.expandtabs(string.rstrip(text))
hr_breakable = cfg.options.hr_breakable
# in the code below, "\x01" will be our stand-in for "&". We don't want
# to insert "&" because it would get escaped by htmlify(). Similarly,
# we use "\x02" as a stand-in for "<br>"
if hr_breakable > 1 and len(text) > hr_breakable:
text = re.sub('(' + ('.' * hr_breakable) + ')', '\\1\x02', text)
if hr_breakable:
# make every other space "breakable"
text = string.replace(text, ' ', ' \x01nbsp;')
else:
text = string.replace(text, ' ', '\x01nbsp;')
text = htmlify(text)
text = string.replace(text, '\x01', '&')
text = string.replace(text, '\x02', '<span style="color:red">\</span><br />')
return text
class DiffSource:
def __init__(self, fp, cfg):
self.fp = fp
self.cfg = cfg
self.save_line = None
self.line_number = None
# keep track of where we are during an iteration
self.idx = -1
self.last = None
# these will be set once we start reading
self.state = 'no-changes'
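    # state is one of 'no-changes', 'dump', 'pre-change-add',
    # 'pre-change-remove', a 'flush-*' variant of those, or 'done'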
self.left_col = [ ]
self.right_col = [ ]
def __getitem__(self, idx):
if idx == self.idx:
return self.last
if idx != self.idx + 1:
raise DiffSequencingError()
# keep calling _get_row until it gives us something. sometimes, it
# doesn't return a row immediately because it is accumulating changes.
# when it is out of data, _get_row will raise IndexError.
while 1:
item = self._get_row()
if item:
self.idx = idx
self.last = item
return item
def _get_row(self):
if self.state[:5] == 'flush':
item = self._flush_row()
if item:
return item
self.state = 'dump'
if self.save_line:
line = self.save_line
self.save_line = None
else:
line = self.fp.readline()
if not line:
if self.state == 'no-changes':
self.state = 'done'
return _item(type='no-changes')
# see if there are lines to flush
if self.left_col or self.right_col:
# move into the flushing state
self.state = 'flush-' + self.state
return None
# nothing more to return
raise IndexError
if line[:2] == '@@':
self.state = 'dump'
self.left_col = [ ]
self.right_col = [ ]
match = _re_extract_info.match(line)
self.line_number = int(match.group(2)) - 1
return _item(type='header',
line_info_left=match.group(1),
line_info_right=match.group(2),
line_info_extra=match.group(3))
if line[0] == '\\':
# \ No newline at end of file
# move into the flushing state. note: it doesn't matter if we really
# have data to flush or not; that will be figured out later
self.state = 'flush-' + self.state
return None
diff_code = line[0]
output = spaced_html_text(line[1:], self.cfg)
if diff_code == '+':
if self.state == 'dump':
self.line_number = self.line_number + 1
return _item(type='add', right=output, line_number=self.line_number)
self.state = 'pre-change-add'
self.right_col.append(output)
return None
if diff_code == '-':
self.state = 'pre-change-remove'
self.left_col.append(output)
      return None # early exit; removed lines don't advance line_number
if self.left_col or self.right_col:
# save the line for processing again later, and move into the
# flushing state
self.save_line = line
self.state = 'flush-' + self.state
return None
self.line_number = self.line_number + 1
return _item(type='context', left=output, right=output,
line_number=self.line_number)
def _flush_row(self):
if not self.left_col and not self.right_col:
# nothing more to flush
return None
if self.state == 'flush-pre-change-remove':
return _item(type='remove', left=self.left_col.pop(0))
# state == flush-pre-change-add
item = _item(type='change',
have_left=ezt.boolean(0),
have_right=ezt.boolean(0))
if self.left_col:
item.have_left = ezt.boolean(1)
item.left = self.left_col.pop(0)
if self.right_col:
self.line_number = self.line_number + 1
item.have_right = ezt.boolean(1)
item.right = self.right_col.pop(0)
item.line_number = self.line_number
return item
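# Illustrative sketch (not part of the original source): DiffSource exposes
# the old sequential-__getitem__ iteration protocol, so it can be looped over
# directly (iteration stops on the IndexError raised in _get_row).  'fp' is
# assumed to be a file object yielding unified-diff text, such as the stream
# returned by repos.rawdiff().
#
#     for row in DiffSource(fp, cfg):
#         if row.type == 'context':
#             print row.line_number, row.left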
class DiffSequencingError(Exception):
pass
def diff_parse_headers(fp, diff_type, rev1, rev2, sym1=None, sym2=None):
date1 = date2 = log_rev1 = log_rev2 = flag = None
header_lines = []
if diff_type == vclib.UNIFIED:
f1 = '--- '
f2 = '+++ '
elif diff_type == vclib.CONTEXT:
f1 = '*** '
f2 = '--- '
else:
f1 = f2 = None
# If we're parsing headers, then parse and tweak the diff headers,
# collecting them in an array until we've read and handled them all.
if f1 and f2:
parsing = 1
len_f1 = len(f1)
len_f2 = len(f2)
while parsing:
line = fp.readline()
if not line:
break
if line[:len(f1)] == f1:
match = _re_extract_rev.match(line)
if match:
date1 = match.group(1)
log_rev1 = match.group(2)
if sym1:
line = line[:-1] + ' %s\n' % sym1
elif line[:len(f2)] == f2:
match = _re_extract_rev.match(line)
if match:
date2 = match.group(1)
log_rev2 = match.group(2)
if sym2:
line = line[:-1] + ' %s\n' % sym2
parsing = 0
elif line[:3] == 'Bin':
flag = _RCSDIFF_IS_BINARY
parsing = 0
elif (string.find(line, 'not found') != -1 or
string.find(line, 'illegal option') != -1):
flag = _RCSDIFF_ERROR
parsing = 0
header_lines.append(line)
if (log_rev1 and log_rev1 != rev1):
raise debug.ViewVCException('rcsdiff found revision %s, but expected '
'revision %s' % (log_rev1, rev1),
'500 Internal Server Error')
if (log_rev2 and log_rev2 != rev2):
raise debug.ViewVCException('rcsdiff found revision %s, but expected '
'revision %s' % (log_rev2, rev2),
'500 Internal Server Error')
return date1, date2, flag, string.join(header_lines, '')
def _get_diff_path_parts(request, query_key, rev, base_rev):
if request.query_dict.has_key(query_key):
parts = _path_parts(request.query_dict[query_key])
elif request.roottype == 'svn':
try:
repos = request.repos
parts = _path_parts(vclib.svn.get_location(repos, request.where,
repos._getrev(base_rev),
repos._getrev(rev)))
except vclib.InvalidRevision:
raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
'to diff', '400 Bad Request')
except vclib.ItemNotFound:
raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
'to diff', '400 Bad Request')
else:
parts = request.path_parts
return parts
def setup_diff(request):
query_dict = request.query_dict
rev1 = r1 = query_dict['r1']
rev2 = r2 = query_dict['r2']
sym1 = sym2 = None
# hack on the diff revisions
if r1 == 'text':
rev1 = query_dict.get('tr1', None)
if not rev1:
raise debug.ViewVCException('Missing revision from the diff '
'form text field', '400 Bad Request')
else:
idx = string.find(r1, ':')
if idx == -1:
rev1 = r1
else:
rev1 = r1[:idx]
sym1 = r1[idx+1:]
if r2 == 'text':
rev2 = query_dict.get('tr2', None)
if not rev2:
raise debug.ViewVCException('Missing revision from the diff '
'form text field', '400 Bad Request')
sym2 = ''
else:
idx = string.find(r2, ':')
if idx == -1:
rev2 = r2
else:
rev2 = r2[:idx]
sym2 = r2[idx+1:]
if request.roottype == 'svn':
rev1 = str(request.repos._getrev(rev1))
rev2 = str(request.repos._getrev(rev2))
p1 = _get_diff_path_parts(request, 'p1', rev1, request.pathrev)
p2 = _get_diff_path_parts(request, 'p2', rev2, request.pathrev)
try:
if revcmp(rev1, rev2) > 0:
rev1, rev2 = rev2, rev1
sym1, sym2 = sym2, sym1
p1, p2 = p2, p1
except ValueError:
raise debug.ViewVCException('Invalid revision(s) passed to diff',
'400 Bad Request')
return p1, p2, rev1, rev2, sym1, sym2
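# Illustrative note (not part of the original source): the r1/r2 query
# parameters accepted by setup_diff() take one of three forms --
#
#     r1=1.5              a plain revision
#     r1=1.5:MY_TAG       a revision plus the symbolic name used to select it
#     r1=text&tr1=1.5     'text' means the revision is in the tr1 text field
#
# The revision numbers and tag above are only examples.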
def view_patch(request):
cfg = request.cfg
query_dict = request.query_dict
p1, p2, rev1, rev2, sym1, sym2 = setup_diff(request)
# In the absence of a format dictation in the CGI params, we'll let
# use the configured diff format, allowing 'c' to mean 'c' and
# anything else to mean 'u'.
format = query_dict.get('diff_format',
cfg.options.diff_format == 'c' and 'c' or 'u')
if format == 'c':
diff_type = vclib.CONTEXT
elif format == 'u':
diff_type = vclib.UNIFIED
else:
raise debug.ViewVCException('Diff format %s not understood'
% format, '400 Bad Request')
try:
fp = request.repos.rawdiff(p1, rev1, p2, rev2, diff_type)
except vclib.InvalidRevision:
raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
'to diff', '400 Bad Request')
date1, date2, flag, headers = diff_parse_headers(fp, diff_type, rev1, rev2,
sym1, sym2)
request.server.header('text/plain')
sys.stdout.write(headers)
copy_stream(fp)
fp.close()
def view_diff(request):
cfg = request.cfg
query_dict = request.query_dict
p1, p2, rev1, rev2, sym1, sym2 = setup_diff(request)
# since templates are in use and subversion allows changes to the dates,
# we can't provide a strong etag
if check_freshness(request, None, '%s-%s' % (rev1, rev2), weak=1):
return
diff_type = None
diff_options = {}
human_readable = 0
format = query_dict.get('diff_format', cfg.options.diff_format)
if format == 'c':
diff_type = vclib.CONTEXT
elif format == 's':
diff_type = vclib.SIDE_BY_SIDE
elif format == 'l':
diff_type = vclib.UNIFIED
diff_options['context'] = 15
human_readable = 1
elif format == 'h':
diff_type = vclib.UNIFIED
human_readable = 1
elif format == 'u':
diff_type = vclib.UNIFIED
else:
raise debug.ViewVCException('Diff format %s not understood'
% format, '400 Bad Request')
if human_readable:
diff_options['funout'] = cfg.options.hr_funout
diff_options['ignore_white'] = cfg.options.hr_ignore_white
diff_options['ignore_keyword_subst'] = cfg.options.hr_ignore_keyword_subst
try:
fp = sidebyside = unified = None
if (cfg.options.hr_intraline and idiff
and ((human_readable and idiff.sidebyside)
or (not human_readable and diff_type == vclib.UNIFIED))):
f1 = request.repos.openfile(p1, rev1)[0]
try:
lines_left = f1.readlines()
finally:
f1.close()
f2 = request.repos.openfile(p2, rev2)[0]
try:
lines_right = f2.readlines()
finally:
f2.close()
if human_readable:
sidebyside = idiff.sidebyside(lines_left, lines_right,
diff_options.get("context", 5))
else:
unified = idiff.unified(lines_left, lines_right,
diff_options.get("context", 2))
else:
fp = request.repos.rawdiff(p1, rev1, p2, rev2, diff_type, diff_options)
except vclib.InvalidRevision:
raise debug.ViewVCException('Invalid path(s) or revision(s) passed '
'to diff', '400 Bad Request')
data = common_template_data(request)
data.update({
'path_left': _path_join(p1),
'path_right': _path_join(p2),
'rev_left' : rev1,
'rev_right' : rev2,
'tag_left' : sym1,
'tag_right' : sym2,
'diff_format' : request.query_dict.get('diff_format',
cfg.options.diff_format),
'annotate_href' : None,
})
orig_params = request.query_dict.copy()
orig_params['diff_format'] = None
data['diff_format_action'], data['diff_format_hidden_values'] = \
request.get_form(params=orig_params)
data['patch_href'] = request.get_url(view_func=view_patch,
params=orig_params,
escape=1)
if request.cfg.options.allow_annotate:
data['annotate_href'] = request.get_url(view_func=view_annotate,
where=_path_join(p2),
pathtype=vclib.FILE,
params={'annotate': rev2},
escape=1)
if fp:
date1, date2, flag, headers = diff_parse_headers(fp, diff_type, rev1, rev2,
sym1, sym2)
else:
date1 = date2 = flag = headers = None
raw_diff_fp = changes = None
if fp:
if human_readable:
if flag is not None:
changes = [ _item(type=flag) ]
else:
changes = DiffSource(fp, cfg)
else:
raw_diff_fp = MarkupPipeWrapper(fp, htmlify(headers), None, 1)
data.update({
'date_left' : rcsdiff_date_reformat(date1, cfg),
'date_right' : rcsdiff_date_reformat(date2, cfg),
'raw_diff' : raw_diff_fp,
'changes' : changes,
'sidebyside': sidebyside,
'unified': unified,
})
request.server.header()
generate_page(request, "diff", data)
def generate_tarball_header(out, name, size=0, mode=None, mtime=0,
uid=0, gid=0, typefrag=None, linkname='',
uname='viewvc', gname='viewvc',
devmajor=1, devminor=0, prefix=None,
magic='ustar', version='', chksum=None):
if not mode:
if name[-1:] == '/':
mode = 0755
else:
mode = 0644
if not typefrag:
if name[-1:] == '/':
typefrag = '5' # directory
else:
typefrag = '0' # regular file
if not prefix:
prefix = ''
block1 = struct.pack('100s 8s 8s 8s 12s 12s',
name,
'%07o' % mode,
'%07o' % uid,
'%07o' % gid,
'%011o' % size,
'%011o' % mtime)
block2 = struct.pack('c 100s 6s 2s 32s 32s 8s 8s 155s',
typefrag,
linkname,
magic,
version,
uname,
gname,
'%07o' % devmajor,
'%07o' % devminor,
prefix)
if not chksum:
dummy_chksum = ' '
block = block1 + dummy_chksum + block2
chksum = 0
for i in range(len(block)):
chksum = chksum + ord(block[i])
block = block1 + struct.pack('8s', '%07o' % chksum) + block2
block = block + '\0' * (512 - len(block))
out.write(block)
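# Illustrative sketch (not part of the original source): writing a header for
# a directory entry.  'out' is assumed to be any object with a write() method;
# a trailing '/' in the name selects directory mode (0755) and typeflag '5'.
#
#     generate_tarball_header(out, 'myproject/', mtime=1000000000)
#
# Every header is padded to a full 512-byte tar block before being written.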
def generate_tarball(out, request, reldir, stack):
# get directory info from repository
rep_path = request.path_parts + reldir
entries = request.repos.listdir(rep_path, request.pathrev, {})
request.repos.dirlogs(rep_path, request.pathrev, entries, {})
entries.sort(lambda a, b: cmp(a.name, b.name))
# figure out corresponding path in tar file. everything gets put underneath
# a single top level directory named after the repository directory being
# tarred
if request.path_parts:
tar_dir = request.path_parts[-1] + '/'
else:
tar_dir = request.rootname + '/'
if reldir:
tar_dir = tar_dir + _path_join(reldir) + '/'
# Subdirectory datestamps will be the youngest of the datestamps of
# version items (files for CVS, files or dirs for Subversion) in
# that subdirectory.
latest_date = 0
cvs = request.roottype == 'cvs'
for file in entries:
# Skip dead or busted CVS files, and CVS subdirs.
if (cvs and (file.kind != vclib.FILE or (file.rev is None or file.dead))):
continue
if file.date > latest_date:
latest_date = file.date
# push directory onto stack. it will only be included in the tarball if
# files are found underneath it
stack.append(tar_dir)
for file in entries:
if (file.kind != vclib.FILE or
(cvs and (file.rev is None or file.dead))):
continue
for dir in stack:
generate_tarball_header(out, dir, mtime=latest_date)
del stack[:]
if cvs:
info = os.stat(file.path)
mode = (info[stat.ST_MODE] & 0555) | 0200
else:
mode = 0644
### read the whole file into memory? bad... better to do 2 passes
fp = request.repos.openfile(rep_path + [file.name], request.pathrev)[0]
contents = fp.read()
fp.close()
generate_tarball_header(out, tar_dir + file.name,
len(contents), mode, file.date)
out.write(contents)
out.write('\0' * (511 - ((len(contents) + 511) % 512)))
# recurse into subdirectories
for file in entries:
if file.errors or file.kind != vclib.DIR:
continue
# skip forbidden/hidden directories (top-level only)
if not rep_path:
if (request.cfg.is_forbidden(file.name)
or (cvs and request.cfg.options.hide_cvsroot
and file.name == 'CVSROOT')):
continue
generate_tarball(out, request, reldir + [file.name], stack)
# pop directory (if it's being pruned. otherwise stack is already empty)
del stack[-1:]
def download_tarball(request):
if not request.cfg.options.allow_tar:
raise debug.ViewVCException('Tarball generation is disabled',
'403 Forbidden')
### look for GZIP binary
request.server.header('application/octet-stream')
sys.stdout.flush()
fp = popen.pipe_cmds([('gzip', '-c', '-n')])
generate_tarball(fp, request, [], [])
fp.write('\0' * 1024)
fp.close()
def view_revision(request):
if request.roottype == "cvs":
    raise debug.ViewVCException("Revision view not supported for CVS "
                                "repositories at this time.",
                                "400 Bad Request")
data = common_template_data(request)
query_dict = request.query_dict
rev = request.repos._getrev(query_dict.get('revision'))
date, author, msg, changes = vclib.svn.get_revision_info(request.repos, rev)
date_str = make_time_string(date, request.cfg)
# The revision number acts as a weak validator.
if check_freshness(request, None, str(rev), weak=1):
return
# Handle limit_changes parameter
cfg_limit_changes = request.cfg.options.limit_changes
limit_changes = int(query_dict.get('limit_changes', cfg_limit_changes))
more_changes = None
more_changes_href = None
first_changes = None
first_changes_href = None
if limit_changes and len(changes) > limit_changes:
more_changes = len(changes) - limit_changes
params = query_dict.copy()
params['limit_changes'] = 0
more_changes_href = request.get_url(params=params, escape=1)
changes = changes[:limit_changes]
elif cfg_limit_changes and len(changes) > cfg_limit_changes:
first_changes = cfg_limit_changes
params = query_dict.copy()
params['limit_changes'] = None
first_changes_href = request.get_url(params=params, escape=1)
# add the hrefs, types, and prev info
for change in changes:
change.view_href = change.diff_href = change.type = change.log_href = None
pathtype = (change.pathtype == vclib.FILE and 'file') \
or (change.pathtype == vclib.DIR and 'dir') \
or None
if (change.action == 'added' or change.action == 'replaced') \
and not change.is_copy:
change.text_mods = 0
change.prop_mods = 0
view_func = None
if change.pathtype is vclib.FILE:
view_func = view_markup
if change.text_mods:
params = {'pathrev' : str(rev),
'r1' : str(rev),
'r2' : str(change.base_rev),
}
change.diff_href = request.get_url(view_func=view_diff,
where=change.filename,
pathtype=change.pathtype,
params=params,
escape=1)
elif change.pathtype is vclib.DIR:
view_func=view_directory
if change.pathtype:
if change.action == 'deleted':
link_rev = str(change.base_rev)
link_where = change.base_path
else:
link_rev = str(rev)
link_where = change.filename
change.view_href = request.get_url(view_func=view_func,
where=link_where,
pathtype=change.pathtype,
params={'pathrev' : link_rev},
escape=1)
change.log_href = request.get_url(view_func=view_log,
where=link_where,
pathtype=change.pathtype,
params={'pathrev' : link_rev},
escape=1)
change.text_mods = ezt.boolean(change.text_mods)
change.prop_mods = ezt.boolean(change.prop_mods)
change.is_copy = ezt.boolean(change.is_copy)
change.pathtype = pathtype
# use same variable names as the log template
change.path = change.filename
change.copy_path = change.base_path
change.copy_rev = change.base_rev
del change.filename
del change.base_path
del change.base_rev
prev_rev_href = next_rev_href = None
if rev > 0:
prev_rev_href = request.get_url(view_func=view_revision,
where=None,
pathtype=None,
params={'revision': str(rev - 1)},
escape=1)
if rev < request.repos.youngest:
next_rev_href = request.get_url(view_func=view_revision,
where=None,
pathtype=None,
params={'revision': str(rev + 1)},
escape=1)
data.update({
'rev' : str(rev),
'author' : author,
'date' : date_str,
'log' : msg and htmlify(msg) or None,
'ago' : None,
'changes' : changes,
'prev_href' : prev_rev_href,
'next_href' : next_rev_href,
'limit_changes': limit_changes,
'more_changes': more_changes,
'more_changes_href': more_changes_href,
'first_changes': first_changes,
'first_changes_href': first_changes_href,
})
if date is not None:
data['ago'] = html_time(request, date, 1)
data['jump_rev_action'], data['jump_rev_hidden_values'] = \
request.get_form(params={'revision': None})
request.server.header()
generate_page(request, "revision", data)
def is_query_supported(request):
"""Returns true if querying is supported for the given path."""
return request.cfg.cvsdb.enabled \
and request.pathtype == vclib.DIR \
and request.roottype in ['cvs', 'svn']
def view_queryform(request):
if not is_query_supported(request):
raise debug.ViewVCException('Can not query project root "%s" at "%s".'
% (request.rootname, request.where),
'403 Forbidden')
data = common_template_data(request)
data['query_action'], data['query_hidden_values'] = \
request.get_form(view_func=view_query, params={'limit_changes': None})
# default values ...
data['branch'] = request.query_dict.get('branch', '')
data['branch_match'] = request.query_dict.get('branch_match', 'exact')
data['dir'] = request.query_dict.get('dir', '')
data['file'] = request.query_dict.get('file', '')
data['file_match'] = request.query_dict.get('file_match', 'exact')
data['who'] = request.query_dict.get('who', '')
data['who_match'] = request.query_dict.get('who_match', 'exact')
data['querysort'] = request.query_dict.get('querysort', 'date')
data['date'] = request.query_dict.get('date', 'hours')
data['hours'] = request.query_dict.get('hours', '2')
data['mindate'] = request.query_dict.get('mindate', '')
data['maxdate'] = request.query_dict.get('maxdate', '')
data['limit_changes'] = int(request.query_dict.get('limit_changes',
request.cfg.options.limit_changes))
data['dir_href'] = request.get_url(view_func=view_directory, params={},
escape=1)
request.server.header()
generate_page(request, "query_form", data)
def parse_date(s):
'''Parse a date string from the query form.'''
match = re.match(r'^(\d\d\d\d)-(\d\d)-(\d\d)(?:\ +(\d\d):(\d\d)(?::(\d\d))?)?$', s)
if match:
year = int(match.group(1))
month = int(match.group(2))
day = int(match.group(3))
hour = match.group(4)
if hour is not None:
hour = int(hour)
else:
hour = 0
minute = match.group(5)
if minute is not None:
minute = int(minute)
else:
minute = 0
second = match.group(6)
if second is not None:
second = int(second)
else:
second = 0
# return a "seconds since epoch" value assuming date given in UTC
tm = (year, month, day, hour, minute, second, 0, 0, 0)
return compat.timegm(tm)
else:
return None
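# Illustrative examples (not part of the original source) of the date formats
# parse_date() accepts; anything else yields None:
#
#     parse_date('2004-07-25')             # midnight UTC on that day
#     parse_date('2004-07-25 13:45')       # seconds default to 0
#     parse_date('2004-07-25 13:45:30')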
def english_query(request):
  '''Generate a sentence describing the query.'''
ret = [ 'Checkins ' ]
dir = request.query_dict.get('dir', '')
if dir:
ret.append('to ')
if ',' in dir:
ret.append('subdirectories')
else:
ret.append('subdirectory')
ret.append(' <em>%s</em> ' % htmlify(dir))
file = request.query_dict.get('file', '')
if file:
if len(ret) != 1: ret.append('and ')
ret.append('to file <em>%s</em> ' % htmlify(file))
who = request.query_dict.get('who', '')
branch = request.query_dict.get('branch', '')
if branch:
ret.append('on branch <em>%s</em> ' % htmlify(branch))
else:
ret.append('on all branches ')
if who:
ret.append('by <em>%s</em> ' % htmlify(who))
date = request.query_dict.get('date', 'hours')
if date == 'hours':
ret.append('in the last %s hours' % htmlify(request.query_dict.get('hours', '2')))
elif date == 'day':
ret.append('in the last day')
elif date == 'week':
ret.append('in the last week')
elif date == 'month':
ret.append('in the last month')
elif date == 'all':
ret.append('since the beginning of time')
elif date == 'explicit':
mindate = request.query_dict.get('mindate', '')
maxdate = request.query_dict.get('maxdate', '')
if mindate and maxdate:
w1, w2 = 'between', 'and'
else:
w1, w2 = 'since', 'before'
if mindate:
mindate = make_time_string(parse_date(mindate), request.cfg)
ret.append('%s <em>%s</em> ' % (w1, mindate))
if maxdate:
maxdate = make_time_string(parse_date(maxdate), request.cfg)
ret.append('%s <em>%s</em> ' % (w2, maxdate))
return string.join(ret, '')
def prev_rev(rev):
'''Returns a string representing the previous revision of the argument.'''
r = string.split(rev, '.')
# decrement final revision component
r[-1] = str(int(r[-1]) - 1)
# prune if we pass the beginning of the branch
if len(r) > 2 and r[-1] == '0':
r = r[:-2]
return string.join(r, '.')
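# Illustrative examples (not part of the original source):
#
#     prev_rev('1.4')       ->  '1.3'
#     prev_rev('1.2.4.1')   ->  '1.2'   (the start of a branch rolls back to
#                                        the branch point)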
def build_commit(request, files, limited_files, dir_strip):
commit = _item(num_files=len(files), files=[])
commit.limited_files = ezt.boolean(limited_files)
desc = files[0].GetDescription()
commit.log = htmlify(desc)
commit.short_log = format_log(desc, request.cfg)
commit.author = htmlify(files[0].GetAuthor())
commit.rss_date = make_rss_time_string(files[0].GetTime(), request.cfg)
if request.roottype == 'svn':
commit.rev = files[0].GetRevision()
commit.rss_url = 'http://%s%s' % \
(request.server.getenv("HTTP_HOST"),
request.get_url(view_func=view_revision,
params={'revision': commit.rev},
escape=1))
else:
commit.rev = None
commit.rss_url = None
len_strip = len(dir_strip)
for f in files:
commit_time = f.GetTime()
if commit_time:
commit_time = make_time_string(commit_time, request.cfg)
else:
commit_time = ' '
dirname = f.GetDirectory()
filename = f.GetFile()
if dir_strip:
assert dirname[:len_strip] == dir_strip
assert len(dirname) == len_strip or dirname[len(dir_strip)] == '/'
dirname = dirname[len_strip+1:]
filename = dirname and ("%s/%s" % (dirname, filename)) or filename
params = { 'revision': f.GetRevision() }
if f.GetBranch(): params['pathrev'] = f.GetBranch()
dir_href = request.get_url(view_func=view_directory,
where=dirname, pathtype=vclib.DIR,
params=params,
escape=1)
log_href = request.get_url(view_func=view_log,
where=filename, pathtype=vclib.FILE,
params=params,
escape=1)
view_href = request.get_url(view_func=view_markup,
where=filename, pathtype=vclib.FILE,
params={'revision': f.GetRevision() },
escape=1)
download_href = request.get_url(view_func=view_checkout,
where=filename, pathtype=vclib.FILE,
params={'revision': f.GetRevision() },
escape=1)
diff_href = request.get_url(view_func=view_diff,
where=filename, pathtype=vclib.FILE,
params={'r1': prev_rev(f.GetRevision()),
'r2': f.GetRevision(),
'diff_format': None},
escape=1)
# skip files in forbidden or hidden modules
dir_parts = filter(None, string.split(dirname, '/'))
if dir_parts \
and ((dir_parts[0] == 'CVSROOT'
and request.cfg.options.hide_cvsroot) \
or request.cfg.is_forbidden(dir_parts[0])):
continue
commit.files.append(_item(date=commit_time,
dir=htmlify(dirname),
file=htmlify(f.GetFile()),
author=htmlify(f.GetAuthor()),
rev=f.GetRevision(),
branch=f.GetBranch(),
plus=int(f.GetPlusCount()),
minus=int(f.GetMinusCount()),
type=f.GetTypeString(),
dir_href=dir_href,
log_href=log_href,
view_href=view_href,
download_href=download_href,
prefer_markup=ezt.boolean
(default_view(guess_mime(filename), request.cfg)
== view_markup),
diff_href=diff_href))
return commit
def query_backout(request, commits):
request.server.header('text/plain')
if commits:
print '# This page can be saved as a shell script and executed.'
print '# It should be run at the top of your work area. It will update'
print '# your working copy to back out the changes selected by the'
print '# query.'
print
else:
print '# No changes were selected by the query.'
print '# There is nothing to back out.'
return
for commit in commits:
for fileinfo in commit.files:
if request.roottype == 'cvs':
print 'cvs update -j %s -j %s %s/%s' \
% (fileinfo.rev, prev_rev(fileinfo.rev),
fileinfo.dir, fileinfo.file)
elif request.roottype == 'svn':
print 'svn merge -r %s:%s %s/%s' \
% (fileinfo.rev, prev_rev(fileinfo.rev),
fileinfo.dir, fileinfo.file)
def view_query(request):
if not is_query_supported(request):
raise debug.ViewVCException('Can not query project root "%s" at "%s".'
% (request.rootname, request.where),
'403 Forbidden')
# get form data
branch = request.query_dict.get('branch', '')
branch_match = request.query_dict.get('branch_match', 'exact')
dir = request.query_dict.get('dir', '')
file = request.query_dict.get('file', '')
file_match = request.query_dict.get('file_match', 'exact')
who = request.query_dict.get('who', '')
who_match = request.query_dict.get('who_match', 'exact')
querysort = request.query_dict.get('querysort', 'date')
date = request.query_dict.get('date', 'hours')
hours = request.query_dict.get('hours', '2')
mindate = request.query_dict.get('mindate', '')
maxdate = request.query_dict.get('maxdate', '')
format = request.query_dict.get('format')
limit = int(request.query_dict.get('limit', 0))
limit_changes = int(request.query_dict.get('limit_changes',
request.cfg.options.limit_changes))
match_types = { 'exact':1, 'like':1, 'glob':1, 'regex':1, 'notregex':1 }
sort_types = { 'date':1, 'author':1, 'file':1 }
date_types = { 'hours':1, 'day':1, 'week':1, 'month':1,
'all':1, 'explicit':1 }
# parse various fields, validating or converting them
if not match_types.has_key(branch_match): branch_match = 'exact'
if not match_types.has_key(file_match): file_match = 'exact'
if not match_types.has_key(who_match): who_match = 'exact'
if not sort_types.has_key(querysort): querysort = 'date'
if not date_types.has_key(date): date = 'hours'
mindate = parse_date(mindate)
maxdate = parse_date(maxdate)
global cvsdb
import cvsdb
db = cvsdb.ConnectDatabaseReadOnly(request.cfg)
repos_root, repos_dir = cvsdb.FindRepository(db, request.rootpath)
if not repos_root:
raise debug.ViewVCException(
"The root '%s' was not found in the commit database "
% request.rootname)
# create the database query from the form data
query = cvsdb.CreateCheckinQuery()
query.SetRepository(repos_root)
# treat "HEAD" specially ...
if branch_match == 'exact' and branch == 'HEAD':
query.SetBranch('')
elif branch:
query.SetBranch(branch, branch_match)
if dir:
for subdir in string.split(dir, ','):
path = (_path_join(repos_dir + request.path_parts
+ [ string.strip(subdir) ]))
query.SetDirectory(path, 'exact')
query.SetDirectory('%s/%%' % cvsdb.EscapeLike(path), 'like')
else:
where = _path_join(repos_dir + request.path_parts)
if where: # if we are in a subdirectory ...
query.SetDirectory(where, 'exact')
query.SetDirectory('%s/%%' % cvsdb.EscapeLike(where), 'like')
if file:
query.SetFile(file, file_match)
if who:
query.SetAuthor(who, who_match)
query.SetSortMethod(querysort)
if date == 'hours':
query.SetFromDateHoursAgo(int(hours))
elif date == 'day':
query.SetFromDateDaysAgo(1)
elif date == 'week':
query.SetFromDateDaysAgo(7)
elif date == 'month':
query.SetFromDateDaysAgo(31)
elif date == 'all':
pass
elif date == 'explicit':
if mindate is not None:
query.SetFromDateObject(mindate)
if maxdate is not None:
query.SetToDateObject(maxdate)
if limit:
query.SetLimit(limit)
elif format == 'rss':
query.SetLimit(request.cfg.cvsdb.rss_row_limit)
# run the query
db.RunQuery(query)
sql = htmlify(db.CreateSQLQueryString(query))
# gather commits
commits = []
plus_count = 0
minus_count = 0
mod_time = -1
if query.commit_list:
files = []
limited_files = 0
current_desc = query.commit_list[0].GetDescriptionID()
current_rev = query.commit_list[0].GetRevision()
dir_strip = _path_join(repos_dir)
for commit in query.commit_list:
# base modification time on the newest commit ...
if commit.GetTime() > mod_time: mod_time = commit.GetTime()
# form plus/minus totals
plus_count = plus_count + int(commit.GetPlusCount())
minus_count = minus_count + int(commit.GetMinusCount())
# group commits with the same commit message ...
desc = commit.GetDescriptionID()
# For CVS, group commits with the same commit message.
# For Subversion, group them only if they have the same revision number
if request.roottype == 'cvs':
if current_desc == desc:
if not limit_changes or len(files) < limit_changes:
files.append(commit)
else:
limited_files = 1
continue
else:
if current_rev == commit.GetRevision():
if not limit_changes or len(files) < limit_changes:
files.append(commit)
else:
limited_files = 1
continue
# if our current group has any allowed files, append a commit
# with those files.
if len(files):
commits.append(build_commit(request, files, limited_files, dir_strip))
files = [ commit ]
limited_files = 0
current_desc = desc
current_rev = commit.GetRevision()
# we need to tack on our last commit grouping, but, again, only if
# it has allowed files.
if len(files):
commits.append(build_commit(request, files, limited_files, dir_strip))
# only show the branch column if we are querying all branches
# or doing a non-exact branch match on a CVS repository.
show_branch = ezt.boolean(request.roottype == 'cvs' and
(branch == '' or branch_match != 'exact'))
# a link to modify query
queryform_href = request.get_url(view_func=view_queryform, escape=1)
# backout link
params = request.query_dict.copy()
params['format'] = 'backout'
backout_href = request.get_url(params=params,
escape=1)
# link to zero limit_changes value
params = request.query_dict.copy()
params['limit_changes'] = 0
limit_changes_href = request.get_url(params=params, escape=1)
# if we got any results, use the newest commit as the modification time
if mod_time >= 0:
if check_freshness(request, mod_time):
return
if format == 'backout':
query_backout(request, commits)
return
data = common_template_data(request)
data.update({
'sql': sql,
'english_query': english_query(request),
'queryform_href': queryform_href,
'backout_href': backout_href,
'plus_count': plus_count,
'minus_count': minus_count,
'show_branch': show_branch,
'querysort': querysort,
'commits': commits,
'limit_changes': limit_changes,
'limit_changes_href': limit_changes_href,
})
if format == 'rss':
request.server.header("text/xml")
generate_page(request, "rss", data)
else:
request.server.header()
generate_page(request, "query_results", data)
_views = {
'annotate': view_annotate,
'co': view_checkout,
'diff': view_diff,
'dir': view_directory,
'graph': view_cvsgraph,
'graphimg': view_cvsgraph_image,
'log': view_log,
'markup': view_markup,
'patch': view_patch,
'query': view_query,
'queryform': view_queryform,
'rev': view_revision,
'roots': view_roots,
'tar': download_tarball,
'redirect_pathrev': redirect_pathrev,
}
_view_codes = {}
for code, view in _views.items():
_view_codes[view] = code
def list_roots(cfg):
allroots = { }
for root in cfg.general.cvs_roots.keys():
allroots[root] = [cfg.general.cvs_roots[root], 'cvs']
for root in cfg.general.svn_roots.keys():
allroots[root] = [cfg.general.svn_roots[root], 'svn']
return allroots
def load_config(pathname=None, server=None):
debug.t_start('load-config')
if pathname is None:
pathname = (os.environ.get("VIEWVC_CONF_PATHNAME")
or os.environ.get("VIEWCVS_CONF_PATHNAME")
or _install_path("viewvc.conf"))
cfg = config.Config()
cfg.set_defaults()
cfg.load_config(pathname, server and server.getenv("HTTP_HOST"))
# load mime types file
if cfg.general.mime_types_file:
mimetypes.init([cfg.general.mime_types_file])
# special handling for root_parents. Each item in root_parents is
# a "directory : repo_type" string. For each item in
# root_parents, we get a list of the subdirectories.
#
# If repo_type is "cvs", and the subdirectory contains a child
# "CVSROOT/config", then it is added to cvs_roots. Or, if the
# root directory itself contains a child "CVSROOT/config" file,
# then all its subdirectories are added to cvs_roots.
#
# If repo_type is "svn", and the subdirectory contains a child
# "format", then it is added to svn_roots.
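  # Illustrative viewvc.conf snippet (an assumption for this example, not
  # taken from the original file):
  #
  #     root_parents = /var/svn-parent : svn,
  #                    /var/cvs-parent : cvs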
for pp in cfg.general.root_parents:
pos = string.rfind(pp, ':')
if pos < 0:
raise debug.ViewVCException(
"The path '%s' in 'root_parents' does not include a "
"repository type." % pp)
repo_type = string.strip(pp[pos+1:])
pp = os.path.normpath(string.strip(pp[:pos]))
try:
subpaths = os.listdir(pp)
except OSError:
raise debug.ViewVCException(
"The path '%s' in 'root_parents' does not refer to "
"a valid directory." % pp)
cvsroot = os.path.exists(os.path.join(pp, "CVSROOT", "config"))
for subpath in subpaths:
if os.path.exists(os.path.join(pp, subpath)):
if (repo_type == 'cvs'
and (os.path.exists(os.path.join(pp, subpath, "CVSROOT", "config"))
or (cvsroot and (subpath != 'CVSROOT'
or not cfg.options.hide_cvsroot)))):
cfg.general.cvs_roots[subpath] = os.path.join(pp, subpath)
elif repo_type == 'svn' and \
os.path.exists(os.path.join(pp, subpath, "format")):
cfg.general.svn_roots[subpath] = os.path.join(pp, subpath)
debug.t_end('load-config')
return cfg
def view_error(server, cfg):
exc_dict = debug.GetExceptionData()
status = exc_dict['status']
if exc_dict['msg']:
exc_dict['msg'] = htmlify(exc_dict['msg'])
if exc_dict['stacktrace']:
exc_dict['stacktrace'] = htmlify(exc_dict['stacktrace'])
handled = 0
# use the configured error template if possible
try:
if cfg and not server.headerSent:
server.header(status=status)
template = get_view_template(cfg, "error")
template.generate(sys.stdout, exc_dict)
handled = 1
except:
pass
# but fallback to the old exception printer if no configuration is
# available, or if something went wrong
if not handled:
debug.PrintException(server, exc_dict)
def main(server, cfg):
try:
debug.t_start('main')
try:
# build a Request object, which contains info about the HTTP request
request = Request(server, cfg)
request.run_viewvc()
except SystemExit, e:
return
except:
view_error(server, cfg)
finally:
debug.t_end('main')
debug.dump()
debug.DumpChildren(server)
class _item:
def __init__(self, **kw):
vars(self).update(kw)
| gpl-2.0 | 1,117,488,272,726,977,000 | 34.277165 | 172 | 0.585365 | false |
spikeekips/txmongo2 | setup.py | 1 | 1049 | # -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='txmongo2',
description='another mongodb driver for twisted.',
long_description='''
mongodb driver for twisted, forked from
    `https://github.com/oubiwann/txmongo.git`. still needs testing.
''',
author='Spike^ekipS',
author_email='https://github.com/spikeekips',
url='https://github.com/spikeekips',
version='0.2a',
license='License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
classifiers=(
'Development Status :: 3 - Alpha',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX',
'Programming Language :: Python',
'Framework :: Twisted',
),
zip_safe=False,
install_requires=(
'Twisted',
'pymongo',
'nose',
),
packages=('txmongo2', 'txmongo2._gridfs', ),
package_dir={'': 'src', },
test_suite="nose.collector",
)
| apache-2.0 | -3,390,687,502,465,538,000 | 27.351351 | 89 | 0.570067 | false |
iambrettstar/wemake | api/routes/polls_api.py | 1 | 1487 | from flask import abort, request
from flask_restful import Resource
from mysql.connector import Error
from models.schemas import PollSchema
from models import Poll
class PollsAPI(Resource):
@staticmethod
def get(poll_id=None):
"""HTTP GET method - returns a list of polls if no id specified,
otherwise returns the poll by the given id, aborting if not
found"""
poll = Poll()
schema = PollSchema()
try:
if poll_id is None:
poll_list = poll.find()
return schema.dump(poll_list, many=True).data
else:
poll_list = poll.find(poll_id)
if not poll_list:
abort(404, "Unable to find poll with id {}".format(
poll_id))
return schema.dump(poll_list[0]).data
except Error as error:
abort(500, error.msg)
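    # Illustrative behaviour sketch (assumes this resource is registered at a
    # /polls route, which is not shown in this file):
    #
    #   GET /polls      -> JSON list of every poll
    #   GET /polls/7    -> JSON for poll 7, or a 404 if it does not exist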
@staticmethod
def post():
"""HTTP POST method - creates a new poll in the DB"""
if not request.json:
abort(400, "Missing required poll data")
print(request.json)
try:
schema = PollSchema()
result = schema.load(request.json, partial=('id',))
if result.errors:
abort(400, result.errors)
poll = result.data
poll.insert()
return schema.dump(poll).data, 201
except Error as error:
abort(500, error.msg)
| gpl-3.0 | -3,734,023,964,343,934,000 | 26.537037 | 72 | 0.549428 | false |
michaelwang/SlackImageStorage | SlackImageStorage/encryptedTools.py | 1 | 1075 | import string
import base64
from Crypto.Cipher import AES
import time
class CryptoTool(object):
def __init__(self):
self.padding = '{'
self.block_size = 32
self.pad = lambda s : s + (self.block_size - len(s) % self.block_size) * self.padding
self.encodedAES = lambda c,s : base64.b64encode(c.encrypt(self.pad(s)))
self.decodedAES = lambda c,e : c.decrypt(base64.b64decode(e)).rstrip(self.padding)
self.secretKey = '1234567812345678'
self.cipher = AES.new(self.secretKey)
def encryptString(self,stringObject):
return self.encodedAES(self.cipher,stringObject)
def decryptedString(self,encodedString):
return self.decodedAES(self.cipher,encodedString)
if __name__ == '__main__':
tool = CryptoTool()
timestamp = int(time.time())
encodedAESString = tool.encryptString('abcdefg'+ str(timestamp))
print encodedAESString +'\n'
decodedAESString = tool.decryptedString(encodedAESString)
print decodedAESString + '\n'
| gpl-3.0 | -3,576,943,901,784,612,000 | 34.833333 | 93 | 0.64186 | false |
asockman/treemechanics | vtrees23.py | 1 | 4109 | """ trees v0.2
procedurally generated foliage
(implicitly happy)
agrippa kellum : june 2013
"""
__all__ = ['Branch', 'Trunk']
import sys, random, math, time, visual as v, numpy as np
class Branch(object):
def __init__(self,inc,az,parent,origin):
self.parent = parent
self.size = 1
self.children = []
self.growtick = 0
self.order = parent.order + 1
self.splitchance = parent.splitchance
self.thickness = 1/100
self.thicken = 0.1/100
self.incmod = inc
self.inclination = parent.inclination+self.incmod
self.azmod = az
self.azimuth = parent.azimuth+self.azmod
self.origin = np.array(origin)
self.relativeorigin = (np.sum((self.origin-parent.origin)**2))**0.5
self.end = np.array(origin)
self.terminated = 0
self.age = 0
def delete(self):
while self.children:
self.children[0].delete()
self.parent.children.remove(self)
del self
def get_end(self,size):
x2 = self.origin[0] + (math.cos(self.azimuth) * math.sin(self.inclination) * size)
y2 = self.origin[1] + (math.cos(self.inclination) * size)
z2 = self.origin[2] + (math.sin(self.azimuth) * math.sin(self.inclination) * size)
return np.array([x2,y2,z2])
# @classmethod
def branchoff(self):
azmod = (random.random()*azrange*2)-azrange
incmod = (random.random()*incrange*2)-incrange
while abs(self.inclination+incmod) > maxinc:
incmod = (random.random()*incrange*2)-incrange
self.children.append(self.__class__(incmod, azmod, self, self.end))
def grow(self):
self.growtick += dominance**self.order
if self.growtick >= 1: #if i have received enough nutrients to grow...
self.growtick %= 1
self.age += 1 #TEMPORARY
self.thickness += self.thicken
if self.terminated == 0: #...and i have a meristem...
if random.random() <= self.splitchance:
self.branchoff()
self.size += growth
self.incmod *= branchdrop
if abs(self.inclination) < maxinc:
self.inclination = self.parent.inclination+self.incmod
else:
self.delete()
self.origin = self.parent.get_end(self.relativeorigin)
self.end = self.get_end(self.size)
for b in self.children:
b.grow()
class Trunk(Branch):
''' A Branch who has no parent -- used to start a tree '''
def __init__(self, origin):
self.azimuth = random.random()*math.pi*2
self.inclination = 0
self.order = -1
self.splitchance = splitchance
super().__init__(0, 0, self,origin)
class VBranch(Branch, v.cone):
def __init__(self, *a):
super().__init__(*a)
v.cone.__init__(self, pos=self.origin, axis=(self.end-self.origin), radius=self.thickness, color=treebark)
def delete(self):
self.visible = False
super().delete()
def draw(self):
self.pos = self.origin
self.axis = (self.end-self.origin)
self.radius = (self.thickness)
if self.children:
for b in self.children:
b.draw()
class VTrunk(Trunk, VBranch):
pass
height = 800
width = 1000
# random.seed(232)
green = (0.2,0.8,0)
yello = (0.8,0.8,0)
treebark = (0.6,0.4,0)
incrange = 0.5
azrange = math.pi*2
growth = 0.03
splitchance = 0.005
leafmod = 0.1
maxinc = math.pi/2
branchdrop = 1.0005
dominance = 1 #how much nutrients does a parent give its child?
display = v.display(title="my own tree!",width=width,height=height)
tree = VTrunk((0,0,0))
mousedown = 0
growit = 0
while 1:
if display.kb.keys:
key = display.kb.getkey()
if key == 'e':
growit = 1
else:
growit = 0
if growit:
tree.grow()
display.center = tree.get_end(tree.size/2)
tree.draw() | mit | 946,289,153,943,667,300 | 26.77027 | 114 | 0.563641 | false |
AnykeyNL/uArmProPython | svg_example.py | 1 | 1467 | # Example made by OssiLehtinen
#
from svgpathtools import svg2paths, wsvg
import numpy as np
import uArmRobot
import time
#Configure Serial Port
#serialport = "com3" # for windows
serialport = "/dev/ttyACM0" # for linux like system
# Connect to uArm
myRobot = uArmRobot.robot(serialport,0) # user 0 for firmware < v4 and use 1 for firmware v4
myRobot.debug = True # Enable / Disable debug output on screen, by default disabled
myRobot.connect()
myRobot.mode(1) # Set mode to Normal
# Read in the svg
paths, attributes = svg2paths('drawing.svg')
scale = .25
steps_per_seg = 3
coords = []
x_offset = 200
height = 90
draw_speed = 1000
# Convert the paths to a list of coordinates
for i in range(len(paths)):
path = paths[i]
attribute = attributes[i]
# A crude check for whether a path should be drawn. Does it have a style defined?
if 'style' in attribute:
for seg in path:
segcoords = []
for p in range(steps_per_seg+1):
cp = seg.point(float(p)/float(steps_per_seg))
segcoords.append([-np.real(cp)*scale+x_offset, np.imag(cp)*scale])
coords.append(segcoords)
# The starting point
myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000)
for seg in coords:
myRobot.goto(seg[0][0], seg[0][1], height, 6000)
time.sleep(0.15)
for p in seg:
myRobot.goto_laser(p[0], p[1], height, draw_speed)
# Back to the starting point (and turn the laser off)
myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000)
| gpl-3.0 | 5,763,393,511,411,950,000 | 23.45 | 92 | 0.69257 | false |
Castronova/EMIT | wrappers/odm2_data.py | 1 | 6423 | __author__ = 'tonycastronova'
import datetime as dt
from api_old.ODM2.Core.services import readCore
from api_old.ODM2.Results.services import readResults
# from shapely import wkb
import stdlib, uuid
from utilities.status import Status
import datatypes
from utilities import geometry
class odm2(object):
def __init__(self,resultid, session):
# get result object and result timeseries
core = readCore(session)
obj = core.getResultByID(resultID=int(resultid))
readres = readResults(session)
results = readres.getTimeSeriesValuesByResultId(resultId=int(resultid))
# separate the date and value pairs in the timeseries
dates = [date.ValueDateTime for date in results]
values = [val.DataValue for val in results]
# basic exchange item info
id = uuid.uuid4().hex[:8]
name = obj.VariableObj.VariableCode
desc = obj.VariableObj.VariableDefinition
#unit = obj.UnitObj.UnitsName
#vari = obj.VariableObj.VariableNameCV
type = stdlib.ExchangeItemType.OUTPUT
start = min(dates)
end = max(dates)
# build variable
variable = stdlib.Variable()
variable.VariableDefinition(obj.VariableObj.VariableDefinition)
variable.VariableNameCV(obj.VariableObj.VariableNameCV)
# build unit
unit = stdlib.Unit()
unit.UnitAbbreviation(obj.UnitObj.UnitsAbbreviation)
unit.UnitName(obj.UnitObj.UnitsName)
unit.UnitTypeCV(obj.UnitObj.UnitsTypeCV)
# build geometries
# todo: need to specify srs and elevation
wkb = str(obj.FeatureActionObj.SamplingFeatureObj.FeatureGeometry.data)
geom = geometry.fromWKB(wkb)
# build exchange item object
oei = stdlib.ExchangeItem(id=id,
name=name,
desc=desc,
geometry=geom,
unit=unit,
variable=variable,type=type )
# set global parameters
self.__id = id
self.__name = name
self.__start=start
self.__end=end
self.__output={self.__name: oei}
self.__desc=obj.VariableObj.VariableDefinition
self.__current_time = self.simulation_start()
self.__obj = obj
self.__resultid = obj.ResultID
self.__session = session
self.__status = Status.Loaded
def type(self):
return datatypes.ModelTypes.Data
def save(self):
#return [self.get_output_by_name(outputname=self.name())]
#return [self.__output]
return []
def run(self,inputs):
# set the status to finished
self.status(Status.Finished)
def run_timestep(self,inputs,time):
# set the status to finished
self.status(Status.Finished)
def prepare(self):
'''
Called before simulation run to prepare the model
:return: READY status
'''
# query the database
#return [self.get_output_by_name(outputname=self.name())]
self.status(Status.Ready)
def session(self):
return self.__session
def obj(self):
return self.__obj
#
# def actionid(self):
# return self.__actionid
def resultid(self):
return self.__resultid
def id(self):
return self.__id
def time_step(self):
"""
ini configuration file
"""
#return (int(self.__params['time_step'][0]['value']),self.__params['time_step'][0]['unit_type_cv'])
raise NotImplementedError('This is an abstract method that must be implemented!')
def outputs(self):
"""
ini configuration file
"""
return self.__output
def inputs(self):
return {}
def simulation_start(self):
return self.__start
def simulation_end(self):
return self.__end
def name(self):
return self.__name
def description(self):
return self.__desc
def current_time(self):
return self.__current_time
def increment_time(self, time):
value,unit = self.time_step()
# if unit == 'millisecond': self.__current_time += dt.timedelta(milliseconds=value)
# elif unit == 'second': self.__current_time += dt.timedelta(seconds =value)
# elif unit == 'minute': self.__current_time += dt.timedelta(minutes=value)
# elif unit == 'hour': self.__current_time += dt.timedelta(hours=value)
# elif unit == 'day': self.__current_time += dt.timedelta(days=value)
# else:
# raise Exception('Unknown unit: %s'%unit)
if unit == 'millisecond': time += dt.timedelta(milliseconds=value)
elif unit == 'second': time += dt.timedelta(seconds =value)
elif unit == 'minute': time += dt.timedelta(minutes=value)
elif unit == 'hour': time += dt.timedelta(hours=value)
elif unit == 'day': time += dt.timedelta(days=value)
else:
raise Exception('Unknown unit: %s'%unit)
return time
def get_output_by_name(self,outputname):
outputs = self.outputs()
if outputs.has_key(outputname):
return outputs[outputname]
else:
            print 'Could not find output: %s' % outputname
return None
#return [self.__output]
#
# outputs = self.outputs()
#
# for output in outputs:
# if output.name() == outputname:
# return output
#
# raise Exception('Could not find output: %s' + outputname)
#raise NotImplementedError('This is an abstract method that must be implemented!')
def set_geom_values(self,variablename,geometry,datavalues):
#
# item = self.get_output_by_name(variablename)
#
# geometries = item.geometries()
# for geom in geometries:
# if geom.geom().equals(geometry):
# geom.datavalues().set_timeseries(datavalues)
# return
# raise Exception ('Error setting data for variable: %s' % variablename)
raise NotImplementedError('This is an abstract method that must be implemented!')
def status(self, value=None):
if value is not None:
self.__status = value
return self.__status | gpl-2.0 | 4,727,562,517,179,709,000 | 29.590476 | 107 | 0.589288 | false |
openstack/manila | manila/common/constants.py | 1 | 9242 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The maximum value a signed INT type may have
DB_MAX_INT = 0x7FFFFFFF
# SHARE AND GENERAL STATUSES
STATUS_CREATING = 'creating'
STATUS_CREATING_FROM_SNAPSHOT = 'creating_from_snapshot'
STATUS_DELETING = 'deleting'
STATUS_DELETED = 'deleted'
STATUS_ERROR = 'error'
STATUS_ERROR_DELETING = 'error_deleting'
STATUS_AVAILABLE = 'available'
STATUS_INACTIVE = 'inactive'
STATUS_MANAGING = 'manage_starting'
STATUS_MANAGE_ERROR = 'manage_error'
STATUS_UNMANAGING = 'unmanage_starting'
STATUS_MANAGE_ERROR_UNMANAGING = 'manage_error_unmanage_starting'
STATUS_UNMANAGE_ERROR = 'unmanage_error'
STATUS_UNMANAGED = 'unmanaged'
STATUS_EXTENDING = 'extending'
STATUS_EXTENDING_ERROR = 'extending_error'
STATUS_SHRINKING = 'shrinking'
STATUS_SHRINKING_ERROR = 'shrinking_error'
STATUS_MIGRATING = 'migrating'
STATUS_MIGRATING_TO = 'migrating_to'
STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR = (
'shrinking_possible_data_loss_error'
)
STATUS_REPLICATION_CHANGE = 'replication_change'
STATUS_RESTORING = 'restoring'
STATUS_REVERTING = 'reverting'
STATUS_REVERTING_ERROR = 'reverting_error'
# Access rule states
ACCESS_STATE_QUEUED_TO_APPLY = 'queued_to_apply'
ACCESS_STATE_QUEUED_TO_DENY = 'queued_to_deny'
ACCESS_STATE_APPLYING = 'applying'
ACCESS_STATE_DENYING = 'denying'
ACCESS_STATE_ACTIVE = 'active'
ACCESS_STATE_ERROR = 'error'
ACCESS_STATE_DELETED = 'deleted'
# Share instance "access_rules_status" field values
SHARE_INSTANCE_RULES_SYNCING = 'syncing'
SHARE_INSTANCE_RULES_ERROR = 'error'
# States/statuses for multiple resources
STATUS_NEW = 'new'
STATUS_OUT_OF_SYNC = 'out_of_sync'
STATUS_ACTIVE = 'active'
# Share server migration statuses
STATUS_SERVER_MIGRATING = 'server_migrating'
STATUS_SERVER_MIGRATING_TO = 'server_migrating_to'
# Share server update statuses
STATUS_SERVER_NETWORK_CHANGE = 'network_change'
# Share network statuses
STATUS_NETWORK_ACTIVE = 'active'
STATUS_NETWORK_ERROR = 'error'
STATUS_NETWORK_CHANGE = 'network_change'
ACCESS_RULES_STATES = (
ACCESS_STATE_QUEUED_TO_APPLY,
ACCESS_STATE_QUEUED_TO_DENY,
ACCESS_STATE_APPLYING,
ACCESS_STATE_DENYING,
ACCESS_STATE_ACTIVE,
ACCESS_STATE_ERROR,
ACCESS_STATE_DELETED,
)
# Share and share server migration task states
TASK_STATE_MIGRATION_STARTING = 'migration_starting'
TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress'
TASK_STATE_MIGRATION_COMPLETING = 'migration_completing'
TASK_STATE_MIGRATION_SUCCESS = 'migration_success'
TASK_STATE_MIGRATION_ERROR = 'migration_error'
TASK_STATE_MIGRATION_CANCELLED = 'migration_cancelled'
TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS = 'migration_cancel_in_progress'
TASK_STATE_MIGRATION_DRIVER_STARTING = 'migration_driver_starting'
TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS = 'migration_driver_in_progress'
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE = 'migration_driver_phase1_done'
# Share statuses used by data service and host assisted migration
TASK_STATE_DATA_COPYING_STARTING = 'data_copying_starting'
TASK_STATE_DATA_COPYING_IN_PROGRESS = 'data_copying_in_progress'
TASK_STATE_DATA_COPYING_COMPLETING = 'data_copying_completing'
TASK_STATE_DATA_COPYING_COMPLETED = 'data_copying_completed'
TASK_STATE_DATA_COPYING_CANCELLED = 'data_copying_cancelled'
TASK_STATE_DATA_COPYING_ERROR = 'data_copying_error'
BUSY_TASK_STATES = (
TASK_STATE_MIGRATION_STARTING,
TASK_STATE_MIGRATION_IN_PROGRESS,
TASK_STATE_MIGRATION_COMPLETING,
TASK_STATE_MIGRATION_DRIVER_STARTING,
TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
TASK_STATE_DATA_COPYING_STARTING,
TASK_STATE_DATA_COPYING_IN_PROGRESS,
TASK_STATE_DATA_COPYING_COMPLETING,
TASK_STATE_DATA_COPYING_COMPLETED,
)
BUSY_COPYING_STATES = (
TASK_STATE_DATA_COPYING_STARTING,
TASK_STATE_DATA_COPYING_IN_PROGRESS,
TASK_STATE_DATA_COPYING_COMPLETING,
)
TRANSITIONAL_STATUSES = (
STATUS_CREATING, STATUS_DELETING,
STATUS_MANAGING, STATUS_UNMANAGING,
STATUS_EXTENDING, STATUS_SHRINKING,
STATUS_MIGRATING, STATUS_MIGRATING_TO,
STATUS_RESTORING, STATUS_REVERTING,
STATUS_SERVER_MIGRATING, STATUS_SERVER_MIGRATING_TO,
)
INVALID_SHARE_INSTANCE_STATUSES_FOR_ACCESS_RULE_UPDATES = (
TRANSITIONAL_STATUSES + (STATUS_ERROR,)
)
SUPPORTED_SHARE_PROTOCOLS = (
'NFS', 'CIFS', 'GLUSTERFS', 'HDFS', 'CEPHFS', 'MAPRFS')
SECURITY_SERVICES_ALLOWED_TYPES = ['active_directory', 'ldap', 'kerberos']
LIKE_FILTER = ['name~', 'description~']
NFS_EXPORTS_FILE = '/etc/exports'
NFS_EXPORTS_FILE_TEMP = '/var/lib/nfs/etab'
MOUNT_FILE = '/etc/fstab'
MOUNT_FILE_TEMP = '/etc/mtab'
# Below represented ports are ranges (from, to)
CIFS_PORTS = (
("tcp", (445, 445)),
("tcp", (137, 139)),
("udp", (137, 139)),
("udp", (445, 445)),
)
NFS_PORTS = (
("tcp", (2049, 2049)),
("udp", (2049, 2049)),
)
SSH_PORTS = (
("tcp", (22, 22)),
)
PING_PORTS = (
("icmp", (-1, -1)),
)
WINRM_PORTS = (
("tcp", (5985, 5986)),
)
SERVICE_INSTANCE_SECGROUP_DATA = (
CIFS_PORTS + NFS_PORTS + PING_PORTS + WINRM_PORTS)
ACCESS_LEVEL_RW = 'rw'
ACCESS_LEVEL_RO = 'ro'
ACCESS_LEVELS = (
ACCESS_LEVEL_RW,
ACCESS_LEVEL_RO,
)
TASK_STATE_STATUSES = (
TASK_STATE_MIGRATION_STARTING,
TASK_STATE_MIGRATION_IN_PROGRESS,
TASK_STATE_MIGRATION_COMPLETING,
TASK_STATE_MIGRATION_SUCCESS,
TASK_STATE_MIGRATION_ERROR,
TASK_STATE_MIGRATION_CANCELLED,
TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
TASK_STATE_DATA_COPYING_STARTING,
TASK_STATE_DATA_COPYING_IN_PROGRESS,
TASK_STATE_DATA_COPYING_COMPLETING,
TASK_STATE_DATA_COPYING_COMPLETED,
TASK_STATE_DATA_COPYING_CANCELLED,
TASK_STATE_DATA_COPYING_ERROR,
None,
)
SERVER_TASK_STATE_STATUSES = (
TASK_STATE_MIGRATION_STARTING,
TASK_STATE_MIGRATION_IN_PROGRESS,
TASK_STATE_MIGRATION_COMPLETING,
TASK_STATE_MIGRATION_SUCCESS,
TASK_STATE_MIGRATION_ERROR,
TASK_STATE_MIGRATION_CANCEL_IN_PROGRESS,
TASK_STATE_MIGRATION_CANCELLED,
TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
None,
)
SHARE_SERVER_STATUSES = (
STATUS_ACTIVE,
STATUS_ERROR,
STATUS_DELETING,
STATUS_CREATING,
STATUS_MANAGING,
STATUS_UNMANAGING,
STATUS_UNMANAGE_ERROR,
STATUS_MANAGE_ERROR,
STATUS_INACTIVE,
STATUS_SERVER_MIGRATING,
STATUS_SERVER_MIGRATING_TO,
STATUS_SERVER_NETWORK_CHANGE,
)
SHARE_NETWORK_STATUSES = (
STATUS_NETWORK_ACTIVE,
STATUS_NETWORK_ERROR,
STATUS_NETWORK_CHANGE,
)
REPLICA_STATE_ACTIVE = 'active'
REPLICA_STATE_IN_SYNC = 'in_sync'
REPLICA_STATE_OUT_OF_SYNC = 'out_of_sync'
REPLICATION_TYPE_READABLE = 'readable'
REPLICATION_TYPE_WRITABLE = 'writable'
REPLICATION_TYPE_DR = 'dr'
class ExtraSpecs(object):
# Extra specs key names
DRIVER_HANDLES_SHARE_SERVERS = "driver_handles_share_servers"
SNAPSHOT_SUPPORT = "snapshot_support"
REPLICATION_TYPE_SPEC = "replication_type"
CREATE_SHARE_FROM_SNAPSHOT_SUPPORT = "create_share_from_snapshot_support"
REVERT_TO_SNAPSHOT_SUPPORT = "revert_to_snapshot_support"
MOUNT_SNAPSHOT_SUPPORT = "mount_snapshot_support"
AVAILABILITY_ZONES = "availability_zones"
PROVISIONING_MAX_SHARE_SIZE = "provisioning:max_share_size"
PROVISIONING_MIN_SHARE_SIZE = "provisioning:min_share_size"
# Extra specs containers
REQUIRED = (
DRIVER_HANDLES_SHARE_SERVERS,
)
OPTIONAL = (
SNAPSHOT_SUPPORT,
CREATE_SHARE_FROM_SNAPSHOT_SUPPORT,
REVERT_TO_SNAPSHOT_SUPPORT,
REPLICATION_TYPE_SPEC,
MOUNT_SNAPSHOT_SUPPORT,
AVAILABILITY_ZONES,
PROVISIONING_MAX_SHARE_SIZE,
PROVISIONING_MIN_SHARE_SIZE
)
# NOTE(cknight): Some extra specs are necessary parts of the Manila API and
# should be visible to non-admin users. REQUIRED specs are user-visible, as
# are a handful of community-agreed standardized OPTIONAL ones.
TENANT_VISIBLE = REQUIRED + OPTIONAL
BOOLEAN = (
DRIVER_HANDLES_SHARE_SERVERS,
SNAPSHOT_SUPPORT,
CREATE_SHARE_FROM_SNAPSHOT_SUPPORT,
REVERT_TO_SNAPSHOT_SUPPORT,
MOUNT_SNAPSHOT_SUPPORT,
)
# NOTE(cknight): Some extra specs are optional, but a nominal (typically
# False, but may be non-boolean) default value for each is still needed
# when creating shares.
INFERRED_OPTIONAL_MAP = {
SNAPSHOT_SUPPORT: False,
CREATE_SHARE_FROM_SNAPSHOT_SUPPORT: False,
REVERT_TO_SNAPSHOT_SUPPORT: False,
MOUNT_SNAPSHOT_SUPPORT: False,
}
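    # Illustrative sketch (not part of the original module): a caller can
    # layer real extra specs over these nominal defaults, e.g.
    #
    #   specs = dict(ExtraSpecs.INFERRED_OPTIONAL_MAP)
    #   specs.update(share_type_extra_specs)  # hypothetical dict of real specs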
REPLICATION_TYPES = ('writable', 'readable', 'dr')
| apache-2.0 | 7,765,387,853,528,770,000 | 30.222973 | 79 | 0.719325 | false |
zjuchenyuan/BioWeb | Lib/Bio/SeqIO/SwissIO.py | 1 | 6309 | # Copyright 2006-2013 by Peter Cock.
# Revisions copyright 2008-2009 by Michiel de Hoon.
# All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the "swiss" (aka SwissProt/UniProt) file format.
You are expected to use this module via the Bio.SeqIO functions.
See also the Bio.SwissProt module which offers more than just accessing
the sequences as SeqRecord objects.
See also Bio.SeqIO.UniprotIO.py which supports the "uniprot-xml" format.
"""
from __future__ import print_function
from Bio import Seq
from Bio import SeqRecord
from Bio import Alphabet
from Bio import SeqFeature
from Bio import SwissProt
def _make_position(location_string, offset=0):
"""Turn a Swiss location position into a SeqFeature position object (PRIVATE).
An offset of -1 is used with a start location to make it pythonic.
"""
if location_string == "?":
return SeqFeature.UnknownPosition()
# Hack so that feature from 0 to 0 becomes 0 to 0, not -1 to 0.
try:
return SeqFeature.ExactPosition(max(0, offset + int(location_string)))
except ValueError:
pass
if location_string.startswith("<"):
try:
return SeqFeature.BeforePosition(max(0, offset + int(location_string[1:])))
except ValueError:
pass
elif location_string.startswith(">"): # e.g. ">13"
try:
return SeqFeature.AfterPosition(max(0, offset + int(location_string[1:])))
except ValueError:
pass
elif location_string.startswith("?"): # e.g. "?22"
try:
return SeqFeature.UncertainPosition(max(0, offset + int(location_string[1:])))
except ValueError:
pass
raise NotImplementedError("Cannot parse location '%s'" % location_string)
def _make_seqfeature(name, from_res, to_res, description, ft_id):
"""Construct SeqFeature from feature data from parser (PRIVATE)."""
loc = SeqFeature.FeatureLocation(_make_position(from_res, -1),
_make_position(to_res, 0))
if not ft_id:
ft_id = "<unknown id>" # The default in SeqFeature object
return SeqFeature.SeqFeature(loc, type=name, id=ft_id,
qualifiers={"description": description})
def SwissIterator(handle):
"""Breaks up a Swiss-Prot/UniProt file into SeqRecord objects.
Every section from the ID line to the terminating // becomes
a single SeqRecord with associated annotation and features.
This parser is for the flat file "swiss" format as used by:
- Swiss-Prot aka SwissProt
- TrEMBL
- UniProtKB aka UniProt Knowledgebase
For consistency with BioPerl and EMBOSS we call this the "swiss"
format. See also the SeqIO support for "uniprot-xml" format.
Rather than calling it directly, you are expected to use this
parser via Bio.SeqIO.parse(..., format="swiss") instead.
"""
swiss_records = SwissProt.parse(handle)
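    # Illustrative usage sketch (not part of the original module); the file
    # name below is hypothetical -- any Swiss-Prot/UniProt flat file works:
    #
    #     from Bio import SeqIO
    #     for record in SeqIO.parse("uniprot_sprot.dat", "swiss"):
    #         print(record.id, record.annotations["organism"])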
for swiss_record in swiss_records:
# Convert the SwissProt record to a SeqRecord
seq = Seq.Seq(swiss_record.sequence, Alphabet.generic_protein)
record = SeqRecord.SeqRecord(seq,
id=swiss_record.accessions[0],
name=swiss_record.entry_name,
description=swiss_record.description,
features=[_make_seqfeature(*f) for f
in swiss_record.features],
)
record.description = swiss_record.description
for cross_reference in swiss_record.cross_references:
if len(cross_reference) < 2:
continue
database, accession = cross_reference[:2]
dbxref = "%s:%s" % (database, accession)
if dbxref not in record.dbxrefs:
record.dbxrefs.append(dbxref)
annotations = record.annotations
annotations['accessions'] = swiss_record.accessions
if swiss_record.created:
annotations['date'] = swiss_record.created[0]
if swiss_record.sequence_update:
annotations[
'date_last_sequence_update'] = swiss_record.sequence_update[0]
if swiss_record.annotation_update:
annotations['date_last_annotation_update'] = swiss_record.annotation_update[0]
if swiss_record.gene_name:
annotations['gene_name'] = swiss_record.gene_name
annotations['organism'] = swiss_record.organism.rstrip(".")
annotations['taxonomy'] = swiss_record.organism_classification
annotations['ncbi_taxid'] = swiss_record.taxonomy_id
if swiss_record.host_organism:
annotations['organism_host'] = swiss_record.host_organism
if swiss_record.host_taxonomy_id:
annotations['host_ncbi_taxid'] = swiss_record.host_taxonomy_id
if swiss_record.comments:
annotations['comment'] = "\n".join(swiss_record.comments)
if swiss_record.references:
annotations['references'] = []
for reference in swiss_record.references:
feature = SeqFeature.Reference()
feature.comment = " ".join("%s=%s;" % k_v for k_v in reference.comments)
for key, value in reference.references:
if key == 'PubMed':
feature.pubmed_id = value
elif key == 'MEDLINE':
feature.medline_id = value
elif key == 'DOI':
pass
elif key == 'AGRICOLA':
pass
else:
raise ValueError(
"Unknown key %s found in references" % key)
feature.authors = reference.authors
feature.title = reference.title
feature.journal = reference.location
annotations['references'].append(feature)
if swiss_record.keywords:
record.annotations['keywords'] = swiss_record.keywords
yield record
| mit | -5,726,444,842,499,396,000 | 42.212329 | 90 | 0.606911 | false |
jainpranav/DFA_and_Regex_Toolbox | Automata_Editor/gui.py | 1 | 9003 | try:
from Tkinter import *
import tkFont
except ImportError as err:
print ("error: %s. Tkinter library is required for using the GUI.") % err.message
sys.exit(1)
from Automata import *
dotFound = isInstalled("dot")
if dotFound:
try:
import Image, ImageTk
except ImportError as err:
print ("Notice: %s. The PIL library is required for displaying the graphs.") % err.message
dotFound = False
else:
print "Notice: The GraphViz software is required for displaying the graphs."
class AutomataGUI:
def __init__(self, root, dotFound):
self.root = root
self.initUI()
self.selectedButton = 0
self.dotFound = dotFound
startRegex = "(0+1)0*"
self.regexVar.set(startRegex)
self.handleBuildRegexButton()
def initUI(self):
self.root.title("Automata From Regular Expressions")
ScreenSizeX = self.root.winfo_screenwidth()
ScreenSizeY = self.root.winfo_screenheight()
ScreenRatioX = 0.7
ScreenRatioY = 0.8
self.FrameSizeX = int(ScreenSizeX * ScreenRatioX)
self.FrameSizeY = int(ScreenSizeY * ScreenRatioY)
print self.FrameSizeY, self.FrameSizeX
FramePosX = (ScreenSizeX - self.FrameSizeX)/2
FramePosY = (ScreenSizeY - self.FrameSizeY)/2
padX = 10
padY = 10
self.root.geometry("%sx%s+%s+%s" % (self.FrameSizeX,self.FrameSizeY,FramePosX,FramePosY))
self.root.resizable(width=False, height=False)
parentFrame = Frame(self.root, width = int(self.FrameSizeX - 2*padX), height = int(self.FrameSizeY - 2*padY))
parentFrame.grid(padx=padX, pady=padY, stick=E+W+N+S)
regexFrame = Frame(parentFrame)
enterRegexLabel = Label(regexFrame, text="Enter regular expression [operators allowed are plus (+), dot (.) and star (*)]:")
self.regexVar = StringVar()
self.regexField = Entry(regexFrame, width=80, textvariable=self.regexVar)
buildRegexButton = Button(regexFrame, text="Build", width=10, command=self.handleBuildRegexButton)
enterRegexLabel.grid(row=0, column=0, sticky=W)
self.regexField.grid(row=1, column=0, sticky=W)
buildRegexButton.grid(row=1, column=1, padx=5)
testStringFrame = Frame(parentFrame)
testStringLabel = Label(testStringFrame, text="Enter a test string: ")
self.testVar = StringVar()
self.testStringField = Entry(testStringFrame, width=80, textvariable=self.testVar)
testStringButton = Button(testStringFrame, text="Test", width=10, command=self.handleTestStringButton)
testStringLabel.grid(row=0, column=0, sticky=W)
self.testStringField.grid(row=1, column=0, sticky=W)
testStringButton.grid(row=1, column=1, padx=5)
self.statusLabel = Label(parentFrame)
buttonGroup = Frame(parentFrame)
self.timingLabel = Label(buttonGroup, text="Idle...", width=50, justify=LEFT)
nfaButton = Button(buttonGroup, text="NFA", width=15, command=self.handlenfaButton)
dfaButton = Button(buttonGroup, text="DFA", width=15, command=self.handledfaButton)
minDFAButton = Button(buttonGroup, text="Minimized DFA", width=15, command=self.handleminDFAButton)
self.timingLabel.grid(row=0, column=0, sticky=W)
nfaButton.grid(row=0, column=1)
dfaButton.grid(row=0, column=2)
minDFAButton.grid(row=0, column=3)
automataCanvasFrame = Frame(parentFrame, height=100, width=100)
self.cwidth = int(self.FrameSizeX - (2*padX + 20))
self.cheight = int(self.FrameSizeY * 0.6)
self.automataCanvas = Canvas(automataCanvasFrame, bg='#FFFFFF', width= self.cwidth, height = self.cheight,scrollregion=(0,0,self.cwidth,self.cheight))
hbar=Scrollbar(automataCanvasFrame,orient=HORIZONTAL)
hbar.pack(side=BOTTOM,fill=X)
hbar.config(command=self.automataCanvas.xview)
vbar=Scrollbar(automataCanvasFrame,orient=VERTICAL)
vbar.pack(side=RIGHT,fill=Y)
vbar.config(command=self.automataCanvas.yview)
self.automataCanvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
self.canvasitems = []
self.automataCanvas.pack()
self.bottomLabel = Label(parentFrame, text="Created by Pranav Jain under Dr. Astrid Kiehn")
regexFrame.grid(row=0, column=0, sticky=W, padx=(50,0))
testStringFrame.grid(row=1, column=0, sticky=W, padx=(50,0))
self.statusLabel.grid(row=2, column=0, sticky=W, padx=(50,0))
buttonGroup.grid(row=3, column=0)
automataCanvasFrame.grid(row=4, column=0, sticky=E+W+N+S)
self.bottomLabel.grid(row=5, column=0, sticky=W, pady=10)
def handleBuildRegexButton(self):
t = time.time()
try:
inp = self.regexVar.get().replace(' ','')
if inp == '':
self.statusLabel.config(text="Detected empty regex!")
return
self.createAutomata(inp)
except BaseException as e:
self.statusLabel.config(text="Failure: %s" % e)
self.timingLabel.configure(text="Operation completed in " + "%.4f" % (time.time() - t) + " seconds")
self.displayAutomata()
def handleTestStringButton(self):
t = time.time()
inp = self.testVar.get().replace(' ','')
if inp == '':
inp = [':e:']
if self.dfaObj.acceptsString(inp):
self.statusLabel.config(text="Accepts :)")
else:
self.statusLabel.config(text="Does not accept :|")
self.timingLabel.configure(text="Operation completed in " + "%.4f" % (time.time() - t) + " seconds")
def handlenfaButton(self):
self.selectedButton = 0
self.displayAutomata()
def handledfaButton(self):
self.selectedButton = 1
self.displayAutomata()
def handleminDFAButton(self):
self.selectedButton = 2
self.displayAutomata()
def createAutomata(self, inp):
print "Regex: ", inp
nfaObj = NFAfromRegex(inp)
self.nfa = nfaObj.getNFA()
self.dfaObj = DFAfromNFA(self.nfa)
self.dfa = self.dfaObj.getDFA()
self.minDFA = self.dfaObj.getMinimisedDFA()
if self.dotFound:
drawGraph(self.dfa, "dfa")
drawGraph(self.nfa, "nfa")
drawGraph(self.minDFA, "mdfa")
dfafile = "graphdfa.png"
nfafile = "graphnfa.png"
mindfafile = "graphmdfa.png"
self.nfaimagefile = Image.open(nfafile)
self.dfaimagefile = Image.open(dfafile)
self.mindfaimagefile = Image.open(mindfafile)
self.nfaimg = ImageTk.PhotoImage(self.nfaimagefile)
self.dfaimg = ImageTk.PhotoImage(self.dfaimagefile)
self.mindfaimg = ImageTk.PhotoImage(self.mindfaimagefile)
def displayAutomata(self):
for item in self.canvasitems:
self.automataCanvas.delete(item)
if self.selectedButton == 0:
header = "e-NFA"
automata = self.nfa
if self.dotFound:
image = self.nfaimg
imagefile = self.nfaimagefile
elif self.selectedButton == 1:
header = "DFA"
automata = self.dfa
if self.dotFound:
image = self.dfaimg
imagefile = self.dfaimagefile
elif self.selectedButton == 2:
header = "Minimised DFA"
automata = self.minDFA
if self.dotFound:
image = self.mindfaimg
imagefile = self.mindfaimagefile
font = tkFont.Font(family="times", size=20)
(w,h) = (font.measure(header),font.metrics("linespace"))
headerheight = h + 10
itd = self.automataCanvas.create_text(10,10,text=header, font=font, anchor=NW)
self.canvasitems.append(itd)
[text, linecount] = automata.getPrintText()
font = tkFont.Font(family="times", size=13)
(w,h) = (font.measure(text),font.metrics("linespace"))
textheight = headerheight + linecount * h + 20
itd = self.automataCanvas.create_text(10, headerheight + 10,text=text, font=font, anchor=NW)
self.canvasitems.append(itd)
if self.dotFound:
itd = self.automataCanvas.create_image(10, textheight, image=image, anchor=NW)
self.canvasitems.append(itd)
totalwidth = imagefile.size[0] + 10
totalheight = imagefile.size[1] + textheight + 10
else:
totalwidth = self.cwidth + 10
totalheight = textheight + 10
if totalheight < self.cheight:
totalheight = self.cheight
if totalwidth < self.cwidth:
totalwidth = self.cwidth
self.automataCanvas.config(scrollregion=(0,0,totalwidth,totalheight))
def main():
global dotFound
root = Tk()
app = AutomataGUI(root, dotFound)
root.mainloop()
if __name__ == '__main__':
main()
| apache-2.0 | -7,390,420,190,441,354,000 | 41.267606 | 158 | 0.625791 | false |
spacy-io/spaCy | spacy/tests/parser/test_parse.py | 1 | 15404 | import pytest
from numpy.testing import assert_equal
from spacy.attrs import DEP
from spacy.lang.en import English
from spacy.training import Example
from spacy.tokens import Doc
from spacy import util
from ..util import apply_transition_sequence, make_tempdir
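# Each training example pairs a text with token-level annotations: "heads" holds the
# index of every token's syntactic head and "deps" the matching dependency labels.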
TRAIN_DATA = [
(
"They trade mortgage-backed securities.",
{
"heads": [1, 1, 4, 4, 5, 1, 1],
"deps": ["nsubj", "ROOT", "compound", "punct", "nmod", "dobj", "punct"],
},
),
(
"I like London and Berlin.",
{
"heads": [1, 1, 1, 2, 2, 1],
"deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"],
},
),
]
CONFLICTING_DATA = [
(
"I like London and Berlin.",
{
"heads": [1, 1, 1, 2, 2, 1],
"deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"],
},
),
(
"I like London and Berlin.",
{
"heads": [0, 0, 0, 0, 0, 0],
"deps": ["ROOT", "nsubj", "nsubj", "cc", "conj", "punct"],
},
),
]
PARTIAL_DATA = [
(
"I like London.",
{
"heads": [1, 1, 1, None],
"deps": ["nsubj", "ROOT", "dobj", None],
},
),
]
eps = 0.1
def test_parser_root(en_vocab):
words = ["i", "do", "n't", "have", "other", "assistance"]
heads = [3, 3, 3, 3, 5, 3]
deps = ["nsubj", "aux", "neg", "ROOT", "amod", "dobj"]
doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
for t in doc:
assert t.dep != 0, t.text
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
@pytest.mark.parametrize("words", [["Hello"]])
def test_parser_parse_one_word_sentence(en_vocab, en_parser, words):
doc = Doc(en_vocab, words=words, heads=[0], deps=["ROOT"])
assert len(doc) == 1
with en_parser.step_through(doc) as _: # noqa: F841
pass
assert doc[0].dep != 0
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
def test_parser_initial(en_vocab, en_parser):
words = ["I", "ate", "the", "pizza", "with", "anchovies", "."]
transition = ["L-nsubj", "S", "L-det"]
doc = Doc(en_vocab, words=words)
apply_transition_sequence(en_parser, doc, transition)
assert doc[0].head.i == 1
assert doc[1].head.i == 1
assert doc[2].head.i == 3
assert doc[3].head.i == 3
def test_parser_parse_subtrees(en_vocab, en_parser):
words = ["The", "four", "wheels", "on", "the", "bus", "turned", "quickly"]
heads = [2, 2, 6, 2, 5, 3, 6, 6]
deps = ["dep"] * len(heads)
doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
assert len(list(doc[2].lefts)) == 2
assert len(list(doc[2].rights)) == 1
assert len(list(doc[2].children)) == 3
assert len(list(doc[5].lefts)) == 1
assert len(list(doc[5].rights)) == 0
assert len(list(doc[5].children)) == 1
assert len(list(doc[2].subtree)) == 6
def test_parser_merge_pp(en_vocab):
words = ["A", "phrase", "with", "another", "phrase", "occurs"]
heads = [1, 5, 1, 4, 2, 5]
deps = ["det", "nsubj", "prep", "det", "pobj", "ROOT"]
pos = ["DET", "NOUN", "ADP", "DET", "NOUN", "VERB"]
doc = Doc(en_vocab, words=words, deps=deps, heads=heads, pos=pos)
with doc.retokenize() as retokenizer:
for np in doc.noun_chunks:
retokenizer.merge(np, attrs={"lemma": np.lemma_})
assert doc[0].text == "A phrase"
assert doc[1].text == "with"
assert doc[2].text == "another phrase"
assert doc[3].text == "occurs"
@pytest.mark.skip(
reason="The step_through API was removed (but should be brought back)"
)
def test_parser_arc_eager_finalize_state(en_vocab, en_parser):
words = ["a", "b", "c", "d", "e"]
# right branching
transition = ["R-nsubj", "D", "R-nsubj", "R-nsubj", "D", "R-ROOT"]
tokens = Doc(en_vocab, words=words)
apply_transition_sequence(en_parser, tokens, transition)
assert tokens[0].n_lefts == 0
assert tokens[0].n_rights == 2
assert tokens[0].left_edge.i == 0
assert tokens[0].right_edge.i == 4
assert tokens[0].head.i == 0
assert tokens[1].n_lefts == 0
assert tokens[1].n_rights == 0
assert tokens[1].left_edge.i == 1
assert tokens[1].right_edge.i == 1
assert tokens[1].head.i == 0
assert tokens[2].n_lefts == 0
assert tokens[2].n_rights == 2
assert tokens[2].left_edge.i == 2
assert tokens[2].right_edge.i == 4
assert tokens[2].head.i == 0
assert tokens[3].n_lefts == 0
assert tokens[3].n_rights == 0
assert tokens[3].left_edge.i == 3
assert tokens[3].right_edge.i == 3
assert tokens[3].head.i == 2
assert tokens[4].n_lefts == 0
assert tokens[4].n_rights == 0
assert tokens[4].left_edge.i == 4
assert tokens[4].right_edge.i == 4
assert tokens[4].head.i == 2
# left branching
transition = ["S", "S", "S", "L-nsubj", "L-nsubj", "L-nsubj", "L-nsubj"]
tokens = Doc(en_vocab, words=words)
apply_transition_sequence(en_parser, tokens, transition)
assert tokens[0].n_lefts == 0
assert tokens[0].n_rights == 0
assert tokens[0].left_edge.i == 0
assert tokens[0].right_edge.i == 0
assert tokens[0].head.i == 4
assert tokens[1].n_lefts == 0
assert tokens[1].n_rights == 0
assert tokens[1].left_edge.i == 1
assert tokens[1].right_edge.i == 1
assert tokens[1].head.i == 4
assert tokens[2].n_lefts == 0
assert tokens[2].n_rights == 0
assert tokens[2].left_edge.i == 2
assert tokens[2].right_edge.i == 2
assert tokens[2].head.i == 4
assert tokens[3].n_lefts == 0
assert tokens[3].n_rights == 0
assert tokens[3].left_edge.i == 3
assert tokens[3].right_edge.i == 3
assert tokens[3].head.i == 4
assert tokens[4].n_lefts == 4
assert tokens[4].n_rights == 0
assert tokens[4].left_edge.i == 0
assert tokens[4].right_edge.i == 4
assert tokens[4].head.i == 4
def test_parser_set_sent_starts(en_vocab):
# fmt: off
words = ['Ein', 'Satz', '.', 'Außerdem', 'ist', 'Zimmer', 'davon', 'überzeugt', ',', 'dass', 'auch', 'epige-', '\n', 'netische', 'Mechanismen', 'eine', 'Rolle', 'spielen', ',', 'also', 'Vorgänge', ',', 'die', '\n', 'sich', 'darauf', 'auswirken', ',', 'welche', 'Gene', 'abgelesen', 'werden', 'und', '\n', 'welche', 'nicht', '.', '\n']
heads = [1, 1, 1, 30, 4, 4, 7, 4, 7, 17, 14, 14, 11, 14, 17, 16, 17, 6, 17, 20, 11, 20, 26, 22, 26, 26, 20, 26, 29, 31, 31, 25, 31, 32, 17, 4, 4, 36]
deps = ['nk', 'ROOT', 'punct', 'mo', 'ROOT', 'sb', 'op', 'pd', 'punct', 'cp', 'mo', 'nk', '', 'nk', 'sb', 'nk', 'oa', 're', 'punct', 'mo', 'app', 'punct', 'sb', '', 'oa', 'op', 'rc', 'punct', 'nk', 'sb', 'oc', 're', 'cd', '', 'oa', 'ng', 'punct', '']
# fmt: on
doc = Doc(en_vocab, words=words, deps=deps, heads=heads)
for i in range(len(words)):
if i == 0 or i == 3:
assert doc[i].is_sent_start is True
else:
assert doc[i].is_sent_start is False
for sent in doc.sents:
for token in sent:
assert token.head in sent
@pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"])
def test_incomplete_data(pipe_name):
# Test that the parser works with incomplete information
nlp = English()
parser = nlp.add_pipe(pipe_name)
train_examples = []
for text, annotations in PARTIAL_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
if dep is not None:
parser.add_label(dep)
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(150):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses[pipe_name] < 0.0001
# test the trained model
test_text = "I like securities."
doc = nlp(test_text)
assert doc[0].dep_ == "nsubj"
assert doc[2].dep_ == "dobj"
assert doc[0].head.i == 1
assert doc[2].head.i == 1
@pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"])
def test_overfitting_IO(pipe_name):
# Simple test to try and quickly overfit the dependency parser (normal or beam)
nlp = English()
parser = nlp.add_pipe(pipe_name)
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
parser.add_label(dep)
optimizer = nlp.initialize()
# run overfitting
for i in range(200):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses[pipe_name] < 0.0001
# test the trained model
test_text = "I like securities."
doc = nlp(test_text)
assert doc[0].dep_ == "nsubj"
assert doc[2].dep_ == "dobj"
assert doc[3].dep_ == "punct"
assert doc[0].head.i == 1
assert doc[2].head.i == 1
assert doc[3].head.i == 1
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
assert doc2[0].dep_ == "nsubj"
assert doc2[2].dep_ == "dobj"
assert doc2[3].dep_ == "punct"
assert doc2[0].head.i == 1
assert doc2[2].head.i == 1
assert doc2[3].head.i == 1
# Make sure that running pipe twice, or comparing to call, always amounts to the same predictions
texts = [
"Just a sentence.",
"Then one more sentence about London.",
"Here is another one.",
"I like London.",
]
batch_deps_1 = [doc.to_array([DEP]) for doc in nlp.pipe(texts)]
batch_deps_2 = [doc.to_array([DEP]) for doc in nlp.pipe(texts)]
no_batch_deps = [doc.to_array([DEP]) for doc in [nlp(text) for text in texts]]
assert_equal(batch_deps_1, batch_deps_2)
assert_equal(batch_deps_1, no_batch_deps)
def test_beam_parser_scores():
# Test that we can get confidence values out of the beam_parser pipe
beam_width = 16
beam_density = 0.0001
nlp = English()
config = {
"beam_width": beam_width,
"beam_density": beam_density,
}
parser = nlp.add_pipe("beam_parser", config=config)
train_examples = []
for text, annotations in CONFLICTING_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
parser.add_label(dep)
optimizer = nlp.initialize()
# update a bit with conflicting data
for i in range(10):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
# test the scores from the beam
test_text = "I like securities."
doc = nlp.make_doc(test_text)
docs = [doc]
beams = parser.predict(docs)
head_scores, label_scores = parser.scored_parses(beams)
for j in range(len(doc)):
for label in parser.labels:
label_score = label_scores[0][(j, label)]
assert 0 - eps <= label_score <= 1 + eps
for i in range(len(doc)):
head_score = head_scores[0][(j, i)]
assert 0 - eps <= head_score <= 1 + eps
def test_beam_overfitting_IO():
# Simple test to try and quickly overfit the Beam dependency parser
nlp = English()
beam_width = 16
beam_density = 0.0001
config = {
"beam_width": beam_width,
"beam_density": beam_density,
}
parser = nlp.add_pipe("beam_parser", config=config)
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
for dep in annotations.get("deps", []):
parser.add_label(dep)
optimizer = nlp.initialize()
# run overfitting
for i in range(150):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
assert losses["beam_parser"] < 0.0001
# test the scores from the beam
test_text = "I like securities."
docs = [nlp.make_doc(test_text)]
beams = parser.predict(docs)
head_scores, label_scores = parser.scored_parses(beams)
# we only processed one document
head_scores = head_scores[0]
label_scores = label_scores[0]
# test label annotations: 0=nsubj, 2=dobj, 3=punct
assert label_scores[(0, "nsubj")] == pytest.approx(1.0, abs=eps)
assert label_scores[(0, "dobj")] == pytest.approx(0.0, abs=eps)
assert label_scores[(0, "punct")] == pytest.approx(0.0, abs=eps)
assert label_scores[(2, "nsubj")] == pytest.approx(0.0, abs=eps)
assert label_scores[(2, "dobj")] == pytest.approx(1.0, abs=eps)
assert label_scores[(2, "punct")] == pytest.approx(0.0, abs=eps)
assert label_scores[(3, "nsubj")] == pytest.approx(0.0, abs=eps)
assert label_scores[(3, "dobj")] == pytest.approx(0.0, abs=eps)
assert label_scores[(3, "punct")] == pytest.approx(1.0, abs=eps)
# test head annotations: the root is token at index 1
assert head_scores[(0, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores[(0, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores[(0, 2)] == pytest.approx(0.0, abs=eps)
assert head_scores[(2, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores[(2, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores[(2, 2)] == pytest.approx(0.0, abs=eps)
assert head_scores[(3, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores[(3, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores[(3, 2)] == pytest.approx(0.0, abs=eps)
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
docs2 = [nlp2.make_doc(test_text)]
parser2 = nlp2.get_pipe("beam_parser")
beams2 = parser2.predict(docs2)
head_scores2, label_scores2 = parser2.scored_parses(beams2)
# we only processed one document
head_scores2 = head_scores2[0]
label_scores2 = label_scores2[0]
# check the results again
assert label_scores2[(0, "nsubj")] == pytest.approx(1.0, abs=eps)
assert label_scores2[(0, "dobj")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(0, "punct")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(2, "nsubj")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(2, "dobj")] == pytest.approx(1.0, abs=eps)
assert label_scores2[(2, "punct")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(3, "nsubj")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(3, "dobj")] == pytest.approx(0.0, abs=eps)
assert label_scores2[(3, "punct")] == pytest.approx(1.0, abs=eps)
assert head_scores2[(0, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(0, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores2[(0, 2)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(2, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(2, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores2[(2, 2)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(3, 0)] == pytest.approx(0.0, abs=eps)
assert head_scores2[(3, 1)] == pytest.approx(1.0, abs=eps)
assert head_scores2[(3, 2)] == pytest.approx(0.0, abs=eps)
| mit | 2,471,457,338,668,305,400 | 36.381068 | 338 | 0.584507 | false |
jayrambhia/SimpleCV2 | SimpleCV/Features/Superpixels.py | 1 | 10945 | from SimpleCV.base import cv2, np, LazyProperty, copy
from SimpleCV.ImageClass import Image
from SimpleCV.Features import Blob, FeatureSet
from SimpleCV.Color import Color
class SLIC:
"""
**SUMMARY**
This class contains an implementation of the SLIC Superpixel
algorithm by Achanta et al. (PAMI'12, vol. 34, num. 11, pp. 2274-2282).
The C++ implementation from which this Python implementation is derived
can be found here https://github.com/PSMM/SLIC-Superpixels
**EXAMPLE**
>>> img = Image("lenna")
>>> nr_superpixels = 400
>>> step = int((img.width*img.height/nr_superpixels)**0.5)
>>> nc = 40
>>> slic = SLIC(img, step, nc)
>>> superpixels = slic.generateSuperPixels()
>>> superpixels.show()
"""
def __init__(self, img, step, nc):
self.image = img
self.img = img.getNumpy()
self.labimg = cv2.cvtColor(self.img, cv2.COLOR_BGR2LAB).astype(np.float64)
self.contourImage = img.copy()
self.contourImg = self.contourImage._numpy
self.width, self.height = img.size()
self.step = step
self.nc = nc
self.ns = step
self.FLT_MAX = 1000000
self.ITERATIONS = 10
def generateSuperPixels(self):
"""
Compute the over-segmentation based on the step-size and relative
weighting of the pixel and colour values.
"""
self._initData()
indnp = np.mgrid[0:self.height,0:self.width].swapaxes(0,2).swapaxes(0,1)
for i in range(self.ITERATIONS):
self.distances = self.FLT_MAX * np.ones(self.img.shape[:2])
for j in xrange(self.centers.shape[0]):
xlow, xhigh = int(self.centers[j][3] - self.step), int(self.centers[j][3] + self.step)
ylow, yhigh = int(self.centers[j][4] - self.step), int(self.centers[j][4] + self.step)
if xlow <= 0:
xlow = 0
if xhigh > self.width:
xhigh = self.width
if ylow <=0:
ylow = 0
if yhigh > self.height:
yhigh = self.height
cropimg = self.labimg[ylow : yhigh , xlow : xhigh].astype(np.int64)
colordiff = cropimg - self.labimg[self.centers[j][4], self.centers[j][3]]
colorDist = np.sqrt(np.sum(np.square(colordiff.astype(np.int64)), axis=2))
yy, xx = np.ogrid[ylow : yhigh, xlow : xhigh]
pixdist = ((yy-self.centers[j][4])**2 + (xx-self.centers[j][3])**2)**0.5
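                # Combined SLIC distance: the colour (CIELAB) distance is scaled by
                # the compactness weight nc and the spatial distance by the grid
                # interval ns, i.e. D = sqrt((d_lab/nc)**2 + (d_xy/ns)**2).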
dist = ((colorDist/self.nc)**2 + (pixdist/self.ns)**2)**0.5
distanceCrop = self.distances[ylow : yhigh, xlow : xhigh]
idx = dist < distanceCrop
distanceCrop[idx] = dist[idx]
self.distances[ylow : yhigh, xlow : xhigh] = distanceCrop
self.clusters[ylow : yhigh, xlow : xhigh][idx] = j
for k in xrange(len(self.centers)):
idx = (self.clusters == k)
colornp = self.labimg[idx]
distnp = indnp[idx]
self.centers[k][0:3] = np.sum(colornp, axis=0)
sumy, sumx = np.sum(distnp, axis=0)
self.centers[k][3:] = sumx, sumy
self.centers[k] /= np.sum(idx)
self._createConnectivity()
superpixels = self._segmentSuperpixels()
return superpixels
def _initData(self):
"""
Initialize the cluster centers and initial values of the pixel-wise
cluster assignment and distance values.
"""
self.clusters = -1 * np.ones(self.img.shape[:2])
self.distances = self.FLT_MAX * np.ones(self.img.shape[:2])
centers = []
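        # Seed cluster centres on a regular grid with spacing `step`, then nudge
        # each seed to the lowest-gradient pixel in its 3x3 neighbourhood.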
for i in xrange(self.step, self.width - self.step/2, self.step):
for j in xrange(self.step, self.height - self.step/2, self.step):
nc = self._findLocalMinimum(center=(i, j))
color = self.labimg[nc[1], nc[0]]
center = [color[0], color[1], color[2], nc[0], nc[1]]
centers.append(center)
self.center_counts = np.zeros(len(centers))
self.centers = np.array(centers)
def _findLocalMinimum(self, center):
"""
Find a local gradient minimum of a pixel in a 3x3 neighbourhood.
This method is called upon initialization of the cluster centers.
"""
min_grad = self.FLT_MAX
loc_min = center
for i in xrange(center[0] - 1, center[0] + 2):
for j in xrange(center[1] - 1, center[1] + 2):
c1 = self.labimg[j+1, i]
c2 = self.labimg[j, i+1]
c3 = self.labimg[j, i]
if ((c1[0] - c3[0])**2)**0.5 + ((c2[0] - c3[0])**2)**0.5 < min_grad:
min_grad = abs(c1[0] - c3[0]) + abs(c2[0] - c3[0])
loc_min = [i, j]
return loc_min
def _createConnectivity(self):
"""
Enforce connectivity of the superpixels. Needs to be optimized.
"""
label = 0
adjlabel = 0
lims = self.width * self.height / self.centers.shape[0]
dx4 = [-1, 0, 1, 0]
dy4 = [0, -1, 0, 1]
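        # Flood-fill relabelling: clusters are renumbered into connected components,
        # and components no larger than a quarter of the mean superpixel area
        # (lims >> 2) are absorbed into a neighbouring label.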
new_clusters = -1 * np.ones(self.img.shape[:2]).astype(np.int64)
elements = []
for i in xrange(self.width):
for j in xrange(self.height):
if new_clusters[j, i] == -1:
elements = []
elements.append((j, i))
for dx, dy in zip(dx4, dy4):
x = elements[0][1] + dx
y = elements[0][0] + dy
if (x>=0 and x < self.width and
y>=0 and y < self.height and
new_clusters[y, x] >=0):
adjlabel = new_clusters[y, x]
count = 1
c = 0
while c < count:
for dx, dy in zip(dx4, dy4):
x = elements[c][1] + dx
y = elements[c][0] + dy
if (x>=0 and x<self.width and y>=0 and y<self.height):
if new_clusters[y, x] == -1 and self.clusters[j, i] == self.clusters[y, x]:
elements.append((y, x))
new_clusters[y, x] = label
count+=1
c+=1
#print count
if (count <= lims >> 2):
for c in range(count):
new_clusters[elements[c]] = adjlabel
label-=1
label+=1
self.new_clusters = new_clusters
def _segmentSuperpixels(self):
img = self.new_clusters
limit = np.max(img)
superpixels = Superpixels()
for label in range(limit+1):
clusterimg = Image(255*(img == label).astype(np.uint8))
blobs = clusterimg.findBlobs()
if blobs is None:
continue
blob = blobs[-1]
blob.image = self.image & clusterimg
superpixels.append(blob)
return superpixels
class Superpixels(FeatureSet):
"""
** SUMMARY **
Superpixels is a class extended from FeatureSet which is a class
extended from Python's list. So, it has all the properties of a list
as well as all the properties of FeatureSet.
Each object of this list is a Blob corresponding to the superpixel.
** EXAMPLE **
>>> image = Image("lenna")
>>> sp = image.segmentSuperpixels(300, 20)
>>> sp.show()
>>> sp.centers()
"""
def __init__(self):
self._drawingImage = None
self.clusterMeanImage = None
pass
def append(self, blob):
list.append(self, blob)
#if len(self) != 1:
#self.image += blob.image.copy()
@LazyProperty
def image(self):
img = None
for sp in self:
if img is None:
img = sp.image
else:
img += sp.image
return img
def draw(self, color=Color.RED, width=2, alpha=255):
"""
**SUMMARY**
Draw all the superpixels, in the given color, to the appropriate layer
By default, this draws the superpixels boundary. If you
provide a width, an outline of the exterior and interior contours is drawn.
**PARAMETERS**
        * *color* - The color to render the blob as a color tuple.
        * *width* - The width of the drawn blob in pixels; if -1, the polygon is filled.
        * *alpha* - The alpha value of the rendered blob: 0=transparent, 255=opaque.
**RETURNS**
Image with superpixels drawn on it.
**EXAMPLE**
>>> image = Image("lenna")
>>> sp = image.segmentSuperpixels(300, 20)
>>> sp.draw(color=(255, 0, 255), width=5, alpha=128).show()
"""
img = self.image.copy()
self._drawingImage = Image(self.image.getEmpty(3))
_mLayers = []
for sp in self:
sp.draw(color=color, width=width, alpha=alpha)
self._drawingImage += sp.image.copy()
for layer in sp.image._mLayers:
_mLayers.append(layer)
self._drawingImage._mLayers = copy(_mLayers)
return self._drawingImage.copy()
def show(self, color=Color.RED, width=2, alpha=255):
"""
**SUMMARY**
This function automatically draws the superpixels on the drawing image
and shows it.
** RETURNS **
None
** EXAMPLE **
>>> image = Image("lenna")
>>> sp = image.segmentSuperpixels(300, 20)
>>> sp.show(color=(255, 0, 255), width=5, alpha=128)
"""
if type(self._drawingImage) == type(None):
self.draw(color=color, width=width, alpha=alpha)
self._drawingImage.show()
def colorWithClusterMeans(self):
"""
**SUMMARY**
This function colors each superpixel with its mean color and
        returns an image.
**RETURNS**
        Image with superpixels drawn in their mean color.
**EXAMPLE**
>>> image = Image("lenna")
>>> sp = image.segmentSuperpixels(300, 20)
>>> sp.colorWithClusterMeans().show()
"""
if type(self.clusterMeanImage) != type(None):
return self.clusterMeanImage
self.clusterMeanImage = Image(self.image.getEmpty(3))
_mLayers = []
for sp in self:
color = tuple(reversed(sp.meanColor()))
sp.draw(color=color, width=-1)
self.clusterMeanImage += sp.image.copy()
for layer in sp.image._mLayers:
_mLayers.append(layer)
self.clusterMeanImage._mLayers = copy(_mLayers)
return self.clusterMeanImage
| bsd-3-clause | -863,911,276,328,462,000 | 35.605351 | 104 | 0.522979 | false |
tectronics/faint-graphics-editor | python/generate/set_and_get.py | 1 | 14757 | # Copyright 2012 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import sys
"""Parameters for gencpp.py."""
SHORTHAND = 1
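# In the py_to_cpp_map dictionaries below a value is either a plain C++ enum string
# or a (string, SHORTHAND) tuple; the SHORTHAND marker flags short aliases (e.g. "m")
# that map to the same C++ value as their long form.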
class CppEnum:
"""Contains the fields for creating a C++-enum for an
IntSetting."""
def __init__( self, name, entries ):
self.name = name
self.entries = entries
def get_name(self):
return self.name
class Int:
def __init__(self, cpp_name, py_name, pretty_name, min_value, max_value, doc_str):
self.cpp_name = cpp_name
self.py_name = py_name
self.pretty_name = pretty_name
self.min_value = min_value
self.max_value = max_value
self.doc_str = doc_str
self.cpp_type = "IntSetting"
def get_type(self): # For backwards compat. with gencpp.py
return "int"
class Float:
def __init__(self, cpp_name, py_name, pretty_name, min_value, max_value, doc_str):
self.cpp_name = cpp_name
self.py_name = py_name
self.pretty_name = pretty_name
self.min_value = min_value
self.max_value = max_value
self.doc_str = doc_str
self.cpp_type = "FloatSetting"
def get_type(self): # For backwards compat. with gencpp.py
return "float"
class StringToInt:
def __init__(self, cpp_name, py_name, pretty_name, py_to_cpp_map, doc_str, cpp_enum):
self.cpp_name = cpp_name
self.py_name = py_name
self.pretty_name = pretty_name
self.py_to_cpp_map = py_to_cpp_map
self.doc_str = doc_str
self.cpp_enum = cpp_enum
self.cpp_type = "EnumSetting<%s>" % self.cpp_enum.name
def get_type(self): # For backwards compat. with gencpp.py
return "stringtoint"
class Color:
def __init__(self, cpp_name, py_name, pretty_name, doc_str):
self.cpp_name = cpp_name
self.py_name = py_name
self.pretty_name = pretty_name
self.doc_str = doc_str
self.cpp_type = "ColorSetting"
def get_type(self): # For backwards compat. with gencpp.py
return "color"
class Bool:
def __init__(self, cpp_name, py_name, pretty_name, doc_str):
self.cpp_name = cpp_name
self.py_name = py_name
self.pretty_name = pretty_name
self.doc_str = doc_str
self.cpp_type = "BoolSetting"
def get_type(self): # For backwards compat. with gencpp.py
return "bool"
class String:
def __init__(self, cpp_name, py_name, pretty_name, doc_str):
self.cpp_name = cpp_name
self.py_name = py_name
self.pretty_name = pretty_name
self.doc_str = doc_str
self.cpp_type = "StringSetting"
def get_type(self): # For backwards compat. with gencpp.py
return "string"
setters_and_getters = {
"ts_AlignedResize" : Bool(
cpp_name="ts_AlignedResize",
py_name="alignedresize",
pretty_name="Aligned Resize",
doc_str="Axis-aligned scaling?"),
"ts_AlphaBlending" : Bool(
cpp_name="ts_AlphaBlending",
py_name="alphablending",
pretty_name="Alpha Blending",
doc_str="Alpha blending?"),
"ts_AntiAlias" : Bool(
cpp_name="ts_AntiAlias",
py_name="antialias",
pretty_name="Anti Aliasing",
doc_str="Anti-aliasing?"),
"ts_BackgroundStyle" : StringToInt(
cpp_name="ts_BackgroundStyle",
py_name="bgstyle",
pretty_name="Background Style",
py_to_cpp_map={"masked" : "BackgroundStyle::MASKED",
"m" : ("BackgroundStyle::MASKED", SHORTHAND),
"opaque" : "BackgroundStyle::SOLID",
"o" : ("BackgroundStyle::SOLID", SHORTHAND) },
doc_str=("Background style:\\n"
"m: masked background, o: opaque background."),
cpp_enum=CppEnum("BackgroundStyle", entries=(
"MASKED",
"SOLID"))),
"ts_Bg" : Color(
"ts_Bg",
"bg",
"Background Color. Can be a color tuple, a gradient or a pattern.",
"Background color."),
"ts_BoundedText" : Bool(
cpp_name="ts_BoundedText",
py_name="bounded",
pretty_name="Bounded Text",
doc_str="True if the text is bounded by a rectangle"),
"ts_BrushShape" : StringToInt(
cpp_name="ts_BrushShape",
py_name="brushshape",
pretty_name="Brush Shape",
py_to_cpp_map={"square" : "BrushShape::SQUARE",
"circle" : "BrushShape::CIRCLE",
"experimental" : "BrushShape::EXPERIMENTAL"},
doc_str="Brush shape setting (square or circle)",
cpp_enum=CppEnum("BrushShape", entries=(
"SQUARE",
"CIRCLE",
"EXPERIMENTAL"))),
"ts_BrushSize" : Int(
cpp_name="ts_BrushSize",
py_name="brushsize",
pretty_name="Brush Size",
min_value=1, max_value=255,
doc_str="Brush size (1-255)"),
"ts_ClosedPath" : Bool(
cpp_name="ts_ClosedPath",
py_name="closedpath",
pretty_name="Closed Path",
doc_str="Closed path?"),
"ts_EditPoints" : Bool(
cpp_name="ts_EditPoints",
py_name="editpoints",
pretty_name="Edit Points",
doc_str="Point-editing?"),
"ts_Fg" : Color(
"ts_Fg",
"fg",
"Foreground Color. Can be a color tuple, a gradient or a pattern.",
"Foreground color."),
"ts_FillStyle" : StringToInt(
cpp_name="ts_FillStyle",
py_name="fillstyle",
pretty_name="Fill Style",
py_to_cpp_map={"border" : "FillStyle::BORDER",
"b" : ("FillStyle::BORDER", SHORTHAND),
"fill" : "FillStyle::FILL",
"f" : ("FillStyle::FILL", SHORTHAND),
"border+fill" : "FillStyle::BORDER_AND_FILL",
"bf" : ("FillStyle::BORDER_AND_FILL", SHORTHAND),
"none" : "FillStyle::NONE" },
doc_str=("Fill style\\nb: border, f: fill, bf: border and fill"
"- otherwise none."),
cpp_enum=CppEnum("FillStyle", entries=(
"BORDER",
"BORDER_AND_FILL",
"FILL",
"NONE"))),
    # Fixme: Using int for now. This should use a special setting type, and a
# Python class for filters.
"ts_Filter" : Int(
cpp_name="ts_Filter",
py_name="filter",
pretty_name="Filter",
min_value=0, max_value=5,
doc_str="Filter (0-5)"),
"ts_FontBold" : Bool(
cpp_name="ts_FontBold",
py_name="fontbold",
pretty_name="Bold Font",
doc_str="Bold font?" ),
"ts_FontFace" : String(
cpp_name="ts_FontFace",
py_name="font",
pretty_name="Font Face",
doc_str="Font face string."),
"ts_FontItalic" : Bool(
cpp_name="ts_FontItalic",
py_name="fontitalic",
pretty_name="Italic Font",
doc_str="Italic font?" ),
"ts_FontSize" : Int(
cpp_name="ts_FontSize",
py_name="fontsize",
pretty_name="Font Size",
min_value=1,
max_value=999,
doc_str="Font size (1-999)."),
"ts_HorizontalAlign" : StringToInt(
cpp_name="ts_HorizontalAlign",
py_name="halign",
pretty_name="Horizontal text alignment",
py_to_cpp_map={"left" : "HorizontalAlign::LEFT",
"right" : "HorizontalAlign::RIGHT",
"center" : ("HorizontalAlign::CENTER")},
doc_str="Horizontal text alignment, left, right or center",
cpp_enum=CppEnum("HorizontalAlign",
entries=("LEFT", "RIGHT", "CENTER"))),
"ts_LayerStyle" : StringToInt(
cpp_name="ts_LayerStyle",
py_name="layerstyle",
pretty_name="Layer Style",
py_to_cpp_map={"raster" : "Layer::RASTER",
"object" : "Layer::OBJECT",
"r" : ("Layer::RASTER", SHORTHAND),
"o" : ("Layer::OBJECT", SHORTHAND)},
doc_str="Layer choice\\nr: raster, o: object",
cpp_enum=CppEnum("Layer", entries=("RASTER", "OBJECT"))),
"ts_LineArrowhead" : StringToInt(
cpp_name="ts_LineArrowhead",
py_name="arrow",
pretty_name="Arrowhead",
py_to_cpp_map={
"none" : "LineArrowhead::NONE",
"front" : "LineArrowhead::FRONT",
"back" : "LineArrowhead::BACK",
"both" : "LineArrowhead::BOTH",
"f" : ("LineArrowhead::FRONT", SHORTHAND),
"b" : ("LineArrowhead::BACK", SHORTHAND),
"fb" : ("LineArrowhead::BOTH", SHORTHAND)},
doc_str="Arrowhead setting.\\nf: front, b: back, fb: both",
cpp_enum=CppEnum("LineArrowhead", entries=(
"NONE",
"FRONT",
"BACK",
"BOTH"))),
"ts_LineCap" : StringToInt(
cpp_name="ts_LineCap",
py_name="cap",
pretty_name="Line Cap",
py_to_cpp_map={"round" : "LineCap::ROUND",
"r" : ("LineCap::ROUND", SHORTHAND),
"flat": "LineCap::BUTT",
"f": ("LineCap::BUTT", SHORTHAND)},
doc_str="Line cap setting (line ends).\\nr:round, f: flat",
cpp_enum=CppEnum("LineCap", entries=(
"BUTT",
"ROUND"))),
"ts_LineJoin" : StringToInt(
cpp_name="ts_LineJoin",
py_name="join",
pretty_name="Line Join",
py_to_cpp_map={"round" : "LineJoin::ROUND",
"bevel": "LineJoin::BEVEL",
"miter": "LineJoin::MITER"},
doc_str=("Line join setting (line connections).\\n"
"('round', 'bevel' or 'miter')"),
cpp_enum=CppEnum("LineJoin", entries=(
"MITER",
"BEVEL",
"ROUND"))),
"ts_LineStyle" : StringToInt(
cpp_name="ts_LineStyle",
py_name="linestyle",
pretty_name="Line Style",
py_to_cpp_map={"solid" : "LineStyle::SOLID",
"s" : ("LineStyle::SOLID", SHORTHAND),
"long_dash" : "LineStyle::LONG_DASH",
"ld" : ("LineStyle::LONG_DASH", SHORTHAND)},
doc_str="Line style\\ns: solid, ld: long-dash",
cpp_enum=CppEnum("LineStyle", entries=(
"SOLID",
"LONG_DASH"))),
"ts_LineWidth" : Float(
cpp_name="ts_LineWidth",
py_name="linewidth",
pretty_name="Line Width",
min_value=0,
max_value=255,
doc_str="Line width(0,255)"),
"ts_ParseExpressions" : Bool(
cpp_name="ts_ParseExpressions",
py_name="parsing",
pretty_name="Parse Expressions",
doc_str="True if the text should be parsed for expressions."),
"ts_PolyLine" : Bool(
cpp_name="ts_PolyLine",
py_name="polyline",
pretty_name="Poly-Lines",
doc_str="Create poly-lines?"),
"ts_PointType" : StringToInt(
cpp_name="ts_PointType",
py_name="pointtype",
pretty_name="Point Type",
py_to_cpp_map={"line" : "PointType::LINE",
"curve" : "PointType::CURVE"},
doc_str='The type used for new points for the Path tool: line or curve.',
cpp_enum=CppEnum("PointType", entries=("LINE", "CURVE"))),
"ts_RadiusX" : Float(
cpp_name="ts_RadiusX",
py_name="rx",
pretty_name="x-radius",
min_value=0,
max_value=sys.float_info.max,
doc_str="The width of rounded rectangle arcs"),
"ts_RadiusY" : Float(
cpp_name="ts_RadiusY",
py_name="ry",
pretty_name="y-radius",
min_value=0,
max_value=sys.float_info.max,
doc_str="The height of rounded rectangle arcs"),
"ts_SwapColors" : Bool(
cpp_name="ts_SwapColors",
py_name="swapcolors",
pretty_name="Swap Colors",
doc_str="Swap foreground and background colors?" ),
"ts_TapeStyle" : StringToInt(
cpp_name="ts_TapeStyle",
py_name="tapestyle",
pretty_name="Tape measure tool style",
py_to_cpp_map={"line" : "TapeMeasureStyle::LINE",
"triangle" : "TapeMeasureStyle::TRIANGLE"},
doc_str="Tape measures (one length or three).",
cpp_enum=CppEnum("TapeMeasureStyle",
entries=("LINE", "TRIANGLE"))),
"ts_TextRenderStyle" : StringToInt(
cpp_name="ts_TextRenderStyle",
py_name="text_render_style",
pretty_name="Text Render Style",
py_to_cpp_map={
"cairopath" : "TextRenderStyle::CAIRO_PATH",
"pangolayout" : "TextRenderStyle::PANGO_LAYOUT",
"pangolayoutfallback" : "TextRenderStyle::PANGO_LAYOUT_UNLESS_ROTATED"},
doc_str=("Text rendering style, either convert to path (cairopath) or "
"use pango rendering (pangolayout). "
"To use pango-layout with unrotated text only, use "
"pangolayoutfallback."),
cpp_enum=CppEnum("TextRenderStyle", entries=(
"CAIRO_PATH",
"PANGO_LAYOUT",
"PANGO_LAYOUT_UNLESS_ROTATED"))),
"ts_Unit" : String(
cpp_name="ts_Unit",
py_name="unit",
pretty_name="Unit",
doc_str="Measurement unit."),
"ts_VerticalAlign" : StringToInt(
cpp_name="ts_VerticalAlign",
py_name="valign",
pretty_name="Vertical text alignment",
py_to_cpp_map={"top" : "VerticalAlign::TOP",
"middle" : "VerticalAlign::MIDDLE",
"bottom" : ("VerticalAlign::BOTTOM")},
doc_str="Vertical text alignment, top, middle or bottom",
cpp_enum=CppEnum("VerticalAlign", entries=("TOP", "MIDDLE", "BOTTOM"))),
}
| apache-2.0 | -5,173,521,413,677,159,000 | 33.135714 | 89 | 0.530257 | false |
aqavi-paracha/coinsbazar | qa/pull-tester/pull-tester.py | 1 | 8761 | #!/usr/bin/python
import json
from urllib import urlopen
import requests
import getpass
from string import Template
import sys
import os
import subprocess
class RunError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def run(command, **kwargs):
fail_hard = kwargs.pop("fail_hard", True)
# output to /dev/null by default:
kwargs.setdefault("stdout", open('/dev/null', 'w'))
kwargs.setdefault("stderr", open('/dev/null', 'w'))
command = Template(command).substitute(os.environ)
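    # ${NAME} placeholders (e.g. ${BUILD_DIR}) are expanded from os.environ before
    # the command is split on single spaces, so arguments must not contain spaces.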
if "TRACE" in os.environ:
if 'cwd' in kwargs:
print("[cwd=%s] %s"%(kwargs['cwd'], command))
else: print(command)
try:
process = subprocess.Popen(command.split(' '), **kwargs)
process.wait()
except KeyboardInterrupt:
process.terminate()
raise
if process.returncode != 0 and fail_hard:
raise RunError("Failed: "+command)
return process.returncode
def checkout_pull(clone_url, commit, out):
# Init
build_dir=os.environ["BUILD_DIR"]
run("umount ${CHROOT_COPY}/proc", fail_hard=False)
run("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}")
run("rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}")
run("cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}")
# Merge onto upstream/master
run("rm -rf ${BUILD_DIR}")
run("mkdir -p ${BUILD_DIR}")
run("git clone ${CLONE_URL} ${BUILD_DIR}")
run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out)
run("git fetch pull", cwd=build_dir, stdout=out, stderr=out)
if run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out) != 0:
return False
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out)
run("mount --bind /proc ${CHROOT_COPY}/proc")
return True
def commentOn(commentUrl, success, inMerge, needTests, linkUrl):
common_message = """
This test script verifies pulls every time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/
Contact BlueMatt on freenode if something looks broken."""
# Remove old CoinsBazarPullTester comments (I'm being lazy and not paginating here)
recentcomments = requests.get(commentUrl+"?sort=created&direction=desc",
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
for comment in recentcomments:
if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]:
requests.delete(comment["url"],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
if success == True:
if needTests:
message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log."
else:
message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log."
post_data = { "body" : message + common_message}
elif inMerge:
post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """
This pull does not merge cleanly onto current master""" + common_message}
else:
post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """
This could happen for one of several reasons:
1. It changes build scripts in a way that made them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester)
2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time
3. It does not build on either Linux i386 or Win32 (via MinGW cross compile)
4. The test suite fails on either Linux i386 or Win32
5. The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java)
If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here.
""" + common_message}
resp = requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
def testpull(number, comment_url, clone_url, commit):
print("Testing pull %d: %s : %s"%(number, clone_url,commit))
dir = os.environ["RESULTS_DIR"] + "/" + commit + "/"
print(" ouput to %s"%dir)
if os.path.exists(dir):
os.system("rm -r " + dir)
os.makedirs(dir)
currentdir = os.environ["RESULTS_DIR"] + "/current"
os.system("rm -r "+currentdir)
os.system("ln -s " + dir + " " + currentdir)
out = open(dir + "test.log", 'w+')
resultsurl = os.environ["RESULTS_URL"] + commit
checkedout = checkout_pull(clone_url, commit, out)
if checkedout != True:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, True, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
return
run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh"
script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/CoinsBazardComparisonTool_jar/CoinsBazardComparisonTool.jar 0 6 ${OUT_DIR}"
returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script,
fail_hard=False, stdout=out, stderr=out)
run("mv ${CHROOT_COPY}/${OUT_DIR} " + dir)
run("mv ${BUILD_DIR} " + dir)
if returncode == 42:
print("Successfully tested pull (needs tests) - sending comment to: " + comment_url)
commentOn(comment_url, True, False, True, resultsurl)
elif returncode != 0:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, False, False, resultsurl)
else:
print("Successfully tested pull - sending comment to: " + comment_url)
commentOn(comment_url, True, False, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
def environ_default(setting, value):
if not setting in os.environ:
os.environ[setting] = value
if getpass.getuser() != "root":
print("Run me as root!")
sys.exit(1)
if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
sys.exit(1)
environ_default("CLONE_URL", "https://github.com/bitcoin/bitcoin.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/bitcoin")
os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"]
environ_default("RESULTS_DIR", "/mnt/www/pull-tester")
environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/")
environ_default("GITHUB_REPO", "bitcoin/bitcoin")
environ_default("TESTED_DB", "/mnt/commits-tested.txt")
environ_default("BUILD_USER", "matt")
environ_default("BUILD_GROUP", "matt")
environ_default("TEST_TIMEOUT", str(60*60*2))
print("Optional usage: pull-tester.py 2112")
f = open(os.environ["TESTED_DB"])
tested = set( line.rstrip() for line in f.readlines() )
f.close()
if len(sys.argv) > 1:
pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
else:
for page in range(1,100):
result = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls?state=open&page=%d"%(page,),
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
if len(result) == 0: break;
for pull in result:
if pull["head"]["sha"] in tested:
print("Pull %d already tested"%(pull["number"],))
continue
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
| mit | -2,317,701,358,058,249,700 | 45.354497 | 260 | 0.647415 | false |
Merlin71/Aria | schedule.py | 1 | 11148 | ## @file
## @brief schedule implementation
import ConfigParser
import Queue
import logging
import threading
import uuid
from multiprocessing import Pool
from time import sleep, time
from pubsub import pub
## @class Schedule
## @brief Schedule for periodic task
## @details Allows creating and starting periodic tasks
## @warning In the current version all functions run in the service thread, so a long computation may affect service work
## @note In the current version it is impossible to update a periodic task. To do this, remove the task and insert a new one
## @par This class subscribes to the following events:
## @li ScheduleAdd - insert task into schedule. Parameters : task=ScheduleTask object
## @li ScheduleRemove - remove task from schedule. Parameters : task=ScheduleTask object
## @see ScheduleTask
##
## @code{.py}
## from pubsub import pub
## from schedule import ScheduleTask
## import ConfigParser
## config = ConfigParser.SafeConfigParser(allow_no_value=True)
## config.read('Aria.cfg')
## schedule_service = Schedule(config)
## some_task = ScheduleTask(True,10,some_func,'SomeParam')
## pub.sendMessage('ScheduleAdd', task=some_task)
## #some work
## pub.sendMessage('ScheduleRemove', task=some_task)
## @endcode
class Schedule(threading.Thread):
## @brief Create schedule instance
    ## @details Create and initialise the schedule and subscribe to its events. The service thread is started separately via start_schedule()
    ## @param[in] config ConfigParser object with the configuration file loaded into it
def __init__(self, config):
super(Schedule, self).__init__()
self._config = config
try:
self._logger = logging.getLogger('schedule')
except ConfigParser.NoSectionError as e:
print 'Fatal error - fail to set logger.Error: %s ' % e.message
raise ConfigParser.NoSectionError
self._logger.debug('Initializing schedule')
self._shutdown_even = threading.Event()
self._main_mutex = threading.Lock()
try:
queue_size = self._config.getint('Schedule', 'MsgQueueSize')
except ConfigParser.NoSectionError as e:
self._logger.warning('Fail to read schedule settings. Using default. Error : %s' % e)
queue_size = 5
self._main_queue = Queue.Queue(queue_size)
try:
pub.subscribe(self._add_task, 'ScheduleAdd')
except:
self._logger.error('Subscription failed')
self._logger.debug('Subscribing to event: ScheduleRemove')
try:
pub.subscribe(self._del_task, 'ScheduleRemove')
except:
self._logger.error('Subscription failed')
self._thread = None # Init variable
self._logger.info('Schedule ready')
## @brief Service main function
## @details Run in separate thread
def _service_run(self):
try:
self._logger = logging.getLogger('schedule')
except ConfigParser.NoSectionError as e:
print 'Fatal error - fail to set logger.Error: %s ' % e.message
raise ConfigParser.NoSectionError
self._logger.debug('Schedule logger started')
try:
self._pool_size = self._config.getint('Schedule', 'PoolSize')
except ConfigParser.NoSectionError as e:
self._logger.warning('Error while loading configuration. Using default. Error: %s' % e)
self._pool_size = 3
try:
self._min_resolution = self._config.getint('Schedule', 'MinRes')
except ConfigParser.NoSectionError as e:
            self._logger.warning('Error while loading configuration. Using default. Error: %s' % e)
self._min_resolution = 1
try:
self._logger.debug('Starting Pool')
self._pool = Pool(processes=self._pool_size) # Start a worker processes.
except:
self._logger.warning('Pool start failed')
return
self._task_list = {}
_delete_list = []
self._logger.info('Schedule running')
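        # Polling loop: every MinRes seconds fire tasks whose next_run has passed,
        # drain pending add/remove requests from the queue under the mutex, then
        # purge finished one-shot tasks.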
while not self._shutdown_even.is_set():
self._main_mutex.acquire(True)
for _task_key, _task_obj in self._task_list.iteritems():
if int(time()) > _task_obj.next_run():
self._logger.debug("Starting task %s" % _task_key)
_task_obj.start(self._pool)
if _task_obj.is_periodic() is False:
_delete_list.append(_task_key)
# Add tasks
while not self._main_queue.empty():
try:
_task = self._main_queue.get(False)
if _task.get_callback() is None:
self._logger.info('Removing task %s from schedule' % _task.get_uuid())
_delete_list.append(_task.get_uuid())
else:
self._logger.info('Adding task %s to schedule' % _task.get_uuid())
self._task_list[_task.get_uuid()] = _task
except Queue.Empty:
                    self._logger.warning('Someone pulled data from queue. Maybe multiple schedule threads are running')
break
# Remove task
for _task in _delete_list:
self._logger.debug('Removing task %s' % _task)
self._task_list.pop(_task, None)
_delete_list = []
self._main_mutex.release()
sleep(self._min_resolution)
self._pool.close()
## @brief Insert task into schedule queue
    ## @details Lock the service thread and insert the task into the queue
## @param[in] task ScheduleTask object
def _add_task(self, task):
self._main_mutex.acquire(True)
self._main_queue.put(task, True)
self._logger.info('Schedule task added with uuid : %s' % task.get_uuid())
self._main_mutex.release()
## @brief Remove task from schedule queue
    ## @details Lock the service thread and insert data into the queue. In the service thread the task will be removed from the task list
## @param[in] task ScheduleTask object
    ## @note To distinguish between a new task to insert and one that should be removed, the callback is changed to None
def _del_task(self, task):
self._main_mutex.acquire(True)
task._remove()
self._main_queue.put(task, True)
self._logger.info('Schedule task removed with uuid : %s' % task.get_uuid())
self._main_mutex.release()
## @brief Start schedule thread
    ## @details Start execution of the schedule thread
    ## @note Should be called only once, after creation or after a stop
def start_schedule(self):
self._logger.info('Starting schedule thread')
try:
self._thread = threading.Thread(target=self._service_run)
self._thread.start()
except threading.ThreadError as e:
self._logger.warning('Fail to start schedule thread with error: %s' % e)
self._logger.debug('Subscribing to event: ScheduleAdd')
## @brief Stop schedule thread
    ## @details Stop execution of the schedule thread
def stop_schedule(self):
self._logger.info('Stopping schedule thread')
self._main_mutex.acquire(True)
self._shutdown_even.set()
self._thread.join(self._min_resolution * 5)
if self._thread.isAlive():
self._logger.warning('Schedule thread still running')
else:
self._logger.info('Schedule thread stopped')
## @class ScheduleTask
## @brief Schedule task
## @details Create a task to be sent into the schedule for periodic execution
## @warning In the current version all callback functions run in the service thread, so long computations may affect service work
## @note In the current version it is impossible to update a periodic task. To do this, remove the task and insert a new one
## @par To add/remove task into/from schedule use 'pubsub' event :
## @li ScheduleAdd - insert task into schedule. Parameters : task=ScheduleTask object
## @li ScheduleRemove - remove task from schedule. Parameters : task=ScheduleTask object
## @see Schedule
##
## @code{.py}
## from pubsub import pub
## from schedule import Schedule, ScheduleTask
## import ConfigParser
## config = ConfigParser.SafeConfigParser(allow_no_value=True)
## config.read('Aria.cfg')
## schedule_service = Schedule(config)
## schedule_service.start_schedule()
## some_task = ScheduleTask(True,10,some_func,'SomeParam')
## pub.sendMessage('ScheduleAdd', task=some_task)
## #some work
## pub.sendMessage('ScheduleRemove', task=some_task)
## @endcode
class ScheduleTask:
## @brief Task for schedule
    ## @details Create a task to be sent into the schedule for periodic execution
## @param[in] is_periodic If set to True then task will be repeated after 'period' time
## @param[in] period If task periodic - repeat time in seconds , else execution time in epoch format
## @param[in] callback callback function
## @param[in] kwargs Parameters to callback function
    ## @warning In the current version all callback functions run in the service thread, so long computations may affect service work
    ## @note In the current version it is impossible to update a periodic task. To do this, remove the task and insert a new one
    ## @note Execution of the task may happen with some time lag.
def __init__(self, is_periodic, period, callback, kwargs):
self._uuid = uuid.uuid4()
self._is_periodic = is_periodic
if self._is_periodic:
self._interval = period
self._next_run = int(time()) + period
else:
self._interval = 0
self._next_run = period
self._callback = callback
self._kwargs = kwargs
self._is_running = False
## @brief Mark task to remove
def _remove(self):
self._callback = None
## @brief Return callback function
## @return Callback function
def get_callback(self):
return self._callback
## @brief Return unique id of task
## @details Id can be used to distinct between tasks
## @return Unique id of task
def get_uuid(self):
return self._uuid
    ## @brief Return whether the task is periodic
    ## @return True if the task is periodic
def is_periodic(self):
return self._is_periodic
## @brief Return next execution time
## @return Next execution time in epoch format
def next_run(self):
return self._next_run
## @brief Start execution of callback function
## @param[in] worker_pool Process pool for independent execution
## @note In current version not used independent execution
def start(self, worker_pool):
self._is_running = True
if self._is_periodic:
self._next_run = int(time()) + self._interval
try:
if self._is_periodic:
self._callback(self._kwargs)
# worker_pool.apply_async(self._callback, args=(), kwds=self._kwargs, callback=self._task_callback)
else:
worker_pool.apply_async(self._callback, args=(), kwds=self._kwargs)
except:
pass
## @brief Callback function for pool process
## @details This function called when task callback function ended
def _task_callback(self):
self._is_running = False
print "Done"
| gpl-3.0 | -5,195,212,340,877,595,000 | 40.288889 | 117 | 0.63267 | false |
unioslo/cerebrum | Cerebrum/modules/hostpolicy/HostPolicyConstants.py | 1 | 3539 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# $Id$
"""
The Constants defined for the HostPolicy module, depending on the DNS module.
"""
from Cerebrum import Constants as CereConst
__version__ = "$Revision$"
# $URL$
# $Source$
class _PolicyRelationshipCode(CereConst._CerebrumCode):
"""Mappings stored in the hostpolicy_relationship_code table"""
_lookup_table = '[:table schema=cerebrum name=hostpolicy_relationship_code]'
class Constants(CereConst.Constants):
entity_hostpolicy_atom = CereConst._EntityTypeCode(
'hostpolicy_atom',
'hostpolicy_atom - see table "cerebrum.hostpolicy_component" and friends.')
entity_hostpolicy_role = CereConst._EntityTypeCode(
'hostpolicy_role',
'hostpolicy_role - see table "cerebrum.hostpolicy_component" and friends.')
hostpolicy_component_namespace = CereConst._ValueDomainCode(
'hostpol_comp_ns',
'Domain for hostpolicy-components')
hostpolicy_mutually_exclusive = _PolicyRelationshipCode(
"hostpol_mutex",
"Source policy and target policy are mutually exclusive")
hostpolicy_contains = _PolicyRelationshipCode(
"hostpol_contains",
"Source policy contains target policy")
class CLConstants(CereConst.CLConstants):
# ChangeLog constants
hostpolicy_atom_create = CereConst._ChangeTypeCode(
'hostpolicy_atom', 'create', 'create atom %(subject)s')
hostpolicy_atom_mod = CereConst._ChangeTypeCode(
'hostpolicy_atom', 'modify', 'modify atom %(subject)s')
hostpolicy_atom_delete = CereConst._ChangeTypeCode(
'hostpolicy_atom', 'delete', 'delete atom %(subject)s')
hostpolicy_role_create = CereConst._ChangeTypeCode(
'hostpolicy_role', 'create', 'create role %(subject)s')
hostpolicy_role_mod = CereConst._ChangeTypeCode(
'hostpolicy_role', 'modify', 'modify role %(subject)s')
hostpolicy_role_delete = CereConst._ChangeTypeCode(
'hostpolicy_role', 'delete', 'delete role %(subject)s')
hostpolicy_relationship_add = CereConst._ChangeTypeCode(
'hostpolicy_relationship',
'add',
'add relationship %(subject)s -> %(dest)s')
# TODO: type is not given here
hostpolicy_relationship_remove = CereConst._ChangeTypeCode(
'hostpolicy_relationship',
'remove',
'remove relationship %(subject)s -> %(dest)s')
# TODO: type is not given here
hostpolicy_policy_add = CereConst._ChangeTypeCode(
'hostpolicy',
'add',
'add policy %(dest)s to host %(subject)s')
hostpolicy_policy_remove = CereConst._ChangeTypeCode(
'hostpolicy',
'remove',
'remove policy %(dest)s from host %(subject)s')
PolicyRelationshipCode = _PolicyRelationshipCode
| gpl-2.0 | 6,810,090,009,265,447,000 | 36.252632 | 83 | 0.695677 | false |
deapplegate/wtgpipeline | coadd_CR_removal.py | 1 | 128247 | #! /usr/bin/env python
from __future__ import division #3/2=1.5 and 3//2=1
#adam-does# makes REMS masks
#adam-use # use on clusters where we have wayyyy too many exposures in the lensing band to make masks by hand (i.e. RXJ2129)
import sys
#from import_tools import *
import imagetools_light as imagetools
import astropy
from astropy.io import ascii
from copy import deepcopy as cp
import os
import skimage
from skimage import measure
from skimage import morphology
from matplotlib.pyplot import *
import numpy
import scipy
import scipy.ndimage
import scipy.stats
from numpy import *
import time
#also needed by the ring/track/hole helpers further down:
import math
import itertools
import astropy.io.fits
import pymorph
import mahotas
ns=globals()
conn8=array([[1,1,1],[1,1,1],[1,1,1]]) #8-connected structuring element
conn4=array([[0,1,0],[1,1,1],[0,1,0]]) #4-connected structuring element
connS=array([[0,1,1,0],[1,1,1,1],[1,1,1,1],[0,1,1,0]],dtype=bool) #rounded 4x4 "square" structuring element
#START: LABEL FUNCTIONS
def plotlabels(ll,segments=None,slices=None,params=None,background=None):
'''plot stamps of all of the masks in the label list `ll`.
ll: the list of segmentation numbers you want to plot
segments: the segmetation map
slices: the slices for each segment in segmentation map
params: a list of properties of each segment (any segment property you want), this will show up in the title of each subplot
background: the image that the segmentation map was selected from
'''
try:
if segments is None:segments=BBCRseg
if slices is None: slices=BBCRslices
if params is None: params=ll
if background is None: background=image
patches=[]
for l in ll:
patches.append(imagetools.slice_expand(tuple(slices[l-1]),3))
fig=figure(figsize=(22,13.625))
Nlabels=len(ll)
if Nlabels<=4:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(2,2))
textsize=14
elif Nlabels<=9:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(3,3))
textsize=13
elif Nlabels<=16:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(4,4))
textsize=12
elif Nlabels<=25:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(5,5))
textsize=11
elif Nlabels<=6*7:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(6,7))
textsize=10
elif Nlabels<=6*8:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(6,8))
textsize=10
elif Nlabels<=7*8:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(7,8))
textsize=9
elif Nlabels<=7*9:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(7,9))
textsize=9
else:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(8,10))
fig.subplots_adjust(top=.95)
textsize=8
if len(params)==Nlabels:
for ax,sl,title,l in zip(axes,patches,params,ll):
##spots=segments[sl]>0
spots=segments[sl]==l
yy,xx=nonzero(spots)
stamp=background[sl]
ax.imshow(stamp,interpolation='nearest',origin='lower left')
ax.scatter(xx,yy,marker='o',edgecolors='k',facecolors='None',label='points')
ax.set_title(str(title),size=10)
elif len(params)==len(slices):
for ax,sl,l in zip(axes,patches,ll):
title=params[l-1]
##spots=segments[sl]>0
spots=segments[sl]==l
yy,xx=nonzero(spots)
stamp=background[sl]
ax.imshow(stamp,interpolation='nearest',origin='lower left')
ax.scatter(xx,yy,marker='o',edgecolors='k',facecolors='None',label='points')
ax.set_title(str(title),size=10)
else:
raise Exception('gotta have len(params)==len(slices) or len(params)==len(ll)')
return fig
except:
ns.update(locals())
show()
raise
plotdir='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/plot_SCIENCE_compare/'
from glob import glob
coadd_masterdir='/gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/W-C-RC/SCIENCE/'
supa_coadds=glob(coadd_masterdir+'coadd_RXJ2129_SUPA01*/coadd.fits')
median_coadd=coadd_masterdir+'coadd_RXJ2129_all/coadd.fits'
medianfitsfl=astropy.io.fits.open(median_coadd)
median_image=medianfitsfl[0].data
median_header=medianfitsfl[0].header
medianfitsfl.close()
for supa in supa_coadds:
supa_fitsfl=astropy.io.fits.open(supa)
supa_image=supa_fitsfl[0].data
supa_header=supa_fitsfl[0].header
supa_fitsfl.close()
## terminal starts here
rms= supa_image.std()
supa_mask=supa_image>3*rms
supa_seg,supa_Nlabels=scipy.ndimage.label(supa_mask,conn4)
## get size of each segment
supa_regs=skimage.measure.regionprops(supa_seg)
supa_area=array([supa_regs[i].area for i in range(supa_Nlabels)])
## get slices
supa_slices=scipy.ndimage.find_objects(supa_seg)
## maybe something else will work better than a simple max_ratio
## but I'll just throw this in for now.
plot_ll=[]
plot_segments=supa_seg.copy()
plot_background=supa_image.copy()
max_ratios=[]
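## max_ratios will hold, for each kept segment, the largest per-pixel ratio of this single
## exposure to the median coadd: cosmic rays hit only one exposure, so their ratio is large,
## while real objects should stay of order unity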
for i in range(1,supa_Nlabels+1):
# filter out small masks
if supa_area[i-1]<=3:
continue
stamp_slice=supa_slices[i-1]
## use slices to pick out stamps
stamp_supa=supa_image[stamp_slice]
stamp_med=median_image[stamp_slice]
## ratio in stamp-region
stamp_ratio=stamp_supa/stamp_med
stamp_mask=supa_seg[stamp_slice]==i
relevant_ratios=stamp_ratio[stamp_mask]
## here on down, you figure it out.
#relevant_ratios.max()
if i<=90:
plot_ll.append(i)
max_ratios.append(relevant_ratios.max())
fig=plotlabels(plot_ll,plot_segments,supa_slices,max_ratios,plot_background)
#fig.savefig('...')
show()
sys.exit()
hist(max_ratios,bins=linspace(0.1,100.0,200),log=True)
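## a possible follow-up sketch (the cut value is a hypothetical placeholder, not a tuned number):
## segments that are much brighter in this exposure than in the median coadd are CR candidates
## (this only covers the segments kept in the loop above)
ratio_cut=10.0
CR_candidate_labels=[l for l,r in zip(plot_ll,max_ratios) if r>ratio_cut]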
## some stuff to switch from mosaic (coadd.fits) to resampled-chip images (SUPA....resamp.fits):
py2ds9= lambda (x,y): (y+1,x)
ds92py= lambda (x,y): (y,x-1)
coords2ints= lambda position: tuple([int(round(x)) for x in position])
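## quick round-trip check of the two conventions above (hypothetical coordinates, purely
## illustrative): ds9 (x,y) -> python (row,col) -> ds9 should give back the same pair
assert py2ds9(ds92py((100.0,200.0)))==(100.0,200.0)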
def mosaic_to_resamp_chip_map(mosaic_coadd_fl):
''' input the coadd fits image, and you'll get a map of where the different resampled chips fall on that grid'''
os.path.dirname(mosaic_coadd_fl)
mosaic_fitsfl=astropy.io.fits.open(mosaic_coadd_fl)
mosaic2chip_map=zeros(mosaic_fitsfl[0].data.shape,dtype=int)
mosaic_header=mosaic_fitsfl[0].header
mosaic_fitsfl.close()
resamp_wt_chips=[]
resamp_chip_slices=[]
for i in range(1,11):
#resamp_chips+=glob(os.path.dirname(mosaic_coadd_fl)+'/SUPA*_%sOCF[A-Z]*.sub.[A-Z]*.resamp.fits' % (i))
resamp_chip_fl=glob(os.path.dirname(mosaic_coadd_fl)+'/SUPA*_%sOCF[A-Z]*.sub.[A-Z]*.resamp.fits' % (i))[0]
resamp_chip_wt_fl=glob(os.path.dirname(mosaic_coadd_fl)+'/SUPA*_%sOCF[A-Z]*.sub.[A-Z]*.resamp.weight.fits' % (i))[0]
resamp_chip_fo=astropy.io.fits.open(resamp_chip_fl)
resamp_chip_image=resamp_chip_fo[0].data
resamp_chip_header=resamp_chip_fo[0].header
resamp_chip_fo.close()
pyCOMIN1=resamp_chip_header['COMIN1']-1 #have to switch to zero indexing
pyCOMIN2=resamp_chip_header['COMIN2']-1 #have to switch to zero indexing
resamp_chip_slice=(slice(pyCOMIN2,pyCOMIN2+resamp_chip_image.shape[0]),slice(pyCOMIN1,pyCOMIN1+resamp_chip_image.shape[1]))
resamp_chip_slices.append(resamp_chip_slice)
resamp_wt_chips.append(resamp_chip_wt_fl)
mosaic2chip_map[resamp_chip_slice]=i
#return mosaic2chip_map,(resamp_chip_slices,resamp_wt_chips)
return mosaic2chip_map
def mosaic_position_to_resamp_chip(position,mosmap,mosaic_coadd_fl):
'''
INPUTS
position: (x,y) position in the mosaic image (in coadd.fits). Note that x and y axes are defined by python (which is probably the reverse of the coordinates ds9 uses)
mosmap: output from mosaic_to_resamp_chip_map, i.e. mosmap=mosaic_to_resamp_chip_map(mosaic_coadd_fl)
RETURNS
gives you the chip number and position within the resampled chip corresponding to the input position in the coadd mosaic'''
pair=tuple([int(round(x)) for x in position])
chipnum=mosmap[pair]
resamp_chip_fl=glob(os.path.dirname(mosaic_coadd_fl)+'/SUPA*_%sOCF[A-Z]*.sub.[A-Z]*.resamp.fits' % (chipnum))[0]
resamp_chip_fo=astropy.io.fits.open(resamp_chip_fl)
#resamp_chip_image=resamp_chip_fo[0].data
resamp_chip_header=resamp_chip_fo[0].header
resamp_chip_fo.close()
pyCOMIN1=resamp_chip_header['COMIN1']-1 #have to switch to zero indexing
pyCOMIN2=resamp_chip_header['COMIN2']-1 #have to switch to zero indexing
resamp_chip_position=(position[0]-pyCOMIN2,position[1]-pyCOMIN1)
return chipnum , resamp_chip_position
def mosaic_position_to_resamp_chip_ds9_coords(position,mosmap,mosaic_coadd_fl):
'''this is `mosaic_position_to_resamp_chip`, but you can use coords from ds9/regions'''
pypos=ds92py(position)
chip_pypos=mosaic_position_to_resamp_chip(pypos,mosmap,mosaic_coadd_fl)
chip_ds9pos=py2ds9(chip_pypos[-1])
print ' mosaic position:',position
print 'resampled chip position:',chip_ds9pos
return chip_pypos[0],chip_ds9pos
## LET'S TEST IT OUT
mosaic_coadd_fl='/gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/W-C-RC/SCIENCE/coadd_RXJ2129_SUPA0135155/coadd.fits'
mosmap=mosaic_to_resamp_chip_map(mosaic_coadd_fl)
## test this by making a polygon in the mosaic frame and translating it into the resampled-chip frame
## input from : /u/ki/awright/wtgpipeline/ccd9_rect_small.reg
## output went into: /u/ki/awright/wtgpipeline/ccd9_rect_resamp_chip.reg
polygon=(6094.7797,8877.1077,7670.9609,8886.9588,7680.8121,5645.9362,6075.0774,5616.3828)
yy=polygon[1:-1:2]
xx=polygon[0:-1:2]
if len(xx)!=len(yy):
yy=list(polygon[1:-1:2])+[polygon[-1]]
pairs=zip(xx,yy)
newpairs=[]
newpolygon=[]
for p in pairs:
chip,pnew=mosaic_position_to_resamp_chip_ds9_coords(p,mosmap,mosaic_coadd_fl)
newpairs.append(pnew)
newpolygon.append(pnew[0])
newpolygon.append(pnew[1])
print newpolygon
## put into here: /u/ki/awright/wtgpipeline/ccd9_rect_resamp_chip.reg
## check answer with:
## ds9 -zscale /gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/W-C-RC/SCIENCE/coadd_RXJ2129_all/SUPA0135155_9OCFSIR.sub.RXJ2129_all.resamp.fits -region load /u/ki/awright/wtgpipeline/ccd9_rect_resamp_chip.reg /gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/W-C-RC/SCIENCE/coadd_RXJ2129_SUPA0135155/coadd.fits -region load /u/ki/awright/wtgpipeline/ccd9_rect_small.reg &
sys.exit()
## some stuff used for development, no longer needed:
resamp_chip_fl='/gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/W-C-RC/SCIENCE/coadd_RXJ2129_all/SUPA0135155_9OCFSIR.sub.RXJ2129_all.resamp.fits'
resamp_chip_fo=astropy.io.fits.open(resamp_chip_fl)
resamp_chip_image=resamp_chip_fo[0].data
resamp_chip_header=resamp_chip_fo[0].header
resamp_chip_fo.close()
COMIN1=resamp_chip_header['COMIN1']
COMIN2=resamp_chip_header['COMIN2']
#del# resamp_chip_chip_slice=(slice(COMIN1,resamp_chip_image.shape[0]+COMIN1,None), slice(COMIN2,resamp_chip_image.shape[1]+COMIN2,None))
resamp_chip_slice=(slice(-1+COMIN2,-1+COMIN2+resamp_chip_image.shape[0]),slice(-1+COMIN1,-1+COMIN1+resamp_chip_image.shape[1]))
mosaic_coadd_fl='/gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/W-C-RC/SCIENCE/coadd_RXJ2129_SUPA0135155/coadd.fits'
mosaic_fitsfl=astropy.io.fits.open(mosaic_coadd_fl)
mosaic_image=mosaic_fitsfl[0].data
mosaic_header=mosaic_fitsfl[0].header
mosaic_fitsfl.close()
mosaic_selection=mosaic_image[resamp_chip_slice]
#mask=chip_selection==0
#resamp_chip_compare=resamp_chip_image.copy()
#resamp_chip_compare[mask]=0
## this is equivalent to /gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/W-C-RC/SCIENCE/coadd_RXJ2129_SUPA0135155/SUPA0135155_9OCFSIR.sub.RXJ2129_SUPA0135155.resamp.fits
## COMMENT Axis-dependent SWarp parameters
## COMIN1 = 5859 / Output minimum position of image
## COMIN2 = 5140 / Output minimum position of image
## COMMENT
## COMMENT Image-dependent SWarp parameters
## FLXSCALE= 4.395378000000E-03 / Relative flux scaling from photometry
## FLASCALE= 1.000000000000E+00 / Relative flux scaling from astrometry
## BACKMEAN= 7.703595519188E-01 / Effective background level
## BACKSIG = 3.174919056997E+01 / Effective background RMS
sys.exit()
#adam-SHNT# probably easiest to do a resamp2mosaic map (using COMIN1 and COMIN2) and just invert it
#def resamp2mosaic():
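## a minimal sketch (not part of the pipeline) of the resamp->mosaic direction suggested above;
## it assumes the same COMIN1/COMIN2 convention used in mosaic_position_to_resamp_chip and that
## chipnum/chip_position are the values that function returns
def resamp_chip_position_to_mosaic(chipnum,chip_position,mosaic_coadd_fl):
    resamp_chip_fl=glob(os.path.dirname(mosaic_coadd_fl)+'/SUPA*_%sOCF[A-Z]*.sub.[A-Z]*.resamp.fits' % (chipnum))[0]
    resamp_chip_fo=astropy.io.fits.open(resamp_chip_fl)
    resamp_chip_header=resamp_chip_fo[0].header
    resamp_chip_fo.close()
    pyCOMIN1=resamp_chip_header['COMIN1']-1 #have to switch to zero indexing
    pyCOMIN2=resamp_chip_header['COMIN2']-1 #have to switch to zero indexing
    return (chip_position[0]+pyCOMIN2,chip_position[1]+pyCOMIN1)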
sys.exit()
sys.exit()
#START: RING FUNCTIONS define commands for fixing rings
slope_flat_cut=.04 #line fits with |slope| below this are treated as flat (and above 1/slope_flat_cut as steep) when deciding which way to stretch a track
ring_rr_cut=2.8 #new: maximum line-fit residual per pixel allowed when extending a track through a ring
def track2ring(track_spots,ring_spots):
'''this is designed to extend tracks through objects they are near. it is called within `ringer`. this fits a line to `track_spots` and stretches the mask along the line through the ring'''
try:
#fill ring then get ring pairs
ring_spots_all=scipy.ndimage.binary_fill_holes(ring_spots)
ayy,axx=nonzero(ring_spots_all)
#get track pairs and fit line to track spots
oyy,oxx=nonzero(track_spots)
rr,poly,polytype=polyfitter(track_spots,1)
#get spots in filled ring that are co-linear with the track
try:
m,b=poly.coeffs #for line y=m*x+b or x=m*y+b
print "track2ring poly.coeffs runs fine"
except ValueError:
print "track2ring poly.coeffs ValueErr"
return track_spots,0,rr
except AttributeError:
print "track2ring poly.coeffs AttributeErr"
return track_spots,0,rr
if rr>ring_rr_cut or isnan(rr):
return track_spots,0,rr
if polytype=='x_of_y':
aX=poly(ayy)
aOffsets=(axx-aX).__abs__()
oX=poly(oyy)
oOffsets=(oxx-oX).__abs__()
elif polytype=='y_of_x':
aY=poly(axx)
aOffsets=(ayy-aY).__abs__()
oY=poly(oxx)
oOffsets=(oyy-oY).__abs__()
else:
return track_spots,0,rr
extend_track_spots=aOffsets<1.3
xmin=oxx.min();xmax=oxx.max();ymin=oyy.min();ymax=oyy.max()
#make sure they are extending along the main axis of the track
ur=(axx>=xmax)*(ayy>=ymax)
ul=(axx<=xmin)*(ayy>=ymax)
lr=(axx>=xmax)*(ayy<=ymin)
ll=(axx<=xmin)*(ayy<=ymin)
if math.fabs(m)<slope_flat_cut:
if polytype=='x_of_y':
Rxxyy_spots=extend_track_spots*(ayy>=ymax) #upper
Lxxyy_spots=extend_track_spots*(ayy<=ymin) #lower
elif polytype=='y_of_x':
Rxxyy_spots=extend_track_spots*(axx>=xmax) #right
Lxxyy_spots=extend_track_spots*(axx<=xmin) #left
elif math.fabs(m)>slope_flat_cut**(-1):
if polytype=='x_of_y':
Rxxyy_spots=extend_track_spots*(axx>=xmax) #right
Lxxyy_spots=extend_track_spots*(axx<=xmin) #left
elif polytype=='y_of_x':
Rxxyy_spots=extend_track_spots*(ayy>=ymax) #upper
Lxxyy_spots=extend_track_spots*(ayy<=ymin) #lower
elif m>0:
Rxxyy_spots=extend_track_spots*ur
Lxxyy_spots=extend_track_spots*ll
elif m<0:
Rxxyy_spots=extend_track_spots*lr
Lxxyy_spots=extend_track_spots*ul
Rxx,Ryy=axx[Rxxyy_spots],ayy[Rxxyy_spots]
Lxx,Lyy=axx[Lxxyy_spots],ayy[Lxxyy_spots]
#now change the final mask if the edgepoints are above the threshold
track_spots_final=track_spots.copy()
Rpts=zip(Ryy,Rxx)
Lpts=zip(Lyy,Lxx)
included=0
for o in Rpts+Lpts:
included+=1
track_spots_final[o]=True
# now include a co-linear connector (intra-track/inter-track connector)
track_spots_final=connector(track_spots_final)
return track_spots_final,included,rr
except:
ns.update(locals())
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
def ringable(ra_object):
'''this takes an "almost ring" and makes it a true ring. it is called within `ringer`.'''
ra_spots=asarray(ra_object.copy(),dtype=bool)
ra_insides=scipy.ndimage.binary_fill_holes(ra_spots)* logical_not(ra_spots)
hom=zeros(ra_spots.shape)
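    #the hit-or-miss pass below marks positions where the two neighbors toward a corner are
    #masked but the corner pixel itself is not (one-pixel diagonal steps); filling those
    #positions that fall inside the ring lets the re-thinning below close the ring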
for corner in [(0,0),(0,-1),(-1,0),(-1,-1)]:
miss=zeros((3,3),dtype=bool)
miss[corner]=1
hit=scipy.ndimage.morphology.binary_dilation(miss,conn4)*logical_not(miss)
hom+=scipy.ndimage.morphology.binary_hit_or_miss(ra_spots, structure1=hit, structure2=miss)
hom=asarray(hom,dtype=bool)
fill_them=ra_insides*hom
ra_spots[fill_them]=1
    #new to accommodate count_hole_filled_pixels
ra_skel=pymorph.thin(ra_spots)
ra_ring=pymorph.thin(ra_skel,pymorph.endpoints())
if not ra_ring.any(): #fill in the tiny gaps that ruin the ring!
ra4_spots=scipy.ndimage.binary_dilation(ra_spots,conn4)
ra4_skel=pymorph.thin(ra4_spots)
ra4_ring=pymorph.thin(ra4_skel,pymorph.endpoints())
if ra4_ring.any(): #fill in the tiny gaps that ruin the ring!
print "ringable 4\n"
ra_insides=scipy.ndimage.binary_fill_holes(ra4_ring)
fill_them=ra_insides*ra4_spots
ra_spots[fill_them]=1
return ra_spots
ra8_spots=scipy.ndimage.binary_dilation(ra_spots,conn8)
ra8_skel=pymorph.thin(ra8_spots)
ra8_ring=pymorph.thin(ra8_skel,pymorph.endpoints())
if ra8_ring.any(): #fill in the tiny gaps that ruin the ring!
print "ringable 8\n"
ra_insides=scipy.ndimage.binary_fill_holes(ra8_ring)
fill_them=ra_insides*ra8_spots
ra_spots[fill_them]=1
return ra_spots
def ringer_noplot(spots_ringer,l_ringer,filtstamp_ringer,imstamp_ringer,seg0stamp_ringer,star_stamp):
    '''plot-free variant of `ringer`: input the detection stamp containing a ring and output the mask with the ring removed and the outside tracks extended through it'''
try:
fl_label_str='file=%s label=%.4i' % (OFB,l_ringer)
#DONT CONTINUE: if saturation spike
sl2_height,sl2_width=imstamp_ringer.shape
sl2_height,sl2_width=float(sl2_height-6),float(sl2_width-6)
if sl2_height>230 and (sl2_height/sl2_width)>25:
return spots_ringer, "saturation spike"
#DONT CONTINUE: if really long and skinny ring
inside4_b4=scipy.ndimage.binary_opening(scipy.ndimage.binary_fill_holes(spots_ringer)* logical_not(spots_ringer),array([[1,1],[1,1]],dtype=bool)).any()
#START: what was `getring_track(spots_ringer)`
#input object mask and output the pixels separated into a ring pixels and track pixels
ringer_skel=pymorph.thin(spots_ringer)
ring=pymorph.thin(ringer_skel,pymorph.endpoints())
if not ring.any(): #fill in the tiny gaps that ruin the ring!
spots_ringer2=ringable(spots_ringer)
ringer_skel=pymorph.thin(spots_ringer2)
ring=pymorph.thin(ringer_skel,pymorph.endpoints())
if not ring.any():
print (fl_label_str+": RINGABLE didnt work!\n")
return spots_ringer, "Un-ringable holes"
else:
spots_ringer=spots_ringer2
#DONT CONTINUE: if really long and skinny ring
inside4_after=scipy.ndimage.binary_opening(scipy.ndimage.binary_fill_holes(spots_ringer)* logical_not(spots_ringer),array([[1,1],[1,1]],dtype=bool)).sum()
if not inside4_b4 and not inside4_after>5:
return spots_ringer, "none in square pattern" #might as well put this at beginning, if it fails (and I want it to pass) it'll probably pass after the thresh is raised
#now if there are gaps in the ring, then take only the inner portion surrounding them
insides=scipy.ndimage.binary_fill_holes(ring)* logical_not(ring)
newinsides=skimage.morphology.remove_small_objects(insides,2,connectivity=1) #conn4
if (insides!=newinsides).any():
newinsides_seg,Nnewinsides_segs= scipy.ndimage.label(newinsides,conn8)
if Nnewinsides_segs<=1:
ring2=scipy.ndimage.binary_dilation(newinsides,conn8,mask=ring)-newinsides
ring=ring2
insides=newinsides
#skel_outside_ring=ringer_skel*logical_not(scipy.ndimage.binary_fill_holes(scipy.ndimage.binary_dilation(ring,conn4)))
ring_and_insides=insides+ring
outsides=logical_not(ring_and_insides)
skel_outside_ring=ringer_skel*outsides
ringer_track_portions=skimage.morphology.remove_small_objects(skel_outside_ring,3,connectivity=2) #conn8
ringer_track_spots=spots_ringer*scipy.ndimage.binary_dilation(ringer_track_portions,conn8,mask=outsides)
Rgetring_ring,Rgetring_track=asarray(ring,dtype=bool),asarray(ringer_track_spots,dtype=bool)
#END: end of what was previously getring_track
#DONT CONTINUE: if it's a circle of cosmics
#tree_ring=ring.copy()
ring_and_outer_layer=scipy.ndimage.binary_dilation(ring,conn4,mask=outsides)
image_ring,image_ring_widen=imstamp_ringer[ring],imstamp_ringer[ring_and_outer_layer]
image_ring.sort();image_ring_widen.sort()
image_ring,image_ring_widen=image_ring[:-3],image_ring_widen[:-3]
image_ring_mean=max(image_ring.mean(),image_ring_widen.mean())
image_ring_filled_mean=(imstamp_ringer[insides].mean())
if image_ring_mean>image_ring_filled_mean: #if the mean value of the edge is greater than the middle, then it isn't an object at all
print (fl_label_str+": circle of cosmics!\n")
return spots_ringer, "Circle of Cosmics"
#get original mask
ringer_mask0=seg0stamp_ringer>0
ringer0=ringer_mask0*spots_ringer
yy0,xx0=nonzero(ringer0)
Pts0=zip(yy0,xx0)
for pt0 in Pts0:
if not Rgetring_track[pt0]:
if skel_outside_ring[pt0]:
skel_outside_seg,Nskelsegs=scipy.ndimage.label(skel_outside_ring,conn8)
pt0_l=skel_outside_seg[pt0]
pt0_spots=skel_outside_seg==pt0_l
Rgetring_track[pt0_spots]=True
else:
Rgetring_track[pt0]=True
if not Rgetring_track.any():#Now if it was all ring
#reset to the original mask
return spots_ringer, "Entire thing was a ring"
#SIMPLE LINE: BEGIN try seeing if everything fits in a simple line really easily
max_within=scipy.stats.scoreatpercentile(filtstamp_ringer[ring_and_insides],95)
cosmics_lintry=(filtstamp_ringer>max_within*2)*spots_ringer
yy_lin,xx_lin=nonzero(cosmics_lintry)
try:
track_length=sqrt((xx_lin.max()-xx_lin.min())**2+(yy_lin.max()-yy_lin.min())**2)
if cosmics_lintry.sum()>4 and track_length>7:
track_spots_final,included,rr=track2ring(cosmics_lintry,Rgetring_ring)
if (rr<.75) or (cosmics_lintry.sum()>9 and rr<1.03):
print (fl_label_str+": SIMPLE LINE!\n")
track_spots_final,stretch_count=iter_track_stretch(track_spots_final, filtstamp_ringer,dt_times_pt01,BASE,l_ringer,star_stamp,ts_rr_cut=ring_rr_cut,rr_per_step=.25)
#now include tracks that overlap with the mask
ring_seg,Nring_track_labels=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layer),conn8)
track_seg_include=ring_seg[cosmics_lintry]
track_seg_include_labels=unique(track_seg_include).tolist()
try:track_seg_include_labels.remove(0)
except ValueError:pass
if track_seg_include_labels:
spots_yy_all,spots_xx_all=array([],dtype=int),array([],dtype=int)
for l_track in track_seg_include_labels:
spots=ring_seg==l_track
track_spots_final+=spots
spots_yy,spots_xx=nonzero(spots)
spots_yy_all=append(spots_yy_all,spots_yy)
spots_xx_all=append(spots_xx_all,spots_xx)
ringer_yy,ringer_xx=nonzero(track_spots_final)
return track_spots_final, 0 #ringstat==0 implies all is well with ringer
except ValueError:
if cosmics_lintry.any(): print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
else: pass
#SIMPLE LINE: END try seeing if everything fits in a simple line really easily
        # first doing ring segments with 1 layer outside the ring excluded (gets closer to the ring), then doing it with 2 layers excluded (has advantage of not mixing detections near the ring). then if the 1layer and 2layer thing disagree I take the 2layer results (as long as they fit certain criteria)
ring_seg,Nring_track_labels=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layer),conn8)
ringer_track_labels=range(1,1+Nring_track_labels)
ring_slices=scipy.ndimage.find_objects(ring_seg)
ring_and_outer_layers2=scipy.ndimage.binary_dilation(ring_and_outer_layer,conn8,mask=outsides)
ring_seg_layers2,Nring_track_labels_layers2=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layers2),conn8)
#if there are a ton of track pieces, then I'll go with the original mask thing
ringer_track_labels_loop=copy(ringer_track_labels)
xx_seg,yy_seg=array([]),array([])
for l_bit in ringer_track_labels_loop:
sl=ring_slices[l_bit-1]
track_spots=ring_seg[sl]==l_bit
ringer0_here=ringer0[sl][track_spots].any()
if track_spots.sum()<7:
continue
layers2_stamp=ring_seg_layers2[sl]
layers2_at_track=layers2_stamp[track_spots]
layers2_at_track_labels=unique(layers2_at_track).tolist()
try:layers2_at_track_labels.remove(0)
except ValueError:pass
Nl2s_possible=len(layers2_at_track_labels)
if Nl2s_possible>1:
l2_sizes=[]
l2_in_orig=[]
for l2_l in layers2_at_track_labels:
l2_spots=layers2_stamp==l2_l
l2_in_orig.append(ringer0[sl][l2_spots].any())
l2_sizes.append(l2_spots.sum())
l2_sizes=array(l2_sizes)
l2_size_cut=l2_sizes>2
Nl2s=sum(l2_size_cut)
if Nl2s>=2:
if ringer0_here and not array(l2_in_orig).any(): continue
ringer_track_labels_add=max(ringer_track_labels)+1+arange(Nl2s_possible)
ringer_track_labels=ringer_track_labels+ringer_track_labels_add.tolist()
ring_seg[sl][track_spots]=0
ringer_track_labels.remove(l_bit)
for l2_l,ring_seg_l in zip(layers2_at_track_labels,ringer_track_labels_add):
l2_spots=layers2_stamp==l2_l
l2yy,l2xx=nonzero(l2_spots)
xx_seg=append(xx_seg,l2xx)
yy_seg=append(yy_seg,l2yy)
ring_seg[sl][l2_spots]=ring_seg_l
print (fl_label_str+": thing with 1layer and 2layer masks actually matters!\n")
ringer_track_labels=asarray(ringer_track_labels)
ring_seg_in_orig=[]
ring_seg_maxvals=[]
ring_seg_areas=[]
for l_bit in ringer_track_labels:
track_spots=ring_seg==l_bit
ring_seg_in_orig.append(ringer0[track_spots].any())
ring_seg_maxvals.append(filtstamp_ringer[track_spots].max())
ring_seg_areas.append(track_spots.sum())
ring_seg_in_orig=asarray(ring_seg_in_orig)
ring_seg_maxvals=asarray(ring_seg_maxvals)
ring_seg_areas=asarray(ring_seg_areas)
#keep anything that's above twice the highest ring value or was an original masked pixel
ring_seg_keep=(ring_seg_maxvals>max_within*2) + ring_seg_in_orig
if ring_seg_keep.sum()>0:ringer_track_labels=ringer_track_labels[ring_seg_keep]
else:
print (fl_label_str+': if none are the originals, then take the largest and the brightest\n')
try:
max_label=ringer_track_labels[ring_seg_maxvals.argmax()]
area_label=ringer_track_labels[ring_seg_areas.argmax()]
ringer_track_labels=[max_label]
if area_label!=max_label and ring_seg_areas.max()>5: ringer_track_labels.append(area_label)
except ValueError:
return spots_ringer, "Un-ringable holes"#if there is no max valued/max area thing, then they're all super small and
newring=ringer0.copy() #at the very least, use the original track pixels
Nringworms=0
for bit_i,l_bit in enumerate(ringer_track_labels):
track_spots=ring_seg==l_bit
track_spots_final,included,rr=track2ring(track_spots,Rgetring_ring)
#now extend track?!
if not isnan(rr):track_spots_final,stretch_count=iter_track_stretch(track_spots_final, filtstamp_ringer,dt_times_pt01,BASE,l_ringer,star_stamp,ts_rr_cut=ring_rr_cut,name_extras=('ring_rr%.2f' % (rr,)).replace('.','pt'),rr_per_step=.2)
else:track_spots_final=scipy.ndimage.binary_dilation(track_spots_final,conn8)
newring+=track_spots_final
ringer_yy,ringer_xx=nonzero(track_spots_final)
try:
if rr>ring_rr_cut or isnan(rr):
Nringworms+=1
except IndexError: pass
#if there are 2 worms, then mask entire thing!
if Nringworms>1:
newring+=ring_and_insides
ringer_Fyy,ringer_Fxx=nonzero(newring)
return newring, 0 #ringstat==0 implies all is well with ringer
except:
ns.update(locals())
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
#RINGER
def ringer(spots_ringer,l_ringer,filtstamp_ringer,imstamp_ringer,seg0stamp_ringer,star_stamp):
    '''input the detection stamp containing a ring and output the mask with the ring removed and the outside tracks extended through it (this variant also saves diagnostic plots)'''
try:
pltstr='pltRevise%s_holes_ringer-label%.4i' % (OFB,l_ringer)
pltextras=''
fl_label_str='file=%s label=%.4i' % (OFB,l_ringer)
#DONT CONTINUE: if saturation spike
sl2_height,sl2_width=imstamp_ringer.shape
sl2_height,sl2_width=float(sl2_height-6),float(sl2_width-6)
if sl2_height>230 and (sl2_height/sl2_width)>25:
return spots_ringer, "saturation spike"
#DONT CONTINUE: if really long and skinny ring
inside4_b4=scipy.ndimage.binary_opening(scipy.ndimage.binary_fill_holes(spots_ringer)* logical_not(spots_ringer),array([[1,1],[1,1]],dtype=bool)).any()
#START: what was `getring_track(spots_ringer)`
#input object mask and output the pixels separated into a ring pixels and track pixels
ringer_skel=pymorph.thin(spots_ringer)
ring=pymorph.thin(ringer_skel,pymorph.endpoints())
if not ring.any(): #fill in the tiny gaps that ruin the ring!
spots_ringer2=ringable(spots_ringer)
ringer_skel=pymorph.thin(spots_ringer2)
ring=pymorph.thin(ringer_skel,pymorph.endpoints())
if not ring.any():
print (fl_label_str+": RINGABLE didnt work!\n")
f=figure(figsize=(20,10))
yy,xx=nonzero(spots_ringer)
imshow(imstamp_ringer,interpolation='nearest',origin='lower left')
scatter(xx,yy,edgecolors='k',facecolors='None')
title('Holes there, but not ringable')
pltextras+='-NoChange_UnRingable'
f.suptitle(fl_label_str+pltextras)
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return spots_ringer, "Un-ringable holes"
else:
spots_ringer=spots_ringer2
#DONT CONTINUE: if really long and skinny ring
inside4_after=scipy.ndimage.binary_opening(scipy.ndimage.binary_fill_holes(spots_ringer)* logical_not(spots_ringer),array([[1,1],[1,1]],dtype=bool)).sum()
if not inside4_b4 and not inside4_after>5:
return spots_ringer, "none in square pattern" #might as well put this at beginning, if it fails (and I want it to pass) it'll probably pass after the thresh is raised
#now if there are gaps in the ring, then take only the inner portion surrounding them
insides=scipy.ndimage.binary_fill_holes(ring)* logical_not(ring)
newinsides=skimage.morphology.remove_small_objects(insides,2,connectivity=1) #conn4
if (insides!=newinsides).any():
newinsides_seg,Nnewinsides_segs= scipy.ndimage.label(newinsides,conn8)
if Nnewinsides_segs<=1:
ring2=scipy.ndimage.binary_dilation(newinsides,conn8,mask=ring)-newinsides
f=figure()
ax=f.add_subplot(2,2,1);imshow(ring,interpolation='nearest',origin='lower left');title('ring')
ax=f.add_subplot(2,2,2);imshow(insides,interpolation='nearest',origin='lower left');title('insides')
ax=f.add_subplot(2,2,3);imshow(newinsides,interpolation='nearest',origin='lower left');title('newinsides')
ax=f.add_subplot(2,2,4);imshow(ring2,interpolation='nearest',origin='lower left');title('ring2')
pltextras+='-reringing'
f.suptitle(fl_label_str+pltextras+'NewRing')
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
ring=ring2
insides=newinsides
#skel_outside_ring=ringer_skel*logical_not(scipy.ndimage.binary_fill_holes(scipy.ndimage.binary_dilation(ring,conn4)))
ring_and_insides=insides+ring
outsides=logical_not(ring_and_insides)
skel_outside_ring=ringer_skel*outsides
ringer_track_portions=skimage.morphology.remove_small_objects(skel_outside_ring,3,connectivity=2) #conn8
ringer_track_spots=spots_ringer*scipy.ndimage.binary_dilation(ringer_track_portions,conn8,mask=outsides)
Rgetring_ring,Rgetring_track=asarray(ring,dtype=bool),asarray(ringer_track_spots,dtype=bool)
#END: end of what was previously getring_track
#DONT CONTINUE: if it's a circle of cosmics
#tree_ring=ring.copy()
ring_and_outer_layer=scipy.ndimage.binary_dilation(ring,conn4,mask=outsides)
image_ring,image_ring_widen=imstamp_ringer[ring],imstamp_ringer[ring_and_outer_layer]
image_ring.sort();image_ring_widen.sort()
image_ring,image_ring_widen=image_ring[:-3],image_ring_widen[:-3]
image_ring_mean=max(image_ring.mean(),image_ring_widen.mean())
image_ring_filled_mean=(imstamp_ringer[insides].mean())
if image_ring_mean>image_ring_filled_mean: #if the mean value of the edge is greater than the middle, then it isn't an object at all
print (fl_label_str+": circle of cosmics!\n")
f=figure(figsize=(20,10))
yy,xx=nonzero(spots_ringer)
imshow(imstamp_ringer,interpolation='nearest',origin='lower left')
scatter(xx,yy,edgecolors='k',facecolors='None')
title('circle of cosmics')
pltextras+='-NoChange_CircleOfCosmics'
f.suptitle('file=%s label=%.4i image_ring_mean=%.4f>image_ring_filled_mean=%.4f' % (OFB,l_ringer,image_ring_mean,image_ring_filled_mean) + pltextras)
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return spots_ringer, "Circle of Cosmics"
#get original mask
ringer_mask0=seg0stamp_ringer>0
ringer0=ringer_mask0*spots_ringer
yy0,xx0=nonzero(ringer0)
Pts0=zip(yy0,xx0)
for pt0 in Pts0:
if not Rgetring_track[pt0]:
if skel_outside_ring[pt0]:
skel_outside_seg,Nskelsegs=scipy.ndimage.label(skel_outside_ring,conn8)
pt0_l=skel_outside_seg[pt0]
pt0_spots=skel_outside_seg==pt0_l
Rgetring_track[pt0_spots]=True
else:
Rgetring_track[pt0]=True
f=figure(figsize=(20,10))
f.subplots_adjust(left=.03, bottom=.03, right=.97, top=.93);f.suptitle(fl,size=8)
if not Rgetring_track.any():#Now if it was all ring
#reset to the original mask
ax=f.add_subplot(111)
yy,xx=nonzero(spots_ringer)
imshow(filtstamp_ringer,interpolation='nearest',origin='lower left')
scatter(xx,yy,edgecolors='k',facecolors='None')
scatter(xx0,yy0,edgecolors='w',marker='x')
ax.set_title('No track found around the ring. Un-doing the blend so the original mask (the white "x"s) will be used!')
pltextras+='-NoChange_NoTrack'
f.suptitle(fl_label_str+pltextras)
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return spots_ringer, "Entire thing was a ring"
#SIMPLE LINE: BEGIN try seeing if everything fits in a simple line really easily
max_within=scipy.stats.scoreatpercentile(filtstamp_ringer[ring_and_insides],95)
cosmics_lintry=(filtstamp_ringer>max_within*2)*spots_ringer
yy_lin,xx_lin=nonzero(cosmics_lintry)
try:
track_length=sqrt((xx_lin.max()-xx_lin.min())**2+(yy_lin.max()-yy_lin.min())**2)
if cosmics_lintry.sum()>4 and track_length>7:
track_spots_final,included,rr=track2ring(cosmics_lintry,Rgetring_ring)
if (rr<.75) or (cosmics_lintry.sum()>9 and rr<1.03):
print (fl_label_str+": SIMPLE LINE!\n")
track_spots_final,stretch_count=iter_track_stretch(track_spots_final, filtstamp_ringer,dt_times_pt01,BASE,l_ringer,star_stamp,ts_rr_cut=ring_rr_cut,rr_per_step=.25)
#now include tracks that overlap with the mask
ring_seg,Nring_track_labels=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layer),conn8)
track_seg_include=ring_seg[cosmics_lintry]
track_seg_include_labels=unique(track_seg_include).tolist()
try:track_seg_include_labels.remove(0)
except ValueError:pass
if track_seg_include_labels:
spots_yy_all,spots_xx_all=array([],dtype=int),array([],dtype=int)
for l_track in track_seg_include_labels:
spots=ring_seg==l_track
track_spots_final+=spots
spots_yy,spots_xx=nonzero(spots)
spots_yy_all=append(spots_yy_all,spots_yy)
spots_xx_all=append(spots_xx_all,spots_xx)
ringer_yy,ringer_xx=nonzero(track_spots_final)
imshow(filtstamp_ringer,interpolation='nearest',origin='lower left')
scatter(ringer_xx,ringer_yy,marker='o',edgecolors='k',facecolors='None',s=50)
scatter(xx_lin,yy_lin,marker='x',edgecolors='w',facecolors='None')
pltextras+='-simple_line_interupt'
try:
scatter(spots_xx_all,spots_yy_all,marker='s',edgecolors='purple',facecolors='None',s=50)
f.suptitle('SIMPLE LINE: file=%s label=%.4i rr=%.4f' % (OFB,l_ringer,rr) +pltextras+'\nwhite "x"=spots that formed simple line, black "o"=final mask, purple \t=overlapping tracks included')
except:
f.suptitle('SIMPLE LINE: file=%s label=%.4i rr=%.4f' % (OFB,l_ringer,rr) +pltextras+'\nwhite "x"=spots that formed simple line, black "o"=final mask')
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return track_spots_final, 0 #ringstat==0 implies all is well with ringer
except ValueError:
if cosmics_lintry.any(): print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
else: pass
#SIMPLE LINE: END try seeing if everything fits in a simple line really easily
ax=f.add_subplot(2,6,1);ax.set_title('spots_ringer="o"\n& original mask ="x"');yy,xx=nonzero(spots_ringer);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
scatter(xx0,yy0,edgecolors='w',marker='x')
ax=f.add_subplot(2,6,2);ax.set_title('ringer_skel');yy,xx=nonzero(ringer_skel);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
ax=f.add_subplot(2,6,3);ax.set_title('Rgetring_ring&ring');yy,xx=nonzero(ring);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
ax=f.add_subplot(2,6,4);ax.set_title('skel_outside_ring');yy,xx=nonzero(skel_outside_ring);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
ax=f.add_subplot(2,6,5);ax.set_title('ringer_track_portions');yy,xx=nonzero(ringer_track_portions);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
ax=f.add_subplot(2,6,6);ax.set_title('Rgetring_track\n& ringer_track_spots');yy,xx=nonzero(ringer_track_spots);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
# first doing ring segments with 1 layer outside the ring excluded (gets closer to the ring), then doing it with 2 layers excluded (has advantage of not mixing detections near the ring). then if the 1layer and 2layer thing disagree I take the 2layer results (as long as they fit certain criteria)
ring_seg,Nring_track_labels=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layer),conn8)
ringer_track_labels=range(1,1+Nring_track_labels)
ring_slices=scipy.ndimage.find_objects(ring_seg)
ring_and_outer_layers2=scipy.ndimage.binary_dilation(ring_and_outer_layer,conn8,mask=outsides)
ring_seg_layers2,Nring_track_labels_layers2=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layers2),conn8)
#if there are a ton of track pieces, then I'll go with the original mask thing
ringer_track_labels_loop=copy(ringer_track_labels)
xx_seg,yy_seg=array([]),array([])
for l_bit in ringer_track_labels_loop:
sl=ring_slices[l_bit-1]
track_spots=ring_seg[sl]==l_bit
ringer0_here=ringer0[sl][track_spots].any()
if track_spots.sum()<7:
continue
layers2_stamp=ring_seg_layers2[sl]
layers2_at_track=layers2_stamp[track_spots]
layers2_at_track_labels=unique(layers2_at_track).tolist()
try:layers2_at_track_labels.remove(0)
except ValueError:pass
Nl2s_possible=len(layers2_at_track_labels)
if Nl2s_possible>1:
l2_sizes=[]
l2_in_orig=[]
for l2_l in layers2_at_track_labels:
l2_spots=layers2_stamp==l2_l
l2_in_orig.append(ringer0[sl][l2_spots].any())
l2_sizes.append(l2_spots.sum())
l2_sizes=array(l2_sizes)
l2_size_cut=l2_sizes>2
Nl2s=sum(l2_size_cut)
if Nl2s>=2:
if ringer0_here and not array(l2_in_orig).any(): continue
ringer_track_labels_add=max(ringer_track_labels)+1+arange(Nl2s_possible)
ringer_track_labels=ringer_track_labels+ringer_track_labels_add.tolist()
ring_seg[sl][track_spots]=0
ringer_track_labels.remove(l_bit)
for l2_l,ring_seg_l in zip(layers2_at_track_labels,ringer_track_labels_add):
l2_spots=layers2_stamp==l2_l
l2yy,l2xx=nonzero(l2_spots)
xx_seg=append(xx_seg,l2xx)
yy_seg=append(yy_seg,l2yy)
ring_seg[sl][l2_spots]=ring_seg_l
print (fl_label_str+": thing with 1layer and 2layer masks actually matters!\n")
pltextras+='-2layer_masks'
ringer_track_labels=asarray(ringer_track_labels)
ring_seg_in_orig=[]
ring_seg_maxvals=[]
ring_seg_areas=[]
for l_bit in ringer_track_labels:
track_spots=ring_seg==l_bit
ring_seg_in_orig.append(ringer0[track_spots].any())
ring_seg_maxvals.append(filtstamp_ringer[track_spots].max())
ring_seg_areas.append(track_spots.sum())
ax=f.add_subplot(2,6,7)
ax.set_title('ring_seg')
imshow(ring_seg,interpolation='nearest',origin='lower left')
if len(xx_seg):scatter(xx_seg,yy_seg,edgecolors='k',facecolors='None')
ring_seg_in_orig=asarray(ring_seg_in_orig)
ring_seg_maxvals=asarray(ring_seg_maxvals)
ring_seg_areas=asarray(ring_seg_areas)
#keep anything that's above twice the highest ring value or was an original masked pixel
ring_seg_keep=(ring_seg_maxvals>max_within*2) + ring_seg_in_orig
if ring_seg_keep.sum()>0:
ringer_track_labels=ringer_track_labels[ring_seg_keep]
else:
print (fl_label_str+': if none are the originals, then take the largest and the brightest\n')
pltextras+='-largest_and_brightest'
try:
max_label=ringer_track_labels[ring_seg_maxvals.argmax()]
area_label=ringer_track_labels[ring_seg_areas.argmax()]
ringer_track_labels=[max_label]
if area_label!=max_label and ring_seg_areas.max()>5: ringer_track_labels.append(area_label)
except ValueError:
close(f);del f
return spots_ringer, "Un-ringable holes"#if there is no max valued/max area thing, then they're all super small and
newring=ringer0.copy() #at the very least, use the original track pixels
Nringworms=0
for bit_i,l_bit in enumerate(ringer_track_labels):
track_spots=ring_seg==l_bit
track_spots_final,included,rr=track2ring(track_spots,Rgetring_ring)
#now extend track?!
if not isnan(rr):
track_spots_final,stretch_count=iter_track_stretch(track_spots_final, filtstamp_ringer,dt_times_pt01,BASE,l_ringer,star_stamp,ts_rr_cut=ring_rr_cut,name_extras=('ring_rr%.2f' % (rr,)).replace('.','pt'),rr_per_step=.2)
else:
track_spots_final=scipy.ndimage.binary_dilation(track_spots_final,conn8)
newring+=track_spots_final
ringer_yy,ringer_xx=nonzero(track_spots_final)
try:
ax=f.add_subplot(2,6,bit_i+8)
ax.set_title('ringer track extension\niter='+str(bit_i))
imshow(filtstamp_ringer,interpolation='nearest',origin='lower left')
scatter(ringer_xx,ringer_yy,marker='o',edgecolors='k',facecolors='None',s=50)
if rr>ring_rr_cut or isnan(rr):
Nringworms+=1
pltextras+='-ringworms%s' % (Nringworms)
ax.set_title(ax.get_title()+" (rr=%.3f>rr_cut=%.3f)" % (rr,ring_rr_cut))
except ValueError: #if there are a lot of track pieces
if not 'TrackPiecesEQ' in pltextras:
pltextras+='-TrackPiecesEQ%s' % (len(ringer_track_labels))
except IndexError: #if there are a lot of track pieces
if not 'TrackPiecesEQ' in pltextras:
pltextras+='-TrackPiecesEQ%s' % (len(ringer_track_labels))
#if there are 2 worms, then mask entire thing!
if Nringworms>1:
newring+=ring_and_insides
ringer_Fyy,ringer_Fxx=nonzero(newring)
ax=f.add_subplot(2,6,11)
ax.set_title('ringer track extension\nFINAL')
imshow(filtstamp_ringer,interpolation='nearest',origin='lower left')
scatter(ringer_Fxx,ringer_Fyy,marker='o',edgecolors='k',facecolors='None',s=50)
ax=f.add_subplot(2,6,12)
ax.set_title('unfiltered image')
imshow(imstamp_ringer,interpolation='nearest',origin='lower left')
scatter(ringer_Fxx,ringer_Fyy,marker='o',edgecolors='k',facecolors='None',s=50)
pltextras+='-Changed'
f.suptitle(fl_label_str+pltextras)
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return newring, 0 #ringstat==0 implies all is well with ringer
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
#END: RING FUNCTIONS define commands for fixing rings
#START: HOLE FUNCTIONS define command for counting holes in objects
def count_hole_filled_pixels(spots):
    '''count the number of hole pixels in `spots`; if a ring isn't quite closed, try closing it by dilating (conn4, then conn8) and return the largest hole count found'''
holefilledpixels=(mahotas.close_holes(spots)!=spots).sum()
if holefilledpixels>9:
return holefilledpixels
spots4=mahotas.dilate(spots,conn4)
holefilledpixels4=(mahotas.close_holes(spots4)!=spots4).sum()
if holefilledpixels4>holefilledpixels:
return holefilledpixels4
spots8=mahotas.dilate(spots,conn8)
holefilledpixels8=(mahotas.close_holes(spots8)!=spots8).sum()
if holefilledpixels8>holefilledpixels:
return holefilledpixels8
holefilledpixels_options=array([holefilledpixels,holefilledpixels4,holefilledpixels8])
return holefilledpixels_options.max()
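## a tiny synthetic check (hypothetical array, not pipeline data): a closed 3x3 ring of True
## pixels around one empty pixel should report a single hole-filled pixel
demo_ring=zeros((5,5),dtype=bool);demo_ring[1:4,1:4]=True;demo_ring[2,2]=False
demo_holecount=count_hole_filled_pixels(demo_ring) #expect 1 (the enclosed pixel)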
#END: HOLE FUNCTIONS define command for counting holes in objects
#START: POLYNOMIAL FUNCTIONS define command for fitting lines and polynomials to objects
def polyfitter_specific(cosmics,polytype,degree=1):
'''This fits a polynomial (of a specific polytype, i.e. x_of_y or y_of_x) to the True elements in cosmics.
call it like: rr,poly,polytype=polyfitter_specific(cosmics,'x_of_y',1)'''
try:
yy,xx=nonzero(cosmics)
if polytype=='y_of_x': #XY
pXY, residualsXY, rankXY, singular_valuesXY, rcondXY = polyfit(xx,yy,degree,full=True)
try:
rXY=residualsXY.min()
except ValueError:
rXY=nan
if isnan(rXY):
return nan,None,None
rr=rXY/len(xx)
y_of_x = poly1d(pXY)
return rr,y_of_x,'y_of_x'
if polytype=='x_of_y': #YX
pYX, residualsYX, rankYX, singular_valuesYX, rcondYX = polyfit(yy,xx,degree,full=True)
try:
rYX=residualsYX.min()
except ValueError:
rYX=nan
if isnan(rYX):
return nan,None,None
rr=rYX/len(xx)
x_of_y = poly1d(pYX)
return rr,x_of_y,'x_of_y'
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
def polyfitter(cosmics,degree=1):
'''This fits a polynomial to the True elements in cosmics.
call it like: rr,poly,polytype=polyfitter(cosmics,1)'''
try:
yy,xx=nonzero(cosmics)
#if cosmics is small enough, then see how oblong it is and if it's super oblong then fit with the dependent variable being the one we have more of
if len(xx)<100:
Yextent,Xextent=len(unique(yy)),len(unique(xx))
Y2Xratio=Yextent/float(Xextent)
if Y2Xratio>2.0:
return polyfitter_specific(cosmics,'x_of_y',degree=degree)
elif Y2Xratio<.5:
return polyfitter_specific(cosmics,'y_of_x',degree=degree)
#else continue with the fit
#if cosmics is big or not oblong it continues with the usual fit here
try:
pXY, residualsXY, rankXY, singular_valuesXY, rcondXY = polyfit(xx,yy,degree,full=True)
rXY=residualsXY.min()
except ValueError:
rXY=nan
try:
pYX, residualsYX, rankYX, singular_valuesYX, rcondYX = polyfit(yy,xx,degree,full=True)
rYX=residualsYX.min()
except ValueError:
rYX=nan
residual=nanmin([rXY,rYX])
if isnan(residual):
return nan,None,None
rr=residual/len(xx)
if rXY<=rYX:
y_of_x = poly1d(pXY)
return rr,y_of_x,'y_of_x'
else:
x_of_y = poly1d(pYX)
return rr,x_of_y,'x_of_y'
except:
ns.update(locals())
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
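## a tiny synthetic check (hypothetical mask, not pipeline data): a perfect diagonal of masked
## pixels is fit by a degree-1 polynomial with essentially zero residual per point
demo_rr,demo_poly,demo_polytype=polyfitter(eye(20,dtype=bool),1)
#demo_rr is ~0 and demo_poly has slope ~1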
def cosmicpoly(l,cosmics,stamp,ax,**kwargs):
'''cosmicpoly is like polyfitter(cosmics,degree=5)'''
try:
yy,xx=nonzero(cosmics)
pXY, residualsXY, rankXY, singular_valuesXY, rcondXY = polyfit(xx,yy,5,full=True)
pYX, residualsYX, rankYX, singular_valuesYX, rcondYX = polyfit(yy,xx,5,full=True)
try:
rXY=residualsXY.min()
except ValueError:
rXY=nan
try:
rYX=residualsYX.min()
except ValueError:
rYX=nan
residual=nanmin([rXY,rYX])
if isnan(residual):
return ax,nan
rr=residual/len(xx)
y_of_x = poly1d(pXY)
x_of_y = poly1d(pYX)
X=arange(xx.min(),xx.max(),.1)
Y=arange(yy.min(),yy.max(),.1)
ax.imshow(stamp,interpolation='nearest',origin='lower left')
if not 'marker' in kwargs:
kwargs['marker']='o'
ax.scatter(xx,yy,edgecolors='k',facecolors='None',label='points',**kwargs)
if rXY<rYX:
ax.plot(X,y_of_x(X),'y')
ax.plot(x_of_y(Y),Y,'r--')
else:
ax.plot(X,y_of_x(X),'y--')
ax.plot(x_of_y(Y),Y,'r')
yd,yu=yy.min()-4,yy.max()+4
xd,xu=xx.min()-4,xx.max()+4
ywidth=yu-yd
xwidth=xu-xd
if xwidth>ywidth:
ax.set_ylim(yd,yd+xwidth)
ax.set_xlim(xd,xu)
elif ywidth>xwidth:
ax.set_xlim(xd,xd+ywidth)
ax.set_ylim(yd,yu)
ax.set_title('label %s: residual/#points=%.3f' % (l,rr),size=12)
return ax,rr
except:
ns.update(locals())
show();raise
#END: POLYNOMIAL FUNCTIONS define command for fitting lines and polynomials to objects
#START: TRACK STRETCHING and CONNECTING
ts_count=0
def track_stretcher(cosmics,CRfiltstamp,thresh,star_stamp,stretchL_total,stretchR_total,ts_rr_cut,name_extras,rr_per_step):
    '''this fits a line to `cosmics` and stretches the mask along the line, then it determines if any of the pixels included by the stretching have counts in `CRfiltstamp` above `thresh`. If they do, those pixels are added to the final mask; otherwise the mask is returned unchanged (along with end-growth flags and the line-fit residual per pixel). It is meant to be called from within iter_track_stretch'''
try:
rr,poly,polytype=polyfitter(cosmics,1)
#get spots along the line
if rr>ts_rr_cut:
return cosmics,0,0,rr
#get cosmic endpoints
cosmic_ends=cosmics*logical_not(pymorph.thin(cosmics,pymorph.endpoints(option='homotopic'),1))
around_cosmics=scipy.ndimage.binary_dilation(cosmic_ends,structure=conn8, iterations=2) * logical_not(cosmics+star_stamp) #this way stars aren't included in pts at all!
ayy,axx=nonzero(around_cosmics)
if polytype=='x_of_y':
aX=poly(ayy)
aOffsets=(axx-aX).__abs__()
elif polytype=='y_of_x':
aY=poly(axx)
aOffsets=(ayy-aY).__abs__()
else:
return cosmics,0,0,rr
close_cutL=1.2+stretchL_total*rr_per_step
close_cutR=1.2+stretchR_total*rr_per_step
extend_track_spotsL=aOffsets<close_cutL
extend_track_spotsR=aOffsets<close_cutR
if not extend_track_spotsL.any() or not extend_track_spotsR.any():
return cosmics,0,0,rr
#get the corner spots!
end_yy,end_xx=nonzero(cosmic_ends)
if polytype=='x_of_y':
end_X=poly(end_yy)
endpts_off=(end_xx-end_X).__abs__()
elif polytype=='y_of_x':
end_Y=poly(end_xx)
endpts_off=(end_yy-end_Y).__abs__()
endpts=zip(end_yy,end_xx)
UR=array([end[0]+end[1] for end in endpts])
UL=array([end[0]-end[1] for end in endpts])
LL=array([-end[0]-end[1] for end in endpts])
LR=array([-end[0]+end[1] for end in endpts])
close_enoughL=endpts_off<close_cutL+.5 #give it an extra 1/2 pixel so it has a chance of picking up neighbors
close_enoughR=endpts_off<close_cutR+.5 #give it an extra 1/2 pixel so it has a chance of picking up neighbors
Lce=close_enoughL.any()
Rce=close_enoughR.any()
if not Lce and not Rce:
return cosmics,0,0,rr
if Lce:
endpts_Lstandard=[endpt for i,endpt in enumerate(endpts) if close_enoughL[i]]
UR_Lstandard=UR[close_enoughL]
UL_Lstandard=UL[close_enoughL]
LL_Lstandard=LL[close_enoughL]
LR_Lstandard=LR[close_enoughL]
URpt_Lstandard=endpts_Lstandard[UR_Lstandard.argmax()]
ULpt_Lstandard=endpts_Lstandard[UL_Lstandard.argmax()]
LLpt_Lstandard=endpts_Lstandard[LL_Lstandard.argmax()]
LRpt_Lstandard=endpts_Lstandard[LR_Lstandard.argmax()]
if Rce:
endpts_Rstandard=[endpt for i,endpt in enumerate(endpts) if close_enoughR[i]]
UR_Rstandard=UR[close_enoughR]
UL_Rstandard=UL[close_enoughR]
LL_Rstandard=LL[close_enoughR]
LR_Rstandard=LR[close_enoughR]
URpt_Rstandard=endpts_Rstandard[UR_Rstandard.argmax()]
ULpt_Rstandard=endpts_Rstandard[UL_Rstandard.argmax()]
LLpt_Rstandard=endpts_Rstandard[LL_Rstandard.argmax()]
LRpt_Rstandard=endpts_Rstandard[LR_Rstandard.argmax()]
#make sure they are extending along the main axis of the track
try:
m,b=poly.coeffs #for line y=m*x+b or x=m*y+b
if math.fabs(m)<slope_flat_cut:
if polytype=='x_of_y':
title_extras=' ***|srt8 UP and DOWN|*** '
Ltype=1
if Rce:
UR_pt=URpt_Rstandard;UL_pt=ULpt_Rstandard
Ux_midpt=(UR_pt[1]+UL_pt[1])/2.0
Rxxyy_spots=extend_track_spotsR*(ayy>=max(UR_pt[0],UL_pt[0])-1)*((axx<=Ux_midpt+1)*(axx>=Ux_midpt-1)) #upper restricted
if Lce:
LR_pt=LRpt_Lstandard;LL_pt=LLpt_Lstandard
Lx_midpt=(LR_pt[1]+LL_pt[1])/2.0
Lxxyy_spots=extend_track_spotsL*(ayy<=min(LR_pt[0],LL_pt[0])+1)*((axx<=Lx_midpt+1)*(axx>=Lx_midpt-1)) #lower restricted
elif polytype=='y_of_x':
title_extras=' ***_srt8 RIGHT and LEFT_*** '
Ltype=2
if Rce:
UR_pt=URpt_Rstandard;LR_pt=LRpt_Rstandard
Ry_midpt=(UR_pt[0]+LR_pt[0])/2.0
Rxxyy_spots=extend_track_spotsR*(axx>=max(UR_pt[1],LR_pt[1])-1)*((ayy<=Ry_midpt+1)*(ayy>=Ry_midpt-1)) #right restricted
if Lce:
UL_pt=ULpt_Lstandard;LL_pt=LLpt_Lstandard
Ly_midpt=(UL_pt[0]+LL_pt[0])/2.0
Lxxyy_spots=extend_track_spotsL*(axx<=min(UL_pt[1],LL_pt[1])+1)*((ayy<=Ly_midpt+1)*(ayy>=Ly_midpt-1)) #left restricted
elif math.fabs(m)>slope_flat_cut**(-1):
if polytype=='x_of_y':
title_extras=' ***_srt8 RIGHT and LEFT_*** '
Ltype=3
if Rce:
UR_pt=URpt_Rstandard;LR_pt=LRpt_Rstandard
Ry_midpt=(UR_pt[0]+LR_pt[0])/2.0
Rxxyy_spots=extend_track_spotsR*(axx>=max(UR_pt[1],LR_pt[1])-1)*((ayy<=Ry_midpt+1)*(ayy>=Ry_midpt-1)) #right restricted
if Lce:
UL_pt=ULpt_Lstandard;LL_pt=LLpt_Lstandard
Ly_midpt=(UL_pt[0]+LL_pt[0])/2.0
Lxxyy_spots=extend_track_spotsL*(axx<=min(UL_pt[1],LL_pt[1])+1)*((ayy<=Ly_midpt+1)*(ayy>=Ly_midpt-1)) #left restricted
elif polytype=='y_of_x':
title_extras=' ***|srt8 UP and DOWN|*** '
Ltype=4
if Rce:
UR_pt=URpt_Rstandard;UL_pt=ULpt_Rstandard
Ux_midpt=(UR_pt[1]+UL_pt[1])/2.0
Rxxyy_spots=extend_track_spotsR*(ayy>=max(UR_pt[0],UL_pt[0])-1)*((axx<=Ux_midpt+1)*(axx>=Ux_midpt-1)) #upper restricted
if Lce:
LR_pt=LRpt_Lstandard;LL_pt=LLpt_Lstandard
Lx_midpt=(LR_pt[1]+LL_pt[1])/2.0
Lxxyy_spots=extend_track_spotsL*(ayy<=min(LR_pt[0],LL_pt[0])+1)*((axx<=Lx_midpt+1)*(axx>=Lx_midpt-1)) #lower restricted
elif m>0:
title_extras=' ***/UPPER RIGHT and LOWER LEFT/*** '
Ltype=5
if Rce:
ur=(axx>=URpt_Rstandard[1]-1)*(ayy>=URpt_Rstandard[0]-1)
Rxxyy_spots=extend_track_spotsR*ur
if Lce:
ll=(axx<=LLpt_Lstandard[1]+1)*(ayy<=LLpt_Lstandard[0]+1)
Lxxyy_spots=extend_track_spotsL*ll
elif m<0:
title_extras=' ***\\UPPER LEFT and LOWER RIGHT\\*** '
Ltype=6
if Rce:
lr=(axx>=LRpt_Rstandard[1]-1)*(ayy<=LRpt_Rstandard[0]+1)
Rxxyy_spots=extend_track_spotsR*lr
if Lce:
ul=(axx<=ULpt_Lstandard[1]+1)*(ayy>=ULpt_Lstandard[0]-1)
Lxxyy_spots=extend_track_spotsL*ul
except ValueError:
return cosmics,0,0,rr
except AttributeError:
return cosmics,0,0,rr
#pick the things from Rxxyy_spots and Lxxyy_spots which have the highest value
if Rce:
Rxx,Ryy=axx[Rxxyy_spots],ayy[Rxxyy_spots]
Rpts=zip(Ryy,Rxx)
Rpts_vals=array([CRfiltstamp[o] for o in Rpts])
Rabove_thresh=Rpts_vals>thresh
Rinclude=(Rabove_thresh).any()
else: Rinclude=False
if Lce:
Lxx,Lyy=axx[Lxxyy_spots],ayy[Lxxyy_spots]
Lpts=zip(Lyy,Lxx)
Lpts_vals=array([CRfiltstamp[o] for o in Lpts])
Labove_thresh=Lpts_vals>thresh
Linclude=(Labove_thresh).any()
else: Linclude=False
if not Rinclude and not Linclude:
return cosmics,0,0,rr
#now get edges
cosmics_final=cosmics.copy()
cosmics_expanded1=scipy.ndimage.binary_dilation(cosmic_ends,structure=conn8, iterations=1)
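#cosmics_expanded1 is the endpoint pixels plus their immediate 8-connected ring; candidate pixels falling inside that ring count as "inner" edges, the rest as "outer"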
if Rinclude:
R_Tedge_or_Fouter=array([cosmics_expanded1[o] for o in Rpts])
outer_above_thresh=Rabove_thresh[logical_not(R_Tedge_or_Fouter)].any()
inner_above_thresh=Rabove_thresh[(R_Tedge_or_Fouter)].any()
Rpts2include=set([])
if outer_above_thresh: #then take the max outer thing and its edges above the thresh
out_pt=Rpts[Rpts_vals.argmax()]
outer_surrounding=set([(out_pt[0]+mx,out_pt[1]+my) for mx,my in itertools.product([-1,0,1],[-1,0,1])])
outer_above_thresh=set([pt for i,pt in enumerate(Rpts) if Rabove_thresh[i]])
Rpts2include=Rpts2include.union(set.intersection(outer_above_thresh,outer_surrounding))
outer_inner_connection=set([pt for i,pt in enumerate(Rpts) if R_Tedge_or_Fouter[i]])
Rpts2include=Rpts2include.union(set.intersection(outer_inner_connection,outer_surrounding))
if inner_above_thresh: #then take the max inner thing and its edges above the thresh
in_pt=Rpts[Rpts_vals.argmax()]
inner_above_thresh=set([pt for i,pt in enumerate(Rpts) if Rabove_thresh[i]])
inner_surrounding=set([(in_pt[0]+mx,in_pt[1]+my) for mx,my in itertools.product([-1,0,1],[-1,0,1])])
Rpts2include=Rpts2include.union(set.intersection(inner_above_thresh,inner_surrounding))
for o in Rpts2include:
cosmics_final[o]=True
if Linclude:
L_Tedge_or_Fouter=array([cosmics_expanded1[o] for o in Lpts])
outer_above_thresh=Labove_thresh[logical_not(L_Tedge_or_Fouter)].any()
inner_above_thresh=Labove_thresh[(L_Tedge_or_Fouter)].any()
Lpts2include=set([])
if outer_above_thresh: #then take the max outer thing and its edges above the thresh
out_pt=Lpts[Lpts_vals.argmax()]
outer_above_thresh=set([pt for i,pt in enumerate(Lpts) if Labove_thresh[i]])
outer_surrounding=set([(out_pt[0]+mx,out_pt[1]+my) for mx,my in itertools.product([-1,0,1],[-1,0,1])])
Lpts2include=Lpts2include.union(set.intersection(outer_above_thresh,outer_surrounding))
outer_inner_connection=set([pt for i,pt in enumerate(Lpts) if L_Tedge_or_Fouter[i]])
Lpts2include=Lpts2include.union(set.intersection(outer_inner_connection,outer_surrounding))
if inner_above_thresh: #then take the max inner thing and its edges above the thresh
in_pt=Lpts[Lpts_vals.argmax()]
inner_above_thresh=set([pt for i,pt in enumerate(Lpts) if Labove_thresh[i]])
inner_surrounding=set([(in_pt[0]+mx,in_pt[1]+my) for mx,my in itertools.product([-1,0,1],[-1,0,1])])
Lpts2include=Lpts2include.union(set.intersection(inner_above_thresh,inner_surrounding))
for o in Lpts2include:
cosmics_final[o]=True
########f=figure(figsize=(11,10))
########ax2=f.add_subplot(1,2,2)
########ax1=f.add_subplot(10,2,19)
########yy1,xx1=nonzero(cosmics)
########yy2,xx2=nonzero(cosmics_final*logical_not(cosmics))
########ax2.imshow(CRfiltstamp,interpolation='nearest',origin='lower left')
########ax2.scatter(xx1,yy1,marker='o',edgecolors='k',facecolors='None',s=40)
########ax2.scatter(xx2,yy2,s=35,alpha=.5,marker='x',edgecolors='w',facecolors='None')
########xx_ends_plot=[]
########yy_ends_plot=[]
########if Rce:
######## ULLRxx_Rends_plot=[pt[1] for pt in [ULpt_Rstandard,LRpt_Rstandard]]
######## ULLRyy_Rends_plot=[pt[0] for pt in [ULpt_Rstandard,LRpt_Rstandard]]
######## URLLxx_Rends_plot=[pt[1] for pt in [URpt_Rstandard,LLpt_Rstandard]]
######## URLLyy_Rends_plot=[pt[0] for pt in [URpt_Rstandard,LLpt_Rstandard]]
######## ax2.scatter(URLLxx_Rends_plot,URLLyy_Rends_plot,s=60,marker='>',edgecolors='yellow',facecolors='None',label='UR/LL')
######## ax2.scatter(ULLRxx_Rends_plot,ULLRyy_Rends_plot,s=60,marker='>',edgecolors='purple',facecolors='None',label='UL/LR')
######## xx_ends_plot+=ULLRxx_Rends_plot;xx_ends_plot+=URLLxx_Rends_plot
######## yy_ends_plot+=ULLRyy_Rends_plot;yy_ends_plot+=URLLyy_Rends_plot
########if Lce:
######## ULLRxx_Lends_plot=[pt[1] for pt in [ULpt_Lstandard,LRpt_Lstandard]]
######## ULLRyy_Lends_plot=[pt[0] for pt in [ULpt_Lstandard,LRpt_Lstandard]]
######## URLLxx_Lends_plot=[pt[1] for pt in [URpt_Lstandard,LLpt_Lstandard]]
######## URLLyy_Lends_plot=[pt[0] for pt in [URpt_Lstandard,LLpt_Lstandard]]
######## ax2.scatter(URLLxx_Lends_plot,URLLyy_Lends_plot,s=60,marker='<',edgecolors='yellow',facecolors='None',label='UR/LL')
######## ax2.scatter(ULLRxx_Lends_plot,ULLRyy_Lends_plot,s=60,marker='<',edgecolors='purple',facecolors='None',label='UL/LR')
######## xx_ends_plot+=ULLRxx_Lends_plot;xx_ends_plot+=URLLxx_Lends_plot
######## yy_ends_plot+=ULLRyy_Lends_plot;yy_ends_plot+=URLLyy_Lends_plot
########f.suptitle('white "x"=added by stretching , black "o"=there before \n yellow ">"=UR/LL_Rstandard , purple ">"=UL/LR_Rstandard || yellow "<"=UR/LL_Lstandard , purple "<"=UL/LR_Lstandard\n'+title_extras)
########ax1.set_frame_on(False)
########f=imagetools.AxesStripText(f,axes=[ax1],allticks=True,titles=False)
########ax1.set_title('stretchL_total=%s\nstretchR_total=%s\nrr=%.3f\npolytype=%s\nLtype=%s\nm=%.3f\nLce=%s Linclude=%s\nRce=%s Rinclude=%s' % (stretchL_total,stretchR_total,rr,polytype,Ltype,m,Lce,Linclude,Rce,Rinclude))
########ax2.set_xlim(min(xx_ends_plot)-5,max(xx_ends_plot)+5)
########ax2.set_ylim(min(yy_ends_plot)-5,max(yy_ends_plot)+5)
########NameString='pltRevise%s_stretch-TS%.4i-iter%s' % (OFB,ts_count,name_extras)
########f=imagetools.NameFileDate(f,NameString,FileString,DateString)
########f.savefig(plotdir+NameString)
########close(f);del f
global ts_count
ts_count+=1
return cosmics_final,Linclude,Rinclude,rr
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
def iter_track_stretch(cosmics, CRfiltstamp,bthresh,BASE,l,star_stamp,name_extras='',ts_rr_cut=1.8,rr_per_step=.07,track_len_cut=4):
'''run track_stretcher over and over until it converges'''
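#usage sketch (argument values here are illustrative, not from a real run):
# grown_mask,n_steps=iter_track_stretch(mask.copy(),CRfilt_stamp,60.0,BASE,42,star_stamp)
#returns the (possibly) extended boolean mask plus the total number of left+right stretch iterations applied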
yy_lin,xx_lin=nonzero(cosmics)
track_length=sqrt((xx_lin.max()-xx_lin.min())**2+(yy_lin.max()-yy_lin.min())**2)
if track_length<track_len_cut:
return cosmics,0
stretch_countL=0
stretch_countR=0
stretch=1
########cosmics_no_stretch=cosmics.copy() #noplot
while stretch:
cosmics,stretchL,stretchR,rr=track_stretcher(cosmics,CRfiltstamp,bthresh,star_stamp,stretch_countL,stretch_countR,ts_rr_cut,name_extras,rr_per_step)
stretch_countL+=stretchL
stretch_countR+=stretchR
stretch=stretchL or stretchR
stretch_count=stretch_countL+stretch_countR
########global ts_count
########ts_count+=1
########if stretch_count:
######## f=figure(figsize=(11,10))
######## ax=f.add_subplot(1,1,1)
######## yy1,xx1=nonzero(cosmics_no_stretch)
######## yy2,xx2=nonzero(cosmics*logical_not(cosmics_no_stretch))
######## ax.imshow(CRfiltstamp,interpolation='nearest',origin='lower left')
######## ax.scatter(xx1,yy1,marker='o',edgecolors='k',facecolors='None',s=40)
######## ax.scatter(xx2,yy2,marker='x',edgecolors='w',facecolors='None')
######## bthresh_tag=('bthresh%.3i' % (bthresh))
######## NameString='pltRevise%s_stretch-TS%.4i-%s-label%.4i%s' % (OFB,ts_count,bthresh_tag,l,name_extras)
######## ax.set_title('white "x" = added by stretching\n# stretch iterations Left=%s Right=%s\nstretch threshold=%s (label=%s) rr=%.3f' % (stretch_countL,stretch_countR,bthresh_tag,l,rr))
######## f=imagetools.NameFileDate(f,NameString,FileString,DateString)
######## if cosmics.size>100:
######## ax.set_xlim(min(xx1.min(),xx2.min())-3,max(xx1.max(),xx2.max())+3)
######## ax.set_ylim(min(yy1.min(),yy2.min())-3,max(yy1.max(),yy2.max())+3)
######## f.savefig(plotdir+NameString)
######## close(f);del f
########else:
######## f=figure(figsize=(11,10))
######## ax=f.add_subplot(1,1,1)
######## yy1,xx1=nonzero(cosmics_no_stretch)
######## ax.imshow(CRfiltstamp,interpolation='nearest',origin='lower left')
######## ax.scatter(xx1,yy1,marker='o',edgecolors='k',facecolors='None',s=40)
######## bthresh_tag=('bthresh%.3i' % (bthresh))
######## NameString='pltRevise%s_stretch-TS%.4i-unstretchable-%s-label%.4i%s' % (OFB,ts_count,bthresh_tag,l,name_extras)
######## ax.set_title('UNSTRETCHABLE (label=%s) rr=%.3f' % (l,rr))
######## f=imagetools.NameFileDate(f,NameString,FileString,DateString)
######## f.savefig(plotdir+NameString)
######## close(f);del f
return cosmics,stretch_count
def connector(cosmics):
'take non-connected cosmics (that are almost connected) and connect them'
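#approach: if the separate pieces lie along a single well-fit line (low residual), fill in the convex-hull pixels that fall within roughly a pixel of that line so the pieces join into one connected track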
contig_checkseg,Npieces=scipy.ndimage.label(cosmics,conn8)
del contig_checkseg
if Npieces<=1:
return cosmics
rr,poly,polytype=polyfitter(cosmics,1)
if rr>3.0:
return cosmics
hull_final=skimage.morphology.convex_hull_image(cosmics) * logical_not(cosmics)
hyy,hxx=nonzero(hull_final)
if polytype=='x_of_y':
hX=poly(hyy)
hOffsets=(hxx-hX).__abs__()
elif polytype=='y_of_x':
hY=poly(hxx)
hOffsets=(hyy-hY).__abs__()
else:
return cosmics
if rr<.6:hull_rr=.6
elif rr>1.2:hull_rr=1.2
else: hull_rr=rr
hull_extend_cosmics=hOffsets<hull_rr
Hxx,Hyy=hxx[hull_extend_cosmics],hyy[hull_extend_cosmics]
Hpts=zip(Hyy,Hxx)
for o in Hpts:
cosmics[o]=True
return cosmics
#END: TRACK STRETCHING and CONNECTING
#START: BLENDING FUNCTIONS
def ExpandMaskAbove(image,mask,EdgeThresh):
'''take the input mask and add in edges that are above some threshold'''
expand_mask=scipy.ndimage.binary_dilation(mask,structure=conn8, iterations=1) #we use conn4 in step5_make_inputs_and_outputs.2.1.py
edge_mask=expand_mask*logical_not(mask)
edgepixels=ma.array(image,mask=logical_not(edge_mask)) #mask all non-edges
edgeout=edgepixels>EdgeThresh #edges > bthresh = True & edges < bthresh = False
add2mask=ma.filled(edgeout,False) #non-edges=False and edges<bthresh=False
maskEGbthresh=mask+add2mask #maskEGbthresh=size thresholded mask OR mask edge above the EdgeThresh
return maskEGbthresh
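#usage sketch (the threshold value is illustrative):
# grown=ExpandMaskAbove(CRfiltstamp,mask,60.0)
#each call grows the mask by at most one ring of neighboring pixels; blocked_blender below applies it repeatedly until the mask converges or touches a star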
#set cutting parameters
blend_raise_bthresh_amount=70.0
blend_rr_cut=3.0
blend_sizediff_1vs2_cut=60
blend_slope_off_cut=.06
blend_holefilledpixels_cut=21
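#roles of these cuts:
# blend_raise_bthresh_amount: added to bthresh for the stricter second-pass mask (cosmics2)
# blend_rr_cut: poly-fit residual per point above which a mask is treated as a blob
# blend_sizediff_1vs2_cut: pixel excess of the loose mask over the strict one that triggers the comparison
# blend_slope_off_cut: maximum slope difference for two line fits to count as the same trajectory
# blend_holefilledpixels_cut: hole-filled pixel count above which the strict mask is preferred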
#SHNT: make it plot/not plot based on value of PLOT_ON_OFF
def blocked_blender(bthresh,CRfiltimage,CRll,CRslices,starbools,CRseg):
'''take input CR detections and output detections that have been blended (surroundings have been included in the mask if they were above bthresh) and blocked (meaning they are blocked from hitting a detected object)'''
try:
print '\n############# START BLEND: bthresh = '+str(bthresh)+" ###################"
blend_Niters=[];blend_ended=[]
blended_CRseg=CRseg.copy()
bthresh2=bthresh+blend_raise_bthresh_amount
bthresh_raise_tag=('bthresh%.i_to_%.i' % (bthresh,bthresh+blend_raise_bthresh_amount))
for l in CRll:
sl=CRslices[l-1]
sle=imagetools.slice_expand(sl,100)
CRfiltstamp=CRfiltimage[sle]
SBstamp=starbools[sle]
cosmics1=blended_CRseg[sle]==l
cosmics2=cosmics1.copy()
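#cosmics1 is grown at the nominal bthresh and cosmics2 at the stricter bthresh2; cosmics2 only replaces cosmics1 further down if the looser mask balloons into a blob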
#iterate a max of 100 times to expand to neighboring pixels above bthresh
for i in range(100): #limit to 100 iterations
cosmicsb4=cosmics1.copy()
cosmics1=ExpandMaskAbove(CRfiltstamp,cosmicsb4,bthresh)
if (cosmics1==cosmicsb4).all():
blend_ended.append(0)
break
if SBstamp[cosmics1].any():
blend_ended.append(1)
break
else:
blend_ended.append(2)
blend_Niters.append(i+1)
#do this iteration again at a higher threshold
for i in range(100): #limit to 100 iterations
cosmics2b4=cosmics2.copy()
cosmics2=ExpandMaskAbove(CRfiltstamp,cosmics2b4,bthresh2)
if (cosmics2==cosmics2b4).all():
break
if SBstamp[cosmics2].any():
break
# if the higher threshold result is way smaller, then consider returning the 2nd one
size_diff12=cosmics1.sum()-cosmics2.sum()
if size_diff12>blend_sizediff_1vs2_cut:
#START: blob check, only consider switching to cosmics2 if an appreciable fraction (>3%) of cosmics1 survives an 8-connected opening
open8_cosmics=scipy.ndimage.binary_opening(cosmics1,conn8)
open8_Nspots= float(open8_cosmics.sum())
open8_frac=open8_Nspots/cosmics1.sum()
if open8_frac<.03: #if less than 3% of the pixels in cosmics1 are open8 pixels, then cosmics1 isn't a blob, so stick with cosmics1 (rather than switching to cosmics2)
if PLOT_ON_OFF:
f=figure(figsize=(12,9));f.add_subplot(121);title('FINAL: cosmics1');yy,xx=nonzero(cosmics1);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.add_subplot(122);title('cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.suptitle('Stick with cosmics1\nfailed: open8_frac=%.3f < .03' % (open8_frac,))
f.savefig(plotdir+'pltRevise%s_failed_raise_thresh_%s-No_Open8-label%.4i' % (OFB,bthresh_raise_tag,l))
close(f);del f
#END: blob check
else:
rr2,poly2,polytype2=polyfitter(cosmics2,1)
if isnan(rr2) or rr2>blend_rr_cut: #if the linefit looks like crap for cosmics2, then it probably will for cosmics1, so we assume it's not a line and take 2nd mask
if open8_frac>.2: #if greater than 20% of the image is open8, then use cosmics2:
#stretch cosmics2 if it's linear
if rr2<1.2:
cosmics2_addons,count_stretch2=iter_track_stretch(cosmics2, CRfiltstamp,bthresh*.4,BASE,l,SBstamp,name_extras='_InBlender2',ts_rr_cut=2.0,rr_per_step=.04)
cosmics2[cosmics2_addons*cosmics1]=True
cosmics2=connector(cosmics2)
else:
count_stretch2=0
#make sure picking cosmics2 doesn't mean that we're breaking the track into smaller pieces
contig_checkseg,Npieces2=scipy.ndimage.label(cosmics2,conn8)
contig_checkseg,Npieces1=scipy.ndimage.label(cosmics1,conn8)
if Npieces2<=Npieces1: #if picking cosmics2 doesn't break the track up into smaller pieces, then continue
if PLOT_ON_OFF:
f=figure(figsize=(12,9));f.add_subplot(121);title('cosmics1');yy,xx=nonzero(cosmics1);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.add_subplot(122);title('FINAL: cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.suptitle('Going from cosmics1 to cosmics2 (count_stretch2=%s)!\npassed: open8_frac=%.3f < .03\npassed: rr2=%.3f>blend_rr_cut=%.3f\npassed: open8_frac=%.3f >.2' % (count_stretch2,open8_frac,rr2,blend_rr_cut,open8_frac))
f.savefig(plotdir+'pltRevise%s_passed_raise_thresh_%s-simple-label%.4i' % (OFB,bthresh_raise_tag,l))
close(f);del f
cosmics1=cosmics2
else:
if PLOT_ON_OFF:
f=figure(figsize=(12,9));f.add_subplot(121);title('FINAL: cosmics1');yy,xx=nonzero(cosmics1);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.add_subplot(122);title('cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.suptitle('Stick with cosmics1!\npassed: open8_frac=%.3f < .03\npassed: rr2=%.3f>blend_rr_cut=%.3f\nfailed open8_frac=%.3f >.2' % (open8_frac,rr2,blend_rr_cut,open8_frac))
f.savefig(plotdir+'pltRevise%s_failed_raise_thresh_%s-simple-label%.4i' % (OFB,bthresh_raise_tag,l))
close(f);del f
else: #if the line fit is decent for cosmics2, try the line fit for cosmics1
yy2,xx2=nonzero(cosmics2)
slope2=poly2.coeffs[0]
#try:
# slope2=poly2.coeffs[0]
#except ValueError:
# pass
#except AttributeError:
# pass
yy1,xx1=nonzero(cosmics1*logical_not(cosmics2))
yy3,xx3=nonzero(cosmics1)
if polytype2=='y_of_x': #if rXY2<rYX2:
pXY1, residualsXY1, rankXY1, singular_valuesXY1, rcondXY1 = polyfit(xx1,yy1,1,full=True)
slope1=pXY1[0]
slope_off=abs(slope2-slope1)
rr1=residualsXY1[0]/len(xx1)
poly1 = poly1d(pXY1)
X3=arange(xx3.min(),xx3.max(),.1)
pltxx1,pltyy1=X3,poly1(X3)
pltxx2,pltyy2=X3,poly2(X3)
else: #if polytype2=='x_of_y': #if rYX2<rXY2:
pYX1, residualsYX1, rankYX1, singular_valuesYX1, rcondYX1 = polyfit(yy1,xx1,1,full=True)
slope1=pYX1[0]
slope_off=abs(slope2-slope1)
rr1=residualsYX1[0]/len(xx1)
poly1 = poly1d(pYX1)
Y3=arange(yy3.min(),yy3.max(),.1)
pltxx1,pltyy1=poly1(Y3),Y3
pltxx2,pltyy2=poly2(Y3),Y3
if isnan(rr1) or rr1>(blend_rr_cut+1.0) or slope_off>blend_slope_off_cut:#if the linefit looks like crap for cosmics1, then we assume it's not a line and take 2nd mask
#stretch cosmics2 if it's linear
if rr2<1.2:
cosmics2_addons,count_stretch2=iter_track_stretch(cosmics2, CRfiltstamp,bthresh*.4,BASE,l,SBstamp,name_extras='_InBlender2',ts_rr_cut=2.0,rr_per_step=.04)
cosmics2[cosmics2_addons*cosmics1]=True
cosmics2=connector(cosmics2)
else:
count_stretch2=0
#make sure picking cosmics2 doesn't mean that we're breaking the track into smaller pieces
contig_checkseg,Npieces2=scipy.ndimage.label(cosmics2,conn8)
contig_checkseg,Npieces1=scipy.ndimage.label(cosmics1,conn8)
if Npieces2<=Npieces1: #if picking cosmics2 doesn't break the track up into smaller pieces, then continue
if PLOT_ON_OFF:
f=figure(figsize=(12,9));f.add_subplot(121);title('cosmics1');yy,xx=nonzero(cosmics1);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.add_subplot(122);title('FINAL: cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.suptitle('Going from cosmics1 to cosmics2! (count_stretch2=%s)\npassed: open8_frac=%.3f < .03\npassed: rr2=%.3f>blend_rr_cut=%.3f\npassed: rr1=%.3f>blend_rr_cut+1=%.3f or slope_off=%.3f>blend_slope_off_cut=%.3f' % (count_stretch2,open8_frac,rr2,blend_rr_cut,rr1,blend_rr_cut+1.0,slope_off,blend_slope_off_cut))
f.savefig(plotdir+'pltRevise%s_passed_raise_thresh_%s-higher_thresh_much_smaller-label%.4i' % (OFB,bthresh_raise_tag,l))
close(f);del f
cosmics1=cosmics2
elif PLOT_ON_OFF: #else cosmics1 stays the same because I determine that they are both lines along the same trajectory!
f=figure()
f.suptitle('Stick with cosmics1!\npassed: open8_frac=%.3f < .03\npassed: rr2=%.3f>blend_rr_cut=%.3f\nfailed: rr1=%.3f>blend_rr_cut=%.3f and slope_off=%.3f>blend_slope_off_cut=%.3f' % (open8_frac,rr2,blend_rr_cut,rr1,blend_rr_cut,slope_off,blend_slope_off_cut))
ax=f.add_subplot(1,1,1)
ax.imshow(CRfiltstamp,interpolation='nearest',origin='lower left')
ax.scatter(xx1,yy1,marker='o',edgecolors='k',facecolors='None',label='cosmics1')
ax.scatter(xx2,yy2,marker='x',edgecolors='w',facecolors='None',label='cosmics2')
ax.plot(pltxx2,pltyy2,'w')
ax.plot(pltxx1,pltyy1,'k--')
ax.set_ylim(yy3.min()-3,yy3.max()+3)
ax.set_xlim(xx3.min()-3,xx3.max()+3)
f.savefig(plotdir+'pltRevise%s_failed_raise_thresh_%s-SameTrajectory-label%.4i' % (OFB,bthresh_raise_tag,l))
close(f);del f
#get the number of hole-filled pixels the simple way rather than using `holefilledpixels=count_hole_filled_pixels(cosmics1)`
if bthresh==bthresh1: #only do the holefilled cut raise if it's the first time using blender
holefilledpixels=(scipy.ndimage.binary_fill_holes(cosmics1)!=cosmics1).sum()
if holefilledpixels>blend_holefilledpixels_cut:
if PLOT_ON_OFF:
f=figure(figsize=(12,9));f.add_subplot(121);title('cosmics1');yy,xx=nonzero(cosmics1);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.add_subplot(122);title('FINAL: cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.suptitle('Go from cosmics1 to cosmics2!')
f.savefig(plotdir+'pltRevise%s_passed_raise_thresh_%s-holefilledpixels-label%.4i' % (OFB,bthresh_raise_tag,l))
close(f);del f
print "holefilledpixels: ",holefilledpixels
cosmics1=cosmics2
blended_CRseg[sle][cosmics1]=l
#loop ends if mask (1) converges, (2) hits a star, or (3) hits 100 iterations
blend_ended=array(blend_ended)
print "times converged: ",(blend_ended==0).sum()
print "times hit star : ",(blend_ended==1).sum()
print "times 100 iters: ",(blend_ended==2).sum()
print "at bthresh %.3i it converges after a mean of %.3f iterations" % (bthresh,numpy.mean(blend_Niters))
print "# iterations=",blend_Niters
print '############# END BLEND: bthresh = '+str(bthresh)+" ###################\n"
return blended_CRseg
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
#END: BLENDING FUNCTIONS
def reset_labels(prob_labels,segs2reset):
'''reset the masks for the labels in `prob_labels` within `segs2reset` back to their original (pre-blend) footprints given by the global `detections0`'''
CRsegX=segs2reset.copy()
for l in prob_labels:
spots=segs2reset==l
CRsegX[spots]=0 #reset the problem labels to zero
newspots=spots*detections0
CRsegX[newspots]=l #reset the problem labels to their original value
return CRsegX
#END: LABEL FUNCTIONS
PLOT_ON_OFF=0 #0=plotting off 1=plotting on
if __name__ == "__main__":
args=imagetools.ArgCleaner(sys.argv)
if len(sys.argv)<2:
sys.exit()
fl=args[-1]
if not os.path.isfile(fl):
print "sys.argv=",sys.argv
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise Exception(fl+" is not a file!")
else:
print "starting file=",fl
#try:
# PLOT_ON_OFF=sys.argv[2]
#except:
# pass
#START: iter0
t0=time.time()
#get the image for `fl`
image=imagetools.GetImage(fl)
back_im=scipy.stats.scoreatpercentile(image,48)
CRfl=astropy.io.fits.open(fl)
header=CRfl[0].header
OBJECT=header['MYOBJ']
FILTER=header['FILTER']
CCDnum=header['IMAGEID']
#if CCDnum==7: PLOT_ON_OFF=1
#iter0: take the original files2check and prepare them for blending
files2check=[]
flname=os.path.basename(fl).split('.')[0]
BASE=os.path.basename(fl).split('OCF')[0]
#get cosmics images
OFB='%s_%s_%s' % (OBJECT,FILTER,BASE,)
CR_segfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/data_SCIENCE_cosmics/SEGMENTATION_CRN-cosmics_%s_%s.%s.fits' % (OBJECT,FILTER,BASE,)
CR_filtfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/data_SCIENCE_cosmics/FILTERED_CRN-cosmics_%s_%s.%s.fits' % (OBJECT,FILTER,BASE,)
CRfitsfl=astropy.io.fits.open(CR_filtfl)
rms=CRfitsfl[0].header['MYRMS']
rms_bins=arange(10,100,5)
#adam-tmp# rms_bins=arange(10,90,5)
bthresh1_bin=digitize([rms],rms_bins)[0] #no "-1" here because I want the top-edge of the bin, not the bottom edge
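#e.g. (illustrative) an rms of 37 lands in the [35,40) bin, digitize returns 6 and bthresh1 below becomes rms_bins[6]=40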
#adam-tmp# if bthresh1_bin==0 or bthresh1_bin>15:
if bthresh1_bin==0 or bthresh1_bin>17:
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise Exception('this rms just is not right')
bthresh1=rms_bins[bthresh1_bin]
dt=CRfitsfl[0].header['CRN_DT']*rms#; ft=CRfitsfl[0].header['CRN_FT']*rms
dt_times_pt01=int(dt*.01+1) #this is like a ceiling function
CRfiltimage=CRfitsfl[0].data
CRfiltheader=CRfitsfl[0].header
CRfitsfl.close()
#get star images
star_segfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/data_SCIENCE_stars/SEGMENTATION_CRN-stars_%s_%s.%s.fits' % (OBJECT,FILTER,BASE,)
starseg0=asarray(imagetools.GetImage(star_segfl),dtype=int)
star0_slices=scipy.ndimage.find_objects(starseg0 )
Nstars=starseg0.max()
#remove stars that don't at least have a square in them
for i in range(1,Nstars+1):
sl=star0_slices[i-1]
spots=starseg0[sl]==i
openspots=scipy.ndimage.binary_opening(spots,array([[1,1],[1,1]]))
if not openspots.any():
starseg0[sl][spots]=0
#now add in things with at least 60 pixels above the saturation level (in case they aren't there yet)
sat=(image-back_im)>21000
sat_big=skimage.morphology.remove_small_objects(sat,60,connectivity=2) #conn8
sat_seg,Nsat_labels=mahotas.label(sat_big,conn8)
sat_labels=arange(Nsat_labels)+1
sat_slices=scipy.ndimage.find_objects(sat_seg)
s2=skimage.morphology.star(2)
sat_spike_bools=zeros(image.shape,dtype=bool)
#add very large regions near saturation that have an s2 shape in them
for l,sl in zip(sat_labels,sat_slices):
spots=sat_seg[sl]==l
if scipy.ndimage.binary_opening(spots,s2).any():
ll_ss0=unique(starseg0[sl][spots])
ss0_bools=zeros(image.shape,dtype=bool)
ss0_bools[sl][spots]=True
#print "ss0_bools.sum() before: ",ss0_bools.sum()
for l0 in ll_ss0:
if l0==0:continue
ss0_bools+=(starseg0==l0)
#print "ss0_bools.sum() after: ",ss0_bools.sum()
starseg0[ss0_bools]=l+Nstars
sl_wY,sl_wX=imagetools.slice_size(sl)
ratio_h2w=float(sl_wY)/sl_wX
if ratio_h2w>2:
sat_spike_bools[ss0_bools]=True
#setup final star position array
sat_spike_bools=mahotas.dilate(sat_spike_bools,conn4)#dilate only those large saturation areas
starbools=mahotas.dilate(starseg0>Nstars,conn4)#dilate only those large saturation areas
starbools+=(starseg0>0)
#get cosmics and remove the ones that overlap with the stars (these will be replaced later, but I don't want them to be blended!)
CRseg0=asarray(imagetools.GetImage(CR_segfl),dtype=int)
CRll_for_loop=arange(CRseg0.max())+1
CRll=CRll_for_loop.tolist()
CRslices=scipy.ndimage.find_objects(CRseg0)
CRoverlapSTAR=zeros(CRseg0.shape,dtype=bool) #these are almost entirely saturation spikes!
CRoverlapSTAR_Ncosmics_mask_at_end=0
CRoverlapSTAR_Npixels_mask_at_end=0
for l in CRll_for_loop:
CRsl=CRslices[l-1]
CRspots=CRseg0[CRsl]==l
CR_on_star_frac=starbools[CRsl][CRspots].mean()
if CR_on_star_frac>0:
#test if it is a major hit or a minor hit
if CR_on_star_frac<0.5:
CRsl2=imagetools.slice_expand(CRsl,1)
STARspots=starbools[CRsl2]
STARspots2=scipy.ndimage.binary_dilation(STARspots,conn8)
CRspots2=CRseg0[CRsl2]==l
CR_on_dilated_star_frac=STARspots2[CRspots2].mean()
if CR_on_dilated_star_frac<0.5: #if it's a minor hit, then remove the overlap and continue
overlap=CRspots2*STARspots2
CRseg0[CRsl2][overlap]=0
continue
#always remove a major hit from list of CRs
CRll.remove(l)
CRseg0[CRsl][CRspots]=0
if CRspots.sum()>9: #if big enough, then remove it later
CRoverlapSTAR_Ncosmics_mask_at_end+=1
CRoverlapSTAR_Npixels_mask_at_end+=CRspots.sum()
CRoverlapSTAR[CRsl][CRspots]=1
CRll=asarray(CRll)
#get the info needed to define the blender function
#start saving output
compare_dir='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/data_SCIENCE_compare/'
detections0=CRseg0>0
WOblendCRfiltimage=CRfiltimage.copy()
WOblendCRfiltimage[detections0]=-2000
#save original file
hdu=astropy.io.fits.PrimaryHDU(image)
hdu.header=CRfiltheader
fl_original=compare_dir+'BBout_ORIGINAL_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
hdu.writeto(fl_original,overwrite=True)
files2check.append(fl_original)
#save old CR mask file
hdu=astropy.io.fits.PrimaryHDU(WOblendCRfiltimage)
hdu.header=CRfiltheader
fl_woblend=compare_dir+'BBout_WOblend_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
hdu.writeto(fl_woblend,overwrite=True)
files2check.append(fl_woblend)
#END: iter0
#START: iter1
t1=time.time()
#iter1: run the blender!
bthresh1_tag=('bthresh%.3i' % (bthresh1))
CRblended1=blocked_blender(bthresh1,CRfiltimage,CRll,CRslices,starbools,CRseg0.copy())
BBCRmask=CRblended1>0
print "Masked",float((BBCRmask).sum())/detections0.sum(),"times the number of original pixels"
BBCRseg,BBCR_Nlabels=scipy.ndimage.label(BBCRmask,conn8)
BBCRslices_b4=scipy.ndimage.find_objects(BBCRseg)
BBCRlabels=arange(BBCR_Nlabels)+1
#get the number of holes in each detection
BBCRslices=[]
Nholefilledpixels=[]
BBCR_hit_spike=[]
for l,sl in zip(BBCRlabels,BBCRslices_b4):
spots=BBCRseg[sl]==l
Nholefilledpixels.append(count_hole_filled_pixels(spots))
sl3=imagetools.slice_expand(sl,3)
BBCRslices.append(sl3)
BBCR_hit_spike.append(sat_spike_bools[sl][spots].any())
Nholefilledpixels=asarray(Nholefilledpixels)
BBCR_hit_spike=asarray(BBCR_hit_spike)
BBCRregs=skimage.measure.regionprops(BBCRseg)
area=array([BBCRregs[i].area for i in range(BBCR_Nlabels)])
#select cut parameters
rr_iterN_cut=3.1 #masks with poly residual/#pts>=this will have their threshold raised
holefilledpixels_cut=5 #masks with more than this many hole-filled pixels will be sent to the ringer function
open_cut=11;open_rr_cut=.8
area_cut=8
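#masks of area_cut=8 pixels or fewer skip the polynomial/blob checks below; open_cut and open_rr_cut govern the opening-based blob cut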
#get rid of masks that are just big blobs near saturation spikes
BBCRll_spike_overlaps=BBCRlabels[BBCR_hit_spike]
consider_spikes=len(BBCRll_spike_overlaps)
if consider_spikes:
spike_overlap_fail_area_cut=area[BBCRll_spike_overlaps-1]<=area_cut #if area<area_cut, then give it a free pass
spike_overlap_reset={} #True=>reset to original form #False=>keep as is. Either way don't include in iter2 and beyond
spike_overlap_stats={}
for l_spike_fail in BBCRll_spike_overlaps[spike_overlap_fail_area_cut]:
spike_overlap_reset[l_spike_fail]=False
spike_overlap_stats[l_spike_fail]="KEEP: It's small, just keep it as is."
#iter1: select the masks big enough to be able to fail cuts
hole_cuts=Nholefilledpixels>holefilledpixels_cut
big_enough=area>area_cut
area2polyfit=area[big_enough]
BBCRlabels2polyfit=BBCRlabels[big_enough]
#iter1: find detections from iter1 that fail the polynomial fit cut (add to list of bad labels if poly doesn't fit well)
cut_labels2=[];cut_details2=[]
########count=0
for i,(k,size_k) in enumerate(zip(BBCRlabels2polyfit,area2polyfit)):
########Nax= i % 9 + 1
########if Nax==1:
######## count+=1
######## if i!=0:
######## f=imagetools.AxesStripText(f,allticks=True,titles=False)
######## f.savefig(plotdir+'pltRevise%s_bad_labels-polyfit_num%.3i' % (OFB,count))
######## close(f);del f
######## f=figure(figsize=(14,14))
########ax=f.add_subplot(3,3,Nax)
sl=BBCRslices[k-1]
cosmics=BBCRseg[sl]==k
########stamp=image[sl]
########ax,rr_k=cosmicpoly(k,cosmics,stamp,ax)
rr_k,poly_k,polytype_k=polyfitter(cosmics,degree=5)
open8_cosmics=scipy.ndimage.binary_opening(cosmics,conn8)
open8_Nspots=open8_cosmics.sum()
open8_frac=float(open8_Nspots)/size_k
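#open8_frac: fraction of the mask that survives an 8-connected binary opening; thin tracks give values near 0, compact blobs approach 1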
if k in BBCRll_spike_overlaps:
if open8_frac>.2:
spike_overlap_reset[k]=True
spike_overlap_stats[k]=("RESET: (fyi rr=%.1f>%.1f) open8_frac=%.2f>.2" % (rr_k,rr_iterN_cut,open8_frac))
else:
spike_overlap_reset[k]=False
spike_overlap_stats[k]=("KEEP: (fyi rr=%.1f>%.1f) open8_frac=%.2f<.2" % (rr_k,rr_iterN_cut,open8_frac))
elif rr_k>rr_iterN_cut and open8_frac>.03:
cut_labels2.append(k)
cut_details2.append("rr=%.2f>%.2f and open8_frac=%.3f>.03" % (rr_k,rr_iterN_cut,open8_frac))
elif open8_Nspots>open_cut:
openS_cosmics=scipy.ndimage.binary_opening(cosmics,connS)
openS_Nspots=openS_cosmics.sum()
if openS_Nspots>open_cut and rr_k>open_rr_cut:
cut_labels2.append(k)
cut_details2.append("sum(S)=%s>%s sum(8)=%s>%s & rr=%.2f>%.2f" % (openS_Nspots,open_cut,open8_Nspots,open_cut,rr_k,open_rr_cut))
########ax.set_title(ax.get_title().replace('residual/#points','rr')+'\nsum(S)=%s sum(8)=%s' % (openS_Nspots,open8_Nspots),size=10.5)
########else:
######## f=imagetools.AxesStripText(f,allticks=True,titles=False)
######## f.savefig(plotdir+'pltRevise%s_bad_labels-polyfit_num%.3i' % (OFB,count))
######## close(f);del f
if consider_spikes:
if PLOT_ON_OFF:
f=plotlabels(spike_overlap_stats.keys(),params=spike_overlap_stats.values())
f.suptitle('before')
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltSatSpikes%s-1before' % (OFB,))
close(f);del f
results_spike_reset=array(spike_overlap_reset.values())
ll_spikes=array(spike_overlap_reset.keys())
ll_spike_reset=ll_spikes[results_spike_reset]
BBCRseg=reset_labels(ll_spike_reset,BBCRseg)
if PLOT_ON_OFF:
f=plotlabels(spike_overlap_stats.keys(),params=spike_overlap_stats.values())
f.suptitle('after')
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltSatSpikes%s-2after' % (OFB,))
close(f);del f
#iter1: find detections from iter1 that fail the number of filled pixels cut
fillSET=set(BBCRlabels[hole_cuts])
fillLL=array(list(fillSET.difference(ll_spike_reset)))
else:
fillLL=BBCRlabels[hole_cuts]
try:
fillLL_Nholes_filled=Nholefilledpixels[fillLL-1]
except IndexError:
fillLL_Nholes_filled=0
for l in fillLL:
if l in cut_labels2:
ind=cut_labels2.index(l)
cut_labels2.pop(ind)
cut_details2.pop(ind)
if PLOT_ON_OFF:
params=['label=%s #holes=%s' % (hole_l, hole_N) for hole_l,hole_N in zip(fillLL,fillLL_Nholes_filled)]
f=plotlabels(fillLL,params=params)
f.savefig(plotdir+'pltRevise%s_bad_labels-holes_1before-unfiltered' % (OFB,))
close(f);del f
f=plotlabels(fillLL,params=params,background=CRfiltimage)
f.savefig(plotdir+'pltRevise%s_bad_labels-holes_1before-filtered' % (OFB,))
close(f);del f
#iter1: END
#BEGIN: RING
Nring_fixed1=0
for l in fillLL:
sl2=BBCRslices[l-1]
spots_ring=BBCRseg[sl2]==l
if PLOT_ON_OFF: newring,ringstat=ringer(spots_ringer=spots_ring.copy(),l_ringer=l,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
else: newring,ringstat=ringer_noplot(spots_ringer=spots_ring.copy(),l_ringer=l,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
if ringstat==0:
Nring_fixed1+=1
BBCRseg[sl2][spots_ring]=0
BBCRseg[sl2][newring]=l
else:
cut_labels2.append(l)
cut_details2.append(ringstat)
#moved the "after" plot to the end
#END: RING
#START: iter2
cut_labels={2:cut_labels2}
cut_details={2:cut_details2}
CRblendeds={1:BBCRseg.copy()}
if bthresh1<25: #for things with bthresh1 very low, make the iterN thing more realistic
bthreshs={2:40.0,3:60.0,4:80.0,5:100.0,6:120.0,7:140.0,8:160.0}
elif bthresh1<60: #this used to always be the setup!
bthreshs={2:60.0,3:80.0,4:100.0,5:120.0,6:140.0,7:160.0,8:180.0}
elif bthresh1<80: #don't want to have bthresh2<bthresh1 ever!
bthreshs={2:80.0,3:100.0,4:120.0,5:140.0,6:160.0,7:180.0,8:200.0}
else:
bthreshs={2:100.0,3:120.0,4:140.0,5:160.0,6:180.0,7:200.0,8:220.0}
iterN_final=max(bthreshs.keys())
star_ERASE=zeros(CRseg0.shape,dtype=bool)
antidrop_extras=''
iterN_stats={}
for iterN in range(2,iterN_final+1):
exec "t%s=time.time()" % (iterN)
iterN_stats[iterN]={'DONE-swallowed':0,'DONE-PREVsplit':0,'DONE-Multiple Ringers':0,'NEXT-rr':0,'NEXT-ring failed':0,'NEXT-open':0,'NEXT-rr=nan size>9 open8=0':0,'REMOVED-ERASE':0,'DONE-ERASE FAILED':0,'DONE-PASSED ALL':0}
rr_iterN_cut+=.1
#iter2: take detections from iter1 that fail the cuts and reset them to the way they were at iter0
CRsegN=reset_labels(cut_labels[iterN],CRblendeds[iterN-1]) #reset cut_labels2 from CRblendeds[iterN-1] to CRseg0
bthresh_tag=('bthresh%.3i' % (bthreshs[iterN]))
#iter2: take cut detections from iter1 (which have been reset to iter0) and reblend them at a higher thresh
CRblendeds[iterN]=blocked_blender(bthreshs[iterN],CRfiltimage,cut_labels[iterN],BBCRslices,starbools,CRsegN)
CRblendeds_slices=scipy.ndimage.find_objects(CRblendeds[iterN])
del CRsegN
print "had in iter1: ",(CRblendeds[1]>0).sum()
print "now have in iter"+str(iterN)+": ", (CRblendeds[iterN]>0).sum()
#iter2: plot detections from iter2 and determine if they pass the iter3 cuts or not
count=1;cut_labels[iterN+1]=[];cut_details[iterN+1]=[]
if PLOT_ON_OFF: f=figure(figsize=(22,13.625))
for i,probl in enumerate(cut_labels[iterN]):
title_extras=''
Nax= i % 9 + 1
if Nax==1:
if i!=0:
if PLOT_ON_OFF:
f.suptitle('orange "X" = original masked spots \t white "X" = masked spots when blending at bthresh=%.3i\nblack "o" = masked spots after raising non-poly cuts to bthresh=%.3f' % (bthresh1,bthreshs[iterN],))
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f=imagetools.AxesCompact(f,.1)
f.savefig(plotdir+'pltRevise%s_anti-drop_%s-%slabel_group_num%.3i' % (OFB,bthresh_tag,antidrop_extras,count))
close(f);del f
f=figure(figsize=(22,13.625))
antidrop_extras=''
count+=1
try:
sl=CRblendeds_slices[probl-1]
except IndexError:
iterN_stats[iterN]['DONE-swallowed']+=1 #adam: hit this once, it'll just say continue and hope this doesn't make problems later! (check ~/my_data/SUBARU/RXJ2129/W-C-RC_2012-07-23/SCIENCE/SUPA7-23/SCIENCE/SUPA013516 _6OCF.fits)
continue
if sl==None: #if this label was swallowed by another label, then skip it!
iterN_stats[iterN]['DONE-swallowed']+=1
continue
#iter2: RING now do the ringer thing!
sl2=imagetools.slice_expand(sl,3)
iterNmask=CRblendeds[iterN][sl2]==probl #change this so plot looks right
if not iterNmask.any(): #if this label was swallowed by another label, then skip it!
iterN_stats[iterN]['DONE-swallowed']+=1
continue
holefilledpixels=count_hole_filled_pixels(iterNmask)
run_ring_bool= holefilledpixels>holefilledpixels_cut
if run_ring_bool:
if PLOT_ON_OFF: newring,ringstat=ringer(spots_ringer=iterNmask.copy(),l_ringer=probl,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
else: newring,ringstat=ringer_noplot(spots_ringer=iterNmask.copy(),l_ringer=probl,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
if ringstat==0:
CRblendeds[iterN][sl2][iterNmask]=0
CRblendeds[iterN][sl2][newring]=probl
title_extras+=" Used Ring(stat=0)"
iterNmask=newring
holefilledpixels=count_hole_filled_pixels(iterNmask)
else:
title_extras+=" Used Ring(stat!=0)"
else:ringstat=0
#iter2: get needed info for the BLOB cut!
blended_only_spots= iterNmask.copy()
open8_blended_only_spots=scipy.ndimage.binary_opening(blended_only_spots,conn8)
openS_blended_only_spots=scipy.ndimage.binary_opening(blended_only_spots,connS)
open8_Nspots= open8_blended_only_spots.sum();openS_Nspots=openS_blended_only_spots.sum()
del open8_blended_only_spots,openS_blended_only_spots
#iter2: STRETCH now do the iter_track_stretch thing!
slE=imagetools.slice_expand(sl,100)
if bthreshs[iterN]>=80:
iterNmaskE=CRblendeds[iterN][slE]==probl
cosmics,stretch_count=iter_track_stretch(iterNmaskE.copy(),CRfiltimage[slE] ,bthreshs[iterN]-20,BASE,probl,starbools[slE],name_extras="_ADloop",rr_per_step=.1,ts_rr_cut=1.0,track_len_cut=13)
if stretch_count:
stretch_pixels=cosmics*logical_not(iterNmaskE)
stretch_unnecessary=(CRblendeds[iterN][slE].copy()>0) * stretch_pixels
print "number of stretched pixels already covered=",stretch_unnecessary.sum()," of total ",stretch_pixels.sum()
stretch_necessary=stretch_pixels * logical_not(stretch_unnecessary)
CRblendeds[iterN][slE][stretch_necessary]=probl
title_extras+=" Used Stretch"
#iter2: do the plotting by using a square slice within the slE slice!
slsq=imagetools.slice_square(scipy.ndimage.find_objects(asarray(CRblendeds[iterN][slE]==probl,dtype=int))[0])
slsq3=imagetools.slice_expand(slsq,3)
stamp=image[slE][slsq3]
iter1mask=BBCRseg[slE][slsq3]==probl
iterNmask_slsq3=CRblendeds[iterN][slE][slsq3]==probl #this is iterNmask, but in the slsq3 form
iter0mask=iterNmask_slsq3*(CRseg0[slE][slsq3]>0)
yy0,xx0=nonzero(iter0mask)
masksize0=len(xx0)
#iter2: determine if iter2 detections pass the iter3 cuts or not
masksize=iterNmask_slsq3.sum()
open8_frac=float(open8_Nspots)/masksize
if PLOT_ON_OFF:
ax=f.add_subplot(3,3,Nax)
yy1,xx1=nonzero(iter1mask)
ax.scatter(xx1,yy1,marker='x',color='w',lw=.5,alpha=.5)
ax,rr_i=cosmicpoly(probl,iterNmask_slsq3,stamp,ax,marker='s',s=40)
if isnan(rr_i):
ax.set_title('label %s: rr=nan' % (probl,))
yyN,xxN=nonzero(iterNmask_slsq3)
ax.imshow(stamp,interpolation='nearest',origin='lower left')
ax.scatter(xxN,yyN,marker='o',edgecolors='k',facecolors='None')
ax.scatter(xx0,yy0,marker='x',color='orange',s=50)
ax.set_ylim(0,slsq3[0].stop-slsq3[0].start);ax.set_xlim(0,slsq3[1].stop-slsq3[1].start)
else:
rr_i,poly_i,polytype_i=polyfitter(iterNmask_slsq3,degree=5)
#START: PREVsplit
autopass=False
if not (run_ring_bool and ringstat==0): #if we didn't successfully run the ringer function
#check if the mask has been split into 2 pieces
iterPREVmask_slsq3=CRblendeds[iterN-1][slE][slsq3]==probl #this is iterNmask, but for the N-1 iteration
contig_checkseg,contig_check_NlabelsN=scipy.ndimage.label(iterNmask_slsq3,conn8)
contig_checkseg,contig_check_NlabelsPREV=scipy.ndimage.label(iterPREVmask_slsq3,conn8)
names="iterN=",iterN,"probl=",probl,"contig_check_NlabelsN=",contig_check_NlabelsN,"contig_check_NlabelsPREV=",contig_check_NlabelsPREV
del contig_checkseg
if contig_check_NlabelsN>contig_check_NlabelsPREV: #if label has been split-up take the last one
Nopen8_iterPREVmask_slsq3=scipy.ndimage.binary_opening(iterPREVmask_slsq3,conn8).sum()
Ntotal_iterPREVmask_slsq3=iterPREVmask_slsq3.sum()
open8_frac_PREV=float(Nopen8_iterPREVmask_slsq3)/Ntotal_iterPREVmask_slsq3
if open8_frac<=.3 and open8_frac_PREV<open8_frac+.2:
#open8_iterPREVmask_slsq3=scipy.ndimage.binary_opening(iterPREVmask_slsq3,conn8)
#iterPREV_8less=iterPREVmask_slsq3-open8_iterPREVmask_slsq3
#contig_checkseg,contig_check_NlabelsPREV_8less=scipy.ndimage.label(iterPREV_8less,conn8)
#if contig_check_NlabelsN>contig_check_NlabelsPREV_8less: #if label has been split-up take the last one
iterN_stats[iterN]['DONE-PREVsplit']+=1
CRblendeds[iterN][slE][slsq3][iterPREVmask_slsq3]=probl
iterNmask_slsq3=iterPREVmask_slsq3
if PLOT_ON_OFF:ax.set_title('label=%s DONE!!! PREV declared iterN-1 better!\nI dont want to break this up into more pieces!' % (probl))
print ('label=%s DONE!!! PREV declared iterN-1 better!\nI dont want to break this up into more pieces!' % (probl))
antidrop_extras+='PREVsplit-'
autopass=True
#END: PREVsplit
if not autopass and ((ringstat=="Circle of Cosmics" or ringstat=="none in square pattern") and iterN>=3 and open8_frac<.2):
iterN_stats[iterN]['DONE-Multiple Ringers']+=1
more_title_extras="DONE!!! Circle of Cosmics rr=%.2f size=%s sum(8)=%s sum(S)=%s open8_frac=%.2f<.2" % (rr_i, iterNmask_slsq3.sum(), open8_Nspots, openS_Nspots,open8_frac)
antidrop_extras+='CosmicCircle-'
elif not autopass and (open8_frac>.03 and rr_i>rr_iterN_cut): #if not autopass and (more than 3% of the pixels in cosmics are open8 pixels, then cosmics is a blob, so raise the thresh
iterN_stats[iterN]['NEXT-rr']+=1
cut_labels[iterN+1].append(probl)
cut_details[iterN+1].append("rr=%.2f>%.2f open8_frac=%.2f>.03" % (rr_i,rr_iterN_cut,open8_frac))
more_title_extras=('this iter (%s of %s) details: ' % (iterN+1,iterN_final))+cut_details[iterN+1][-1]
elif not autopass and (ringstat!=0 and holefilledpixels>holefilledpixels_cut):
iterN_stats[iterN]['NEXT-ring failed']+=1
cut_labels[iterN+1].append(probl)
cut_details[iterN+1].append(ringstat)
more_title_extras=('this iter (%s of %s) details: ' % (iterN+1,iterN_final))+cut_details[iterN+1][-1]
elif not autopass and (open8_Nspots>open_cut and openS_Nspots>open_cut and rr_i>open_rr_cut):
iterN_stats[iterN]['NEXT-open']+=1
cut_labels[iterN+1].append(probl)
cut_details[iterN+1].append("sum(S)=%s>%s sum(8)=%s>%s rr=%.2f>%.2f" % (openS_Nspots,open_cut,open8_Nspots,open_cut,rr_i,open_rr_cut))
more_title_extras=('this iter (%s of %s) details: ' % (iterN+1,iterN_final))+cut_details[iterN+1][-1]
elif not autopass and (isnan(rr_i) and masksize>9 and not open8_Nspots):
iterN_stats[iterN]['NEXT-rr=nan size>9 open8=0']+=1
cut_labels[iterN+1].append(probl)
cut_details[iterN+1].append("rr=nan size>9 open8_Nspots=0")
more_title_extras=('this iter (%s of %s) details: ' % (iterN+1,iterN_final))+cut_details[iterN+1][-1]
elif not autopass and (isnan(rr_i) and masksize>9 and masksize0<3 and open8_frac>.6): #if not autopass and (this is true then it might be a star!
#make sure that the full mask and its original-detection pixels each form a single 8-connected piece (i.e. the mask isn't in 2 pieces)
contig_checkseg,contig_check_NlabelsN=scipy.ndimage.label(iterNmask_slsq3,conn8)
contig_checkseg,contig_check_Nlabels0=scipy.ndimage.label(iter0mask,conn8)
del contig_checkseg
contig_check_Nlabels=max(contig_check_NlabelsN,contig_check_Nlabels0)
#make sure that the hottest pixel and the 2nd hottest are next to one another and both are 8 connected with the 3rd hottest
stampmax=stamp[iterNmask_slsq3].max()
maxspot=stamp==stampmax
stamp_no_max=stamp.copy();stamp_no_max[maxspot]=0;stamp_no_max[logical_not(iterNmask_slsq3)]=0
maxspot2=stamp_no_max==stamp_no_max.max()
max_and_2nd_next=sum(maxspot2*binary_dilation(maxspot))>0
max_or_2=maxspot2+maxspot
stamp_no_max_or_2=stamp.copy();stamp_no_max_or_2[max_or_2]=0;stamp_no_max_or_2[logical_not(iterNmask_slsq3)]=0
maxspot3=stamp_no_max_or_2==stamp_no_max_or_2.max()
max_and_2nd_next_to_3rd=sum(max_or_2*binary_dilation(maxspot3,conn8))>1
if max_and_2nd_next and max_and_2nd_next_to_3rd and contig_check_Nlabels==1: #if this is true then it might be a star! (this should drastically reduce the "pac-man effect"!)
iterN_stats[iterN]['REMOVED-ERASE']+=1
more_title_extras="ERASED!!! (should be star) rr=nan open8_frac=%.2f>.6 Nlabels=1" % (open8_frac,)
more_title_extras+="\nPASSED all OF: max_and_2nd_next=%s max_and_2nd_next_to_3rd=%s" % (max_and_2nd_next,max_and_2nd_next_to_3rd ) #if this is true then it might be a star! (this should drastically reduce the "pac-man effect"!)
star_ERASE[slE][slsq3][iterNmask_slsq3]=1
else:
iterN_stats[iterN]['DONE-ERASE FAILED']+=1
more_title_extras="DONE!!! Didn't Pass ERASE (not star) rr=nan open8_frac=%.2f>.6" % (open8_frac,)
more_title_extras+="\nFAILED one OF:Nlabels=%s>1 max_and_2nd_next=%s max_and_2nd_next_to_3rd=%s" % (contig_check_Nlabels,max_and_2nd_next,max_and_2nd_next_to_3rd )
else:
iterN_stats[iterN]['DONE-PASSED ALL']+=1
more_title_extras="DONE!!! rr=%.2f iterNmask.sum()=%s sum(8)=%s sum(S)=%s open8_frac=%.2f" % (rr_i, iterNmask_slsq3.sum(), open8_Nspots, openS_Nspots,open8_frac)
#START: PREV
#PREV: check how the mask compares to the old mask!
######## else:
######## #check and see if the deleted part was clean (not conn8,etc.)!
######## PREV_removed=iterPREVmask_slsq3-iterNmask_slsq3
######## PREV_important=PREV_removed-(PREV_removed*mahotas.sobel(iterNmask_slsq3))
######## open8_PREV_important=scipy.ndimage.binary_opening(PREV_important,conn8)
######## N_PREV_important=float(PREV_important.sum())
######## N8_PREV_important=float(open8_PREV_important.sum())
######## open8_frac_PREV=N8_PREV_important/N_PREV_important
######## if open8_frac_PREV<.5 and (N_PREV_important-N8_PREV_important)>3:
######## PREV_good_removal=scipy.ndimage.binary_opening(PREV_important,conn8)+scipy.ndimage.binary_opening(PREV_important,connS)
######## PREV_putback=PREV_important-PREV_good_removal
######## skimage.morphology.remove_small_objects(PREV_putback,3,connectivity=2,in_place=True) #conn8
######## if PREV_putback.sum()>3:
######## PREV_seg,N_PREV_segs=scipy.ndimage.label(PREV_putback,conn8)
######## around_iterN=scipy.ndimage.binary_dilation(iterNmask_slsq3,conn8)
######## PREV_segs_nearby=unique(PREV_seg[around_iterN]).tolist()
######## try:PREV_segs_nearby.remove(0)
######## except ValueError:pass
######## if PREV_segs_nearby:
######## Nmask_old=iterNmask_slsq3.copy()
######## add_xx,add_yy=[],[]
######## for PREV_l in PREV_segs_nearby:
######## add_l=PREV_seg==PREV_l
######## l_yy,l_xx=nonzero(add_l)
######## add_xx+=l_xx.tolist()
######## add_yy+=l_yy.tolist()
######## iterNmask_slsq3[add_l]=True
######## print "added %s to label=%s" % (add_l.sum(),probl)
######## #now add the labels from #PREV_segs_nearby to the mask
######## f_PREV=figure(figsize=(10,12))
######## ax_PREV=f_PREV.add_subplot(111)
######## ax_PREV.imshow(stamp,interpolation='nearest',origin='lower left')
######## yy_PREV,xx_PREV=nonzero(Nmask_old)
######## ax_PREV.scatter(xx_PREV,yy_PREV,marker='x',s=60,edgecolors='w',facecolors='none',label='Nmask_old')
######## ax_PREV.scatter(add_xx,add_yy,s=70,marker='x',edgecolors='purple',facecolors='none',label='actually put back')
######## #yy_PREV,xx_PREV=nonzero(PREV_important)
######## #scatter(xx_PREV,yy_PREV,marker='x',edgecolors='w',facecolors='none',label='PREV_important')
######## yy_PREV,xx_PREV=nonzero(PREV_good_removal)
######## ax_PREV.scatter(xx_PREV,yy_PREV,marker='o',edgecolors='k',facecolors='k',label='PREV_good_removal')
######## yy_PREV,xx_PREV=nonzero(PREV_putback)
######## ax_PREV.scatter(xx_PREV,yy_PREV,s=70,marker='s',edgecolors='purple',facecolors='none',label='PREV_putback')
######## legend()
######## f_PREV.suptitle('pltRevise%s_PREV-%s-label%.4i' % (OFB,bthresh_tag,probl))
######## f_PREV=imagetools.AxesCompact(f_PREV,.1)
######## f_PREV.savefig(plotdir+'pltRevise%s_PREV-%s-label%.4i' % (OFB,bthresh_tag,probl))
######## antidrop_extras+='PREVputback-'
#END: PREV
if PLOT_ON_OFF: ax.set_title(ax.get_title()+'\n'+more_title_extras+title_extras+('\nlast iter (%s of %s) details: ' % (iterN,iterN_final))+cut_details[iterN][i],size=10)
if PLOT_ON_OFF:
f.suptitle('orange "X" = original masked spots \t white "X" = masked spots when blending at bthresh=%.3i\nblack "o" = masked spots after raising non-poly cuts to bthresh=%.3f' % (bthresh1,bthreshs[iterN],))
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f=imagetools.AxesCompact(f,.1)
f.savefig(plotdir+'pltRevise%s_anti-drop_%s-%slabel_group_num%.3i' % (OFB,bthresh_tag,antidrop_extras,count))
antidrop_extras=''
close(f);del f
#ERASE the removed stars
CRblendeds[iterN_final][star_ERASE]=0
#iter2: this is it, all I need to do is to reset anything that's filled. Just to be safe
BBCRblend_comparable=CRblendeds[iterN_final].copy()
BBCRblend_comparable=asarray(BBCRblend_comparable,dtype=int)
#Save Erased Stars
hdu=astropy.io.fits.PrimaryHDU(asarray(star_ERASE,dtype=int))
hdu.header=CRfiltheader
fl_erase=compare_dir+'BB_ERASED_'+bthresh1_tag+'_BBCR_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
hdu.writeto(fl_erase,overwrite=True)
files2check.append(fl_erase)
#RING: plot rings, the "after" version
if PLOT_ON_OFF:
f=plotlabels(fillLL,segments=BBCRblend_comparable,slices=BBCRslices,params=params)
f.savefig(plotdir+'pltRevise%s_bad_labels-holes_2after' % (OFB,))
close(f);del f
#END: iter2
#START: LastStretch
tLS=time.time()
#last step should be to fit a line for each cosmic and connect any close co-linear tracks
LastStretchmask=CRblendeds[iterN_final].copy()>0
LastStretchseg,LastStretch_Nlabels=scipy.ndimage.label(LastStretchmask,conn8)
LastStretchslices=scipy.ndimage.find_objects(LastStretchseg)
LastStretchregs=skimage.measure.regionprops(LastStretchseg)
LastStretcharea=array([LastStretchregs[i].area for i in range(LastStretch_Nlabels)])
LastStretchlabels=arange(1,LastStretch_Nlabels+1)
BIGll=LastStretchlabels[LastStretcharea>6]
LastStretch_rr_cut=1.8
LastStretch_Ncosmics_added=0
LastStretch_Npixels_added=[]
for l in BIGll:
sl_l=imagetools.slice_expand(LastStretchslices[l-1],20)
seg_l=LastStretchseg[sl_l]
spots=seg_l==l
yy_lin,xx_lin=nonzero(spots)
track_length=sqrt((xx_lin.max()-xx_lin.min())**2+(yy_lin.max()-yy_lin.min())**2)
xx_plot=arange(xx_lin.min(),xx_lin.max(),.1)
yy_plot=arange(yy_lin.min(),yy_lin.max(),.1)
rr,poly,polytype=polyfitter(spots,1)
if rr<LastStretch_rr_cut and track_length>5:
ayy,axx=nonzero((seg_l>0)*logical_not(spots))
if polytype=='x_of_y':
aX=poly(ayy)
aOffsets=(axx-aX).__abs__()
elif polytype=='y_of_x':
aY=poly(axx)
aOffsets=(ayy-aY).__abs__()
extend_track_spots=aOffsets<LastStretch_rr_cut
Npixels=extend_track_spots.sum()
if Npixels:
LastStretch_Ncosmics_added+=1
LastStretch_Npixels_added.append(Npixels)
star_stamp=starbools[sl_l]
stretched_spots,stretch_count=iter_track_stretch(spots.copy(),CRfiltimage[sl_l] ,dt_times_pt01,BASE,l,star_stamp,name_extras='_LastStretch',ts_rr_cut=1.8,rr_per_step=.2)
fill_these=LastStretchseg[sl_l][stretched_spots]==0
LastStretchseg[sl_l][stretched_spots][fill_these]=l
LastStretch_Npixels_added=asarray(LastStretch_Npixels_added)
#END: LastStretch
#START: CR/star overlap
tOverlap=time.time()
#setup final masks which include the CR/star overlap
BBCRmask_final=LastStretchseg>0
BBCRmask_final[CRoverlapSTAR]=True #put spots where CRseg0 and starseg overlap back into final mask (this will mainly include more saturation spikes)
BBCRimage_final=image.copy()
BBCRimage_final[BBCRmask_final]=0 #CRs=0 and CRseg0/starseg overlap=0 too
#plot this to make sure I'm not making an awful mistake
BBCRimage_plot_comp=image.copy()
BBCRimage_plot_comp[LastStretchseg>0]=0 #just the CRs=0
if PLOT_ON_OFF:
f=imagetools.ImageWithSpots([BBCRimage_plot_comp,BBCRimage_final],name1='image with masks from before the CR-star overlap was replaced', name2='image with CR-star overlap masked',mode='alpha')
f.savefig(plotdir+'pltRevise%s_CR-star_overlap' % (OFB,))
close(f);del f
#END: CR/star overlap
#START: 400
t400=time.time()
#now add on the stuff that you only pick up with a very low threshold (mainly for the low seeing objects)
CR_filtfl_ft400=CR_filtfl.replace('_CRN-cosmics','_FT400_CRN-cosmics')
CRfilt_ft400=imagetools.GetImage(CR_filtfl_ft400)
BBCRmask_final_copy=BBCRmask_final.copy()
CR400=CRfilt_ft400>400
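#threshold the low-threshold (FT400) filtered image at 400 to pick up faint tracks the nominal detection misses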
CRseg_400_start,CR_400_Nlabels=scipy.ndimage.label(CR400,conn8)
CRslices_400=scipy.ndimage.find_objects(CRseg_400_start)
CRregs_400=skimage.measure.regionprops(CRseg_400_start,intensity_image=CRfilt_ft400)
maxval_400=array([CRregs_400[i].max_intensity for i in range(CR_400_Nlabels)])
eccentricity_400=array([CRregs_400[i].eccentricity for i in range(CR_400_Nlabels)])
area_400=array([CRregs_400[i].area for i in range(CR_400_Nlabels)])
CRll_400=arange(CR_400_Nlabels)+1
ok_label_400=[]
s2t_400=[]
BBCR_frac_400=[]
for l,size_l in zip(CRll_400,area_400):
sl=imagetools.slice_expand(CRslices_400[l-1],2)
spots=CRseg_400_start[sl]==l
sl2_height,sl2_width=spots.shape
yy,xx=nonzero(spots)
spots_beside_track=scipy.ndimage.binary_dilation(spots,conn4)*logical_not(spots)
beside_track_mean=(image[sl][spots_beside_track]-back_im).mean()
track_mean=(image[sl][spots]-back_im).mean()
side2track_ratio=beside_track_mean/track_mean
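#side2track_ratio compares the mean background-subtracted flux just outside the mask to the mean inside it; a small ratio means the flux is sharply confined to the track (CR-like) while a large ratio suggests extended wings (star/galaxy-like)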
s2t_400.append(side2track_ratio)
BBCR_frac_400.append(BBCRmask_final_copy[sl][spots].mean())
if sl2_width<6 and sl2_height>200 and (sl2_height/sl2_width)>25:ok_label_400.append(False) #get rid of saturation spikes
elif starbools[sl][spots].any():ok_label_400.append(False)
elif (xx==xx[0]).all():ok_label_400.append(False)#get rid of str8 up and down stuff!
else:ok_label_400.append(True)
BBCR_frac_400=array(BBCR_frac_400)
s2t_400=array(s2t_400)
ok_label_400=array(ok_label_400)
s2t_400_cutval=.33 #was .4
eccentricity_400_cutval=.88 #was .88
area_400_cutval=5 #was 6
maxval_400_cutval=2000.0 #was 2000
standard_cut_400=ok_label_400*(s2t_400<s2t_400_cutval)*(eccentricity_400>eccentricity_400_cutval)*(area_400>area_400_cutval)*(maxval_400>maxval_400_cutval)
fives_cut_400=ok_label_400*(eccentricity_400>.91)*(area_400==5)*(maxval_400>3500)*(s2t_400<s2t_400_cutval) #was without s2t cut
fours_cut_400=ok_label_400*(eccentricity_400>.95)*(area_400==4)*(maxval_400>3500)*(s2t_400<s2t_400_cutval) #was without s2t cut
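#the standard cut keeps elongated (e>0.88), bright (peak>2000), >5-pixel detections with faint surroundings; the fives/fours cuts recover 5- and 4-pixel tracks that are even more elongated and much brighter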
all_cut_400=standard_cut_400+fives_cut_400+fours_cut_400#+brighter_circular_cut_400
CRseg_400_final=CRseg_400_start.copy()
for l in CRll_400[logical_not(all_cut_400)]:
sl=CRslices_400[l-1]
spots=CRseg_400_final[sl]==l
CRseg_400_final[sl][spots]=0
for l in CRll_400[all_cut_400]:
sl=CRslices_400[l-1]
sl_l=imagetools.slice_expand(sl,25)
spots=CRseg_400_final[sl_l]==l
star_stamp=starbools[sl_l]
try:stretched_spots,stretch_count=iter_track_stretch(spots.copy(),CRfilt_ft400[sl_l] ,dt_times_pt01*2,BASE,l,star_stamp,name_extras='_400',rr_per_step=.25)
except ValueError:continue
if stretch_count:
BBCR_frac_l=BBCRmask_final_copy[sl_l][stretched_spots].mean()
if BBCR_frac_l<BBCR_frac_400[l-1]: #only update things if it's better
BBCR_frac_400[l-1]=BBCR_frac_l
CRseg_400_final[sl_l][stretched_spots]=l
CRslices_400[l-1]=sl_l
#params=["e=%.2f max=%.1f frac_done=%.2f\ns2t=%.2f (.35 cut)" % (ecc,maxval,fc,s2t) for ecc, maxval,fc,s2t in zip(eccentricity_400,maxval_400,BBCR_frac_400,s2t_400)]
params=["e=%.2f max=%.1f\ns2t=%.2f (<.33)" % (ecc,maxval,s2t) for ecc,maxval,s2t in zip(eccentricity_400,maxval_400,s2t_400)]
tryitllS=CRll_400[standard_cut_400*(BBCR_frac_400<.9)]
if len(tryitllS) and PLOT_ON_OFF:
f=plotlabels(tryitllS,segments=CRseg_400_final,slices=CRslices_400,params=params,background=image)
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltRevise%s_extras_standard_cut_400' % (OFB,))
close(f);del f
#GalFix# does the s2t cut do anything?
standard_cut_400_NOT_s2t=ok_label_400*(s2t_400>s2t_400_cutval)*(eccentricity_400>eccentricity_400_cutval)*(area_400>area_400_cutval)*(maxval_400>maxval_400_cutval)
tryitllS_NOT_s2t=CRll_400[standard_cut_400_NOT_s2t*(BBCR_frac_400<.9)]
if len(tryitllS_NOT_s2t) and PLOT_ON_OFF:
f=plotlabels(tryitllS_NOT_s2t,segments=CRseg_400_final,slices=CRslices_400,params=params,background=image)
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltRevise%s_extras_standard_cut_400_NOT_s2t' % (OFB,))
close(f);del f
tryit=fives_cut_400*logical_not(standard_cut_400)*(BBCR_frac_400<.9)
tryitll5=CRll_400[tryit]
if len(tryitll5) and PLOT_ON_OFF:
f=plotlabels(tryitll5,segments=CRseg_400_final,slices=CRslices_400,params=params,background=image)
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltRevise%s_extras_fives_cut_400' % (OFB,))
close(f);del f
tryit=fours_cut_400*logical_not(standard_cut_400)*(BBCR_frac_400<.9)
tryitll4=CRll_400[tryit]
if len(tryitll4) and PLOT_ON_OFF:
f=plotlabels(tryitll4,segments=CRseg_400_final,slices=CRslices_400,params=params,background=image)
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltRevise%s_extras_fours_cut_400' % (OFB,))
close(f);del f
ll_400_final=tryitll4.tolist()+tryitll5.tolist()+tryitllS.tolist()
totally_new_400=0
for l in ll_400_final:
fc=BBCR_frac_400[l-1]
if fc==0.0: totally_new_400+=1
#END: 400
#START: save results
tsave=time.time()
FINALmask=BBCRmask_final.copy()
for l in ll_400_final:
sl=CRslices_400[l-1]
spots=CRseg_400_final[sl]==l
FINALmask[sl][spots]=True
FINALimage=image.copy()
FINALimage[FINALmask]=0 #CRs=0 and CRseg0/starseg overlap=0 too
FINALseg,FINAL_Nlabels=scipy.ndimage.label(FINALmask,conn8)
hdu=astropy.io.fits.PrimaryHDU(FINALimage)
hdu.header=CRfiltheader
fl_revised=compare_dir+'BBrevised_'+bthresh1_tag+'_BBCR_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
hdu.writeto(fl_revised,overwrite=True)
files2check.append(fl_revised)
#files2check.append(compare_dir+'BBrevised_bfrac0pt0100_BBCR_%s_%s.%s.fits' % (OBJECT,FILTER,BASE))
#save output mask for bonnpipeline code
CR_newsegfl=CR_segfl.replace('SEGMENTATION_CRN-cosmics','SEGMENTATION_BB_CRN-cosmics')
hdu=astropy.io.fits.PrimaryHDU(FINALseg)
hdu.header=CRfiltheader
hdu.writeto(CR_newsegfl ,overwrite=True)
tend=time.time()
#END: save results
#START: print stats!
times_start=asarray([t0, t1, t2, t3, t4, t5, t6, t7, t8, tLS, tOverlap, t400, tsave, tend])
things=['iter0','iter1','iter2','iter3','iter4','iter5','iter6','iter7','iter8','LastStretch','CRonSTAR','FT400','SAVE']
times_took=(times_start[1:]-times_start[:-1])/60.0
time_total=(tend-t0)/60.0
time_percent=times_took/time_total*100
thing_times=[str(round(tt,2)) for tt in times_took]
thing_time_percent=["("+str(round(tt,0))+"%)" for tt in time_percent]
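#illustrative example (values assumed): with times_took ~ [0.85, ...] minutes and
#time_total ~ 28.3 minutes, the joined timing line built below reads roughly like
#"iter0 took 0.85 minutes (3.0%)", right-justified to the banner width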
end_str_print=''
#**set PLOT_ON_OFF=1**
BBstat_str="|***$$$~~~: "+"BB stats for the file="+fl+" :***$$$~~~|"
BBstat_len=len(BBstat_str)-2
BBstat_details="|***$$$~~~: MYSEEING=%.2f EXPTIME=%i RMS=%.2f " % (header['MYSEEING'],header['EXPTIME'],rms)
nl= BBstat_details+" %"+str(BBstat_len-len(BBstat_details)-10)+"s"
detections1=BBCRseg>0
end_str_print+= "\n"+"|"+"-"*BBstat_len+"|"+"\n"+BBstat_str+"\n"+nl % (" ")+":***$$$~~~|"+"\n|"+"-"*BBstat_len+"|"
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| iter0| # cosmics before blending :"+ str(CRseg0.max())
end_str_print+= "\n"+"| iter0| # masked pixels before blending :"+ str(detections0.sum())+ " %="+ str(detections0.mean())
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| iter1| # cosmics after blending #1 :"+ str(BBCRseg.max())
end_str_print+= "\n"+"| iter1| # masked pixels after blending #1 :"+ str(detections1.sum())+" %="+ str(detections1.mean())
end_str_print+= "\n"+"| iter1| # that are big enough (area>8) to be considered in raise thresh cut:"+ str(big_enough.sum())+" %="+ str( big_enough.mean())
end_str_print+= "\n"+"| iter1| # with large holes that will be sent to raise thresh cut:"+ str(hole_cuts.sum())+ " of those this many were fixed:"+ str(Nring_fixed1)
end_str_print+= "\n"+"| iter1| # with bad rr not great rr and open8 and openS ringer failed (i.e. masks considered in iterN):"+ str(len(cut_labels2))
end_str_print+= "\n|\n| iterN| iterations 2 thru 8 "
done_keys=asarray(["DONE-Multiple Ringers", "DONE-swallowed", "DONE-PASSED ALL", "DONE-PREVsplit", "DONE-ERASE FAILED"])
next_keys=asarray([ "NEXT-open", "NEXT-ring failed", "NEXT-rr", "NEXT-rr=nan size>9 open8=0"])
iterN_stats_all={"DONE-swallowed":0,"DONE-PREVsplit":0,"DONE-Multiple Ringers":0,"NEXT-rr":0,"NEXT-ring failed":0,"NEXT-open":0,"NEXT-rr=nan size>9 open8=0":0,"REMOVED-ERASE":0,"DONE-ERASE FAILED":0,"DONE-PASSED ALL":0}
done_all=0; next_all=0
for iterN in range(2,iterN_final+1):
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
done=0; next=0
for key in sort(iterN_stats[iterN].keys()):
iterN_stats_all[key]+=iterN_stats[iterN][key]
if 'DONE' in key: done+=iterN_stats[iterN][key]
if 'NEXT' in key: next+=iterN_stats[iterN][key]
done_all+=done;next_all+=next
done_str_total="| iter%s| %s DONE: " % (iterN,done)
removed_str_total="| iter%s| %s REMOVED-ERASED STAR CANDIDATES " % (iterN,iterN_stats[iterN]['REMOVED-ERASE'])
next_str_total="| iter%s| %s NEXT: " % (iterN,next)
done_vals=asarray([iterN_stats[iterN][key] for key in done_keys])
next_vals=asarray([iterN_stats[iterN][key] for key in next_keys])
done_str_pieces=["("+str(i+1)+": "+dk.replace("DONE-","")+") == "+str(iterN_stats[iterN][dk]) for i,dk in enumerate(done_keys[done_vals.argsort()[::-1]])]
done_str=done_str_total+" ".join(done_str_pieces)
next_str_pieces=["("+str(i+1)+": "+dk.replace("NEXT-","")+") == "+str(iterN_stats[iterN][dk]) for i,dk in enumerate(next_keys[next_vals.argsort()[::-1]])]
next_str=next_str_total+" ".join(next_str_pieces)
end_str_print+= "\n|"+next_time_line+"|\n"+done_str
end_str_print+= "\n"+next_str
end_str_print+= "\n"+removed_str_total
else:
end_str_print+= "\n|\n| iterN| iterations 2 thru 8 totals (NEXT stats aren't all that meaningful here)"
done_str_total="| iter%s| %s DONE: " % ("N",done_all)
removed_str_total="| iter%s| %s REMOVED-ERASED STAR CANDIDATES " % ("N",iterN_stats_all["REMOVED-ERASE"])
next_str_total="| iter%s| %s NEXT: " % ("N",next_all)
done_vals=asarray([iterN_stats_all[key] for key in done_keys])
next_vals=asarray([iterN_stats_all[key] for key in next_keys])
done_str_pieces=["("+str(i+1)+": "+dk.replace("DONE-","")+") == "+str(iterN_stats_all[dk]) for i,dk in enumerate(done_keys[done_vals.argsort()[::-1]])]
done_str=done_str_total+' '.join(done_str_pieces)
next_str_pieces=["("+str(i+1)+": "+dk.replace("NEXT-","")+") == "+str(iterN_stats_all[dk]) for i,dk in enumerate(next_keys[next_vals.argsort()[::-1]])]
next_str=next_str_total+" ".join(next_str_pieces)
end_str_print+= "\n"+done_str
end_str_print+= "\n"+next_str
end_str_print+= "\n"+removed_str_total
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| LastStretch| Masked a total of this many cosmics: "+ str(LastStretch_Ncosmics_added)
end_str_print+= "\n"+"| LastStretch| of which an average of this # of pixels was added on: "+ str(LastStretch_Npixels_added.mean())
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| CRonSTAR| Masked a total of this many cosmics: "+ str(CRoverlapSTAR_Ncosmics_mask_at_end)
try:
end_str_print+= "\n"+"| CRonSTAR| of which an average of this # of pixels was added on: "+ str(CRoverlapSTAR_Npixels_mask_at_end/CRoverlapSTAR_Ncosmics_mask_at_end)
except ZeroDivisionError:pass
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| FT400| Masked a total of this many cosmics: "+ str(len(ll_400_final))
end_str_print+= "\n"+"| FT400| of which these many were totally new: "+ str(totally_new_400)
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| FINAL| Total cosmics+sat spikes masked: "+ str(FINAL_Nlabels)
TOTAL_BBCR=FINAL_Nlabels-CRoverlapSTAR_Ncosmics_mask_at_end
RATE_BBCR=TOTAL_BBCR/header["EXPTIME"]
end_str_print+= "\n"+"| FINAL| Total cosmics masked: "+str(TOTAL_BBCR)
end_str_print+= "\n"+"| FINAL| cosmics masked per second exposed: "+str(RATE_BBCR)
end_str_print+= "\n"+"|"+"-"*BBstat_len+"|"
#asciiable data
end_str_print+= "\n"+"BBSSCR_stats-BB %s %s %.2f %.2f %s %i %.2f %i %i %i %i %i" % (BASE,header["FILTER"],header["MYSEEING"],RATE_BBCR,TOTAL_BBCR,header["EXPTIME"],rms,iterN_stats_all["REMOVED-ERASE"],FINAL_Nlabels,CRoverlapSTAR_Ncosmics_mask_at_end,totally_new_400, LastStretch_Ncosmics_added)
#ascii_names=["BASE","FILTER","SEEING","RATE_BBCR","TOTAL_BBCR","EXPTIME","RMS","ERASED","TOTAL","CRonSTAR","FT400_new","LastStretch"]
#ascii_vals= (BASE,header["FILTER"],header["MYSEEING"],RATE_BBCR,TOTAL_BBCR,header["EXPTIME"],rms,iterN_stats_all["REMOVED-ERASE"],FINAL_Nlabels,CRoverlapSTAR_Ncosmics_mask_at_end,totally_new_400, LastStretch_Ncosmics_added)
#end_str_print+= "\n"+"ascii %s\t%s\t%.2f\t%.2f\t%s\t%i\t%.2f\t%i\t%i\t%i\t%i\t%i" % (BASE,header["FILTER"],header["MYSEEING"],RATE_BBCR,TOTAL_BBCR,header["EXPTIME"],rms,iterN_stats_all["REMOVED-ERASE"],FINAL_Nlabels,CRoverlapSTAR_Ncosmics_mask_at_end,totally_new_400, LastStretch_Ncosmics_added)
#end_str_print+= "\n"+"\nascii_BB", BASE,header["FILTER"],header["MYSEEING"],RATE_BBCR,TOTAL_BBCR,header["EXPTIME"],rms,iterN_stats_all["REMOVED-ERASE"],FINAL_Nlabels,CRoverlapSTAR_Ncosmics_mask_at_end,totally_new_400, LastStretch_Ncosmics_added
end_str_print+= "\n"+"\nds9 -zscale -tile mode column "+" ".join(files2check)+" -zscale -lock frame image -lock crosshair image -geometry 2000x2000 &"
end_str_print+= "\n"+"\ndone with file="+fl+"\n"+"$"*BBstat_len+"\n\n"
#END: end_str_print+= "\n"+stats!
print end_str_print
| mit | 4,366,150,895,433,374,000 | 52.369538 | 368 | 0.682698 | false |
PaulWay/spacewalk | client/solaris/rhnclient/rhn/client/rhnLog.py | 1 | 1423 | #!/usr/bin/python
#
import time
import string
import config
class Log:
"""
attempt to log all interesting stuff, namely, anything that hits
the network, any error messages, package installs, etc
""" # " emacs sucks
def __init__(self):
self.app = "rhn client"
self.cfg = config.initUp2dateConfig()
def log_debug(self, *args):
if self.cfg["debug"] > 1:
apply(self.log_me, args, {})
if self.cfg["isatty"]:
print "D:", string.join(map(lambda a: str(a), args), " ")
def log_me(self, *args):
self.log_info = "[%s] %s" % (time.ctime(time.time()), self.app)
s = ""
for i in args:
s = s + "%s" % (i,)
self.write_log(s)
def trace_me(self):
self.log_info = "[%s] %s" % (time.ctime(time.time()), self.app)
import traceback
x = traceback.extract_stack()
bar = string.join(traceback.format_list(x))
self.write_log(bar)
def write_log(self, s):
log_name = self.cfg["logFile"] or "%s//var/log/up2date" % config.PREFIX
log_file = open(log_name, 'a')
msg = "%s %s\n" % (self.log_info, str(s))
log_file.write(msg)
log_file.flush()
log_file.close()
def initLog():
global log
try:
log = log
except NameError:
log = None
if log == None:
log = Log()
return log
| gpl-2.0 | 3,413,463,565,188,236,000 | 23.964912 | 79 | 0.529866 | false |
clayz/crazy-quiz-web | src/entities/audit.py | 1 | 1313 | from google.appengine.ext import ndb
from entities import BaseEntity
class Purchase(BaseEntity):
goods_id = ndb.IntegerProperty(required=True)
version = ndb.StringProperty(required=True)
date = ndb.DateTimeProperty(required=True)
@classmethod
def get_last(cls, user_key):
return cls.query(ancestor=user_key).order(-cls.date)
class Exchange(BaseEntity):
goods_id = ndb.IntegerProperty(required=True)
version = ndb.StringProperty(required=True)
date = ndb.DateTimeProperty(required=True)
@classmethod
def get_last(cls, user_key):
return cls.query(ancestor=user_key).order(-cls.date)
class Earn(BaseEntity):
type_id = ndb.IntegerProperty(required=True)
version = ndb.StringProperty(required=True)
date = ndb.DateTimeProperty(required=True)
@classmethod
def get_last(cls, user_key):
return cls.query(ancestor=user_key).order(-cls.date)
class Consume(BaseEntity):
type_id = ndb.IntegerProperty(required=True)
album = ndb.IntegerProperty()
level = ndb.IntegerProperty()
picture = ndb.IntegerProperty()
version = ndb.StringProperty(required=True)
date = ndb.DateTimeProperty(required=True)
@classmethod
def get_last(cls, user_key):
return cls.query(ancestor=user_key).order(-cls.date) | apache-2.0 | -4,952,125,220,807,724,000 | 28.2 | 60 | 0.71211 | false |
tereka114/chainer | tests/testing_tests/test_condition.py | 1 | 4816 | import unittest
from chainer.testing import condition
# The test fixtures of this TestCase are meant to be decorated by the
# decorators under test, so we do not run them alone.
class MockUnitTest(unittest.TestCase):
failure_case_counter = 0
success_case_counter = 0
probabilistic_case_counter = 0
probabilistic_case_success_counter = 0
probabilistic_case_failure_counter = 0
def failure_case(self):
self.failure_case_counter += 1
self.fail()
def success_case(self):
self.success_case_counter += 1
self.assertTrue(True)
def probabilistic_case(self):
self.probabilistic_case_counter += 1
if self.probabilistic_case_counter % 2 == 0:
self.probabilistic_case_success_counter += 1
self.assertTrue(True)
else:
self.probabilistic_case_failure_counter += 1
self.fail()
def runTest(self):
pass
def _should_fail(self, f):
self.assertRaises(AssertionError, f, self.unit_test)
def _should_pass(self, f):
f(self.unit_test)
class TestRepeatWithSuccessAtLeast(unittest.TestCase):
def _decorate(self, f, times, min_success):
return condition.repeat_with_success_at_least(
times, min_success)(f)
def setUp(self):
self.unit_test = MockUnitTest()
def test_all_trials_fail(self):
f = self._decorate(MockUnitTest.failure_case, 10, 1)
_should_fail(self, f)
self.assertEqual(self.unit_test.failure_case_counter, 10)
def test_all_trials_fail2(self):
f = self._decorate(MockUnitTest.failure_case, 10, 0)
_should_pass(self, f)
self.assertLessEqual(self.unit_test.failure_case_counter, 10)
def test_all_trials_succeed(self):
f = self._decorate(MockUnitTest.success_case, 10, 10)
_should_pass(self, f)
self.assertEqual(self.unit_test.success_case_counter, 10)
def test_all_trials_succeed2(self):
self.assertRaises(AssertionError,
condition.repeat_with_success_at_least,
10, 11)
def test_half_of_trials_succeed(self):
f = self._decorate(MockUnitTest.probabilistic_case, 10, 5)
_should_pass(self, f)
self.assertLessEqual(self.unit_test.probabilistic_case_counter, 10)
self.assertGreaterEqual(
self.unit_test.probabilistic_case_success_counter, 5)
self.assertLessEqual(
self.unit_test.probabilistic_case_failure_counter, 5)
def test_half_of_trials_succeed2(self):
f = self._decorate(MockUnitTest.probabilistic_case, 10, 6)
_should_fail(self, f)
self.assertLessEqual(self.unit_test.probabilistic_case_counter, 10)
self.assertLess(
self.unit_test.probabilistic_case_success_counter, 6)
self.assertGreaterEqual(
self.unit_test.probabilistic_case_failure_counter, 5)
class TestRepeat(unittest.TestCase):
def _decorate(self, f, times):
return condition.repeat(times)(f)
def setUp(self):
self.unit_test = MockUnitTest()
def test_failure_case(self):
f = self._decorate(MockUnitTest.failure_case, 10)
_should_fail(self, f)
self.assertLessEqual(self.unit_test.failure_case_counter, 10)
def test_success_case(self):
f = self._decorate(MockUnitTest.success_case, 10)
_should_pass(self, f)
self.assertEqual(self.unit_test.success_case_counter, 10)
def test_probabilistic_case(self):
f = self._decorate(MockUnitTest.probabilistic_case, 10)
_should_fail(self, f)
self.assertLessEqual(self.unit_test.probabilistic_case_counter, 10)
self.assertLess(self.unit_test.probabilistic_case_success_counter, 10)
self.assertGreater(
self.unit_test.probabilistic_case_failure_counter, 0)
class TestRetry(unittest.TestCase):
def _decorate(self, f, times):
return condition.retry(times)(f)
def setUp(self):
self.unit_test = MockUnitTest()
def test_failure_case(self):
f = self._decorate(MockUnitTest.failure_case, 10)
_should_fail(self, f)
self.assertEqual(self.unit_test.failure_case_counter, 10)
def test_success_case(self):
f = self._decorate(MockUnitTest.success_case, 10)
_should_pass(self, f)
self.assertLessEqual(self.unit_test.success_case_counter, 10)
def test_probabilistic_case(self):
f = self._decorate(MockUnitTest.probabilistic_case, 10)
_should_pass(self, f)
self.assertLessEqual(
self.unit_test.probabilistic_case_counter, 10)
self.assertGreater(
self.unit_test.probabilistic_case_success_counter, 0)
self.assertLess(self.unit_test.probabilistic_case_failure_counter, 10)
| mit | -2,550,624,793,642,368,000 | 32.213793 | 78 | 0.653654 | false |
kubeflow/examples | github_issue_summarization/pipelines/components/t2t/t2t-app/app/main.py | 1 | 5290 | # -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import logging
import os
import random
import re
import requests
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import g, request
import pandas as pd
import tensorflow as tf
# similar to T2T's query.py
# https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/serving/query.py
from tensor2tensor import problems as problems_lib # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import usr_dir
from tensor2tensor.serving import serving_utils
app = Flask(__name__)
model_name = os.getenv('MODEL_NAME', 'ghsumm')
problem_name = os.getenv('PROBLEM_NAME', 'gh_problem')
t2t_usr_dir = os.getenv('T2T_USR_DIR', 'ghsumm/trainer')
hparams_name = os.getenv('HPARAMS', 'transformer_prepend')
data_dir = os.getenv('DATADIR', 'gs://aju-dev-demos-codelabs/kubecon/t2t_data_gh_all/')
github_token = os.getenv('GH_TOKEN', 'xxx')
SERVER = os.getenv('TFSERVING_HOST', 'ghsumm.kubeflow')
print("using server: %s" % SERVER)
SERVABLE_NAME = os.getenv('TF_SERVABLE_NAME', 'ghsumm')
print("using model servable name: %s" % SERVABLE_NAME)
SAMPLE_ISSUES = './github_issues_sample.csv'
SERVER_URL = 'http://' + SERVER + ':8500/v1/models/' + SERVABLE_NAME + ':predict'
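# With the default env values above this resolves to something like
# 'http://ghsumm.kubeflow:8500/v1/models/ghsumm:predict', i.e. the standard
# TensorFlow Serving REST predict endpoint.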
def get_issue_body(issue_url):
issue_url = re.sub('.*github.com/', 'https://api.github.com/repos/',
issue_url)
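# e.g. 'https://github.com/kubeflow/examples/issues/123' (illustrative URL) becomes
# 'https://api.github.com/repos/kubeflow/examples/issues/123' before the API call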
tf.logging.info("issue url: %s", issue_url)
# tf.logging.info("using GH token: %s", github_token)
response = requests.get(
issue_url, headers={
'Authorization': 'token {}'.format(github_token)
}).json()
tf.logging.info("----response from url fetch: %s", response)
return response['body']
@app.route('/')
def index():
return render_template('index.html')
@app.route("/random_github_issue", methods=['GET'])
def random_github_issue():
github_issues = getattr(g, '_github_issues', None)
if github_issues is None:
github_issues = g._github_issues = pd.read_csv(
SAMPLE_ISSUES).body.tolist()
random_issue = github_issues[random.randint(0,
len(github_issues) - 1)]
tf.logging.info("----random issue text: %s", random_issue)
return jsonify({'body': random_issue})
@app.route("/summary", methods=['POST'])
def summary():
"""Main prediction route.
Provides a machine-generated summary of the given text. Sends a request to a live
model trained on GitHub issues.
"""
global problem #pylint: disable=global-statement
if problem is None:
init()
request_fn = make_tfserving_rest_request_fn()
if request.method == 'POST':
issue_text = request.form["issue_text"]
issue_url = request.form["issue_url"]
if issue_url:
print("fetching issue from URL...")
issue_text = get_issue_body(issue_url)
tf.logging.info("issue_text: %s", issue_text)
outputs = serving_utils.predict([issue_text], problem, request_fn)
outputs, = outputs
output, score = outputs #pylint: disable=unused-variable
tf.logging.info("output: %s", output)
return jsonify({'summary': output, 'body': issue_text})
return ('', 204)
problem = None
def init():
# global input_encoder, output_decoder, fname, problem
global problem #pylint: disable=global-statement
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info("importing ghsumm/trainer from {}".format(t2t_usr_dir))
usr_dir.import_usr_dir(t2t_usr_dir)
print(t2t_usr_dir)
problem = registry.problem(problem_name)
hparams = tf.contrib.training.HParams(data_dir=os.path.expanduser(data_dir))
problem.get_hparams(hparams)
def make_tfserving_rest_request_fn():
"""Wraps function to make CloudML Engine requests with runtime args."""
def _make_tfserving_rest_request_fn(examples):
"""..."""
# api = discovery.build("ml", "v1", credentials=credentials)
# parent = "projects/%s/models/%s/versions/%s" % (cloud.default_project(),
# model_name, version)
input_data = {
"instances": [{
"input": {
"b64": base64.b64encode(ex.SerializeToString())
}
} for ex in examples]
}
response = requests.post(SERVER_URL, json=input_data)
predictions = response.json()['predictions']
tf.logging.info("Predictions: %s", predictions)
return predictions
return _make_tfserving_rest_request_fn
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request.')
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
app.run(port=8080, debug=True)
| apache-2.0 | -3,029,379,828,580,967,400 | 31.857143 | 88 | 0.681285 | false |
hamicornfury/storyscape.django- | src/medialibrary/models.py | 1 | 4634 | """
Design of this model is influenced by the Dublin Core Element Set Metadata.
"""
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
import tagging
from tagging.fields import TagField
from django.forms import ModelForm, BooleanField
# registered with tagging, decleration near end of file.
MEDIAOBJECT_max_length_bytes = 16384
MEDIAOBJECT_max_length_name = 60
DEFAULT_LICENSE = 'http://web.resource.org/cc/PublicDomain'
def upload_file_to(instance, filename):
# this way we spread out where images are saved, but always in the
# same dir for a user
# note: instance is the MediaObject being saved; its creator is set from request.user in the view
ftype = filename[-3:].lower()
filename = filename[:-3] + ftype
path = settings.MEDIAOBJECT_UPLOAD_URL_ROOT_DIR_NAME +'/org/' + ftype + '/'
#IMPORTANT: we want to know the file type since we sort into different dirs ...
path += instance.creator.username + '/'
# instance.user.date_joined.strftime('%Y/%m/%d/')
path += filename
return path
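# Illustrative example (assuming settings.MEDIAOBJECT_UPLOAD_URL_ROOT_DIR_NAME is 'media'):
# a MediaObject created by user 'alice' uploading 'Flower.JPG' gets the path
# 'media/org/jpg/alice/Flower.jpg'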
class MediaType(models.Model):
"""
simple model used to indicate if media is a still image, video,
audio, or other type of media.
See http://dublincore.org/documents/dcmi-type-vocabulary/ for info
on types, part of dublin core metadata
"""
label = models.CharField(max_length=20, default='unknown')
"""TODO: need to add description """
class MediaFormat(models.Model):
"""
simple model used to indicate if media is a still image, video,
audio, or other type of media 'StillImage' ...
"""
label = models.CharField(max_length=10, default='unknown')
"""TODO: need to add description """
class MediaObject(models.Model):
"""
MediaObject is used to model image, video, and audio types of
media
"""
name = models.CharField(max_length=MEDIAOBJECT_max_length_name, blank=True, default='imagefile')
# CURRENTLY this is the modified url? should be the original??????
url = models.CharField(max_length=255, unique=True, blank=True)
"""
create a recursive relationship--an object that has a many-to-many
relationship with objects of the same model class.
"""
related = models.ManyToManyField('self', blank=True)
"""
I think it uses less memory and is faster to reference the
foreign key, rather than have these be char fields.
"""
"""
NOTE: should allow type and format to be null, this way you can
use get_or_create with MediaObject
"""
type = models.ForeignKey(MediaType, blank=True)
format = models.ForeignKey(MediaFormat, blank=True)
publisher = models.CharField(max_length=60, default="Sodiioo", blank=True)
license = models.CharField(max_length=60, blank=True,
default=DEFAULT_LICENSE)
creator = models.ForeignKey(User, null=True, blank=True)
creation_datetime = models.DateTimeField('date time of creation')
original = models.BooleanField(default=False)
# used for form for uploading image
upload_image = models.ImageField(upload_to=upload_file_to,
max_length=60, blank=False, null=True)
#tags, this model is registered below with django-tagging
has_tag = models.IntegerField(blank=True, null=True, default=0)
mo_tags = TagField(verbose_name = "Image Tags")
def is_visible(self):
return not self.license
class MediaLibrary(models.Model):
"""
MediaLibrary is meant to be a personal subset of all media objects.
These are the media items that a user wants to keep in their own
library for easy access.
"""
user = models.ForeignKey(User)
media_object = models.ManyToManyField(MediaObject)
class ImageUploadForm(ModelForm):
is_public = BooleanField(label = "Public", required = False, initial = True)
def __init__(self, *args, **kwargs):
super(ImageUploadForm, self).__init__(*args, **kwargs)
# change display label for form item 'name'
class Meta:
model = MediaObject
fields = ('upload_image', 'mo_tags')
def save(self, *args, **kwargs):
# the image gets saved to the MEDIA_ROOT + path defined in upload_to here
imgobjectform = super(ImageUploadForm, self).save(*args, **kwargs)
imgobjectform.url = imgobjectform.upload_image.name
return imgobjectform
try:
tagging.register(MediaObject)
except tagging.AlreadyRegistered:
pass
import signals
signals.MediaLibrary # just here to silence warnings | mit | -747,592,366,850,270,500 | 32.832117 | 101 | 0.672421 | false |
Ghini/ghini.desktop | bauble/plugins/garden/accession.py | 1 | 117889 | # -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Brett Adams
# Copyright 2015-2016 Mario Frasca <[email protected]>.
# Copyright 2017 Jardín Botánico de Quito
#
# This file is part of ghini.desktop.
#
# ghini.desktop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ghini.desktop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ghini.desktop. If not, see <http://www.gnu.org/licenses/>.
#
# accessions module
#
import datetime
from decimal import Decimal, ROUND_DOWN
import os
from random import random
import sys
import traceback
import weakref
import logging
from functools import reduce
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from gi.repository import Gtk
import lxml.etree as etree
from gi.repository import Pango
from sqlalchemy import and_, or_, func
from sqlalchemy import ForeignKey, Column, Unicode, Integer, Boolean, \
UnicodeText
from sqlalchemy.orm import EXT_CONTINUE, MapperExtension, \
backref, relation, reconstructor, validates
from sqlalchemy.orm.session import object_session
from sqlalchemy.exc import DBAPIError
import bauble
import bauble.db as db
import bauble.editor as editor
from bauble import meta
from bauble.error import check
import bauble.paths as paths
from bauble.plugins.garden.propagation import SourcePropagationPresenter, \
Propagation
from bauble.plugins.garden.source import Contact, create_contact, \
Source, Collection, CollectionPresenter, PropagationChooserPresenter
import bauble.prefs as prefs
import bauble.btypes as types
import bauble.utils as utils
from bauble.view import (InfoBox, InfoExpander, PropertiesExpander,
MapInfoExpander,
select_in_search_results, Action)
import bauble.view as view
from bauble.search import SearchStrategy
from bauble.utils import safe_int
# TODO: underneath the species entry create a label that shows information
# about the family of the genus of the species selected as well as more
# info about the genus so we know exactly what plant is being selected
# e.g. Malvaceae (sensu lato), Hibiscus (senso stricto)
def longitude_to_dms(decimal):
return decimal_to_dms(Decimal(decimal), 'long')
def latitude_to_dms(decimal):
return decimal_to_dms(Decimal(decimal), 'lat')
def decimal_to_dms(decimal, long_or_lat):
'''
:param decimal: the value to convert
:param long_or_lat: should be either "long" or "lat"
@returns direction, degrees, minutes, seconds, with seconds rounded to two
decimal places
'''
if long_or_lat == 'long':
check(abs(decimal) <= 180)
else:
check(abs(decimal) <= 90)
dir_map = {'long': ['E', 'W'],
'lat': ['N', 'S']}
direction = dir_map[long_or_lat][0]
if decimal < 0:
direction = dir_map[long_or_lat][1]
dec = Decimal(str(abs(decimal)))
d = Decimal(str(dec)).to_integral(rounding=ROUND_DOWN)
m = Decimal(abs((dec-d)*60)).to_integral(rounding=ROUND_DOWN)
m2 = Decimal(abs((dec-d)*60))
places = 2
q = Decimal((0, (1,), -places))
s = Decimal(abs((m2-m) * 60)).quantize(q)
return direction, d, m, s
def dms_to_decimal(dir, deg, min, sec, precision=6):
'''
convert degrees, minutes, seconds to decimal
return a decimal.Decimal
'''
nplaces = Decimal(10) ** -precision
if dir in ('E', 'W'): # longitude
check(abs(deg) <= 180)
else:
check(abs(deg) <= 90)
check(abs(min) < 60)
check(abs(sec) < 60)
deg = Decimal(str(abs(deg)))
min = Decimal(str(min))
sec = Decimal(str(sec))
dec = abs(sec/Decimal('3600')) + abs(min/Decimal('60.0')) + deg
if dir in ('W', 'S'):
dec = -dec
return dec.quantize(nplaces)
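# Worked example for the two helpers above (values chosen for illustration):
# decimal_to_dms(Decimal('121.135'), 'long') -> ('E', Decimal('121'), Decimal('8'), Decimal('6.00'))
# dms_to_decimal('E', 121, 8, 6) -> Decimal('121.135000')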
def generic_taxon_add_action(model, view, presenter, top_presenter,
button, taxon_entry):
"""user hit click on taxon add button
new taxon goes into model.species;
its string representation into taxon_entry.
"""
from bauble.plugins.plants.species import edit_species
committed = edit_species(parent_view=view.get_window(), is_dependent_window=True)
if committed:
if isinstance(committed, list):
committed = committed[0]
logger.debug('new taxon added from within AccessionEditor')
# add the new taxon to the session and start using it
presenter.session.add(committed)
taxon_entry.set_text("%s" % committed)
presenter.remove_problem(
hash(Gtk.Buildable.get_name(taxon_entry)), None)
setattr(model, 'species', committed)
presenter._dirty = True
top_presenter.refresh_sensitivity()
else:
logger.debug('new taxon not added after request from AccessionEditor')
def edit_callback(accessions):
e = AccessionEditor(model=accessions[0])
return e.start()
def add_plants_callback(accessions):
session = db.Session()
acc = session.merge(accessions[0])
e = PlantEditor(model=Plant(accession=acc))
# session creates unbound object. editor decides what to do with it.
session.close()
return e.start() is not None
def remove_callback(accessions):
acc = accessions[0]
if len(acc.plants) > 0:
safe = utils.xml_safe
plants = [str(plant) for plant in acc.plants]
values = dict(num_plants=len(acc.plants),
plant_codes=safe(', '.join(plants)))
msg = (_('%(num_plants)s plants depend on this accession: '
'<b>%(plant_codes)s</b>\n\n') % values +
_('You cannot remove an accession with plants.'))
utils.message_dialog(msg, type=Gtk.MessageType.WARNING)
return
else:
msg = _("Are you sure you want to remove accession <b>%s</b>?") % \
utils.xml_safe(str(acc))
if not utils.yes_no_dialog(msg):
return
try:
session = db.Session()
obj = session.query(Accession).get(acc.id)
session.delete(obj)
session.commit()
except Exception as e:
msg = _('Could not delete.\n\n%s') % utils.xml_safe(str(e))
utils.message_details_dialog(msg, traceback.format_exc(),
type=Gtk.MessageType.ERROR)
finally:
session.close()
return True
edit_action = Action('acc_edit', _('_Edit'),
callback=edit_callback,
accelerator='<ctrl>e')
add_plant_action = Action('acc_add', _('_Add plants'),
callback=add_plants_callback,
accelerator='<ctrl>k')
remove_action = Action('acc_remove', _('_Delete'),
callback=remove_callback,
accelerator='<ctrl>Delete')
acc_context_menu = [edit_action, add_plant_action, remove_action]
ver_level_descriptions = \
{0: _('The name of the record has not been checked by any authority.'),
1: _('The name of the record determined by comparison with other '
'named plants.'),
2: _('The name of the record determined by a taxonomist or by other '
'competent persons using herbarium and/or library and/or '
'documented living material.'),
3: _('The name of the plant determined by taxonomist engaged in '
'systematic revision of the group.'),
4: _('The record is part of type gathering or propagated from type '
'material by asexual methods.')}
class Verification(db.Base):
"""
:Table name: verification
:Columns:
verifier: :class:`sqlalchemy.types.Unicode`
The name of the person that made the verification.
date: :class:`sqlalchemy.types.Date`
The date of the verification
reference: :class:`sqlalchemy.types.UnicodeText`
The reference material used to make this verification
level: :class:`sqlalchemy.types.Integer`
Determines the level or authority of the verifier. If it is
not known whether the name of the record has been verified by
an authority, then this field should be None.
Possible values:
- 0: The name of the record has not been checked by any authority.
- 1: The name of the record determined by comparison with
other named plants.
- 2: The name of the record determined by a taxonomist or by
other competent persons using herbarium and/or library and/or
documented living material.
- 3: The name of the plant determined by taxonomist engaged in
systematic revision of the group.
- 4: The record is part of type gathering or propagated from
type material by asexual methods
notes: :class:`sqlalchemy.types.UnicodeText`
Notes about this verification.
accession_id: :class:`sqlalchemy.types.Integer`
Foreign Key to the :class:`Accession` table.
species_id: :class:`sqlalchemy.types.Integer`
Foreign Key to the :class:`~bauble.plugins.plants.Species` table.
prev_species_id: :class:`~sqlalchemy.types.Integer`
Foreign key to the :class:`~bauble.plugins.plants.Species`
table. What it was verified from.
"""
__tablename__ = 'verification'
__mapper_args__ = {'order_by': 'verification.date'}
# columns
verifier = Column(Unicode(64), nullable=False)
date = Column(types.Date, nullable=False)
reference = Column(UnicodeText)
accession_id = Column(Integer, ForeignKey('accession.id'), nullable=False)
# the level of assurance of this verification
level = Column(Integer, nullable=False, autoincrement=False)
# what it was verified as
species_id = Column(Integer, ForeignKey('species.id'), nullable=False)
# what it was verified from
prev_species_id = Column(Integer, ForeignKey('species.id'), nullable=False)
species = relation(
'Species', primaryjoin='Verification.species_id==Species.id')
prev_species = relation(
'Species', primaryjoin='Verification.prev_species_id==Species.id')
notes = Column(UnicodeText)
# TODO: I have no internet, so I write this here. please remove this note
# and add the text as new issues as soon as possible.
#
# First of all a ghini-1.1 issue: since 'Accession' is an abstract concept, you
# don't make a Voucher of an Accession, you make a Voucher of a Plant. As
# with Photos, in the Accession InfoBox you want to see all Vouchers of all
# Plantings belonging to the Accession.
#
# 2: imagine you go on an expedition and collect vouchers as well as seeds or
# cuttings. You will have vouchers of the parent plant, but the
# parent plant will not be in your collection. This justifies requiring the
# ability to add a Voucher to a Plant and mark it as Voucher of its parent
# plant. On the other hand though, if the parent plant *is* in your
# collection and the link is correctly represented in a Propagation, any
# 'parent plant voucher' will conflict with the vouchers associated to the
# parent plant. Maybe this can be solved by disabling the whole
# parent_voucher panel in the case of plants resulting of a garden
# propagation.
#
# 3: InfoBox (Accession AND Plant) are to show parent plant information as a
# link to the parent plant, or as the name of the parent plant voucher. At
# the moment this is only partially the case for
herbarium_codes = {}
class Voucher(db.Base):
"""
:Table name: voucher
:Columns:
herbarium: :class:`sqlalchemy.types.Unicode`
The name of the herbarium.
code: :class:`sqlalchemy.types.Unicode`
The herbarium code for the voucher.
parent_material: :class:`sqlalchemy.types.Boolean`
Is this voucher relative to the parent material of the accession.
accession_id: :class:`sqlalchemy.types.Integer`
Foreign key to the :class:`Accession` .
"""
__tablename__ = 'voucher'
herbarium = Column(Unicode(5), nullable=False)
code = Column(Unicode(32), nullable=False)
parent_material = Column(Boolean, default=False)
accession_id = Column(Integer, ForeignKey('accession.id'), nullable=False)
# accession = relation('Accession', uselist=False,
# backref=backref('vouchers',
# cascade='all, delete-orphan'))
# invalidate an accessions string cache after it has been updated
class AccessionMapperExtension(MapperExtension):
def after_update(self, mapper, conn, instance):
instance.invalidate_str_cache()
return EXT_CONTINUE
# ITF2 - E.1; Provenance Type Flag; Transfer code: prot
prov_type_values = [
('Wild', _('Accession of wild source')), # W
('Cultivated', _('Propagule(s) from a wild source plant')), # Z
('NotWild', _("Accession not of wild source")), # G
('Purchase', _('Purchase or gift')), # COLLAPSE INTO G
('InsufficientData', _("Insufficient Data")), # U
('Unknown', _("Unknown")), # COLLAPSE INTO U
(None, ''), # do not transfer this field
]
# ITF2 - E.3; Wild Provenance Status Flag; Transfer code: wpst
# - further specifies the W and Z prov type flag
#
# according to the ITF2, the keys should literally be one of: 'Wild native',
# 'Wild non-native', 'Cultivated native', 'Cultivated non-native'. In
# practice the standard just requires we note whether a wild (a cultivated
# propagule Z or the one directly collected W) plant is native or not to the
# place where it was found. A boolean should suffice: exporting will expand
# to and importing will collapse from the standard value. Giving all four
# options after the user has already selected W or Z is only confusing to
# users not familiar with the ITF2 standard.
wild_prov_status_values = [
# Endemic found within indigenous range
('WildNative', _("Wild native")),
# found outside indigenous range
('WildNonNative', _("Wild non-native")),
# Endemic, cultivated, reintroduced or translocated within its
# indigenous range
('CultivatedNative', _("Cultivated native")),
# MISSING cultivated, found outside its indigenous range
# (u'CultivatedNonNative', _("Cultivated non-native"))
# TO REMOVE:
('Impound', _("Impound")),
('Collection', _("Collection")),
('Rescue', _("Rescue")),
('InsufficientData', _("Insufficient Data")),
('Unknown', _("Unknown")),
# Not transferred
(None, '')]
# not ITF2
# - further specifies the Z prov type flag value
cultivated_prov_status_values = [
('InVitro', _("In vitro")),
('Division', _("Division")),
('Seed', _("Seed")),
('Unknown', _("Unknown")),
(None, '')]
# not ITF2
# - further specifies the G prov type flag value
purchase_prov_status_values = [
('National', _("National")),
('Imported', _("Imported")),
('Unknown', _("Unknown")),
(None, '')]
# not ITF2
recvd_type_values = {
'ALAY': _('Air layer'),
'BBPL': _('Balled & burlapped plant'),
'BRPL': _('Bare root plant'),
'BUDC': _('Bud cutting'),
'BUDD': _('Budded'),
'BULB': _('Bulb'),
'CLUM': _('Clump'),
'CORM': _('Corm'),
'DIVI': _('Division'),
'GRAF': _('Graft'),
'LAYE': _('Layer'),
'PLNT': _('Planting'),
'PSBU': _('Pseudobulb'),
'RCUT': _('Rooted cutting'),
'RHIZ': _('Rhizome'),
'ROOC': _('Root cutting'),
'ROOT': _('Root'),
'SCIO': _('Scion'),
'SEDL': _('Seedling'),
'SEED': _('Seed'),
'SPOR': _('Spore'),
'SPRL': _('Sporeling'),
'TUBE': _('Tuber'),
'UNKN': _('Unknown'),
'URCU': _('Unrooted cutting'),
'BBIL': _('Bulbil'),
'VEGS': _('Vegetative spreading'),
'SCKR': _('Root sucker'),
None: ''
}
accession_type_to_plant_material = {
#u'Plant': _('Planting'),
'BBPL': 'Plant',
'BRPL': 'Plant',
'PLNT': 'Plant',
'SEDL': 'Plant',
#u'Seed': _('Seed/Spore'),
'SEED': 'Seed',
'SPOR': 'Seed',
'SPRL': 'Seed',
#u'Vegetative': _('Vegetative Part'),
'BUDC': 'Vegetative',
'BUDD': 'Vegetative',
'BULB': 'Vegetative',
'CLUM': 'Vegetative',
'CORM': 'Vegetative',
'DIVI': 'Vegetative',
'GRAF': 'Vegetative',
'LAYE': 'Vegetative',
'PSBU': 'Vegetative',
'RCUT': 'Vegetative',
'RHIZ': 'Vegetative',
'ROOC': 'Vegetative',
'ROOT': 'Vegetative',
'SCIO': 'Vegetative',
'TUBE': 'Vegetative',
'URCU': 'Vegetative',
'BBIL': 'Vegetative',
'VEGS': 'Vegetative',
'SCKR': 'Vegetative',
#u'Tissue': _('Tissue Culture'),
'ALAY': 'Tissue',
#u'Other': _('Other'),
'UNKN': 'Other',
None: None
}
def compute_serializable_fields(cls, session, keys):
result = {'accession': None}
acc_keys = {}
acc_keys.update(keys)
acc_keys['code'] = keys['accession']
accession = Accession.retrieve_or_create(
session, acc_keys, create=(
'taxon' in acc_keys and 'rank' in acc_keys))
result['accession'] = accession
return result
AccessionNote = db.make_note_class('Accession', compute_serializable_fields)
class Accession(db.Base, db.Serializable, db.WithNotes):
"""
:Table name: accession
:Columns:
*code*: :class:`sqlalchemy.types.Unicode`
the accession code
*prov_type*: :class:`bauble.types.Enum`
the provenance type
Possible values:
* first column of prov_type_values
*wild_prov_status*: :class:`bauble.types.Enum`
this column can be used to give more provenance
information
Possible values:
* union of first columns of wild_prov_status_values,
* purchase_prov_status_values,
* cultivated_prov_status_values
*date_accd*: :class:`bauble.types.Date`
the date this accession was accessioned
*id_qual*: :class:`bauble.types.Enum`
The id qualifier is used to indicate uncertainty in the
identification of this accession
Possible values:
* aff. - affinity with
* cf. - compare with
* forsan - perhaps
* near - close to
* ? - questionable
* incorrect
*id_qual_rank*: :class:`sqlalchemy.types.Unicode`
The rank of the species that the id_qual refers to.
*private*: :class:`sqlalchemy.types.Boolean`
Flag to indicate where this information is sensitive and
should be kept private
*species_id*: :class:`sqlalchemy.types.Integer()`
foreign key to the species table
:Properties:
*species*:
the species this accession refers to
*source*:
source is a relation to a Source instance
*plants*:
a list of plants related to this accession
*verifications*:
a list of verifications on the identification of this accession
:Constraints:
"""
__tablename__ = 'accession'
__mapper_args__ = {'order_by': 'accession.code',
'extension': AccessionMapperExtension()}
# columns
#: the accession code
code = Column(Unicode(20), nullable=False, unique=True)
code_format = '%Y%PD####'
@validates('code')
def validate_stripping(self, key, value):
if value is None:
return None
return value.strip()
prov_type = Column(types.Enum(values=[i[0] for i in prov_type_values],
translations=dict(prov_type_values)),
default=None)
wild_prov_status = Column(
types.Enum(values=[i[0] for i in wild_prov_status_values],
translations=dict(wild_prov_status_values)),
default=None)
date_accd = Column(types.Date)
date_recvd = Column(types.Date)
quantity_recvd = Column(Integer, autoincrement=False)
recvd_type = Column(types.Enum(values=list(recvd_type_values.keys()),
translations=recvd_type_values),
default=None)
# ITF2 - C24 - Rank Qualified Flag - Transfer code: rkql
## B: Below Family; F: Family; G: Genus; S: Species; I: first
## Infraspecific Epithet; J: second Infraspecific Epithet; C: Cultivar;
id_qual_rank = Column(Unicode(10))
# ITF2 - C25 - Identification Qualifier - Transfer code: idql
id_qual = Column(types.Enum(values=['aff.', 'cf.', 'incorrect',
'forsan', 'near', '?', '']),
nullable=False,
default='')
# "private" new in 0.8b2
private = Column(Boolean, default=False)
species_id = Column(Integer, ForeignKey('species.id'), nullable=False)
# intended location
intended_location_id = Column(Integer, ForeignKey('location.id'))
intended2_location_id = Column(Integer, ForeignKey('location.id'))
# the source of the accession
source = relation('Source', uselist=False, cascade='all, delete-orphan',
backref=backref('accession', uselist=False))
# relations
species = relation('Species', uselist=False,
backref=backref('accessions',
cascade='all, delete-orphan'))
# use Plant.code for the order_by to avoid ambiguous column names
plants = relation('Plant', cascade='all, delete-orphan',
#order_by='plant.code',
backref=backref('accession', uselist=False))
verifications = relation('Verification', # order_by='date',
cascade='all, delete-orphan',
backref=backref('accession', uselist=False))
vouchers = relation('Voucher', cascade='all, delete-orphan',
backref=backref('accession', uselist=False))
intended_location = relation(
'Location', primaryjoin='Accession.intended_location_id==Location.id')
intended2_location = relation(
'Location', primaryjoin='Accession.intended2_location_id==Location.id')
@classmethod
def get_next_code(cls, code_format=None):
"""
Return the next available accession code.
The format is stored in the `bauble` table.
The format may contain %PD, which is replaced by the plant delimiter;
date formatting is then applied.
If there is an error getting the next code, then None is returned.
"""
# auto generate/increment the accession code
session = db.Session()
if code_format is None:
code_format = cls.code_format
format = code_format.replace('%PD', Plant.get_delimiter())
today = datetime.date.today()
if format.find('%{Y-1}') >= 0:
format = format.replace('%{Y-1}', str(today.year - 1))
format = today.strftime(format)
start = format.rstrip('#')
if start == format:
# fixed value
return start
digits = len(format) - len(start)
format = start + '%%0%dd' % digits
q = session.query(Accession.code).\
filter(Accession.code.startswith(start))
next = None
try:
if q.count() > 0:
codes = [safe_int(row[0][len(start):]) for row in q]
next = format % (max(codes)+1)
else:
next = format % 1
except Exception as e:
logger.debug(e)
pass
finally:
session.close()
return str(next)
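# Illustrative example (assuming the plant delimiter is '.'): with the default
# code_format '%Y%PD####' in 2024 the prefix becomes '2024.'; if the highest
# existing code is '2024.0012' this returns '2024.0013', and '2024.0001' when no
# '2024.' codes exist yet.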
def search_view_markup_pair(self):
"""provide the two lines describing object for SearchView row.
"""
first, second = (utils.xml_safe(str(self)),
self.species_str(markup=True, authors=True))
suffix = _("%(1)s plant groups in %(2)s location(s)") % {
'1': len(set(self.plants)),
'2': len(set(p.location for p in self.plants))}
suffix = ('<span foreground="#555555" size="small" '
'weight="light"> - %s</span>') % suffix
return first + suffix, second
@property
def parent_plant(self):
try:
return self.source.plant_propagation.plant
except AttributeError:
return None
@property
def propagations(self):
import operator
return reduce(operator.add, [p.propagations for p in self.plants], [])
@property
def pictures(self):
import operator
return reduce(operator.add, [p.pictures for p in self.plants], [])
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__cached_species_str = {}
@reconstructor
def init_on_load(self):
"""
Called instead of __init__() when an Accession is loaded from
the database.
"""
self.__cached_species_str = {}
def invalidate_str_cache(self):
self.__cached_species_str = {}
def __str__(self):
return self.code
def species_str(self, authors=False, markup=False):
"""
Return the string of the species with the id qualifier (id_qual)
injected into the proper place.
If the species isn't part of a session or if the species is dirty,
i.e. in object_session(species).dirty, then a new string will be
built even if the species hasn't been changed since the last call
to this method.
"""
# WARNING: don't use session.is_modified() here because it
# will query lots of dependencies
try:
cached = self.__cached_species_str[(markup, authors)]
except KeyError:
self.__cached_species_str[(markup, authors)] = None
cached = None
session = object_session(self.species)
if session:
# if not part of a session or if the species is dirty then
# build a new string
if cached is not None and self.species not in session.dirty:
return cached
if not self.species:
return None
# show a warning if the id_qual is aff. or cf. but the
# id_qual_rank is None, but only show it once
try:
self.__warned_about_id_qual
except AttributeError:
self.__warned_about_id_qual = False
if self.id_qual in ('aff.', 'cf.') and not self.id_qual_rank \
and not self.__warned_about_id_qual:
msg = _('If the id_qual is aff. or cf. '
'then id_qual_rank is required. %s ') % self.code
logger.warning(msg)
self.__warned_about_id_qual = True
if self.id_qual:
logger.debug('id_qual is %s' % self.id_qual)
sp_str = self.species.str(
authors, markup, remove_zws=True,
qualification=(self.id_qual_rank, self.id_qual))
else:
sp_str = self.species.str(authors, markup, remove_zws=True)
self.__cached_species_str[(markup, authors)] = sp_str
return sp_str
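# Rough example (exact rendering depends on Species.str): with id_qual 'cf.' and
# id_qual_rank 'sp', an accession of Maxillaria variabilis would read roughly
# 'Maxillaria cf. variabilis'.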
def markup(self):
return '%s (%s)' % (self.code, self.species_str(markup=True, authors=True))
def as_dict(self):
result = db.Serializable.as_dict(self)
result['species'] = self.species.str(remove_zws=True, authors=False)
if self.source and self.source.source_detail:
result['contact'] = self.source.source_detail.name
return result
@classmethod
def correct_field_names(cls, keys):
for internal, exchange in [('species', 'taxon')]:
if exchange in keys:
keys[internal] = keys[exchange]
del keys[exchange]
@classmethod
def compute_serializable_fields(cls, session, keys):
logger.debug('compute_serializable_fields(session, %s)' % keys)
result = {'species': None}
keys = dict(keys) # make copy
if 'species' in keys:
keys['taxon'] = keys['species']
keys['rank'] = 'species'
if 'rank' in keys and 'taxon' in keys:
## now we must connect the accession to the species it refers to
if keys['rank'] == 'species':
genus_name, epithet = keys['taxon'].split(' ', 1)
sp_dict = {'ht-epithet': genus_name,
'epithet': epithet}
result['species'] = Species.retrieve_or_create(
session, sp_dict, create=False)
elif keys['rank'] == 'genus':
result['species'] = Species.retrieve_or_create(
session, {'ht-epithet': keys['taxon'],
'epithet': 'sp'})
elif keys['rank'] == 'familia':
unknown_genus = 'Zzz-' + keys['taxon'][:-1]
Genus.retrieve_or_create(
session, {'ht-epithet': keys['taxon'],
'epithet': unknown_genus})
result['species'] = Species.retrieve_or_create(
session, {'ht-epithet': unknown_genus,
'epithet': 'sp'})
return result
@classmethod
def retrieve(cls, session, keys):
try:
return session.query(cls).filter(
cls.code == keys['code']).one()
except:
return None
def top_level_count(self):
sd = self.source and self.source.source_detail
return {(1, 'Accessions'): 1,
(2, 'Species'): set([self.species.id]),
(3, 'Genera'): set([self.species.genus.id]),
(4, 'Families'): set([self.species.genus.family.id]),
(5, 'Plantings'): len(self.plants),
(6, 'Living plants'): sum(p.quantity for p in self.plants),
(7, 'Locations'): set([p.location.id for p in self.plants]),
(8, 'Sources'): set(sd and [sd.id] or [])}
from bauble.plugins.garden.plant import Plant, PlantEditor
class AccessionEditorView(editor.GenericEditorView):
"""
AccessionEditorView provides the view part of the
model/view/presenter paradigm. It also acts as the view for any
child presenter contained within the AccessionEditorPresenter.
The primary function of the view is to set up the parts of the
interface that don't change due to user interaction, though it
also provides some utility methods for changing widget states.
"""
expanders_pref_map = {
# 'acc_notes_expander': 'editor.accession.notes.expanded',
# 'acc_source_expander': 'editor.accession.source.expanded'
}
_tooltips = {
'acc_species_entry': _(
"The species must be selected from the list of completions. "
"To add a species use the Species editor."),
'acc_code_entry': _("The accession ID must be a unique code"),
'acc_id_qual_combo': (_("The ID Qualifier\n\n"
"Possible values: %s")
% utils.enum_values_str('accession.id_qual')),
'acc_id_qual_rank_combo': _('The part of the taxon name that the id '
'qualifier refers to.'),
'acc_date_accd_entry': _('The date this species was accessioned.'),
'acc_date_recvd_entry': _('The date this species was received.'),
'acc_recvd_type_comboentry': _(
'The type of the accessioned material.'),
'acc_quantity_recvd_entry': _('The amount of plant material at the '
'time it was accessioned.'),
'intended_loc_comboentry': _('The intended location for plant '
'material being accessioned.'),
'intended2_loc_comboentry': _('The intended location for plant '
'material being accessioned.'),
'intended_loc_create_plant_checkbutton': _('Immediately create a plant at this location, using all plant material.'),
'acc_prov_combo': (_('The origin or source of this accession.\n\n'
'Possible values: %s') %
', '.join(i[1] for i in prov_type_values)),
'acc_wild_prov_combo': (_('The wild status is used to clarify the '
'provenance.\n\nPossible values: %s') %
', '.join(i[1]
for i in wild_prov_status_values)),
'acc_private_check': _('Indicates whether this accession record '
'should be considered private.'),
'acc_cancel_button': _('Cancel your changes.'),
'acc_ok_button': _('Save your changes.'),
'acc_ok_and_add_button': _('Save your changes and add a '
'plant to this accession.'),
'acc_next_button': _('Save your changes and add another '
'accession.'),
'sources_code_entry': "ITF2 - E7 - Donor's Accession Identifier - donacc",
}
def __init__(self, parent=None):
"""
"""
super().__init__(os.path.join(paths.lib_dir(), 'plugins', 'garden',
'acc_editor.glade'),
parent=parent)
self.attach_completion('acc_species_entry',
cell_data_func=self.species_cell_data_func,
match_func=self.species_match_func)
self.set_accept_buttons_sensitive(False)
self.restore_state()
# TODO: at the moment this also sets up some of the view parts
# of child presenters like the CollectionPresenter, etc.
# datum completions
completion = self.attach_completion('datum_entry',
minimum_key_length=1,
match_func=self.datum_match,
text_column=0)
model = Gtk.ListStore(str)
for abbr in sorted(datums.keys()):
# TODO: should create a marked up string with the datum description
model.append([abbr])
completion.set_model(model)
self.init_translatable_combo('acc_prov_combo', prov_type_values)
self.init_translatable_combo('acc_wild_prov_combo',
wild_prov_status_values)
self.init_translatable_combo('acc_recvd_type_comboentry',
recvd_type_values)
adjustment = self.widgets.source_sw.get_vadjustment()
adjustment.props.value = 0.0
self.widgets.source_sw.set_vadjustment(adjustment)
# set current page so we don't open the last one that was open
self.widgets.notebook.set_current_page(0)
def get_window(self):
return self.widgets.accession_dialog
def set_accept_buttons_sensitive(self, sensitive):
'''
set the sensitivity of all the accept/ok buttons for the editor dialog
'''
self.widgets.acc_ok_button.set_sensitive(sensitive)
self.widgets.acc_ok_and_add_button.set_sensitive(sensitive)
self.widgets.acc_next_button.set_sensitive(sensitive)
def save_state(self):
'''
save the current state of the gui to the preferences
'''
for expander, pref in self.expanders_pref_map.items():
prefs.prefs[pref] = self.widgets[expander].get_expanded()
def restore_state(self):
'''
restore the state of the gui from the preferences
'''
for expander, pref in self.expanders_pref_map.items():
expanded = prefs.prefs.get(pref, True)
self.widgets[expander].set_expanded(expanded)
def start(self):
return self.get_window().run()
@staticmethod
# staticmethod ensures the AccessionEditorView gets garbage collected.
def datum_match(completion, key, treeiter, data=None):
datum = completion.get_model()[treeiter][0]
words = datum.split(' ')
for w in words:
if w.lower().startswith(key.lower()):
return True
return False
@staticmethod
# staticmethod ensures the AccessionEditorView gets garbage collected.
def species_match_func(completion, key, treeiter, data=None):
species = completion.get_model()[treeiter][0]
epg, eps = (species.str(remove_zws=True).lower() + ' ').split(' ')[:2]
key_epg, key_eps = (key.replace('\u200b', '').lower() + ' ').split(' ')[:2]
if not epg:
epg = str(species.genus.epithet).lower()
if (epg.startswith(key_epg) and eps.startswith(key_eps)):
return True
return False
@staticmethod
# staticmethod ensures the AccessionEditorView gets garbage collected.
def species_cell_data_func(column, renderer, model, treeiter, data=None):
v = model[treeiter][0]
renderer.set_property(
'text', '%s (%s)' % (v.str(authors=True), v.genus.family))
class VoucherPresenter(editor.GenericEditorPresenter):
def __init__(self, parent, model, view, session):
super().__init__(model, view)
self.parent_ref = weakref.ref(parent)
self.session = session
self._dirty = False
#self.refresh_view()
self.view.connect('voucher_add_button', 'clicked', self.on_add_clicked)
self.view.connect('voucher_remove_button', 'clicked',
self.on_remove_clicked)
self.view.connect('parent_voucher_add_button', 'clicked',
self.on_add_clicked, True)
self.view.connect('parent_voucher_remove_button', 'clicked',
self.on_remove_clicked, True)
def _voucher_data_func(column, cell, model, treeiter, prop):
v = model[treeiter][0]
cell.set_property('text', getattr(v, prop))
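# helper: bind an editable text column to a Voucher attribute and route
# edits to on_cell_edited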
def setup_column(tree, column, cell, prop):
column = self.view.widgets[column]
cell = self.view.widgets[cell]
column.clear_attributes(cell) # get rid of some warnings
cell.props.editable = True
self.view.connect(
cell, 'edited', self.on_cell_edited, (tree, prop))
column.set_cell_data_func(cell, _voucher_data_func, prop)
setup_column('voucher_treeview', 'voucher_herb_column',
'voucher_herb_cell', 'herbarium')
setup_column('voucher_treeview', 'voucher_code_column',
'voucher_code_cell', 'code')
setup_column('parent_voucher_treeview', 'parent_voucher_herb_column',
'parent_voucher_herb_cell', 'herbarium')
setup_column('parent_voucher_treeview', 'parent_voucher_code_column',
'parent_voucher_code_cell', 'code')
# initialize vouchers treeview
treeview = self.view.widgets.voucher_treeview
utils.clear_model(treeview)
model = Gtk.ListStore(object)
for voucher in self.model.vouchers:
if not voucher.parent_material:
model.append([voucher])
treeview.set_model(model)
# initialize parent vouchers treeview
treeview = self.view.widgets.parent_voucher_treeview
utils.clear_model(treeview)
model = Gtk.ListStore(object)
for voucher in self.model.vouchers:
if voucher.parent_material:
model.append([voucher])
treeview.set_model(model)
def is_dirty(self):
return self._dirty
def on_cell_edited(self, cell, path, new_text, data):
treeview, prop = data
treemodel = self.view.widgets[treeview].get_model()
voucher = treemodel[path][0]
if getattr(voucher, prop) == new_text:
return # didn't change
setattr(voucher, prop, utils.utf8(new_text))
self._dirty = True
self.parent_ref().refresh_sensitivity()
def on_remove_clicked(self, button, parent=False):
if parent:
treeview = self.view.widgets.parent_voucher_treeview
else:
treeview = self.view.widgets.voucher_treeview
model, treeiter = treeview.get_selection().get_selected()
voucher = model[treeiter][0]
voucher.accession = None
model.remove(treeiter)
self._dirty = True
self.parent_ref().refresh_sensitivity()
def on_add_clicked(self, button, parent=False):
"""
"""
if parent:
treeview = self.view.widgets.parent_voucher_treeview
else:
treeview = self.view.widgets.voucher_treeview
voucher = Voucher()
voucher.accession = self.model
voucher.parent_material = parent
model = treeview.get_model()
treeiter = model.insert(0, [voucher])
path = model.get_path(treeiter)
column = treeview.get_column(0)
treeview.set_cursor(path, column, start_editing=True)
class VerificationPresenter(editor.GenericEditorPresenter):
"""
VerificationPresenter
:param parent:
:param model:
:param view:
:param session:
"""
PROBLEM_INVALID_DATE = random()
def __init__(self, parent, model, view, session):
super().__init__(model, view)
self.parent_ref = weakref.ref(parent)
self.session = session
self.view.connect('ver_add_button', 'clicked', self.on_add_clicked)
# remove any verification boxes that would have been added to
# the widget in a previous run
box = self.view.widgets.verifications_parent_box
list(map(box.remove, box.get_children()))
# order by date of the existing verifications
for ver in model.verifications:
expander = self.add_verification_box(model=ver)
expander.set_expanded(False) # all are collapsed to start
# if no verifications were added then add an empty VerificationBox
if len(self.view.widgets.verifications_parent_box.get_children()) < 1:
self.add_verification_box()
# expand the first verification expander
self.view.widgets.verifications_parent_box.get_children()[0].\
set_expanded(True)
self._dirty = False
def is_dirty(self):
return self._dirty
def refresh_view(self):
pass
def on_add_clicked(self, *args):
self.add_verification_box()
def add_verification_box(self, model=None):
"""
:param model:
"""
box = VerificationPresenter.VerificationBox(self, model)
self.view.widgets.verifications_parent_box.pack_start(box, False, False, 0)
self.view.widgets.verifications_parent_box.reorder_child(box, 0)
box.show_all()
return box
class VerificationBox(Gtk.HBox):
def __init__(self, parent, model):
super().__init__()
check(not model or isinstance(model, Verification))
self.presenter = weakref.ref(parent)
self.model = model
if not self.model:
self.model = Verification()
self.model.prev_species = self.presenter().model.species
# copy UI definitions from the accession editor glade file
filename = os.path.join(paths.lib_dir(), "plugins", "garden",
"acc_editor.glade")
xml = etree.parse(filename)
el = xml.find("//object[@id='ver_box']")
builder = Gtk.Builder()
s = '<interface>%s</interface>' % etree.tostring(el, encoding='unicode')
if sys.platform == 'win32':
# NOTE: PyGTK for Win32 is broken so we have to include
# this little hack
#
# TODO: is this only a specific set of version of
# PyGTK/GTK...it was only tested with PyGTK 2.12
builder.add_from_string(s, -1)
else:
builder.add_from_string(s)
self.widgets = utils.BuilderWidgets(builder)
ver_box = self.widgets.ver_box
self.widgets.remove_parent(ver_box)
self.pack_start(ver_box, True, True, 0)
# verifier entry
entry = self.widgets.ver_verifier_entry
if self.model.verifier:
entry.props.text = self.model.verifier
self.presenter().view.connect(
entry, 'changed', self.on_entry_changed, 'verifier')
# date entry
self.date_entry = self.widgets.ver_date_entry
if self.model.date:
utils.set_widget_value(self.date_entry, self.model.date)
else:
self.date_entry.props.text = utils.today_str()
self.presenter().view.connect(
self.date_entry, 'changed', self.on_date_entry_changed)
# reference entry
ref_entry = self.widgets.ver_ref_entry
if self.model.reference:
ref_entry.props.text = self.model.reference
self.presenter().view.connect(
ref_entry, 'changed', self.on_entry_changed, 'reference')
# species entries
def sp_get_completions(text):
query = self.presenter().session.query(Species).join('genus').\
filter(utils.ilike(Genus.genus, '%s%%' % text)).\
filter(Species.id != self.model.id).\
order_by(Species.sp)
return query
def sp_cell_data_func(col, cell, model, treeiter, data=None):
v = model[treeiter][0]
cell.set_property('text', '%s (%s)' %
(v.str(authors=True),
v.genus.family))
ver_prev_taxon_entry = self.widgets.ver_prev_taxon_entry
def on_prevsp_select(value):
self.set_model_attr('prev_species', value)
self.presenter().view.attach_completion(
ver_prev_taxon_entry, sp_cell_data_func)
if self.model.prev_species:
ver_prev_taxon_entry.props.text = "%s" % self.model.prev_species
self.presenter().assign_completions_handler(
ver_prev_taxon_entry, sp_get_completions, on_prevsp_select)
ver_new_taxon_entry = self.widgets.ver_new_taxon_entry
def on_sp_select(value):
self.set_model_attr('species', value)
self.presenter().view.attach_completion(
ver_new_taxon_entry, sp_cell_data_func)
if self.model.species:
ver_new_taxon_entry.props.text = utils.utf8(self.model.species)
self.presenter().assign_completions_handler(
ver_new_taxon_entry, sp_get_completions, on_sp_select)
## add a taxon implies setting the ver_new_taxon_entry
self.presenter().view.connect(
self.widgets.ver_taxon_add_button, 'clicked',
self.on_taxon_add_button_clicked,
ver_new_taxon_entry)
combo = self.widgets.ver_level_combo
renderer = Gtk.CellRendererText()
renderer.props.wrap_mode = Pango.WrapMode.WORD
# TODO: should auto calculate the wrap width with a
# on_size_allocation callback
renderer.props.wrap_width = 400
combo.pack_start(renderer, True)
def cell_data_func(col, cell, model, treeiter, data=None):
level = model[treeiter][0]
descr = model[treeiter][1]
cell.set_property('markup', '<b>%s</b> : %s'
% (level, descr))
combo.set_cell_data_func(renderer, cell_data_func)
model = Gtk.ListStore(int, str)
for level, descr in ver_level_descriptions.items():
model.append([level, descr])
combo.set_model(model)
if self.model.level:
utils.set_widget_value(combo, self.model.level)
self.presenter().view.connect(combo, 'changed',
self.on_level_combo_changed)
# notes text view
textview = self.widgets.ver_notes_textview
textview.set_border_width(1)
buff = Gtk.TextBuffer()
if self.model.notes:
buff.props.text = self.model.notes
textview.set_buffer(buff)
self.presenter().view.connect(buff, 'changed',
self.on_entry_changed, 'notes')
# remove button
button = self.widgets.ver_remove_button
self._sid = self.presenter().view.connect(
button, 'clicked', self.on_remove_button_clicked)
# copy to general tab
button = self.widgets.ver_copy_to_taxon_general
self._sid = self.presenter().view.connect(
button, 'clicked', self.on_copy_to_taxon_general_clicked)
self.update_label()
def on_date_entry_changed(self, entry, data=None):
from bauble.editor import ValidatorError
value = None
PROBLEM = 'INVALID_DATE'
try:
value = editor.DateValidator().to_python(entry.props.text)
except ValidatorError as e:
logger.debug(e)
self.presenter().add_problem(PROBLEM, entry)
else:
self.presenter().remove_problem(PROBLEM, entry)
self.set_model_attr('date', value)
def on_copy_to_taxon_general_clicked(self, button):
if self.model.species is None:
return
parent = self.get_parent()
msg = _("Are you sure you want to copy this verification to the general taxon?")
if not utils.yes_no_dialog(msg):
return
# copy verification species to general tab
if self.model.accession:
self.presenter().parent_ref().view.widgets.acc_species_entry.\
set_text(utils.utf8(self.model.species))
self.presenter()._dirty = True
self.presenter().parent_ref().refresh_sensitivity()
def on_remove_button_clicked(self, button):
parent = self.get_parent()
msg = _("Are you sure you want to remove this verification?")
if not utils.yes_no_dialog(msg):
return
if parent:
parent.remove(self)
# disconnect clicked signal to make garbage collecting work
button.disconnect(self._sid)
# remove verification from accession
if self.model.accession:
self.model.accession.verifications.remove(self.model)
self.presenter()._dirty = True
self.presenter().parent_ref().refresh_sensitivity()
def on_entry_changed(self, entry, attr):
text = entry.props.text
if not text:
self.set_model_attr(attr, None)
else:
self.set_model_attr(attr, utils.utf8(text))
def on_level_combo_changed(self, combo, *args):
i = combo.get_active_iter()
level = combo.get_model()[i][0]
self.set_model_attr('level', level)
def set_model_attr(self, attr, value):
setattr(self.model, attr, value)
if attr != 'date' and not self.model.date:
# When we create a new verification box we set today's date
# in the GtkEntry but not in the model so the presenter
# doesn't appear dirty. Now that the user is setting
# something, we trigger the 'changed' signal on the 'date'
# entry as well, by first clearing the entry then setting it
# to its intended value.
tmp = self.date_entry.props.text
self.date_entry.props.text = ''
self.date_entry.props.text = tmp
# if the verification isn't yet associated with an accession
# then set the accession when we start changing values, this way
# we can setup a dummy verification in the interface
if not self.model.accession:
self.presenter().model.verifications.append(self.model)
self.presenter()._dirty = True
self.update_label()
self.presenter().parent_ref().refresh_sensitivity()
def update_label(self):
parts = []
# TODO: the parts string isn't being translated
if self.model.date:
parts.append('<b>%(date)s</b> : ')
if self.model.species:
parts.append(_('verified as %(species)s '))
if self.model.verifier:
parts.append(_('by %(verifier)s'))
label = ' '.join(parts) % dict(date=self.model.date,
species=self.model.species,
verifier=self.model.verifier)
self.widgets.ver_expander_label.props.use_markup = True
self.widgets.ver_expander_label.props.label = label
def set_expanded(self, expanded):
self.widgets.ver_expander.props.expanded = expanded
def on_taxon_add_button_clicked(self, button, taxon_entry):
## we come here when we are adding a Verification, and the
## Verification wants to refer to a new taxon.
generic_taxon_add_action(
self.model, self.presenter().view, self.presenter(),
self.presenter().parent_ref(),
button, taxon_entry)
class SourcePresenter(editor.GenericEditorPresenter):
"""
SourcePresenter
:param parent:
:param model:
:param view:
:param session:
"""
garden_prop_str = _('Garden Propagation')
def __init__(self, parent, model, view, session):
super().__init__(model, view)
self.parent_ref = weakref.ref(parent)
self.session = session
self._dirty = False
self.view.connect('new_source_button', 'clicked',
self.on_new_source_button_clicked)
self.view.widgets.source_garden_prop_box.props.visible = False
self.view.widgets.source_sw.props.visible = False
self.view.widgets.source_none_label.props.visible = True
# populate the source combo
def on_select(source):
if not source:
self.model.source = None
elif isinstance(source, Contact):
self.model.source = self.source
self.model.source.source_detail = source
elif source == self.garden_prop_str:
self.model.source = self.source
self.model.source.source_detail = None
else:
logger.warning('unknown source: %s' % source)
#self.model.source = self.source
#self.model.source.source_detail = source_detail
self.init_source_comboentry(on_select)
if self.model.source:
self.source = self.model.source
self.view.widgets.sources_code_entry.props.text = \
self.source.sources_code
else:
self.source = Source()
# self.model.source will be reset to None in commit_changes()
# if the source combo value is None
self.model.source = self.source
self.view.widgets.sources_code_entry.props.text = ''
if self.source.collection:
self.collection = self.source.collection
enabled = True
else:
self.collection = Collection()
self.session.add(self.collection)
enabled = False
self.view.widgets.source_coll_add_button.props.sensitive = not enabled
self.view.widgets.source_coll_remove_button.props.sensitive = enabled
self.view.widgets.source_coll_expander.props.expanded = enabled
self.view.widgets.source_coll_expander.props.sensitive = enabled
if self.source.propagation:
self.propagation = self.source.propagation
enabled = True
else:
self.propagation = Propagation()
self.session.add(self.propagation)
enabled = False
self.view.widgets.source_prop_add_button.props.sensitive = not enabled
self.view.widgets.source_prop_remove_button.props.sensitive = enabled
self.view.widgets.source_prop_expander.props.expanded = enabled
self.view.widgets.source_prop_expander.props.sensitive = enabled
# TODO: all the sub presenters here take the
# AccessionEditorPresenter as their parent though their real
# parent is this SourcePresenter....having the
# AccessionEditorPresenter is easier since what we really need
# access to is refresh_sensitivity() and possible
# set_model_attr() but having the SourcePresenter would be
# more "correct"
# presenter that allows us to create a new Propagation that is
# specific to this Source and not attached to any Plant
self.source_prop_presenter = SourcePropagationPresenter(
self.parent_ref(), self.propagation, view, session)
self.source_prop_presenter.register_clipboard()
# presenter that allows us to select an existing propagation
self.prop_chooser_presenter = PropagationChooserPresenter(
self.parent_ref(), self.source, view, session)
# collection data
self.collection_presenter = CollectionPresenter(
self.parent_ref(), self.collection, view, session)
self.collection_presenter.register_clipboard()
def on_changed(entry, *args):
text = entry.props.text
if text.strip():
self.source.sources_code = utils.utf8(text)
else:
self.source.sources_code = None
self._dirty = True
self.refresh_sensitivity()
self.view.connect('sources_code_entry', 'changed', on_changed)
self.view.connect('source_coll_add_button', 'clicked',
self.on_coll_add_button_clicked)
self.view.connect('source_coll_remove_button', 'clicked',
self.on_coll_remove_button_clicked)
self.view.connect('source_prop_add_button', 'clicked',
self.on_prop_add_button_clicked)
self.view.connect('source_prop_remove_button', 'clicked',
self.on_prop_remove_button_clicked)
def all_problems(self):
"""
Return a union of all the problems from this presenter and
child presenters
"""
return (self.problems | self.collection_presenter.problems |
self.prop_chooser_presenter.problems |
self.source_prop_presenter.problems)
def cleanup(self):
super().cleanup()
self.collection_presenter.cleanup()
self.prop_chooser_presenter.cleanup()
self.source_prop_presenter.cleanup()
def start(self):
active = None
if self.model.source:
if self.model.source.source_detail:
active = self.model.source.source_detail
elif self.model.source.plant_propagation:
active = self.garden_prop_str
self.populate_source_combo(active)
def is_dirty(self):
return self._dirty or self.source_prop_presenter.is_dirty() or \
self.prop_chooser_presenter.is_dirty() or \
self.collection_presenter.is_dirty()
def refresh_sensitivity(self):
logger.warning('refresh_sensitivity: %s' % str(self.problems))
self.parent_ref().refresh_sensitivity()
def on_coll_add_button_clicked(self, *args):
self.model.source.collection = self.collection
self.view.widgets.source_coll_expander.props.expanded = True
self.view.widgets.source_coll_expander.props.sensitive = True
self.view.widgets.source_coll_add_button.props.sensitive = False
self.view.widgets.source_coll_remove_button.props.sensitive = True
self._dirty = True
self.refresh_sensitivity()
def on_coll_remove_button_clicked(self, *args):
self.model.source.collection = None
self.view.widgets.source_coll_expander.props.expanded = False
self.view.widgets.source_coll_expander.props.sensitive = False
self.view.widgets.source_coll_add_button.props.sensitive = True
self.view.widgets.source_coll_remove_button.props.sensitive = False
self._dirty = True
self.refresh_sensitivity()
def on_prop_add_button_clicked(self, *args):
self.model.source.propagation = self.propagation
self.view.widgets.source_prop_expander.props.expanded = True
self.view.widgets.source_prop_expander.props.sensitive = True
self.view.widgets.source_prop_add_button.props.sensitive = False
self.view.widgets.source_prop_remove_button.props.sensitive = True
self._dirty = True
self.refresh_sensitivity()
def on_prop_remove_button_clicked(self, *args):
self.model.source.propagation = None
self.view.widgets.source_prop_expander.props.expanded = False
self.view.widgets.source_prop_expander.props.sensitive = False
self.view.widgets.source_prop_add_button.props.sensitive = True
self.view.widgets.source_prop_remove_button.props.sensitive = False
self._dirty = True
self.refresh_sensitivity()
def on_new_source_button_clicked(self, *args):
"""
Opens a new ContactEditor when clicked and repopulates the
source combo if a new Contact is created.
"""
committed = create_contact(parent=self.view.get_window())
new_detail = None
if committed:
new_detail = committed[0]
self.session.add(new_detail)
self.populate_source_combo(new_detail)
def populate_source_combo(self, active=None):
"""
If active=None then set whatever was previously active before
repopulating the combo.
"""
combo = self.view.widgets.acc_source_comboentry
if not active:
treeiter = combo.get_active_iter()
if treeiter:
active = combo.get_model()[treeiter][0]
combo.set_model(None)
model = Gtk.ListStore(object)
none_iter = model.append([''])
model.append([self.garden_prop_str])
list(map(lambda x: model.append([x]), self.session.query(Contact)))
combo.set_model(model)
combo.get_child().get_completion().set_model(model)
combo._populate = True
if active:
results = utils.search_tree_model(model, active)
if results:
combo.set_active_iter(results[0])
else:
combo.set_active_iter(none_iter)
combo._populate = False
def init_source_comboentry(self, on_select):
"""
A comboentry that allows the location to be entered requires
more custom setup than view.attach_completion and
self.assign_simple_handler can provides. This method allows us to
have completions on the location entry based on the location code,
location name and location string as well as selecting a location
from a combo drop down.
:param on_select: called when an item is selected
"""
PROBLEM = 'unknown_source'
def cell_data_func(col, cell, model, treeiter, data=None):
cell.props.text = utils.utf8(model[treeiter][0])
combo = self.view.widgets.acc_source_comboentry
combo.clear()
cell = Gtk.CellRendererText()
combo.pack_start(cell, True)
combo.set_cell_data_func(cell, cell_data_func)
completion = Gtk.EntryCompletion()
cell = Gtk.CellRendererText() # set up the completion renderer
completion.pack_start(cell, True)
completion.set_cell_data_func(cell, cell_data_func)
def match_func(completion, key, treeiter, data=None):
model = completion.get_model()
value = model[treeiter][0]
# allows completions of source details by their ID
if utils.utf8(value).lower().startswith(key.lower()) or \
(isinstance(value, Contact) and
str(value.id).startswith(key)):
return True
return False
completion.set_match_func(match_func)
entry = combo.get_child()
entry.set_completion(completion)
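# show exactly one of the source widgets: the garden-propagation box, the
# source details pane, or the 'no source' label, depending on the current
# entry text and model.source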
def update_visible():
widget_visibility = dict(source_sw=False,
source_garden_prop_box=False,
source_none_label=False)
if entry.props.text == self.garden_prop_str:
widget_visibility['source_garden_prop_box'] = True
elif not self.model.source or not self.model.source.source_detail:
widget_visibility['source_none_label'] = True
else:
#self.model.source.source_detail = value
widget_visibility['source_sw'] = True
for widget, value in widget_visibility.items():
self.view.widgets[widget].props.visible = value
self.view.widgets.source_alignment.props.sensitive = True
def on_match_select(completion, model, treeiter):
value = model[treeiter][0]
# TODO: should we reset/store the entry values if the
# source is changed and restore them if they are switched
# back
if not value:
combo.get_child().props.text = ''
on_select(None)
else:
combo.get_child().props.text = utils.utf8(value)
on_select(value)
# don't set the model as dirty if this is called during
# populate_source_combo
if not combo._populate:
self._dirty = True
self.refresh_sensitivity()
return True
self.view.connect(completion, 'match-selected', on_match_select)
def on_entry_changed(entry, data=None):
text = utils.utf8(entry.props.text)
# see if the text matches a completion string
comp = entry.get_completion()
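# a completion row matches the typed text either by its full string form
# or, for Contact rows, by their id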
def _cmp(row, data):
val = row[0]
if (utils.utf8(val) == data or
(isinstance(val, Contact) and val.id == data)):
return True
else:
return False
found = utils.search_tree_model(comp.get_model(), text, _cmp)
if len(found) == 1:
# the model and iter here should technically be the tree
comp.emit('match-selected', comp.get_model(), found[0])
self.remove_problem(PROBLEM, entry)
else:
self.add_problem(PROBLEM, entry)
update_visible()
return True
self.view.connect(entry, 'changed', on_entry_changed)
def on_combo_changed(combo, *args):
active = combo.get_active_iter()
if active:
detail = combo.get_model()[active][0]
# set the text value on the entry since it does all the
# validation
if not detail:
combo.get_child().props.text = ''
else:
combo.get_child().props.text = utils.utf8(detail)
update_visible()
return True
self.view.connect(combo, 'changed', on_combo_changed)
class AccessionEditorPresenter(editor.GenericEditorPresenter):
widget_to_field_map = {'acc_code_entry': 'code',
'acc_id_qual_combo': 'id_qual',
'acc_date_accd_entry': 'date_accd',
'acc_date_recvd_entry': 'date_recvd',
'acc_recvd_type_comboentry': 'recvd_type',
'acc_quantity_recvd_entry': 'quantity_recvd',
'intended_loc_comboentry': 'intended_location',
'intended2_loc_comboentry': 'intended2_location',
'acc_prov_combo': 'prov_type',
'acc_wild_prov_combo': 'wild_prov_status',
'acc_species_entry': 'species',
'acc_private_check': 'private',
'intended_loc_create_plant_checkbutton': 'create_plant',
}
PROBLEM_INVALID_DATE = random()
PROBLEM_DUPLICATE_ACCESSION = random()
PROBLEM_ID_QUAL_RANK_REQUIRED = random()
def __init__(self, model, view):
'''
:param model: an instance of class Accession
:param view: an instance of AccessionEditorView
'''
super().__init__(model, view)
self.initializing = True
self.create_toolbar()
self._dirty = False
self.session = object_session(model)
self._original_code = self.model.code
self.current_source_box = None
model.create_plant = False
# set the default code and add it to the top of the code formats
self.populate_code_formats(model.code or '')
self.view.widget_set_value('acc_code_format_comboentry',
model.code or '')
if not model.code:
model.code = model.get_next_code()
if self.model.species:
self._dirty = True
self.ver_presenter = VerificationPresenter(self, self.model, self.view,
self.session)
self.voucher_presenter = VoucherPresenter(self, self.model, self.view,
self.session)
self.source_presenter = SourcePresenter(self, self.model, self.view,
self.session)
notes_parent = self.view.widgets.notes_parent_box
notes_parent.foreach(notes_parent.remove)
self.notes_presenter = \
editor.NotesPresenter(self, 'notes', notes_parent)
self.init_enum_combo('acc_id_qual_combo', 'id_qual')
# init id_qual_rank
utils.setup_text_combobox(self.view.widgets.acc_id_qual_rank_combo)
self.refresh_id_qual_rank_combo()
def on_changed(combo, *args):
it = combo.get_active_iter()
if not it:
self.model.id_qual_rank = None
return
text, col = combo.get_model()[it]
self.set_model_attr('id_qual_rank', utils.utf8(col))
self.view.connect('acc_id_qual_rank_combo', 'changed', on_changed)
# refresh_view will fire signal handlers for any connected widgets.
from bauble.plugins.garden import init_location_comboentry
def on_loc_select(field_name, value):
if self.initializing:
return
self.set_model_attr(field_name, value)
refresh_create_plant_checkbutton_sensitivity()
from functools import partial
init_location_comboentry(
self, self.view.widgets.intended_loc_comboentry,
partial(on_loc_select, 'intended_location'), required=False)
init_location_comboentry(
self, self.view.widgets.intended2_loc_comboentry,
partial(on_loc_select, 'intended2_location'), required=False)
# put model values in view before most handlers are connected
self.initializing = True
self.refresh_view()
self.initializing = False
# connect signals
def sp_get_completions(text):
query = self.session.query(Species)
genus = ''
try:
genus = text.split(' ')[0]
except Exception:
pass
from bauble.utils import ilike
return query.filter(
and_(Species.genus_id == Genus.id,
or_(ilike(Genus.genus, '%s%%' % text),
ilike(Genus.genus, '%s%%' % genus)))).\
order_by(Species.sp)
def on_select(value):
logger.debug('on select: %s' % value)
if isinstance(value, str):
value = Species.retrieve(
self.session, {'species': value})
def set_model(v):
self.set_model_attr('species', v)
self.refresh_id_qual_rank_combo()
for kid in self.view.widgets.message_box_parent.get_children():
self.view.widgets.remove_parent(kid)
set_model(value)
if not value:
return
syn = self.session.query(SpeciesSynonym).\
filter(SpeciesSynonym.synonym_id == value.id).first()
if not syn:
set_model(value)
return
msg = _('The species <b>%(synonym)s</b> is a synonym of '
'<b>%(species)s</b>.\n\nWould you like to choose '
'<b>%(species)s</b> instead?') % \
{'synonym': syn.synonym, 'species': syn.species}
box = None
def on_response(button, response):
self.view.widgets.remove_parent(box)
box.destroy()
if response:
completion = self.view.widgets.acc_species_entry.\
get_completion()
utils.clear_model(completion)
model = Gtk.ListStore(object)
model.append([syn.species])
completion.set_model(model)
self.view.widgets.acc_species_entry.\
set_text(utils.utf8(syn.species))
set_model(syn.species)
box = self.view.add_message_box(utils.MESSAGE_BOX_YESNO)
box.message = msg
box.on_response = on_response
box.show()
self.assign_completions_handler('acc_species_entry',
sp_get_completions,
on_select=on_select)
self.assign_simple_handler('acc_prov_combo', 'prov_type')
self.assign_simple_handler('acc_wild_prov_combo', 'wild_prov_status')
# connect recvd_type comboentry widget and child entry
self.view.connect('acc_recvd_type_comboentry', 'changed',
self.on_recvd_type_comboentry_changed)
self.view.connect(self.view.widgets.acc_recvd_type_comboentry.get_child(),
'changed', self.on_recvd_type_entry_changed)
self.view.connect('acc_code_entry', 'changed',
self.on_acc_code_entry_changed)
# date received
self.view.connect('acc_date_recvd_entry', 'changed',
self.on_date_entry_changed, 'date_recvd')
utils.setup_date_button(self.view, 'acc_date_recvd_entry',
'acc_date_recvd_button')
# date accessioned
self.view.connect('acc_date_accd_entry', 'changed',
self.on_date_entry_changed, 'date_accd')
utils.setup_date_button(self.view, 'acc_date_accd_entry',
'acc_date_accd_button')
self.view.connect(
self.view.widgets.intended_loc_add_button,
'clicked',
self.on_loc_button_clicked,
self.view.widgets.intended_loc_comboentry,
'intended_location')
self.view.connect(
self.view.widgets.intended2_loc_add_button,
'clicked',
self.on_loc_button_clicked,
self.view.widgets.intended2_loc_comboentry,
'intended2_location')
## add a taxon implies setting the acc_species_entry
self.view.connect(
self.view.widgets.acc_taxon_add_button, 'clicked',
lambda b, w: generic_taxon_add_action(
self.model, self.view, self, self, b, w),
self.view.widgets.acc_species_entry)
self.has_plants = len(model.plants) > 0
view.widget_set_sensitive('intended_loc_create_plant_checkbutton', not self.has_plants)
def refresh_create_plant_checkbutton_sensitivity(*args):
if self.has_plants:
view.widget_set_sensitive('intended_loc_create_plant_checkbutton', False)
return
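# otherwise the create-plant checkbutton is made sensitive only when
# both a received quantity and an intended location are set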
location_chosen = bool(self.model.intended_location)
has_quantity = self.model.quantity_recvd and bool(int(self.model.quantity_recvd)) or False
view.widget_set_sensitive('intended_loc_create_plant_checkbutton', has_quantity and location_chosen)
self.assign_simple_handler(
'acc_quantity_recvd_entry', 'quantity_recvd')
self.view.connect_after('acc_quantity_recvd_entry', 'changed',
refresh_create_plant_checkbutton_sensitivity)
self.assign_simple_handler('acc_id_qual_combo', 'id_qual',
editor.UnicodeOrNoneValidator())
self.assign_simple_handler('acc_private_check', 'private')
self.refresh_sensitivity()
refresh_create_plant_checkbutton_sensitivity()
if self.model not in self.session.new:
self.view.widgets.acc_ok_and_add_button.set_sensitive(True)
self.initializing = False
def populate_code_formats(self, entry_one=None, values=None):
logger.debug('populate_code_formats %s %s' % (entry_one, values))
ls = self.view.widgets.acc_code_format_liststore
if entry_one is None:
entry_one = ls.get_value(ls.get_iter_first(), 0)
ls.clear()
ls.append([entry_one])
if values is None:
query = self.session.\
query(meta.BaubleMeta).\
filter(meta.BaubleMeta.name.like('acidf_%')).\
order_by(meta.BaubleMeta.name)
if query.count():
Accession.code_format = query.first().value
values = [r.value for r in query]
for v in values:
ls.append([v])
def on_acc_code_format_comboentry_changed(self, widget, *args):
code_format = self.view.widget_get_value(widget)
code = Accession.get_next_code(code_format)
self.view.widget_set_value('acc_code_entry', code)
def on_acc_code_format_edit_btn_clicked(self, widget, *args):
view = editor.GenericEditorView(
os.path.join(paths.lib_dir(), 'plugins', 'garden',
'acc_editor.glade'),
root_widget_name='acc_codes_dialog')
ls = view.widgets.acc_codes_liststore
ls.clear()
query = self.session.\
query(meta.BaubleMeta).\
filter(meta.BaubleMeta.name.like('acidf_%')).\
order_by(meta.BaubleMeta.name)
for i, row in enumerate(query):
ls.append([i+1, row.value])
ls.append([len(ls)+1, ''])
class Presenter(editor.GenericEditorPresenter):
def on_acc_cf_renderer_edited(self, widget, iter, value):
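# keep a blank row at the bottom for entering a new format; clearing a
# row deletes it and renumbers the rows below it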
i = ls.get_iter_from_string(str(iter))
ls.set_value(i, 1, value)
if ls.iter_next(i) is None:
if value:
ls.append([len(ls)+1, ''])
elif value == '':
ls.remove(i)
while i:
ls.set_value(i, 0, ls.get_value(i, 0)-1)
i = ls.iter_next(i)
presenter = Presenter(ls, view, session=db.Session())
if presenter.start() > 0:
presenter.session.\
query(meta.BaubleMeta).\
filter(meta.BaubleMeta.name.like('acidf_%')).\
delete(synchronize_session=False)
i = 1
iter = ls.get_iter_first()
values = []
while iter:
value = ls.get_value(iter, 1)
iter = ls.iter_next(iter)
i += 1
if not value:
continue
obj = meta.BaubleMeta(name='acidf_%02d' % i,
value=value)
values.append(value)
presenter.session.add(obj)
self.populate_code_formats(values=values)
presenter.session.commit()
presenter.session.close()
def refresh_id_qual_rank_combo(self):
"""
Populate the id_qual_rank_combo with the parts of the species string
"""
combo = self.view.widgets.acc_id_qual_rank_combo
utils.clear_model(combo)
if not self.model.species:
return
model = Gtk.ListStore(str, str)
species = self.model.species
it = model.append([str(species.genus), 'genus'])
active = None
if self.model.id_qual_rank == 'genus':
active = it
it = model.append([str(species.sp), 'sp'])
if self.model.id_qual_rank == 'sp':
active = it
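# collect any infraspecific epithets (ranks 1-4) into a single 'infrasp' entry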
infrasp_parts = []
for level in (1, 2, 3, 4):
infrasp = [s for s in species.get_infrasp(level) if s is not None]
if infrasp:
infrasp_parts.append(' '.join(infrasp))
if infrasp_parts:
it = model.append([' '.join(infrasp_parts), 'infrasp'])
if self.model.id_qual_rank == 'infrasp':
active = it
# if species.infrasp:
# s = ' '.join([str(isp) for isp in species.infrasp])
# if len(s) > 32:
# s = '%s...' % s[:29]
# it = model.append([s, 'infrasp'])
# if self.model.id_qual_rank == 'infrasp':
# active = it
it = model.append(('', None))
if not active:
active = it
combo.set_model(model)
combo.set_active_iter(active)
def on_loc_button_clicked(self, button, target_widget, target_field):
logger.debug('on_loc_button_clicked %s, %s, %s, %s' %
(self, button, target_widget, target_field))
from bauble.plugins.garden.location import LocationEditor
editor = LocationEditor(parent=self.view.get_window())
if editor.start():
location = editor.presenter.model
self.session.add(location)
self.remove_problem(None, target_widget)
self.view.widget_set_value(target_widget, location)
self.set_model_attr(target_field, location)
def is_dirty(self):
if self.initializing:
return False
presenters = [self.ver_presenter, self.voucher_presenter,
self.notes_presenter, self.source_presenter]
dirty_kids = [p.is_dirty() for p in presenters]
return self._dirty or any(dirty_kids)
def on_recvd_type_comboentry_changed(self, combo, *args):
"""
"""
value = None
treeiter = combo.get_active_iter()
if treeiter:
value = combo.get_model()[treeiter][0]
else:
# the changed handler is fired again after
# combo.get_child().props.text is set, with the active iter set to None
return True
# the entry change handler does the validation of the model
combo.get_child().props.text = recvd_type_values[value]
def on_recvd_type_entry_changed(self, entry, *args):
"""
"""
problem = 'BAD_RECVD_TYPE'
text = entry.props.text
if not text.strip():
self.remove_problem(problem, entry)
self.set_model_attr('recvd_type', None)
return
model = self.view.widgets.acc_recvd_type_comboentry.get_model()
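# accept a match on either the stored value or its translated label,
# case-insensitively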
def match_func(row, data):
return str(row[0]).lower() == str(data).lower() or \
str(row[1]).lower() == str(data).lower()
results = utils.search_tree_model(model, text, match_func)
if results and len(results) == 1: # is match is unique
self.remove_problem(problem, entry)
self.set_model_attr('recvd_type', model[results[0]][0])
else:
self.add_problem(problem, entry)
self.set_model_attr('recvd_type', None)
def on_acc_code_entry_changed(self, entry, data=None):
text = entry.get_text()
query = self.session.query(Accession)
if text != self._original_code \
and query.filter_by(code=str(text)).count() > 0:
self.add_problem(self.PROBLEM_DUPLICATE_ACCESSION,
self.view.widgets.acc_code_entry)
self.set_model_attr('code', None)
return
self.remove_problem(self.PROBLEM_DUPLICATE_ACCESSION,
self.view.widgets.acc_code_entry)
if text == '':
self.set_model_attr('code', None)
else:
self.set_model_attr('code', utils.utf8(text))
def on_date_entry_changed(self, entry, prop):
"""handle changed signal.
used by acc_date_recvd_entry and acc_date_accd_entry
:param prop: the model property to change, should be
date_recvd or date_accd
"""
from bauble.editor import ValidatorError
value = None
PROBLEM = 'INVALID_DATE'
try:
value = editor.DateValidator().to_python(entry.props.text)
except ValidatorError as e:
logger.debug(e)
self.add_problem(PROBLEM, entry)
else:
self.remove_problem(PROBLEM, entry)
self.set_model_attr(prop, value)
def set_model_attr(self, field, value, validator=None):
"""
Set attributes on the model and update the GUI as expected.
"""
#debug('set_model_attr(%s, %s)' % (field, value))
super().set_model_attr(field, value, validator)
self._dirty = True
# TODO: add a test to make sure that the change notifiers are
# called in the expected order
prov_sensitive = True
wild_prov_combo = self.view.widgets.acc_wild_prov_combo
if field == 'prov_type':
if self.model.prov_type == 'Wild':
self.model.wild_prov_status = wild_prov_combo.get_active_text()
else:
# remove the value in the model from the wild_prov_combo
prov_sensitive = False
self.model.wild_prov_status = None
wild_prov_combo.set_sensitive(prov_sensitive)
if field == 'id_qual' and not self.model.id_qual_rank:
self.add_problem(self.PROBLEM_ID_QUAL_RANK_REQUIRED,
self.view.widgets.acc_id_qual_rank_combo)
else:
self.remove_problem(self.PROBLEM_ID_QUAL_RANK_REQUIRED)
self.refresh_sensitivity()
def validate(self, add_problems=False):
"""
Validate the self.model
"""
# TODO: if add_problems=True then we should add problems to
# all the required widgets that don't have values
if not self.model.code or not self.model.species:
return False
for ver in self.model.verifications:
ignore = ('id', 'accession_id', 'species_id', 'prev_species_id')
if utils.get_invalid_columns(ver, ignore_columns=ignore) or \
not ver.species or not ver.prev_species:
return False
for voucher in self.model.vouchers:
ignore = ('id', 'accession_id')
if utils.get_invalid_columns(voucher, ignore_columns=ignore):
return False
# validate the source if there is one
if self.model.source:
if utils.get_invalid_columns(self.model.source.collection):
return False
if utils.get_invalid_columns(self.model.source.propagation):
return False
if not self.model.source.propagation:
return True
prop = self.model.source.propagation
prop_ignore = ['id', 'propagation_id']
prop_model = None
if prop and prop.prop_type == 'Seed':
prop_model = prop._seed
elif prop and prop.prop_type == 'UnrootedCutting':
prop_model = prop._cutting
else:
logger.debug('AccessionEditorPresenter.validate(): unknown prop_type')
return True # let user save it anyway
if utils.get_invalid_columns(prop_model, prop_ignore):
return False
return True
def refresh_sensitivity(self):
"""
Refresh the sensitivity of the fields and accept buttons according
to the current values in the model.
"""
if self.model.species and self.model.id_qual:
self.view.widgets.acc_id_qual_rank_combo.set_sensitive(True)
else:
self.view.widgets.acc_id_qual_rank_combo.set_sensitive(False)
sensitive = self.is_dirty() and self.validate() \
and not self.problems \
and not self.source_presenter.all_problems() \
and not self.ver_presenter.problems \
and not self.voucher_presenter.problems
self.view.set_accept_buttons_sensitive(sensitive)
def refresh_view(self):
'''
get the values from the model and put them in the view
'''
date_format = prefs.prefs[prefs.date_format_pref]
for widget, field in self.widget_to_field_map.items():
if field == 'species_id':
value = self.model.species
else:
value = getattr(self.model, field)
self.view.widget_set_value(widget, value)
self.view.widget_set_value(
'acc_wild_prov_combo',
dict(wild_prov_status_values)[self.model.wild_prov_status],
index=1)
self.view.widget_set_value(
'acc_prov_combo',
dict(prov_type_values)[self.model.prov_type],
index=1)
self.view.widget_set_value(
'acc_recvd_type_comboentry',
recvd_type_values[self.model.recvd_type],
index=1)
self.view.widgets.acc_private_check.set_inconsistent(False)
self.view.widgets.acc_private_check.\
set_active(self.model.private is True)
sensitive = self.model.prov_type == 'Wild'
self.view.widgets.acc_wild_prov_combo.set_sensitive(sensitive)
def cleanup(self):
super().cleanup()
self.ver_presenter.cleanup()
self.voucher_presenter.cleanup()
self.source_presenter.cleanup()
def start(self):
self.source_presenter.start()
r = self.view.start()
return r
class AccessionEditor(editor.GenericModelViewPresenterEditor):
# these have to correspond to the response values in the view
RESPONSE_OK_AND_ADD = 11
RESPONSE_NEXT = 22
ok_responses = (RESPONSE_OK_AND_ADD, RESPONSE_NEXT)
def __init__(self, model=None, parent=None):
'''
:param model: Accession instance or None
:param parent: the parent widget
'''
if model is None:
model = Accession()
super().__init__(model, parent)
self.parent = parent
self._committed = []
view = AccessionEditorView(parent=parent)
self.presenter = AccessionEditorPresenter(self.model, view)
# set the default focus
if self.model.species is None:
view.widgets.acc_species_entry.grab_focus()
else:
view.widgets.acc_code_entry.grab_focus()
def handle_response(self, response):
'''
handle the response from self.presenter.start() in self.start()
'''
not_ok_msg = _('Are you sure you want to lose your changes?')
if response == Gtk.ResponseType.OK or response in self.ok_responses:
try:
if not self.presenter.validate():
# TODO: ideally the accept buttons wouldn't have
# been sensitive until validation had already
# succeeded but we'll put this here either way and
# show a message about filling in the fields
#
# msg = _('Some required fields have not been completed')
return False
if self.presenter.is_dirty():
self.commit_changes()
self._committed.append(self.model)
except DBAPIError as e:
msg = _('Error committing changes.\n\n%s') % \
utils.xml_safe(str(e.orig))
utils.message_details_dialog(msg, str(e), Gtk.MessageType.ERROR)
return False
except Exception as e:
msg = _('Unknown error when committing changes. See the '
'details for more information.\n\n%s') \
% utils.xml_safe(e)
utils.message_details_dialog(msg, traceback.format_exc(),
Gtk.MessageType.ERROR)
return False
elif (self.presenter.is_dirty() and utils.yes_no_dialog(not_ok_msg)) \
or not self.presenter.is_dirty():
self.session.rollback()
return True
else:
return False
# respond to responses
more_committed = None
if response == self.RESPONSE_NEXT:
self.presenter.cleanup()
e = AccessionEditor(parent=self.parent)
more_committed = e.start()
elif response == self.RESPONSE_OK_AND_ADD:
e = PlantEditor(Plant(accession=self.model), self.parent)
more_committed = e.start()
if more_committed is not None:
if isinstance(more_committed, list):
self._committed.extend(more_committed)
else:
self._committed.append(more_committed)
return True
def start(self):
from bauble.plugins.plants.species_model import Species
if self.session.query(Species).count() == 0:
msg = _('You must first add or import at least one species into '
'the database before you can add accessions.')
utils.message_dialog(msg)
return
while True:
#debug(self.presenter.source_presenter.source)
#debug(self.presenter.source_presenter.source.collection)
response = self.presenter.start()
self.presenter.view.save_state()
if self.handle_response(response):
break
self.session.close() # cleanup session
self.presenter.cleanup()
return self._committed
@staticmethod
def _cleanup_collection(model):
'''
Normalize a Collection before commit: latitude and longitude must be
given together or not at all, and accuracy values are cleared when the
corresponding coordinate or elevation data is missing.
'''
if not model:
return
# TODO: we should raise something besides a bare ValueError at
# commit time so we can give a meaningful response
if model.latitude is not None or model.longitude is not None:
if (model.latitude is not None and model.longitude is None) or \
(model.longitude is not None and model.latitude is None):
msg = _('model must have both latitude and longitude or '
'neither')
raise ValueError(msg)
elif model.latitude is None and model.longitude is None:
model.geo_accy = None # don't save
else:
model.geo_accy = None # don't save
# reset the elevation accuracy if the elevation is None
if model.elevation is None:
model.elevation_accy = None
return model
def commit_changes(self):
if self.model.source:
if not self.model.source.collection:
utils.delete_or_expunge(
self.presenter.source_presenter.collection)
if self.model.source.propagation:
self.model.source.propagation.clean()
else:
utils.delete_or_expunge(
self.presenter.source_presenter.propagation)
else:
utils.delete_or_expunge(
self.presenter.source_presenter.source)
utils.delete_or_expunge(
self.presenter.source_presenter.collection)
utils.delete_or_expunge(
self.presenter.source_presenter.propagation)
if self.model.id_qual is None:
self.model.id_qual_rank = None
# should we also add a plant for this accession?
if self.model.create_plant:
logger.debug('creating plant for new accession')
accession = self.model
location = accession.intended_location
plant = Plant(accession=accession, code='1', quantity=accession.quantity_recvd, location=location,
acc_type=accession_type_to_plant_material.get(self.model.recvd_type))
self.session.add(plant)
return super().commit_changes()
# import at the bottom to avoid circular dependencies
from bauble.plugins.plants.genus import Genus
from bauble.plugins.plants.species_model import Species, SpeciesSynonym
#
# infobox for searchview
#
# TODO: I don't think this shows all fields of an accession, like the
# accuracy values
class GeneralAccessionExpander(InfoExpander):
"""
generic information about an accession like
number of clones, provenance type, wild provenance type, species
"""
def __init__(self, widgets):
'''
:param widgets: the BuilderWidgets instance shared by the accession infobox
'''
super().__init__(_("General"), widgets)
general_box = self.widgets.general_box
self.widgets.general_window.remove(general_box)
self.vbox.pack_start(general_box, True, True, 0)
self.current_obj = None
self.private_image = self.widgets.acc_private_data
def on_species_clicked(*args):
select_in_search_results(self.current_obj.species)
utils.make_label_clickable(self.widgets.name_data, on_species_clicked)
def on_parent_plant_clicked(*args):
select_in_search_results(self.current_obj.source.plant_propagation.plant)
utils.make_label_clickable(self.widgets.parent_plant_data,
on_parent_plant_clicked)
def on_nplants_clicked(*args):
cmd = 'plant where accession.code="%s"' % self.current_obj.code
bauble.gui.send_command(cmd)
utils.make_label_clickable(self.widgets.nplants_data,
on_nplants_clicked)
def update(self, row):
'''
Update the expander's fields from the given Accession row.
'''
self.current_obj = row
self.widget_set_value('acc_code_data', '<big>%s</big>' %
utils.xml_safe(str(row.code)),
markup=True)
acc_private = self.widgets.acc_private_data
if row.private:
if acc_private.get_parent() != self.widgets.acc_code_box:
self.widgets.acc_code_box.pack_start(acc_private, True, True, 0)
else:
self.widgets.remove_parent(acc_private)
self.widget_set_value('name_data', row.species_str(markup=True, authors=True),
markup=True)
session = object_session(row)
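# tally living plant quantities per location, skipping plants with quantity 0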
plant_locations = {}
for plant in row.plants:
if plant.quantity == 0:
continue
q = plant_locations.setdefault(plant.location, 0)
plant_locations[plant.location] = q + plant.quantity
if plant_locations:
strs = []
for location, quantity in plant_locations.items():
strs.append(_('%(quantity)s in %(location)s')
% dict(location=str(location), quantity=quantity))
s = '\n'.join(strs)
else:
s = '0'
self.widget_set_value('living_plants_data', s)
nplants = session.query(Plant).filter_by(accession_id=row.id).count()
self.widget_set_value('nplants_data', nplants)
self.set_labeled_value('date_recvd', row.date_recvd)
self.set_labeled_value('date_accd', row.date_accd)
type_str = ''
if row.recvd_type:
type_str = recvd_type_values[row.recvd_type]
self.set_labeled_value('recvd_type', type_str)
quantity_str = ''
if row.quantity_recvd:
quantity_str = row.quantity_recvd
self.set_labeled_value('quantity_recvd', quantity_str)
prov_str = dict(prov_type_values)[row.prov_type]
if row.prov_type == 'Wild' and row.wild_prov_status:
prov_str = '%s (%s)' % \
(prov_str, dict(wild_prov_status_values)[row.wild_prov_status])
self.set_labeled_value('prov', prov_str)
image_size = Gtk.IconSize.MENU
stock = Gtk.STOCK_NO
if row.private:
stock = Gtk.STOCK_YES
self.widgets.private_image.set_from_stock(stock, image_size)
loc_map = (('intended_loc', 'intended_location'),
('intended2_loc', 'intended2_location'))
set_count = False
for prefix, attr in loc_map:
location_str = ''
location = getattr(row, attr)
if location:
set_count = True
if location.name and location.code:
location_str = '%s (%s)' % (location.name,
location.code)
elif location.name and not location.code:
location_str = '%s' % location.name
elif not location.name and location.code:
location_str = '(%s)' % location.code
self.set_labeled_value(prefix, location_str)
self.widgets['intended_loc_separator'].set_visible(set_count)
class SourceExpander(InfoExpander):
def __init__(self, widgets):
super().__init__(_('Source'), widgets)
source_box = self.widgets.source_box
self.widgets.source_window.remove(source_box)
self.vbox.pack_start(source_box, True, True, 0)
def update_collection(self, collection):
self.widget_set_value('loc_data', collection.locale)
self.widget_set_value('datum_data', collection.gps_datum)
geo_accy = collection.geo_accy
if not geo_accy:
geo_accy = ''
else:
geo_accy = '(+/- %sm)' % geo_accy
lat_str = ''
if collection.latitude is not None:
dir, deg, min, sec = latitude_to_dms(collection.latitude)
lat_str = '%s (%s %s°%s\'%.2f") %s' % \
(collection.latitude, dir, deg, min, sec, geo_accy)
self.widget_set_value('lat_data', lat_str)
long_str = ''
if collection.longitude is not None:
dir, deg, min, sec = longitude_to_dms(collection.longitude)
long_str = '%s (%s %s°%s\'%.2f") %s' % \
(collection.longitude, dir, deg, min, sec, geo_accy)
self.widget_set_value('lon_data', long_str)
elevation = ''
if collection.elevation:
elevation = '%sm' % collection.elevation
if collection.elevation_accy:
elevation += ' (+/- %sm)' % collection.elevation_accy
self.widget_set_value('elev_data', elevation)
self.widget_set_value('coll_data', collection.collector)
self.widget_set_value('date_data', collection.date)
self.widget_set_value('collid_data', collection.collectors_code)
self.widget_set_value('habitat_data', collection.habitat)
self.widget_set_value('collnotes_data', collection.notes)
def update(self, row):
if not row.source:
self.props.expanded = False
self.props.sensitive = False
return
if row.source.source_detail:
self.widgets.source_name_label.props.visible = True
self.widgets.source_name_data.props.visible = True
self.widget_set_value('source_name_data',
utils.utf8(row.source.source_detail))
def on_source_clicked(w, e, x):
select_in_search_results(x)
utils.make_label_clickable(self.widgets.source_name_data,
on_source_clicked,
row.source.source_detail)
else:
self.widgets.source_name_label.props.visible = False
self.widgets.source_name_data.props.visible = False
sources_code = ''
if row.source.sources_code:
sources_code = row.source.sources_code
self.widget_set_value('sources_code_data', utils.utf8(sources_code))
if row.source.plant_propagation:
self.widgets.parent_plant_label.props.visible = True
self.widgets.parent_plant_eventbox.props.visible = True
self.widget_set_value('parent_plant_data',
str(row.source.plant_propagation.plant))
self.widget_set_value('propagation_data',
row.source.plant_propagation.get_summary())
else:
self.widgets.parent_plant_label.props.visible = False
self.widgets.parent_plant_eventbox.props.visible = False
prop_str = ''
if row.source.propagation:
prop_str = row.source.propagation.get_summary()
self.widget_set_value('propagation_data', prop_str)
if row.source.collection:
self.widgets.collection_expander.props.expanded = True
self.widgets.collection_expander.props.sensitive = True
self.update_collection(row.source.collection)
else:
self.widgets.collection_expander.props.expanded = False
self.widgets.collection_expander.props.sensitive = False
class VerificationsExpander(InfoExpander):
"""
the accession's notes
"""
def __init__(self, widgets):
super().__init__(_("Verifications"), widgets)
# notes_box = self.widgets.notes_box
# self.widgets.notes_window.remove(notes_box)
# self.vbox.pack_start(notes_box, True, True, 0)
def update(self, row):
pass
#self.widget_set_value('notes_data', row.notes)
class VouchersExpander(InfoExpander):
"""
the accession's notes
"""
def __init__(self, widgets):
super().__init__(_("Vouchers"), widgets)
def update(self, row):
for kid in self.vbox.get_children():
self.vbox.remove(kid)
if not row.vouchers:
self.set_expanded(False)
self.set_sensitive(False)
return
# TODO: should save/restore the expanded state of the vouchers
self.set_expanded(True)
self.set_sensitive(True)
parents = [v for v in row.vouchers if v.parent_material]
for voucher in parents:
s = '%s %s (parent)' % (voucher.herbarium, voucher.code)
label = Gtk.Label(label=s)
label.set_alignment(0.0, 0.5)
self.vbox.pack_start(label, True, True, 0)
label.show()
not_parents = [v for v in row.vouchers if not v.parent_material]
for voucher in not_parents:
s = '%s %s' % (voucher.herbarium, voucher.code)
label = Gtk.Label(label=s)
label.set_alignment(0.0, 0.5)
self.vbox.pack_start(label, True, True, 0)
label.show()
class AccessionInfoBox(InfoBox):
"""
- general info
- source
"""
def __init__(self):
super().__init__()
filename = os.path.join(paths.lib_dir(), "plugins", "garden",
"acc_infobox.glade")
self.widgets = utils.BuilderWidgets(filename)
self.general = GeneralAccessionExpander(self.widgets)
self.add_expander(self.general)
self.source = SourceExpander(self.widgets)
self.add_expander(self.source)
# self.vouchers = VouchersExpander(self.widgets)
# self.add_expander(self.vouchers)
# self.verifications = VerificationsExpander(self.widgets)
# self.add_expander(self.verifications)
self.links = view.LinksExpander('notes')
self.add_expander(self.links)
self.mapinfo = MapInfoExpander(self.get_map_extents)
self.add_expander(self.mapinfo)
self.props = PropertiesExpander()
self.add_expander(self.props)
def get_map_extents(self, accession):
result = []
for plant in accession.plants:
try:
result.append(plant.coords)
except Exception:
# skip plants that have no geographic coordinates
pass
return result
def update(self, row):
if isinstance(row, Collection):
row = row.source.accession
self.general.update(row)
self.mapinfo.update(row)
self.props.update(row)
# if row.verifications:
# self.verifications.update(row)
# self.verifications.set_expanded(row.verifications != None)
# self.verifications.set_sensitive(row.verifications != None)
# self.vouchers.update(row)
urls = [x for x in [utils.get_urls(note.note) for note in row.notes] if x != []]
if not urls:
self.links.props.visible = False
self.links._sep.props.visible = False
else:
self.links.props.visible = True
self.links._sep.props.visible = True
self.links.update(row)
self.source.props.sensitive = True
self.source.update(row)
#
# Map Datum List - this list should be available as a list of completions for
# the datum text entry....the best way is that is to show the abbreviation
# with the long string in parenthesis or with different markup but selecting
# the completion will enter the abbreviation....though the entry should be
# free text....this list comes courtesy of:
# http://www8.garmin.com/support/faqs/MapDatumList.pdf
#
# Abbreviation: Name
datums = {"Adindan": "Adindan- Ethiopia, Mali, Senegal, Sudan",
"Afgooye": "Afgooye- Somalia",
"AIN EL ABD": "'70 AIN EL ANBD 1970- Bahrain Island, Saudi Arabia",
"Anna 1 Ast '65": "Anna 1 Astro '65- Cocos I.",
"ARC 1950": "ARC 1950- Botswana, Lesotho, Malawi, Swaziland, Zaire, Zambia",
"ARC 1960": "Kenya, Tanzania",
"Ascnsn Isld '58": "Ascension Island '58- Ascension Island",
"Astro Dos 71/4": "Astro Dos 71/4- St. Helena",
"Astro B4 Sorol": "Sorol Atoll- Tern Island",
"Astro Bcn \"E\"": "Astro Beacon \"E\"- Iwo Jima",
"Astr Stn '52": "Astronomic Stn '52- Marcus Island",
"Aus Geod '66": "Australian Geod '66- Australia, Tasmania Island",
"Aus Geod '84": "Australian Geod '84- Australia, Tasmania Island",
"Austria": "Austria",
"Bellevue (IGN)": "Efate and Erromango Islands",
"Bermuda 1957": "Bermuda 1957- Bermuda Islands",
"Bogota Observ": "Bogata Obsrvatry- Colombia",
"Campo Inchspe": "Campo Inchauspe- Argentina",
"Canton Ast '66": "Canton Astro 1966- Phoenix Islands",
"Cape": "Cape- South Africa",
"Cape Canavrl": "Cape Canaveral- Florida, Bahama Islands",
"Carthage": "Carthage- Tunisia",
"CH-1903": "CH 1903- Switzerland",
"Chatham 1971": "Chatham 1971- Chatham Island (New Zealand)",
"Chua Astro": "Chua Astro- Paraguay",
"Corrego Alegr": "Corrego Alegre- Brazil",
"Croatia": "Croatia",
"Djakarta": "Djakarta (Batavia)- Sumatra Island (Indonesia)",
"Dos 1968": "Dos 1968- Gizo Island (New Georgia Islands)",
"Dutch": "Dutch",
"Easter Isld 67": "Easter Island 1967",
"European 1950": "European 1950- Austria, Belgium, Denmark, Finland, France, Germany, Gibraltar, Greece, Italy, Luxembourg, Netherlands, Norway, Portugal, Spain, Sweden, Switzerland",
"European 1979": "European 1979- Austria, Finland, Netherlands, Norway, Spain, Sweden, Switzerland",
"Finland Hayfrd": "Finland Hayford- Finland",
"Gandajika Base": "Gandajika Base- Republic of Maldives",
"GDA": "Geocentric Datum of Australia",
"Geod Datm '49": "Geodetic Datum '49- New Zealand",
"Guam 1963": "Guam 1963- Guam Island",
"Gux 1 Astro": "Guadalcanal Island",
"Hjorsey 1955": "Hjorsey 1955- Iceland",
"Hong Kong '63": "Hong Kong",
"Hu-Tzu-Shan": "Taiwan",
"Indian Bngldsh": "Indian- Bangladesh, India, Nepal",
"Indian Thailand": "Indian- Thailand, Vietnam",
"Indonesia 74": "Indonesia 1974- Indonesia",
"Ireland 1965": "Ireland 1965- Ireland",
"ISTS 073 Astro": "ISTS 073 ASTRO '69- Diego Garcia",
"Johnston Island": "Johnston Island NAD27 Central",
"Kandawala": "Kandawala- Sri Lanka",
"Kergueln Islnd": "Kerguelen Island",
"Kertau 1948": "West Malaysia, Singapore",
"L.C. 5 Astro": "Cayman Brac Island",
"Liberia 1964": "Liberia 1964- Liberia",
"Luzon Mindanao": "Luzon- Mindanao Island",
"Luzon Philippine": "Luzon- Philippines (excluding Mindanao Isl.)",
"Mahe 1971": "Mahe 1971- Mahe Island",
"Marco Astro": "Marco Astro- Salvage Isl.",
"Massawa": "Massawa- Eritrea (Ethiopia)",
"Merchich": "Merchich- Morocco",
"Midway Ast '61": "Midway Astro '61- Midway",
"Minna": "Minna- Nigeria",
"NAD27 Alaska": "North American 1927- Alaska",
"NAD27 Bahamas": "North American 1927- Bahamas",
"NAD27 Canada": "North American 1927- Canada and Newfoundland",
"NAD27 Canal Zn": "North American 1927- Canal Zone",
"NAD27 Caribbn": "North American 1927- Caribbean (Barbados, Caicos Islands, Cuba, Dominican Repuplic, Grand Cayman, Jamaica, Leeward and Turks Islands)",
"NAD27 Central": "North American 1927- Central America (Belize, Costa Rica, El Salvador, Guatemala, Honduras, Nicaragua)",
"NAD27 CONUS": "North American 1927- Mean Value (CONUS)",
"NAD27 Cuba": "North American 1927- Cuba",
"NAD27 Grnland": "North American 1927- Greenland (Hayes Peninsula)",
"NAD27 Mexico": "North American 1927- Mexico",
"NAD27 San Sal": "North American 1927- San Salvador Island",
"NAD83": "North American 1983- Alaska, Canada, Central America, CONUS, Mexico",
"Naparima BWI": "Naparima BWI- Trinidad and Tobago",
"Nhrwn Masirah": "Nahrwn- Masirah Island (Oman)",
"Nhrwn Saudi A": "Nahrwn- Saudi Arabia",
"Nhrwn United A": "Nahrwn- United Arab Emirates",
"Obsrvtorio '66": "Observatorio 1966- Corvo and Flores Islands (Azores)",
"Old Egyptian": "Old Egyptian- Egypt",
"Old Hawaiian": "Old Hawaiian- Mean Value",
"Oman": "Oman- Oman",
"Old Srvy GB": "Old Survey Great Britain- England, Isle of Man, Scotland, Shetland Isl., Wales",
"Pico De Las Nv": "Canary Islands",
"Potsdam": "Potsdam-Germany",
"Prov S Am '56": "Prov Amricn '56- Bolivia, Chile,Colombia, Ecuador, Guyana, Peru, Venezuela",
"Prov S Chln '63": "So. Chilean '63- S. Chile",
"Ptcairn Ast '67": "Pitcairn Astro '67- Pitcairn",
"Puerto Rico": "Puerto Rico & Virgin Isl.",
"Qatar National": "Qatar National- Qatar South Greenland",
"Qornoq": "Qornoq- South Greenland",
"Reunion": "Reunion- Mascarene Island",
"Rome 1940": "Rome 1940- Sardinia Isl.",
"RT 90": "Sweden",
"Santo (Dos)": "Santo (Dos)- Espirito Santo",
"Sao Braz": "Sao Braz- Sao Miguel, Santa Maria Islands",
"Sapper Hill '43": "Sapper Hill 1943- East Falkland Island",
"Schwarzeck": "Schwarzeck- Namibia",
"SE Base": "Southeast Base- Porto Santo and Madiera Islands",
"South Asia": "South Asia- Singapore",
"Sth Amrcn '69": "S. American '69- Argentina, Bolivia, Brazil, Chile, Colombia, Ecuador, Guyana, Paraguay, Peru, Venezuela, Trin/Tobago",
"SW Base": "Southwest Base- Faial, Graciosa, Pico, Sao Jorge and Terceira",
"Taiwan": "Taiwan",
"Timbalai 1948": "Timbalai 1948- Brunei and E. Malaysia (Sarawak and Sabah)",
"Tokyo": "Tokyo- Japan, Korea, Okinawa",
"Tristan Ast '68": "Tristan Astro 1968- Tristan da Cunha",
"Viti Levu 1916": "Viti Levu 1916- Viti Levu/Fiji Islands",
"Wake-Eniwetok": "Wake-Eniwetok- Marshall",
"WGS 72": "World Geodetic System 72",
"WGS 84": "World Geodetic System 84",
"Zanderij": "Zanderij- Surinam (excluding San Salvador Island)",
"User": "User-defined custom datum"}
| gpl-2.0 | -6,980,470,676,698,707,000 | 39.042459 | 193 | 0.580702 | false |
bluedynamics/activities.runtime | setup.py | 1 | 1857 | # -*- coding: utf-8 -*-
#
# Copyright 2009: Johannes Raggam, BlueDynamics Alliance
# http://bluedynamics.com
# GNU Lesser General Public License Version 2 or later
__author__ = """Johannes Raggam <[email protected]>"""
__docformat__ = 'plaintext'
from setuptools import setup, find_packages
import sys, os
version = '1.0'
shortdesc ="Runtime engine for activities"
longdesc = open(os.path.join(os.path.dirname(__file__), 'README.txt')).read()
longdesc += open(os.path.join(os.path.dirname(__file__), 'LICENSE.txt')).read()
setup(name='activities.runtime',
version=version,
description=shortdesc,
long_description=longdesc,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Zope3',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules'
], # Get strings from http://pypi.python.org/pypi?:action=list_classifiers
keywords='UML Activities runtime',
author='Johannes Raggam',
author_email='[email protected]',
url='',
license='LGPL',
packages = find_packages('src'),
package_dir = {'': 'src'},
namespace_packages=['activities'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
# -*- Extra requirements: -*
'activities.metamodel',
'zope.interface',
'zope.component',
],
extras_require={
'test': [
'interlude',
'activities.transform.xmi',
]
},
entry_points="""
# -*- Entry points: -*-
""",
)
| lgpl-3.0 | -5,878,272,516,904,475,000 | 31.578947 | 91 | 0.58643 | false |
lukasgarcya/django-tutorial | financeiro/financeiro/settings.py | 1 | 3178 | """
Django settings for financeiro project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!lxp^uu&up613=mod5#6ect62-8(k66_fh2u*a$=y!7_yibsbi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'financeiro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'financeiro.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| gpl-3.0 | 2,011,637,693,139,170,300 | 25.264463 | 91 | 0.690686 | false |
yashpungaliya/MailingListParser | lib/input/check_headers.py | 1 | 11571 | import email
import imaplib
import json
from input.imap.connection import open_connection
from input.imap.header import get_mail_header
from util.read_utils import lines_per_n
def get_unavailable_uid():
"""
This function returns a list of UIDs that are not available in the IMAP server
:return: List containing the UIDs not available in the IMAP server
"""
imaplib._MAXLINE = 800000
conn = open_connection()
conn.select('INBOX')
search_str = 'UID ' + '1:*'
retcode, uids = conn.uid('SEARCH', None, search_str)
available_uid = []
for uid in uids[0].split():
available_uid.append(int(uid))
try:
conn.close()
except:
pass
conn.logout()
return set(range(min(available_uid), max(available_uid)+1)) - set(available_uid)
# This list stores the UIDs of mails that have duplicate entries in the JSON file.
duplicate_uid = set()
# This set stores the UIDs of mails that don't have an entry in the JSON file - UIDs are consecutive numbers.
missing_uid = set()
# This list stores the UIDs of mails that have entries with insufficient entries in the JSON file.
invalid_uid = set()
# This set stores the UIDs of mails that are not forwarded from LKML subscription which is stored in a text file.
unwanted_uid = set()
# This set stores the UIDs for which corresponding mails are not available in the IMAP server.
unavailable_uid = set()
last_uid_read = 0
def check_validity(check_unavailable_uid='False', json_header_filename='headers.json'):
"""
This function checks for and prints duplicate, missing, and invalid objects in the "headers.json" file.
This function can be run first to generate a list of duplicate, missing, or invalid objects' UIDs which
can then be used to add or remove their entries from the JSON file.
:return: Last UID that was checked by the function.
"""
previous_uid = 0
# The "read_uid" set is used to keep track of all the UIDs that have been read from the JSON file.
# In case a duplicate exists, it would be read twice and hence would fail the set membership test.
read_uid = set([])
# This variable contains the last UID that was checked. This variable is returned by the function.
last_valid_uid = 0
header_attrib = {'Message-ID', 'From', 'To', 'Cc', 'In-Reply-To', 'Time'}
# Read UIDs of mails that are not forwarded from LKML subscription which is stored in a text file.
with open(json_header_filename, 'r') as json_file:
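        # each stored header is a pretty-printed JSON object that spans 9 lines;
        # the rewrite functions below dump entries back in the same shape (indent=1)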
for chunk in lines_per_n(json_file, 9):
try:
json_obj = json.loads(chunk)
except:
print("Unreadable JSON object after UID: " + str(previous_uid))
break
# Checking for duplicate objects
if not json_obj['Message-ID'] in read_uid:
read_uid.add(json_obj['Message-ID'])
else:
duplicate_uid.add(json_obj['Message-ID'])
# Check if the JSON object has sufficient attributes by checking if "header_attrib" is a subset of its keys
if not set(header_attrib) <= json_obj.keys() or json_obj['Time'] is None:
invalid_uid.add(json_obj['Message-ID'])
            # Check if it is a mail that is sent directly to "[email protected]", in which case it has not been
# forwarded from the LKML subscription.
if json_obj['To'] == "[email protected]":
unwanted_uid.add(json_obj['Message-ID'])
previous_uid = json_obj['Message-ID']
# Calculate the missing UIDs by performing a set difference on all the UIDs possible till the highest UID read
# from the actual UIDs that have been read.
if previous_uid != 0:
global last_uid_read
last_uid_read = max(read_uid)
global missing_uid
missing_uid = set(range(min(read_uid), last_uid_read+1)) - read_uid
global unavailable_uid
if check_unavailable_uid:
unavailable_uid = get_unavailable_uid()
print("Unavailable UIDs: ", unavailable_uid if len(unavailable_uid) > 0 else "None")
with open("unwanted_uid.txt", 'a') as unw_file:
for uid in unwanted_uid:
unw_file.write(str(uid) + '\n')
print("Unwanted UIDs: ", unwanted_uid if len(unwanted_uid) > 0 else "None")
print("Duplicate UIDs: ", duplicate_uid if len(duplicate_uid) > 0 else "None")
print("Missing UIDs: ", missing_uid if len(missing_uid) > 0 else "None")
print("Invalid UIDs: ", invalid_uid if len(invalid_uid) > 0 else "None")
return last_uid_read
def remove_unwanted_headers(to_remove=unwanted_uid, json_header_filename='headers.json'):
"""
This function removes all the UIDs specified in the to_remove parameter. By default, it removes all the unwanted
entries in the JSON file, i.e. the list of UIDs of mails that are not forwarded from LKML subscription.
:param to_remove: A list of UIDs that need to be removed. Default value is the list of unwanted mails' UIDs
"""
if len(to_remove) > 0:
print("Removing unwanted headers...")
# This list contains a list of JSON objects that need to be written to file
write_to_file = []
with open(json_header_filename, 'r') as json_file:
for chunk in lines_per_n(json_file, 9):
json_obj = json.loads(chunk)
if not json_obj['Message-ID'] in unwanted_uid:
write_to_file.append(json_obj)
with open(json_header_filename, 'w') as json_file:
for json_obj in write_to_file:
json.dump(json_obj, json_file, indent=1)
json_file.write("\n")
def remove_duplicate_headers(to_remove=duplicate_uid, json_header_filename='headers.json'):
"""
This function removes all the duplicate entries of the UIDs specified in the to_remove parameter. By default,
it removes all the duplicate entries in the JSON file.
:param to_remove: A list of UIDs that need to be removed. Default value is the list of duplicate mails' UIDs.
"""
# The "read_uid" set is used to keep track of all the UIDs that have been read from the JSON file.
# In case a duplicate exists, it would be read twice and hence would fail the set membership test.
read_uid = set([])
if len(to_remove) > 0:
print("Removing duplicate headers...")
# This list contains a list of JSON objects that need to be written to file
write_to_file = []
with open(json_header_filename, 'r') as json_file:
for chunk in lines_per_n(json_file, 9):
json_obj = json.loads(chunk)
if not json_obj['Message-ID'] in read_uid:
write_to_file.append(json_obj)
read_uid.add(json_obj['Message-ID'])
with open(json_header_filename, 'w') as json_file:
for json_obj in write_to_file:
json.dump(json_obj, json_file, indent=1)
json_file.write("\n")
def add_missing_headers(to_add=missing_uid, unwanted_uid_filename="unwanted_uid.txt"):
"""
This function adds the mails that have been missed out, considering the fact that UIDs are consecutive.
If a mail that is missing in the JSON file is not available or has been deleted, this function ignores that UID.
:param to_add: A list of UIDs that need to be added. Default value is the list of missing mails' UIDs.
"""
# To prevent replacement of mails that are not forwarded from the LKML subscription:
with open(unwanted_uid_filename, 'r') as unw_file:
for line in unw_file:
unwanted_uid.add(int(line.strip()))
to_add = [x for x in to_add if x not in unwanted_uid]
# To prevent attempts to replace mails are known to be not available in the IMAP server:
to_add = [x for x in to_add if x not in unavailable_uid]
if len(to_add) > 0:
print("Fetching missing headers...")
get_mail_header(to_add, False)
def replace_invalid_headers(to_replace=invalid_uid, json_header_filename="headers.json"):
"""
This function removes the mail headers that have insufficient attributes and fetches those headers again.
If an attribute is missing in the original mail header or if the mail has been deleted, this function ignores that UID.
:param to_replace: A list of UIDs that need to be replaced. Default value is the list of invalid mails' UIDs.
"""
if len(to_replace) > 0:
print("Replacing invalid headers...")
# This list contains a list of JSON objects that need to be written to file
write_to_file = []
with open(json_header_filename, 'r') as json_file:
for chunk in lines_per_n(json_file, 9):
json_obj = json.loads(chunk)
if not json_obj['Message-ID'] in invalid_uid:
write_to_file.append(json_obj)
with open(json_header_filename, 'w') as json_file:
for json_obj in write_to_file:
json.dump(json_obj, json_file, indent=1)
json_file.write("\n")
add_missing_headers(to_replace)
def write_uid_map(from_index=1, to_index=last_uid_read, uid_map_filename="thread_uid_map.json"):
"""
To ensure that references are correctly recorded in the JSON file such that there are no references to mails that
do not exist and to ease the processing of headers, a map with the string in the Message-Id field of the header to
the UID of the mail is required. This function fetches the headers from the IMAP server and adds the required
pairs of Message_ID and UID to the JSON file.
:param from_index: Fetches headers from this UID onwards.
:param to_index: Fetches headers till this UID (non inclusive).
"""
with open(uid_map_filename, 'r') as map_file:
uid_msg_id_map = json.load(map_file)
map_file.close()
to_get = list(range(from_index, to_index))
imaplib._MAXLINE = 800000
conn = open_connection()
try:
conn.select('INBOX')
for num in to_get:
            # conn.uid() converts the arguments provided to an IMAP command to fetch the mail using the UID specified by num
# Uncomment the line below to fetch the entire message rather than just the mail headers.
# typ, msg_header = conn.uid('FETCH', num, '(RFC822)')
typ, msg_header = conn.uid('FETCH', str(num), '(RFC822.HEADER)')
for response_part in msg_header:
if isinstance(response_part, tuple):
print("Processing mail #", num)
# "response_part" contains the required info as a byte stream.
# This has to be converted to a message stream using the email module
original = email.message_from_bytes(response_part[1])
# The splicing is done as to remove the '<' and '>' from the message-id string
uid_msg_id_map[original['Message-ID'][1:-1]] = num
finally:
try:
conn.close()
except:
pass
conn.logout()
with open(uid_map_filename, mode='w', encoding='utf-8') as f:
json.dump(uid_msg_id_map, f, indent=1)
f.close()
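# A possible way to chain the checks above, in the order suggested by the
# check_validity() docstring. File names are the module defaults; the arguments
# are the module-level sets that check_validity() fills in.
if __name__ == '__main__':
    last_uid = check_validity(check_unavailable_uid=True)
    remove_unwanted_headers(unwanted_uid)      # mails sent directly to the archive address
    remove_duplicate_headers(duplicate_uid)    # keep only the first copy of each UID
    add_missing_headers(missing_uid)           # re-fetch UIDs absent from headers.json
    replace_invalid_headers(invalid_uid)       # re-fetch entries with missing attributes
    write_uid_map(1, last_uid)                 # rebuild the Message-ID -> UID map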
| gpl-3.0 | 2,200,656,162,760,982,300 | 42.164122 | 124 | 0.626739 | false |
arnehilmann/sunstone-rest-client | src/main/python/sunstone_rest_client/__init__.py | 1 | 8210 | #!/usr/bin/env python
from __future__ import print_function
import json
import logging
import re
import time
import requests
from bs4 import BeautifulSoup
try:
from logging import NullHandler
except ImportError:
from sunstone_rest_client.util import NullHandler
class LoginFailedException(Exception):
pass
class NotFoundException(Exception):
pass
class ReplyException(Exception):
pass
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
class RestClient(object):
vm_details = {"/log": "vm_log", "": "VM"}
def __init__(self, url, verify=True, use_cache=True, disable_urllib3_warnings=True, simple_logging=False):
self.url = url.rstrip("/")
self.username = None
self.password = None
self.csrftoken = None
self.client_opts = {}
self.verify = verify
self.use_cache = use_cache
self.failed_login = False
self.cache = {}
self.session = None
if disable_urllib3_warnings:
logger.debug("disabling urllib3 warning of requests packages")
requests.packages.urllib3.disable_warnings()
if simple_logging:
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(sh)
def login(self, username, password, **kwargs):
self.username = username
self.password = password
logger.debug("login url: %s" % self.url)
logger.debug("login username: %s" % self.username)
self.failed_login = False
return self
def _login(self):
if self.failed_login:
raise LoginFailedException("login failed too often, giving up here...")
for i in range(10):
self.session = requests.session() # TODO is it really necessary to start a new session on every iteration?
try:
login = self.session.post(self.url + "/login", auth=(self.username, self.password))
if not login.ok:
self.failed_login = True
raise LoginFailedException("login failed too often, giving up here...")
except Exception as e:
logger.debug("%s: retrying" % str(e))
time.sleep(.2)
continue
logger.debug("sunstone session cookie: %s" % self.session.cookies.get("sunstone"))
time.sleep(.2)
root = self.session.get(self.url, headers={'Referer': self.url})
if self.session.cookies.get("one-user"):
break
time.sleep(.2)
if not self.session.cookies.get("one-user"):
raise LoginFailedException("credentials supposedly okay, but authorization handshake failed repeatedly")
self.csrftoken = find_csrftoken(root.content)
if not self.csrftoken:
raise LoginFailedException("no csrftoken found in %s" % self.url)
self.client_opts["csrftoken"] = self.csrftoken
for i in range(5):
try:
logger.debug("checking session, fetching random vm details, awaiting status != 401 (Unauthorized)")
self.get_vm_detail(333333, "log")
break
except NotFoundException:
break
except Exception as e:
if i == 10:
raise LoginFailedException("login and csrftoken okay, but still not authorized, giving up!", e)
logger.debug(e)
time.sleep(.2)
return self
def _fetch(self, endpoint=""):
endpoint = endpoint if endpoint.startswith("/") else "/" + endpoint
if endpoint in self.cache:
return self.cache[endpoint]
if not self.csrftoken:
self._login()
reply = self.session.get(self.url + endpoint, params=self.client_opts)
if not reply.ok:
if reply.status_code == 404:
raise NotFoundException(endpoint)
raise ReplyException("unable to fetch %s: %i %s" % (endpoint, reply.status_code, reply.reason), reply)
reply_json = reply.json()
if self.use_cache:
self.cache[endpoint] = reply_json
return reply_json
def fetch_vms(self):
vms = self._fetch("vm")["VM_POOL"]["VM"]
if isinstance(vms, dict):
return [vms]
return vms if vms else []
def get_vm_by_id(self, id):
id = str(id)
for vm in self.fetch_vms():
if vm["ID"] == id:
return vm
def get_vm_detail(self, id, detail=None):
if detail:
detail = detail if detail.startswith("/") else "/" + detail
detail = detail if detail else ""
toplevel = RestClient.vm_details.get(detail)
if toplevel:
return self._fetch("/vm/%s%s" % (id, detail)).get(toplevel)
return self._fetch("/vm/%s%s" % (id, detail))
def get_multiple_vms_by_name(self, name):
for vm in self.fetch_vms():
if vm["NAME"] == name:
yield vm
def get_first_vm_by_name(self, name):
return next(self.get_multiple_vms_by_name(name))
def fetch_templates(self):
templates = self._fetch("vmtemplate")["VMTEMPLATE_POOL"]["VMTEMPLATE"]
if isinstance(templates, dict):
templates = [templates]
return templates
def get_template_by_id(self, id):
id = str(id)
for template in self.fetch_templates():
if template["UID"] == id:
return template
return {}
def get_multiple_templates_by_name(self, name):
for template in self.fetch_templates():
if template["NAME"] == name:
yield template
def get_first_template_by_name(self, name):
return next(self.get_multiple_templates_by_name(name))
def _action(self, endpoint, perform, params):
action = {"action": {"perform": perform, "params": params}, "csrftoken": self.csrftoken}
reply = self.session.post(self.url + endpoint, data=json.dumps(action))
return reply
def instantiate(self, template, vm_name):
endpoint = "vmtemplate/%s/action" % template["UID"]
params = {"vm_name": vm_name, "hold": False, "template": template["TEMPLATE"]}
return self._action(endpoint, "instantiate", params)
def instantiate_by_name(self, template_name, vm_name):
template = self.get_first_template_by_name(template_name)
return self.instantiate(template, vm_name)
def instantiate_by_id(self, template_id, vm_name):
template = self.get_template_by_id(template_id)
return self.instantiate(template, vm_name)
def delete_vm(self, vm):
return self.delete_vm_by_id(vm["ID"])
def delete_multiple_vms_by_name(self, name):
replies = {}
for vm in self.get_multiple_vms_by_name(name):
replies[vm["ID"]] = self.delete_vm(vm)
return replies
def delete_vm_by_id(self, vm_id):
data = "csrftoken=%s" % self.csrftoken
endpoint = "vm/%s" % vm_id
reply = self.session.delete(self.url + endpoint,
data=data,
headers={"Content-Type":
"application/x-www-form-urlencoded; charset=UTF-8"})
return reply
def fetch_hosts(self):
hosts = self._fetch("host")["HOST_POOL"]["HOST"]
if isinstance(hosts, dict):
return [hosts]
return hosts if hosts else []
def find_csrftoken(html):
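    """Pull the value of the inline "var csrftoken = '...';" assignment out of the
    Sunstone page; returns None when no such script is found."""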
soup = BeautifulSoup(html, 'html.parser')
for script in soup.findAll('script'):
match = re.search('var csrftoken\s*=\s*["\'](.*)["\']\s*;', script.text)
if match:
return match.group(1)
return None
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
client = RestClient("http://localhost:9869").login("oneadmin", "opennebula")
print(client.get_vm_detail(38))
print(client.get_vm_detail(38, "log"))
| apache-2.0 | -761,745,019,978,906,100 | 33.351464 | 119 | 0.583191 | false |
TommyN94/CodeEvalSolutions | SwapCase.py | 1 | 1155 | # Swap Case
#
# https://www.codeeval.com/open_challenges/96/
#
# Challenge Description: Write a program which swaps letters' case in a
# sentence. All non-letter characters should remain the same.
import string
import sys
def swap_case(input_string):
swapped_string = ''
for char in input_string:
if char in string.ascii_lowercase:
swapped_string += char.upper()
elif char in string.ascii_uppercase:
swapped_string += char.lower()
else:
swapped_string += char
return swapped_string
def swap_case2(input_string):
n_char = len(input_string)
swapped_characters = [None] * n_char
for i in range(n_char):
char = input_string[i]
if char in string.ascii_lowercase:
swapped_characters[i] = char.upper()
elif char in string.ascii_uppercase:
swapped_characters[i] = char.lower()
else:
swapped_characters[i] = char
return ''.join(swapped_characters)
input_file = sys.argv[1]
test_cases = open(input_file, 'r')
for case in test_cases:
print(swap_case(case.rstrip()))
test_cases.close()
sys.exit(0)
| mit | -7,641,557,907,283,652,000 | 25.25 | 72 | 0.636364 | false |
EmanueleCannizzaro/scons | test/Batch/up_to_date.py | 1 | 3025 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Batch/up_to_date.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that simple use of $SOURCES with batch builders correctly decides
that files are up to date on a rebuild.
"""
import TestSCons
test = TestSCons.TestSCons()
_python_ = TestSCons._python_
test.write('batch_build.py', """\
import os
import sys
dir = sys.argv[1]
for infile in sys.argv[2:]:
inbase = os.path.splitext(os.path.split(infile)[1])[0]
outfile = os.path.join(dir, inbase+'.out')
open(outfile, 'wb').write(open(infile, 'rb').read())
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment()
env['BATCH_BUILD'] = 'batch_build.py'
env['BATCHCOM'] = r'%(_python_)s $BATCH_BUILD ${TARGET.dir} $SOURCES'
bb = Action('$BATCHCOM', batch_key=True)
env['BUILDERS']['Batch'] = Builder(action=bb)
env1 = env.Clone()
env1.Batch('out1/f1a.out', 'f1a.in')
env1.Batch('out1/f1b.out', 'f1b.in')
env2 = env.Clone()
env2.Batch('out2/f2a.out', 'f2a.in')
env3 = env.Clone()
env3.Batch('out3/f3a.out', 'f3a.in')
env3.Batch('out3/f3b.out', 'f3b.in')
""" % locals())
test.write('f1a.in', "f1a.in\n")
test.write('f1b.in', "f1b.in\n")
test.write('f2a.in', "f2a.in\n")
test.write('f3a.in', "f3a.in\n")
test.write('f3b.in', "f3b.in\n")
expect = test.wrap_stdout("""\
%(_python_)s batch_build.py out1 f1a.in f1b.in
%(_python_)s batch_build.py out2 f2a.in
%(_python_)s batch_build.py out3 f3a.in f3b.in
""" % locals())
test.run(stdout = expect)
test.must_match(['out1', 'f1a.out'], "f1a.in\n")
test.must_match(['out1', 'f1b.out'], "f1b.in\n")
test.must_match(['out2', 'f2a.out'], "f2a.in\n")
test.must_match(['out3', 'f3a.out'], "f3a.in\n")
test.must_match(['out3', 'f3b.out'], "f3b.in\n")
test.up_to_date(options = '--debug=explain', arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 5,646,750,723,889,606,000 | 31.526882 | 98 | 0.696529 | false |
rroart/stockstat | tensorflow/flasktfmain.py | 1 | 7375 | #!/usr/bin/python3
from flask import Flask, request
import sys
from multiprocessing import Process, Queue
import json
from werkzeug.wrappers import Response
def classifyrunner2(queue, request):
    import classify
    cl = classify.Classify()
    cl.do_learntestclassify(queue, request)
def predictrunner(queue, request):
import predict
pr = predict.Predict()
pr.do_learntestlist(queue, request)
def hasgpurunner(queue, dummy):
import device
device.hasgpu(queue)
app = Flask(__name__)
@app.route('/', methods=['GET'])
def healthcheck():
return(Response())
@app.route('/eval', methods=['POST'])
def do_eval():
    import classify
    cl = classify.Classify()
    return cl.do_eval(request)
@app.route('/classify', methods=['POST'])
def do_classify():
import classify
cl = classify.Classify()
return cl.do_classify(request)
@app.route('/learntest', methods=['POST'])
def do_learntest():
def classifyrunner(queue, request):
try:
import classify
cl = classify.Classify()
cl.do_learntest(queue, request)
except:
import sys,traceback
memory = "CUDA error: out of memory" in traceback.format_exc()
cudnn = "0 successful operations" in traceback.format_exc()
queue.put(Response(json.dumps({"classifycatarray": None, "classifyprobarray": None, "accuracy": None, "loss": None, "exception" : True, "gpu" : hasgpu, "memory" : memory, "cudnn" : cudnn }), mimetype='application/json'))
traceback.print_exc(file=sys.stdout)
print("\n")
import random
f = open("/tmp/outtf" + argstr() + str(random.randint(1000,9999)) + ".txt", "w")
f.write(request.get_data(as_text=True))
traceback.print_exc(file=f)
f.close()
aqueue = Queue()
process = Process(target=classifyrunner, args=(aqueue, request))
try:
import queue
process.start()
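        # poll the result queue in short timeouts so a worker process that died
        # without posting a reply does not hang the request forever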
while True:
try:
result = aqueue.get(timeout=timeout)
break
except queue.Empty as e:
if not process.is_alive():
print("Process died")
result = Response(json.dumps({"classifycatarray": None, "classifyprobarray": None, "accuracy": None, "loss": None, "exception" : True, "gpu" : hasgpu, "memory" : False, "cudnn" : False }), mimetype='application/json')
break
except Exception as e:
print(e)
import sys,traceback
traceback.print_exc(file=sys.stdout)
return result
@app.route('/learntestclassify', methods=['POST'])
def do_learntestclassify():
def classifyrunner(queue, request):
try:
import classify
cl = classify.Classify()
cl.do_learntestclassify(queue, request)
except:
import sys,traceback
memory = "CUDA error: out of memory" in traceback.format_exc()
cudnn = "0 successful operations" in traceback.format_exc()
queue.put(Response(json.dumps({"classifycatarray": None, "classifyprobarray": None, "accuracy": None, "loss": None, "exception" : True, "gpu" : hasgpu, "memory" : memory, "cudnn" : cudnn }), mimetype='application/json'))
traceback.print_exc(file=sys.stdout)
print("\n")
import random
f = open("/tmp/outtf" + argstr() + str(random.randint(1000,9999)) + ".txt", "w")
f.write(request.get_data(as_text=True))
traceback.print_exc(file=f)
f.close()
aqueue = Queue()
process = Process(target=classifyrunner, args=(aqueue, request))
try:
import queue
process.start()
while True:
try:
result = aqueue.get(timeout=timeout)
break
except queue.Empty as e:
if not process.is_alive():
print("Process died")
result = Response(json.dumps({"classifycatarray": None, "classifyprobarray": None, "accuracy": None, "loss": None, "exception" : True, "gpu" : hasgpu, "memory" : False, "cudnn" : False }), mimetype='application/json')
break
except Exception as e:
print(e)
import sys,traceback
traceback.print_exc(file=sys.stdout)
return result
@app.route('/predictone', methods=['POST'])
def do_learntestpredictone():
queue = Queue()
process = Process(target=predictrunner, args=(queue, request))
process.start()
result = queue.get()
process.join()
return result
@app.route('/predict', methods=['POST'])
def do_learntestpredict():
queue = Queue()
process = Process(target=predictrunner, args=(queue, request))
process.start()
result = queue.get()
process.join()
return result
@app.route('/dataset', methods=['POST'])
def do_dataset():
def classifyrunner(queue, request):
try:
import classify
cl = classify.Classify()
cl.do_dataset(queue, request)
except:
import sys,traceback
memory = "CUDA error: out of memory" in traceback.format_exc()
cudnn = "0 successful operations" in traceback.format_exc()
queue.put(Response(json.dumps({"accuracy": None, "loss": None, "exception" : True, "gpu" : hasgpu, "memory" : memory, "cudnn" : cudnn }), mimetype='application/json'))
traceback.print_exc(file=sys.stdout)
print("\n")
import random
f = open("/tmp/outtf" + argstr() + str(random.randint(1000,9999)) + ".txt", "w")
f.write(request.get_data(as_text=True))
traceback.print_exc(file=f)
f.close()
aqueue = Queue()
process = Process(target=classifyrunner, args=(aqueue, request))
try:
import queue
process.start()
while True:
try:
result = aqueue.get(timeout=timeout)
break
except queue.Empty as e:
if not process.is_alive():
print("Process died")
result = Response(json.dumps({"classifycatarray": None, "classifyprobarray": None, "accuracy": None, "loss": None, "exception" : True, "gpu" : hasgpu, "memory" : False, "cudnn" : False }), mimetype='application/json')
break
except Exception as e:
print(e)
import sys,traceback
traceback.print_exc(file=sys.stdout)
return result
@app.route('/filename', methods=['POST'])
def do_filename():
def filenamerunner(queue, request):
import classify
cl = classify.Classify()
cl.do_filename(queue, request)
queue = Queue()
process = Process(target=filenamerunner, args=(queue, request))
process.start()
result = queue.get()
process.join()
return result
def argstr():
if len(sys.argv) > 1 and sys.argv[1].isnumeric():
return sys.argv[1]
else:
return str(80)
if __name__ == '__main__':
timeout = 60
queue = Queue()
process = Process(target=hasgpurunner, args=(queue, None))
process.start()
hasgpu = queue.get()
process.join()
threaded = False
if len(sys.argv) > 1 and (not hasgpu) and sys.argv[1] == 'multi':
threaded = True
print("Run threaded")
port = argstr()
print("Used port", port)
app.run(host='0.0.0.0', port=port, threaded=threaded)
| agpl-3.0 | 165,869,653,316,749,470 | 34.800971 | 237 | 0.588068 | false |
zielmicha/pyjvm | tests/pystone/pystone.py | 1 | 7376 | #! /usr/bin/python2.7
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = 0):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(loops)
#print "Pystone(%s) time for %d passes = %g" % \
# (__version__, loops, benchtime)
print "This machine benchmarks at " + str(stones) + " pystones/second"
def pystones(loops=LOOPS):
return Proc0(loops)
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = map(lambda x: x[:], [Array1Glob]*51)
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
starttime = clock()
for i in range(loops):
pass
nulltime = clock() - starttime
PtrGlbNext = Record()
PtrGlb = Record()
PtrGlb.PtrComp = PtrGlbNext
PtrGlb.Discr = Ident1
PtrGlb.EnumComp = Ident3
PtrGlb.IntComp = 40
PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
starttime = clock()
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
benchtime = clock() - starttime - nulltime
if benchtime == 0.0:
loopsPerBenchtime = 0.0
else:
loopsPerBenchtime = (loops / benchtime)
return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
while 1:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = Proc7(10, IntGlob)
return PtrParOut
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def Proc6(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
if __name__ == '__main__':
import sys
def error(msg):
print >>sys.stderr, msg,
print >>sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0]
sys.exit(100)
nargs = len(sys.argv) - 1
if nargs > 1:
error("%d arguments are too many;" % nargs)
elif nargs == 1:
try: loops = int(sys.argv[1])
except ValueError:
error("Invalid argument %r;" % sys.argv[1])
else:
loops = LOOPS
main(loops)
| mit | 2,755,744,318,415,412,700 | 26.318519 | 74 | 0.599919 | false |
oblique-labs/pyVM | rpython/rlib/rstruct/test/test_runpack.py | 1 | 4012 | import pytest
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.rlib.rstruct.runpack import runpack
from rpython.rlib.rstruct import standardfmttable
from rpython.rlib.rstruct.error import StructError
from rpython.rlib.rarithmetic import LONG_BIT
import struct
class TestRStruct(BaseRtypingTest):
def test_unpack(self):
import sys
pad = '\x00' * (LONG_BIT//8-1) # 3 or 7 null bytes
def fn():
return runpack('sll', 'a'+pad+'\x03'+pad+'\x04'+pad)[1]
result = 3 if sys.byteorder == 'little' else 3 << (LONG_BIT-8)
assert fn() == result
assert self.interpret(fn, []) == result
def test_unpack_2(self):
data = struct.pack('iiii', 0, 1, 2, 4)
def fn():
a, b, c, d = runpack('iiii', data)
return a * 1000 + b * 100 + c * 10 + d
assert fn() == 124
assert self.interpret(fn, []) == 124
def test_unpack_error(self):
data = '123' # 'i' expects 4 bytes, not 3
def fn():
try:
runpack('i', data)
except StructError:
return True
else:
return False
assert fn()
assert self.interpret(fn, [])
def test_unpack_single(self):
data = struct.pack('i', 123)
def fn():
return runpack('i', data)
assert fn() == 123
assert self.interpret(fn, []) == 123
def test_unpack_big_endian(self):
def fn():
return runpack(">i", "\x01\x02\x03\x04")
assert fn() == 0x01020304
assert self.interpret(fn, []) == 0x01020304
def test_unpack_double_big_endian(self):
def fn():
return runpack(">d", "testtest")
assert fn() == struct.unpack(">d", "testtest")[0]
assert self.interpret(fn, []) == struct.unpack(">d", "testtest")[0]
def test_native_floats(self):
"""
Check the 'd' and 'f' format characters on native packing.
"""
d_data = struct.pack("df", 12.34, 12.34)
def fn():
d, f = runpack("@df", d_data)
return d, f
#
# direct test
d, f = fn()
assert d == 12.34 # no precision lost
assert f != 12.34 # precision lost
assert abs(f - 12.34) < 1E-6
#
# translated test
res = self.interpret(fn, [])
d = res.item0
f = res.item1 # convert from r_singlefloat
assert d == 12.34 # no precision lost
assert f != 12.34 # precision lost
assert abs(f - 12.34) < 1E-6
def test_unpack_standard_little(self):
def unpack(fmt, data):
def fn():
return runpack(fmt, data)
return self.interpret(fn, [])
#
assert unpack("<i", 'DCBA') == 0x41424344
assert unpack("<i", '\xfd\xff\xff\xff') == -3
assert unpack("<i", '\x00\x00\x00\x80') == -2147483648
assert unpack("<I", 'DCB\x81') == 0x81424344
assert unpack("<q", 'HGFEDCBA') == 0x4142434445464748
assert unpack("<q", 'HHIJKLM\xbe') == -0x41B2B3B4B5B6B7B8
assert unpack("<Q", 'HGFEDCB\x81') == 0x8142434445464748
def test_unpack_standard_big(self):
def unpack(fmt, data):
def fn():
return runpack(fmt, data)
return self.interpret(fn, [])
#
assert unpack(">i", 'ABCD') == 0x41424344
assert unpack(">i", '\xff\xff\xff\xfd') == -3
assert unpack(">i", '\x80\x00\x00\x00') == -2147483648
assert unpack(">I", '\x81BCD') == 0x81424344
assert unpack(">q", 'ABCDEFGH') == 0x4142434445464748
assert unpack(">q", '\xbeMLKJIHH') == -0x41B2B3B4B5B6B7B8
assert unpack(">Q", '\x81BCDEFGH') == 0x8142434445464748
class TestNoFastPath(TestRStruct):
def setup_method(self, meth):
standardfmttable.USE_FASTPATH = False
def teardown_method(self, meth):
standardfmttable.USE_FASTPATH = True
| mit | -8,812,446,480,289,066,000 | 33.586207 | 75 | 0.542622 | false |
Forage/Gramps | gramps/plugins/view/geoclose.py | 1 | 24015 | # -*- python -*-
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Geography for two persons
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
import operator
from gi.repository import Gtk
from math import *
import cgi
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("GeoGraphy.geoclose")
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import EventRoleType, EventType
from gramps.gen.config import config
from gramps.gen.datehandler import displayer, get_date
from gramps.gen.display.name import displayer as _nd
from gramps.gen.utils.place import conv_lat_lon
from gramps.gui.views.navigationview import NavigationView
from gramps.gui.views.bookmarks import PersonBookmarks
from gramps.plugins.lib.maps import constants
from gramps.plugins.lib.maps.geography import GeoGraphyView
from gramps.gui.selectors import SelectorFactory
from gramps.gen.utils.db import (get_birth_or_fallback, get_death_or_fallback)
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_UI_DEF = '''\
<ui>
<menubar name="MenuBar">
<menu action="GoMenu">
<placeholder name="CommonGo">
<menuitem action="Back"/>
<menuitem action="Forward"/>
<separator/>
<menuitem action="HomePerson"/>
<separator/>
</placeholder>
</menu>
<menu action="BookMenu">
<placeholder name="AddEditBook">
<menuitem action="AddBook"/>
<menuitem action="EditBook"/>
</placeholder>
</menu>
</menubar>
<toolbar name="ToolBar">
<placeholder name="CommonNavigation">
<toolitem action="Back"/>
<toolitem action="Forward"/>
<toolitem action="HomePerson"/>
<toolitem action="RefPerson"/>
</placeholder>
</toolbar>
</ui>
'''
#-------------------------------------------------------------------------
#
# GeoView
#
#-------------------------------------------------------------------------
class GeoClose(GeoGraphyView):
"""
The view used to render person map.
"""
CONFIGSETTINGS = (
('geography.path', constants.GEOGRAPHY_PATH),
('geography.zoom', 10),
('geography.zoom_when_center', 12),
('geography.show_cross', True),
('geography.lock', False),
('geography.center-lat', 0.0),
('geography.center-lon', 0.0),
('geography.map_service', constants.OPENSTREETMAP),
('geography.max_places', 5000),
# specific to geoclose :
('geography.color1', 'blue'),
('geography.color2', 'green'),
('geography.maximum_meeting_zone', 5),
)
def __init__(self, pdata, dbstate, uistate, nav_group=0):
GeoGraphyView.__init__(self, _("Have they been able to meet?"),
pdata, dbstate, uistate,
PersonBookmarks,
nav_group)
self.dbstate = dbstate
self.uistate = uistate
self.place_list = []
self.all_place_list = []
self.place_without_coordinates = []
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.minyear = 9999
self.maxyear = 0
self.refperson = None
self.nbplaces = 0
self.nbmarkers = 0
self.sort = []
self.tracks = []
self.additional_uis.append(self.additional_ui())
self.ref_person = None
self.skip_list = []
self.track = []
self.place_list_active = []
self.place_list_ref = []
self.cal = config.get('preferences.calendar-format-report')
self.no_show_places_in_status_bar = False
def get_title(self):
"""
Used to set the titlebar in the configuration window.
"""
return _('GeoClose')
def get_stock(self):
"""
Returns the name of the stock icon to use for the display.
This assumes that this icon has already been registered
as a stock icon.
"""
return 'gramps-relation'
def get_viewtype_stock(self):
"""Type of view in category
"""
return 'geo-show-family'
def additional_ui(self):
"""
Specifies the UIManager XML code that defines the menus and buttons
associated with the interface.
"""
return _UI_DEF
def navigation_type(self):
"""
Indicates the navigation type. Navigation type can be the string
name of any of the primary objects.
"""
return 'Person'
def goto_handle(self, handle=None):
"""
        Rebuild the map with the given person handle as the active person.
"""
self.place_list_active = []
self.place_list_ref = []
self.all_place_list = []
self.sort = []
self.places_found = []
self.place_without_coordinates = []
self.remove_all_gps()
self.remove_all_markers()
self.lifeway_layer.clear_ways()
self.message_layer.clear_messages()
active = self.get_active()
if active:
p1 = self.dbstate.db.get_person_from_handle(active)
color = self._config.get('geography.color2')
self._createmap(p1, color, self.place_list_active, False)
if self.refperson:
color = self._config.get('geography.color1')
self.message_layer.add_message(_("Reference : %(name)s ( %(birth)s - %(death)s )") % {
'name': _nd.display(self.refperson),
'birth': self.birth(self.refperson),
'death': self.death(self.refperson)})
if p1:
self.message_layer.add_message(_("The other : %(name)s ( %(birth)s - %(death)s )") % {
'name': _nd.display(p1),
'birth': self.birth(p1),
'death': self.death(p1)})
else:
self.message_layer.add_message(_("The other person is unknown"))
self._createmap(self.refperson, color, self.place_list_ref, True)
else:
self.message_layer.add_message(_("You must choose one reference person."))
self.message_layer.add_message(_("Go to the person view and select "
"the people you want to compare. "
"Return to this view and use the history."))
self.possible_meeting(self.place_list_ref, self.place_list_active)
self.uistate.modify_statusbar(self.dbstate)
def birth(self, person):
"""
return "" or the birth date of the person
"""
birth = get_birth_or_fallback(self.dbstate.db, person)
if birth and birth.get_type() != EventType.BIRTH:
sdate = get_date(birth)
if sdate:
bdate = "<i>%s</i>" % cgi.escape(sdate)
else:
bdate = ""
elif birth:
bdate = cgi.escape(get_date(birth))
else:
bdate = ""
return bdate
def death(self, person):
"""
return "" or the death date of the person
"""
death = get_death_or_fallback(self.dbstate.db, person)
if death and death.get_type() != EventType.DEATH:
sdate = get_date(death)
if sdate:
ddate = "<i>%s</i>" % cgi.escape(sdate)
else:
ddate = ""
elif death:
ddate = cgi.escape(get_date(death))
else:
ddate = ""
return ddate
def define_actions(self):
"""
Define action for the reference person button.
"""
NavigationView.define_actions(self)
self.ref_person = Gtk.ActionGroup(self.title + '/Selection')
self.ref_person.add_actions([
('RefPerson', 'gramps-person', _('reference _Person'), None ,
_("Select the person which is the reference for life ways"),
self.selectPerson),
])
self._add_action_group(self.ref_person)
def selectPerson(self, obj):
"""
Open a selection box to choose the ref person.
"""
self.track = []
self.skip_list = []
SelectPerson = SelectorFactory('Person')
sel = SelectPerson(self.dbstate, self.uistate, self.track,
_("Select the person which will be our reference."),
skip=self.skip_list)
self.refperson = sel.run()
self.goto_handle(None)
def build_tree(self):
"""
This is called by the parent class when the view becomes visible. Since
all handling of visibility is now in rebuild_trees, see that for more
information.
"""
active = self.get_active()
person = self.dbstate.db.get_person_from_handle(active)
self.lifeway_layer.clear_ways()
if person is None:
self.goto_handle(None)
else:
self.goto_handle(handle=person)
def draw(self, menu, marks, color, reference):
"""
        Draw the path that joins the places of this person's events.
"""
points = []
mark = None
for mark in marks:
startlat = float(mark[3])
startlon = float(mark[4])
not_stored = True
for idx in range(0, len(points)):
if points[idx][0] == startlat and points[idx][1] == startlon:
not_stored = False
if not_stored:
points.append((startlat, startlon))
self.lifeway_layer.add_way(points, color)
if reference:
self.lifeway_layer.add_way_ref(points, 'orange',
float(self._config.get("geography.maximum_meeting_zone")) / 10)
return False
def possible_meeting(self, place_list_ref, place_list_active):
"""
        Check whether the two people could have been at the same place at some
        point in their lives. If so, show a marker with the dates for each person.
"""
radius = float(self._config.get("geography.maximum_meeting_zone")/10.0)
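        # the zone is stored in tenths of a degree: the default setting of 5 gives
        # a radius of 0.5 degree, roughly 55 km measured along a meridian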
for ref in place_list_ref:
for act in place_list_active:
if (hypot(float(act[3])-float(ref[3]),
float(act[4])-float(ref[4])) <= radius) == True:
# we are in the meeting zone
self.add_marker(None, None, act[3], act[4], act[7], True, 1)
self.all_place_list.append(act)
self.add_marker(None, None, ref[3], ref[4], ref[7], True, 1)
self.all_place_list.append(ref)
def _createmap(self, person, color, place_list, reference):
"""
        Create a marker for each of the person's events in the database that
        has a latitude/longitude.
"""
dbstate = self.dbstate
self.cal = config.get('preferences.calendar-format-report')
self.place_list = place_list
self.place_without_coordinates = []
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.minyear = 9999
self.maxyear = 0
latitude = ""
longitude = ""
if person is not None:
# For each event, if we have a place, set a marker.
for event_ref in person.get_event_ref_list():
if not event_ref:
continue
event = dbstate.db.get_event_from_handle(event_ref.ref)
role = event_ref.get_role()
try:
date = event.get_date_object().to_calendar(self.cal)
except:
continue
eyear = str("%04d" % date.get_year()) + \
str("%02d" % date.get_month()) + \
str("%02d" % date.get_day())
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
latitude, longitude = conv_lat_lon(latitude,
longitude, "D.D8")
descr = place.get_title()
evt = EventType(event.get_type())
descr1 = _("%(eventtype)s : %(name)s") % {
'eventtype': evt,
'name': _nd.display(person)}
# place.get_longitude and place.get_latitude return
# one string. We have coordinates when the two values
# contains non null string.
if ( longitude and latitude ):
self._append_to_places_list(descr, evt,
_nd.display(person),
latitude, longitude,
descr1, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
role
)
else:
self._append_to_places_without_coord(
place.gramps_id, descr)
family_list = person.get_family_handle_list()
descr1 = " - "
for family_hdl in family_list:
family = self.dbstate.db.get_family_from_handle(family_hdl)
if family is not None:
fhandle = family_list[0] # first is primary
fam = dbstate.db.get_family_from_handle(fhandle)
handle = fam.get_father_handle()
father = dbstate.db.get_person_from_handle(handle)
if father:
descr1 = "%s - " % _nd.display(father)
handle = fam.get_mother_handle()
mother = dbstate.db.get_person_from_handle(handle)
if mother:
descr1 = "%s%s" % ( descr1, _nd.display(mother))
for event_ref in family.get_event_ref_list():
if event_ref:
event = dbstate.db.get_event_from_handle(
event_ref.ref)
role = event_ref.get_role()
if event.get_place_handle():
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(
place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
latitude, longitude = conv_lat_lon(
latitude, longitude, "D.D8")
descr = place.get_title()
evt = EventType(
event.get_type())
eyear = str("%04d" % event.get_date_object().to_calendar(self.cal).get_year()) + \
str("%02d" % event.get_date_object().to_calendar(self.cal).get_month()) + \
str("%02d" % event.get_date_object().to_calendar(self.cal).get_day())
if ( longitude and latitude ):
self._append_to_places_list(descr,
evt, _nd.display(person),
latitude, longitude,
descr1, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
role
)
else:
self._append_to_places_without_coord( place.gramps_id, descr)
sort1 = sorted(self.place_list, key=operator.itemgetter(6))
self.draw(None, sort1, color, reference)
# merge with the last results
merge_list = []
for the_list in self.sort, sort1 : merge_list += the_list
self.sort = sorted(merge_list, key=operator.itemgetter(6))
def bubble_message(self, event, lat, lon, marks):
"""
Create the menu for the selected marker
"""
menu = Gtk.Menu()
menu.set_title("person")
events = []
message = ""
oldplace = ""
prevmark = None
for mark in marks:
for plce in self.all_place_list:
if (plce[3] == mark[3] and plce[4] == mark[4]):
if plce[10] in events:
continue
else:
events.append(plce[10])
if plce[0] != oldplace:
message = "%s :" % plce[0]
self.add_place_bubble_message(event, lat, lon,
marks, menu,
message, plce)
oldplace = plce[0]
message = ""
evt = self.dbstate.db.get_event_from_gramps_id(plce[10])
# format the date as described in preferences.
date = displayer.display(evt.get_date_object())
if date == "":
date = _("Unknown")
if ( plce[11] == EventRoleType.PRIMARY ):
message = "(%s) %s : %s" % ( date, plce[2], plce[1] )
elif ( plce[11] == EventRoleType.FAMILY ):
(father_name, mother_name) = self._get_father_and_mother_name(evt)
message = "(%s) %s : %s - %s" % (date, plce[7],
father_name,
mother_name )
else:
descr = evt.get_description()
if descr == "":
descr = _('No description')
message = "(%s) %s => %s" % ( date, plce[11], descr)
prevmark = plce
add_item = Gtk.MenuItem(label=message)
add_item.show()
menu.append(add_item)
itemoption = Gtk.Menu()
itemoption.set_title(message)
itemoption.show()
add_item.set_submenu(itemoption)
modify = Gtk.MenuItem(label=_("Edit Event"))
modify.show()
modify.connect("activate", self.edit_event,
event, lat, lon, prevmark)
itemoption.append(modify)
center = Gtk.MenuItem(label=_("Center on this place"))
center.show()
center.connect("activate", self.center_here,
event, lat, lon, prevmark)
itemoption.append(center)
menu.show()
menu.popup(None, None,
lambda menu, data: (event.get_root_coords()[0],
event.get_root_coords()[1], True),
None, event.button, event.time)
return 0
def add_specific_menu(self, menu, event, lat, lon):
"""
Add specific entry to the navigation menu.
"""
add_item = Gtk.MenuItem()
add_item.show()
menu.append(add_item)
add_item = Gtk.MenuItem(label=_("Choose the reference person"))
add_item.connect("activate", self.selectPerson)
add_item.show()
menu.append(add_item)
return
def get_default_gramplets(self):
"""
Define the default gramplets for the sidebar and bottombar.
"""
return (("Person Filter",),
())
def specific_options(self, configdialog):
"""
Add specific entry to the preference menu.
Must be done in the associated view.
"""
table = Gtk.Table(2, 2)
table.set_border_width(12)
table.set_col_spacings(6)
table.set_row_spacings(6)
configdialog.add_text(table,
_('The meeting zone probability radius.\n'
'The colored zone is approximative.\n'
'The meeting zone is only shown for the reference person.\n'
'The value 9 means about 42 miles or 67 kms.\n'
'The value 1 means about 4.6 miles or 7.5 kms.\n'
'The value is in tenth of degree.'),
1, line_wrap=False)
self.config_meeting_slider = configdialog.add_slider(table,
"",
2, 'geography.maximum_meeting_zone',
(1, 9))
return _('The selection parameters'), table
def config_connect(self):
"""
used to monitor changes in the ini file
"""
self._config.connect('geography.maximum_meeting_zone',
self.cb_update_meeting_radius)
def cb_update_meeting_radius(self, client, cnxn_id, entry, data):
"""
Called when the radius change
"""
self.goto_handle(handle=None)
| gpl-2.0 | 8,128,811,068,062,690,000 | 40.333907 | 125 | 0.466417 | false |
jwlin/web-crawler-tutorial | ch4/ptt_gossiping_ip.py | 1 | 4276 | import requests
import time
import json
import re
from bs4 import BeautifulSoup
API_KEY = 'YOUR_KEY' # 須先到 ipstack.com 網頁右上角 GET FREE API KEY 註冊帳號取得 API KEY
PTT_URL = 'https://www.ptt.cc'
def get_web_page(url):
resp = requests.get(
url=url,
cookies={'over18': '1'}
)
if resp.status_code != 200:
print('Invalid url:', resp.url)
return None
else:
return resp.text
def get_articles(dom, date):
soup = BeautifulSoup(dom, 'html5lib')
# 取得上一頁的連結
paging_div = soup.find('div', 'btn-group btn-group-paging')
prev_url = paging_div.find_all('a')[1]['href']
articles = [] # 儲存取得的文章資料
divs = soup.find_all('div', 'r-ent')
for d in divs:
if d.find('div', 'date').text.strip() == date: # 發文日期正確
# 取得推文數
push_count = 0
push_str = d.find('div', 'nrec').text
if push_str:
try:
push_count = int(push_str) # 轉換字串為數字
except ValueError:
# 若轉換失敗,可能是'爆'或 'X1', 'X2', ...
# 若不是, 不做任何事,push_count 保持為 0
if push_str == '爆':
push_count = 99
elif push_str.startswith('X'):
push_count = -10
# 取得文章連結及標題
if d.find('a'): # 有超連結,表示文章存在,未被刪除
href = d.find('a')['href']
title = d.find('a').text
author = d.find('div', 'author').text if d.find('div', 'author') else ''
articles.append({
'title': title,
'href': href,
'push_count': push_count,
'author': author
})
return articles, prev_url
def get_ip(dom):
# e.g., ※ 發信站: 批踢踢實業坊(ptt.cc), 來自: 27.52.6.175
pattern = '來自: \d+\.\d+\.\d+\.\d+'
match = re.search(pattern, dom)
if match:
return match.group(0).replace('來自: ', '')
else:
return None
def get_country(ip):
if ip:
data = json.loads(requests.get('http://freegeoip.net/json/' + ip).text)
country_name = data['country_name'] if data['country_name'] else None
return country_name
return None
def get_country_ipstack(ip):
if ip:
url = 'http://api.ipstack.com/{}?access_key={}'.format(ip, API_KEY)
data = requests.get(url).json()
country_name = data['country_name'] if data['country_name'] else None
return country_name
return None
if __name__ == '__main__':
print('取得今日文章列表...')
current_page = get_web_page(PTT_URL + '/bbs/Gossiping/index.html')
if current_page:
articles = [] # 全部的今日文章
today = time.strftime('%m/%d').lstrip('0') # 今天日期, 去掉開頭的 '0' 以符合 PTT 網站格式
current_articles, prev_url = get_articles(current_page, today) # 目前頁面的今日文章
while current_articles: # 若目前頁面有今日文章則加入 articles,並回到上一頁繼續尋找是否有今日文章
articles += current_articles
current_page = get_web_page(PTT_URL + prev_url)
current_articles, prev_url = get_articles(current_page, today)
print('共 %d 篇文章' % (len(articles)))
# 已取得文章列表,開始進入各文章尋找發文者 IP
print('取得前 50 篇文章 IP')
country_to_count = dict()
for article in articles[:50]:
print('查詢 IP:', article['title'])
page = get_web_page(PTT_URL + article['href'])
if page:
ip = get_ip(page)
country = get_country_ipstack(ip)
if country in country_to_count.keys():
country_to_count[country] += 1
else:
country_to_count[country] = 1
# 印出各國 IP 次數資訊
print('各國 IP 分布')
for k, v in country_to_count.items():
print(k, v) | mit | 6,299,184,830,573,667,000 | 30.825 | 88 | 0.515191 | false |
hakancelik96/coogger | core/threaded_comment/serializers.py | 1 | 1282 | from rest_framework import serializers
from .models import ThreadedComments
class ReplySerializer(serializers.ModelSerializer):
username = serializers.ReadOnlyField(source="user.username")
title = serializers.ReadOnlyField(source="user.userprofile.title")
avatar_url = serializers.ReadOnlyField(source="user.githubauthuser.avatar_url")
parent_user = serializers.ReadOnlyField(source="get_parent.username")
parent_id = serializers.ReadOnlyField(source="get_parent.id")
reply_count = serializers.ReadOnlyField()
permlink = serializers.ReadOnlyField()
image_address = serializers.ReadOnlyField()
class Meta:
model = ThreadedComments
fields = [
"id",
"title",
"app_label",
"updated",
"model_name",
"content_type",
"object_id",
"username",
"reply",
"avatar_url",
"body",
"image_address",
"permlink",
"reply_count",
"depth",
"get_absolute_url",
"views",
"upvote_count",
"downvote_count",
"parent_permlink",
"parent_user",
"parent_id",
"created",
]
| mit | -7,206,508,981,116,535,000 | 29.52381 | 83 | 0.561622 | false |
palfrey/coherence | misc/Rhythmbox-Plugin/upnp_coherence/__init__.py | 1 | 6137 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright 2008, Frank Scholz <[email protected]>
# Copyright 2008, James Livingston <[email protected]>
#
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
import rhythmdb, rb
import gobject, gtk
import coherence.extern.louie as louie
from coherence import log
UPNP_VERSION = 1 # should be actually 2, but due to some %$*! UPnP clients
# out there we have to set that here manually
# For the icon
import os.path, urllib, gnomevfs, gtk.gdk
class CoherencePlugin(rb.Plugin,log.Loggable):
logCategory = 'rb_coherence_plugin'
def __init__(self):
rb.Plugin.__init__(self)
self.coherence = None
def activate(self, shell):
from twisted.internet import gtk2reactor
try:
gtk2reactor.install()
except AssertionError, e:
# sometimes it's already installed
print e
self.coherence = self.get_coherence()
if self.coherence is None:
print "Coherence is not installed or too old, aborting"
return
print "coherence UPnP plugin activated"
self.shell = shell
self.sources = {}
# Set up our icon
the_icon = None
face_path = os.path.join(os.path.expanduser('~'), ".face")
if os.path.exists(face_path):
url = "file://" + urllib.pathname2url(face_path)
mimetype = gnomevfs.get_mime_type(url)
pixbuf = gtk.gdk.pixbuf_new_from_file(face_path)
width = "%s" % pixbuf.get_width()
height = "%s" % pixbuf.get_height()
depth = '24'
the_icon = {
'url':url,
'mimetype':mimetype,
'width':width,
'height':height,
'depth':depth
}
else:
the_icon = None
# create our own media server
from coherence.upnp.devices.media_server import MediaServer
from MediaStore import MediaStore
if the_icon:
server = MediaServer(self.coherence, MediaStore, version=UPNP_VERSION, no_thread_needed=True, db=self.shell.props.db, plugin=self, icon=the_icon)
else:
server = MediaServer(self.coherence, MediaStore, version=UPNP_VERSION, no_thread_needed=True, db=self.shell.props.db, plugin=self)
self.uuid = str(server.uuid)
if self.coherence_version >= (0,5,2):
# create our own media renderer
# but only if we have a matching Coherence package installed
from coherence.upnp.devices.media_renderer import MediaRenderer
from MediaPlayer import RhythmboxPlayer
if the_icon:
MediaRenderer(self.coherence, RhythmboxPlayer, version=UPNP_VERSION, no_thread_needed=True, shell=self.shell, icon=the_icon)
else:
MediaRenderer(self.coherence, RhythmboxPlayer, version=UPNP_VERSION, no_thread_needed=True, shell=self.shell)
# watch for media servers
louie.connect(self.detected_media_server,
'Coherence.UPnP.ControlPoint.MediaServer.detected',
louie.Any)
louie.connect(self.removed_media_server,
'Coherence.UPnP.ControlPoint.MediaServer.removed',
louie.Any)
def deactivate(self, shell):
print "coherence UPnP plugin deactivated"
if self.coherence is None:
return
self.coherence.shutdown()
louie.disconnect(self.detected_media_server,
'Coherence.UPnP.ControlPoint.MediaServer.detected',
louie.Any)
louie.disconnect(self.removed_media_server,
'Coherence.UPnP.ControlPoint.MediaServer.removed',
louie.Any)
del self.shell
del self.coherence
for usn, source in self.sources.iteritems():
source.delete_thyself()
del self.sources
# uninstall twisted reactor? probably not, since other things may have used it
def get_coherence (self):
coherence_instance = None
required_version = (0, 5, 7)
try:
from coherence.base import Coherence
from coherence import __version_info__
except ImportError, e:
print "Coherence not found"
return None
if __version_info__ < required_version:
required = '.'.join([str(i) for i in required_version])
found = '.'.join([str(i) for i in __version_info__])
print "Coherence %s required. %s found. Please upgrade" % (required, found)
return None
self.coherence_version = __version_info__
coherence_config = {
#'logmode': 'info',
'controlpoint': 'yes',
'plugins': {},
}
coherence_instance = Coherence(coherence_config)
return coherence_instance
def removed_media_server(self, udn):
print "upnp server went away %s" % udn
if self.sources.has_key(udn):
self.sources[udn].delete_thyself()
del self.sources[udn]
def detected_media_server(self, client, udn):
print "found upnp server %s (%s)" % (client.device.get_friendly_name(), udn)
self.warning("found upnp server %s (%s)" % (client.device.get_friendly_name(), udn))
if client.device.get_id() == self.uuid:
""" don't react on our own MediaServer"""
return
db = self.shell.props.db
group = rb.rb_source_group_get_by_name ("shared")
entry_type = db.entry_register_type("CoherenceUpnp:" + client.device.get_id()[5:])
from UpnpSource import UpnpSource
source = gobject.new (UpnpSource,
shell=self.shell,
entry_type=entry_type,
source_group=group,
plugin=self,
client=client,
udn=udn)
self.sources[udn] = source
self.shell.append_source (source, None)
| mit | 8,870,718,566,587,604,000 | 33.672316 | 157 | 0.588398 | false |
AddonScriptorDE/plugin.audio.vorleser_net | default.py | 1 | 5734 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib,urllib2,re,xbmcplugin,xbmcgui,sys,xbmcaddon,base64
pluginhandle = int(sys.argv[1])
addon = xbmcaddon.Addon(id='plugin.audio.vorleser_net')
translation = addon.getLocalizedString
def index():
addDir(translation(30001),"","listAllAuthors","")
addDir("Krimi & Spannung","Krimi_kUND_Spannung","listCatBooks","")
addDir("Kinder & Jugendliche","Kinder_kUND_Jugendliche","listCatBooks","")
addDir("Romane & Erzählungen","Romane_kUND_Erzaehlungen","listCatBooks","")
addDir("Philosophie & Religion","Philosophie_kUND_Religion","listCatBooks","")
addDir("Hörspiele & Bühne","Hoerspiel_kUND_Buehne","listCatBooks","")
addDir("Lyrik & Musik","Lyrik_kUND_Poesie","listCatBooks","")
addDir("Sachtexte & Essays","Sachtexte_kUND_Essays","listCatBooks","")
xbmcplugin.endOfDirectory(pluginhandle)
def listAllAuthors():
content = getUrl("http://www.vorleser.net/alle_autoren.php")
match=re.compile('<a href="autor.php\\?id=(.+?)" name="(.+?)" class="rel pointer" onclick="setDetail\\(this\\)">(.+?)<br></a>', re.DOTALL).findall(content)
for id, temp, author in match:
addDir(cleanTitle(author),id,'listBooks',"")
xbmcplugin.endOfDirectory(pluginhandle)
def listCatBooks(id):
content = getUrl("http://www.vorleser.net/hoerbuch.php?kat="+id)
spl=content.split('<div class="box news')
for i in range(1,len(spl),1):
entry=spl[i]
match=re.compile('<div class="autor">(.+?)</div>', re.DOTALL).findall(entry)
author=match[0]
match=re.compile('<div class="h2 orange fieldH2">(.+?)</div>', re.DOTALL).findall(entry)
title=author+": "+match[0]
title=cleanTitle(title)
match=re.compile('<a href="hoerbuch.php\\?id=(.+?)"', re.DOTALL).findall(entry)
id=match[0]
match=re.compile('background-image:url\\((.+?)\\)', re.DOTALL).findall(entry)
thumb=""
if len(match)>0:
thumb="http://www.vorleser.net/"+match[0]
addLink(title,id,'playAudio',thumb)
xbmcplugin.endOfDirectory(pluginhandle)
def listBooks(id):
content = getUrl("http://www.vorleser.net/autor.php?id="+id)
spl=content.split('<div class="box news"')
for i in range(1,len(spl),1):
entry=spl[i]
match=re.compile('<div class="h2 orange" style="(.+?)">(.+?)</div>', re.DOTALL).findall(entry)
title=match[0][1]
title=cleanTitle(title)
match=re.compile('<a href="hoerbuch.php\\?id=(.+?)"', re.DOTALL).findall(entry)
id=match[0]
match=re.compile('background-image:url\\((.+?)\\)', re.DOTALL).findall(entry)
thumb=""
if len(match)>0:
thumb="http://www.vorleser.net/"+match[0]
addLink(title,id,'playAudio',thumb)
xbmcplugin.endOfDirectory(pluginhandle)
def playAudio(id):
listitem = xbmcgui.ListItem(path="http://www.vorleser.net/audio/"+id+".mp3")
return xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
def cleanTitle(title):
title=title.replace("<","<").replace(">",">").replace("&","&").replace("'","\\").replace("ć","c").replace("č","c").replace("đ","d").replace("“","\"").replace("”","\"").replace(""","\"").replace("ß","ß").replace("–","-")
title=title.replace("Ä","Ä").replace("Ü","Ü").replace("Ö","Ö").replace("ä","ä").replace("ü","ü").replace("ö","ö")
title=title.replace('<SPAN STYLE="font-family: Arial,Helvetica,Geneva,Sans-serif;">',"").replace("</SPAN>","")
title=title.strip()
return title
def getUrl(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0')
response = urllib2.urlopen(req,timeout=30)
link=response.read()
response.close()
return link
def parameters_string_to_dict(parameters):
''' Convert parameters encoded in a URL to a dict. '''
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
def addLink(name,url,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('IsPlayable', 'true')
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz)
return ok
def addDir(name,url,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
params=parameters_string_to_dict(sys.argv[2])
mode=params.get('mode')
url=params.get('url')
if type(url)==type(str()):
url=urllib.unquote_plus(url)
if mode == 'listBooks':
listBooks(url)
elif mode == 'listCatBooks':
listCatBooks(url)
elif mode == 'listAllAuthors':
listAllAuthors()
elif mode == 'playAudio':
playAudio(url)
else:
index()
| gpl-2.0 | -4,045,338,597,446,359,000 | 44.792 | 286 | 0.604298 | false |
ihydrogen/hydrogen-chat-bot-py | utils/print_hook.py | 1 | 2278 | import sys
# this class gets all output directed to stdout(e.g by print statements)
# and stderr and redirects it to a user defined function
class PrintHook:
# out = 1 means stdout will be hooked
# out = 0 means stderr will be hooked
def __init__(self, out=1):
self.func = None ##self.func is userdefined function
self.origOut = None
self.out = out
# user defined hook must return three variables
# proceed,lineNoMode,newText
def TestHook(self, text):
f = open('hook_log.txt', 'a')
f.write(text)
f.close()
return 0, 0, text
def Start(self, func=None):
if self.out:
sys.stdout = self
self.origOut = sys.__stdout__
else:
sys.stderr = self
self.origOut = sys.__stderr__
if func:
self.func = func
else:
self.func = self.TestHook
def flush(self):
self.origOut.flush()
pass
# Stop will stop routing of print statements thru this class
def Stop(self):
if self.out:
sys.stdout = sys.__stdout__
else:
sys.stderr = sys.__stderr__
self.func = None
# override write of stdout
def write(self, text):
global postNewText, newText
proceed = 1
lineNo = 0
addText = ''
if self.func != None:
proceed, lineNo, newText, postNewText = self.func(text)
if proceed:
if text.split() == []:
self.origOut.write(text)
else:
# if goint to stdout then only add line no file etc
# for stderr it is already there
if self.out:
if lineNo:
try:
raise Exception("Err print hook")
except:
if newText is not None:
self.origOut.write(newText)
if postNewText is not None:
self.origOut.write(postNewText)
# # pass all other methods to __stdout__ so that we don't have to override them
# def __getattr__(self, name):
# return self.origOut.__getattr__(name)
| apache-2.0 | 1,491,615,391,979,086,000 | 29.373333 | 83 | 0.514925 | false |
artur-shaik/qutebrowser | qutebrowser/config/config.py | 1 | 32194 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Configuration storage and config-related utilities.
This borrows a lot of ideas from configparser, but also has some things that
are fundamentally different. This is why nothing inherits from configparser,
but we borrow some methods and classes from there where it makes sense.
"""
import os
import sys
import os.path
import functools
import configparser
import contextlib
import collections
import collections.abc
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QUrl, QSettings
from qutebrowser.config import configdata, configexc, textwrapper
from qutebrowser.config.parsers import ini, keyconf
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.utils import (message, objreg, utils, standarddir, log,
qtutils, error, usertypes)
from qutebrowser.utils.usertypes import Completion
UNSET = object()
class change_filter: # pylint: disable=invalid-name
"""Decorator to filter calls based on a config section/option matching.
This could also be a function, but as a class (with a "wrong" name) it's
much cleaner to implement.
Attributes:
_sectname: The section to be filtered.
_optname: The option to be filtered.
_function: Whether a function rather than a method is decorated.
"""
def __init__(self, sectname, optname=None, function=False):
"""Save decorator arguments.
Gets called on parse-time with the decorator arguments.
Args:
sectname: The section to be filtered.
optname: The option to be filtered.
function: Whether a function rather than a method is decorated.
"""
if sectname not in configdata.DATA:
raise configexc.NoSectionError(sectname)
if optname is not None and optname not in configdata.DATA[sectname]:
raise configexc.NoOptionError(optname, sectname)
self._sectname = sectname
self._optname = optname
self._function = function
def __call__(self, func):
"""Filter calls to the decorated function.
Gets called when a function should be decorated.
Adds a filter which returns if we're not interested in the change-event
and calls the wrapped function if we are.
We assume the function passed doesn't take any parameters.
Args:
func: The function to be decorated.
Return:
The decorated function.
"""
if self._function:
@pyqtSlot(str, str)
@functools.wraps(func)
def wrapper(sectname=None, optname=None):
# pylint: disable=missing-docstring
if sectname is None and optname is None:
# Called directly, not from a config change event.
return func()
elif sectname != self._sectname:
return
elif self._optname is not None and optname != self._optname:
return
else:
return func()
else:
@pyqtSlot(str, str)
@functools.wraps(func)
def wrapper(wrapper_self, sectname=None, optname=None):
# pylint: disable=missing-docstring
if sectname is None and optname is None:
# Called directly, not from a config change event.
return func(wrapper_self)
elif sectname != self._sectname:
return
elif self._optname is not None and optname != self._optname:
return
else:
return func(wrapper_self)
return wrapper
def get(*args, **kwargs):
"""Convenience method to call get(...) of the config instance."""
return objreg.get('config').get(*args, **kwargs)
def section(sect):
"""Get a config section from the global config."""
return objreg.get('config')[sect]
def _init_main_config(parent=None):
"""Initialize the main config.
Args:
parent: The parent to pass to ConfigManager.
"""
args = objreg.get('args')
try:
config_obj = ConfigManager(standarddir.config(), 'qutebrowser.conf',
args.relaxed_config, parent=parent)
except (configexc.Error, configparser.Error, UnicodeDecodeError) as e:
log.init.exception(e)
errstr = "Error while reading config:"
try:
errstr += "\n\n{} -> {}:".format(
e.section, e.option) # pylint: disable=no-member
except AttributeError:
pass
errstr += "\n"
error.handle_fatal_exc(e, args, "Error while reading config!",
pre_text=errstr)
# We didn't really initialize much so far, so we just quit hard.
sys.exit(usertypes.Exit.err_config)
else:
objreg.register('config', config_obj)
if standarddir.config() is not None:
filename = os.path.join(standarddir.config(), 'qutebrowser.conf')
save_manager = objreg.get('save-manager')
save_manager.add_saveable(
'config', config_obj.save, config_obj.changed,
config_opt=('general', 'auto-save-config'), filename=filename)
for sect in config_obj.sections.values():
for opt in sect.values.values():
if opt.values['conf'] is None:
# Option added to built-in defaults but not in user's
# config yet
save_manager.save('config', explicit=True, force=True)
return
def _init_key_config(parent):
"""Initialize the key config.
Args:
parent: The parent to use for the KeyConfigParser.
"""
args = objreg.get('args')
try:
key_config = keyconf.KeyConfigParser(standarddir.config(), 'keys.conf',
args.relaxed_config,
parent=parent)
except (keyconf.KeyConfigError, UnicodeDecodeError) as e:
log.init.exception(e)
errstr = "Error while reading key config:\n"
if e.lineno is not None:
errstr += "In line {}: ".format(e.lineno)
error.handle_fatal_exc(e, args, "Error while reading key config!",
pre_text=errstr)
# We didn't really initialize much so far, so we just quit hard.
sys.exit(usertypes.Exit.err_key_config)
else:
objreg.register('key-config', key_config)
if standarddir.config() is not None:
save_manager = objreg.get('save-manager')
filename = os.path.join(standarddir.config(), 'keys.conf')
save_manager.add_saveable(
'key-config', key_config.save, key_config.config_dirty,
config_opt=('general', 'auto-save-config'), filename=filename,
dirty=key_config.is_dirty)
def _init_misc():
"""Initialize misc. config-related files."""
save_manager = objreg.get('save-manager')
state_config = ini.ReadWriteConfigParser(standarddir.data(), 'state')
for sect in ('general', 'geometry'):
try:
state_config.add_section(sect)
except configparser.DuplicateSectionError:
pass
# See commit a98060e020a4ba83b663813a4b9404edb47f28ad.
state_config['general'].pop('fooled', None)
objreg.register('state-config', state_config)
save_manager.add_saveable('state-config', state_config.save)
# We need to import this here because lineparser needs config.
from qutebrowser.misc import lineparser
command_history = lineparser.LimitLineParser(
standarddir.data(), 'cmd-history',
limit=('completion', 'cmd-history-max-items'),
parent=objreg.get('config'))
objreg.register('command-history', command_history)
save_manager.add_saveable('command-history', command_history.save,
command_history.changed)
# Set the QSettings path to something like
# ~/.config/qutebrowser/qsettings/qutebrowser/qutebrowser.conf so it
# doesn't overwrite our config.
#
# This fixes one of the corruption issues here:
# https://github.com/The-Compiler/qutebrowser/issues/515
if standarddir.config() is None:
path = os.devnull
else:
path = os.path.join(standarddir.config(), 'qsettings')
for fmt in (QSettings.NativeFormat, QSettings.IniFormat):
QSettings.setPath(fmt, QSettings.UserScope, path)
def init(parent=None):
"""Initialize the config.
Args:
parent: The parent to pass to QObjects which get initialized.
"""
_init_main_config(parent)
_init_key_config(parent)
_init_misc()
def _get_value_transformer(mapping):
"""Get a function which transforms a value for CHANGED_OPTIONS.
Args:
mapping: A dictionary mapping old values to new values. Value is not
transformed if the supplied value doesn't match the old value.
Return:
A function which takes a value and transforms it.
"""
def transformer(val):
try:
return mapping[val]
except KeyError:
return val
return transformer
def _transform_position(val):
"""Transformer for position values."""
mapping = {
'north': 'top',
'south': 'bottom',
'west': 'left',
'east': 'right',
}
try:
return mapping[val]
except KeyError:
return val
class ConfigManager(QObject):
"""Configuration manager for qutebrowser.
Class attributes:
KEY_ESCAPE: Chars which need escaping when they occur as first char
in a line.
ESCAPE_CHAR: The char to be used for escaping
RENAMED_SECTIONS: A mapping of renamed sections, {'oldname': 'newname'}
RENAMED_OPTIONS: A mapping of renamed options,
{('section', 'oldname'): 'newname'}
CHANGED_OPTIONS: A mapping of arbitrarily changed options,
{('section', 'option'): callable}.
The callable takes the old value and returns the new
one.
DELETED_OPTIONS: A (section, option) list of deleted options.
Attributes:
sections: The configuration data as an OrderedDict.
_fname: The filename to be opened.
_configdir: The dictionary to read the config from and save it in.
_interpolation: An configparser.Interpolation object
_proxies: configparser.SectionProxy objects for sections.
_initialized: Whether the ConfigManager is fully initialized yet.
Signals:
changed: Emitted when a config option changed.
style_changed: When style caches need to be invalidated.
Args: the changed section and option.
"""
KEY_ESCAPE = r'\#['
ESCAPE_CHAR = '\\'
RENAMED_SECTIONS = {
'permissions': 'content'
}
RENAMED_OPTIONS = {
('colors', 'tab.fg.odd'): 'tabs.fg.odd',
('colors', 'tab.fg.even'): 'tabs.fg.even',
('colors', 'tab.fg.selected'): 'tabs.fg.selected.odd',
('colors', 'tabs.fg.selected'): 'tabs.fg.selected.odd',
('colors', 'tab.bg.odd'): 'tabs.bg.odd',
('colors', 'tab.bg.even'): 'tabs.bg.even',
('colors', 'tab.bg.selected'): 'tabs.bg.selected.odd',
('colors', 'tabs.bg.selected'): 'tabs.bg.selected.odd',
('colors', 'tab.bg.bar'): 'tabs.bg.bar',
('colors', 'tab.indicator.start'): 'tabs.indicator.start',
('colors', 'tab.indicator.stop'): 'tabs.indicator.stop',
('colors', 'tab.indicator.error'): 'tabs.indicator.error',
('colors', 'tab.indicator.system'): 'tabs.indicator.system',
('tabs', 'auto-hide'): 'hide-auto',
('completion', 'history-length'): 'cmd-history-max-items',
('colors', 'downloads.fg'): 'downloads.fg.start',
}
DELETED_OPTIONS = [
('colors', 'tab.separator'),
('colors', 'tabs.separator'),
('colors', 'completion.item.bg'),
('tabs', 'indicator-space'),
('tabs', 'hide-auto'),
('tabs', 'hide-always'),
]
CHANGED_OPTIONS = {
('content', 'cookies-accept'):
_get_value_transformer({'default': 'no-3rdparty'}),
('tabs', 'position'): _transform_position,
('ui', 'downloads-position'): _transform_position,
('ui', 'remove-finished-downloads'):
_get_value_transformer({'false': '-1', 'true': '1000'})
}
changed = pyqtSignal(str, str)
style_changed = pyqtSignal(str, str)
def __init__(self, configdir, fname, relaxed=False, parent=None):
super().__init__(parent)
self._initialized = False
self.sections = configdata.data()
self._interpolation = configparser.ExtendedInterpolation()
self._proxies = {}
for sectname in self.sections:
self._proxies[sectname] = SectionProxy(self, sectname)
self._fname = fname
if configdir is None:
self._configdir = None
self._initialized = True
else:
self._configdir = configdir
parser = ini.ReadConfigParser(configdir, fname)
self._from_cp(parser, relaxed)
self._initialized = True
self._validate_all()
def __getitem__(self, key):
"""Get a section from the config."""
return self._proxies[key]
def __repr__(self):
return utils.get_repr(self, fname=self._fname)
def __str__(self):
"""Get the whole config as a string."""
lines = configdata.FIRST_COMMENT.strip('\n').splitlines()
for sectname, sect in self.sections.items():
lines.append('\n[{}]'.format(sectname))
lines += self._str_section_desc(sectname)
lines += self._str_option_desc(sectname, sect)
lines += self._str_items(sect)
return '\n'.join(lines) + '\n'
def _str_section_desc(self, sectname):
"""Get the section description string for sectname."""
wrapper = textwrapper.TextWrapper()
lines = []
seclines = configdata.SECTION_DESC[sectname].splitlines()
for secline in seclines:
if 'http://' in secline or 'https://' in secline:
lines.append('# ' + secline)
else:
lines += wrapper.wrap(secline)
return lines
def _str_option_desc(self, sectname, sect):
"""Get the option description strings for sect/sectname."""
wrapper = textwrapper.TextWrapper(initial_indent='#' + ' ' * 5,
subsequent_indent='#' + ' ' * 5)
lines = []
if not getattr(sect, 'descriptions', None):
return lines
for optname, option in sect.items():
lines.append('#')
if option.typ.special:
typestr = ''
else:
typestr = ' ({})'.format(option.typ.__class__.__name__)
lines.append("# {}{}:".format(optname, typestr))
try:
desc = self.sections[sectname].descriptions[optname]
except KeyError:
log.config.exception("No description for {}.{}!".format(
sectname, optname))
continue
for descline in desc.splitlines():
lines += wrapper.wrap(descline)
valid_values = option.typ.valid_values
if valid_values is not None:
if valid_values.descriptions:
for val in valid_values:
desc = valid_values.descriptions[val]
lines += wrapper.wrap(" {}: {}".format(val, desc))
else:
lines += wrapper.wrap("Valid values: {}".format(', '.join(
valid_values)))
lines += wrapper.wrap("Default: {}".format(
option.values['default']))
return lines
def _str_items(self, sect):
"""Get the option items as string for sect."""
lines = []
for optname, option in sect.items():
value = option.value(startlayer='conf')
for c in self.KEY_ESCAPE:
if optname.startswith(c):
optname = optname.replace(c, self.ESCAPE_CHAR + c, 1)
# configparser can't handle = in keys :(
optname = optname.replace('=', '<eq>')
keyval = '{} = {}'.format(optname, value)
lines.append(keyval)
return lines
def _get_real_sectname(self, cp, sectname):
"""Get an old or new section name based on a configparser.
This checks if sectname is in cp, and if not, migrates it if needed and
tries again.
Args:
cp: The configparser to check.
sectname: The new section name.
Returns:
The section name in the configparser as a string, or None if the
configparser doesn't contain the section.
"""
reverse_renamed_sections = {v: k for k, v in
self.RENAMED_SECTIONS.items()}
if sectname in reverse_renamed_sections:
old_sectname = reverse_renamed_sections[sectname]
else:
old_sectname = sectname
if old_sectname in cp:
return old_sectname
elif sectname in cp:
return sectname
else:
return None
def _from_cp(self, cp, relaxed=False):
"""Read the config from a configparser instance.
Args:
cp: The configparser instance to read the values from.
relaxed: Whether to ignore inexistent sections/options.
"""
for sectname in cp:
if sectname in self.RENAMED_SECTIONS:
sectname = self.RENAMED_SECTIONS[sectname]
if sectname is not 'DEFAULT' and sectname not in self.sections:
if not relaxed:
raise configexc.NoSectionError(sectname)
for sectname in self.sections:
self._from_cp_section(sectname, cp, relaxed)
def _from_cp_section(self, sectname, cp, relaxed):
"""Read a single section from a configparser instance.
Args:
sectname: The name of the section to read.
cp: The configparser instance to read the values from.
relaxed: Whether to ignore inexistent options.
"""
real_sectname = self._get_real_sectname(cp, sectname)
if real_sectname is None:
return
for k, v in cp[real_sectname].items():
if k.startswith(self.ESCAPE_CHAR):
k = k[1:]
if (sectname, k) in self.DELETED_OPTIONS:
return
if (sectname, k) in self.RENAMED_OPTIONS:
k = self.RENAMED_OPTIONS[sectname, k]
if (sectname, k) in self.CHANGED_OPTIONS:
func = self.CHANGED_OPTIONS[(sectname, k)]
v = func(v)
try:
self.set('conf', sectname, k, v, validate=False)
except configexc.NoOptionError:
if relaxed:
pass
else:
raise
def _validate_all(self):
"""Validate all values set in self._from_cp."""
for sectname, sect in self.sections.items():
mapping = {key: val.value() for key, val in sect.values.items()}
for optname, opt in sect.items():
interpolated = self._interpolation.before_get(
self, sectname, optname, opt.value(), mapping)
try:
opt.typ.validate(interpolated)
except configexc.ValidationError as e:
e.section = sectname
e.option = optname
raise
def _changed(self, sectname, optname):
"""Notify other objects the config has changed."""
log.config.debug("Config option changed: {} -> {}".format(
sectname, optname))
if sectname in ('colors', 'fonts'):
self.style_changed.emit(sectname, optname)
self.changed.emit(sectname, optname)
def _after_set(self, changed_sect, changed_opt):
"""Clean up caches and emit signals after an option has been set."""
self.get.cache_clear()
self._changed(changed_sect, changed_opt)
# Options in the same section and ${optname} interpolation.
for optname, option in self.sections[changed_sect].items():
if '${' + changed_opt + '}' in option.value():
self._changed(changed_sect, optname)
# Options in any section and ${sectname:optname} interpolation.
for sectname, sect in self.sections.items():
for optname, option in sect.items():
if ('${' + changed_sect + ':' + changed_opt + '}' in
option.value()):
self._changed(sectname, optname)
def items(self, sectname, raw=True):
"""Get a list of (optname, value) tuples for a section.
Implemented for configparser interpolation compatibility
Args:
sectname: The name of the section to get.
raw: Whether to get raw values. Note this parameter only exists
for ConfigParser compatibility and raw=False is not supported.
"""
items = []
if not raw:
raise ValueError("items() with raw=True is not implemented!")
for optname, option in self.sections[sectname].items():
items.append((optname, option.value()))
return items
def has_option(self, sectname, optname):
"""Check if option exists in section.
Args:
sectname: The section name.
optname: The option name
Return:
True if the option and section exist, False otherwise.
"""
if sectname not in self.sections:
return False
return optname in self.sections[sectname]
def remove_option(self, sectname, optname):
"""Remove an option.
Args:
sectname: The section where to remove an option.
optname: The option name to remove.
Return:
True if the option existed, False otherwise.
"""
try:
sectdict = self.sections[sectname]
except KeyError:
raise configexc.NoSectionError(sectname)
optname = self.optionxform(optname)
existed = optname in sectdict
if existed:
del sectdict[optname]
self.get.cache_clear()
return existed
@functools.lru_cache()
def get(self, sectname, optname, raw=False, transformed=True,
fallback=UNSET):
"""Get the value from a section/option.
We don't support the vars argument from configparser.get as it's not
hashable.
Args:
sectname: The section to get the option from.
optname: The option name
raw: Whether to get the uninterpolated, untransformed value.
transformed: Whether the value should be transformed.
Return:
The value of the option.
"""
if not self._initialized:
raise Exception("get got called before initialization was "
"complete!")
try:
sect = self.sections[sectname]
except KeyError:
if fallback is not UNSET:
return fallback
raise configexc.NoSectionError(sectname)
try:
val = sect[optname]
except KeyError:
if fallback is not UNSET:
return fallback
raise configexc.NoOptionError(optname, sectname)
if raw:
return val.value()
mapping = {key: val.value() for key, val in sect.values.items()}
newval = self._interpolation.before_get(self, sectname, optname,
val.value(), mapping)
if transformed:
newval = val.typ.transform(newval)
return newval
@contextlib.contextmanager
def _handle_config_error(self):
"""Catch errors in set_command and raise CommandError."""
try:
yield
except (configexc.NoOptionError, configexc.NoSectionError,
configexc.ValidationError) as e:
raise cmdexc.CommandError("set: {}".format(e))
except (configexc.Error, configparser.Error) as e:
raise cmdexc.CommandError("set: {} - {}".format(
e.__class__.__name__, e))
@cmdutils.register(name='set', instance='config', win_id='win_id',
completion=[Completion.section, Completion.option,
Completion.value])
def set_command(self, win_id, section_=None, option=None, value=None,
temp=False, print_=False):
"""Set an option.
If the option name ends with '?', the value of the option is shown
instead.
If the option name ends with '!' and it is a boolean value, toggle it.
//
Wrapper for self.set() to output exceptions in the status bar.
Args:
section_: The section where the option is in.
option: The name of the option.
value: The value to set.
temp: Set value temporarily.
print_: Print the value after setting.
"""
if section_ is not None and option is None:
raise cmdexc.CommandError(
"set: Either both section and option have to be given, or "
"neither!")
if section_ is None and option is None:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.openurl(QUrl('qute:settings'), newtab=False)
return
if option.endswith('?') and option != '?':
option = option[:-1]
print_ = True
else:
with self._handle_config_error():
if option.endswith('!') and option != '!' and value is None:
option = option[:-1]
val = self.get(section_, option)
layer = 'temp' if temp else 'conf'
if isinstance(val, bool):
self.set(layer, section_, option, str(not val))
else:
raise cmdexc.CommandError(
"set: Attempted inversion of non-boolean value.")
elif value is not None:
layer = 'temp' if temp else 'conf'
self.set(layer, section_, option, value)
else:
raise cmdexc.CommandError("set: The following arguments "
"are required: value")
if print_:
with self._handle_config_error():
val = self.get(section_, option, transformed=False)
message.info(win_id, "{} {} = {}".format(
section_, option, val), immediately=True)
def set(self, layer, sectname, optname, value, validate=True):
"""Set an option.
Args:
layer: A layer name as string (conf/temp/default).
sectname: The name of the section to change.
optname: The name of the option to change.
value: The new value.
validate: Whether to validate the value immediately.
"""
try:
value = self._interpolation.before_set(self, sectname, optname,
value)
except ValueError as e:
raise configexc.InterpolationSyntaxError(optname, sectname, str(e))
try:
sect = self.sections[sectname]
except KeyError:
raise configexc.NoSectionError(sectname)
mapping = {key: val.value() for key, val in sect.values.items()}
if validate:
interpolated = self._interpolation.before_get(
self, sectname, optname, value, mapping)
else:
interpolated = None
try:
sect.setv(layer, optname, value, interpolated)
except KeyError:
raise configexc.NoOptionError(optname, sectname)
else:
if self._initialized:
self._after_set(sectname, optname)
def save(self):
"""Save the config file."""
if self._configdir is None:
return
configfile = os.path.join(self._configdir, self._fname)
log.destroy.debug("Saving config to {}".format(configfile))
with qtutils.savefile_open(configfile) as f:
f.write(str(self))
def dump_userconfig(self):
"""Get the part of the config which was changed by the user.
Return:
The changed config part as string.
"""
lines = []
for sectname, sect in self.sections.items():
changed = sect.dump_userconfig()
if changed:
lines.append('[{}]'.format(sectname))
lines += ['{} = {}'.format(k, v) for k, v in changed]
if not lines:
lines = ['<Default configuration>']
return '\n'.join(lines)
def optionxform(self, val):
"""Implemented to be compatible with ConfigParser interpolation."""
return val
class SectionProxy(collections.abc.MutableMapping):
"""A proxy for a single section from a config.
Attributes:
_conf: The Config object.
_name: The section name.
"""
def __init__(self, conf, name):
"""Create a view on a section.
Args:
conf: The Config object.
name: The section name.
"""
self.conf = conf
self.name = name
def __repr__(self):
return utils.get_repr(self, name=self.name)
def __getitem__(self, key):
if not self.conf.has_option(self.name, key):
raise KeyError(key)
return self.conf.get(self.name, key)
def __setitem__(self, key, value):
return self.conf.set('conf', self.name, key, value)
def __delitem__(self, key):
if not (self.conf.has_option(self.name, key) and
self.conf.remove_option(self.name, key)):
raise KeyError(key)
def __contains__(self, key):
return self.conf.has_option(self.name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
"""Get the option keys from this section."""
return self.conf.sections[self.name].keys()
def get(self, optname, *, raw=False): # pylint: disable=arguments-differ
"""Get a value from this section.
We deliberately don't support the default argument here, but have a raw
argument instead.
Args:
optname: The option name to get.
raw: Whether to get a raw value or not.
"""
return self.conf.get(self.name, optname, raw=raw)
| gpl-3.0 | -538,014,497,136,559,600 | 36.304751 | 79 | 0.571846 | false |
thangbui/geepee | examples/dgpr_aep_examples.py | 1 | 8718 | print "importing stuff..."
import numpy as np
import pdb
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pylab as plt
from scipy import special
from .context import aep
from .datautils import step, spiral
from .context import config
def run_regression_1D():
np.random.seed(42)
print "create dataset ..."
N = 200
X = np.random.rand(N, 1)
Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-0.5, 1.5, 100)[:, None]
# mean, var = m.predict_f(xx)
samples, mf, vf = m.predict_f(xx, config.PROP_MC)
zu = m.sgp_layers[0].zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
# plt.plot(xx, mean, 'b', lw=2)
# plt.fill_between(
# xx[:, 0],
# mean[:, 0] - 2 * np.sqrt(var[:, 0]),
# mean[:, 0] + 2 * np.sqrt(var[:, 0]),
# color='blue', alpha=0.2)
plt.plot(np.tile(xx[np.newaxis, :], [200, 1]))
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
plt.xlim(-0.1, 1.1)
# inference
print "create model and optimize ..."
M = 20
hidden_size = [2]
model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
model.optimise(method='L-BFGS-B', alpha=1, maxiter=2000)
plot(model)
# plt.show()
plt.savefig('/tmp/aep_dgpr_1D.pdf')
def run_banana():
def gridParams():
mins = [-3.25, -2.85]
maxs = [3.65, 3.4]
nGrid = 50
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return mins, maxs, xx, yy, Xplot
def plot(m):
col1 = '#0172B2'
col2 = '#CC6600'
mins, maxs, xx, yy, Xplot = gridParams()
mf, vf = m.predict_f(Xplot)
plt.figure()
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == 1],
Xtrain[:, 1][Ytrain[:, 0] == 1],
'o', color=col1, mew=0, alpha=0.5)
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == -1],
Xtrain[:, 1][Ytrain[:, 0] == -1],
'o', color=col2, mew=0, alpha=0.5)
zu = m.sgp_layers[0].zu
plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
plt.contour(xx, yy, mf.reshape(*xx.shape),
[0], colors='k', linewidths=1.8, zorder=100)
Xtrain = np.loadtxt('./examples/data/banana_X_train.txt', delimiter=',')
Ytrain = np.loadtxt('./examples/data/banana_Y_train.txt',
delimiter=',').reshape(-1, 1)
Ytrain[np.where(Ytrain == 0)[0]] = -1
M = 30
hidden_size = [2]
model = aep.SDGPR(Xtrain, Ytrain, M, hidden_size, lik='Probit')
model.optimise(method='L-BFGS-B', alpha=1.0, maxiter=2000)
plot(model)
plt.show()
plt.savefig('/tmp/aep_dgpc_banana.pdf')
def run_regression_1D_stoc():
np.random.seed(42)
print "create dataset ..."
N = 200
X = np.random.rand(N, 1)
Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-0.5, 1.5, 100)[:, None]
mean, var = m.predict_f(xx)
zu = m.sgp_layers[0].zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color='blue', alpha=0.2)
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
plt.xlim(-0.1, 1.1)
# inference
print "create model and optimize ..."
M = 20
hidden_size = [2]
model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
model.optimise(method='adam', alpha=1.0,
maxiter=50000, mb_size=M, adam_lr=0.001)
plot(model)
plt.show()
plt.savefig('/tmp/aep_dgpr_1D_stoc.pdf')
def run_banana_stoc():
def gridParams():
mins = [-3.25, -2.85]
maxs = [3.65, 3.4]
nGrid = 50
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return mins, maxs, xx, yy, Xplot
def plot(m):
col1 = '#0172B2'
col2 = '#CC6600'
mins, maxs, xx, yy, Xplot = gridParams()
mf, vf = m.predict_f(Xplot)
plt.figure()
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == 1],
Xtrain[:, 1][Ytrain[:, 0] == 1],
'o', color=col1, mew=0, alpha=0.5)
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == -1],
Xtrain[:, 1][Ytrain[:, 0] == -1],
'o', color=col2, mew=0, alpha=0.5)
zu = m.sgp_layers[0].zu
plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
plt.contour(xx, yy, mf.reshape(*xx.shape),
[0], colors='k', linewidths=1.8, zorder=100)
Xtrain = np.loadtxt('./examples/data/banana_X_train.txt', delimiter=',')
Ytrain = np.loadtxt('./examples/data/banana_Y_train.txt',
delimiter=',').reshape(-1, 1)
Ytrain[np.where(Ytrain == 0)[0]] = -1
M = 30
hidden_size = [2]
model = aep.SDGPR(Xtrain, Ytrain, M, hidden_size, lik='Probit')
mb_size = int(Xtrain.shape[0] / 4)
model.optimise(method='adam', alpha=1.0, maxiter=100000,
mb_size=mb_size, adam_lr=0.001)
plot(model)
plt.show()
plt.savefig('/tmp/aep_dgpc_banana_stoc.pdf')
def run_step_1D():
np.random.seed(42)
def step(x):
y = x.copy()
y[y < 0.0] = 0.0
y[y > 0.0] = 1.0
return y + 0.02 * np.random.randn(x.shape[0], 1)
print "create dataset ..."
N = 100
X = np.random.rand(N, 1) * 3 - 1.5
Y = step(X) - 0.5
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-3, 3, 100)[:, None]
mean, var = m.predict_f(xx)
zu = m.sgp_layers[0].zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color='blue', alpha=0.2)
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
no_samples = 20
xx = np.linspace(-3, 3, 500)[:, None]
f_samples = m.sample_f(xx, no_samples)
for i in range(no_samples):
plt.plot(xx, f_samples[:, :, i], linewidth=0.5, alpha=0.5)
plt.xlim(-3, 3)
# inference
print "create model and optimize ..."
M = 20
hidden_size = [2]
model = aep.SDGPR(X, Y, M, hidden_size, lik='Gaussian')
# model.optimise(method='L-BFGS-B', alpha=1, maxiter=1000)
model.optimise(method='adam', adam_lr=0.05, alpha=1, maxiter=2000)
plot(model)
plt.show()
def run_spiral():
np.random.seed(42)
def gridParams():
mins = [-1.2, -1.2]
maxs = [1.2, 1.2]
nGrid = 80
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return mins, maxs, xx, yy, Xplot
def plot(m):
col1 = '#0172B2'
col2 = '#CC6600'
mins, maxs, xx, yy, Xplot = gridParams()
mf, vf = m.predict_f(Xplot)
plt.figure()
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == 1],
Xtrain[:, 1][Ytrain[:, 0] == 1],
'o', color=col1, mew=0, alpha=0.5)
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == -1],
Xtrain[:, 1][Ytrain[:, 0] == -1],
'o', color=col2, mew=0, alpha=0.5)
zu = m.sgp_layers[0].zu
plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
plt.contour(xx, yy, mf.reshape(*xx.shape),
[0], colors='k', linewidths=1.8, zorder=100)
N = 100
M = 20
Xtrain, Ytrain = spiral(N)
Xtrain /= 6
hidden_size = [2, 2]
model = aep.SDGPR(Xtrain, Ytrain, M, hidden_size, lik='Probit')
model.set_fixed_params(['sf_0', 'sf_1', 'sf_2'])
model.optimise(method='L-BFGS-B', alpha=1, maxiter=5000)
plot(model)
plt.show()
if __name__ == '__main__':
# run_regression_1D()
# run_banana()
run_step_1D()
# run_spiral()
# run_regression_1D_stoc()
# run_banana_stoc()
| mit | 7,320,144,237,141,424,000 | 30.247312 | 76 | 0.503441 | false |
indianajohn/ycmd | ycmd/tests/clang/clang_handlers_test.py | 1 | 1074 | # Copyright (C) 2015 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from ..handlers_test import Handlers_test
class Clang_Handlers_test( Handlers_test ):
def __init__( self ):
self._file = __file__
| gpl-3.0 | -675,783,288,828,377,000 | 32.5625 | 70 | 0.746741 | false |
jdemon519/cfme_tests | cfme/cloud/instance/gce.py | 1 | 3774 | from utils import version, deferred_verpick
from cfme.exceptions import OptionNotAvailable
from cfme.web_ui import fill, flash
from cfme.fixtures import pytest_selenium as sel
from . import Instance, select_provision_image
class GCEInstance(Instance):
# CFME & provider power control options
START = "Start"
POWER_ON = START # For compatibility with the infra objects.
STOP = "Stop"
DELETE = "Delete"
TERMINATE = deferred_verpick({
version.LOWEST: 'Terminate',
'5.6.1': 'Delete',
})
# CFME-only power control options
SOFT_REBOOT = "Soft Reboot"
# Provider-only power control options
RESTART = "Restart"
# CFME power states
STATE_ON = "on"
STATE_OFF = "off"
STATE_SUSPENDED = "suspended"
STATE_TERMINATED = "terminated"
STATE_ARCHIVED = "archived"
STATE_UNKNOWN = "unknown"
UI_POWERSTATES_AVAILABLE = {
'on': [STOP, SOFT_REBOOT, TERMINATE],
'off': [START, TERMINATE]
}
UI_POWERSTATES_UNAVAILABLE = {
'on': [START],
'off': [STOP, SOFT_REBOOT]
}
def create(self, email=None, first_name=None, last_name=None, availability_zone=None,
instance_type=None, cloud_network=None, boot_disk_size=None, cancel=False,
**prov_fill_kwargs):
"""Provisions an GCE instance with the given properties through CFME
Args:
email: Email of the requester
first_name: Name of the requester
last_name: Surname of the requester
availability_zone: zone to deploy instance
cloud_network: Name of the cloud network the instance should belong to
instance_type: Type of the instance
boot_disk_size: size of root disk
cancel: Clicks the cancel button if `True`, otherwise clicks the submit button
(Defaults to `False`)
Note:
For more optional keyword arguments, see
:py:data:`cfme.cloud.provisioning.provisioning_form`
"""
from cfme.provisioning import provisioning_form
# Nav to provision form and select image
select_provision_image(template_name=self.template_name, provider=self.provider)
fill(provisioning_form, dict(
email=email,
first_name=first_name,
last_name=last_name,
instance_name=self.name,
availability_zone=availability_zone,
instance_type=instance_type,
cloud_network=cloud_network,
boot_disk_size=boot_disk_size,
**prov_fill_kwargs
))
if cancel:
sel.click(provisioning_form.cancel_button)
flash.assert_success_message(
"Add of new VM Provision Request was cancelled by the user")
else:
sel.click(provisioning_form.submit_button)
flash.assert_success_message(
"VM Provision Request was Submitted, you will be notified when your VMs are ready")
def power_control_from_provider(self, option):
"""Power control the instance from the provider
Args:
option: power control action to take against instance
Raises:
OptionNotAvailable: option param must have proper value
"""
if option == GCEInstance.START:
self.provider.mgmt.start_vm(self.name)
elif option == GCEInstance.STOP:
self.provider.mgmt.stop_vm(self.name)
elif option == GCEInstance.RESTART:
self.provider.mgmt.restart_vm(self.name)
elif option == GCEInstance.TERMINATE:
self.provider.mgmt.delete_vm(self.name)
else:
raise OptionNotAvailable(option + " is not a supported action")
| gpl-2.0 | -7,548,639,809,491,895,000 | 36.366337 | 99 | 0.622152 | false |
jbloom/mapmuts | scripts/mapmuts_inferdifferentialpreferences.py | 1 | 19811 | #!python
"""Infers differential preferences for each amino acid at each site.
Written by Jesse Bloom, 2014.
"""
import re
import sys
import os
import time
import tempfile
import math
import copy
import traceback
import multiprocessing
import warnings
import cPickle
import mapmuts
import mapmuts.io
import mapmuts.sequtils
import mapmuts.bayesian
import mapmuts.plot
def RMS(dpi_mean):
"""Computes root mean square value of entries."""
rms = 0.0
for x in dpi_mean:
rms += x**2
rms = math.sqrt(rms)
return rms
def Entropy(pi_mean):
"""Computes site entropy in bits from array of probabilities."""
h = 0.0 # calculate entropy
for pi in pi_mean:
if pi == 0:
pass
elif pi < 0:
raise ValueError("Negative pi value of %g" % pi)
else:
h -= pi * math.log(pi, 2)
return h
def RunMCMC(ires, error_control_counts, starting_sample_counts, control_selection_counts, selection_counts, wtcodon, f_prior, epsilon_prior, pi_concentration, epsilon_concentration, f_concentration, deltapi_concentration, nruns, nsteps, burn, thin, minvalue, convergence, stepincrease, pickleresults, seed):
"""Runs MCMC to infer differential preferences.
Calling variables have same meaning as in the *main* function.
The results are written using *cPickle* to the file
specified by *pickleresults*.
"""
mapmuts.bayesian.Seed(seed)
logstring = ['\nPerforming inference for site %d...' % ires]
start_t = time.clock()
returnvalue = \
mapmuts.bayesian.InferDifferentialPreferencesMCMC(\
error_control_counts, starting_sample_counts, control_selection_counts, selection_counts, wtcodon, f_prior, epsilon_prior, pi_concentration, epsilon_concentration, f_concentration, deltapi_concentration, nruns, nsteps, burn, thin, minvalue=minvalue)
t = time.clock() - start_t
run_diffs = [(selection, returnvalue[selection][3]) for selection in returnvalue.iterkeys()]
logstring.append(" completed MCMC of %d steps in %.1f seconds" % (nsteps, t))
if nruns > 1 and max([tup[1] for tup in run_diffs]) > convergence:
logstring.append('; inference FAILED to converge (run differences of: %s).\n' % ', '.join(['%g for %s' % (tup[1], tup[0]) for tup in run_diffs]))
if stepincrease > 1:
start_t = time.clock()
logstring.append('Trying again with %d-fold more steps...' % stepincrease)
returnvalue = \
mapmuts.bayesian.InferDifferentialPreferencesMCMC(\
error_control_counts, starting_sample_counts, control_selection_counts, selection_counts, wtcodon, f_prior, epsilon_prior, pi_concentration, epsilon_concentration, f_concentration, deltapi_concentration, nruns, int(stepincrease * nsteps), burn, thin, minvalue=minvalue)
assert len(returnvalue) >= 2, "Should be at least the control preferences and one differential preference"
t = time.clock() - start_t
run_diffs = [(selection, returnvalue[selection][3]) for selection in returnvalue.iterkeys()]
if max([tup[1] for tup in run_diffs]) <= convergence:
logstring.append(' this time MCMC converged in %.1f seconds (run differences of: %s).\n' % (t, ', '.join(['%g for %s' % (tup[1], tup[0]) for tup in run_diffs])))
else:
logstring.append(' MCMC still FAILED to converge in %.1f seconds (run differences of: %s).\n' % (t, ', '.join(['%g for %s' % (tup[1], tup[0]) for tup in run_diffs])))
elif nruns > 1:
logstring.append("; inference converged (run differences of: %s).\n" % ', '.join(['%g for %s' % (tup[1], tup[0]) for tup in run_diffs]))
else:
logstring.append('.\n')
logstring = ''.join(logstring)
cPickle.dump((logstring, returnvalue), open(pickleresults, 'w'))
time.sleep(1)
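# Each site is fit in its own worker process; RunMCMC hands its results back
# through a temporary pickle file rather than a return value.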
def main():
"""Main body of script."""
# hard-coded variables
includestop = True # include stop codons as a possible amino acid
burnfrac = 0.2 # set burn-in to this times nsteps
# check on module availability
if not mapmuts.bayesian.PymcAvailable():
raise ImportError("Cannot run this script as pymc or numpy are not available.")
aas = mapmuts.sequtils.AminoAcids(includestop=includestop)
codons = mapmuts.sequtils.Codons()
# read input variables
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument"\
+ ' specifying the name of the input file.')
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile of %s" % infilename)
d = mapmuts.io.ParseInfile(open(infilename))
outfileprefix = mapmuts.io.ParseStringValue(d, 'outfileprefix')
if outfileprefix.upper() == 'NONE':
outfileprefix = ''
logfile = "%sinferdifferentialpreferences_log.txt" % outfileprefix
log = open(logfile, 'w')
log.write("Beginning execution of mapmuts_inferpreferences.py"\
" in directory %s" % (os.getcwd()))
mapmuts.io.PrintVersions(log)
log.write("Input data being read from infile %s\n\n" % infilename)
log.write("Progress being logged to this file, %s\n\n" % logfile)
log.write("Read the following key/value pairs from infile %s:"\
% (infilename))
for (key, value) in d.iteritems():
log.write("\n%s %s" % (key, value))
codoncounts_data = {} # dictionary keyed by sample type
for sample in ['error_control', 'starting_sample', 'control_selection']:
fname = mapmuts.io.ParseStringValue(d, sample)
if not os.path.isfile(fname):
raise IOError("Failed to find file %s specified by %s" % (fname, sample))
codoncounts_data[sample] = mapmuts.io.ReadCodonCounts(open(fname))
selections = []
for (key, value) in d.iteritems():
m = re.search('^selection_(?P<sample>\S+)$', key)
if m:
sample = m.group('sample')
if sample in codoncounts_data:
raise ValueError("Duplicate selection sample of name %s" % sample)
if sample in ['error_control', 'starting_selection', 'control_selection']:
raise ValueError("Selection sample cannot have name %s" % sample)
if not os.path.isfile(value):
raise IOError("Failed to find file %s specified for selection sample %s" % (value, sample))
codoncounts_data[sample] = mapmuts.io.ReadCodonCounts(open(value))
selections.append(sample)
if not selections:
raise ValueError("Failed to find any selected samples with keys of form selection_???")
for value in codoncounts_data.itervalues():
mapmuts.sequtils.ClassifyCodonCounts(value)
log.write("\n\nRead codon counts for the following samples: %s" % ', '.join(codoncounts_data.keys()))
epsilon_concentration = mapmuts.io.ParseFloatValue(d, 'epsilon_concentration')
assert epsilon_concentration > 0, "epsilon_concentration must be > 0"
f_concentration = mapmuts.io.ParseFloatValue(d, 'f_concentration')
assert f_concentration > 0, "f_concentration must be > 0"
deltapi_concentration = mapmuts.io.ParseFloatValue(d, 'deltapi_concentration')
assert deltapi_concentration > 0, "deltapi_concentration must be > 0"
pi_concentration = mapmuts.io.ParseFloatValue(d, 'pi_concentration')
assert pi_concentration > 0, "pi_concentration must be > 0"
minvalue = mapmuts.io.ParseFloatValue(d, 'minvalue')
assert minvalue > 0, "minvalue must be > 0"
seed = mapmuts.io.ParseIntValue(d, 'seed')
nruns = mapmuts.io.ParseIntValue(d, 'nruns')
assert nruns >= 1, "nruns must be >= 1"
if nruns < 2:
warnings.warn('Will not be able to check for convergence since nruns < 2. You are suggested to use nruns >= 2.')
nsteps = mapmuts.io.ParseIntValue(d, 'nsteps')
burn = int(burnfrac * nsteps)
assert nsteps >= 1 and burn >= 1, "nsteps must be set to a larger value than %d" % nsteps
thin = mapmuts.io.ParseIntValue(d, 'thin')
assert thin >= 1, "thin must be >= 1"
convergence = mapmuts.io.ParseFloatValue(d, 'convergence')
assert convergence > 0, "convergence must be > 0"
stepincrease = mapmuts.io.ParseIntValue(d, 'stepincrease')
assert stepincrease >= 1, "stepincrease must be >= 1"
ncpus = mapmuts.io.ParseIntValue(d, 'ncpus')
assert ncpus >= 1, "ncpus must be >= 1"
MCMC_traces = mapmuts.io.ParseStringValue(d, 'MCMC_traces')
if MCMC_traces in ['None', 'False']:
MCMC_traces = None
elif not mapmuts.plot.PylabAvailable():
log.write("\nWARNING: cannot create posterior plots as pylab / matplotlib are not available.\n")
MCMC_traces = None
elif not os.path.isdir(MCMC_traces):
os.mkdir(MCMC_traces)
preference_plots = mapmuts.io.ParseStringValue(d, 'preference_plots')
if preference_plots in ['None', 'False']:
preference_plots = None
elif not mapmuts.plot.PylabAvailable():
log.write("\nWARNING: cannot create preference plots as pylab / matplotlib are not available.\n")
preference_plots = None
elif not os.path.isdir(preference_plots):
os.mkdir(preference_plots)
# Now set up to run the MCMC
# first, compute the parameters needed for the priors
starting_mutrate = codoncounts_data['starting_sample']['TOTAL_MUT'] / float(codoncounts_data['starting_sample']['TOTAL_COUNTS'])
error_rate = codoncounts_data['error_control']['TOTAL_MUT'] / float(codoncounts_data['error_control']['TOTAL_COUNTS'])
f_prior = (starting_mutrate - error_rate) / float(len(codons) - 1)
log.write('\nThe prior estimate for the frequency of any specific mutation in starting_sample is %g (overall mutation rate of %g in starting_sample minus overall error rate of %g for error_control, divided by number of codons).' % (f_prior, starting_mutrate, error_rate))
epsilon_prior = {}
for (ndiffs, denom) in [(1, 9.0), (2, 27.0), (3, 27.0)]:
epsilon_prior[ndiffs] = codoncounts_data['error_control']['TOTAL_N_%dMUT' % ndiffs] / float(codoncounts_data['error_control']['TOTAL_COUNTS']) / denom
log.write('\nThe prior estimate for the error rate in error_control for a mutation with %d nucleotide changes is %g.' % (ndiffs, epsilon_prior[ndiffs]))
# now get a list of all (sites, wildtype_codon)
sites = [(key, codoncounts_data['error_control'][key]['WT']) for key in codoncounts_data['error_control'].keys() if isinstance(key, int)]
sites.sort()
for (sample, sampledata) in codoncounts_data.iteritems():
samplesites = [(key, sampledata[key]['WT']) for key in sampledata.keys() if isinstance(key, int)]
samplesites.sort()
if sites != samplesites:
raise ValueError("Not all samples specify the same set of sites / wildtype codons.")
log.write("\nData is specified for %d sites.\n" % len(sites))
preferencesfile = '%spreferences_control_selection.txt' % outfileprefix
preferencescred95file = '%spreferences_control_selection_credibleintervals_95.txt' % outfileprefix
log.write('\nPreferences for control selection will be written to %s and %s.\n' % (preferencesfile, preferencescred95file))
preferencesfile = open(preferencesfile, 'w')
preferencesfile.write('#SITE\tWT_AA\tSITE_ENTROPY\t%s\n' % '\t'.join(['PI_%s' % aa for aa in aas]))
preferencescred95file = open(preferencescred95file, 'w')
preferencescred95file.write('SITE\t%s\n' % '\t'.join(['PI_%s_95cred' % aa for aa in aas]))
meanfiles = {}
cred95files = {}
for selection in selections:
meanfiles[selection] = '%sdifferentialpreferences_selection_%s.txt' % (outfileprefix, selection)
cred95files[selection] = '%sdifferentialpreferences_selection_%s_credibleintervals_95.txt' % (outfileprefix, selection)
log.write('\nDifferential preferences for selection %s will be written to %s and %s.\n' % (selection, meanfiles[selection], cred95files[selection]))
meanfiles[selection] = open(meanfiles[selection], 'w')
cred95files[selection] = open(cred95files[selection], 'w')
meanfiles[selection].write('#SITE\tWT_AA\tRMS_dPI\t%s\n' % '\t'.join(['dPI_%s' % aa for aa in aas]))
cred95files[selection].write('#SITE\t%s\n' % '\t'.join(['dPI_%s_95cred' % aa for aa in aas]))
log.write('\nNow beginning inferences...\n')
log.flush()
processes = {} # keyed by residue number, value is multiprocessing.Process
wtaa_d = {} # keyed by residue number, value is wtaa
pickleresults = {} # keyed by residue number, value is pickle file name
try:
# set up the processes
for (ires, wtcodon) in sites:
wtaa = mapmuts.sequtils.Translate([('wt', wtcodon)])[0][1]
if not wtaa:
wtaa = '*'
wtaa_d[ires] = wtaa
(fd, pickleresults[ires]) = tempfile.mkstemp()
os.close(fd)
error_control_counts = dict([(codon, codoncounts_data['error_control'][ires][codon]) for codon in codons])
starting_sample_counts = dict([(codon, codoncounts_data['starting_sample'][ires][codon]) for codon in codons])
control_selection_counts = dict([(codon, codoncounts_data['control_selection'][ires][codon]) for codon in codons])
selection_counts = {}
for selection in selections:
selection_counts[selection] = dict([(codon, codoncounts_data[selection][ires][codon]) for codon in codons])
processes[ires] = multiprocessing.Process(target=RunMCMC, args=(ires, error_control_counts, starting_sample_counts, control_selection_counts, selection_counts, wtcodon, f_prior, epsilon_prior, pi_concentration, epsilon_concentration, f_concentration, deltapi_concentration, nruns, nsteps, burn, thin, minvalue, convergence, stepincrease, pickleresults[ires], seed))
# start running processes. Don't start the next
# until the first residue still running is done.
processes_running = dict([(ires, False) for ires in processes.iterkeys()])
processes_finished = dict([(ires, False) for ires in processes.iterkeys()])
for (ires, wtcodon) in sites:
i = 0
while (ires + i <= sites[-1][0]) and (processes_running.values().count(True) < ncpus):
if (not processes_finished[ires + i]) and (not processes_running[ires + i]):
processes[ires + i].start()
processes_running[ires + i] = True
time.sleep(1)
i += 1
if not processes_running[ires]:
raise ValueError("Process for ires %d should be running" % ires)
while processes[ires].is_alive():
time.sleep(1)
if processes[ires].exitcode:
raise ValueError("Error running MCMC for residue %d" % ires)
processes_finished[ires] = True
processes_running[ires] = False
(logstring, returnvalue) = cPickle.load(open(pickleresults[ires]))
os.remove(pickleresults[ires])
log.write(logstring)
log.flush()
(mean, cred95, traces, run_diff) = returnvalue['control_selection']
assert len(aas) == len(mean) == len(cred95), "Not right number of entries"
assert abs(sum(mean) - 1.0) < 1e-7, "Sum of control preferences of %g not close to one." % sum(mean)
preferencesfile.write('%d\t%s\t%g\t%s\n' % (ires, wtaa_d[ires], Entropy(mean), '\t'.join(['%g' % pi for pi in mean])))
preferencescred95file.write('%d\t%s\n' % (ires, '\t'.join(['%g,%g' % (x[0], x[1]) for x in cred95])))
preferencesfile.flush()
preferencescred95file.flush()
for selection in selections:
(mean, cred95, traces, run_diff) = returnvalue[selection]
assert len(aas) == len(mean) == len(cred95), "Not right number of entries"
                assert abs(sum(mean)) < 1e-7, "Sum of differential preferences of %g not close to zero." % sum(mean)
meanfiles[selection].write('%d\t%s\t%g\t%s\n' % (ires, wtaa_d[ires], RMS(mean), '\t'.join(['%g' % dpi for dpi in mean])))
cred95files[selection].write('%d\t%s\n' % (ires, '\t'.join(['%g,%g' % (x[0], x[1]) for x in cred95])))
meanfiles[selection].flush()
cred95files[selection].flush()
if MCMC_traces:
for selection in ['control_selection'] + selections:
plottraces = []
trace_labels = []
for irun in range(nruns):
plottraces += [returnvalue[selection][2][irun].transpose()[iaa] for iaa in range(len(aas))]
trace_labels += [aas[iaa] for iaa in range(len(aas))]
if selection == 'control_selection':
plotname = '%s/%spreferences_control_selection_%d.pdf' % (MCMC_traces, outfileprefix, ires)
ylabel = 'preference'
title = 'control selection preferences, residue %d' % ires
else:
plotname = '%s/%sdifferentialpreferences_selection_%s_%d.pdf' % (MCMC_traces, outfileprefix, selection, ires)
ylabel = 'differential preference'
title = 'selection %s differential preferences, residue %d' % (selection.replace('_', ' '), ires)
assert os.path.isdir(os.path.dirname(plotname)), "Cannot find directory for %s" % plotname
mapmuts.plot.PlotTraces(plottraces, plotname, xlabel='MCMC step', ylabel=ylabel, title=title, trace_labels=trace_labels)
log.write('Wrote MCMC traces to %s\n' % plotname)
log.flush()
if preference_plots:
for selection in ['control_selection'] + selections:
if selection == 'control_selection':
plotname = '%s/%spreferences_control_selection_%d.pdf' % (preference_plots, outfileprefix, ires)
differentialpreferences = False
else:
plotname = '%s/%sdifferentialpreferences_selection_%s_%d.pdf' % (preference_plots, outfileprefix, selection, ires)
differentialpreferences = True
(mean, cred95) = (dict(zip(aas, returnvalue[selection][0])), dict(zip(aas, returnvalue[selection][1])))
assert os.path.isdir(os.path.dirname(plotname)), "Cannot find directory for %s" % plotname
mapmuts.plot.PlotEquilibriumFreqs(mean, plotname, 'residue %d' % ires, pi_errs=cred95, differentialpreferences=differentialpreferences)
except:
(exc_type, exc_value, exc_traceback) = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=log)
log.write("\n\nPrematurely closing log due to execution error.")
raise
finally:
log.write("\n\nExecution completed at %s." % time.ctime())
log.close()
preferencesfile.close()
preferencescred95file.close()
for selection in selections:
meanfiles[selection].close()
cred95files[selection].close()
try:
for (i, is_running) in processes_running.iteritems():
if is_running and processes[i].is_alive():
processes[i].terminate()
except NameError:
pass
try:
for f in pickleresults.itervalues():
if os.path.isfile(f):
os.remove(f)
except NameError:
pass
if __name__ == '__main__':
main() # run the script
| gpl-3.0 | -2,978,101,380,568,323,600 | 56.257225 | 377 | 0.63177 | false |
lcostantino/healing-os | healing/actionexecutor/rpcapi.py | 1 | 2386 | # Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the action manager RPC API.
"""
from oslo.config import cfg
from healing import config
from oslo import messaging
from healing.objects import base as objects_base
from healing.openstack.common import jsonutils
from healing.rpc import rpc
CONF = config.CONF
rpcapi_cap_opt = cfg.StrOpt('actionmanager',
default='1.0',
help='Set a version cap for messages sent to action services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ActionAPI(object):
    '''Client side of the action executor RPC API.
API version history:
1.0 - Initial version. '''
VERSION_ALIASES = {}
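    # Maps upgrade-level names from CONF.upgrade_levels to RPC version caps;
    # empty until a compatibility alias is needed.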
def __init__(self):
super(ActionAPI, self).__init__()
target = messaging.Target(topic=CONF.action_executor.topic, version='1.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.actionmanager,
CONF.upgrade_levels.actionmanager)
serializer = objects_base.HealingObjectSerializer()
self.client = rpc.get_client(target, version_cap=version_cap,
serializer=serializer)
def run_action(self, ctxt, actions, block=False):
cctxt = self.client.prepare()
cctxt.cast(ctxt, 'run_action', actions=actions, block=block)
def run_action_and_wait(self, ctxt, actions, block=False):
cctxt = self.client.prepare()
cctxt.call(ctxt, 'run_action', actions=actions, block=block)
def alarm(self, ctxt, alarm_id, source=None, contract_id=None,
resource_id=None, project_id=None):
cctxt = self.client.prepare()
cctxt.cast(ctxt, 'alarm', alarm_id=alarm_id, source=source,
contract_id=contract_id, resource_id=resource_id,
project_id=project_id)
| apache-2.0 | 1,756,752,875,997,406,200 | 36.28125 | 82 | 0.661358 | false |
TOSUKUi/readyfor-api | readyforapi/errors.py | 1 | 1532 | __author__ = 'TOSUKUi'
from requests.exceptions import *
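# Exception hierarchy for the Readyfor API client: APIException covers API and
# parsing problems, while AccessException (a RequestException) covers transport
# and access problems.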
class APIException(Exception):
"""
    Base class for all API exceptions
"""
pass
class ProjectPageEndException(APIException):
"""
    An error raised when the crawl ends because the project page is hidden
"""
pass
class ProjectPageNotPublishedException(APIException):
"""
    An error raised during a crawl when the project is not published
"""
pass
class APIProjectError(APIException):
"""
    An API error caused by a project error, e.g. the HTML format has changed
"""
pass
class HtmlParseException(APIException):
"""
    An API error caused by a parser error
"""
pass
class ProjectPageTabParseException(HtmlParseException):
"""
    An HTML parse error raised when parsing a project tab fails.
"""
pass
class ProjectNotFoundError(APIProjectError):
"""
    The specified project was not found on Readyfor. (Bad vanity URL? Non-existent ID?)
"""
pass
class ProjectCommentsPageBackersZeroException(APIProjectError):
"""
    An error raised when the comments page backers count is 0
"""
class AccessException(RequestException):
"""
    An error caused by a page access failure, such as access denied by Readyfor.
"""
pass
class UserNoIDException(APIException):
"""
    An error raised when a user object is constructed whose user_id is NoID
"""
pass
class PageAccessException(AccessException):
"""
    An error caused by a 4xx response, for example, page not found.
""" | mit | -3,692,547,134,322,571,000 | 18.909091 | 91 | 0.676893 | false |
tridesclous/tridesclous | tridesclous/jobtools.py | 1 | 5604 | """
Some helper functions to run some stages in parallel:
* CatalogueConstructor.run_signalprocessor = preprocessing + peak detection
Used only for offline computation.
This is useful mainly when the I/O is slow.
"""
import time
import os
import loky
#~ import concurrent.futures.ThreadPoolExecutor
import numpy as np
from .dataio import DataIO
from .signalpreprocessor import signalpreprocessor_engines
from .peakdetector import get_peak_detector_class
from . import labelcodes
# TODO VERY IMPORTANT MOVE THIS
_dtype_peak = [('index', 'int64'), ('cluster_label', 'int64'), ('channel', 'int64'), ('segment', 'int64'), ('extremum_amplitude', 'float64'),]
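# Pool initializer: each worker process builds its own DataIO, signal
# preprocessor and peak detector once and keeps them in module-level globals
# so they are reused across chunks.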
def signalprocessor_initializer(dirname, chan_grp_, seg_num_,
internal_dtype, chunksize, pad_width_,
signals_medians, signals_mads,
signal_preprocessor_params,
peak_detector_params):
global dataio
global chan_grp
global seg_num
global pad_width
global signalpreprocessor
global peakdetector
dataio = DataIO(dirname)
#~ print('signalprocessor_initializer', id(dataio))
chan_grp = chan_grp_
seg_num = seg_num_
pad_width = pad_width_
p = dict(signal_preprocessor_params)
engine = p.pop('engine')
SignalPreprocessor_class = signalpreprocessor_engines[engine]
signalpreprocessor = SignalPreprocessor_class(dataio.sample_rate, dataio.nb_channel(chan_grp), chunksize, dataio.source_dtype)
p['normalize'] = True
p['signals_medians'] = signals_medians
p['signals_mads'] = signals_mads
signalpreprocessor.change_params(**p)
p = dict(peak_detector_params)
engine = p.pop('engine')
method = p.pop('method')
PeakDetector_class = get_peak_detector_class(method, engine)
geometry = dataio.get_geometry(chan_grp)
peakdetector = PeakDetector_class(dataio.sample_rate, dataio.nb_channel(chan_grp),
chunksize, internal_dtype, geometry)
peakdetector.change_params(**p)
def read_process_write_one_chunk(args):
i_start, i_stop = args
    # read, process and write one chunk of the segment
global dataio
global chan_grp
global seg_num
global pad_width
global signalpreprocessor
global peakdetector
#~ print(i_start, i_stop, id(dataio))
#~ print(dataio)
# read chunk and pad
sigs_chunk = dataio.get_signals_chunk(seg_num=seg_num, chan_grp=chan_grp,
i_start=i_start, i_stop=i_stop, signal_type='initial', pad_width=pad_width)
#~ print('pad_width', pad_width)
#~ print('read_process_write_one_chunk', i_start, i_stop, i_stop-i_start, sigs_chunk.shape)
# process
preprocessed_chunk = signalpreprocessor.process_buffer(sigs_chunk)
#~ exit()
# peak detection
n_span = peakdetector.n_span
assert n_span < pad_width
chunk_peak = preprocessed_chunk[pad_width-n_span:-pad_width+n_span]
time_ind_peaks, chan_ind_peaks, peak_val_peaks = peakdetector.process_buffer(chunk_peak)
peaks = np.zeros(time_ind_peaks.size, dtype=_dtype_peak)
peaks['index'] = time_ind_peaks - n_span+ i_start
peaks['segment'][:] = seg_num
peaks['cluster_label'][:] = labelcodes.LABEL_NO_WAVEFORM
if chan_ind_peaks is None:
peaks['channel'][:] = -1
else:
peaks['channel'][:] = chan_ind_peaks
if peak_val_peaks is None:
peaks['extremum_amplitude'][:] = 0.
else:
peaks['extremum_amplitude'][:] = peak_val_peaks
# remove the padding and write
preprocessed_chunk = preprocessed_chunk[pad_width:-pad_width]
dataio.set_signals_chunk(preprocessed_chunk, seg_num=seg_num, chan_grp=chan_grp,
i_start=i_start, i_stop=i_stop, signal_type='processed')
return peaks
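# Cut the segment into fixed-size chunks and dispatch them to a loky worker
# pool (or run serially when n_jobs == 1), appending the detected peaks to the
# catalogue constructor.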
def run_parallel_read_process_write(cc, seg_num, length, n_jobs):
chunksize = cc.info['chunksize']
pad_width = cc.info['preprocessor']['pad_width']
initargs=(cc.dataio.dirname, cc.chan_grp, seg_num,
cc.internal_dtype, chunksize, pad_width,
cc.signals_medians, cc.signals_mads,
cc.info['preprocessor'],
cc.info['peak_detector'],
)
if length is None:
length = cc.dataio.get_segment_length(seg_num)
num_chunk = length // chunksize
chunk_slice = [(i*chunksize, (i+1)*chunksize) for i in range(num_chunk)]
if length % chunksize > 0:
chunk_slice.append((num_chunk*chunksize, length))
if n_jobs < 0:
n_jobs = os.cpu_count() + 1 - n_jobs
if n_jobs > 1:
n_jobs = min(n_jobs, len(chunk_slice))
executor = loky.get_reusable_executor(
max_workers=n_jobs, initializer=signalprocessor_initializer,
initargs=initargs, context="loky", timeout=20)
#~ concurrent.futures.ThreadPoolExecutor
#~ executor =
all_peaks = executor.map(read_process_write_one_chunk, chunk_slice)
for peaks in all_peaks:
#~ print('peaks', peaks.size)
cc.arrays.append_chunk('all_peaks', peaks)
else:
signalprocessor_initializer(*initargs)
for sl in chunk_slice:
peaks = read_process_write_one_chunk(sl)
#~ print(peaks)
cc.arrays.append_chunk('all_peaks', peaks)
cc.dataio.flush_processed_signals(seg_num=seg_num, chan_grp=cc.chan_grp, processed_length=int(length))
| mit | -7,212,987,108,487,900,000 | 31.022857 | 143 | 0.627944 | false |
klen/peewee_migrate | peewee_migrate/router.py | 1 | 11193 | """Migration router."""
import os
import pkgutil
import re
import sys
from importlib import import_module
from types import ModuleType
from unittest import mock
import peewee as pw
from peewee_migrate import LOGGER, MigrateHistory
from peewee_migrate.auto import diff_many, NEWLINE
from peewee_migrate.migrator import Migrator
try:
from functools import cached_property
except ImportError:
from cached_property import cached_property
CLEAN_RE = re.compile(r'\s+$', re.M)
CURDIR = os.getcwd()
DEFAULT_MIGRATE_DIR = os.path.join(CURDIR, 'migrations')
UNDEFINED = object()
VOID = lambda m, d: None # noqa
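# No-op fallback used when a migration module does not define migrate()/rollback().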
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'template.txt')) as t:
MIGRATE_TEMPLATE = t.read()
class BaseRouter(object):
"""Abstract base class for router."""
def __init__(self, database, migrate_table='migratehistory', ignore=None,
schema=None, logger=LOGGER):
"""Initialize the router."""
self.database = database
self.migrate_table = migrate_table
self.schema = schema
self.ignore = ignore
self.logger = logger
if not isinstance(self.database, (pw.Database, pw.Proxy)):
raise RuntimeError('Invalid database: %s' % database)
@cached_property
def model(self):
"""Initialize and cache MigrationHistory model."""
MigrateHistory._meta.database = self.database
MigrateHistory._meta.table_name = self.migrate_table
MigrateHistory._meta.schema = self.schema
MigrateHistory.create_table(True)
return MigrateHistory
@property
def todo(self):
"""Get migrations to run."""
raise NotImplementedError
@property
def done(self):
"""Scan migrations in database."""
return [mm.name for mm in self.model.select().order_by(self.model.id)]
@property
def diff(self):
"""Calculate difference between fs and db."""
done = set(self.done)
return [name for name in self.todo if name not in done]
@cached_property
def migrator(self):
"""Create migrator and setup it with fake migrations."""
migrator = Migrator(self.database)
for name in self.done:
self.run_one(name, migrator)
return migrator
def create(self, name='auto', auto=False):
"""Create a migration.
:param auto: Python module path to scan for models.
"""
migrate = rollback = ''
if auto:
# Need to append the CURDIR to the path for import to work.
sys.path.append(CURDIR)
models = auto if isinstance(auto, list) else [auto]
if not all([_check_model(m) for m in models]):
try:
modules = models
if isinstance(auto, bool):
modules = [m for _, m, ispkg in pkgutil.iter_modules([CURDIR]) if ispkg]
models = [m for module in modules for m in load_models(module)]
except ImportError as exc:
self.logger.exception(exc)
return self.logger.error("Can't import models module: %s", auto)
if self.ignore:
models = [m for m in models if m._meta.name not in self.ignore]
for migration in self.diff:
self.run_one(migration, self.migrator, fake=True)
migrate = compile_migrations(self.migrator, models)
if not migrate:
return self.logger.warning('No changes found.')
rollback = compile_migrations(self.migrator, models, reverse=True)
self.logger.info('Creating migration "%s"', name)
name = self.compile(name, migrate, rollback)
self.logger.info('Migration has been created as "%s"', name)
return name
def merge(self, name='initial'):
"""Merge migrations into one."""
migrator = Migrator(self.database)
migrate = compile_migrations(migrator, self.migrator.orm.values())
if not migrate:
return self.logger.error("Can't merge migrations")
self.clear()
self.logger.info('Merge migrations into "%s"', name)
rollback = compile_migrations(self.migrator, [])
name = self.compile(name, migrate, rollback, 0)
migrator = Migrator(self.database)
self.run_one(name, migrator, fake=True, force=True)
self.logger.info('Migrations has been merged into "%s"', name)
def clear(self):
"""Clear migrations."""
self.model.delete().execute()
def compile(self, name, migrate='', rollback='', num=None):
raise NotImplementedError
def read(self, name):
raise NotImplementedError
def run_one(self, name, migrator, fake=True, downgrade=False, force=False):
"""Run/emulate a migration with given name."""
try:
migrate, rollback = self.read(name)
if fake:
mocked_cursor = mock.Mock()
mocked_cursor.fetch_one.return_value = None
with mock.patch('peewee.Model.select'):
with mock.patch('peewee.Database.execute_sql', return_value=mocked_cursor):
migrate(migrator, self.database, fake=fake)
if force:
self.model.create(name=name)
self.logger.info('Done %s', name)
migrator.clean()
return migrator
with self.database.transaction():
if not downgrade:
self.logger.info('Migrate "%s"', name)
migrate(migrator, self.database, fake=fake)
migrator.run()
self.model.create(name=name)
else:
self.logger.info('Rolling back %s', name)
rollback(migrator, self.database, fake=fake)
migrator.run()
self.model.delete().where(self.model.name == name).execute()
self.logger.info('Done %s', name)
except Exception:
self.database.rollback()
operation = 'Migration' if not downgrade else 'Rollback'
self.logger.exception('%s failed: %s', operation, name)
raise
def run(self, name=None, fake=False):
"""Run migrations."""
self.logger.info('Starting migrations')
done = []
diff = self.diff
if not diff:
self.logger.info('There is nothing to migrate')
return done
migrator = self.migrator
for mname in diff:
self.run_one(mname, migrator, fake=fake, force=fake)
done.append(mname)
if name and name == mname:
break
return done
def rollback(self, name):
name = name.strip()
done = self.done
if not done:
raise RuntimeError('No migrations are found.')
if name != done[-1]:
raise RuntimeError('Only last migration can be canceled.')
migrator = self.migrator
self.run_one(name, migrator, False, True)
self.logger.warning('Downgraded migration: %s', name)
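# File-system backed router: migrations live as NNN_name.py files in migrate_dir.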
class Router(BaseRouter):
filemask = re.compile(r"[\d]{3}_[^\.]+\.py$")
def __init__(self, database, migrate_dir=DEFAULT_MIGRATE_DIR, **kwargs):
super(Router, self).__init__(database, **kwargs)
self.migrate_dir = migrate_dir
@property
def todo(self):
"""Scan migrations in file system."""
if not os.path.exists(self.migrate_dir):
self.logger.warning('Migration directory: %s does not exist.', self.migrate_dir)
os.makedirs(self.migrate_dir)
return sorted(f[:-3] for f in os.listdir(self.migrate_dir) if self.filemask.match(f))
def compile(self, name, migrate='', rollback='', num=None):
"""Create a migration."""
if num is None:
num = len(self.todo)
name = '{:03}_'.format(num + 1) + name
filename = name + '.py'
path = os.path.join(self.migrate_dir, filename)
with open(path, 'w') as f:
f.write(MIGRATE_TEMPLATE.format(migrate=migrate, rollback=rollback, name=filename))
return name
def read(self, name):
"""Read migration from file."""
call_params = dict()
if os.name == 'nt' and sys.version_info >= (3, 0):
# if system is windows - force utf-8 encoding
call_params['encoding'] = 'utf-8'
with open(os.path.join(self.migrate_dir, name + '.py'), **call_params) as f:
code = f.read()
scope = {}
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, scope, None)
return scope.get('migrate', VOID), scope.get('rollback', VOID)
def clear(self):
"""Remove migrations from fs."""
super(Router, self).clear()
for name in self.todo:
filename = os.path.join(self.migrate_dir, name + '.py')
os.remove(filename)
class ModuleRouter(BaseRouter):
def __init__(self, database, migrate_module='migrations', **kwargs):
"""Initialize the router."""
super(ModuleRouter, self).__init__(database, **kwargs)
if isinstance(migrate_module, str):
migrate_module = import_module(migrate_module)
self.migrate_module = migrate_module
def read(self, name):
"""Read migrations from a module."""
mod = getattr(self.migrate_module, name)
return getattr(mod, 'migrate', VOID), getattr(mod, 'rollback', VOID)
def load_models(module):
"""Load models from given module."""
if isinstance(module, ModuleType):
        # if it is already a module
modules = [module]
else:
modules = _import_submodules(module)
return {m for module in modules for m in filter(
_check_model, (getattr(module, name) for name in dir(module))
)}
def _import_submodules(package, passed=UNDEFINED):
if passed is UNDEFINED:
passed = set()
if isinstance(package, str):
package = import_module(package)
modules = []
if set(package.__path__) & passed:
return modules
passed |= set(package.__path__)
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__, package.__name__ + '.'):
module = loader.find_module(name).load_module(name)
modules.append(module)
if is_pkg:
modules += _import_submodules(module)
return modules
def _check_model(obj):
"""Check object if it's a peewee model and unique."""
return isinstance(obj, type) and issubclass(obj, pw.Model) and hasattr(obj, '_meta')
def compile_migrations(migrator, models, reverse=False):
"""Compile migrations for given models."""
source = migrator.orm.values()
if reverse:
source, models = models, source
migrations = diff_many(models, source, migrator, reverse=reverse)
if not migrations:
return False
migrations = NEWLINE + NEWLINE.join('\n\n'.join(migrations).split('\n'))
return CLEAN_RE.sub('\n', migrations)
| bsd-3-clause | -2,340,202,740,054,259 | 32.713855 | 96 | 0.590994 | false |
pombredanne/project_index | project_index/settings.py | 1 | 5089 | # Copyright (C) 2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Django settings for project_index project.
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_DIR = os.path.join(BASE_DIR, 'project_index')
PUBLIC_DIR = os.path.join(BASE_DIR, 'public')
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
MEDIA_ROOT = os.path.join(PUBLIC_DIR, 'media')
STATIC_ROOT = os.path.join(PUBLIC_DIR, 'static')
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
CELERYD_TASK_TIME_LIMIT = 600
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'project_index.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'project_index.wsgi.application'
TEMPLATE_DIRS = [
os.path.join(PROJECT_DIR, 'templates'),
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'index',
'notes',
'markdown_deux',
'djcelery',
'kombu.transport.django',
)
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'project_index.context_processors.branding',
'project_index.context_processors.wiki',
'project_index.context_processors.menu',
)
AUTH_PROFILE_MODULE = 'accounts.UserProfile'
BROKER_URL = 'django://'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
# test runner to avoid pre django 1.6 warnings
# TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner'
def _dictmerge(a, b):
""" deep merge two dictionaries """
ret = dict(a.items() + b.items())
for key in set(a.keys()) & set(b.keys()):
if isinstance(a[key], dict) and isinstance(b[key], dict):
ret[key] = _dictmerge(a[key], b[key])
return ret
from local_settings import * # noqa
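# Any EXTRA_<NAME> value defined in local_settings is merged into the existing
# <NAME> setting below: lists are appended, dicts are deep-merged.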
for var, val in [i for i in locals().items() if i[0].startswith('EXTRA_')]:
name = var[len('EXTRA_'):]
try:
locals()[name] += val # append list
except TypeError:
locals()[name] = _dictmerge(locals()[name], val) # merge dict
| gpl-3.0 | 1,524,545,702,789,491,500 | 33.619048 | 75 | 0.727255 | false |
Aloomaio/googleads-python-lib | examples/ad_manager/v201805/order_service/create_orders.py | 1 | 1807 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new orders.
To determine which orders exist, run get_all_orders.py.
"""
import uuid
# Import appropriate modules from the client library.
from googleads import ad_manager
COMPANY_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
SALESPERSON_ID = 'INSERT_SALESPERSON_ID_HERE'
TRAFFICKER_ID = 'INSERT_TRAFFICKER_ID_HERE'
def main(client, company_id, salesperson_id, trafficker_id):
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v201805')
# Create order objects.
orders = []
for _ in xrange(5):
order = {
'name': 'Order #%s' % uuid.uuid4(),
'advertiserId': company_id,
'salespersonId': salesperson_id,
'traffickerId': trafficker_id
}
orders.append(order)
# Add orders.
orders = order_service.createOrders(orders)
# Display results.
for order in orders:
print ('Order with id "%s" and name "%s" was created.'
% (order['id'], order['name']))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, COMPANY_ID, SALESPERSON_ID, TRAFFICKER_ID)
| apache-2.0 | 3,779,387,657,084,411,000 | 29.627119 | 74 | 0.706696 | false |
moneta-project/moneta-2.0.1.0 | contrib/linearize/linearize-data.py | 1 | 8603 | #!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Moneta developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import base64
import httplib
import sys
import hashlib
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
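# The helpers below flip byte and word order so a double-SHA256 digest can be
# rendered as the big-endian hex block hash that bitcoind displays.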
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
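# Hex-encode a block header hash in displayed (big-endian, RPC-style) byte order.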
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
		if not self.fileOutput and ((self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)) > self.maxOutSz):
			self.outF.close()
			if self.setFileTime:
				os.utime(self.outFname, (int(time.time()), self.highTS))
			self.outF = None
			self.outFname = None
			self.outFn = self.outFn + 1
			self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
		if self.timestampSplit and (blkDate > self.lastDate):
			print("New month " + blkDate.strftime("%Y-%m") + " @ " + calc_hash_str(blk_hdr))
			self.lastDate = blkDate
			if self.outF:
				self.outF.close()
				if self.setFileTime:
					os.utime(self.outFname, (int(time.time()), self.highTS))
				self.outF = None
				self.outFname = None
				self.outFn = self.outFn + 1
				self.outsz = 0
		if not self.outF:
			if self.fileOutput:
				outFname = self.settings['output_file']
			else:
				outFname = "%s/blk%05d.dat" % (self.settings['output'], self.outFn)
			print("Output file " + outFname)
			self.outFname = outFname
			self.outF = open(outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return "%s/blk%05d.dat" % (self.settings['input'], fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file" + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic:" + inMagic)
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
			if hash_str not in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f" in blkmap:
print("not found")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
| mit | -2,063,092,180,579,883,300 | 27.772575 | 108 | 0.673835 | false |
gear11/G11AWSTools | g11awstools/ec2.py | 1 | 3772 | __author__ = 'jenkins'
import logging
from subprocess import Popen, PIPE, STDOUT
import json
import itertools
from g11pyutils import IndexedDictList, StopWatch
import sys
import argparse
import time
LOG = logging.getLogger("ec2")
class ec2:
"""A base class for EC2 tools, including utility functions, and also a main dispatcher"""
def parse_args(self, parser):
args = parser.parse_args()
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(asctime)-15s %(levelname)s:%(name)s:%(message)s', level=level, stream=sys.stderr)
return args
def start_instance(self, instance, block):
"""Starts the given EC2 instance, optionally blocking until it completes."""
LOG.info("Starting EC2 instance %s. Cultivate the virtue of patience." % instance["Name"])
self.popen_aws("aws ec2 start-instances --instance-ids %s" % instance["InstanceId"])
if block:
while instance["State"]["Name"] != "running":
time.sleep(5.0)
instance = self.refresh(instance)
LOG.info("Instance started, allowing 30s for SSH to init")
time.sleep(30)
return instance
def stop_instance(self, instance, block):
"""Starts the given EC2 instance, optionally blocking until it completes."""
LOG.info("Stopping EC2 instance %s. Cultivate the virtue of patience." % instance["Name"])
self.popen_aws("aws ec2 stop-instances --instance-ids %s" % instance["InstanceId"])
if block:
while instance["State"]["Name"] != "stopped":
time.sleep(5.0)
instance = self.refresh(instance)
return instance
def instances(self):
"""Returns a list of dictionaries containing metadata for EC2 instances.
The attributes are derived from the <tt>aws ec2 describe-instances</tt> command."""
rsp, _ = self.popen_aws("aws ec2 describe-instances")
instances = []
for i in itertools.chain([r["Instances"][0] for r in rsp["Reservations"]]):
# Assign name
i["Name"] = i["Tags"][0]["Value"]
instances.append(i)
LOG.debug("Name: %s, ID: %s, DNS: %s" % (i["Name"], i["InstanceId"], i["PublicDnsName"]))
return IndexedDictList(instances)
def refresh(self, instance):
"""Refreshes and returns the AWS data for the given instance"""
rsp, _ = self.popen_aws("aws ec2 describe-instances --instance-ids %s" % instance["InstanceId"])
i = rsp["Reservations"][0]["Instances"][0]
i["Name"] = i["Tags"][0]["Value"]
return i
def popen_aws(self, cmd):
"""Executs the AWS command (via Popen) and returns a tuple of (JSON stdout, str stderr)"""
LOG.debug("Executing AWS cmd \"%s\"", cmd)
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
sout = p.stdout.read()
serr = p.stderr.read()
#LOG.debug(sout)
return (json.loads(sout) if sout else None, serr)
def main(self):
from ec2sh import ec2sh
from ec2stop import ec2stop
from ec2list import ec2list
cmds = {
'sh' : ec2sh,
'stop' : ec2stop,
'ls' : ec2list
}
# Identify and instantiate command
cmd_arg = sys.argv.pop(1)
cmd_instance = cmds[cmd_arg]()
        # Invoke command with arguments
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", help="Print debug info",action='store_true')
sw = StopWatch().start()
cmd_instance.run(parser)
print "Command completed in %ssec" % sw.readSec()
def main():
ec2().main()
if __name__ == '__main__':
main() | gpl-3.0 | 4,127,796,341,156,811,000 | 38.302083 | 119 | 0.604454 | false |
chriskiehl/python-stix | stix/bindings/extensions/test_mechanism/yara.py | 1 | 7533 | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Apr 11 15:07:58 2013 by generateDS.py version 2.9a.
#
import sys
from mixbox.binding_utils import *
from stix.bindings import register_extension
import stix.bindings.indicator as indicator_binding
import stix.bindings.stix_common as stix_common_binding
XML_NS = "http://stix.mitre.org/extensions/TestMechanism#YARA-1"
#
# Data representation classes.
#
@register_extension
class YaraTestMechanismType(indicator_binding.TestMechanismType):
"""The YaraTestMechanismType specifies an instantial extension from the
abstract indicator_binding.TestMechanismType intended to support the inclusion of
a YARA rule as a test mechanism content."""
subclass = None
superclass = indicator_binding.TestMechanismType
xmlns = XML_NS
xmlns_prefix = "yaraTM"
xml_type = "YaraTestMechanismType"
def __init__(self, idref=None, id=None, Efficacy=None, Producer=None, Version=None, Rule=None):
super(YaraTestMechanismType, self).__init__(idref=idref, id=id, Efficacy=Efficacy, Producer=Producer)
self.Version = Version
self.Rule = Rule
def factory(*args_, **kwargs_):
if YaraTestMechanismType.subclass:
return YaraTestMechanismType.subclass(*args_, **kwargs_)
else:
return YaraTestMechanismType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Version(self): return self.Version
def set_Version(self, Version): self.Version = Version
def get_Rule(self): return self.Rule
def set_Rule(self, Rule): self.Rule = Rule
def hasContent_(self):
if (
self.Version is not None or
self.Rule is not None or
super(YaraTestMechanismType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='YaraTestMechanismType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='YaraTestMechanismType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='yaraTM:', name_='YaraTestMechanismType'):
super(YaraTestMechanismType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='YaraTestMechanismType')
# if 'xmlns' not in already_processed:
# already_processed.add('xmlns')
# xmlns = " xmlns:%s='%s'" % (self.xmlns_prefix, self.xmlns)
# lwrite(xmlns)
if 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
xsi_type = " xsi:type='%s:%s'" % (self.xmlns_prefix, self.xml_type)
lwrite(xsi_type)
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='YaraTestMechanismType', fromsubclass_=False, pretty_print=True):
super(YaraTestMechanismType, self).exportChildren(lwrite, level, nsmap, indicator_binding.XML_NS, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Version is not None:
showIndent(lwrite, level, pretty_print)
lwrite('<%s:Version>%s</%s:Version>%s' % (nsmap[namespace_], quote_xml(self.Version), nsmap[namespace_], eol_))
if self.Rule is not None:
self.Rule.export(lwrite, level, nsmap, namespace_, name_='Rule', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(YaraTestMechanismType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Version':
Version_ = child_.text
Version_ = self.gds_validate_string(Version_, node, 'Version')
self.Version = Version_
elif nodeName_ == 'Rule':
obj_ = stix_common_binding.EncodedCDATAType.factory()
obj_.build(child_)
self.set_Rule(obj_)
super(YaraTestMechanismType, self).buildChildren(child_, node, nodeName_, True)
# end class YaraTestMechanismType
GDSClassesMapping = {}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'YaraTestMechanismType'
rootClass = YaraTestMechanismType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'YaraTestMechanismType'
rootClass = YaraTestMechanismType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
rootElement = rootObj.to_etree(None, name_=rootTag)
content = etree_.tostring(rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'YaraTestMechanismType'
rootClass = YaraTestMechanismType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
# doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout, 0, name_="YaraTestMechanismType",
# namespacedef_='')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"YaraTestMechanismType"
]
| bsd-3-clause | 3,301,177,143,255,918,000 | 36.854271 | 145 | 0.640515 | false |
ghickman/djangopypi | djangopypi/models.py | 1 | 6237 | import os
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import simplejson as json
from django.utils.datastructures import MultiValueDict
from django.contrib.auth.models import User
class PackageInfoField(models.Field):
description = u'Python Package Information Field'
__metaclass__ = models.SubfieldBase
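    # The field stores a MultiValueDict serialized as JSON in a plain TEXT
    # column; to_python() and get_prep_value() below convert between the two
    # representations.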
def __init__(self, *args, **kwargs):
kwargs['editable'] = False
super(PackageInfoField,self).__init__(*args, **kwargs)
def to_python(self, value):
if isinstance(value, basestring):
if value:
return MultiValueDict(json.loads(value))
else:
return MultiValueDict()
if isinstance(value, dict):
return MultiValueDict(value)
if isinstance(value,MultiValueDict):
return value
raise ValueError('Unexpected value encountered when converting data to python')
def get_prep_value(self, value):
if isinstance(value,MultiValueDict):
return json.dumps(dict(value.iterlists()))
if isinstance(value, dict):
return json.dumps(value)
if isinstance(value, basestring) or value is None:
return value
raise ValueError('Unexpected value encountered when preparing for database')
def get_internal_type(self):
return 'TextField'
class Classifier(models.Model):
name = models.CharField(max_length=255, primary_key=True)
class Meta:
verbose_name = _(u"classifier")
verbose_name_plural = _(u"classifiers")
ordering = ('name',)
def __unicode__(self):
return self.name
class Package(models.Model):
name = models.CharField(max_length=255, unique=True, primary_key=True,
editable=False)
auto_hide = models.BooleanField(default=True, blank=False)
allow_comments = models.BooleanField(default=True, blank=False)
owners = models.ManyToManyField(User, blank=True,
related_name="packages_owned")
maintainers = models.ManyToManyField(User, blank=True,
related_name="packages_maintained")
class Meta:
verbose_name = _(u"package")
verbose_name_plural = _(u"packages")
get_latest_by = "releases__latest"
ordering = ['name',]
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('djangopypi-package', (), {'package': self.name})
@property
def latest(self):
try:
return self.releases.latest()
except Release.DoesNotExist:
return None
def get_release(self, version):
"""Return the release object for version, or None"""
try:
return self.releases.get(version=version)
except Release.DoesNotExist:
return None
class Release(models.Model):
package = models.ForeignKey(Package, related_name="releases", editable=False)
version = models.CharField(max_length=128, editable=False)
metadata_version = models.CharField(max_length=64, default='1.0')
package_info = PackageInfoField(blank=False)
hidden = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True, editable=False)
class Meta:
verbose_name = _(u"release")
verbose_name_plural = _(u"releases")
unique_together = ("package", "version")
get_latest_by = 'created'
ordering = ['-created']
def __unicode__(self):
return self.release_name
@property
def release_name(self):
return u"%s-%s" % (self.package.name, self.version)
@property
def summary(self):
return self.package_info.get('summary', u'')
@property
def description(self):
return self.package_info.get('description', u'')
@property
def classifiers(self):
return self.package_info.getlist('classifier')
@models.permalink
def get_absolute_url(self):
return ('djangopypi-release', (), {'package': self.package.name,
'version': self.version})
class Distribution(models.Model):
release = models.ForeignKey(Release, related_name="distributions",
editable=False)
content = models.FileField(upload_to=settings.DJANGOPYPI_RELEASE_UPLOAD_TO)
md5_digest = models.CharField(max_length=32, blank=True, editable=False)
filetype = models.CharField(max_length=32, blank=False,
choices=settings.DJANGOPYPI_DIST_FILE_TYPES)
pyversion = models.CharField(max_length=16, blank=True,
choices=settings.DJANGOPYPI_PYTHON_VERSIONS)
comment = models.CharField(max_length=255, blank=True)
signature = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True, editable=False)
uploader = models.ForeignKey(User, editable=False)
@property
def filename(self):
return os.path.basename(self.content.name)
@property
def display_filetype(self):
for key,value in settings.DJANGOPYPI_DIST_FILE_TYPES:
if key == self.filetype:
return value
return self.filetype
@property
def path(self):
return self.content.name
def get_absolute_url(self):
return "%s#md5=%s" % (self.content.url, self.md5_digest)
class Meta:
verbose_name = _(u"distribution")
verbose_name_plural = _(u"distributions")
unique_together = ("release", "filetype", "pyversion")
def __unicode__(self):
return self.filename
class Review(models.Model):
release = models.ForeignKey(Release, related_name="reviews")
rating = models.PositiveSmallIntegerField(blank=True)
comment = models.TextField(blank=True)
class Meta:
verbose_name = _(u'release review')
verbose_name_plural = _(u'release reviews')
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^djangopypi\.models\.PackageInfoField"])
except ImportError:
pass
| bsd-3-clause | -8,300,209,897,284,802,000 | 32.713514 | 87 | 0.636043 | false |
aqisnotliquid/aqbot | aqbot/plugins/javascript.py | 1 | 1061 | import json
import urllib3
from urllib.parse import quote, urlparse
from aqbot.lib.plugins.plugin import PluginObject
__all__ = ["JavascriptPlugin"]
class JavascriptPlugin(PluginObject):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def setup(self):
self.command_manager.add(self, self.js_cmd, "!js")
def js_cmd(self, listener, sender, target, args):
arg_list = args.split()
        if len(arg_list) < 1:
            self.messenger.msg(target, "No code to execute!")
            return
http = urllib3.PoolManager()
jsurl = "http://52.3.107.139:3000/%s" % quote(args)
o = urlparse(jsurl)
r = http.request('GET', o.geturl())
if self.is_json(r.data.decode('UTF-8')):
pd = json.loads(r.data.decode('UTF-8'))
else:
pd = r.data.decode('UTF-8')
self.messenger.msg(target, pd)
@staticmethod
def is_json(myjson):
try:
json.loads(myjson)
except ValueError:
return False
return True
| mit | -7,890,583,089,124,650,000 | 24.878049 | 61 | 0.576814 | false |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/_weakrefset.py | 1 | 5922 | # Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard(object):
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet(object):
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
# Caveat: the iterator will keep a strong reference to
# `item` until it is resumed or closed.
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
__hash__ = None
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def __ne__(self, other):
opposite = self.__eq__(other)
if opposite is NotImplemented:
return NotImplemented
return not opposite
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
| mit | 4,164,580,266,749,653,500 | 26.544186 | 92 | 0.555049 | false |
cowhi/HFO | experiments/agents/adhoc.py | 1 | 8788 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 8 09:21:38 2016
@author: Felipe Leno
This file implements our advisor-advisee proposal.
This agent acts as SARSA, and the exploration strategy is changed according to our proposal
"""
from sarsatile import SARSATile
from threading import Thread
from advice_util import AdviceUtil
import random
from time import sleep
import math
import agent
import abc
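# Summary of the mechanism implemented below: the agent keeps two separate
# budgets - budgetAsk limits how often it requests advice from teammates and
# budgetAdvise limits how often it gives advice - and both decisions are
# driven by the state-importance measures defined in this class.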
class AdHoc(SARSATile):
budgetAsk = 0
budgetAdvise = 0
spentBudgetAsk = 0
spentBudgetAdvise = 0
scalingVisits = math.exp(10)
lastStatus = agent.IN_GAME
#Enum for importance metrics
VISIT_IMPORTANCE, Q_IMPORTANCE = range(2)
stateImportanceMetric = None
adviceObject = None
ASK,ADVISE = range(2)
visitTable = None
advisedState = None
informAction = None #must be informed in subclass
def __init__(self, budgetAsk, budgetAdvise,stateImportanceMetric,seed=12345, port=12345,epsilon=0.1, alpha=0.1, gamma=0.9, decayRate=0.9, serverPath = "/home/leno/HFO/bin/"):
super(AdHoc, self).__init__(seed=seed,port=port,serverPath = serverPath)
self.name = "AdHoc"
self.visitTable = {}
self.advisedState = {}
self.budgetAsk = budgetAsk
self.budgetAdvise = budgetAdvise
self.stateImportanceMetric = stateImportanceMetric
def select_action(self, stateFeatures, state, noAdvice=False):
"""Changes the exploration strategy"""
if self.exploring and self.spentBudgetAsk < self.budgetAsk and stateFeatures[self.ABLE_KICK] == 1 and not noAdvice:
#Check if it should ask for advice
ask = self.check_ask(state)
if ask:
#----
#Ask for advice
#----
#In case the agent will communicate its intended action
if self.informAction:
normalAction = super(AdHoc, self).select_action(stateFeatures,state)
else:
normalAction = None
advised = self.adviceObject.ask_advice(self.get_Unum(),stateFeatures,normalAction)
if advised:
try:
self.advisedState[self.quantize_features(state)] = True
self.spentBudgetAsk = self.spentBudgetAsk + 1
action = self.combineAdvice(advised)
return action
except:
print "Exception when combining the advice " + str(advised)
#No need to compute two times the intended action
if self.informAction:
return normalAction
return super(AdHoc, self).select_action(stateFeatures,state,noAdvice)
@abc.abstractmethod
    def check_advise(self,stateFeatures,state):
        """Returns whether the agent should advise in this state.
The advised action is also returned in the positive case"""
#importance = self.state_importance(state,self.stateImportanceMetric)
#midpoint = self.midpoint(self.ADVISE)
#Calculates the probability
#prob = self.calc_prob_adv(importance,midpoint,self.ADVISE)
##
#processedState = self.quantize_features(state)
#numberVisits = self.number_visits(processedState)
#if importance>0:
#print str(importance)+" - "+str(prob)
##
#Check if the agent should advise
#if random.random() < prob and prob > 0.1:
#advisedAction = self.select_action(stateFeatures,state,True)
#return True,advisedAction
#return False,None
def combineAdvice(self,advised):
return int(max(set(advised), key=advised.count))
def state_importance(self,state,typeProb):
"""Calculates the state importance
state - the state
typeProb - is the state importance being calculated in regard to
the number of visits or also by Q-table values?"""
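        # VISIT_IMPORTANCE approaches 1 as the state is visited more often;
        # Q_IMPORTANCE divides the spread |maxQ - minQ| by (1 - visit term),
        # so frequently visited states with clearly different action values
        # are ranked as the most important ones.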
processedState = self.quantize_features(state)
numberVisits = self.number_visits(processedState)
if numberVisits == 0:
return 0.0
visitImportance = numberVisits / (numberVisits + math.log(self.scalingVisits + numberVisits))
if typeProb == self.VISIT_IMPORTANCE:
return visitImportance
elif typeProb==self.Q_IMPORTANCE:
maxQ = -float("inf")
minQ = float("inf")
#Get max and min Q value
actions = [self.DRIBBLE, self.SHOOT, self.PASSfar, self.PASSnear]
for act in actions:
if (processedState,act) in self.qTable:
actQ = self.qTable.get((processedState, act))
if actQ > maxQ:
maxQ = actQ
if actQ < minQ:
minQ = actQ
# print "MaxQ "+str(maxQ)
# print "MinQ "+str(minQ)
# print "len "+str(len(actions))
qImportance = math.fabs(maxQ - minQ) #* len(actions)
if qImportance==float('inf'):
return 0.0
#if qImportance != 0:
#print str(qImportance) + " - "+str(visitImportance)
return qImportance / (1-visitImportance)
#If the agent got here, it is an error
return None
def step(self, state, action):
"""Modifies the default step action just to include a state visit counter"""
if self.exploring:
processedState = self.quantize_features(state)
self.visitTable[processedState] = self.visitTable.get(processedState,0.0) + 1
status, statePrime, actionPrime = super(AdHoc, self).step(state,action)
self.lastStatus = status
if self.lastStatus != self.IN_GAME:
self.advisedState = {}
return status, statePrime, actionPrime
@abc.abstractmethod
    def check_ask(self,state):
        """Returns whether the agent should ask for advice in this state"""
#if self.exploring and not (self.quantize_features(state) in self.advisedState):
# importance = self.state_importance(state,self.VISIT_IMPORTANCE)
# midpoint = self.midpoint(self.ASK)
#Calculates the probability
# prob = self.calc_prob_adv(importance,midpoint,self.ASK)
##
#processedState = self.quantize_features(state)
#numberVisits = self.number_visits(processedState)
#print str(numberVisits)+" - "+str(prob)
##
# if random.random() < prob and prob > 0.1:
# return True
#return False
#Call default sarsa method if no action was selected
def calc_prob_adv(self,importance,midpoint,typeProb):
"""Calculates the probability of giving/receiving advice
importance - the current state importance
midpoint - the midpoint for the logistic function
typeProb - ASK or ADVISE
"""
signal = 1 if typeProb == self.ASK else -1
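        # With this sign convention the logistic below returns 0.5 when the
        # importance equals the midpoint; for ASK the probability decreases as
        # importance grows, for ADVISE it increases. k controls how steep the
        # transition around the midpoint is.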
k = 10
prob = 1 / (1 + math.exp(signal * k * (importance-midpoint)))
return prob
    def advise_action(self,uNum,state,adviseeAction=None):
        """Verifies if the agent can advise a friend, and returns the action if possible"""
if self.spentBudgetAdvise < self.budgetAdvise:
#Check if the agent should advise
advise,advisedAction = self.check_advise(state,self.get_transformed_features(state))
if advise:
if adviseeAction is None or advisedAction!=adviseeAction:
self.spentBudgetAdvise = self.spentBudgetAdvise + 1
return advisedAction
return None
def setupAdvising(self,agentIndex,allAgents):
""" This method is called in preparation for advising """
self.adviceObject = AdviceUtil()
advisors = [x for i,x in enumerate(allAgents) if i!=agentIndex]
self.adviceObject.setupAdvisors(advisors)
def get_used_budget(self):
return self.spentBudgetAdvise
@abc.abstractmethod
def midpoint(self,typeMid):
"""Calculates the midpoint"""
pass
def number_visits(self,state):
return self.visitTable.get(state,0.0) | mit | -6,799,699,553,853,046,000 | 36.241525 | 178 | 0.572827 | false |
eReuse/DeviceHub | ereuse_devicehub/documents/documents.py | 1 | 2092 | from eve.auth import requires_auth
from flask import Blueprint, request, render_template, Response, make_response
from flask_weasyprint import HTML, render_pdf
from pydash import identity
from ereuse_devicehub.exceptions import WrongQueryParam
from ereuse_devicehub.header_cache import header_cache
from ereuse_devicehub.rest import execute_get
"""
The documents blueprint offers several documents (in PDF format for example)
related to the resources in DeviceHub.
This module uses Weasyprint to generate PDFs. See static/style.css for more info.
"""
documents = Blueprint('Documents', __name__, template_folder='templates',
static_folder='static', static_url_path='/documents/static')
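# A rough usage sketch (the blueprint is assumed to be registered on the Flask
# app elsewhere): GET /<db>/documents/delivery-note?ids=<id1>&ids=<id2> returns
# the delivery note as a PDF; adding ?debug=1 returns the rendered HTML instead.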
def generate_document(template: str, filename: str) -> Response:
"""Generates the document in PDF (default) or HTML if argument debug is True."""
if request.args.get('debug', False, bool):
response = make_response(template) # HTML
else:
response = render_pdf(HTML(string=template), download_filename=filename) # PDF
return response
@documents.route('/<db>/documents/delivery-note')
@header_cache(expires=None)
def delivery_note(db: str) -> Response:
"""
Gets a PDF containing a delivery note for the passed-in devices.
:param db:
:arg ids: A list of device ids.
"""
requires_auth('resource')(identity)('devices')
ids = request.args.getlist('ids')
if not ids:
raise WrongQueryParam('ids', 'Send some device ids.')
query_params = {
'where': {'_id': {'$in': ids}},
'embedded': {'tests': 1, 'erasures': 1}
}
template_params = {
'title': 'Delivery note',
'devices': execute_get(db + '/devices', params=query_params)['_items'],
'fields': (
{'path': '_id', 'name': 'System ID'},
{'path': '@type', 'name': 'Type'},
{'path': 'serialNumber', 'name': 'S/N'},
)
}
template = render_template('documents/delivery_note.html', **template_params)
return generate_document(template, filename='delivery note.pdf')
| agpl-3.0 | -6,992,374,593,629,774,000 | 37.036364 | 87 | 0.659656 | false |
IceCTF/ctf-platform | api/achievements/stage2/stage2.py | 1 | 1105 | def process(api, data):
pid = data["pid"]
stage = {
"Open Sesame": "026cd71be5149e4c645a45b4915d9917",
"Overflow 1": "0b944e114502bd1af14469dbb51b9572",
"Overflow 2": "ee876f0736c4fe1b5607b68098f4cc1c",
"Simple": "1fbb1e3943c2c6c560247ac8f9289780",
"Diary": "edcf7eb3a7d5eab2be6688cb3e59fcee",
"Farm Animals": "956441e1e973c3bd0a34d6991b5ac28b",
"Numb3rs": "e30ecef3cbbab794c809f219eddadba8",
"Document Troubles": "dded365363e42ab2aa379d7675dfc849",
"Scan Me": "d36a6d075a8bc5b5d63423d6fd40099e",
"SHARKNADO!": "a656827139fffc0d9c8f26a353d64cbd",
"Statistics": "c33e404a441c6ba9648f88af3c68a1ca",
"Bomb!": "763420a82e74a0e0f7af734c6334ef2c",
"Giffy": "dfc4e89f4369ac8d6a92e8893b54fa51",
"Injection": "548bb431406ebb2d5ba9dd380ede692a",
"SQL Injection 1": "8dfc2dd7c2bdb05ac64e15a23339f113"
}
pids = api.problem.get_solved_pids(tid=data['tid'])
earned = True
for pid in stage.values():
if pid not in pids:
earned = False
return earned, {} | mit | 6,360,206,171,100,581,000 | 43.24 | 64 | 0.673303 | false |
Landver/netmon | apps/customers/scripts.py | 1 | 1047 | import subprocess
import os
import pexpect
def create_sub_folder(sub_folder, ftp_login, ftp_password, name_for_site_folder):
'''create site_folder and subfolders'''
path_to_backup_file = "/home/%s/%s/%s" % (ftp_login, name_for_site_folder, sub_folder)
if not os.path.exists(path_to_backup_file):
# first we need to enter as specific ftp user, because otherwise we can't create/edit user's folders
site_command = pexpect.spawn("su %s -c 'mkdir ~/%s'" % (ftp_login, sub_folder))
site_command.expect("Password: ")
        site_command.sendline(ftp_password)
        # wait for the spawned su/mkdir command to finish
        site_command.expect(pexpect.EOF)
# if os.getlogin() == ftp_login:
# answer = os.getlogin()[:]
# # now we create the subfolders
# os.makedirs(path_to_backup_file)
# # give corp user permitions to own the whole site folder
# subprocess.run(
# ['chown', ftp_login, ':', ftp_login, '-R', "~"]
# )
# subprocess.run(['chmod', '-R', '744', path_to_site])
# return answer | mit | -130,851,163,906,361,420 | 37.814815 | 108 | 0.585482 | false |
sosey/ginga | ginga/util/iohelper.py | 1 | 2078 | #
# iohelper.py -- misc routines used in manipulating files, paths and urls.
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os
import re
from ginga.misc import Bunch
from ginga.util.six.moves import urllib_parse
def get_fileinfo(filespec, cache_dir='/tmp', download=False):
"""
Parse a file specification and return information about it.
"""
numhdu = None
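    # The returned Bunch carries filepath, url, numhdu and ondisk. file:// URLs
    # are mapped back to a local path, while other URL schemes get a download
    # target inside cache_dir.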
# User specified an HDU using bracket notation at end of path?
match = re.match(r'^(.+)\[(\d+)\]$', filespec)
if match:
filespec = match.group(1)
numhdu = int(match.group(2))
else:
filespec = filespec
url = filespec
filepath = None
# Does this look like a URL?
match = re.match(r"^(\w+)://(.+)$", filespec)
if match:
urlinfo = urllib_parse.urlparse(filespec)
if urlinfo.scheme == 'file':
# local file
filepath = urlinfo.path
match = re.match(r"^/(\w+\:)", filepath)
if match:
# This is a windows path with a drive letter?
# strip the leading slash
# NOTE: this seems like it should not be necessary and might
# break some cases
filepath = filepath[1:]
else:
path, filename = os.path.split(urlinfo.path)
filepath = os.path.join(cache_dir, filename)
else:
# Not a URL
filepath = filespec
url = "file://" + filepath
ondisk = os.path.exists(filepath)
res = Bunch.Bunch(filepath=filepath, url=url, numhdu=numhdu,
ondisk=ondisk)
return res
def name_image_from_path(path, idx=None):
(path, filename) = os.path.split(path)
# Remove trailing .extension
(name, ext) = os.path.splitext(filename)
#if '.' in name:
# name = name[:name.rindex('.')]
if idx is not None:
name = '%s[%d]' % (name, idx)
return name
| bsd-3-clause | -1,321,144,616,752,803,300 | 27.465753 | 76 | 0.586141 | false |
shippableSamples/sample_python_coveralls | setup.py | 1 | 1683 | import sys
from setuptools.command.test import test as TestCommand
from setuptools import setup
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='coveralls',
version='0.4.2',
packages=['coveralls'],
url='http://github.com/coagulant/coveralls-python',
license='MIT',
author='Ilya Baryshev',
author_email='[email protected]',
description='Show coverage stats online via coveralls.io',
long_description=open('README.rst').read() + '\n\n' + open('CHANGELOG.rst').read(),
entry_points={
'console_scripts': [
'coveralls = coveralls.cli:main',
],
},
install_requires=['PyYAML>=3.10', 'docopt>=0.6.1', 'coverage>=3.6', 'requests>=1.0.0'],
tests_require=['mock', 'pytest', 'sh>=1.08'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Topic :: Software Development :: Testing',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: Implementation :: CPython',
],
)
| mit | 503,579,127,926,410,430 | 32.66 | 91 | 0.607249 | false |
unreal666/outwiker | src/outwiker/gui/controls/texteditorbase.py | 2 | 24214 | # -*- coding: utf-8 -*-
import codecs
import html
import wx
import wx.lib.newevent
from wx.stc import StyledTextCtrl, STC_CP_UTF8
import outwiker.core.system
from outwiker.core.textprinter import TextPrinter
from outwiker.gui.searchreplacecontroller import SearchReplaceController
from outwiker.gui.searchreplacepanel import SearchReplacePanel
from outwiker.gui.texteditorhelper import TextEditorHelper
from outwiker.core.commands import getClipboardText
class TextEditorBase(wx.Panel):
def __init__(self, parent):
super(TextEditorBase, self).__init__(parent, style=0)
self.textCtrl = StyledTextCtrl(self, -1)
# Used to fix text encoding after clipboard pasting
self._needFixTextEncoding = False
        # Create the search panel and its controller
self._searchPanel = SearchReplacePanel(self)
self._searchPanelController = SearchReplaceController(
self._searchPanel,
self)
self._searchPanel.setController(self._searchPanelController)
self._do_layout()
self._helper = TextEditorHelper()
self._bind()
self._setDefaultSettings()
def _bind(self):
self.textCtrl.Bind(wx.EVT_KEY_DOWN, self.onKeyDown)
self.textCtrl.Bind(wx.stc.EVT_STC_CHANGE, self.__onChange)
# To check inserted text encoding
self.textCtrl.SetModEventMask(wx.stc.STC_MOD_BEFOREINSERT)
self.textCtrl.Bind(wx.stc.EVT_STC_MODIFIED, self.__onModified)
def _do_layout(self):
mainSizer = wx.FlexGridSizer(rows=2, cols=0, vgap=0, hgap=0)
mainSizer.AddGrowableRow(0)
mainSizer.AddGrowableCol(0)
mainSizer.Add(self.textCtrl, 0, wx.EXPAND, 0)
mainSizer.Add(self._searchPanel, 0, wx.EXPAND, 0)
self.SetSizer(mainSizer)
self._searchPanel.Hide()
self.Layout()
def onKeyDown(self, event):
key = event.GetUnicodeKey()
if key == wx.WXK_ESCAPE:
self._searchPanel.Close()
event.Skip()
def _setDefaultSettings(self):
self.textCtrl.SetEndAtLastLine(False)
self.textCtrl.StyleClearAll()
self.textCtrl.SetWrapMode(wx.stc.STC_WRAP_WORD)
self.textCtrl.SetWrapVisualFlags(wx.stc.STC_WRAPVISUALFLAG_END)
self.textCtrl.SetTabWidth(4)
self._setDefaultHotKeys()
def _setDefaultHotKeys(self):
self.textCtrl.CmdKeyClearAll()
# Clear Cmd keys for Ubuntu
for key in list(range(ord('A'), ord('Z') + 1)) + list(range(ord('0'), ord('9') + 1)):
self.textCtrl.CmdKeyClear(key, wx.stc.STC_SCMOD_ALT | wx.stc.STC_SCMOD_CTRL)
self.textCtrl.CmdKeyClear(key, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT | wx.stc.STC_SCMOD_CTRL)
self.textCtrl.CmdKeyClear(wx.stc.STC_KEY_UP, wx.stc.STC_SCMOD_CTRL)
self.textCtrl.CmdKeyClear(wx.stc.STC_KEY_DOWN, wx.stc.STC_SCMOD_CTRL)
# Code from Wikidpad sources
# Default mapping based on Scintilla's "KeyMap.cxx" file
defaultHotKeys = (
(wx.stc.STC_KEY_DOWN, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_LINEDOWN),
(wx.stc.STC_KEY_UP, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_LINEUP),
# (wx.stc.STC_KEY_DOWN, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_LINESCROLLDOWN),
# (wx.stc.STC_KEY_UP, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_LINESCROLLUP),
(wx.stc.STC_KEY_UP, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_LINEUPEXTEND),
(wx.stc.STC_KEY_DOWN, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_LINEDOWNEXTEND),
(ord('['), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_PARAUP),
(ord('['), wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_PARAUPEXTEND),
(ord(']'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_PARADOWN),
(ord(']'), wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_PARADOWNEXTEND),
(wx.stc.STC_KEY_LEFT, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_CHARLEFT),
(wx.stc.STC_KEY_LEFT, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_CHARLEFTEXTEND),
# (wx.stc.STC_KEY_LEFT, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_WORDLEFT),
# (wx.stc.STC_KEY_LEFT, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_WORDLEFTEXTEND),
(wx.stc.STC_KEY_RIGHT, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_CHARRIGHT),
(wx.stc.STC_KEY_RIGHT, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_CHARRIGHTEXTEND),
# (wx.stc.STC_KEY_RIGHT, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_WORDRIGHT),
# (wx.stc.STC_KEY_RIGHT, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_WORDRIGHTEXTEND),
(ord('/'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_WORDPARTLEFT),
(ord('/'), wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_WORDPARTLEFTEXTEND),
(ord('\\'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_WORDPARTRIGHT),
(ord('\\'), wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_WORDPARTRIGHTEXTEND),
(wx.stc.STC_KEY_HOME, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_VCHOME),
(wx.stc.STC_KEY_HOME, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_VCHOMEEXTEND),
(wx.stc.STC_KEY_HOME, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_DOCUMENTSTART),
(wx.stc.STC_KEY_HOME, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_DOCUMENTSTARTEXTEND),
(wx.stc.STC_KEY_HOME, wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_HOMEDISPLAY),
(wx.stc.STC_KEY_END, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_LINEEND),
(wx.stc.STC_KEY_END, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_LINEENDEXTEND),
(wx.stc.STC_KEY_END, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_DOCUMENTEND),
(wx.stc.STC_KEY_END, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_DOCUMENTENDEXTEND),
(wx.stc.STC_KEY_END, wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_LINEENDDISPLAY),
(wx.stc.STC_KEY_PRIOR, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_PAGEUP),
(wx.stc.STC_KEY_PRIOR, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_PAGEUPEXTEND),
(wx.stc.STC_KEY_NEXT, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_PAGEDOWN),
(wx.stc.STC_KEY_NEXT, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_PAGEDOWNEXTEND),
(wx.stc.STC_KEY_DELETE, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_CLEAR),
(wx.stc.STC_KEY_INSERT, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_EDITTOGGLEOVERTYPE),
(wx.stc.STC_KEY_ESCAPE, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_CANCEL),
(wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_DELETEBACK),
(wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_DELETEBACK),
(wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_UNDO),
(ord('Z'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_UNDO),
(ord('Y'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_REDO),
(ord('A'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_SELECTALL),
(wx.stc.STC_KEY_INSERT, wx.stc.STC_SCMOD_CTRL | wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_COPY),
(wx.stc.STC_KEY_INSERT, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_PASTE),
(ord('C'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_COPY),
(ord('X'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_CUT),
(ord('V'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_PASTE),
(wx.stc.STC_KEY_TAB, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_TAB),
(wx.stc.STC_KEY_TAB, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_BACKTAB),
(wx.stc.STC_KEY_RETURN, wx.stc.STC_SCMOD_NORM, wx.stc.STC_CMD_NEWLINE),
(wx.stc.STC_KEY_RETURN, wx.stc.STC_SCMOD_SHIFT, wx.stc.STC_CMD_NEWLINE),
(wx.stc.STC_KEY_ADD, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_ZOOMIN),
(wx.stc.STC_KEY_SUBTRACT, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_ZOOMOUT),
(wx.stc.STC_KEY_DOWN, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_LINEDOWNRECTEXTEND),
(wx.stc.STC_KEY_UP, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_LINEUPRECTEXTEND),
(wx.stc.STC_KEY_LEFT, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_CHARLEFTRECTEXTEND),
(wx.stc.STC_KEY_RIGHT, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_CHARRIGHTRECTEXTEND),
(wx.stc.STC_KEY_HOME, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_VCHOMERECTEXTEND),
(wx.stc.STC_KEY_END, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_LINEENDRECTEXTEND),
(wx.stc.STC_KEY_PRIOR, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_PAGEUPRECTEXTEND),
(wx.stc.STC_KEY_NEXT, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_ALT, wx.stc.STC_CMD_PAGEDOWNRECTEXTEND),
# (wx.stc.STC_KEY_DELETE, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_DELLINERIGHT),
# (wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_DELLINELEFT),
# (wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_DELWORDLEFT),
# (wx.stc.STC_KEY_DELETE, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_DELWORDRIGHT),
# (wx.stc.STC_KEY_DIVIDE, wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_SETZOOM),
# (ord('L'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_LINECUT),
# (ord('L'), wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_LINEDELETE),
# (ord('T'), wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_LINECOPY),
# (ord('T'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_LINETRANSPOSE),
# (ord('D'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_SELECTIONDUPLICATE),
# (ord('U'), wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_LOWERCASE),
# (ord('U'), wx.stc.STC_SCMOD_SHIFT | wx.stc.STC_SCMOD_CTRL, wx.stc.STC_CMD_UPPERCASE),
)
[self.textCtrl.CmdKeyAssign(*key) for key in defaultHotKeys]
@property
def searchPanel(self):
"""
        Returns the search panel controller
"""
return self._searchPanelController
def Print(self):
selectedtext = self.textCtrl.GetSelectedText()
text = self.textCtrl.GetText()
printer = TextPrinter(self)
printer.printout(text if len(selectedtext) == 0 else selectedtext)
def getPosChar(self, posBytes):
return len(self.textCtrl.GetTextRange(0, posBytes))
def AddText(self, text):
self.textCtrl.AddText(text)
def replaceText(self, text):
self.textCtrl.ReplaceSelection(text)
def toddleLinePrefix(self, line, prefix):
"""
        If the line with number "line" starts with prefix, the prefix will be removed,
        otherwise the prefix will be added.
"""
assert line < self.GetLineCount()
line_text = self.GetLine(line)
if line_text.startswith(prefix):
line_text = line_text[len(prefix):]
else:
line_text = prefix + line_text
self.SetLine(line, line_text)
def toddleSelectedLinesPrefix(self, prefix):
"""
Apply toddleLinePrefix method to selected lines
"""
self.BeginUndoAction()
old_sel_start = self.GetSelectionStart()
old_sel_end = self.GetSelectionEnd()
first_line, last_line = self.GetSelectionLines()
[self.toddleLinePrefix(n, prefix)
for n
in range(first_line, last_line + 1)]
if old_sel_start != old_sel_end:
new_sel_start = self.GetLineStartPosition(first_line)
new_sel_end = self.GetLineEndPosition(last_line)
else:
new_sel_start = new_sel_end = self.GetLineEndPosition(last_line)
self.SetSelection(new_sel_start, new_sel_end)
self.EndUndoAction()
def turnText(self, lefttext, righttext):
selText = self.textCtrl.GetSelectedText()
newtext = lefttext + selText + righttext
self.textCtrl.ReplaceSelection(newtext)
currPos = self.GetSelectionEnd()
if len(selText) == 0:
            # If we are not wrapping any text but creating an empty tag,
            # place the caret before the closing tag
newpos = currPos - len(righttext)
self.SetSelection(newpos, newpos)
else:
self.SetSelection(currPos - len(selText) - len(righttext),
currPos - len(righttext))
def escapeHtml(self):
selText = self.textCtrl.GetSelectedText()
text = html.escape(selText, quote=False)
self.textCtrl.ReplaceSelection(text)
def SetReadOnly(self, readonly):
self.textCtrl.SetReadOnly(readonly)
def GetReadOnly(self) -> bool:
return self.textCtrl.GetReadOnly()
def GetText(self) -> str:
return self.textCtrl.GetText()
def SetText(self, text: str) -> None:
text = self._fixTextEncoding(text)
self.textCtrl.SetText(text)
def EmptyUndoBuffer(self):
self.textCtrl.EmptyUndoBuffer()
def GetSelectedText(self) -> str:
return self.textCtrl.GetSelectedText()
def GetCurrentLine(self) -> int:
'''
Returns the line number of the line with the caret.
'''
return self.textCtrl.GetCurrentLine()
def GetCurrentLineText(self) -> str:
'''
Retrieve the text of the line containing the caret.
'''
return self.textCtrl.GetCurLine()[0]
def ScrollToLine(self, line):
self.textCtrl.ScrollToLine(line)
def SetSelection(self, start, end):
"""
        start and end are given in characters, not bytes, unlike the original
        StyledTextCtrl
"""
startText = self.GetText()[:start]
endText = self.GetText()[:end]
firstByte = self._helper.calcByteLen(startText)
endByte = self._helper.calcByteLen(endText)
self.textCtrl.SetSelection(firstByte, endByte)
def GotoPos(self, pos):
pos_bytes = self._helper.calcBytePos(self.GetText(), pos)
self.textCtrl.GotoPos(pos_bytes)
def GetCurrentPosition(self):
"""
        Returns the number of the character (not the byte) the caret is placed before
"""
return self._calcCharPos(self.textCtrl.GetCurrentPos())
def GetSelectionStart(self):
"""
        Returns the start position of the selection in characters, not bytes
"""
return self._calcCharPos(self.textCtrl.GetSelectionStart())
def GetSelectionLines(self):
"""
Return tuple (first selected line, last selected line)
"""
start_bytes = self.textCtrl.GetSelectionStart()
end_bytes = self.textCtrl.GetSelectionEnd()
return (self.textCtrl.LineFromPosition(start_bytes),
self.textCtrl.LineFromPosition(end_bytes))
def GetSelectionEnd(self):
"""
        Returns the end position of the selection in characters, not bytes
"""
return self._calcCharPos(self.textCtrl.GetSelectionEnd())
def SetFocus(self):
self.textCtrl.SetFocus()
self.textCtrl.SetSTCFocus(True)
def GetLine(self, line):
"""
Return line with the "line" number. \n included.
"""
return self.textCtrl.GetLine(line)
def SetLine(self, line, newline):
"""
        Replace the line with the number "line" by newline.
        newline should end with "\n", otherwise the line will be joined with the next one.
"""
linecount = self.GetLineCount()
assert line < linecount
line_start_bytes = self.textCtrl.PositionFromLine(line)
line_end_bytes = self.textCtrl.PositionFromLine(line + 1)
self.textCtrl.Replace(line_start_bytes, line_end_bytes, newline)
def GetLineCount(self):
return self.textCtrl.GetLineCount()
def GetLineStartPosition(self, line):
"""
Retrieve the position at the start of a line in symbols (not bytes)
"""
return self._calcCharPos(self.textCtrl.PositionFromLine(line))
def GetLineEndPosition(self, line: int) -> int:
"""
Get the position after the last visible characters on a line
in symbols (not bytes)
"""
return self._calcCharPos(self.textCtrl.GetLineEndPosition(line))
def MoveSelectedLinesUp(self):
"""
Move the selected lines up one line,
shifting the line above after the selection.
"""
self.textCtrl.MoveSelectedLinesUp()
def MoveSelectedLinesDown(self):
"""
Move the selected lines down one line,
shifting the line below before the selection.
"""
self.textCtrl.MoveSelectedLinesDown()
def LineDuplicate(self):
"""
Duplicate the current line.
"""
self.textCtrl.LineDuplicate()
def LineDelete(self):
"""
Delete the current line.
"""
self.textCtrl.LineDelete()
def BeginUndoAction(self):
self.textCtrl.BeginUndoAction()
def EndUndoAction(self):
self.textCtrl.EndUndoAction()
def JoinLines(self):
"""
Join selected lines
"""
first_line, last_line = self.GetSelectionLines()
if first_line != last_line:
last_line -= 1
self.BeginUndoAction()
for _ in range(first_line, last_line + 1):
line = self.GetLine(first_line).replace(u'\r\n', u'\n')
if line.endswith(u'\n'):
newline = line[:-1]
self.SetLine(first_line, newline)
new_sel_pos = self.GetLineEndPosition(first_line)
self.SetSelection(new_sel_pos, new_sel_pos)
self.EndUndoAction()
def DelWordLeft(self):
self.textCtrl.DelWordLeft()
def DelWordRight(self):
self.textCtrl.DelWordRight()
def DelLineLeft(self):
"""
Delete back from the current position to the start of the line
"""
self.textCtrl.DelLineLeft()
def DelLineRight(self):
"""
Delete forwards from the current position to the end of the line
"""
self.textCtrl.DelLineRight()
def WordLeft(self):
self.textCtrl.WordLeft()
def WordRight(self):
self.textCtrl.WordRight()
def WordLeftEnd(self):
self.textCtrl.WordLeftEnd()
def WordRightEnd(self):
self.textCtrl.WordRightEnd()
def WordLeftExtend(self):
self.textCtrl.WordLeftExtend()
def WordRightExtend(self):
self.textCtrl.WordRightExtend()
def GotoWordStart(self):
self.WordRight()
self.WordLeft()
def GotoWordEnd(self):
self.WordLeftEnd()
self.WordRightEnd()
def ScrollLineToCursor(self):
maxlines = self.textCtrl.LinesOnScreen()
line = self.GetCurrentLine()
if line >= maxlines:
delta = min(10, maxlines / 3)
line -= delta
if line < 0:
line = 0
self.ScrollToLine(line)
def WordStartPosition(self, pos):
pos_bytes = self._helper.calcBytePos(self.GetText(), pos)
result_bytes = self.textCtrl.WordStartPosition(pos_bytes, True)
return self.getPosChar(result_bytes)
def WordEndPosition(self, pos):
pos_bytes = self._helper.calcBytePos(self.GetText(), pos)
result_bytes = self.textCtrl.WordEndPosition(pos_bytes, True)
return self.getPosChar(result_bytes)
def GetWord(self, pos):
pos_bytes = self._helper.calcBytePos(self.GetText(), pos)
word_start_bytes = self.textCtrl.WordStartPosition(pos_bytes, True)
word_end_bytes = self.textCtrl.WordEndPosition(pos_bytes, True)
word = self.textCtrl.GetTextRange(word_start_bytes, word_end_bytes)
return word
def GetLineSelStartPosition(self, line: int) -> int:
'''
        Retrieve the position of the start of the selection at the given line
(wx.stc.STC_INVALID_POSITION if no selection on this line).
'''
return self.textCtrl.GetLineSelStartPosition(line)
def GetLineSelEndPosition(self, line: int) -> int:
'''
Retrieve the position of the end of the selection at the given line
(wx.stc.STC_INVALID_POSITION if no selection on this line).
'''
return self.textCtrl.GetLineSelEndPosition(line)
def _calcCharPos(self, pos_bytes):
"""
        Convert a position in bytes into a position in characters
"""
text_left = self.textCtrl.GetTextRange(0, pos_bytes)
currpos = len(text_left)
return currpos
def _getTextForParse(self):
        # A tab character counts as several characters in the editor
return self.textCtrl.GetText().replace("\t", " ")
def _bindStandardMenuItems(self):
self.textCtrl.Bind(wx.EVT_MENU,
self.__onCopyFromEditor,
id=wx.ID_COPY)
self.textCtrl.Bind(wx.EVT_MENU,
self.__onCutFromEditor,
id=wx.ID_CUT)
self.textCtrl.Bind(wx.EVT_MENU,
self.__onPasteToEditor,
id=wx.ID_PASTE)
self.textCtrl.Bind(wx.EVT_MENU,
self.__onUndo,
id=wx.ID_UNDO)
self.textCtrl.Bind(wx.EVT_MENU,
self.__onRedo,
id=wx.ID_REDO)
self.textCtrl.Bind(wx.EVT_MENU,
self.__onSelectAll,
id=wx.ID_SELECTALL)
def __onCopyFromEditor(self, event):
self.textCtrl.Copy()
def __onCutFromEditor(self, event):
self.textCtrl.Cut()
def __onPasteToEditor(self, event):
self.textCtrl.Paste()
def __onUndo(self, event):
self.textCtrl.Undo()
def __onRedo(self, event):
self.textCtrl.Redo()
def __onSelectAll(self, event):
self.textCtrl.SelectAll()
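    # Pasted text may contain characters that cannot be represented in UTF-8:
    # __onModified (fired before the text is inserted, see SetModEventMask in
    # _bind) only raises a flag, and __onChange later replaces the offending
    # characters via _fixText().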
def __onModified(self, event):
text = event.GetText()
if text != text.encode('utf-8', errors='replace').decode('utf-8'):
self._needFixTextEncoding = True
def __onChange(self, event):
if self._needFixTextEncoding:
self._needFixTextEncoding = False
self._fixText()
event.Skip()
def _fixTextEncoding(self, text: str) -> str:
result = text.encode('utf-8', errors='replace').decode('utf-8')
return result
def _fixText(self):
old_text = self.GetText()
new_text = self._fixTextEncoding(old_text)
if old_text != new_text:
old_selection_start = self.GetSelectionStart()
old_selection_end = self.GetSelectionEnd()
self.SetText(new_text)
self.SetSelection(old_selection_start, old_selection_end)
| gpl-3.0 | 5,251,865,995,897,898,000 | 40.134948 | 130 | 0.590974 | false |
akvo/akvo-rsr | akvo/rsr/management/commands/fix_project_qualitative_narrative_text_update.py | 1 | 1964 | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from django.core.management.base import BaseCommand
from django.db.models import Q
from akvo.rsr.models import Project, IndicatorPeriodData
from akvo.rsr.models.result.utils import QUALITATIVE
class Command(BaseCommand):
help = """Move qualitative IndicatorPeriodData.text value to IndicatorPeriodData.narrative.
    Fix an inconsistency where the narrative of a qualitative IndicatorPeriodData update was
    stored in the text attribute instead of the narrative attribute. This script looks for
    qualitative period updates with a non-empty text value but an empty narrative and moves
    the text value to the narrative attribute.
"""
def add_arguments(self, parser):
parser.add_argument(
'project_id',
type=int,
help='The root project hierarchy under which all projects are fixed',
)
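    # Typical invocation, assuming the project's standard manage.py entry point:
    #   python manage.py fix_project_qualitative_narrative_text_update <project_id>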
def handle(self, *args, **options):
root_project = Project.objects.get(id=options['project_id'])
root_hierarchy_ids = root_project.descendants().values_list('id', flat=True)
queryset = IndicatorPeriodData.objects\
.filter(period__indicator__type=QUALITATIVE, period__indicator__result__project__in=root_hierarchy_ids)\
.filter(Q(narrative__isnull=True) | Q(narrative__exact=''))\
.exclude(Q(text__isnull=True) | Q(text__exact=''))
size = queryset.count()
for update in queryset.all():
print("Fixing data {}".format(update.id))
narrative = update.text
update.narrative = narrative
update.text = ''
update.save(update_fields=['narrative', 'text'])
print("Fixes {} data".format(size))
| agpl-3.0 | -6,030,668,272,118,436,000 | 40.787234 | 116 | 0.678208 | false |
MSchnei/py_pRF_motion | pyprf_feature/analysis/prepro/prepro_get_temp_info.py | 1 | 1502 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 18:51:28 2018
@author: marian
"""
import os
import numpy as np
import pickle
# %% set parameters
# set input path
strPthPrnt = "/media/sf_D_DRIVE/MotionLocaliser/UsedPsychoPyScripts/P02/Conditions"
# provide names of condition files in the order that they were shown
lstPickleFiles = [
'Conditions_run01.pickle',
'Conditions_run02.pickle',
'Conditions_run03.pickle',
'Conditions_run04.pickle',
'Conditions_run05.pickle',
'Conditions_run06.pickle',
]
# provide the TR in seconds
varTr = 3.0
# provide the stimulation time
varStmTm = 3.0
# %% load conditions files
# Loop through npz files in target directory:
lstCond = []
for ind, cond in enumerate(lstPickleFiles):
inputFile = os.path.join(strPthPrnt, cond)
with open(inputFile, 'rb') as handle:
array1 = pickle.load(handle)
aryTmp = array1["Conditions"].astype('int32')
# append condition to list
lstCond.append(aryTmp)
# join conditions across runs
aryCond = np.vstack(lstCond)
# create empty array
aryTmpCond = np.empty((len(aryCond), 4), dtype='float16')
# get the condition nr
aryTmpCond[:, 0] = aryCond[:, 0]
# get the onset time
aryTmpCond[:, 1] = np.cumsum(np.ones(len(aryCond))*varTr) - varTr
# get the duration
aryTmpCond[:, 2] = np.ones(len(aryCond))*varStmTm
# add the feature identifier
aryTmpCond[:, 3] = aryCond[:, 1]
# set output name
strPthOut = os.path.join(strPthPrnt,'Conditions')
np.save(strPthOut, aryTmpCond)
| gpl-3.0 | -3,106,941,908,621,377,000 | 23.225806 | 83 | 0.702397 | false |
brendannee/Bikesy-Backend | pygs/graphserver/ext/osm/osmdb.py | 2 | 14297 | import sqlite3
import os
try:
import json
except ImportError:
import simplejson as json
import sys
import xml.sax
import binascii
from vincenty import vincenty
from struct import pack, unpack
from rtree import Rtree
def cons(ary):
for i in range(len(ary)-1):
yield (ary[i], ary[i+1])
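# Edge geometries are stored as base64 text: each (lon, lat) pair is packed as
# two 32-bit floats, so every coordinate occupies 8 bytes of the decoded blob.
# e.g. unpack_coords(pack_coords([(12.5, 55.7)])) -> [(12.5, 55.7)] up to
# float32 rounding.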
def pack_coords(coords):
return binascii.b2a_base64( "".join([pack( "ff", *coord ) for coord in coords]) )
def unpack_coords(str):
bin = binascii.a2b_base64( str )
return [unpack( "ff", bin[i:i+8] ) for i in range(0, len(bin), 8)]
class Node:
def __init__(self, id, lon, lat):
self.id = id
self.lon = lon
self.lat = lat
self.tags = {}
def __repr__(self):
return "<Node id='%s' (%s, %s) n_tags=%d>"%(self.id, self.lon, self.lat, len(self.tags))
class Way:
def __init__(self, id):
self.id = id
self.nd_ids = []
self.tags = {}
def __repr__(self):
return "<Way id='%s' n_nds=%d n_tags=%d>"%(self.id, len(self.nd_ids), len(self.tags))
class WayRecord:
def __init__(self, id, tags, nds):
self.id = id
if type(tags)==unicode:
self.tags_str = tags
self.tags_cache = None
else:
self.tags_cache = tags
self.tags_str = None
if type(nds)==unicode:
self.nds_str = nds
self.nds_cache = None
else:
self.nds_cache = nds
self.nds_str = None
@property
def tags(self):
self.tags_cache = self.tags_cache or json.loads(self.tags_str)
return self.tags_cache
@property
def nds(self):
self.nds_cache = self.nds_cache or json.loads(self.nds_str)
return self.nds_cache
def __repr__(self):
return "<WayRecord id='%s'>"%self.id
class OSMDB:
def __init__(self, dbname,overwrite=False,rtree_index=True):
if overwrite:
try:
os.remove( dbname )
except OSError:
pass
self.conn = sqlite3.connect(dbname)
if rtree_index:
self.index = Rtree( dbname )
else:
self.index = None
if overwrite:
self.setup()
def setup(self):
c = self.conn.cursor()
c.execute( "CREATE TABLE nodes (id TEXT, tags TEXT, lat FLOAT, lon FLOAT, endnode_refs INTEGER DEFAULT 1)" )
c.execute( "CREATE TABLE ways (id TEXT, tags TEXT, nds TEXT)" )
self.conn.commit()
c.close()
def create_indexes(self):
c = self.conn.cursor()
c.execute( "CREATE INDEX nodes_id ON nodes (id)" )
c.execute( "CREATE INDEX nodes_lon ON nodes (lon)" )
c.execute( "CREATE INDEX nodes_lat ON nodes (lat)" )
c.execute( "CREATE INDEX ways_id ON ways (id)" )
self.conn.commit()
c.close()
def populate(self, osm_filename, accept=lambda tags: True, reporter=None):
print "importing osm from XML to sqlite database"
c = self.conn.cursor()
self.n_nodes = 0
self.n_ways = 0
superself = self
class OSMHandler(xml.sax.ContentHandler):
@classmethod
def setDocumentLocator(self,loc):
pass
@classmethod
def startDocument(self):
pass
@classmethod
def endDocument(self):
pass
@classmethod
def startElement(self, name, attrs):
if name=='node':
self.currElem = Node(attrs['id'], float(attrs['lon']), float(attrs['lat']))
elif name=='way':
self.currElem = Way(attrs['id'])
elif name=='tag':
self.currElem.tags[attrs['k']] = attrs['v']
elif name=='nd':
self.currElem.nd_ids.append( attrs['ref'] )
@classmethod
def endElement(self,name):
if name=='node':
if superself.n_nodes%5000==0:
print "node %d"%superself.n_nodes
superself.n_nodes += 1
superself.add_node( self.currElem, c )
elif name=='way':
if superself.n_ways%5000==0:
print "way %d"%superself.n_ways
superself.n_ways += 1
superself.add_way( self.currElem, c )
@classmethod
def characters(self, chars):
pass
xml.sax.parse(osm_filename, OSMHandler)
self.conn.commit()
c.close()
print "indexing primary tables...",
self.create_indexes()
print "done"
    def set_endnode_ref_counts( self ):
        """Populate nodes.endnode_refs. Necessary for splitting ways into single-edge sub-ways"""
print "counting end-node references to find way split-points"
c = self.conn.cursor()
endnode_ref_counts = {}
c.execute( "SELECT nds from ways" )
print "...counting"
for i, (nds_str,) in enumerate(c):
if i%5000==0:
print i
nds = json.loads( nds_str )
for nd in nds:
endnode_ref_counts[ nd ] = endnode_ref_counts.get( nd, 0 )+1
print "...updating nodes table"
for i, (node_id, ref_count) in enumerate(endnode_ref_counts.items()):
if i%5000==0:
print i
if ref_count > 1:
c.execute( "UPDATE nodes SET endnode_refs = ? WHERE id=?", (ref_count, node_id) )
self.conn.commit()
c.close()
def index_endnodes( self ):
print "indexing endpoint nodes into rtree"
c = self.conn.cursor()
c.execute( "SELECT id, lat, lon FROM nodes WHERE endnode_refs > 1" )
for id, lat, lon in c:
self.index.add( int(id), (lon, lat, lon, lat) )
c.close()
def create_and_populate_edges_table( self, tolerant=False ):
self.set_endnode_ref_counts()
self.index_endnodes()
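        # A way is split at every node that is referenced by more than one way
        # (endnode_refs > 1); each resulting sub-way becomes one row in the
        # edges table with its packed geometry and vincenty length.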
print "splitting ways and inserting into edge table"
c = self.conn.cursor()
c.execute( "CREATE TABLE edges (id TEXT, parent_id TEXT, start_nd TEXT, end_nd TEXT, dist FLOAT, geom TEXT)" )
for i, way in enumerate(self.ways()):
try:
if i%5000==0:
print i
#split way into several sub-ways
subways = []
curr_subway = [ way.nds[0] ]
for nd in way.nds[1:]:
curr_subway.append( nd )
                if self.node(nd)[4] > 1: # node reference count is greater than one
subways.append( curr_subway )
curr_subway = [ nd ]
#insert into edge table
for i, subway in enumerate(subways):
coords = [(lambda x:(x[3],x[2]))(self.node(nd)) for nd in subway]
packt = pack_coords( coords )
dist = sum([vincenty(lat1, lng1, lat2, lng2) for (lng1, lat1), (lng2, lat2) in cons(coords)])
c.execute( "INSERT INTO edges VALUES (?, ?, ?, ?, ?, ?)", ("%s-%s"%(way.id, i),
way.id,
subway[0],
subway[-1],
dist,
packt) )
except IndexError:
if tolerant:
continue
else:
raise
print "indexing edges...",
c.execute( "CREATE INDEX edges_id ON edges (id)" )
c.execute( "CREATE INDEX edges_parent_id ON edges (parent_id)" )
print "done"
self.conn.commit()
c.close()
def edge(self, id):
c = self.conn.cursor()
c.execute( "SELECT edges.*, ways.tags FROM edges, ways WHERE ways.id = edges.parent_id AND edges.id = ?", (id,) )
try:
ret = c.next()
way_id, parent_id, from_nd, to_nd, dist, geom, tags = ret
return (way_id, parent_id, from_nd, to_nd, dist, unpack_coords( geom ), json.loads(tags))
except StopIteration:
c.close()
raise IndexError( "Database does not have an edge with id '%s'"%id )
c.close()
return ret
def edges(self):
c = self.conn.cursor()
c.execute( "SELECT edges.*, ways.tags FROM edges, ways WHERE ways.id = edges.parent_id" )
for way_id, parent_id, from_nd, to_nd, dist, geom, tags in c:
yield (way_id, parent_id, from_nd, to_nd, dist, unpack_coords(geom), json.loads(tags))
c.close()
def add_way( self, way, curs=None ):
if curs is None:
curs = self.conn.cursor()
close_cursor = True
else:
close_cursor = False
curs.execute("INSERT INTO ways (id, tags, nds) VALUES (?, ?, ?)", (way.id, json.dumps(way.tags), json.dumps(way.nd_ids) ))
if close_cursor:
self.conn.commit()
curs.close()
def add_node( self, node, curs=None ):
if curs is None:
curs = self.conn.cursor()
close_cursor = True
else:
close_cursor = False
curs.execute("INSERT INTO nodes (id, tags, lat, lon) VALUES (?, ?, ?, ?)", ( node.id, json.dumps(node.tags), node.lat, node.lon ) )
if close_cursor:
self.conn.commit()
curs.close()
def nodes(self):
c = self.conn.cursor()
c.execute( "SELECT * FROM nodes" )
for node_row in c:
yield node_row
c.close()
def node(self, id):
c = self.conn.cursor()
c.execute( "SELECT * FROM nodes WHERE id = ?", (id,) )
try:
ret = c.next()
except StopIteration:
c.close()
raise IndexError( "Database does not have node with id '%s'"%id )
c.close()
return ret
def nearest_node(self, lat, lon, range=0.005):
c = self.conn.cursor()
if self.index:
print "YOU'RE USING THE INDEX"
id = next(self.index.nearest( (lon, lat), 1 ))
print "THE ID IS %d"%id
c.execute( "SELECT id, lat, lon FROM nodes WHERE id = ?", (id,) )
else:
c.execute( "SELECT id, lat, lon FROM nodes WHERE endnode_refs > 1 AND lat > ? AND lat < ? AND lon > ? AND lon < ?", (lat-range, lat+range, lon-range, lon+range) )
dists = [(nid, nlat, nlon, ((nlat-lat)**2+(nlon-lon)**2)**0.5) for nid, nlat, nlon in c]
if len(dists)==0:
return (None, None, None, None)
return min( dists, key = lambda x:x[3] )
def nearest_of( self, lat, lon, nodes ):
c = self.conn.cursor()
c.execute( "SELECT id, lat, lon FROM nodes WHERE id IN (%s)"%",".join([str(x) for x in nodes]) )
dists = [(nid, nlat, nlon, ((nlat-lat)**2+(nlon-lon)**2)**0.5) for nid, nlat, nlon in c]
if len(dists)==0:
return (None, None, None, None)
return min( dists, key = lambda x:x[3] )
def way(self, id):
c = self.conn.cursor()
c.execute( "SELECT id, tags, nds FROM ways WHERE id = ?", (id,) )
try:
id, tags_str, nds_str = c.next()
ret = WayRecord(id, tags_str, nds_str)
except StopIteration:
raise Exception( "OSMDB has no way with id '%s'"%id )
finally:
c.close()
return ret
def way_nds(self, id):
c = self.conn.cursor()
c.execute( "SELECT nds FROM ways WHERE id = ?", (id,) )
(nds_str,) = c.next()
c.close()
return json.loads( nds_str )
def ways(self):
c = self.conn.cursor()
c.execute( "SELECT id, tags, nds FROM ways" )
for id, tags_str, nds_str in c:
yield WayRecord( id, tags_str, nds_str )
c.close()
def count_ways(self):
c = self.conn.cursor()
c.execute( "SELECT count(*) FROM ways" )
ret = c.next()[0]
c.close()
return ret
def count_edges(self):
c = self.conn.cursor()
c.execute( "SELECT count(*) FROM edges" )
ret = c.next()[0]
c.close()
return ret
def delete_way(self, id):
c = self.conn.cursor()
c.execute("DELETE FROM ways WHERE id = ?", (id,))
c.close()
def bounds(self):
c = self.conn.cursor()
c.execute( "SELECT min(lon), min(lat), max(lon), max(lat) FROM nodes" )
ret = c.next()
c.close()
return ret
def execute(self,sql,args=None):
c = self.conn.cursor()
if args:
for row in c.execute(sql,args):
yield row
else:
for row in c.execute(sql):
yield row
c.close()
def cursor(self):
return self.conn.cursor()
def test_wayrecord():
wr = WayRecord( "1", {'highway':'bumpkis'}, ['1','2','3'] )
assert wr.id == "1"
assert wr.tags == {'highway':'bumpkis'}
assert wr.nds == ['1','2','3']
wr = WayRecord( "1", "{\"highway\":\"bumpkis\"}", "[\"1\",\"2\",\"3\"]" )
assert wr.id == "1"
assert wr.tags == {'highway':'bumpkis'}
assert wr.nds == ['1','2','3']
def osm_to_osmdb(osm_filename, osmdb_filename, tolerant=False, skipload=False):
osmdb = OSMDB( osmdb_filename, overwrite=True )
if not skipload:
osmdb.populate( osm_filename, accept=lambda tags: 'highway' in tags, reporter=sys.stdout )
osmdb.create_and_populate_edges_table(tolerant)
def main():
from sys import argv
usage = "python osmdb.py osm_filename osmdb_filename"
if len(argv) < 3:
print usage
exit()
osm_filename = argv[1]
osmdb_filename = argv[2]
tolerant = 'tolerant' in argv
skipload = 'skipload' in argv
osm_to_osmdb(osm_filename, osmdb_filename, tolerant, skipload)
if __name__=='__main__':
main()
| bsd-3-clause | 7,347,071,993,847,417,000 | 28.417695 | 174 | 0.505491 | false |
amlight/ofp_sniffer | libs/core/filters.py | 1 | 6233 | """
Filters to be used
Any customized print filters should be inserted in this file
Filters are provided via CLI option -F json-file
"""
from libs.core.printing import PrintingOptions
from libs.core.sanitizer import Sanitizer
from libs.tcpiplib.tcpip import get_ofp_version
from libs.tcpiplib.process_data import is_protocol
from libs.tcpiplib.process_data import get_protocol
from libs.gen.dpid_handling import clear_dpid
def filter_msg(msg):
"""
This method will be the core of all filters. Any new filter comes here
Args:
msg: OFMessage class
Returns:
False: Don't filter packet
True: Filter it (don't print)
"""
if PrintingOptions().is_quiet():
# Don't print anything. Used in conjunction with some apps.
return True
if not PrintingOptions().has_filters():
# User hasn't selected CLI option -F
return False
# Filter per OF Version
if filter_of_version(msg):
return True
# Filter per OF Message Type
if filter_of_type(msg):
return True
# Filter Ethertypes from PacketIn/Out messages
if ethertype_filters(msg):
return True
# Filter PacketIn/Out based on DPID and Port
if dpid_filters(msg):
return True
# Don't filter
return False
def filter_of_version(msg):
"""
Check if the OpenFlow version is allowed
Args:
msg: OFMessage class
Returns:
False: Don't filter packet
True: Filter it (don't print)
"""
name_version = get_ofp_version(msg.ofp.header.version.value)
supported_versions = []
try:
for version in Sanitizer().allowed_of_versions:
supported_versions.append(version)
if name_version not in supported_versions:
return True
except KeyError:
pass
return False
def filter_of_type(msg):
"""
Filter per OF Message Type
Args:
msg: OFMessage class
Returns:
False: Don't filter packet
True: Filter it (don't print)
"""
name_version = get_ofp_version(msg.ofp.header.version.value)
# OF Types to be ignored through json file (-F)
try:
rejected_types = Sanitizer().allowed_of_versions[name_version]
if msg.ofp.header.message_type in rejected_types['rejected_of_types']:
return True
except KeyError:
pass
return False
def ethertype_filters(msg):
"""
Filter PacketIn and PacketOut messages based on Ethertype
Sanitizer filter (-F), entry "filters", "ethertype"
Args:
msg: class OFMessage
Returns:
False: Don't filter packet
True: Filter it (don't print)
"""
if msg.ofp.header.message_type in [10, 13]:
try:
filters = Sanitizer().filters['ethertypes']
except KeyError:
return False
if not len(filters):
# No filters
return False
# Go to payload
try:
if is_protocol(msg.ofp.data, lldp=True) and filters['lldp']:
return True
if is_protocol(msg.ofp.data, oess=True) and filters['fvd']:
return True
if is_protocol(msg.ofp.data, arp=True) and filters['arp']:
return True
except KeyError:
pass
# Other Ethertypes listed as hex
for protocol in filters['others']:
try:
if is_protocol(msg.ofp.data) == int(protocol, 16):
return True
except ValueError:
pass
return False
def dpid_filters(msg):
"""
Filter PacketIn and PacketOut messages based on DPID and ports
Sanitizer filter (-F), entry "filters", "packetIn_filter" or
"packetOut_filter"
If switch_dpid AND in_port are Any, don't filter (print it)
If switch_dpid OR in_port are NOT Any, print only what matches the
most specific (filter everything else)
Args:
msg: class OFMessage
Returns:
False: Don' filter packet (print it)
True: Filter it (don't print)
"""
# It has to be a PacketOut or PacketIn
if msg.ofp.header.message_type not in [10, 13]:
return False
# It has to be a LLDP packet
if not is_protocol(msg.ofp.data, lldp=True):
return False
try:
# If it is a PacketIn ...
if msg.ofp.header.message_type in [10]:
# It has to have a packetIn_filter filter
filters = Sanitizer().filters['packetIn_filter']
filter_port = filters['in_port']
# If it a PacketOut...
else:
# It has to have a packetOut_filter filter
filters = Sanitizer().filters['packetOut_filter']
filter_port = filters['out_port']
filter_dpid = filters['switch_dpid']
except KeyError:
return False
if not len(filters):
return False
# Was switch_dpid or in_port specified by user?
if filter_dpid in ['any', 'Any', 'ANY']:
if filter_port in ['any', 'Any', 'ANY']:
return False
# If we got here, it means we have content to avoid printing
print_it = False
lldp_msg = get_protocol(msg.ofp.data, lldp=True)
switch_dpid = clear_dpid(filter_dpid)
if print_switch_dpid(switch_dpid, lldp_msg.c_id):
if msg.ofp.header.message_type in [10]:
if print_port(filter_port, str(msg.ofp.in_port)):
print_it = True
else:
if print_port(filter_port, str(lldp_msg.p_id)):
print_it = True
if print_it:
return False
return True
def print_switch_dpid(filter_dpid, packet_dpid):
"""
Confirm if filter_dpid is packet_dpid or any
"""
packet_dpid = clear_dpid(packet_dpid)
if filter_dpid in [packet_dpid, 'Any', 'any', 'ANY']:
return True
return False
def print_port(filter_port, packet_port):
"""
Confirm if filter_port is packet_port or any
"""
if filter_port in [packet_port, 'Any', 'any', 'ANY']:
return True
return False
| apache-2.0 | 6,529,931,816,268,576,000 | 27.331818 | 78 | 0.589604 | false |
dhahaj/py | sell.py | 1 | 3010 | import sys, subprocess
from time import *
from common import *
from pprint import *
def error(error=''):
print 'Error:',error
sys.exit(1)
def parse_csv(what):
if what.count(',') == 0:
return list([what])
return val.split(',')
a = sys.argv
var = {'-s' : 'btc'}
d = {}
for i in range(a.__len__()):
try:
if a[i].startswith('-'):
try:
d.update({a[i]:a[i+1]})
except:
d.update({a[i]:''})
except: None
for item in d.iteritems():
key = item.__getitem__(0)
val = item.__getitem__(1)
var[key] = parse_csv(val)
# sec = var.get('-s')
pprint(var)
# sys.exit(0)
from pycoinex import *
def show_profit():
orig_amt = None
for i in c.bals.__iter__():
if i['currency_name'].upper()=='BTC':
orig_amt = i['amount']/1e8
break
time.sleep(1.5)
print '\nAvailable Balances:\n'
new_bals = c.available()
print_json(new_bals)
new_amt = new_bals['BTC']
print '\nEarnings This Session: %.8f BTC\n' % (new_amt-orig_amt)
def sell(pri, sec, amount=0.0):
mkt_name = '_'.join([pri,sec])
print '\nMarket Name:', mkt_name
# Get current account balance
cur_name = pri.upper()
bals = c.bals
bal = None
for b in bals:
if b['currency_name'] == cur_name:
bal = b['amount']/1e8
break
if bal == 0: error('Not enough funds in balance!')
elif bal is None: error('Currency Not Found!')
print 'Balance is: %.8f %s' % (bal, cur_name)
# Find the market id number
id = c.findid(mkt_name)
if id == -1: error('Market Not Found!')
print 'Market ID:', id
# Get the highest buying price
rate = c.highestBidOrder(id)
print 'Highest Buy Price: %.8f %s' % (rate,sec.upper())
# Display the potential sale
amount = float(amount)
if amount > 0: amt = amount
else: amt = bal
sale = amt*rate
print 'Selling %.8f %s' % (amt,pri.upper())
print 'Sale Amount: %.8f %s\n' % (sale,sec.upper())
# Make the sale
if var.has_key('-i'): cfrm='Y'
else: cfrm = raw_input('Continue? ')
if cfrm.upper() == 'Y':
order = c.order(id, amt, rate=rate)
order_data = order.json()['order']
order_data['amount'] = order_data['amount']/1e8
order_data['rate'] = order_data['rate']/1e8
print '\nOrder Details:\n'
print_json(order_data)
print
else: print '\nOrder cancelled.'
mkts = var.get('-p')
amts = var.get('-a')
smkt = var.get('-s')
if amts is None:
amts = []
for j in range(mkts.__len__()): amts.extend('0')
elif amts.__len__() > 1:
if mkts.__len__() is not amts.__len__(): error('Argument lengths do not match.')
for i in range(mkts.__len__()):
try:
time.sleep(4)
if type(smkt) is list:
sell(mkts[i], smkt[i], amts[i])
else: sell(mkts[i], smkt, amts[i])
except Exception as e:
print 'Error encountered:',e
show_profit()
| mit | 5,539,414,901,882,223,000 | 24.294118 | 84 | 0.544518 | false |
Tesora-Release/tesora-trove | trove/common/single_tenant_remote.py | 1 | 4173 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trove.common import cfg
from trove.common.remote import normalize_url
import trove.openstack.common.log as logging
from cinderclient.v2 import client as CinderClient
from neutronclient.v2_0 import client as NeutronClient
from novaclient.v1_1.client import Client as NovaClient
CONF = cfg.CONF
"""
trove.conf
...
The following should be set in the trove CONF file for this
single_tenant_remote config to work correctly.
nova_proxy_admin_user =
nova_proxy_admin_pass =
nova_proxy_admin_tenant_name =
trove_auth_url =
nova_compute_service_type =
nova_compute_url =
cinder_service_type =
os_region_name =
remote_nova_client = \
trove.common.single_tenant_remote.nova_client_trove_admin
remote_cinder_client = \
trove.common.single_tenant_remote.cinder_client_trove_admin
remote_neutron_client = \
trove.common.single_tenant_remote.neutron_client_trove_admin
...
"""
PROXY_AUTH_URL = CONF.trove_auth_url
LOG = logging.getLogger(__name__)
def nova_client_trove_admin(context, region_name=None, compute_url=None):
"""
Returns a nova client object with the trove admin credentials
:param context: original context from user request
:type context: trove.common.context.TroveContext
:return novaclient: novaclient with trove admin credentials
:rtype: novaclient.v1_1.client.Client
"""
compute_url = compute_url or CONF.nova_compute_url
client = NovaClient(CONF.nova_proxy_admin_user,
CONF.nova_proxy_admin_pass,
CONF.nova_proxy_admin_tenant_name,
auth_url=PROXY_AUTH_URL,
service_type=CONF.nova_compute_service_type,
region_name=region_name or CONF.os_region_name)
if compute_url and CONF.nova_proxy_admin_tenant_id:
client.client.management_url = "%s/%s/" % (
normalize_url(compute_url),
CONF.nova_proxy_admin_tenant_id)
return client
def cinder_client_trove_admin(context=None):
"""
Returns a cinder client object with the trove admin credentials
:param context: original context from user request
:type context: trove.common.context.TroveContext
:return cinderclient: cinderclient with trove admin credentials
"""
client = CinderClient.Client(CONF.nova_proxy_admin_user,
CONF.nova_proxy_admin_pass,
project_id=CONF.nova_proxy_admin_tenant_name,
auth_url=PROXY_AUTH_URL,
service_type=CONF.cinder_service_type,
region_name=CONF.os_region_name)
if CONF.cinder_url and CONF.nova_proxy_admin_tenant_id:
client.client.management_url = "%s/%s/" % (
normalize_url(CONF.cinder_url), CONF.nova_proxy_admin_tenant_id)
return client
def neutron_client_trove_admin(context=None):
"""
Returns a neutron client object with the trove admin credentials
:param context: original context from user request
:type context: trove.common.context.TroveContext
:return neutronclient: neutronclient with trove admin credentials
"""
client = NeutronClient.Client(
username=CONF.nova_proxy_admin_user,
password=CONF.nova_proxy_admin_pass,
tenant_name=CONF.nova_proxy_admin_tenant_name,
auth_url=PROXY_AUTH_URL,
service_type=CONF.neutron_service_type,
region_name=CONF.os_region_name)
if CONF.neutron_url:
client.management_url = CONF.neutron_url
return client
| apache-2.0 | -379,479,486,372,407,360 | 33.204918 | 78 | 0.686556 | false |
hpcloud-mon/monasca-perf | influx_test/test_305.py | 1 | 1366 | from testbase import TestBase
from influxparawrite import InfluxParaWrite
class test_305(TestBase):
def run(self):
self.env.sendSingleMetric(1,self.name,1)
self.env.sendSingleMetric(2,self.name,2)
self.env.sendSingleMetric(3,self.name,3)
if self.env.countMetrics(1,self.name) != 3:
return ["FAIL","node 1 wrong count"]
if self.env.countMetrics(2,self.name) != 3:
return ["FAIL","node 2 wrong count"]
if self.env.countMetrics(3,self.name) != 3:
return ["FAIL","node 3 wrong count"]
ipw = InfluxParaWrite(self.env)
ipw.start(3,1,'killInflux',self.name)
self.env.startInflux(1)
val = self.env.countMetrics(1,self.name)
if val != ipw.count+3:
return ["FAIL","node 1 wrong count 2: "+ str(val) + ' != '+str(ipw.count+3)]
val = self.env.countMetrics(2,self.name)
if val != ipw.count+3:
return ["FAIL","node 2 wrong count 2: "+ str(val) + ' != '+str(ipw.count+3)]
val = self.env.countMetrics(3,self.name)
if val != ipw.count+3:
return ["FAIL","node 3 wrong count 2: "+ str(val) + ' != '+str(ipw.count+3)]
return ["PASS",""]
def desc(self):
return 'Kill node 1. Fire off multiple writes while shutting down the node. Bring back up and query from that node'
| apache-2.0 | -3,526,152,816,272,863,000 | 46.103448 | 123 | 0.5959 | false |
x10an14/overtime-calculator | old-sanic/calculations.py | 1 | 3169 | """Module containing majory of calulation functions and their helpers."""
from datetime import datetime
from datetime import timedelta
from collections import defaultdict
from . import default_parse_fmt
from . import log_function_entry_and_exit
@log_function_entry_and_exit
def parse_row(row, field_names, datetime_parse_fmt=default_parse_fmt, has_duration=True):
# TODO: ADD DOCSTRING
# TODO: figure out which fields (if any) have datetime
# TODO: Replace these hardcoded field_names with names decided upon
# through above todo.
# Get date of current event/row
start_time = row[field_names[0]]
start_time = datetime.strptime(start_time, datetime_parse_fmt)
duration = row[field_names[1]]
if not has_duration:
# Get stop time to calculate duration
stop_time = datetime.strptime(duration, datetime_parse_fmt)
# Get duration as "HH:MM":
minutes = int((stop_time - start_time).total_seconds() // 60)
hours = str(minutes // 60).zfill(2)
minutes = str(minutes % 60).zfill(2)
duration = ":".join((hours, minutes))
return (duration, start_time.isocalendar())
@log_function_entry_and_exit
def parse_csv_reader_content(input_data, **kwargs):
# TODO: ADD DOCSTRING
total_sum, aggregate_records = timedelta(0), defaultdict(dict)
for row in input_data:
(duration, (_, week_nr, week_day)) = parse_row(row, **kwargs)
t = datetime.strptime(duration, "%H:%M")
total_sum += timedelta(hours=t.hour, minutes=t.minute)
if week_nr not in aggregate_records:
aggregate_records[week_nr] = defaultdict(list)
if week_day not in aggregate_records[week_nr]:
aggregate_records[week_nr][week_day] = [duration]
else:
aggregate_records[week_nr][week_day].append(duration)
# add total amount of seconds to return object
aggregate_records["total_sum"] = int(total_sum.total_seconds())
return aggregate_records
@log_function_entry_and_exit
def parse_aggregate_weeks_and_weekdays(aggregate_data, hours_per_week=37.5):
# TODO: ADD DOCSTRING
def get_timedelta_from_str(input_str, parse_fmt="%H:%M"):
t = datetime.strptime(input_str, parse_fmt)
return timedelta(hours=t.hour, minutes=t.minute)
total_balance, hours_per_week = timedelta(0), timedelta(hours=hours_per_week)
for week, days in aggregate_data.items():
if week == "total_sum":
continue
week_sum = timedelta(0)
for day, records in days.items():
week_sum += sum(
get_timedelta_from_str(record)
for record in records
)
week_balance = week_sum - hours_per_week
aggregate_data[week]["sum"] = int(week_sum.total_seconds())
aggregate_data[week]["balance"] = int(week_balance.total_seconds())
total_balance += week_balance
total_balance = aggregate_data["total_sum"] - int(total_balance.total_seconds())
aggregate_data["hours_per_week"] = hours_per_week.total_seconds() / 3600
aggregate_data["total_balance"] = total_balance
return aggregate_data
| mit | 6,430,448,965,321,955,000 | 33.824176 | 89 | 0.654465 | false |
tundish/blue_monday_78 | bluemonday78/utils/publisher.py | 1 | 3924 | #!/usr/bin/env python3
# encoding: UTF-8
# This file is part of Addison Arches.
#
# Addison Arches is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Addison Arches is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Addison Arches. If not, see <http://www.gnu.org/licenses/>.
import argparse
from collections import defaultdict
from collections import namedtuple
import glob
import os.path
import pathlib
import platform
import pprint
import sys
import uuid
__doc__ = """
Identify assets related to SceneScript folders.
NB: In order to reference a story arc from a different pathway, do `ln -s`, eg::
ln -rs -t bluemonday78/dialogue/w12_ducane/prison_wing \
bluemonday78/dialogue/w12_ducane/prison_office/release
This creates a relative soft link in `prison_wing` which will point to
`prison_office/release`.
"""
Assets = namedtuple("Assets", ["id", "pathways", "arc", "scripts"])
def find_scripts(path):
for id_location in glob.glob(os.path.join(path, "**/uuid.hex"), recursive=True):
try:
id_path = pathlib.Path(id_location)
uid = uuid.UUID(hex=id_path.read_text().strip())
except (ValueError, OSError):
print("Bad uuid at '", id_path, "'.", sep="", file=sys.stderr)
continue
else:
for script_path in sorted(id_path.parent.glob("*.rst")):
yield uid, script_path
def find_assets(path, prefix=""):
locations = defaultdict(list)
for uid, script_path in find_scripts(path):
locations[uid].append(script_path)
for uid, script_paths in locations.items():
arc_name = None
pathways = set()
scripts = set()
for script_path in script_paths:
arc_path = script_path.parent
pathways.add(arc_path.parent.relative_to(path).parts)
if not arc_path.is_symlink():
arc_name = arc_path.name
arc_name = ".".join((
prefix, str(arc_path.relative_to(path)).replace(os.sep, ".")
))
scripts.add(script_path.relative_to(path))
if "windows" in platform.system().lower():
# Find soft links explicitly because MS OS fails to do it
# within the source repository
links = set([
(pathlib.Path(f).parent, pathlib.Path(f).read_text())
for script in script_paths
for f in glob.glob(os.path.join(
path, "{0}/{1}".format("**", script.parent.name)),
recursive=True)
if pathlib.Path(f).is_file() and
0 < pathlib.Path(f).stat().st_size < 128
])
for parent, hop in links:
if parent.joinpath(hop).resolve() == arc_path.resolve():
pathways.add(parent.relative_to(path).parts)
yield Assets(
uid, frozenset(pathways),
arc_name, tuple(sorted(scripts))
)
def main(args):
assets = [i for path in args.paths for i in find_assets(path)]
pprint.pprint(assets)
def parser(description=__doc__):
rv = argparse.ArgumentParser(
description,
fromfile_prefix_chars="@"
)
rv.add_argument(
"paths", nargs="*", type=pathlib.Path,
help="supply a list of directory paths"
)
return rv
if __name__ == "__main__":
p = parser()
args = p.parse_args()
rv = main(args)
sys.exit(rv)
| agpl-3.0 | 8,140,897,638,692,505,000 | 31.429752 | 84 | 0.612385 | false |
ondrejch/FSM | MCNP/scripts/OLD/HexCore.py | 1 | 4054 | ''' @ author: James Ghawaly Jr.
@ version: 1.2
'''
import numpy
import os
class HexCore():
def __init__(self,side,hexSpace,coreRadius,coreHeight,rodRadius,rodHeight,radiusLimiter):
# generics
self.surfaceNumber = 0
self.cellNumber = 0
self.materNumber = 0
self.center = [0,0,0]
# core info
self.s = side
self.hexSpace = hexSpace
self.coreRadius = coreRadius
self.coreHeight = coreHeight
self.rodRadius = rodRadius
self.rodHeight = rodHeight
self.radiusLimiter = radiusLimiter
# for plotting
self.xPoints = []
self.yPoints = []
# for writing the MCNP input deck
self.surfaceCard = []
self.cellCard = []
self.dataCard = []
def genHexCore(self):
self.cylinder([0,0,0],self.radiusLimiter,self.coreHeight)
num=0
x0 = -1 * (self.s-1) * self.hexSpace / 2
y0 = -1 * (numpy.sqrt(3) / 2) * (self.s-1) * self.hexSpace
for y in range(self.s*2-1):
# number of rods in row for below or at the midpoint of the hexagon
num=self.s+y
# check if we are above the midpoint of the hex and if so, calculate a different number of rods for this row
if (y+1)>self.s:
num = (2*self.s-2)-(y-self.s)
for x in range(num):
if (y+1)<=self.s:
x_position = self.x_pos_bot(x,x0,y)
y_position = self.y_pos(y, y0)
z_position = 0
if ( numpy.sqrt( numpy.power(x_position,2.0) + numpy.power(y_position,2.0)) >= self.radiusLimiter ):
pass
else:
self.cylinder([x_position,y_position,z_position],self.rodRadius,self.rodHeight)
else:
x_position = self.x_pos_top(x, x0, y)
y_position = self.y_pos(y, y0)
z_position = 0
if (numpy.sqrt(numpy.power(x_position, 2.0) + numpy.power(y_position, 2.0)) >= self.radiusLimiter):
pass
else:
self.cylinder([x_position,y_position,z_position], self.rodRadius,self.rodHeight)
self.cell(1,"")
self.cell("",1)
def cylinder(self, position, radius, height):
self.surfaceNumber += 1
self.xPoints.append(position[0])
self.yPoints.append(position[1])
# x y z h1 h2 h3 R
self.surfaceCard.append(str("%s RCC %f %f %f %s %s %s %s")%(self.surfaceNumber,position[0],position[1],position[2],0,0,height,radius))
print(self.surfaceCard[-1])
def cell(self,inSurfaceNum,outSurfaceNum):
self.cellNumber+=1
try:
inSurfaceNum=-1*inSurfaceNum
except Exception:
pass
self.cellCard.append(str("%s %s %s imp:n=%s")%(self.cellNumber,inSurfaceNum,outSurfaceNum,1))
def sphere(self, position, radius):
pass
def x_pos_bot(self,i_x,x0,i_y):
return (x0-i_y*self.hexSpace/2)+(i_x*self.hexSpace)
def x_pos_top(self, i_x, x0, i_y):
return (x0+i_y*self.hexSpace/2)+(i_x*self.hexSpace)-(self.s-1)*self.hexSpace#(x0 - self.s*self.hexSpace/2 + (i_y-self.s)*(numpy.sqrt(3)/2)*self.hexSpace) + (i_x * self.hexSpace)
def y_pos(self,i_y,y0):
return y0+(numpy.sqrt(3)/2)*self.hexSpace*i_y
def getPoints(self):
return self.xPoints, self.yPoints
def writeInputDeck(self,filename,title):
with open(os.getcwd()+"/inputs"+filename,'w') as f2w:
f2w.write(title+"\n")
f2w.write("c CELL CARD\n")
for cells in self.cellCard:
f2w.write(cells+"\n")
f2w.write("\nc SURFACE CARD\n")
for surface in self.surfaceCard:
print(surface)
#f2w.write(surface.)
f2w.write("\nc DATA CARD\nNPS 1000\n")
| gpl-3.0 | -8,050,590,374,423,078,000 | 35.196429 | 185 | 0.53404 | false |
readline/btb | sortMatrix.py | 1 | 2782 | #!/usr/bin/env python
# =============================================================================
# Filename: sortMatrix.py
# Version: 1.0
# Author: Kai Yu - [email protected]
# https://github.com/readline
# Last modified: 2015-05-22 15:01
# Description:
# This script can sort a matrix either by row or by column, either in or not in reverse order.
# 1st column and 1st row must be headers.
# =============================================================================
from optparse import OptionParser
import copy,sys
def transpose(m):
tmpm = []
for c in range(len(m[0])):
tmpm.append([])
for r in range(len(m)):
for c in range(len(m[0])):
tmpm[c].append(m[r][c])
return tmpm
def importFile2mat(options):
infile = open(options.inpath, 'r')
tmp = []
while 1:
line = infile.readline()
if not line: break
tmp.append(line.rstrip().split('\t'))
infile.close()
return tmp
def mat2dic(mat):
tmpdic = {}
sortableMat = []
tmpdic['header'] = mat[0]
count = 0
for row in mat[1:]:
tmpdic[count] = row
sortableMat.append([count] + [float(n) for n in row[1:]])
count += 1
return tmpdic, sortableMat
def sortMatrix(mat,options):
cmd = 'newmat = sorted(mat, key=lambda x:(%s), reverse=%s)' \
%(','.join(['x[%d]'%n for n in range(1,len(mat[0]))]),str(options.reverse))
print cmd
exec cmd
return newmat
def saveMat(mat, options):
savefile = open(options.outpath,'w')
for row in mat:
savefile.write('\t'.join(row)+'\n')
savefile.close()
def main():
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option("-i", "--input", dest="inpath",help="Input file path")
parser.add_option("-b", "--byrow", action="store_true", dest="byrow", default = False, help="Sort by row.[Default=False. Default sort by col.]")
parser.add_option("-r", "--reverse", action="store_true", dest="reverse", default = False,help="Sort in reverse order.[Default=False]")
parser.add_option("-o", "--output", dest="outpath", help="Output file path")
(options, args) = parser.parse_args()
if not options.inpath or not options.outpath:
parser.error('Incorrect input option. Use -h to see the help infomation.')
rawMat = importFile2mat(options)
if options.byrow == True:
rawMat = transpose(rawMat)
tmpdic, sortableMat = mat2dic(rawMat)
sortedMat = sortMatrix(sortableMat, options)
outMat = []
outMat.append(tmpdic['header'])
for n in sortedMat:
outMat.append(tmpdic[n[0]])
if options.byrow == True:
outMat = transpose(outMat)
saveMat(outMat, options)
if __name__ == '__main__':
main()
| gpl-2.0 | -5,350,637,548,227,602,000 | 29.23913 | 148 | 0.574407 | false |
gfyoung/numpy | numpy/core/einsumfunc.py | 2 | 51329 | """
Implementation of optimized einsum.
"""
from __future__ import division, absolute_import, print_function
import itertools
from numpy.compat import basestring
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray, tensordot
from numpy.core.overrides import array_function_dispatch
__all__ = ['einsum', 'einsum_path']
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
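# The helpers below implement the cost model (_flop_count, _compute_size_by_dict),
# the contraction-order searches (_optimal_path, _greedy_path), the BLAS check
# (_can_dot) and the subscript parsing (_parse_einsum_input) that einsum_path
# and einsum build on.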
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
"""
Computes the number of FLOPS in the contraction.
Parameters
----------
idx_contraction : iterable
The indices involved in the contraction
inner : bool
Does this contraction require an inner product?
num_terms : int
The number of terms in a contraction
size_dictionary : dict
The size of each of the indices in idx_contraction
Returns
-------
flop_count : int
The total number of FLOPS required for the contraction.
Examples
--------
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
30
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
60
"""
overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
op_factor = max(1, num_terms - 1)
if inner:
op_factor += 1
return overall_size * op_factor
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
Parameters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted; the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = (idx_contract - new_result)
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorially with respect to the number of elements in the list
``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('')
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
for curr in full_results:
cost, positions, remaining = curr
for con in itertools.combinations(range(len(input_sets) - iteration), 2):
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Build (total_cost, positions, indices_remaining)
total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
new_pos = positions + [con]
iter_results.append((total_cost, new_pos, new_input_sets))
# Update combinatorial list, if we did not find anything return best
# path + remaining contractions
if iter_results:
full_results = iter_results
else:
path = min(full_results, key=lambda x: x[0])[1]
path += [tuple(range(len(input_sets) - iteration))]
return path
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
"""Compute the cost (removed size + flops) and resultant indices for
performing the contraction specified by ``positions``.
Parameters
----------
positions : tuple of int
The locations of the proposed tensors to contract.
input_sets : list of sets
The indices found on each tensor.
output_set : set
The output indices of the expression.
idx_dict : dict
Mapping of each index to its size.
memory_limit : int
The total allowed size for an intermediary tensor.
path_cost : int
The contraction cost so far.
naive_cost : int
The cost of the unoptimized expression.
Returns
-------
cost : (int, int)
A tuple containing the size of any indices removed, and the flop cost.
positions : tuple of int
The locations of the proposed tensors to contract.
new_input_sets : list of sets
The resulting new list of indices if this proposed contraction is performed.
"""
# Find the contraction
contract = _find_contraction(positions, input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(idx_result, idx_dict)
if new_size > memory_limit:
return None
# Build sort tuple
old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
removed_size = sum(old_sizes) - new_size
# NB: removed_size used to be just the size of any removed indices i.e.:
# helpers.compute_size_by_dict(idx_removed, idx_dict)
cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
sort = (-removed_size, cost)
# Sieve based on total cost as well
if (path_cost + cost) > naive_cost:
return None
# Add contraction to possible choices
return [sort, positions, new_input_sets]
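# Illustrative sketch (not part of the original module): evaluating one
# candidate pairwise contraction for the hypothetical expression
# "ab,bc,cd->ad" with made-up index sizes.
#
#   sizes = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
#   _parse_possible_contraction((0, 1), [set('ab'), set('bc'), set('cd')],
#                               set('ad'), sizes, memory_limit=1000,
#                               path_cost=0, naive_cost=360)
#
# returns [(-10, 48), (0, 1), [{'c', 'd'}, {'a', 'c'}]]: contracting the first
# two terms removes index 'b', replaces 18 stored elements with an 8-element
# 'ac' intermediate (a saving of 10) at a cost of 48 FLOPs, so it sorts ahead
# of any candidate pair that frees fewer elements.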
def _update_other_results(results, best):
"""Update the positions and provisional input_sets of ``results`` based on
performing the contraction result ``best``. Remove any entries that involve
the tensors just contracted.
Parameters
----------
results : list
List of contraction results produced by ``_parse_possible_contraction``.
best : list
The best contraction of ``results`` i.e. the one that will be performed.
Returns
-------
mod_results : list
The list of modified results, updated with the outcome of the ``best`` contraction.
"""
best_con = best[1]
bx, by = best_con
mod_results = []
for cost, (x, y), con_sets in results:
# Ignore results involving tensors just contracted
if x in best_con or y in best_con:
continue
# Update the input_sets
del con_sets[by - int(by > x) - int(by > y)]
del con_sets[bx - int(bx > x) - int(bx > y)]
con_sets.insert(-1, best[2][-1])
# Update the position indices
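# (the two contracted tensors were removed and the new result appended, so any
# position above a removed slot shifts down by one for each removed slot)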
mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
mod_results.append((cost, mod_con, con_sets))
return mod_results
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
"""
Finds the path by contracting the best pair until the input list is
exhausted. The best pair is found by minimizing the tuple
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
matrix multiplication or inner product operations, then Hadamard-like
operations, and finally outer operations. Outer products are limited by
``memory_limit``. This algorithm scales cubically with respect to the
number of elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The greedy contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('')
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
# Handle trivial cases that leaked through
if len(input_sets) == 1:
return [(0,)]
elif len(input_sets) == 2:
return [(0, 1)]
# Build up a naive cost
contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
idx_result, new_input_sets, idx_removed, idx_contract = contract
naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
# Initially iterate over all pairs
comb_iter = itertools.combinations(range(len(input_sets)), 2)
known_contractions = []
path_cost = 0
path = []
for iteration in range(len(input_sets) - 1):
# Iterate over all pairs on first step, only previously found pairs on subsequent steps
for positions in comb_iter:
# Always initially ignore outer products
if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
continue
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
naive_cost)
if result is not None:
known_contractions.append(result)
# If we do not have a inner contraction, rescan pairs including outer products
if len(known_contractions) == 0:
# Then check the outer products
for positions in itertools.combinations(range(len(input_sets)), 2):
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
path_cost, naive_cost)
if result is not None:
known_contractions.append(result)
# If we still did not find any remaining contractions, default back to einsum like behavior
if len(known_contractions) == 0:
path.append(tuple(range(len(input_sets))))
break
# Sort based on first index
best = min(known_contractions, key=lambda x: x[0])
# Now propagate as many unused contractions as possible to next iteration
known_contractions = _update_other_results(known_contractions, best)
# Next iteration only compute contractions with the new tensor
# All other contractions have been accounted for
input_sets = best[2]
new_tensor_pos = len(input_sets) - 1
comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
# Update path and total cost
path.append(best[1])
path_cost += best[0][1]
return path
def _can_dot(inputs, result, idx_removed):
"""
Checks if we can use a BLAS (np.tensordot) call and whether it is beneficial to do so.
Parameters
----------
inputs : list of str
Specifies the subscripts for summation.
result : str
Resulting summation.
idx_removed : set
Indices that are removed in the summation
Returns
-------
type : bool
Returns true if BLAS should and can be used, else False
Notes
-----
If the operation is BLAS level 1 or 2 and the data is not already aligned,
we default back to einsum, as the memory movement needed to copy is more
costly than the operation itself.
Examples
--------
# Standard GEMM operation
>>> _can_dot(['ij', 'jk'], 'ik', set('j'))
True
# Can use the standard BLAS, but requires odd data movement
>>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
False
# DDOT where the memory is not aligned
>>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
False
"""
# All `dot` calls remove indices
if len(idx_removed) == 0:
return False
# BLAS can only handle two operands
if len(inputs) != 2:
return False
input_left, input_right = inputs
for c in set(input_left + input_right):
# can't deal with repeated indices on same input or more than 2 total
nl, nr = input_left.count(c), input_right.count(c)
if (nl > 1) or (nr > 1) or (nl + nr > 2):
return False
# can't do implicit summation or dimension collapse e.g.
# "ab,bc->c" (implicitly sum over 'a')
# "ab,ca->ca" (take diagonal of 'a')
if nl + nr - 1 == int(c in result):
return False
# Build a few temporaries
set_left = set(input_left)
set_right = set(input_right)
keep_left = set_left - idx_removed
keep_right = set_right - idx_removed
rs = len(idx_removed)
# At this point we are a DOT, GEMV, or GEMM operation
# Handle inner products
# DDOT with aligned data
if input_left == input_right:
return True
# DDOT without aligned data (better to use einsum)
if set_left == set_right:
return False
# Handle the 4 possible (aligned) GEMV or GEMM cases
# GEMM or GEMV no transpose
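# e.g. "ij,jk->ik": the removed index 'j' trails the left term and leads the
# right term, so the operands are already laid out for a standard GEMM call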
if input_left[-rs:] == input_right[:rs]:
return True
# GEMM or GEMV transpose both
if input_left[:rs] == input_right[-rs:]:
return True
# GEMM or GEMV transpose right
if input_left[-rs:] == input_right[-rs:]:
return True
# GEMM or GEMV transpose left
if input_left[:rs] == input_right[:rs]:
return True
# Einsum is faster than GEMV if we have to copy data
if not keep_left or not keep_right:
return False
# We are a matrix-matrix product, but we need to copy data
return True
def _parse_einsum_input(operands):
"""
A reproduction of einsum c side einsum parsing in python.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('za,xza', 'xz', [a, b])
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('za,xza', 'xz', [a, b])
"""
if len(operands) == 0:
raise ValueError("No input operands")
if isinstance(operands[0], basestring):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = [asanyarray(v) for v in operand_list]
subscripts = ""
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
if num != last:
subscripts += ","
if output_list is not None:
subscripts += "->"
for s in output_list:
if s is Ellipsis:
subscripts += "..."
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError("For this input type lists must contain "
"either int or Ellipsis")
# Check for proper "->"
if ("-" in subscripts) or (">" in subscripts):
invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
if invalid or (subscripts.count("->") != 1):
raise ValueError("Subscripts can only contain one '->'.")
# Parse ellipses
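# Each "..." is rewritten into a block of otherwise unused symbols sized to the
# operand's extra leading dimensions, so e.g. "...a" on a 3-d operand becomes
# two fresh labels followed by 'a'.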
if "." in subscripts:
used = subscripts.replace(".", "").replace(",", "").replace("->", "")
unused = list(einsum_symbols_set - set(used))
ellipse_inds = "".join(unused)
longest = 0
if "->" in subscripts:
input_tmp, output_sub = subscripts.split("->")
split_subscripts = input_tmp.split(",")
out_sub = True
else:
split_subscripts = subscripts.split(',')
out_sub = False
for num, sub in enumerate(split_subscripts):
if "." in sub:
if (sub.count(".") != 3) or (sub.count("...") != 1):
raise ValueError("Invalid Ellipses.")
# Take into account numerical values
if operands[num].shape == ():
ellipse_count = 0
else:
ellipse_count = max(operands[num].ndim, 1)
ellipse_count -= (len(sub) - 3)
if ellipse_count > longest:
longest = ellipse_count
if ellipse_count < 0:
raise ValueError("Ellipses lengths do not match.")
elif ellipse_count == 0:
split_subscripts[num] = sub.replace('...', '')
else:
rep_inds = ellipse_inds[-ellipse_count:]
split_subscripts[num] = sub.replace('...', rep_inds)
subscripts = ",".join(split_subscripts)
if longest == 0:
out_ellipse = ""
else:
out_ellipse = ellipse_inds[-longest:]
if out_sub:
subscripts += "->" + output_sub.replace("...", out_ellipse)
else:
# Special care for outputless ellipses
output_subscript = ""
tmp_subscripts = subscripts.replace(",", "")
for s in sorted(set(tmp_subscripts)):
if s not in (einsum_symbols):
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
normal_inds = ''.join(sorted(set(output_subscript) -
set(out_ellipse)))
subscripts += "->" + out_ellipse + normal_inds
# Build output string if does not exist
if "->" in subscripts:
input_subscripts, output_subscript = subscripts.split("->")
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(",", "")
output_subscript = ""
for s in sorted(set(tmp_subscripts)):
if s not in einsum_symbols:
raise ValueError("Character %s is not a valid symbol." % s)
if tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError("Output character %s did not appear in the input"
% char)
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError("Number of einsum subscripts must be equal to the "
"number of operands.")
return (input_subscripts, output_subscript, operands)
def _einsum_path_dispatcher(*operands, **kwargs):
# NOTE: technically, we should only dispatch on array-like arguments, not
# subscripts (given as strings). But separating operands into
# arrays/subscripts is a little tricky/slow (given einsum's two supported
# signatures), so as a practical shortcut we dispatch on everything.
# Strings will be ignored for dispatching since they don't define
# __array_function__.
return operands
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, **kwargs):
"""
einsum_path(subscripts, *operands, optimize='greedy')
Evaluates the lowest cost contraction order for an einsum expression by
considering the creation of intermediate arrays.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
*operands : list of array_like
These are the arrays for the operation.
optimize : {bool, list, tuple, 'greedy', 'optimal'}
Choose the type of path. If a tuple is provided, the second argument is
assumed to be the maximum intermediate size created. If only a single
argument is provided the largest input or output array size is used
as a maximum intermediate size.
* if a list is given that starts with ``einsum_path``, uses this as the
contraction path
* if False no optimization is taken
* if True defaults to the 'greedy' algorithm
* 'optimal' An algorithm that combinatorially explores all possible
ways of contracting the listed tensors and chooses the least costly
path. Scales exponentially with the number of terms in the
contraction.
* 'greedy' An algorithm that chooses the best pair contraction
at each step. Effectively, this algorithm searches the largest inner,
Hadamard, and then outer products at each step. Scales cubically with
the number of terms in the contraction. Equivalent to the 'optimal'
path for most contractions.
Default is 'greedy'.
Returns
-------
path : list of tuples
A list representation of the einsum path.
string_repr : str
A printable representation of the einsum path.
Notes
-----
The resulting path indicates which terms of the input contraction should be
contracted first; the result of this contraction is then appended to the
end of the contraction list. This list can then be iterated over until all
intermediate contractions are complete.
See Also
--------
einsum, linalg.multi_dot
Examples
--------
We can begin with a chain dot example. In this case, it is optimal to
contract the ``b`` and ``c`` tensors first as represented by the first
element of the path ``(1, 2)``. The resulting tensor is added to the end
of the contraction and the remaining contraction ``(0, 1)`` is then
completed.
>>> a = np.random.rand(2, 2)
>>> b = np.random.rand(2, 5)
>>> c = np.random.rand(5, 2)
>>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
>>> print(path_info[0])
['einsum_path', (1, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ij,jk,kl->il
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 1.600e+02
Optimized FLOP count: 5.600e+01
Theoretical speedup: 2.857
Largest intermediate: 4.000e+00 elements
-------------------------------------------------------------------------
scaling current remaining
-------------------------------------------------------------------------
3 kl,jk->jl ij,jl->il
3 jl,ij->il il->il
A more complex index transformation example.
>>> I = np.random.rand(10, 10, 10, 10)
>>> C = np.random.rand(10, 10)
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
optimize='greedy')
>>> print(path_info[0])
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
>>> print(path_info[1])
Complete contraction: ea,fb,abcd,gc,hd->efgh
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 8.000e+08
Optimized FLOP count: 8.000e+05
Theoretical speedup: 1000.000
Largest intermediate: 1.000e+04 elements
--------------------------------------------------------------------------
scaling current remaining
--------------------------------------------------------------------------
5 abcd,ea->bcde fb,gc,hd,bcde->efgh
5 bcde,fb->cdef gc,hd,cdef->efgh
5 cdef,gc->defg hd,defg->efgh
5 defg,hd->efgh efgh->efgh
"""
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize', 'einsum_call']
unknown_kwargs = [k for (k, v) in kwargs.items() if k
not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs:"
" %s" % unknown_kwargs)
# Figure out what the path really is
path_type = kwargs.pop('optimize', True)
if path_type is True:
path_type = 'greedy'
if path_type is None:
path_type = False
memory_limit = None
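# At this point path_type is one of: False, a named algorithm such as 'greedy'
# or 'optimal', an explicit ['einsum_path', ...] list, or a (name, memory cap)
# tuple handled below.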
# No optimization or a named path algorithm
if (path_type is False) or isinstance(path_type, basestring):
pass
# Given an explicit path
elif len(path_type) and (path_type[0] == 'einsum_path'):
pass
# Path tuple with memory limit
elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
else:
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
einsum_call_arg = kwargs.pop("einsum_call", False)
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
subscripts = input_subscripts + '->' + output_subscript
# Build a few useful list and sets
input_list = input_subscripts.split(',')
input_sets = [set(x) for x in input_list]
output_set = set(output_subscript)
indices = set(input_subscripts.replace(',', ''))
# Get length of each unique dimension and ensure all dimensions are correct
dimension_dict = {}
broadcast_indices = [[] for x in range(len(input_list))]
for tnum, term in enumerate(input_list):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError("Einstein sum subscript %s does not contain the "
"correct number of indices for operand %d."
% (input_subscripts[tnum], tnum))
for cnum, char in enumerate(term):
dim = sh[cnum]
# Build out broadcast indices
if dim == 1:
broadcast_indices[tnum].append(char)
if char in dimension_dict.keys():
# For broadcasting cases we always want the largest dim size
if dimension_dict[char] == 1:
dimension_dict[char] = dim
elif dim not in (1, dimension_dict[char]):
raise ValueError("Size of label '%s' for operand %d (%d) "
"does not match previous terms (%d)."
% (char, tnum, dimension_dict[char], dim))
else:
dimension_dict[char] = dim
# Convert broadcast inds to sets
broadcast_indices = [set(x) for x in broadcast_indices]
# Compute size of each input array plus the output array
size_list = []
for term in input_list + [output_subscript]:
size_list.append(_compute_size_by_dict(term, dimension_dict))
max_size = max(size_list)
if memory_limit is None:
memory_arg = max_size
else:
memory_arg = memory_limit
# Compute naive cost
# This isn't quite right, need to look into exactly how einsum does this
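# An index appearing in more than one input term implies an inner product.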
inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)
# Compute the path
if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type[0] == 'einsum_path':
path = path_type[1:]
else:
raise KeyError("Path name %s not found" % path_type)
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(list(contract_inds), reverse=True))
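# (popping the higher positions first keeps the remaining positions valid)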
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
bcast = set()
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
bcast |= broadcast_indices.pop(x)
new_bcast_inds = bcast - idx_removed
# If we're broadcasting, nix blas
if not len(idx_removed & bcast):
do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
else:
do_blas = False
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
broadcast_indices.append(new_bcast_inds)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if einsum_call_arg:
return (operands, contraction_list)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = " Complete contraction: %s\n" % overall_contraction
path_print += " Naive scaling: %d\n" % len(indices)
path_print += " Optimized scaling: %d\n" % max(scale_list)
path_print += " Naive FLOP count: %.3e\n" % naive_cost
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
path_print += " Theoretical speedup: %3.3f\n" % speedup
path_print += " Largest intermediate: %.3e elements\n" % max_i
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
path = ['einsum_path'] + path
return (path, path_print)
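# Illustrative sketch (not part of the original module): the returned ``path``
# can be passed back through ``optimize=``, and each ``contraction_list`` entry
# built above is a tuple of (contract_inds, idx_removed, einsum_str, remaining,
# do_blas). Array names and shapes below are assumptions for the example only.
#
#   >>> a, b, c = np.ones((4, 5)), np.ones((5, 6)), np.ones((6, 3))
#   >>> path, info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
#   >>> path    # e.g. ['einsum_path', (0, 1), (0, 1)], depending on shapes
#   >>> print(info)  # the human-readable report assembled above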
def _einsum_dispatcher(*operands, **kwargs):
    # Arguably we dispatch on more arguments than we really should; see note in
# _einsum_path_dispatcher for why.
for op in operands:
yield op
yield kwargs.get('out')
# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
Added the ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. In some cases 'optimal'
will return the superlative path through a more expensive, exhaustive search.
For iterative calculations it may be advisable to calculate the optimal path
once and reuse that path by supplying it as an argument. An example is given
below.
See :py:func:`numpy.einsum_path` for more details.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path or pre-computing the
'optimal' path and repeatedly applying it, using an
`einsum_path` insertion (since version 1.12.0). Performance improvements can be
particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
# Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
# Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
# Greedy `einsum` (faster optimal path approximation): ~160ms
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
# Optimal `einsum` (best usage pattern in some use cases): ~110ms
>>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
"""
# Grab non-einsum kwargs; do not optimize by default.
optimize_arg = kwargs.pop('optimize', False)
# If no optimization, run pure einsum
if optimize_arg is False:
return c_einsum(*operands, **kwargs)
valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
einsum_kwargs = {k: v for (k, v) in kwargs.items() if
k in valid_einsum_kwargs}
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
unknown_kwargs = [k for (k, v) in kwargs.items() if
k not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
    # Special handling if out is specified
specified_out = False
out_array = einsum_kwargs.pop('out', None)
if out_array is not None:
specified_out = True
# Build the contraction list and operand
operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
einsum_call=True)
handle_out = False
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
tmp_operands = []
for x in inds:
tmp_operands.append(operands.pop(x))
# Do we need to deal with the output?
handle_out = specified_out and ((num + 1) == len(contraction_list))
# Call tensordot if still possible
if blas:
# Checks have already been handled
input_str, results_index = einsum_str.split('->')
input_left, input_right = input_str.split(',')
tensor_result = input_left + input_right
for s in idx_rm:
tensor_result = tensor_result.replace(s, "")
# Find indices to contract over
left_pos, right_pos = [], []
for s in sorted(idx_rm):
left_pos.append(input_left.find(s))
right_pos.append(input_right.find(s))
# Contract!
new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
# Build a new view if needed
if (tensor_result != results_index) or handle_out:
if handle_out:
einsum_kwargs["out"] = out_array
new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)
# Call einsum
else:
# If out was specified
if handle_out:
einsum_kwargs["out"] = out_array
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
# Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
return out_array
else:
return operands[0]
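# Illustrative sketch (not part of the original module): the ``specified_out``
# handling above also works together with ``optimize=``; names and shapes
# below are assumptions for the example only.
#
#   >>> a, b, c = np.ones((4, 5)), np.ones((5, 6)), np.ones((6, 3))
#   >>> out = np.empty((4, 3))
#   >>> result = np.einsum('ij,jk,kl->il', a, b, c, out=out, optimize=True)
#   >>> result is out
#   True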
| bsd-3-clause | 551,246,019,888,227,500 | 34.995091 | 118 | 0.586043 | false |
blutooth/gp-svi | tests/test_systematic.py | 1 | 14736 | from __future__ import absolute_import
import autograd.numpy.random as npr
import autograd.numpy as np
import operator as op
from numpy_utils import (combo_check, stat_check, unary_ufunc_check,
binary_ufunc_check, binary_ufunc_check_no_same_args)
npr.seed(0)
# Array statistics functions
def test_max(): stat_check(np.max)
def test_all(): stat_check(np.all)
def test_any(): stat_check(np.any)
def test_max(): stat_check(np.max)
def test_mean(): stat_check(np.mean)
def test_min(): stat_check(np.min)
def test_sum(): stat_check(np.sum)
def test_prod(): stat_check(np.prod)
def test_var(): stat_check(np.var)
def test_std(): stat_check(np.std)
# Unary ufunc tests
def test_sin(): unary_ufunc_check(np.sin)
def test_abs(): unary_ufunc_check(np.abs, lims=[0.1, 4.0])
def test_absolute(): unary_ufunc_check(np.absolute, lims=[0.1, 4.0])
def test_abs_builtin(): unary_ufunc_check(abs, lims=[0.1, 4.0])
def test_arccosh(): unary_ufunc_check(np.arccosh, lims=[1.1, 4.0])
def test_arcsinh(): unary_ufunc_check(np.arcsinh, lims=[-0.9, 0.9])
def test_arctanh(): unary_ufunc_check(np.arctanh, lims=[-0.9, 0.9])
def test_ceil(): unary_ufunc_check(np.ceil, lims=[-1.5, 1.5], test_complex=False)
def test_cos(): unary_ufunc_check(np.cos)
def test_cosh(): unary_ufunc_check(np.cosh)
def test_deg2rad(): unary_ufunc_check(np.deg2rad, test_complex=False)
def test_degrees(): unary_ufunc_check(lambda x : np.degrees(x)/50.0, test_complex=False)
def test_exp(): unary_ufunc_check(np.exp)
def test_exp2(): unary_ufunc_check(np.exp2)
def test_expm1(): unary_ufunc_check(np.expm1)
def test_fabs(): unary_ufunc_check(np.fabs, test_complex=False)
def test_floor(): unary_ufunc_check(np.floor, lims=[-1.5, 1.5], test_complex=False)
def test_log(): unary_ufunc_check(np.log, lims=[0.2, 2.0])
def test_log10(): unary_ufunc_check(np.log10, lims=[0.2, 2.0])
def test_log1p(): unary_ufunc_check(np.log1p, lims=[0.2, 2.0])
def test_log2(): unary_ufunc_check(np.log2, lims=[0.2, 2.0])
def test_rad2deg(): unary_ufunc_check(lambda x : np.rad2deg(x)/50.0, test_complex=False)
def test_radians(): unary_ufunc_check(np.radians, test_complex=False)
def test_sign(): unary_ufunc_check(np.sign)
def test_sin(): unary_ufunc_check(np.sin)
def test_sinh(): unary_ufunc_check(np.sinh)
def test_sqrt(): unary_ufunc_check(np.sqrt, lims=[1.0, 3.0])
def test_square(): unary_ufunc_check(np.square, test_complex=False)
def test_tan(): unary_ufunc_check(np.tan, lims=[-1.1, 1.1])
def test_tanh(): unary_ufunc_check(np.tanh)
def test_real(): unary_ufunc_check(np.real)
def test_real_ic(): unary_ufunc_check(np.real_if_close)
def test_imag(): unary_ufunc_check(np.imag)
def test_conj(): unary_ufunc_check(np.conj)
def test_angle(): unary_ufunc_check(np.angle)
def test_ldexp(): unary_ufunc_check(lambda x: np.ldexp(x, 3), test_complex=False)
def test_ldexp_neg(): unary_ufunc_check(lambda x: np.ldexp(x, -2), test_complex=False)
# Re-add these tests once we switch to new refactoring that gets rid of NoDerivativeNode
# def test_frexp(): unary_ufunc_check(lambda x: np.frexp(x)[0], lims=[2.1, 2.9], test_complex=False)
# def test_frexp_neg(): unary_ufunc_check(lambda x: np.frexp(x)[0], lims=[-2.1, -2.9], test_complex=False)
# Binary ufunc tests
def test_add(): binary_ufunc_check(np.add)
def test_logaddexp(): binary_ufunc_check(np.logaddexp, test_complex=False)
def test_logaddexp2(): binary_ufunc_check(np.logaddexp2, test_complex=False)
def test_true_divide(): binary_ufunc_check(np.true_divide, lims_B=[0.3, 2.0], test_complex=False)
def test_true_divide_neg(): binary_ufunc_check(np.true_divide, lims_B=[-0.9, -2.0], test_complex=False)
def test_hypot(): binary_ufunc_check(np.hypot, test_complex=False)
def test_arctan2(): binary_ufunc_check(np.arctan2, test_complex=False)
def test_copysign(): binary_ufunc_check(np.copysign, test_complex=False)
def test_nextafter(): binary_ufunc_check(np.nextafter, test_complex=False)
def test_remainder(): binary_ufunc_check_no_same_args(np.remainder, lims_A=[-0.9, 0.9], lims_B=[0.7, 1.9], test_complex=False)
def test_fmod(): binary_ufunc_check_no_same_args(np.fmod, lims_A=[-0.9, 0.9], lims_B=[0.7, 1.9], test_complex=False)
def test_mod(): binary_ufunc_check_no_same_args(np.mod, lims_B=[ 0.8, 2.1], test_complex=False)
def test_mod_neg(): binary_ufunc_check_no_same_args(np.mod, lims_B=[-0.9, -2.0], test_complex=False)
def test_op_mul(): binary_ufunc_check(op.mul)
def test_op_add(): binary_ufunc_check(op.add)
def test_op_sub(): binary_ufunc_check(op.sub)
def test_op_div(): binary_ufunc_check(op.truediv, lims_B=[0.5, 2.0])
def test_op_pow(): binary_ufunc_check(op.pow, lims_A=[0.7, 2.0])
def test_op_mod(): binary_ufunc_check_no_same_args(op.mod, lims_B=[0.3, 2.0], test_complex=False)
def test_op_mod_neg(): binary_ufunc_check_no_same_args(op.mod, lims_B=[-0.3, -2.0], test_complex=False)
# Misc tests
R = npr.randn
def test_transpose(): combo_check(np.transpose, [0],
[R(2, 3, 4)], axes = [None, [0, 1, 2], [0, 2, 1],
[2, 0, 1], [2, 1, 0],
[1, 0, 2], [1, 2, 0]])
def test_repeat(): combo_check(np.repeat, [0], [R(2, 3, 4), R(3, 1)],
repeats=[0,1,2], axis = [None, 0, 1])
def test_diff():
combo_check(np.diff, [0], [R(5,5), R(5,5,5)], n=[1,2], axis=[0,1])
combo_check(np.diff, [0], [R(1), R(1,1)], axis=[0])
combo_check(np.diff, [0], [R(1,1), R(3,1)], axis=[1])
def test_tile():
combo_check(np.tile, [0], [R(2,1,3,1)], reps=[(1, 4, 1, 2)])
combo_check(np.tile, [0], [R(1,2)], reps=[(1,2), (2,3), (3,2,1)])
combo_check(np.tile, [0], [R(1)], reps=[(2,), 2])
def test_inner(): combo_check(np.inner, [0, 1],
[1.5, R(3), R(2, 3)],
[0.3, R(3), R(4, 3)])
def test_dot(): combo_check(np.dot, [0, 1],
[1.5, R(3), R(2, 3), R(2, 2, 3)],
[0.3, R(3), R(3, 4), R(2, 3, 4)])
def test_matmul(): combo_check(np.matmul, [0, 1],
[R(3), R(2, 3), R(2, 2, 3)],
[R(3), R(3, 4), R(2, 3, 4)])
def test_tensordot_1(): combo_check(np.tensordot, [0, 1],
[R(1, 3), R(2, 3, 2)],
[R(3), R(3, 1), R(3, 4, 2)],
axes=[ [(1,), (0,)] ])
def test_tensordot_2(): combo_check(np.tensordot, [0, 1],
[R(3), R(3, 1), R(3, 4, 2)],
[R(1, 3), R(2, 3, 2)],
axes=[ [(0,), (1,)] ])
def test_tensordot_3(): combo_check(np.tensordot, [0, 1],
[R(2, 3), R(2, 3, 4)],
[R(1, 2, 3), R(2, 2, 3, 4)],
axes=[ [(0, 1), (1, 2)] , [(1, 0), (2, 1)] ])
def test_tensordot_4(): combo_check(np.tensordot, [0, 1],
[R(2, 2), R(4, 2, 2)],
[R(2, 2), R(2, 2, 4)],
axes=[1, 2])
def test_tensordot_5(): combo_check(np.tensordot, [0, 1], [R(4)], [R()], axes=[0])
def test_tensordot_6(): combo_check(np.tensordot, [0, 1], [R(2,6)], [R(6,3)], axes=[[[-1], [0]]])
# Need custom tests because gradient is undefined when arguments are identical.
def test_maximum(): combo_check(np.maximum, [0, 1],
[R(1), R(1,4), R(3, 4)],
[R(1), R(1,4), R(3, 4)])
def test_fmax(): combo_check(np.fmax, [0, 1],
[R(1), R(1,4), R(3, 4)],
[R(1), R(1,4), R(3, 4)])
def test_minimum(): combo_check(np.minimum, [0, 1],
[R(1), R(1,4), R(3, 4)],
[R(1), R(1,4), R(3, 4)])
def test_fmin(): combo_check(np.fmin, [0, 1],
[R(1), R(1,4), R(3, 4)],
[R(1), R(1,4), R(3, 4)])
def test_sort(): combo_check(np.sort, [0], [R(1), R(7)])
def test_msort(): combo_check(np.msort, [0], [R(1), R(7)])
def test_partition(): combo_check(np.partition, [0],
[R(7), R(14)], kth=[0, 3, 6])
def test_atleast_1d(): combo_check(np.atleast_1d, [0], [1.2, R(1), R(7), R(1,4), R(2,4), R(2, 4, 5)])
def test_atleast_2d(): combo_check(np.atleast_2d, [0], [1.2, R(1), R(7), R(1,4), R(2,4), R(2, 4, 5)])
def test_atleast_3d(): combo_check(np.atleast_3d, [0], [1.2, R(1), R(7), R(1,4), R(2,4), R(2, 4, 5),
R(2, 4, 3, 5)])
def test_einsum_transpose(): combo_check(np.einsum, [1], ['ij->ji'], [R(1, 1), R(4,4), R(3,4)])
def test_einsum_matmult(): combo_check(np.einsum, [1, 2], ['ij,jk->ik'], [R(2, 3)], [R(3,4)])
def test_einsum_matmult_broadcast(): combo_check(np.einsum, [1, 2], ['...ij,...jk->...ik'],
[R(2, 3), R(2, 2, 3)],
[R(3, 4), R(2, 3, 4)])
def test_einsum_covsum(): combo_check(np.einsum, [1, 2], ['ijk,lji->lki'], [R(3, 4, 4)], [R(4, 4, 3)])
def test_einsum_ellipses(): combo_check(np.einsum, [1, 2], ['...jk,...lj->...lk', '...,...->...'],
[R(4, 4), R(3, 4, 4)],
[R(4, 4), R(3, 4, 4)])
def test_einsum_ellipses_tail(): combo_check(np.einsum, [1, 2], ['jk...,lj...->lk...'],
[R(3, 2), R(3, 2, 4)],
[R(2, 3), R(2, 3, 4)])
def test_einsum_ellipses_center(): combo_check(np.einsum, [1, 2], ['j...k,lj...->lk...'],
[R(2, 2), R(2, 2, 2)],
[R(2, 2), R(2, 2, 2)])
def test_einsum_three_args(): combo_check(np.einsum, [1, 2], ['ijk,lji,lli->lki'],
[R(3, 4, 4)], [R(4, 4, 3)], [R(4, 4, 3)])
def test_einsum2_transpose(): combo_check(np.einsum, [0], [R(1, 1), R(4,4), R(3,4)], [(0,1)], [(1,0)])
def test_einsum2_matmult(): combo_check(np.einsum, [0, 2], [R(2, 3)], [(0,1)], [R(3,4)], [(1,2)], [(0,2)])
def test_einsum2_matmult_broadcast(): combo_check(np.einsum, [0, 2],
[R(2, 3), R(2, 2, 3)], [(Ellipsis, 0, 1)],
[R(3, 4), R(2, 3, 4)], [(Ellipsis, 1, 2)],
[(Ellipsis, 0, 2)])
def test_einsum2_covsum(): combo_check(np.einsum, [0, 2], [R(3, 4, 4)], [(0,1,2)], [R(4, 4, 3)], [(3,1,0)], [(3,2,0)])
def test_einsum2_three_args(): combo_check(np.einsum, [0, 2],
[R(3, 4, 4)], [(0,1,2)], [R(4, 4, 3)], [(3,1,0)], [R(4, 4, 3)], [(3,3,0)], [(3,2,0)])
def test_trace(): combo_check(np.trace, [0], [R(5, 5), R(4, 5), R(5, 4), R(3, 4, 5)], offset=[-1, 0, 1])
def test_diag(): combo_check(np.diag, [0], [R(5, 5)], k=[-1, 0, 1])
def test_diag_flat():combo_check(np.diag, [0], [R(5)], k=[-1, 0, 1])
def test_tril(): combo_check(np.tril, [0], [R(5, 5)], k=[-1, 0, 1])
def test_triu(): combo_check(np.tril, [0], [R(5, 5)], k=[-1, 0, 1])
def test_tril_3d(): combo_check(np.tril, [0], [R(5, 5, 4)], k=[-1, 0, 1])
def test_triu_3d(): combo_check(np.tril, [0], [R(5, 5, 4)], k=[-1, 0, 1])
def test_swapaxes(): combo_check(np.swapaxes, [0], [R(3, 4, 5)], axis1=[0, 1, 2], axis2=[0, 1, 2])
def test_rollaxis(): combo_check(np.rollaxis, [0], [R(2, 3, 4)], axis =[0, 1, 2], start=[0, 1, 2, 3])
def test_cross(): combo_check(np.cross, [0, 1], [R(3, 3)], [R(3, 3)],
axisa=[-1, 0, 1], axisb=[-1, 0, 1], axisc=[-1, 0, 1], axis=[None, -1, 0, 1])
def test_vsplit_2d(): combo_check(np.vsplit, [0], [R(4, 8)], [4, [1, 2]])
def test_vsplit_3d(): combo_check(np.vsplit, [0], [R(4, 4, 4)], [2, [1, 2]])
def test_hsplit_2d(): combo_check(np.hsplit, [0], [R(4, 8)], [4, [1, 2]])
def test_hsplit_3d(): combo_check(np.hsplit, [0], [R(4, 4, 4)], [2, [1, 2]])
def test_dsplit_3d(): combo_check(np.dsplit, [0], [R(4, 4, 4)], [2, [1, 2]])
def test_split_1d(): combo_check(np.split, [0], [R(1), R(7)], [1], axis=[0])
def test_split_2d(): combo_check(np.split, [0], [R(4, 8)], [4, [1, 2]], axis=[0, 1])
def test_split_3d(): combo_check(np.split, [0], [R(4, 4, 4)], [2, [1, 2]], axis=[0, 1, 2])
def test_array_split_1d(): combo_check(np.array_split, [0], [R(1), R(7)], [1, 3], axis=[0])
def test_array_split_2d(): combo_check(np.array_split, [0], [R(7, 7)], [4, [3, 5]], axis=[0, 1])
def test_array_split_3d(): combo_check(np.array_split, [0], [R(7, 7, 7)], [4, [3, 5]], axis=[0, 1, 2])
def test_concatenate_1ist(): combo_check(np.concatenate, [0], [(R(1), R(3))], axis=[0])
def test_concatenate_tuple(): combo_check(np.concatenate, [0], [[R(1), R(3)]], axis=[0])
def test_concatenate_2d(): combo_check(np.concatenate, [0], [(R(2, 2), R(2, 2))], axis=[0, 1])
def test_concatenate_3d(): combo_check(np.concatenate, [0], [(R(2, 2, 2), R(2, 2, 2))], axis=[0, 1, 2])
def test_vstack_1d(): combo_check(np.vstack, [0], [R(2), (R(2), R(2))])
def test_vstack_2d(): combo_check(np.vstack, [0], [R(2, 3), (R(2, 4), R(1, 4))])
def test_vstack_3d(): combo_check(np.vstack, [0], [R(2, 3, 4), (R(2, 3, 4), R(5, 3, 4))])
def test_hstack_1d(): combo_check(np.hstack, [0], [R(2), (R(2), R(2))])
def test_hstack_2d(): combo_check(np.hstack, [0], [R(3, 2), (R(3, 4), R(3, 5))])
def test_hstack_3d(): combo_check(np.hstack, [0], [R(2, 3, 4), (R(2, 1, 4), R(2, 5, 4))])
def test_stack_1d(): combo_check(np.stack, [0], [(R(2),), (R(2), R(2))], axis=[0, 1])
def test_row_stack_1d(): combo_check(np.row_stack, [0], [R(2), (R(2), R(2))])
def test_row_stack_2d(): combo_check(np.row_stack, [0], [R(2, 3), (R(2, 4), R(1, 4))])
def test_column_stack_1d(): combo_check(np.column_stack, [0], [R(2), (R(2), R(2))])
def test_column_stack_2d(): combo_check(np.column_stack, [0], [R(2, 2), (R(2, 2), R(2, 2))])
def test_select(): combo_check(np.select, [1], [[R(3,4,5) > 0, R(3,4,5) > 0, R(3,4,5) > 0]],
[[R(3,4,5), R(3,4,5), R(3,4,5)]], default=[0.0, 1.1])
| mit | 6,447,764,142,806,344,000 | 61.440678 | 132 | 0.499321 | false |
fnp/wolnelektury | src/wolnelektury/management/commands/localepack.py | 1 | 9451 | # This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
import os
import shutil
import sys
import tempfile
import allauth
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.management import call_command
from wolnelektury.utils import makedirs
from .translation2po import get_languages
ROOT = settings.ROOT_DIR
def is_our_app(mod):
return mod.__path__[0].startswith(ROOT)
class Locale(object):
def save(self, output_directory, languages):
pass
def compile(self):
pass
def generate(self, languages):
pass
def copy_f(frm, to):
makedirs(os.path.dirname(to))
shutil.copyfile(frm, to)
class AppLocale(Locale):
def __init__(self, appmod):
self.app = appmod
if not os.path.exists(os.path.join(self.path, 'locale')):
raise LookupError('No locale for app %s' % appmod)
@property
def path(self):
return self.app.__path__[0]
@property
def name(self):
return self.app.__name__
def save(self, output_directory, languages):
for lc in languages:
lc = lc[0]
if os.path.exists(os.path.join(self.path, 'locale', lc)):
copy_f(os.path.join(self.path, 'locale', lc, 'LC_MESSAGES', 'django.po'),
os.path.join(output_directory, lc, self.name + '.po'))
def load(self, input_directory, languages):
for lc in zip(*languages)[0]:
if os.path.exists(os.path.join(input_directory, lc, self.name + '.po')):
out = os.path.join(self.path, 'locale', lc, 'LC_MESSAGES', 'django.po')
makedirs(os.path.dirname(out))
copy_f(os.path.join(input_directory, lc, self.name + '.po'), out)
self.compile()
def compile(self):
wd = os.getcwd()
os.chdir(self.path)
try:
call_command('compilemessages', verbosity=0, settings='wolnelektury.settings')
except:
pass
finally:
os.chdir(wd)
def generate(self, languages):
wd = os.getcwd()
os.chdir(self.path)
try:
call_command('makemessages', all=True)
except:
pass
finally:
os.chdir(wd)
class ModelTranslation(Locale):
def __init__(self, appname, poname=None):
self.appname = appname
self.poname = poname and poname or appname
def save(self, output_directory, languages):
call_command('translation2po', self.appname, directory=output_directory, poname=self.poname)
def load(self, input_directory, languages):
call_command('translation2po', self.appname, directory=input_directory,
load=True, lang=','.join(zip(*languages)[0]), poname=self.poname, keep_running=True)
class CustomLocale(Locale):
def __init__(self, app_dir,
config=os.path.join(ROOT, "babel.cfg"),
out_file=os.path.join(ROOT, 'src/wolnelektury/locale-contrib/django.pot'),
name=None):
self.app_dir = app_dir
self.config = config
self.out_file = out_file
self.name = name
def generate(self, languages):
os.system('pybabel extract -F "%s" -o "%s" "%s"' % (self.config, self.out_file, self.app_dir))
os.system('pybabel update -D django -i %s -d %s' % (self.out_file, os.path.dirname(self.out_file)))
def po_file(self, language):
d = os.path.dirname(self.out_file)
n = os.path.basename(self.out_file).split('.')[0]
return os.path.join(d, language, 'LC_MESSAGES', n + '.po')
def save(self, output_directory, languages):
for lc in zip(*languages)[0]:
if os.path.exists(self.po_file(lc)):
copy_f(self.po_file(lc),
os.path.join(output_directory, lc, self.name + '.po'))
def load(self, input_directory, languages):
for lc in zip(*languages)[0]:
copy_f(os.path.join(input_directory, lc, self.name + '.po'),
self.po_file(lc))
self.compile()
def compile(self):
os.system('pybabel compile -D django -d %s' % os.path.dirname(self.out_file))
SOURCES = []
for appn in settings.INSTALLED_APPS:
app = __import__(appn)
if is_our_app(app):
try:
SOURCES.append(AppLocale(app))
except LookupError as e:
print("no locales in %s" % app.__name__)
SOURCES.append(ModelTranslation('infopages', 'infopages_db'))
SOURCES.append(CustomLocale(os.path.dirname(allauth.__file__), name='contrib'))
class Command(BaseCommand):
help = 'Make a locale pack'
def add_arguments(self, parser):
parser.add_argument(
'-l', '--load', help='load locales back to source',
action='store_true', dest='load', default=False)
parser.add_argument(
'-c', '--compile', help='compile messages',
action='store_true', dest='compile', default=False)
parser.add_argument(
'-g', '--generate', help='generate messages',
action='store_true', dest='generate', default=False)
parser.add_argument(
'-L', '--lang', help='load just one language',
dest='lang', default=None)
parser.add_argument(
'-d', '--directory', help='load from this directory',
dest='directory', default=None)
parser.add_argument(
'-o', '--outfile', help='Resulting zip file',
dest='outfile', default='./wl-locale.zip')
parser.add_argument(
'-m', '--merge', action='store_true',
dest='merge', default=False,
help='Use git to merge. Please use with clean working directory.')
parser.add_argument(
'-M', '--message', help='commit message',
dest='message', default='New locale')
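    # Example invocations (assumed, for illustration only; adjust the paths to
    # your checkout). Build a locale pack, or load one back and git-merge it:
    #   python manage.py localepack -o wl-locale.zip
    #   python manage.py localepack -l -d ./wl-locale -m -M "New locale"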
def current_rev(self):
return os.popen('git rev-parse HEAD').read()
def current_branch(self):
return os.popen("git branch |grep '^[*]' | cut -c 3-").read()
def save(self, options):
packname = options.get('outfile')
packname_b = os.path.basename(packname).split('.')[0]
fmt = '.'.join(os.path.basename(packname).split('.')[1:])
if fmt != 'zip':
raise NotImplementedError('Sorry. Only zip format supported at the moment.')
tmp_dir = tempfile.mkdtemp('-wl-locale')
out_dir = os.path.join(tmp_dir, packname_b)
os.mkdir(out_dir)
try:
for lang in settings.LANGUAGES:
os.mkdir(os.path.join(out_dir, lang[0]))
for src in SOURCES:
src.generate(settings.LANGUAGES)
src.save(out_dir, settings.LANGUAGES)
# src.save(settings.LANGUAGES)
# write out revision
rev = self.current_rev()
rf = open(os.path.join(out_dir, '.revision'), 'w')
rf.write(rev)
rf.close()
cwd = os.getcwd()
try:
os.chdir(os.path.dirname(out_dir))
self.system('zip -r %s %s' % (os.path.join(cwd, packname_b+'.zip'), os.path.basename(out_dir)))
finally:
os.chdir(cwd)
# shutil.make_archive(packname_b, fmt, root_dir=os.path.dirname(out_dir),
# base_dir=os.path.basename(out_dir))
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
def generate(self):
for src in SOURCES:
src.generate(settings.LANGUAGES)
def load(self, options):
langs = get_languages(options['lang'])
for src in SOURCES:
src.load(options['directory'], langs)
def compile(self):
for src in SOURCES:
src.compile()
def handle(self, **options):
if options['load']:
if not options['directory'] or not os.path.exists(options['directory']):
print("Directory not provided or does not exist, please use -d")
sys.exit(1)
if options['merge']:
self.merge_setup(options['directory'])
self.load(options)
if options['merge']:
self.merge_finish(options['message'])
elif options['generate']:
self.generate()
elif options['compile']:
self.compile()
else:
self.save(options)
merge_branch = 'wl-locale-merge'
last_branch = None
def merge_setup(self, directory):
self.last_branch = self.current_branch()
rev = open(os.path.join(directory, '.revision')).read()
self.system('git checkout -b %s %s' % (self.merge_branch, rev))
def merge_finish(self, message):
self.system('git commit -a -m "%s"' % message.replace('"', '\\"'))
self.system('git checkout %s' % self.last_branch)
self.system('git merge -s recursive -X theirs %s' % self.merge_branch)
self.system('git branch -d %s' % self.merge_branch)
def system(self, fmt, *args):
code = os.system(fmt % args)
if code != 0:
raise OSError('Command %s returned with exit code %d' % (fmt % args, code))
return code
| agpl-3.0 | 127,870,635,041,599,140 | 33.23913 | 111 | 0.56709 | false |
EiNSTeiN-/deluge-gtk3 | deluge/plugins/blocklist/blocklist/webui.py | 1 | 2242 | #
# blocklist/webui.py
#
# Copyright (C) 2008 Martijn Voncken <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import os
from deluge.log import LOG as log
from deluge.ui.client import client
from deluge import component
from deluge.plugins.pluginbase import WebPluginBase
from common import get_resource
#import deluge.ui.webui.lib.newforms_plus as forms
#config_page_manager = component.get("ConfigPageManager")
FORMAT_LIST = [
('gzmule',_("Emule IP list (GZip)")),
('spzip',_("SafePeer Text (Zipped)")),
('pgtext',_("PeerGuardian Text (Uncompressed)")),
('p2bgz',_("PeerGuardian P2B (GZip)"))
]
class WebUI(WebPluginBase):
def enable(self):
#config_page_manager.register('plugins','blocklist',BlockListCfgForm)
pass
def disable(self):
#config_page_manager.deregister('blocklist')
pass
scripts = [get_resource("blocklist.js")]
debug_scripts = scripts
| gpl-3.0 | 5,153,763,284,714,236,000 | 32.462687 | 78 | 0.716771 | false |
sidus-dev/poseidon | include/theme.py | 1 | 2058 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Poseidon.
#
# Poseidon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Poseidon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Poseidon. If not, see <http://www.gnu.org/licenses/>.
import os, sys, gi
from gi.repository import Gtk, Gdk
sys.path.append(".")
from settings import theme_file, trans_pop
from functions import is_image_valid
def apply_css():
# Tested on Gtk 3.18/3.20
alpha = "popover, .popover { opacity: 0.95; }"
theme = "#notebook header { background: url('"\
+ theme_file + "') no-repeat center; background-size: cover; }"
css = """
#notebook tab { padding: 5px 10px 5px 10px; }
#frame_main border, #frame_find border, #frame_vte border, #frame_status border,
#frame_permission border, #frame_cert border, #frame_cookies border { border-style: none; }
#frame_main, #frame_find, #frame_vte, #frame_status, #frame_permission, #frame_cert, #frame_cookies,
#frame_mime border, #frame_mime { padding: 5px; }
#notebook header { background: none; }
#entry border { border-style: solid; }
#label_x509 { padding: 10px; }
#frame_x509 border { border-width: 0px 0px 1px 0px; }
"""
if trans_pop: css += alpha
if os.path.exists(theme_file):
if is_image_valid(theme_file): css += theme
cssprovider = Gtk.CssProvider()
cssprovider.load_from_data(bytes(css.encode()))
screen = Gdk.Screen.get_default()
stylecontext = Gtk.StyleContext()
stylecontext.add_provider_for_screen(screen, cssprovider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
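# Minimal usage sketch (assumption: called once during startup, before the main
# window is shown), so the provider is registered for the whole screen:
#
#   apply_css()
#   window = Gtk.Window()
#   window.show_all()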
| gpl-3.0 | 6,193,077,838,437,735,000 | 33.881356 | 104 | 0.69242 | false |
mihaisoloi/conpaas | conpaas-services/contrib/libcloud/compute/drivers/opennebula.py | 1 | 42567 | # Copyright 2002-2009, Distributed Systems Architecture Group, Universidad
# Complutense de Madrid (dsa-research.org)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenNebula.org driver.
"""
__docformat__ = 'epytext'
from xml.etree import ElementTree as ET
from base64 import b64encode
import hashlib
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import b
from libcloud.compute.base import NodeState, NodeDriver, Node, NodeLocation
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
from libcloud.compute.base import NodeImage, NodeSize, StorageVolume
from libcloud.common.types import InvalidCredsError
from libcloud.compute.providers import Provider
__all__ = [
'ACTION',
'OpenNebulaResponse',
'OpenNebulaConnection',
'OpenNebulaNodeSize',
'OpenNebulaNetwork',
'OpenNebulaNodeDriver',
'OpenNebula_1_4_NodeDriver',
'OpenNebula_2_0_NodeDriver',
'OpenNebula_3_0_NodeDriver',
'OpenNebula_3_2_NodeDriver',
'OpenNebula_3_8_NodeDriver']
API_HOST = ''
API_PORT = (4567, 443)
API_SECURE = True
API_PLAIN_AUTH = False
DEFAULT_API_VERSION = '3.2'
class ACTION(object):
"""
All actions, except RESUME, only apply when the VM is in the "Running"
state.
"""
STOP = 'STOPPED'
"""
The VM is stopped, and its memory state stored to a checkpoint file. VM
state, and disk image, are transferred back to the front-end. Resuming
the VM requires the VM instance to be re-scheduled.
"""
SUSPEND = 'SUSPENDED'
"""
The VM is stopped, and its memory state stored to a checkpoint file. The VM
state, and disk image, are left on the host to be resumed later. Resuming
the VM does not require the VM to be re-scheduled. Rather, after
suspending, the VM resources are reserved for later resuming.
"""
RESUME = 'RESUME'
"""
The VM is resumed using the saved memory state from the checkpoint file,
and the VM's disk image. The VM is either started immediately, or
re-scheduled depending on how it was suspended.
"""
CANCEL = 'CANCEL'
"""
The VM is forcibly shutdown, its memory state is deleted. If a persistent
disk image was used, that disk image is transferred back to the front-end.
Any non-persistent disk images are deleted.
"""
SHUTDOWN = 'SHUTDOWN'
"""
    The VM is gracefully shut down by sending the ACPI signal. If the VM does
    not shut down, then it is considered to still be running. If successfully
    shut down, its memory state is deleted. If a persistent disk image was used,
that disk image is transferred back to the front-end. Any non-persistent
disk images are deleted.
"""
REBOOT = 'REBOOT'
"""
Introduced in OpenNebula v3.2.
The VM is gracefully restarted by sending the ACPI signal.
"""
DONE = 'DONE'
"""
The VM is forcibly shutdown, its memory state is deleted. If a persistent
disk image was used, that disk image is transferred back to the front-end.
Any non-persistent disk images are deleted.
"""
class OpenNebulaResponse(XmlResponse):
"""
XmlResponse class for the OpenNebula.org driver.
"""
def success(self):
"""
Check if response has the appropriate HTTP response code to be a
success.
@rtype: C{bool}
@return: True is success, else False.
"""
i = int(self.status)
return i >= 200 and i <= 299
def parse_error(self):
"""
Check if response contains any errors.
@raise: L{InvalidCredsError}
@rtype: C{ElementTree}
@return: Contents of HTTP response body.
"""
if int(self.status) == httplib.UNAUTHORIZED:
raise InvalidCredsError(self.body)
return self.body
class OpenNebulaConnection(ConnectionUserAndKey):
"""
Connection class for the OpenNebula.org driver.
with plain_auth support
"""
host = API_HOST
port = API_PORT
secure = API_SECURE
plain_auth = API_PLAIN_AUTH
responseCls = OpenNebulaResponse
def __init__(self, *args, **kwargs):
if 'plain_auth' in kwargs:
self.plain_auth = kwargs.pop('plain_auth')
super(OpenNebulaConnection, self).__init__(*args, **kwargs)
def add_default_headers(self, headers):
"""
Add headers required by the OpenNebula.org OCCI interface.
Includes adding Basic HTTP Authorization headers for authenticating
against the OpenNebula.org OCCI interface.
@type headers: C{dict}
@param headers: Dictionary containing HTTP headers.
@rtype: C{dict}
@return: Dictionary containing updated headers.
"""
if self.plain_auth:
passwd = self.key
else:
passwd = hashlib.sha1(b(self.key)).hexdigest()
headers['Authorization'] =\
('Basic %s' % b64encode(b('%s:%s' % (self.user_id,
passwd))).decode('utf-8'))
return headers
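    # Illustrative sketch of the resulting header (values are assumptions):
    # with plain_auth=False the secret is SHA1-hashed before encoding, so for
    # user_id='oneadmin', key='opennebula' the header is roughly
    #   Authorization: Basic b64("oneadmin:" + sha1("opennebula").hexdigest())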
class OpenNebulaNodeSize(NodeSize):
"""
NodeSize class for the OpenNebula.org driver.
"""
def __init__(self, id, name, ram, disk, bandwidth, price, driver,
cpu=None, vcpu=None):
super(OpenNebulaNodeSize, self).__init__(id=id, name=name, ram=ram,
disk=disk,
bandwidth=bandwidth,
price=price, driver=driver)
self.cpu = cpu
self.vcpu = vcpu
def __repr__(self):
return (('<OpenNebulaNodeSize: id=%s, name=%s, ram=%s, disk=%s, '
'bandwidth=%s, price=%s, driver=%s, cpu=%s, vcpu=%s ...>')
% (self.id, self.name, self.ram, self.disk, self.bandwidth,
self.price, self.driver.name, self.cpu, self.vcpu))
class OpenNebulaNetwork(object):
"""
Provide a common interface for handling networks of all types.
Network objects are analogous to physical switches connecting two or
more physical nodes together. The Network object provides the interface in
libcloud through which we can manipulate networks in different cloud
providers in the same way. Network objects don't actually do much directly
themselves, instead the network driver handles the connection to the
network.
You don't normally create a network object yourself; instead you use
a driver and then have that create the network for you.
    >>> from libcloud.network.drivers.dummy import DummyNetworkDriver
    >>> driver = DummyNetworkDriver()
>>> network = driver.create_network()
>>> network = driver.list_networks()[0]
>>> network.name
'dummy-1'
"""
def __init__(self, id, name, address, size, driver, extra=None):
self.id = str(id)
self.name = name
self.address = address
self.size = size
self.driver = driver
self.uuid = self.get_uuid()
self.extra = extra or {}
def get_uuid(self):
"""
Unique hash for this network.
The hash is a function of an SHA1 hash of the network's ID and
its driver which means that it should be unique between all
networks. In some subclasses (e.g. GoGrid) there is no ID
available so the public IP address is used. This means that,
unlike a properly done system UUID, the same UUID may mean a
different system install at a different time
>>> from libcloud.network.drivers.dummy import DummyNetworkDriver
>>> driver = DummyNetworkDriver()
>>> network = driver.create_network()
>>> network.get_uuid()
'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'
Note, for example, that this example will always produce the
same UUID!
@rtype: C{string}
@return: Unique identifier for this instance.
"""
return hashlib.sha1(b("%s:%s" % (self.id,
self.driver.type))).hexdigest()
def __repr__(self):
return (('<OpenNebulaNetwork: uuid=%s, name=%s, address=%s, size=%s, '
'provider=%s ...>')
% (self.uuid, self.name, self.address, self.size,
self.driver.name))
class OpenNebulaNodeDriver(NodeDriver):
"""
OpenNebula.org node driver.
"""
connectionCls = OpenNebulaConnection
name = 'OpenNebula'
website = 'http://opennebula.org/'
type = Provider.OPENNEBULA
NODE_STATE_MAP = {
'INIT': NodeState.PENDING,
'PENDING': NodeState.PENDING,
'HOLD': NodeState.PENDING,
'ACTIVE': NodeState.RUNNING,
'STOPPED': NodeState.TERMINATED,
'SUSPENDED': NodeState.PENDING,
'DONE': NodeState.TERMINATED,
'FAILED': NodeState.TERMINATED}
def __new__(cls, key, secret=None, api_version=DEFAULT_API_VERSION,
**kwargs):
if cls is OpenNebulaNodeDriver:
if api_version in ['1.4']:
cls = OpenNebula_1_4_NodeDriver
elif api_version in ['2.0', '2.2']:
cls = OpenNebula_2_0_NodeDriver
elif api_version in ['3.0']:
cls = OpenNebula_3_0_NodeDriver
elif api_version in ['3.2']:
cls = OpenNebula_3_2_NodeDriver
elif api_version in ['3.6']:
cls = OpenNebula_3_6_NodeDriver
elif api_version in ['3.8']:
cls = OpenNebula_3_8_NodeDriver
if 'plain_auth' not in kwargs:
kwargs['plain_auth'] = cls.plain_auth
else:
cls.plain_auth = kwargs['plain_auth']
else:
raise NotImplementedError(
"No OpenNebulaNodeDriver found for API version %s" %
(api_version))
return super(OpenNebulaNodeDriver, cls).__new__(cls)
def create_node(self, **kwargs):
"""
Create a new OpenNebula node.
@inherits: L{NodeDriver.create_node}
@keyword networks: List of virtual networks to which this node should
connect. (optional)
@type networks: L{OpenNebulaNetwork} or
C{list} of L{OpenNebulaNetwork}
"""
compute = ET.Element('COMPUTE')
name = ET.SubElement(compute, 'NAME')
name.text = kwargs['name']
instance_type = ET.SubElement(compute, 'INSTANCE_TYPE')
instance_type.text = kwargs['size'].name
storage = ET.SubElement(compute, 'STORAGE')
ET.SubElement(storage,
'DISK',
{'image': '%s' % (str(kwargs['image'].id))})
if 'networks' in kwargs:
if not isinstance(kwargs['networks'], list):
kwargs['networks'] = [kwargs['networks']]
networkGroup = ET.SubElement(compute, 'NETWORK')
for network in kwargs['networks']:
if network.address:
ET.SubElement(networkGroup, 'NIC',
{'network': '%s' % (str(network.id)),
'ip': network.address})
else:
ET.SubElement(networkGroup, 'NIC',
{'network': '%s' % (str(network.id))})
xml = ET.tostring(compute)
node = self.connection.request('/compute', method='POST',
data=xml).object
return self._to_node(node)
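    # Hypothetical usage sketch (credentials, host, and the chosen size/image/
    # network are assumptions, not real values):
    #
    #   driver = OpenNebulaNodeDriver('user', 'secret', host='cloud.example.com')
    #   size = driver.list_sizes()[0]
    #   image = driver.list_images()[0]
    #   net = driver.ex_list_networks()[0]
    #   node = driver.create_node(name='vm1', size=size, image=image, networks=net)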
def destroy_node(self, node):
url = '/compute/%s' % (str(node.id))
resp = self.connection.request(url, method='DELETE')
return resp.status == httplib.OK
def list_nodes(self):
return self._to_nodes(self.connection.request('/compute').object)
def list_images(self, location=None):
return self._to_images(self.connection.request('/storage').object)
def list_sizes(self, location=None):
"""
Return list of sizes on a provider.
@inherits: L{NodeDriver.list_sizes}
@return: List of compute node sizes supported by the cloud provider.
@rtype: C{list} of L{OpenNebulaNodeSize}
"""
return [
NodeSize(id=1,
name='small',
ram=None,
disk=None,
bandwidth=None,
price=None,
driver=self),
NodeSize(id=2,
name='medium',
ram=None,
disk=None,
bandwidth=None,
price=None,
driver=self),
NodeSize(id=3,
name='large',
ram=None,
disk=None,
bandwidth=None,
price=None,
driver=self),
]
def list_locations(self):
return [NodeLocation(0, '', '', self)]
def ex_list_networks(self, location=None):
"""
List virtual networks on a provider.
@type location: L{NodeLocation}
@param location: Location from which to request a list of virtual
networks. (optional)
@return: List of virtual networks available to be connected to a
compute node.
@rtype: C{list} of L{OpenNebulaNetwork}
"""
return self._to_networks(self.connection.request('/network').object)
def ex_node_action(self, node, action):
"""
Build action representation and instruct node to commit action.
Build action representation from the compute node ID, and the
action which should be carried out on that compute node. Then
instruct the node to carry out that action.
@param node: Compute node instance.
@type node: L{Node}
@param action: Action to be carried out on the compute node.
@type action: C{str}
@return: False if an HTTP Bad Request is received, else, True is
returned.
@rtype: C{bool}
"""
compute_node_id = str(node.id)
compute = ET.Element('COMPUTE')
compute_id = ET.SubElement(compute, 'ID')
compute_id.text = compute_node_id
state = ET.SubElement(compute, 'STATE')
state.text = action
xml = ET.tostring(compute)
url = '/compute/%s' % compute_node_id
resp = self.connection.request(url, method='PUT',
data=xml)
if resp.status == httplib.BAD_REQUEST:
return False
else:
return True
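    # Illustrative sketch (assumes ``driver`` and ``node`` as in the create_node
    # example above): commit one of the ACTION states to a running node.
    #
    #   driver.ex_node_action(node, ACTION.SHUTDOWN)
    #   driver.ex_node_action(node, ACTION.SUSPEND)
    #   driver.ex_node_action(node, ACTION.RESUME)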
def _to_images(self, object):
"""
Request a list of images and convert that list to a list of NodeImage
objects.
Request a list of images from the OpenNebula web interface, and
issue a request to convert each XML object representation of an image
to a NodeImage object.
@rtype: C{list} of L{NodeImage}
@return: List of images.
"""
images = []
for element in object.findall('DISK'):
image_id = element.attrib['href'].partition('/storage/')[2]
image = self.connection.request(
('/storage/%s' % (image_id))).object
images.append(self._to_image(image))
return images
def _to_image(self, image):
"""
Take XML object containing an image description and convert to
NodeImage object.
@type image: L{ElementTree}
@param image: XML representation of an image.
@rtype: L{NodeImage}
@return: The newly extracted L{NodeImage}.
"""
return NodeImage(id=image.findtext('ID'),
name=image.findtext('NAME'),
driver=self.connection.driver,
extra={'size': image.findtext('SIZE'),
'url': image.findtext('URL')})
def _to_networks(self, object):
"""
Request a list of networks and convert that list to a list of
OpenNebulaNetwork objects.
Request a list of networks from the OpenNebula web interface, and
issue a request to convert each XML object representation of a network
to an OpenNebulaNetwork object.
@rtype: C{list} of L{OpenNebulaNetwork}
@return: List of virtual networks.
"""
networks = []
for element in object.findall('NETWORK'):
network_id = element.attrib['href'].partition('/network/')[2]
network_element = self.connection.request(
('/network/%s' % (network_id))).object
networks.append(self._to_network(network_element))
return networks
def _to_network(self, element):
"""
Take XML object containing a network description and convert to
OpenNebulaNetwork object.
Take XML representation containing a network description and
convert to OpenNebulaNetwork object.
@rtype: L{OpenNebulaNetwork}
@return: The newly extracted L{OpenNebulaNetwork}.
"""
return OpenNebulaNetwork(id=element.findtext('ID'),
name=element.findtext('NAME'),
address=element.findtext('ADDRESS'),
size=element.findtext('SIZE'),
driver=self.connection.driver)
def _to_nodes(self, object):
"""
Request a list of compute nodes and convert that list to a list of
Node objects.
Request a list of compute nodes from the OpenNebula web interface, and
issue a request to convert each XML object representation of a node
to a Node object.
@rtype: C{list} of L{Node}
@return: A list of compute nodes.
"""
computes = []
for element in object.findall('COMPUTE'):
compute_id = element.attrib['href'].partition('/compute/')[2]
compute = self.connection.request(
('/compute/%s' % (compute_id))).object
computes.append(self._to_node(compute))
return computes
def _to_node(self, compute):
"""
Take XML object containing a compute node description and convert to
Node object.
Take XML representation containing a compute node description and
convert to Node object.
@type compute: L{ElementTree}
@param compute: XML representation of a compute node.
@rtype: L{Node}
@return: The newly extracted L{Node}.
"""
try:
state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()]
except KeyError:
state = NodeState.UNKNOWN
return Node(id=compute.findtext('ID'),
name=compute.findtext('NAME'),
state=state,
public_ips=self._extract_networks(compute),
private_ips=[],
driver=self.connection.driver,
image=self._extract_images(compute))
def _extract_networks(self, compute):
"""
Extract networks from a compute node XML representation.
Extract network descriptions from a compute node XML representation,
converting each network to an OpenNebulaNetwork object.
@type compute: L{ElementTree}
@param compute: XML representation of a compute node.
@rtype: C{list} of L{OpenNebulaNetwork}s.
@return: List of virtual networks attached to the compute node.
"""
networks = list()
network_list = compute.find('NETWORK')
for element in network_list.findall('NIC'):
networks.append(
OpenNebulaNetwork(id=element.attrib.get('network', None),
name=None,
address=element.attrib.get('ip', None),
size=1,
driver=self.connection.driver))
return networks
def _extract_images(self, compute):
"""
Extract image disks from a compute node XML representation.
Extract image disk descriptions from a compute node XML representation,
converting the disks to an NodeImage object.
@type compute: L{ElementTree}
@param compute: XML representation of a compute node.
@rtype: L{NodeImage}.
@return: First disk attached to a compute node.
"""
disks = list()
disk_list = compute.find('STORAGE')
if disk_list is not None:
for element in disk_list.findall('DISK'):
disks.append(
NodeImage(id=element.attrib.get('image', None),
name=None,
driver=self.connection.driver,
extra={'dev': element.attrib.get('dev', None)}))
# @TODO: Return all disks when the Node type accepts multiple
# attached disks per node.
if len(disks) > 0:
return disks[0]
else:
return None
class OpenNebula_1_4_NodeDriver(OpenNebulaNodeDriver):
"""
OpenNebula.org node driver for OpenNebula.org v1.4.
"""
pass
class OpenNebula_2_0_NodeDriver(OpenNebulaNodeDriver):
"""
OpenNebula.org node driver for OpenNebula.org v2.0 through OpenNebula.org
v2.2.
"""
def create_node(self, **kwargs):
"""
Create a new OpenNebula node.
@inherits: L{NodeDriver.create_node}
@keyword networks: List of virtual networks to which this node should
connect. (optional)
@type networks: L{OpenNebulaNetwork} or C{list}
of L{OpenNebulaNetwork}
@keyword context: Custom (key, value) pairs to be injected into
compute node XML description. (optional)
@type context: C{dict}
@return: Instance of a newly created node.
@rtype: L{Node}
"""
compute = ET.Element('COMPUTE')
name = ET.SubElement(compute, 'NAME')
name.text = kwargs['name']
instance_type = ET.SubElement(compute, 'INSTANCE_TYPE')
instance_type.text = kwargs['size'].name
disk = ET.SubElement(compute, 'DISK')
ET.SubElement(disk,
'STORAGE',
{'href': '/storage/%s' % (str(kwargs['image'].id))})
if 'networks' in kwargs:
if not isinstance(kwargs['networks'], list):
kwargs['networks'] = [kwargs['networks']]
for network in kwargs['networks']:
nic = ET.SubElement(compute, 'NIC')
ET.SubElement(nic, 'NETWORK',
{'href': '/network/%s' % (str(network.id))})
if network.address:
ip_line = ET.SubElement(nic, 'IP')
ip_line.text = network.address
if 'context' in kwargs:
if isinstance(kwargs['context'], dict):
contextGroup = ET.SubElement(compute, 'CONTEXT')
for key, value in list(kwargs['context'].items()):
context = ET.SubElement(contextGroup, key.upper())
context.text = value
xml = ET.tostring(compute)
node = self.connection.request('/compute', method='POST',
data=xml).object
return self._to_node(node)
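    # Illustrative usage sketch, not part of the upstream driver: how a caller
    # might exercise the ``networks`` and ``context`` keywords documented
    # above. The host, credentials and picked objects are assumptions.
    #
    #     from libcloud.compute.types import Provider
    #     from libcloud.compute.providers import get_driver
    #
    #     cls = get_driver(Provider.OPENNEBULA)
    #     driver = cls('user', 'secret', host='one.example.com',
    #                  api_version='2.0')
    #     node = driver.create_node(name='worker-1',
    #                               size=driver.list_sizes()[0],
    #                               image=driver.list_images()[0],
    #                               networks=driver.ex_list_networks()[:1],
    #                               context={'hostname': 'worker-1'})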
def destroy_node(self, node):
url = '/compute/%s' % (str(node.id))
resp = self.connection.request(url, method='DELETE')
return resp.status == httplib.NO_CONTENT
def list_sizes(self, location=None):
"""
Return list of sizes on a provider.
@inherits: L{NodeDriver.list_sizes}
@return: List of compute node sizes supported by the cloud provider.
@rtype: C{list} of L{OpenNebulaNodeSize}
"""
return [
OpenNebulaNodeSize(id=1,
name='small',
ram=1024,
cpu=1,
disk=None,
bandwidth=None,
price=None,
driver=self),
OpenNebulaNodeSize(id=2,
name='medium',
ram=4096,
cpu=4,
disk=None,
bandwidth=None,
price=None,
driver=self),
OpenNebulaNodeSize(id=3,
name='large',
ram=8192,
cpu=8,
disk=None,
bandwidth=None,
price=None,
driver=self),
OpenNebulaNodeSize(id=4,
name='custom',
ram=0,
cpu=0,
disk=None,
bandwidth=None,
price=None,
driver=self),
]
def _to_images(self, object):
"""
Request a list of images and convert that list to a list of NodeImage
objects.
Request a list of images from the OpenNebula web interface, and
issue a request to convert each XML object representation of an image
to a NodeImage object.
@rtype: C{list} of L{NodeImage}
@return: List of images.
"""
images = []
for element in object.findall('STORAGE'):
image_id = element.attrib["href"].partition("/storage/")[2]
image = self.connection.request(
("/storage/%s" % (image_id))).object
images.append(self._to_image(image))
return images
def _to_image(self, image):
"""
Take XML object containing an image description and convert to
NodeImage object.
@type image: L{ElementTree}
@param image: XML representation of an image.
@rtype: L{NodeImage}
@return: The newly extracted L{NodeImage}.
"""
return NodeImage(id=image.findtext('ID'),
name=image.findtext('NAME'),
driver=self.connection.driver,
extra={'description': image.findtext('DESCRIPTION'),
'type': image.findtext('TYPE'),
'size': image.findtext('SIZE'),
'fstype': image.findtext('FSTYPE', None)})
def _to_node(self, compute):
"""
        Take XML representation containing a compute node description and
        convert it to a Node object.
@type compute: L{ElementTree}
@param compute: XML representation of a compute node.
@rtype: L{Node}
@return: The newly extracted L{Node}.
"""
try:
state = self.NODE_STATE_MAP[compute.findtext('STATE').upper()]
except KeyError:
state = NodeState.UNKNOWN
return Node(id=compute.findtext('ID'),
name=compute.findtext('NAME'),
state=state,
public_ips=self._extract_networks(compute),
private_ips=[],
driver=self.connection.driver,
image=self._extract_images(compute),
size=self._extract_size(compute),
extra={'context': self._extract_context(compute)})
def _extract_networks(self, compute):
"""
Extract networks from a compute node XML representation.
Extract network descriptions from a compute node XML representation,
converting each network to an OpenNebulaNetwork object.
@type compute: L{ElementTree}
@param compute: XML representation of a compute node.
@rtype: C{list} of L{OpenNebulaNetwork}
@return: List of virtual networks attached to the compute node.
"""
networks = []
for element in compute.findall('NIC'):
network = element.find('NETWORK')
network_id = network.attrib['href'].partition('/network/')[2]
networks.append(
OpenNebulaNetwork(id=network_id,
name=network.attrib.get('name', None),
address=element.findtext('IP'),
size=1,
driver=self.connection.driver,
extra={'mac': element.findtext('MAC')}))
return networks
def _extract_images(self, compute):
"""
Extract image disks from a compute node XML representation.
Extract image disk descriptions from a compute node XML representation,
        converting the disks to NodeImage objects.
@type compute: L{ElementTree}
@param compute: XML representation of a compute node.
@rtype: C{list} of L{NodeImage}
@return: Disks attached to a compute node.
"""
disks = list()
for element in compute.findall('DISK'):
disk = element.find('STORAGE')
image_id = disk.attrib['href'].partition('/storage/')[2]
if 'id' in element.attrib:
disk_id = element.attrib['id']
else:
disk_id = None
disks.append(
NodeImage(id=image_id,
name=disk.attrib.get('name', None),
driver=self.connection.driver,
extra={'type': element.findtext('TYPE'),
'disk_id': disk_id,
'target': element.findtext('TARGET')}))
# Return all disks when the Node type accepts multiple attached disks
# per node.
if len(disks) > 1:
return disks
if len(disks) == 1:
return disks[0]
def _extract_size(self, compute):
"""
Extract size, or node type, from a compute node XML representation.
Extract node size, or node type, description from a compute node XML
representation, converting the node size to a NodeSize object.
@type compute: L{ElementTree}
@param compute: XML representation of a compute node.
@rtype: L{OpenNebulaNodeSize}
@return: Node type of compute node.
"""
instance_type = compute.find('INSTANCE_TYPE')
try:
return next((node_size for node_size in self.list_sizes()
if node_size.name == instance_type.text))
except StopIteration:
return None
def _extract_context(self, compute):
"""
        Extract context variables from a compute node XML representation.
        Extract the (key, value) pairs found in the compute node's CONTEXT
        element, returning them as a dictionary with lower-cased keys.
@type compute: L{ElementTree}
@param compute: XML representation of a compute node.
@rtype: C{dict}
@return: Dictionary containing (key, value) pairs related to
compute node context.
"""
contexts = dict()
context = compute.find('CONTEXT')
if context is not None:
for context_element in list(context):
contexts[context_element.tag.lower()] = context_element.text
return contexts
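    # For reference, a made-up example of the element parsed above: a compute
    # node carrying
    #
    #     <CONTEXT>
    #         <HOSTNAME>worker-1</HOSTNAME>
    #         <PUBLIC_IP>192.0.2.10</PUBLIC_IP>
    #     </CONTEXT>
    #
    # is reported here as {'hostname': 'worker-1', 'public_ip': '192.0.2.10'}.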
class OpenNebula_3_0_NodeDriver(OpenNebula_2_0_NodeDriver):
"""
OpenNebula.org node driver for OpenNebula.org v3.0.
"""
def ex_node_set_save_name(self, node, name):
"""
Build action representation and instruct node to commit action.
Build action representation from the compute node ID, the disk image
which will be saved, and the name under which the image will be saved
upon shutting down the compute node.
@param node: Compute node instance.
@type node: L{Node}
@param name: Name under which the image should be saved after shutting
down the compute node.
@type name: C{str}
@return: False if an HTTP Bad Request is received, else, True is
returned.
@rtype: C{bool}
"""
compute_node_id = str(node.id)
compute = ET.Element('COMPUTE')
compute_id = ET.SubElement(compute, 'ID')
compute_id.text = compute_node_id
disk = ET.SubElement(compute, 'DISK', {'id': str(node.image.id)})
ET.SubElement(disk, 'STORAGE',
{'href': '/storage/%s' % (str(node.image.id)),
'name': node.image.name})
ET.SubElement(disk, 'SAVE_AS', {'name': str(name)})
xml = ET.tostring(compute)
url = '/compute/%s' % compute_node_id
resp = self.connection.request(url, method='PUT',
data=xml)
if resp.status == httplib.BAD_REQUEST:
return False
else:
return True
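    # Illustrative sketch (assumption, not upstream code): saving a node's
    # disk under a new image name before shutting the node down.
    #
    #     node = driver.list_nodes()[0]
    #     if driver.ex_node_set_save_name(node, 'worker-1-golden'):
    #         driver.destroy_node(node)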
def _to_network(self, element):
"""
        Take XML representation containing a network description and convert
        it to an OpenNebulaNetwork object.
@return: The newly extracted L{OpenNebulaNetwork}.
@rtype: L{OpenNebulaNetwork}
"""
return OpenNebulaNetwork(id=element.findtext('ID'),
name=element.findtext('NAME'),
address=element.findtext('ADDRESS'),
size=element.findtext('SIZE'),
driver=self.connection.driver,
extra={'public': element.findtext('PUBLIC')})
class OpenNebula_3_2_NodeDriver(OpenNebula_3_0_NodeDriver):
"""
OpenNebula.org node driver for OpenNebula.org v3.2.
"""
def reboot_node(self, node):
return self.ex_node_action(node, ACTION.REBOOT)
def list_sizes(self, location=None):
"""
Return list of sizes on a provider.
@inherits: L{NodeDriver.list_sizes}
@return: List of compute node sizes supported by the cloud provider.
@rtype: C{list} of L{OpenNebulaNodeSize}
"""
return self._to_sizes(self.connection.request('/instance_type').object)
def _to_sizes(self, object):
"""
Request a list of instance types and convert that list to a list of
OpenNebulaNodeSize objects.
Request a list of instance types from the OpenNebula web interface,
and issue a request to convert each XML object representation of an
instance type to an OpenNebulaNodeSize object.
@return: List of instance types.
@rtype: C{list} of L{OpenNebulaNodeSize}
"""
sizes = []
size_id = 1
attributes = [('name', str, None), ('ram', int, 'MEMORY'),
('cpu', float, None), ('vcpu', float, None),
('disk', str, None), ('bandwidth', float, None),
('price', float, None)]
for element in object.findall('INSTANCE_TYPE'):
size_kwargs = {'id': size_id, 'driver': self}
values = self._get_attributes_values(attributes=attributes,
element=element)
size_kwargs.update(values)
size = OpenNebulaNodeSize(**size_kwargs)
sizes.append(size)
size_id += 1
return sizes
def _get_attributes_values(self, attributes, element):
values = {}
for attribute_name, attribute_type, alias in attributes:
key = alias if alias else attribute_name.upper()
value = element.findtext(key)
if value is not None:
value = attribute_type(value)
values[attribute_name] = value
return values
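    # Rough shape of the conversion performed by _to_sizes() above; the XML is
    # a made-up example, not actual server output:
    #
    #     <INSTANCE_TYPE>
    #         <NAME>small</NAME>
    #         <MEMORY>1024</MEMORY>
    #         <CPU>1</CPU>
    #     </INSTANCE_TYPE>
    #
    # becomes OpenNebulaNodeSize(id=1, name='small', ram=1024, cpu=1.0,
    # vcpu=None, disk=None, bandwidth=None, price=None).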
class OpenNebula_3_6_NodeDriver(OpenNebula_3_2_NodeDriver):
"""
OpenNebula.org node driver for OpenNebula.org v3.6.
"""
def _to_volume(self, storage):
return StorageVolume(id=storage.findtext('ID'),
name=storage.findtext('NAME'),
size=int(storage.findtext('SIZE')),
driver=self.connection.driver)
def create_volume(self, size, name, location=None, snapshot=None, persistent=True):
storage = ET.Element('STORAGE')
vol_name = ET.SubElement(storage, 'NAME')
vol_name.text = name
vol_type = ET.SubElement(storage, 'TYPE')
vol_type.text = 'DATABLOCK'
description = ET.SubElement(storage, 'DESCRIPTION')
description.text = 'Attached storage'
public = ET.SubElement(storage, 'PUBLIC')
public.text = 'NO'
        vol_persistent = ET.SubElement(storage, 'PERSISTENT')
        # Use a separate name for the XML element so the boolean ``persistent``
        # argument is not shadowed before it is tested.
        if persistent:
            vol_persistent.text = 'YES'
        else:
            vol_persistent.text = 'NO'
fstype = ET.SubElement(storage, 'FSTYPE')
fstype.text = 'ext3'
vol_size = ET.SubElement(storage, 'SIZE')
vol_size.text = str(size)
xml = ET.tostring(storage)
        volume = self.connection.request('/storage',
                                         {'occixml': xml}, method='POST').object
return self._to_volume(volume)
def destroy_volume(self, volume):
url = '/storage/%s' % (str(volume.id))
resp = self.connection.request(url, method='DELETE')
return resp.status == httplib.NO_CONTENT
def attach_volume(self, node, volume, device):
action = ET.Element('ACTION')
perform = ET.SubElement(action, 'PERFORM')
perform.text = 'ATTACHDISK'
params = ET.SubElement(action, 'PARAMS')
ET.SubElement(params,
'STORAGE',
{'href': '/storage/%s' % (str(volume.id))})
target = ET.SubElement(params, 'TARGET')
target.text = device
xml = ET.tostring(action)
url = '/compute/%s/action' % node.id
resp = self.connection.request(url, method='POST', data=xml)
return resp.status == httplib.ACCEPTED
def _do_detach_volume(self, node_id, disk_id):
action = ET.Element('ACTION')
perform = ET.SubElement(action, 'PERFORM')
perform.text = 'DETACHDISK'
params = ET.SubElement(action, 'PARAMS')
ET.SubElement(params,
'DISK',
{'id': disk_id})
xml = ET.tostring(action)
url = '/compute/%s/action' % node_id
resp = self.connection.request(url, method='POST', data=xml)
return resp.status == httplib.ACCEPTED
def detach_volume(self, volume):
# We need to find the node using this volume
for node in self.list_nodes():
if type(node.image) is not list:
# This node has only one associated image. It is not the one we
# are after.
continue
for disk in node.image:
if disk.id == volume.id:
# Node found. We can now detach the volume
disk_id = disk.extra['disk_id']
return self._do_detach_volume(node.id, disk_id)
return False
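    # Illustrative volume lifecycle sketch (assumption, not upstream code);
    # the size, names and target device are made up:
    #
    #     volume = driver.create_volume(size=10240, name='data-disk')
    #     node = driver.list_nodes()[0]
    #     driver.attach_volume(node, volume, 'vdb')
    #     ...
    #     driver.detach_volume(volume)
    #     driver.destroy_volume(volume)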
class OpenNebula_3_8_NodeDriver(OpenNebula_3_6_NodeDriver):
"""
OpenNebula.org node driver for OpenNebula.org v3.8.
"""
plain_auth = API_PLAIN_AUTH
def _to_sizes(self, object):
"""
Request a list of instance types and convert that list to a list of
OpenNebulaNodeSize objects.
Request a list of instance types from the OpenNebula web interface,
and issue a request to convert each XML object representation of an
instance type to an OpenNebulaNodeSize object.
@return: List of instance types.
@rtype: C{list} of L{OpenNebulaNodeSize}
"""
sizes = []
size_id = 1
attributes = [('name', str, None), ('ram', int, 'MEMORY'),
('cpu', float, None), ('vcpu', float, None),
('disk', str, None), ('bandwidth', float, None),
('price', float, None)]
for element in object.findall('INSTANCE_TYPE'):
            element = self.connection.request(
                ('/instance_type/%s' % (element.attrib['name']))).object
size_kwargs = {'id': size_id, 'driver': self}
values = self._get_attributes_values(attributes=attributes,
element=element)
size_kwargs.update(values)
size = OpenNebulaNodeSize(**size_kwargs)
sizes.append(size)
size_id += 1
return sizes
def _ex_connection_class_kwargs(self):
"""
Set plain_auth as an extra L{OpenNebulaConnection_3_8} argument
@return: C{dict} of L{OpenNebulaConnection_3_8} input arguments
"""
return {'plain_auth': self.plain_auth}
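    # Illustrative sketch (assumption): requesting plain-text authentication
    # when instantiating the v3.8 driver. Whether ``plain_auth`` is accepted
    # as a constructor keyword depends on the driver selection code earlier in
    # this module.
    #
    #     cls = get_driver(Provider.OPENNEBULA)
    #     driver = cls('user', 'secret', host='one.example.com',
    #                  api_version='3.8', plain_auth=True)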
| bsd-3-clause | 2,607,744,555,827,277,300 | 33.355932 | 87 | 0.558743 | false |
kkoci/orthosie | inventory/api_views.py | 1 | 6650 | # This file is part of Orthosie.
#
# Orthosie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Orthosie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Orthosie. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import viewsets
from rest_framework.decorators import api_view, detail_route
from rest_framework.response import Response
from rest_framework.reverse import reverse
from inventory.serializers import ItemSerializer, GrocerySerializer
from inventory.serializers import ProduceSerializer, VendorSerializer
from inventory.models import Item, Grocery, Produce, Vendor
@api_view(['GET'])
def api_root(request, format=None):
"""
The entry endpoint of our API.
"""
return Response({
'item': reverse('item-list', request=request),
'grocery': reverse('grocery-list', request=request),
'produce': reverse('produce-list', request=request),
'vendor': reverse('vendor-list', request=request)
})
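# Illustrative response shape for the root endpoint above. The host and URL
# prefixes are assumptions; they depend on how the urlconf mounts these views:
#
#     {
#         "item": "http://localhost:8000/item/",
#         "grocery": "http://localhost:8000/grocery/",
#         "produce": "http://localhost:8000/produce/",
#         "vendor": "http://localhost:8000/vendor/"
#     }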
class ItemViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows items to be viewed or edited.
"""
queryset = Item.objects.all()
serializer_class = ItemSerializer
class GroceryViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groceries to be viewed or edited.
"""
queryset = Grocery.objects.all()
serializer_class = GrocerySerializer
@detail_route(
methods=['post']
)
def update_vendor(self, request, *args, **kwargs):
grocery = self.get_object()
try:
vendor = Vendor.objects.get(name=request.POST['vendor'])
except ObjectDoesNotExist:
vendor = Vendor(name=request.POST['vendor'])
vendor.save()
grocery.vendor = vendor
grocery.save()
grocery = self.get_object()
serializer = self.get_serializer(grocery)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_name(self, request, *args, **kwargs):
grocery = self.get_object()
grocery.name = request.POST['name']
grocery.save()
grocery = self.get_object()
serializer = self.get_serializer(grocery)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_price(self, request, *args, **kwargs):
grocery = self.get_object()
grocery.price = request.POST['price']
grocery.save()
grocery = self.get_object()
serializer = self.get_serializer(grocery)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_taxable(self, request, *args, **kwargs):
grocery = self.get_object()
grocery.taxable = (request.POST['taxable'].lower() == 'true')
grocery.save()
grocery = self.get_object()
serializer = self.get_serializer(grocery)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_scalable(self, request, *args, **kwargs):
grocery = self.get_object()
grocery.scalable = (request.POST['scalable'].lower() == 'true')
grocery.save()
grocery = self.get_object()
serializer = self.get_serializer(grocery)
return Response(serializer.data)
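    # Illustrative client calls for the @detail_route endpoints above
    # (assumption, not part of the app); the URL prefix depends on how the
    # DRF router is registered in urls.py:
    #
    #     curl -X POST http://localhost:8000/grocery/1/update_price/ \
    #          -d 'price=2.99'
    #     curl -X POST http://localhost:8000/grocery/1/update_taxable/ \
    #          -d 'taxable=true'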
class ProduceViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows produce to be viewed or edited.
"""
queryset = Produce.objects.all()
serializer_class = ProduceSerializer
@detail_route(
methods=['post']
)
def update_name(self, request, *args, **kwargs):
produce = self.get_object()
produce.name = request.POST['name']
produce.save()
produce = self.get_object()
serializer = self.get_serializer(produce)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_variety(self, request, *args, **kwargs):
produce = self.get_object()
produce.variety = request.POST['variety']
produce.save()
produce = self.get_object()
serializer = self.get_serializer(produce)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_size(self, request, *args, **kwargs):
produce = self.get_object()
produce.size = request.POST['size']
produce.save()
produce = self.get_object()
serializer = self.get_serializer(produce)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_botanical(self, request, *args, **kwargs):
produce = self.get_object()
produce.botanical = request.POST['botanical']
produce.save()
produce = self.get_object()
serializer = self.get_serializer(produce)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_price(self, request, *args, **kwargs):
produce = self.get_object()
produce.price = request.POST['price']
produce.save()
produce = self.get_object()
serializer = self.get_serializer(produce)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_taxable(self, request, *args, **kwargs):
produce = self.get_object()
produce.taxable = (request.POST['taxable'].lower() == 'true')
produce.save()
produce = self.get_object()
serializer = self.get_serializer(produce)
return Response(serializer.data)
@detail_route(
methods=['post']
)
def update_scalable(self, request, *args, **kwargs):
produce = self.get_object()
produce.scalable = (request.POST['scalable'].lower() == 'true')
produce.save()
produce = self.get_object()
serializer = self.get_serializer(produce)
return Response(serializer.data)
class VendorViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows vendors to be viewed or edited.
"""
queryset = Vendor.objects.all()
serializer_class = VendorSerializer
| gpl-3.0 | -2,889,850,686,731,528,000 | 30.818182 | 73 | 0.633835 | false |
bakwc/PySyncObj | docs/source/conf.py | 1 | 9934 | # -*- coding: utf-8 -*-
#
# PySyncObj documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 17 17:25:17 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PySyncObj'
copyright = u'2021, Filipp Ozinov'
author = u'Filipp Ozinov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.3.8'
# The full version, including alpha/beta/rc tags.
release = u'0.3.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'PySyncObj v0.2.3'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySyncObjdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PySyncObj.tex', u'PySyncObj Documentation',
u'Filipp Ozinov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysyncobj', u'PySyncObj Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PySyncObj', u'PySyncObj Documentation',
author, 'PySyncObj', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
autoclass_content = "both"
| mit | -6,954,531,954,047,670,000 | 27.628242 | 80 | 0.692974 | false |
Eldinnie/python-telegram-bot | telegram/inline/inlinequeryresultcachedmpeg4gif.py | 1 | 3903 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultMpeg4Gif."""
from telegram import InlineQueryResult
class InlineQueryResultCachedMpeg4Gif(InlineQueryResult):
"""
Represents a link to a video animation (H.264/MPEG-4 AVC video without sound) stored on the
Telegram servers. By default, this animated MPEG-4 file will be sent by the user with an
optional caption. Alternatively, you can use :attr:`input_message_content` to send a message
with the specified content instead of the animation.
Attributes:
type (:obj:`str`): 'mpeg4_gif'.
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file.
title (:obj:`str`): Optional. Title for the result.
caption (:obj:`str`): Optional. Caption, 0-200 characters
parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show
            bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the MPEG-4 file.
Args:
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file.
title (:obj:`str`, optional): Title for the result.
caption (:obj:`str`, optional): Caption, 0-200 characters
parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show
            bold, italic, fixed-width text or inline URLs in the media caption. See the constants
in :class:`telegram.ParseMode` for the available modes.
reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the
message to be sent instead of the MPEG-4 file.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self,
id,
mpeg4_file_id,
title=None,
caption=None,
reply_markup=None,
input_message_content=None,
parse_mode=None,
**kwargs):
# Required
super(InlineQueryResultCachedMpeg4Gif, self).__init__('mpeg4_gif', id)
self.mpeg4_file_id = mpeg4_file_id
# Optionals
if title:
self.title = title
if caption:
self.caption = caption
if parse_mode:
self.parse_mode = parse_mode
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
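    # Illustrative usage sketch (assumption, not library code): answering an
    # inline query with a previously cached MPEG-4 file_id.
    #
    #     result = InlineQueryResultCachedMpeg4Gif(
    #         id='1', mpeg4_file_id=cached_file_id, caption='*fancy*',
    #         parse_mode='Markdown')
    #     bot.answer_inline_query(update.inline_query.id, [result])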
| gpl-3.0 | 4,835,468,741,908,030,000 | 45.464286 | 99 | 0.65898 | false |