repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
topliceanu/learn | python/algo/test/test_programming_set_2_1.py | 1 | 3883 | # -*- coding: utf-8 -*-
""" In this programming problem and the next you'll code up the greedy
algorithms from lecture for minimizing the weighted sum of completion times.
Use file [./Jobs.txt]. This file describes a set of jobs with positive and
integral weights and lengths. It has the format
[job_1_weight] [job_1_length]
[job_2_weight] [job_2_length]
...
For example, the third line of the file is "74 59", indicating that the second
job has weight 74 and length 59. You should NOT assume that job weights or
lengths are distinct.
Your task in this problem is to run the greedy algorithm that schedules jobs in
decreasing order of the difference (weight - length). Recall from lecture that
this algorithm is not always optimal. IMPORTANT: if two jobs have equal
difference (weight - length), you should schedule the job with higher weight
first. Beware: if you break ties in a different way, you are likely to get the
wrong answer. You should report the sum of weighted completion times of the
resulting schedule --- a positive integer --- in the box below.
ADVICE: If you get the wrong answer, try out some small test cases to debug
your algorithm (and post your test cases to the discussion forum)!
"""
import os
#from src.job_scheduling import schedule
#
#
#jobs1 = []
#jobs2 = []
#with open('{base}/test/Jobs.txt'.format(base=os.getcwd()), 'r') as f:
# key = 1
# for line in f:
# [weight, length] = map(int, line.split())
# jobs1.append([key, weight, length])
# jobs2.append([key, weight, length])
# key += 1
#
#output1 = schedule(jobs1, score='diff')
#print '>>>> diff: ', output1['sum_completion_time']
#
#output2 = schedule(jobs2, score='ratio')
#print '>>>> ratio: ', output2['sum_completion_time']
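# --- Illustrative sketch (not from this repository) ---------------------------
# The commented-out driver above relies on ``schedule`` from
# ``src.job_scheduling``, which is not shown here. The function below is a
# minimal, self-contained sketch of the greedy rule described in the docstring
# (order by decreasing weight - length, ties broken by higher weight); its name
# and the (weight, length) tuple layout are assumptions made for illustration.
def weighted_completion_time_by_difference(jobs):
    """Return the weighted sum of completion times for the 'difference' rule.

    ``jobs`` is a list of (weight, length) pairs of positive integers.
    """
    ordered = sorted(jobs, key=lambda wl: (wl[0] - wl[1], wl[0]), reverse=True)
    total = 0
    completion = 0
    for weight, length in ordered:
        completion += length   # completion time is the running sum of lengths
        total += weight * completion
    return total
# Example: weighted_completion_time_by_difference([(3, 5), (1, 2)]) == 23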
""" In this programming problem you'll code up Prim's minimum spanning tree
algorithm. Use the text file [./Edges.txt]. This file describes an undirected
graph with integer edge costs. It has the format
[number_of_nodes] [number_of_edges]
[one_node_of_edge_1] [other_node_of_edge_1] [edge_1_cost]
[one_node_of_edge_2] [other_node_of_edge_2] [edge_2_cost]
...
For example, the third line of the file is "2 3 -8874", indicating that there
is an edge connecting vertex #2 and vertex #3 that has cost -8874. You should
NOT assume that edge costs are positive, nor should you assume that they are
distinct.
Your task is to run Prim's minimum spanning tree algorithm on this graph. You
should report the overall cost of a minimum spanning tree --- an integer, which
may or may not be negative --- in the box below.
IMPLEMENTATION NOTES: This graph is small enough that the straightforward O(mn)
time implementation of Prim's algorithm should work fine. OPTIONAL: For those
of you seeking an additional challenge, try implementing a heap-based version.
The simpler approach, which should already give you a healthy speed-up, is to
maintain relevant edges in a heap (with keys = edge costs). The superior
approach stores the unprocessed vertices in the heap, as described in lecture.
Note this requires a heap that supports deletions, and you'll probably need to
maintain some kind of mapping between vertices and their positions in the heap.
"""
#from src.graph import Graph
#from src.minimum_spanning_tree import prims_suboptimal_mst, prims_heap_mst, \
# kruskal_suboptimal_mst, kruskal_union_find_mst
#
#
#g = Graph(directed=False)
#
#with open('{base}/test/Edges.txt'.format(base=os.getcwd()), 'r') as f:
# for line in f:
# [tail, head, cost] = line.split()
# g.add_edge((int(tail), int(head), int(cost)))
#
##mst = prims_suboptimal_mst(g) # -3612829
##mst = prims_heap_mst(g) # -3611276
##mst = kruskal_suboptimal_mst(g) # -3362091
##mst = kruskal_union_find_mst(g) # -3362091
#
#cost = 0
#for edge in mst.get_edges():
# cost += edge[2]
#
#print '>>> cost: ', cost
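# --- Illustrative sketch (not from this repository) ---------------------------
# The commented-out driver above imports ``prims_suboptimal_mst`` and friends
# from ``src.minimum_spanning_tree``, which are not included here. The function
# below sketches the straightforward O(mn) Prim's algorithm mentioned in the
# docstring; its name and the (tail, head, cost) edge format are assumptions
# made for illustration, and the graph is assumed to be connected.
def prim_mst_cost(num_nodes, edges):
    """Return the total cost of a minimum spanning tree over nodes 1..num_nodes."""
    spanned = {1}
    total_cost = 0
    while len(spanned) < num_nodes:
        best = None
        for u, v, cost in edges:
            # only edges crossing the cut (exactly one endpoint already spanned)
            if (u in spanned) != (v in spanned):
                if best is None or cost < best[0]:
                    best = (cost, u, v)
        cost, u, v = best
        total_cost += cost
        spanned.add(v if u in spanned else u)
    return total_cost
# Example: prim_mst_cost(3, [(1, 2, 1), (2, 3, 2), (1, 3, 4)]) == 3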
| mit | 2,231,773,159,586,177,300 | 38.622449 | 79 | 0.714139 | false |
Micronaet/micronaet-sql7 | sql_due_list_cei/cei.py | 1 | 1889 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Adding code for generate email and send from template thanks to OpenCode
#
###############################################################################
import os
import sys
import openerp.netsvc
import logging
from openerp.osv import osv, fields
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class sql_payment_duelist(osv.osv):
''' Payment stage for sending e-mail
'''
_name = 'sql.payment.duelist'
_inherit = 'sql.payment.duelist'
_columns = {
'type_cei': fields.related('partner_id', 'type_cei', type='selection',
string='CEI', selection=[
('i', 'Italy'),
('c', 'CEE'),
('e', 'Extra CEE'),
]),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 534,603,551,880,628,500 | 37.55102 | 121 | 0.625199 | false |
b-cube/OwsCapable | owscapable/fgdc.py | 1 | 10455 | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2010 Tom Kralidis
#
# Authors : Tom Kralidis <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
""" FGDC metadata parser """
from __future__ import (absolute_import, division, print_function)
from owscapable.etree import etree
from owscapable import util
class Metadata(object):
""" Process metadata """
def __init__(self, md):
if hasattr(md, 'getroot'): # standalone document
self.xml = etree.tostring(md.getroot())
else: # part of a larger document
self.xml = etree.tostring(md)
self.idinfo = Idinfo(md)
self.eainfo = Eainfo(md)
self.distinfo = Distinfo(md)
self.metainfo = Metainfo(md)
if self.idinfo.datasetid:
self.identifier = self.idinfo.datasetid
class Idinfo(object):
""" Process idinfo """
def __init__(self, md):
val = md.find('idinfo/datasetid')
self.datasetid = util.testXMLValue(val)
val = md.find('idinfo/citation')
self.citation = Citation(val)
val = md.find('idinfo/descript')
if val is not None:
self.descript = Descript(val)
val = md.find('idinfo/timeperd')
self.timeperd = Timeperd(val)
val = md.find('idinfo/status')
if val is not None:
self.status = Status(val)
val = md.find('idinfo/spdom')
if val is not None:
self.spdom = Spdom(val)
val = md.find('idinfo/keywords')
if val is not None:
self.keywords = Keywords(val)
val = md.find('idinfo/accconst')
self.accconst = util.testXMLValue(val)
val = md.find('idinfo/useconst')
self.useconst = util.testXMLValue(val)
val = md.find('idinfo/ptcontac')
if val is not None:
self.ptcontac = Ptcontac(val)
val = md.find('idinfo/datacred')
self.datacred = util.testXMLValue(val)
val = md.find('idinfo/crossref')
self.crossref = Citation(val)
class Citation(object):
""" Process citation """
def __init__(self, md):
if md is not None:
self.citeinfo = {}
val = md.find('citeinfo/origin')
self.citeinfo['origin'] = util.testXMLValue(val)
val = md.find('citeinfo/pubdate')
self.citeinfo['pubdate'] = util.testXMLValue(val)
val = md.find('citeinfo/title')
self.citeinfo['title'] = util.testXMLValue(val)
val = md.find('citeinfo/geoform')
self.citeinfo['geoform'] = util.testXMLValue(val)
val = md.find('citeinfo/pubinfo/pubplace')
self.citeinfo['pubplace'] = util.testXMLValue(val)
val = md.find('citeinfo/pubinfo/publish')
self.citeinfo['publish'] = util.testXMLValue(val)
self.citeinfo['onlink'] = []
for link in md.findall('citeinfo/onlink'):
self.citeinfo['onlink'].append(util.testXMLValue(link))
class Descript(object):
""" Process descript """
def __init__(self, md):
val = md.find('abstract')
self.abstract = util.testXMLValue(val)
val = md.find('purpose')
self.purpose = util.testXMLValue(val)
val = md.find('supplinf')
self.supplinf = util.testXMLValue(val)
class Timeperd(object):
""" Process timeperd """
def __init__(self, md):
if md is not None:
val = md.find('current')
self.current = util.testXMLValue(val)
val = md.find('timeinfo')
if val is not None:
self.timeinfo = Timeinfo(val)
class Timeinfo(object):
""" Process timeinfo """
def __init__(self, md):
val = md.find('sngdate')
if val is not None:
self.sngdate = Sngdate(val)
val = md.find('rngdates')
if val is not None:
self.rngdates = Rngdates(val)
class Sngdate(object):
""" Process sngdate """
def __init__(self, md):
val = md.find('caldate')
self.caldate = util.testXMLValue(val)
val = md.find('time')
self.time = util.testXMLValue(val)
class Rngdates(object):
""" Process rngdates """
def __init__(self, md):
val = md.find('begdate')
self.begdate = util.testXMLValue(val)
val = md.find('begtime')
self.begtime = util.testXMLValue(val)
val = md.find('enddate')
self.enddate = util.testXMLValue(val)
val = md.find('endtime')
self.endtime = util.testXMLValue(val)
class Status(object):
""" Process status """
def __init__(self, md):
val = md.find('progress')
self.progress = util.testXMLValue(val)
val = md.find('update')
self.update = util.testXMLValue(val)
class Spdom(object):
""" Process spdom """
def __init__(self, md):
val = md.find('bounding/westbc')
self.westbc = util.testXMLValue(val)
val = md.find('bounding/eastbc')
self.eastbc = util.testXMLValue(val)
val = md.find('bounding/northbc')
self.northbc = util.testXMLValue(val)
val = md.find('bounding/southbc')
self.southbc = util.testXMLValue(val)
if (self.southbc is not None and self.northbc is not None and
self.eastbc is not None and self.westbc is not None):
self.bbox = Bbox(self)
class Bbox(object):
""" Generate bbox for spdom (convenience function) """
def __init__(self, spdom):
self.minx = spdom.westbc
self.miny = spdom.southbc
self.maxx = spdom.eastbc
self.maxy = spdom.northbc
class Keywords(object):
""" Process keywords """
def __init__(self, md):
self.theme = []
self.place = []
self.temporal = []
for i in md.findall('theme'):
theme = {}
val = i.find('themekt')
theme['themekt'] = util.testXMLValue(val)
theme['themekey'] = []
for j in i.findall('themekey'):
themekey = util.testXMLValue(j)
if themekey is not None:
theme['themekey'].append(themekey)
self.theme.append(theme)
        for i in md.findall('place'):
            place = {}
            val = i.find('placekt')
            place['placekt'] = util.testXMLValue(val)
            place['placekey'] = []
            for j in i.findall('placekey'):
                place['placekey'].append(util.testXMLValue(j))
            self.place.append(place)
        for i in md.findall('temporal'):
            temporal = {}
            val = i.find('tempkt')
            temporal['tempkt'] = util.testXMLValue(val)
            temporal['tempkey'] = []
            for j in i.findall('tempkey'):
                temporal['tempkey'].append(util.testXMLValue(j))
            self.temporal.append(temporal)
class Ptcontac(object):
""" Process ptcontac """
def __init__(self, md):
val = md.find('cntinfo/cntorgp/cntorg')
self.cntorg = util.testXMLValue(val)
val = md.find('cntinfo/cntorgp/cntper')
self.cntper = util.testXMLValue(val)
val = md.find('cntinfo/cntpos')
self.cntpos = util.testXMLValue(val)
val = md.find('cntinfo/cntaddr/addrtype')
self.addrtype = util.testXMLValue(val)
val = md.find('cntinfo/cntaddr/address')
self.address = util.testXMLValue(val)
val = md.find('cntinfo/cntaddr/city')
self.city = util.testXMLValue(val)
val = md.find('cntinfo/cntaddr/state')
self.state = util.testXMLValue(val)
val = md.find('cntinfo/cntaddr/postal')
self.postal = util.testXMLValue(val)
val = md.find('cntinfo/cntaddr/country')
self.country = util.testXMLValue(val)
val = md.find('cntinfo/cntvoice')
self.voice = util.testXMLValue(val)
val = md.find('cntinfo/cntemail')
self.email = util.testXMLValue(val)
class Eainfo(object):
""" Process eainfo """
def __init__(self, md):
val = md.find('eainfo/detailed/enttyp/enttypl')
self.enttypl = util.testXMLValue(val)
val = md.find('eainfo/detailed/enttyp/enttypd')
self.enttypd = util.testXMLValue(val)
val = md.find('eainfo/detailed/enttyp/enttypds')
self.enttypds = util.testXMLValue(val)
self.attr = []
for i in md.findall('eainfo/detailed/attr'):
attr = {}
val = i.find('attrlabl')
attr['attrlabl'] = util.testXMLValue(val)
val = i.find('attrdef')
attr['attrdef'] = util.testXMLValue(val)
val = i.find('attrdefs')
attr['attrdefs'] = util.testXMLValue(val)
val = i.find('attrdomv/udom')
attr['udom'] = util.testXMLValue(val)
self.attr.append(attr)
class Distinfo(object):
""" Process distinfo """
def __init__(self, md):
val = md.find('distinfo')
if val is not None:
val2 = val.find('stdorder')
if val2 is not None:
self.stdorder = {'digform': []}
for link in val2.findall('digform'):
digform = {}
digform['name'] = util.testXMLValue(link.find('digtinfo/formname'))
digform['url'] = util.testXMLValue(link.find('digtopt/onlinopt/computer/networka/networkr/'))
self.stdorder['digform'].append(digform)
class Metainfo(object):
""" Process metainfo """
def __init__(self, md):
val = md.find('metainfo/metd')
self.metd = util.testXMLValue(val)
val = md.find('metainfo/metrd')
self.metrd = util.testXMLValue(val)
val = md.find('metainfo/metc')
if val is not None:
self.metc = Ptcontac(val)
val = md.find('metainfo/metstdn')
self.metstdn = util.testXMLValue(val)
val = md.find('metainfo/metstdv')
self.metstdv = util.testXMLValue(val)
val = md.find('metainfo/metac')
self.metac = util.testXMLValue(val)
val = md.find('metainfo/metuc')
self.metuc = util.testXMLValue(val)
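# --- Illustrative usage sketch (not part of this module) ----------------------
# The classes above only walk an already-parsed FGDC record. A minimal way to
# use them is shown below; the file name 'fgdc_record.xml' is an assumed
# placeholder, and the printed fields assume the record supplies
# idinfo/citation and idinfo/descript sections.
if __name__ == '__main__':  # pragma: no cover
    doc = etree.parse('fgdc_record.xml')
    record = Metadata(doc)
    print(record.idinfo.citation.citeinfo['title'])
    print(record.idinfo.descript.abstract)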
| bsd-3-clause | 5,161,168,610,335,866,000 | 30.208955 | 113 | 0.55122 | false |
HybridF5/jacket | jacket/tests/compute/unit/image/test_fake.py | 1 | 4715 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from six.moves import StringIO
from jacket import context
from jacket.compute import exception
from jacket.compute import test
import jacket.tests.compute.unit.image.fake
class FakeImageServiceTestCase(test.NoDBTestCase):
def setUp(self):
super(FakeImageServiceTestCase, self).setUp()
self.image_service = jacket.tests.compute.unit.image.fake.FakeImageService()
self.context = context.get_admin_context()
def tearDown(self):
super(FakeImageServiceTestCase, self).tearDown()
jacket.tests.compute.unit.image.fake.FakeImageService_reset()
def test_detail(self):
res = self.image_service.detail(self.context)
for image in res:
keys = set(image.keys())
self.assertEqual(keys, set(['id', 'name', 'created_at',
'updated_at', 'deleted_at', 'deleted',
'status', 'is_public', 'properties',
'disk_format', 'container_format',
'size']))
self.assertIsInstance(image['created_at'], datetime.datetime)
self.assertIsInstance(image['updated_at'], datetime.datetime)
if not (isinstance(image['deleted_at'], datetime.datetime) or
image['deleted_at'] is None):
self.fail('image\'s "deleted_at" attribute was neither a '
'datetime object nor None')
def check_is_bool(image, key):
val = image.get('deleted')
if not isinstance(val, bool):
self.fail('image\'s "%s" attribute wasn\'t '
'a bool: %r' % (key, val))
check_is_bool(image, 'deleted')
check_is_bool(image, 'is_public')
def test_show_raises_imagenotfound_for_invalid_id(self):
self.assertRaises(exception.ImageNotFound,
self.image_service.show,
self.context,
'this image does not exist')
def test_create_adds_id(self):
index = self.image_service.detail(self.context)
image_count = len(index)
self.image_service.create(self.context, {})
index = self.image_service.detail(self.context)
self.assertEqual(len(index), image_count + 1)
self.assertTrue(index[0]['id'])
def test_create_keeps_id(self):
self.image_service.create(self.context, {'id': '34'})
self.image_service.show(self.context, '34')
def test_create_rejects_duplicate_ids(self):
self.image_service.create(self.context, {'id': '34'})
self.assertRaises(exception.CouldNotUploadImage,
self.image_service.create,
self.context,
{'id': '34'})
# Make sure there's still one left
self.image_service.show(self.context, '34')
def test_update(self):
self.image_service.create(self.context,
{'id': '34', 'foo': 'bar'})
self.image_service.update(self.context, '34',
{'id': '34', 'foo': 'baz'})
img = self.image_service.show(self.context, '34')
self.assertEqual(img['foo'], 'baz')
def test_delete(self):
self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
self.image_service.delete(self.context, '34')
self.assertRaises(exception.NotFound,
self.image_service.show,
self.context,
'34')
def test_create_then_get(self):
blob = 'some data'
s1 = StringIO(blob)
self.image_service.create(self.context,
{'id': '32', 'foo': 'bar'},
data=s1)
s2 = StringIO()
self.image_service.download(self.context, '32', data=s2)
self.assertEqual(s2.getvalue(), blob, 'Did not get blob back intact')
| apache-2.0 | 2,027,542,659,760,899,800 | 39.299145 | 84 | 0.564793 | false |
adamcataldo/djscrooge | djscrooge/test/library/end_of_day/test_mongodb_cache.py | 1 | 1312 | """This file contains the test_yahoo module of the DJ Scrooge backtesting API.
Copyright (C) 2012 James Adam Cataldo
This file is part of Pengoe.
Pengoe is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pengoe. If not, see <http://www.gnu.org/licenses/>.
Dependencies:
proboscis: <https://github.com/rackspace/python-proboscis>
"""
from proboscis import test
from djscrooge.test.library.end_of_day.test_end_of_day import TestEndOfDay
from djscrooge.library.end_of_day.mongodb_cache import MongodbCache
@test()
class TestMongodbCache(TestEndOfDay):
"""Tests the Yahoo EndOfDay class."""
def __init__(self):
super(TestMongodbCache, self).__init__(MongodbCache)
if __name__ == "__main__":
from proboscis import TestProgram
TestProgram().run_and_exit() | gpl-3.0 | -2,187,895,209,300,121,900 | 36.514286 | 78 | 0.737805 | false |
TrondKjeldas/SoCo | soco/compat.py | 1 | 2435 | # -*- coding: utf-8 -*-
# pylint: disable=unused-import,import-error,no-name-in-module,
# pylint: disable=ungrouped-imports
"""This module contains various compatibility definitions and imports.
It is used internally by SoCo to ensure compatibility with Python 2."""
from __future__ import unicode_literals
try: # python 3
from http.server import SimpleHTTPRequestHandler # nopep8
from urllib.request import urlopen # nopep8
from urllib.error import URLError # nopep8
from urllib.parse import quote_plus # nopep8
import socketserver # nopep8
from queue import Queue # nopep8
StringType = bytes # nopep8
UnicodeType = str # nopep8
from urllib.parse import quote as quote_url # nopep8
from urllib.parse import urlparse, parse_qs # nopep8
except ImportError: # python 2.7
from SimpleHTTPServer import SimpleHTTPRequestHandler # nopep8
from urllib2 import urlopen, URLError # nopep8
from urllib import quote_plus # nopep8
import SocketServer as socketserver # nopep8
from Queue import Queue # nopep8
from types import StringType, UnicodeType # nopep8
from urllib import quote as quote_url # nopep8
from urlparse import urlparse, parse_qs # nopep8
try: # python 2.7 - this has to be done the other way round
from cPickle import dumps # nopep8
except ImportError: # python 3
from pickle import dumps # nopep8
# Support Python 2.6
try: # Python 2.7+
from logging import NullHandler # nopep8
except ImportError:
import logging
class NullHandler(logging.Handler):
"""Create a null handler if using Python 2.6"""
def emit(self, record):
pass
def with_metaclass(meta, *bases):
"""A Python 2/3 compatible way of declaring a metaclass.
Taken from `Jinja 2 <https://github.com/mitsuhiko/jinja2/blob/master/jinja2
/_compat.py>`_ via `python-future <http://python-future.org>`_. License:
BSD.
Use it like this::
class MyClass(with_metaclass(MyMetaClass, BaseClass)):
pass
"""
class _Metaclass(meta):
"""Inner class"""
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, attrs):
if this_bases is None:
return type.__new__(cls, name, (), attrs)
return meta(name, bases, attrs)
return _Metaclass(str('temporary_class'), None, {})
| mit | 347,761,600,964,849,000 | 32.356164 | 79 | 0.668172 | false |
danilobellini/pytest-doctest-custom | setup.py | 1 | 3678 | #!/usr/bin/env python
"""Py.test doctest custom plugin: setup script."""
import os, setuptools, itertools, ast
BLOCK_START = ".. %s"
BLOCK_END = ".. %s end"
PKG_DIR = os.path.dirname(__file__)
MODULE_FILE = os.path.join(PKG_DIR, "pytest_doctest_custom.py")
with open(os.path.join(PKG_DIR, "README.rst"), "r") as f:
README = f.read().splitlines()
def not_eq(value):
"""Partial evaluation of ``!=`` for currying"""
return lambda el: el != value
def get_block(name, data, newline="\n"):
"""
Joined multiline string block from a list of strings data. The
BLOCK_START and BLOCK_END delimiters are selected with the given
name and aren't included in the result.
"""
lines = itertools.dropwhile(not_eq(BLOCK_START % name), data)
next(lines) # Skip the start line, raise an error if there's no start line
return newline.join(itertools.takewhile(not_eq(BLOCK_END % name), lines))
def all_but_block(name, data, newline="\n", remove_empty_next=True):
"""
Joined multiline string from a list of strings data, removing a
block with the given name and its delimiters. Removes the empty
line after BLOCK_END when ``remove_empty_next`` is True.
"""
it = iter(data)
before = list(itertools.takewhile(not_eq(BLOCK_START % name), it))
after = list(itertools.dropwhile(not_eq(BLOCK_END % name), it))[1:]
if remove_empty_next and after and after[0].strip() == "":
return newline.join(before + after[1:])
return newline.join(before + after)
def single_line(value):
"""Returns the given string joined to a single line and trimmed."""
return " ".join(filter(None, map(str.strip, value.splitlines())))
def get_assignment(fname, varname):
"""
Gets the evaluated expression of a single assignment statement
``varname = expression`` in the referred file. The expression should
not depend on previous values, as the context isn't evaluated.
"""
with open(fname, "r") as f:
for n in ast.parse(f.read(), filename=fname).body:
if isinstance(n, ast.Assign) and len(n.targets) == 1 \
and n.targets[0].id == varname:
return eval(compile(ast.Expression(n.value), fname, "eval"))
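# Illustrative example of the block helpers above (the sample lines are made up,
# not taken from this project's README):
#
#     sample = ["Intro line",
#               ".. summary",
#               "One-line project summary.",
#               ".. summary end",
#               "",
#               "More docs"]
#     get_block("summary", sample)      # -> "One-line project summary."
#     all_but_block("summary", sample)  # -> "Intro line\nMore docs"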
metadata = {
"name": "pytest-doctest-custom",
"version": get_assignment(MODULE_FILE, "__version__"),
"author": "Danilo de Jesus da Silva Bellini",
"author_email": "[email protected]",
"url": "http://github.com/danilobellini/pytest-doctest-custom",
"description": single_line(get_block("summary", README)),
"long_description": all_but_block("summary", README),
"license": "MIT",
"py_modules": ["pytest_doctest_custom"],
"install_requires": ["pytest>=2.1"],
"entry_points": {"pytest11": ["doctest_custom = pytest_doctest_custom"]},
}
metadata["classifiers"] = """
Development Status :: 4 - Beta
Framework :: Pytest
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: POSIX :: Linux
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.2
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: Jython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development :: Testing
""".strip().splitlines()
setuptools.setup(**metadata)
| mit | -29,702,001,339,417,744 | 38.12766 | 78 | 0.673192 | false |
40323107/cpw12 | wsgi.py | 1 | 1498 | #!/usr/bin/env python
import os
def application(environ, start_response):
ctype = 'text/plain'
if environ['PATH_INFO'] == '/health':
response_body = "1"
elif environ['PATH_INFO'] == '/env':
response_body = ['%s: %s' % (key, value)
for key, value in sorted(environ.items())]
response_body = '\n'.join(response_body)
else:
ctype = 'text/html'
response_body = '''1x1=1 1x2=2 1x3=3 1x4=4 1x5=5 1x6=6 1x7=7 1x8=8 1x9=9 <br />
2x1=2 2x2=4 2x3=6 2x4=8 2x5=10 2x6=12 2x7=14 2x8=16 2x9=18 <br />
3x1=3 3x2=6 3x3=9 3x4=12 3x5=15 3x6=18 3x7=21 3x8=24 3x9=27 <br />
4x1=4 4x2=8 4x3=12 4x4=16 4x5=20 4x6=24 4x7=28 4x8=32 4x9=36 <br />
5x1=5 5x2=10 5x3=15 5x4=20 5x5=25 5x6=30 5x7=35 5x8=40 5x9=45 <br />
6x1=6 6x2=12 6x3=18 6x4=24 6x5=30 6x6=36 6x7=42 6x8=48 6x9=54 <br />
7x1=7 7x2=14 7x3=21 7x4=28 7x5=35 7x6=42 7x7=49 7x8=56 7x9=63 <br />
8x1=8 8x2=16 8x3=24 8x4=32 8x5=40 8x6=48 8x7=56 8x8=64 8x9=72 <br />
9x1=9 9x2=18 9x3=27 9x4=36 9x5=45 9x6=54 9x7=63 9x8=72 9x9=81 '''
status = '200 OK'
response_headers = [('Content-Type', ctype), ('Content-Length', str(len(response_body)))]
#
start_response(status, response_headers)
return [response_body.encode('utf-8') ]
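# The multiplication table above is hard-coded; an equivalent, purely
# illustrative way to build the same body programmatically would be:
#
#     rows = (" ".join("%dx%d=%d" % (i, j, i * j) for j in range(1, 10))
#             for i in range(1, 10))
#     response_body = " <br />\n".join(rows)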
#
# Below for testing only
#
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 8051, application)
# Wait for a single request, serve it and quit.
httpd.handle_request()
| gpl-3.0 | -5,194,370,457,277,970,000 | 36.45 | 93 | 0.62283 | false |
Sorsly/subtle | google-cloud-sdk/lib/surface/compute/backend_services/update_backend.py | 4 | 12042 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for updating a backend in a backend service."""
import copy
from googlecloudsdk.api_lib.compute import backend_services_utils
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import instance_groups_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.backend_services import backend_flags
from googlecloudsdk.command_lib.compute.backend_services import flags
@base.ReleaseTracks(base.ReleaseTrack.GA)
class UpdateBackend(base_classes.ReadWriteCommand):
"""Update an existing backend in a backend service."""
def __init__(self, *args, **kwargs):
super(UpdateBackend, self).__init__(*args, **kwargs)
self.ref = None
@staticmethod
def Args(parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
backend_flags.AddDescription(parser)
backend_flags.AddInstanceGroup(
parser, operation_type='update', with_deprecated_zone=True)
backend_flags.AddBalancingMode(parser)
backend_flags.AddCapacityLimits(parser)
backend_flags.AddCapacityScalar(parser)
@property
def service(self):
if self.regional:
return self.compute.regionBackendServices
return self.compute.backendServices
@property
def resource_type(self):
if self.regional:
return 'regionBackendServices'
return 'backendServices'
def CreateReference(self, args):
# TODO(b/35133484): remove once base classes are refactored away
if not self.ref:
self.ref = flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.ResolveAsResource(
args,
self.resources,
scope_lister=compute_flags.GetDefaultScopeLister(self.compute_client))
self.regional = self.ref.Collection() == 'compute.regionBackendServices'
return self.ref
def GetGetRequest(self, args):
if self.regional:
return (self.service,
'Get',
self.messages.ComputeRegionBackendServicesGetRequest(
backendService=self.ref.Name(),
region=self.ref.region,
project=self.project))
return (self.service,
'Get',
self.messages.ComputeBackendServicesGetRequest(
backendService=self.ref.Name(),
project=self.project))
def GetSetRequest(self, args, replacement, existing):
if self.regional:
return (self.service,
'Update',
self.messages.ComputeRegionBackendServicesUpdateRequest(
backendService=self.ref.Name(),
backendServiceResource=replacement,
region=self.ref.region,
project=self.project))
return (self.service,
'Update',
self.messages.ComputeBackendServicesUpdateRequest(
backendService=self.ref.Name(),
backendServiceResource=replacement,
project=self.project))
def CreateGroupReference(self, args):
return instance_groups_utils.CreateInstanceGroupReference(
scope_prompter=self,
compute=self.compute,
resources=self.resources,
name=args.instance_group,
region=args.instance_group_region,
zone=(args.instance_group_zone
if args.instance_group_zone else args.zone),
zonal_resource_type='instanceGroups',
regional_resource_type='regionInstanceGroups')
def Modify(self, args, existing):
"""Override. See base class, ReadWriteCommand."""
backend_flags.WarnOnDeprecatedFlags(args)
replacement = copy.deepcopy(existing)
group_ref = self.CreateGroupReference(args)
backend_to_update = None
for backend in replacement.backends:
if group_ref.SelfLink() == backend.group:
backend_to_update = backend
if not backend_to_update:
scope_type = None
scope_name = None
if hasattr(group_ref, 'zone'):
scope_type = 'zone'
scope_name = group_ref.zone
if hasattr(group_ref, 'region'):
scope_type = 'region'
scope_name = group_ref.region
raise exceptions.ToolException(
'No backend with name [{0}] in {1} [{2}] is part of the backend '
'service [{3}].'.format(
group_ref.Name(), scope_type, scope_name, self.ref.Name()))
if args.description:
backend_to_update.description = args.description
elif args.description is not None:
backend_to_update.description = None
self.ModifyBalancingModeArgs(args, backend_to_update)
return replacement
def ModifyBalancingModeArgs(self, args, backend_to_update):
"""Update balancing mode fields in backend_to_update according to args.
Args:
args: The arguments given to the update-backend command.
backend_to_update: The backend message to modify.
"""
_ModifyBalancingModeArgs(self.messages, args, backend_to_update)
def Run(self, args):
if not any([
args.description is not None,
args.balancing_mode,
args.max_utilization is not None,
args.max_rate is not None,
args.max_rate_per_instance is not None,
args.max_connections is not None,
args.max_connections_per_instance is not None,
args.capacity_scaler is not None,
]):
raise exceptions.ToolException('At least one property must be modified.')
self.CreateReference(args)
return super(UpdateBackend, self).Run(args)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class UpdateBackendBeta(UpdateBackend):
"""Update an existing backend in a backend service."""
@classmethod
def Args(cls, parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
backend_flags.AddDescription(parser)
backend_flags.AddInstanceGroup(
parser, operation_type='update', with_deprecated_zone=True)
backend_flags.AddBalancingMode(parser)
backend_flags.AddCapacityLimits(parser)
backend_flags.AddCapacityScalar(parser)
def CreateGroupReference(self, args):
"""Overrides."""
return instance_groups_utils.CreateInstanceGroupReference(
scope_prompter=self,
compute=self.compute,
resources=self.resources,
name=args.instance_group,
region=args.instance_group_region,
zone=(args.instance_group_zone
if args.instance_group_zone else args.zone),
zonal_resource_type='instanceGroups',
regional_resource_type='regionInstanceGroups')
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateBackendAlpha(UpdateBackend):
"""Update an existing backend in a backend service."""
@classmethod
def Args(cls, parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
backend_flags.AddDescription(parser)
flags.MULTISCOPE_INSTANCE_GROUP_ARG.AddArgument(
parser, operation_type='update')
backend_flags.AddBalancingMode(parser)
backend_flags.AddCapacityLimits(parser)
backend_flags.AddCapacityScalar(parser)
def CreateGroupReference(self, args):
"""Overrides."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
return flags.MULTISCOPE_INSTANCE_GROUP_ARG.ResolveAsResource(
args, holder.resources,
default_scope=compute_scope.ScopeEnum.ZONE,
scope_lister=compute_flags.GetDefaultScopeLister(
holder.client, self.project))
def _ClearMutualExclusiveBackendCapacityThresholds(backend):
"""Initialize the backend's mutually exclusive capacity thresholds."""
backend.maxRate = None
backend.maxRatePerInstance = None
backend.maxConnections = None
backend.maxConnectionsPerInstance = None
def _ModifyBalancingModeArgs(messages, args, backend_to_update):
"""Update balancing mode fields in backend_to_update according to args.
Args:
messages: API messages class, determined by the release track.
args: The arguments given to the update-backend command.
backend_to_update: The backend message to modify.
"""
backend_services_utils.ValidateBalancingModeArgs(
messages,
args,
backend_to_update.balancingMode)
if args.balancing_mode:
backend_to_update.balancingMode = (
messages.Backend.BalancingModeValueValuesEnum(
args.balancing_mode))
# If the balancing mode is being changed to RATE (CONNECTION), we must
# clear the max utilization and max connections (rate) fields, otherwise
# the server will reject the request.
if (backend_to_update.balancingMode ==
messages.Backend.BalancingModeValueValuesEnum.RATE):
backend_to_update.maxUtilization = None
backend_to_update.maxConnections = None
backend_to_update.maxConnectionsPerInstance = None
elif (backend_to_update.balancingMode ==
messages.Backend.BalancingModeValueValuesEnum.CONNECTION):
backend_to_update.maxUtilization = None
backend_to_update.maxRate = None
backend_to_update.maxRatePerInstance = None
# Now, we set the parameters that control load balancing.
# ValidateBalancingModeArgs takes care that the control parameters
# are compatible with the balancing mode.
if args.max_utilization is not None:
backend_to_update.maxUtilization = args.max_utilization
# max_rate, max_rate_per_instance, max_connections and
# max_connections_per_instance are mutually exclusive arguments.
if args.max_rate is not None:
_ClearMutualExclusiveBackendCapacityThresholds(backend_to_update)
backend_to_update.maxRate = args.max_rate
elif args.max_rate_per_instance is not None:
_ClearMutualExclusiveBackendCapacityThresholds(backend_to_update)
backend_to_update.maxRatePerInstance = args.max_rate_per_instance
elif args.max_connections is not None:
_ClearMutualExclusiveBackendCapacityThresholds(backend_to_update)
backend_to_update.maxConnections = args.max_connections
elif args.max_connections_per_instance is not None:
_ClearMutualExclusiveBackendCapacityThresholds(backend_to_update)
backend_to_update.maxConnectionsPerInstance = (
args.max_connections_per_instance)
if args.capacity_scaler is not None:
backend_to_update.capacityScaler = args.capacity_scaler
UpdateBackend.detailed_help = {
'brief': 'Update an existing backend in a backend service',
'DESCRIPTION': """
*{command}* updates a backend that is part of a backend
service. This is useful for changing the way a backend
behaves. Example changes that can be made include changing the
load balancing policy and ``draining'' a backend by setting
its capacity scaler to zero.
Backends are named by their associated instances groups, and one
of the ``--group'' or ``--instance-group'' flags is required to
identify the backend that you are modifying. You cannot "change"
the instance group associated with a backend, but you can accomplish
something similar with ``backend-services remove-backend'' and
``backend-services add-backend''.
`gcloud compute backend-services edit` can also be used to
update a backend if the use of a text editor is desired.
""",
}
UpdateBackendAlpha.detailed_help = UpdateBackend.detailed_help
UpdateBackendBeta.detailed_help = UpdateBackend.detailed_help
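# Illustrative invocation (assumed example names; flag spellings inferred from
# the argument helpers wired up above, e.g. --instance-group-zone and
# --capacity-scaler; any scope flags required by newer releases are omitted):
#
#     gcloud compute backend-services update-backend my-backend-service \
#         --instance-group my-instance-group \
#         --instance-group-zone us-central1-a \
#         --capacity-scaler 0
#
# Setting the capacity scaler to zero "drains" the backend, as described in the
# detailed help text above.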
| mit | 1,338,644,663,265,337,000 | 37.350318 | 80 | 0.711344 | false |
scottnm/Pong | testText.py | 1 | 1120 | import pygame, sys
from pygame.locals import *
pygame.init()
dSurf = pygame.display.set_mode((400, 400))
pygame.display.set_caption("Test Text")
myFont = pygame.font.SysFont("Times New Roman", 30)
while True:
testText = myFont.render("Test Text", 0, (255,255,255))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
dSurf.fill((0,0,0))
dSurf.blit(testText, (200,200))
""" b1.checkCollision(pLEFT, pRIGHT)
b2.checkCollision(pLEFT, pRIGHT)
b1.update()
b2.update() """
"""#check for score on right goal
if(b1.x>WINDOW_WIDTH):
b1 = Ball()
sb.scoreLeft+=1
elif(b1.x<0):
b1 = Ball()
sb.scoreRight+=1
#check for score on left goal
if(b2.x>WINDOW_WIDTH):
b2 = Ball()
sb.scoreLeft+=1
elif(b2.x<0):
b2 = Ball()
sb.scoreRight+=1 """
"""#Draws Balls
b1.draw(DS)
b2.draw(DS)"""
| apache-2.0 | -7,127,174,258,844,549,000 | 22.829787 | 59 | 0.508036 | false |
quiltdata/quilt | lambdas/pkgselect/test/test_pkgselect.py | 1 | 19970 | """
Test functions for pkgselect endpoint
"""
import json
import os
from unittest import TestCase
from unittest.mock import patch
import boto3
import pandas as pd
import responses
from botocore.stub import Stubber
from t4_lambda_shared.utils import buffer_s3response, read_body
from ..index import file_list_to_folder, lambda_handler
class TestPackageSelect(TestCase):
"""
Unit tests for the PackageSelect API endpoint.
"""
def make_s3response(self, payload_bytes):
"""
Generate a mock s3 select response
"""
return {
'Payload': [
{
'Records': {
'Payload': payload_bytes
}
},
{
'Progress': {
'Details': {
'BytesScanned': 123,
'BytesProcessed': 123,
'BytesReturned': 123
}
}
},
{
'Stats': {
'Details': {
'BytesScanned': 123,
'BytesProcessed': 123,
'BytesReturned': 123
}
}
},
{
'End': {}
}
]
}
def make_manifest_query(self, logical_keys):
entries = []
for key in logical_keys:
entry = dict(
logical_key=key,
physical_key=f"{key}?versionid=1234",
size=100
)
entries.append(json.dumps(entry))
jsonl = "\n".join(entries)
streambytes = jsonl.encode()
return self.make_s3response(streambytes)
def setUp(self):
"""
Mocks to tests calls to S3 Select
"""
logical_keys = [
"foo.csv",
"bar/file1.txt",
"bar/file2.txt",
"bar/baz/file3.txt",
"bar/baz/file4.txt"
]
manifest_row = dict(
logical_key="bar/file1.txt",
physical_keys=["s3://test-bucket/bar/file1.txt"],
size=1234,
hash={"type": "SHA256", "value": "0123456789ABCDEF"},
meta={}
)
detailbytes = json.dumps(manifest_row).encode()
self.s3response = self.make_manifest_query(logical_keys)
self.s3response_detail = self.make_s3response(detailbytes)
self.s3response_incomplete = {
'Payload': [
{
'Records': {
'Payload': self.s3response['Payload'][0]['Records']['Payload']
}
},
{
'Stats': {
'Details': {
'BytesScanned': 123,
'BytesProcessed': 123,
'BytesReturned': 123
}
}
}
]
}
meta = {
"version": "v0",
"user_meta": {
"somefield": "somevalue"
},
"message": "Commit message"
}
metabytes = json.dumps(meta).encode()
self.s3response_meta = self.make_s3response(metabytes)
requests_mock = responses.RequestsMock(assert_all_requests_are_fired=False)
requests_mock.start()
self.addCleanup(requests_mock.stop)
env_patcher = patch.dict(os.environ, {
'AWS_ACCESS_KEY_ID': 'test_key',
'AWS_SECRET_ACCESS_KEY': 'test_secret',
})
env_patcher.start()
self.addCleanup(env_patcher.stop)
@classmethod
def _make_event(cls, params, headers=None):
return {
'httpMethod': 'POST',
'path': '/foo',
'pathParameters': {},
'queryStringParameters': params or None,
'headers': headers or None,
'body': None,
'isBase64Encoded': False,
}
def test_browse_top_level(self):
"""
Test that the S3 Select response is parsed
into the correct top-level folder view.
"""
df = pd.read_json(buffer_s3response(self.s3response), lines=True)
assert isinstance(df, pd.DataFrame)
folder = file_list_to_folder(df, 1000, 0)
assert len(folder['prefixes']) == 1
assert len(folder['objects']) == 1
assert folder['objects'][0]['logical_key'] == 'foo.csv'
assert folder['prefixes'][0]['logical_key'] == 'bar/'
def test_limit(self):
"""
        Test that the limit parameter truncates the folder listing
        to the requested number of entries.
"""
df = pd.read_json(buffer_s3response(self.s3response), lines=True)
assert isinstance(df, pd.DataFrame)
folder = file_list_to_folder(df, 1, 0)
assert len(folder['prefixes']) == 1
assert len(folder['objects']) == 0
assert folder['prefixes'][0]['logical_key'] == 'bar/'
def test_offset(self):
"""
        Test that the offset parameter skips entries from the start
        of the folder listing.
"""
df = pd.read_json(buffer_s3response(self.s3response), lines=True)
assert isinstance(df, pd.DataFrame)
folder = file_list_to_folder(df, 1000, 1)
assert len(folder['prefixes']) == 0
assert len(folder['objects']) == 1
assert folder['objects'][0]['logical_key'] == 'foo.csv'
def test_browse_subfolder(self):
"""
Test that the S3 Select response is parsed
into the correct sub-folder view.
"""
prefix = "bar/"
df = pd.read_json(buffer_s3response(self.s3response), lines=True)
assert isinstance(df, pd.DataFrame)
filtered_df = df[df['logical_key'].str.startswith(prefix)]
stripped = filtered_df['logical_key'].str.slice(start=len(prefix))
stripped_df = stripped.to_frame('logical_key')
s3_df = pd.concat(
[stripped_df['logical_key'], filtered_df['size'], filtered_df['physical_key']],
axis=1,
keys=['logical_key', 'size', 'physical_key']
)
folder = file_list_to_folder(s3_df, 1000, 0)
assert len(folder['prefixes']) == 1
assert len(folder['objects']) == 2
object_keys = [obj['logical_key'] for obj in folder['objects']]
assert "file1.txt" in object_keys
assert "file2.txt" in object_keys
assert folder['prefixes'][0]['logical_key'] == "baz/"
def test_browse_subsubfolder(self):
"""
Test that the S3 Select response is parsed
into the correct sub-sub-folder view.
"""
prefix = "bar/baz/"
df = pd.read_json(buffer_s3response(self.s3response), lines=True)
assert isinstance(df, pd.DataFrame)
filtered_df = df[df['logical_key'].str.startswith(prefix)]
stripped = filtered_df['logical_key'].str.slice(start=len(prefix))
stripped_df = stripped.to_frame('logical_key')
s3_df = pd.concat(
[stripped_df['logical_key'], filtered_df['size'], filtered_df['physical_key']],
axis=1,
keys=['logical_key', 'size', 'physical_key']
)
folder = file_list_to_folder(s3_df, 1000, 0)
assert "objects" in folder
assert "prefixes" in folder
assert not folder['prefixes']
assert len(folder['objects']) == 2
object_keys = [obj['logical_key'] for obj in folder['objects']]
assert "file3.txt" in object_keys
assert "file4.txt" in object_keys
def test_folder_view(self):
"""
End-to-end test (folder view without a prefix)
"""
bucket = "bucket"
key = ".quilt/packages/manifest_hash"
params = dict(
bucket=bucket,
manifest=key,
access_key="TESTKEY",
secret_key="TESTSECRET",
session_token="TESTSESSION"
)
expected_args = {
'Bucket': bucket,
'Key': key,
'Expression': "SELECT SUBSTRING(s.logical_key, 1) AS logical_key FROM s3object s",
'ExpressionType': 'SQL',
'InputSerialization': {
'CompressionType': 'NONE',
'JSON': {'Type': 'LINES'}
},
'OutputSerialization': {'JSON': {'RecordDelimiter': '\n'}},
}
mock_s3 = boto3.client('s3')
with patch.object(
mock_s3,
'select_object_content',
side_effect=[
self.s3response,
self.s3response_meta
]
) as client_patch, patch(
'boto3.Session.client',
return_value=mock_s3
):
response = lambda_handler(self._make_event(params), None)
print(response)
assert response['statusCode'] == 200
folder = json.loads(read_body(response))['contents']
assert len(folder['prefixes']) == 1
assert len(folder['objects']) == 1
assert folder['objects'][0]['logical_key'] == 'foo.csv'
assert folder['prefixes'][0]['logical_key'] == 'bar/'
def test_folder_view_paging(self):
"""
End-to-end test (top-level folder view with a limit & offset)
"""
bucket = "bucket"
key = ".quilt/packages/manifest_hash"
params = dict(
bucket=bucket,
manifest=key,
prefix="paging_test/",
limit=10,
offset=10,
access_key="TESTKEY",
secret_key="TESTSECRET",
session_token="TESTSESSION"
)
expected_args = {
'Bucket': bucket,
'Key': key,
'Expression': "SELECT SUBSTRING(s.logical_key, 1) AS logical_key FROM s3object s",
'ExpressionType': 'SQL',
'InputSerialization': {
'CompressionType': 'NONE',
'JSON': {'Type': 'LINES'}
},
'OutputSerialization': {'JSON': {'RecordDelimiter': '\n'}},
}
paging_logical_keys = [
f"f{i:03d}.csv" for i in range(1000)
]
s3response_paging = self.make_manifest_query(paging_logical_keys)
mock_s3 = boto3.client('s3')
with patch.object(
mock_s3,
'select_object_content',
side_effect=[
s3response_paging,
self.s3response_meta
]
) as client_patch, patch(
'boto3.Session.client',
return_value=mock_s3
):
response = lambda_handler(self._make_event(params), None)
print(response)
assert response['statusCode'] == 200
folder = json.loads(read_body(response))['contents']
assert len(folder['prefixes']) == 0
assert len(folder['objects']) == 10
assert folder['total'] == 1000
assert folder['returned'] == 10
assert folder['objects'][0]['logical_key'] == 'f010.csv'
def test_detail_view(self):
"""
End-to-end test (detail view)
"""
bucket = "bucket"
key = ".quilt/packages/manifest_hash"
logical_key = "bar/file1.txt"
params = dict(
bucket=bucket,
manifest=key,
logical_key=logical_key,
access_key="TESTKEY",
secret_key="TESTSECRET",
session_token="TESTSESSION"
)
expected_sql = "SELECT s.* FROM s3object s WHERE s.logical_key = 'bar/file1.txt' LIMIT 1"
expected_args = {
'Bucket': bucket,
'Key': key,
'Expression': "SELECT SUBSTRING(s.logical_key, 1) AS logical_key FROM s3object s",
'ExpressionType': 'SQL',
'InputSerialization': {
'CompressionType': 'NONE',
'JSON': {'Type': 'LINES'}
},
'OutputSerialization': {'JSON': {'RecordDelimiter': '\n'}},
}
mock_s3 = boto3.client('s3')
with patch.object(
mock_s3,
'select_object_content',
return_value=self.s3response_detail
) as client_patch, patch(
'boto3.Session.client',
return_value=mock_s3
):
response = lambda_handler(self._make_event(params), None)
print(response)
assert response['statusCode'] == 200
json.loads(read_body(response))['contents']
def test_incomplete_credentials(self):
"""
Verify that a call with incomplete credentials fails.
"""
bucket = "bucket"
key = ".quilt/packages/manifest_hash"
logical_key = "bar/file1.txt"
params = dict(
bucket=bucket,
manifest=key,
logical_key=logical_key,
access_key="TESTKEY",
secret_key="TESTSECRET",
)
response = lambda_handler(self._make_event(params), None)
assert response['statusCode'] == 401
def test_blocked_anon_access(self):
"""
Verify that an anonymous call fails if ALLOW_ANONYMOUS_ACCESS
is not set.
"""
bucket = "bucket"
key = ".quilt/packages/manifest_hash"
logical_key = "bar/file1.txt"
params = dict(
bucket=bucket,
manifest=key,
logical_key=logical_key,
)
response = lambda_handler(self._make_event(params), None)
assert response['statusCode'] == 401
def test_anon_access(self):
"""
Test anonymous call w/ ALLOW_ANONYMOUS_ACCESS
"""
bucket = "bucket"
key = ".quilt/packages/manifest_hash"
params = dict(
bucket=bucket,
manifest=key,
)
expected_args = {
'Bucket': bucket,
'Key': key,
'Expression': "SELECT SUBSTRING(s.logical_key, 1) AS logical_key FROM s3object s",
'ExpressionType': 'SQL',
'InputSerialization': {
'CompressionType': 'NONE',
'JSON': {'Type': 'LINES'}
},
'OutputSerialization': {'JSON': {'RecordDelimiter': '\n'}},
}
env_patcher = patch.dict(os.environ, {
'AWS_ACCESS_KEY_ID': 'test_key',
'AWS_SECRET_ACCESS_KEY': 'test_secret',
'ALLOW_ANONYMOUS_ACCESS': '1'
})
env_patcher.start()
mock_s3 = boto3.client('s3')
response = {
'ETag': '12345',
'VersionId': '1.0',
'ContentLength': 123,
}
expected_params = {
'Bucket': bucket,
'Key': key,
}
s3_stubber = Stubber(mock_s3)
s3_stubber.activate()
s3_stubber.add_response('head_object', response, expected_params)
with patch.object(
mock_s3,
'select_object_content',
side_effect=[
self.s3response,
self.s3response_meta
]
) as client_patch, patch(
'boto3.Session.client',
return_value=mock_s3
):
response = lambda_handler(self._make_event(params), None)
print(response)
assert response['statusCode'] == 200
folder = json.loads(read_body(response))['contents']
print(folder)
assert len(folder['prefixes']) == 1
assert len(folder['objects']) == 1
assert folder['objects'][0]['logical_key'] == 'foo.csv'
assert folder['prefixes'][0]['logical_key'] == 'bar/'
s3_stubber.deactivate()
env_patcher.stop()
def test_non_string_keys(self):
"""
End-to-end test (folder view without a prefix)
"""
bucket = "bucket"
key = ".quilt/packages/manifest_hash"
params = dict(
bucket=bucket,
manifest=key,
access_key="TESTKEY",
secret_key="TESTSECRET",
session_token="TESTSESSION"
)
expected_args = {
'Bucket': bucket,
'Key': key,
'Expression': "SELECT SUBSTRING(s.logical_key, 1) AS logical_key FROM s3object s",
'ExpressionType': 'SQL',
'InputSerialization': {
'CompressionType': 'NONE',
'JSON': {'Type': 'LINES'}
},
'OutputSerialization': {'JSON': {'RecordDelimiter': '\n'}},
}
# Return a response with keys that are not strings (integers here)
# The important test case is where all members of a column are
# non-string
logical_keys = [
"1",
"2",
"3",
]
entries = []
for key in logical_keys:
entry = dict(
logical_key=key,
physical_key=key,
size=100
)
entries.append(json.dumps(entry))
jsonl = "\n".join(entries)
streambytes = jsonl.encode()
non_string_s3response = self.make_s3response(streambytes)
mock_s3 = boto3.client('s3')
with patch.object(
mock_s3,
'select_object_content',
side_effect=[
non_string_s3response,
self.s3response_meta
]
) as client_patch, patch(
'boto3.Session.client',
return_value=mock_s3
):
response = lambda_handler(self._make_event(params), None)
print(response)
assert response['statusCode'] == 200
folder = json.loads(read_body(response))['contents']
assert not folder['prefixes']
assert len(folder['objects']) == 3
assert folder['objects'][0]['logical_key'] == '1'
assert folder['objects'][1]['logical_key'] == '2'
assert folder['objects'][2]['logical_key'] == '3'
def test_empty_manifest(self):
"""
End-to-end test (folder view without a prefix) for an
empty package manifest
"""
bucket = "bucket"
key = ".quilt/packages/manifest_hash"
params = dict(
bucket=bucket,
manifest=key,
access_key="TESTKEY",
secret_key="TESTSECRET",
session_token="TESTSESSION"
)
expected_args = {
'Bucket': bucket,
'Key': key,
'Expression': "SELECT SUBSTRING(s.logical_key, 1) AS logical_key FROM s3object s",
'ExpressionType': 'SQL',
'InputSerialization': {
'CompressionType': 'NONE',
'JSON': {'Type': 'LINES'}
},
'OutputSerialization': {'JSON': {'RecordDelimiter': '\n'}},
}
# Empty manifest
jsonl = '{"version": "v0", "message": null}'
streambytes = jsonl.encode()
non_string_s3response = self.make_s3response(streambytes)
mock_s3 = boto3.client('s3')
with patch.object(
mock_s3,
'select_object_content',
side_effect=[
non_string_s3response,
self.s3response_meta
]
) as client_patch, patch(
'boto3.Session.client',
return_value=mock_s3
):
response = lambda_handler(self._make_event(params), None)
print(response)
assert response['statusCode'] == 200
folder = json.loads(read_body(response))['contents']
assert not folder['prefixes']
assert not folder['objects']
assert folder['total'] == 0
| apache-2.0 | 8,699,328,862,312,954,000 | 32.008264 | 97 | 0.499449 | false |
afg984/pyardrone | pyardrone/at/parameters.py | 1 | 4104 | import enum
import functools
import json
import io
import operator
from pyardrone.utils import repack_to_int
class Parameter:
'''
Base class of all at command parameters.
:param description: description of the parameter, stored in __doc__
:param default: default value of the parameter
'''
def __init__(self, description='', default=None, name=None, index=None):
self.__doc__ = description
self._default = default
self._name = name
self._index = index
def __repr__(self):
if self._name is not None:
return '<{self.__class__.__name__}:{self._name}>'.format(self=self)
else:
return super().__repr__()
def __get__(self, obj, type=None):
if obj is None:
return self
else:
return obj[self._index]
def __set__(self, obj, value):
raise AttributeError(
'{} of {} not settable, please use {}._replace'.format(
self._name, obj, obj.__class__.__name__))
@staticmethod
def _check(value):
'''
Checks the value on :py:class:`~pyardrone.at.base.ATCommand`\ 's init.
Subclasses can optionally define this method, the default
implementation is a no-op.
:raises TypeError: If the value is of the wrong type.
:raises ValueError: If the value is not valid.
'''
@staticmethod
def _pack(value):
'''
Packs the value.
Subclasses should define this method.
:rtype: bytes
'''
raise NotImplementedError
class Int32(Parameter):
'''
Parameter class of a 32-bit integer.
'''
@staticmethod
def _check(value):
if int.bit_length(value) > 32:
raise ValueError(
'value {} should be less than 4 bytes'.format(value)
)
@staticmethod
def _pack(value):
return str(int(value)).encode()
def __setattr__(self, name, value):
super().__setattr__(name, value)
if name == '_name' and hasattr(self, '_flags'):
self._flags.__name__ = value
def _set_flags(self, **flags):
'''
Set the flags of this argument.
Example: ``int_param._set_flags(a=1, b=2, c=4, d=8)``
'''
self._flags = enum.IntEnum('_flags', flags)
self.__dict__.update(self._flags.__members__)
self._patch_flag_doc()
def _patch_flag_doc(self):
patch = io.StringIO()
patch.write('\n\n:Flags:\n')
for key, value in sorted(
self._flags.__members__.items(),
key=operator.itemgetter(1)
):
patch.write(' * ``{}`` = *{:d}*\n'.format(
key, value))
self.__doc__ = self.__doc__.rstrip() + patch.getvalue()
class Float(Parameter):
'Parameter class of a float'
__slots__ = ()
@staticmethod
def _check(value):
float(value)
@staticmethod
def _pack(value):
return str(repack_to_int(value)).encode()
class String(Parameter):
'Parameter class of a string'
__slots__ = ()
@staticmethod
def _check(value):
if not isinstance(value, (str, bytes, float, int, bool)):
raise TypeError(
'{} is of type {}, which is unsupported'.format(
value,
type(value)
)
)
@functools.singledispatch
def _pack(value):
'''
packing rule:
        ============ ================
        Value        Packs into
        ============ ================
        ``True``     ``b'"TRUE"'``
        ``False``    ``b'"FALSE"'``
        ``65535``    ``b'"65535"'``
        ``0.32``     ``b'"0.32"'``
        ``'hello'``  ``b'"hello"'``
        ============ ================
'''
return json.dumps(str(value)).encode()
@_pack.register(bool)
def _pack_bool(value):
return b'"TRUE"' if value else b'"FALSE"'
@_pack.register(bytes)
def _pack_bytes(value):
return json.dumps(value.decode()).encode()
_pack = staticmethod(_pack)
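# Illustrative packing behaviour of the parameter classes above (example values
# chosen arbitrarily; Float packing assumes repack_to_int reinterprets the
# IEEE-754 bits of a 32-bit float as an integer):
#
#     Int32._pack(512)     # -> b'512'
#     Float._pack(0.5)     # -> b'1056964608'
#     String._pack(True)   # -> b'"TRUE"'
#     String._pack('max')  # -> b'"max"'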
| mit | -8,764,931,321,798,617,000 | 24.02439 | 79 | 0.516813 | false |
narock/agu_analytics | query_KeywordSection.py | 1 | 1271 | import math, sys
from SPARQLWrapper import SPARQLWrapper, JSON
from queries.keywordSectionCount import keywordSectionCount
from queries.createKeywordSectionQuery import createKeywordSectionQuery
# get the year to query from the user
year = input("Enter year to query: ")
year = str(year)
# there are too many results to get all at once
# here we ask the database how many results there
# are for the year we are interested in. Given that
# we can get 10,000 results per query, we do a little
# math to compute how many times we need to query the database
# to get all the results
offset = 0
limit = float(keywordSectionCount(year))
numQueries = math.ceil(limit/10000)
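# Worked example of the paging math above (illustrative numbers): if the count
# query reports 23,450 matching results, numQueries = ceil(23450 / 10000) = 3,
# and the loop below passes offsets 0, 10000 and 20000 to
# createKeywordSectionQuery.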
# setting up the query
# specifying the web address of the database
# setting the return format to JSON - JavaScript Object Notation
sparql = SPARQLWrapper("http://abstractsearch.agu.org:8890/sparql")
sparql.setReturnFormat(JSON)
# keep looping and querying until we get all the results
while (numQueries > 0):
query = createKeywordSectionQuery(year,str(offset))
sparql.setQuery(query)
offset = offset + 10000
results = sparql.query().convert()
for result in results["results"]["bindings"]:
print(result["keyword"]["value"] + " " + result["section"]["value"])
numQueries = numQueries - 1
| gpl-3.0 | 7,374,139,133,229,208,000 | 36.382353 | 71 | 0.763179 | false |
quantumlib/Cirq | cirq-core/cirq/ops/measurement_gate_test.py | 1 | 15471 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import cirq
def test_eval_repr():
# Basic safeguard against repr-inequality.
op = cirq.GateOperation(
gate=cirq.MeasurementGate(1, cirq.MeasurementKey(path=(), name='q0_1_0'), ()),
qubits=[cirq.GridQubit(0, 1)],
)
cirq.testing.assert_equivalent_repr(op)
@pytest.mark.parametrize('num_qubits', [1, 2, 4])
def test_measure_init(num_qubits):
assert cirq.MeasurementGate(num_qubits).num_qubits() == num_qubits
assert cirq.MeasurementGate(num_qubits, key='a').key == 'a'
assert cirq.MeasurementGate(num_qubits, key='a').mkey == cirq.MeasurementKey('a')
assert cirq.MeasurementGate(num_qubits, key=cirq.MeasurementKey('a')).key == 'a'
assert cirq.MeasurementGate(num_qubits, key=cirq.MeasurementKey('a')) == cirq.MeasurementGate(
num_qubits, key='a'
)
assert cirq.MeasurementGate(num_qubits, invert_mask=(True,)).invert_mask == (True,)
assert cirq.qid_shape(cirq.MeasurementGate(num_qubits)) == (2,) * num_qubits
assert cirq.qid_shape(cirq.MeasurementGate(3, qid_shape=(1, 2, 3))) == (1, 2, 3)
assert cirq.qid_shape(cirq.MeasurementGate(qid_shape=(1, 2, 3))) == (1, 2, 3)
with pytest.raises(ValueError, match='len.* >'):
cirq.MeasurementGate(5, invert_mask=(True,) * 6)
with pytest.raises(ValueError, match='len.* !='):
cirq.MeasurementGate(5, qid_shape=(1, 2))
with pytest.raises(ValueError, match='Specify either'):
cirq.MeasurementGate()
@pytest.mark.parametrize('num_qubits', [1, 2, 4])
def test_has_stabilizer_effect(num_qubits):
assert cirq.has_stabilizer_effect(cirq.MeasurementGate(num_qubits))
def test_measurement_eq():
eq = cirq.testing.EqualsTester()
eq.make_equality_group(
lambda: cirq.MeasurementGate(1, ''),
lambda: cirq.MeasurementGate(1, '', invert_mask=()),
lambda: cirq.MeasurementGate(1, '', qid_shape=(2,)),
)
eq.add_equality_group(cirq.MeasurementGate(1, 'a'))
eq.add_equality_group(cirq.MeasurementGate(1, 'a', invert_mask=(True,)))
eq.add_equality_group(cirq.MeasurementGate(1, 'a', invert_mask=(False,)))
eq.add_equality_group(cirq.MeasurementGate(1, 'b'))
eq.add_equality_group(cirq.MeasurementGate(2, 'a'))
eq.add_equality_group(cirq.MeasurementGate(2, ''))
eq.add_equality_group(
cirq.MeasurementGate(3, 'a'), cirq.MeasurementGate(3, 'a', qid_shape=(2, 2, 2))
)
eq.add_equality_group(cirq.MeasurementGate(3, 'a', qid_shape=(1, 2, 3)))
def test_measurement_full_invert_mask():
assert cirq.MeasurementGate(1, 'a').full_invert_mask() == (False,)
assert cirq.MeasurementGate(2, 'a', invert_mask=(False, True)).full_invert_mask() == (
False,
True,
)
assert cirq.MeasurementGate(2, 'a', invert_mask=(True,)).full_invert_mask() == (True, False)
@pytest.mark.parametrize('use_protocol', [False, True])
@pytest.mark.parametrize(
'gate',
[
cirq.MeasurementGate(1, 'a'),
cirq.MeasurementGate(1, 'a', invert_mask=(True,)),
cirq.MeasurementGate(1, 'a', qid_shape=(3,)),
cirq.MeasurementGate(2, 'a', invert_mask=(True, False), qid_shape=(2, 3)),
],
)
def test_measurement_with_key(use_protocol, gate):
if use_protocol:
gate1 = cirq.with_measurement_key_mapping(gate, {'a': 'b'})
else:
gate1 = gate.with_key('b')
assert gate1.key == 'b'
assert gate1.num_qubits() == gate.num_qubits()
assert gate1.invert_mask == gate.invert_mask
assert cirq.qid_shape(gate1) == cirq.qid_shape(gate)
if use_protocol:
gate2 = cirq.with_measurement_key_mapping(gate, {'a': 'a'})
else:
gate2 = gate.with_key('a')
assert gate2 == gate
@pytest.mark.parametrize(
'num_qubits, mask, bits, flipped',
[
(1, (), [0], (True,)),
(3, (False,), [1], (False, True)),
(3, (False, False), [0, 2], (True, False, True)),
],
)
def test_measurement_with_bits_flipped(num_qubits, mask, bits, flipped):
gate = cirq.MeasurementGate(num_qubits, key='a', invert_mask=mask, qid_shape=(3,) * num_qubits)
gate1 = gate.with_bits_flipped(*bits)
assert gate1.key == gate.key
assert gate1.num_qubits() == gate.num_qubits()
assert gate1.invert_mask == flipped
assert cirq.qid_shape(gate1) == cirq.qid_shape(gate)
# Flipping bits again restores the mask (but may have extended it).
gate2 = gate1.with_bits_flipped(*bits)
assert gate2.full_invert_mask() == gate.full_invert_mask()
def test_qudit_measure_qasm():
assert (
cirq.qasm(
cirq.measure(cirq.LineQid(0, 3), key='a'),
args=cirq.QasmArgs(),
default='not implemented',
)
== 'not implemented'
)
def test_qudit_measure_quil():
q0 = cirq.LineQid(0, 3)
qubit_id_map = {q0: '0'}
assert (
cirq.quil(
cirq.measure(q0, key='a'),
formatter=cirq.QuilFormatter(qubit_id_map=qubit_id_map, measurement_id_map={}),
)
== None
)
def test_measurement_gate_diagram():
# Shows key.
assert cirq.circuit_diagram_info(cirq.MeasurementGate(1)) == cirq.CircuitDiagramInfo(("M('')",))
assert cirq.circuit_diagram_info(
cirq.MeasurementGate(1, key='test')
) == cirq.CircuitDiagramInfo(("M('test')",))
# Uses known qubit count.
assert (
cirq.circuit_diagram_info(
cirq.MeasurementGate(3),
cirq.CircuitDiagramInfoArgs(
known_qubits=None,
known_qubit_count=3,
use_unicode_characters=True,
precision=None,
qubit_map=None,
),
)
== cirq.CircuitDiagramInfo(("M('')", 'M', 'M'))
)
# Shows invert mask.
assert cirq.circuit_diagram_info(
cirq.MeasurementGate(2, invert_mask=(False, True))
) == cirq.CircuitDiagramInfo(("M('')", "!M"))
# Omits key when it is the default.
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
cirq.testing.assert_has_diagram(
cirq.Circuit(cirq.measure(a, b)),
"""
a: ───M───
│
b: ───M───
""",
)
cirq.testing.assert_has_diagram(
cirq.Circuit(cirq.measure(a, b, invert_mask=(True,))),
"""
a: ───!M───
│
b: ───M────
""",
)
cirq.testing.assert_has_diagram(
cirq.Circuit(cirq.measure(a, b, key='test')),
"""
a: ───M('test')───
│
b: ───M───────────
""",
)
def test_measurement_channel():
np.testing.assert_allclose(
cirq.kraus(cirq.MeasurementGate(1)),
(np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, 1]])),
)
# yapf: disable
np.testing.assert_allclose(
cirq.kraus(cirq.MeasurementGate(2)),
(np.array([[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]),
np.array([[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]),
np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]]),
np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])))
np.testing.assert_allclose(
cirq.kraus(cirq.MeasurementGate(2, qid_shape=(2, 3))),
(np.diag([1, 0, 0, 0, 0, 0]),
np.diag([0, 1, 0, 0, 0, 0]),
np.diag([0, 0, 1, 0, 0, 0]),
np.diag([0, 0, 0, 1, 0, 0]),
np.diag([0, 0, 0, 0, 1, 0]),
np.diag([0, 0, 0, 0, 0, 1])))
# yapf: enable
def test_measurement_qubit_count_vs_mask_length():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
c = cirq.NamedQubit('c')
_ = cirq.MeasurementGate(num_qubits=1, invert_mask=(True,)).on(a)
_ = cirq.MeasurementGate(num_qubits=2, invert_mask=(True, False)).on(a, b)
_ = cirq.MeasurementGate(num_qubits=3, invert_mask=(True, False, True)).on(a, b, c)
with pytest.raises(ValueError):
_ = cirq.MeasurementGate(num_qubits=1, invert_mask=(True, False)).on(a)
with pytest.raises(ValueError):
_ = cirq.MeasurementGate(num_qubits=3, invert_mask=(True, False, True)).on(a, b)
def test_consistent_protocols():
for n in range(1, 5):
gate = cirq.MeasurementGate(num_qubits=n)
cirq.testing.assert_implements_consistent_protocols(gate)
gate = cirq.MeasurementGate(num_qubits=n, qid_shape=(3,) * n)
cirq.testing.assert_implements_consistent_protocols(gate)
def test_op_repr():
a, b = cirq.LineQubit.range(2)
assert repr(cirq.measure(a)) == 'cirq.measure(cirq.LineQubit(0))'
assert repr(cirq.measure(a, b)) == ('cirq.measure(cirq.LineQubit(0), cirq.LineQubit(1))')
assert repr(cirq.measure(a, b, key='out', invert_mask=(False, True))) == (
"cirq.measure(cirq.LineQubit(0), cirq.LineQubit(1), "
"key='out', "
"invert_mask=(False, True))"
)
def test_act_on_state_vector():
a, b = [cirq.LineQubit(3), cirq.LineQubit(1)]
m = cirq.measure(a, b, key='out', invert_mask=(True,))
args = cirq.ActOnStateVectorArgs(
target_tensor=cirq.one_hot(shape=(2, 2, 2, 2, 2), dtype=np.complex64),
available_buffer=np.empty(shape=(2, 2, 2, 2, 2)),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [1, 0]}
args = cirq.ActOnStateVectorArgs(
target_tensor=cirq.one_hot(
index=(0, 1, 0, 0, 0), shape=(2, 2, 2, 2, 2), dtype=np.complex64
),
available_buffer=np.empty(shape=(2, 2, 2, 2, 2)),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [1, 1]}
args = cirq.ActOnStateVectorArgs(
target_tensor=cirq.one_hot(
index=(0, 1, 0, 1, 0), shape=(2, 2, 2, 2, 2), dtype=np.complex64
),
available_buffer=np.empty(shape=(2, 2, 2, 2, 2)),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [0, 1]}
with pytest.raises(ValueError, match="already logged to key"):
cirq.act_on(m, args)
def test_act_on_clifford_tableau():
a, b = [cirq.LineQubit(3), cirq.LineQubit(1)]
m = cirq.measure(a, b, key='out', invert_mask=(True,))
# The below assertion does not fail since it ignores non-unitary operations
cirq.testing.assert_all_implemented_act_on_effects_match_unitary(m)
args = cirq.ActOnCliffordTableauArgs(
tableau=cirq.CliffordTableau(num_qubits=5, initial_state=0),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [1, 0]}
args = cirq.ActOnCliffordTableauArgs(
tableau=cirq.CliffordTableau(num_qubits=5, initial_state=8),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [1, 1]}
args = cirq.ActOnCliffordTableauArgs(
tableau=cirq.CliffordTableau(num_qubits=5, initial_state=10),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [0, 1]}
with pytest.raises(ValueError, match="already logged to key"):
cirq.act_on(m, args)
def test_act_on_stabilizer_ch_form():
a, b = [cirq.LineQubit(3), cirq.LineQubit(1)]
m = cirq.measure(a, b, key='out', invert_mask=(True,))
# The below assertion does not fail since it ignores non-unitary operations
cirq.testing.assert_all_implemented_act_on_effects_match_unitary(m)
args = cirq.ActOnStabilizerCHFormArgs(
state=cirq.StabilizerStateChForm(num_qubits=5, initial_state=0),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [1, 0]}
args = cirq.ActOnStabilizerCHFormArgs(
state=cirq.StabilizerStateChForm(num_qubits=5, initial_state=8),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [1, 1]}
args = cirq.ActOnStabilizerCHFormArgs(
state=cirq.StabilizerStateChForm(num_qubits=5, initial_state=10),
qubits=cirq.LineQubit.range(5),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [0, 1]}
with pytest.raises(ValueError, match="already logged to key"):
cirq.act_on(m, args)
def test_act_on_qutrit():
a, b = [cirq.LineQid(3, dimension=3), cirq.LineQid(1, dimension=3)]
m = cirq.measure(a, b, key='out', invert_mask=(True,))
args = cirq.ActOnStateVectorArgs(
target_tensor=cirq.one_hot(
index=(0, 2, 0, 2, 0), shape=(3, 3, 3, 3, 3), dtype=np.complex64
),
available_buffer=np.empty(shape=(3, 3, 3, 3, 3)),
qubits=cirq.LineQid.range(5, dimension=3),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [2, 2]}
args = cirq.ActOnStateVectorArgs(
target_tensor=cirq.one_hot(
index=(0, 1, 0, 2, 0), shape=(3, 3, 3, 3, 3), dtype=np.complex64
),
available_buffer=np.empty(shape=(3, 3, 3, 3, 3)),
qubits=cirq.LineQid.range(5, dimension=3),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [2, 1]}
args = cirq.ActOnStateVectorArgs(
target_tensor=cirq.one_hot(
index=(0, 2, 0, 1, 0), shape=(3, 3, 3, 3, 3), dtype=np.complex64
),
available_buffer=np.empty(shape=(3, 3, 3, 3, 3)),
qubits=cirq.LineQid.range(5, dimension=3),
prng=np.random.RandomState(),
log_of_measurement_results={},
)
cirq.act_on(m, args)
assert args.log_of_measurement_results == {'out': [0, 2]}
| apache-2.0 | -8,647,133,064,538,131,000 | 34.263761 | 100 | 0.592455 | false |
tomchuk/meetup | tests/functional/fixtures.py | 1 | 2849 | import os
import time
import requests
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
import pytest
from splinter.driver.webdriver import chrome
class WebDriver(chrome.WebDriver):
django_live_server_url = ''
def _resolve_url(self, url, *args, **kwargs):
try:
url = reverse(url, args=args, kwargs=kwargs)
except NoReverseMatch:
pass
return url
def visit(self, url, *args, **kwargs):
url = self._resolve_url(url, *args, **kwargs)
if not url.startswith('http'):
url = self.django_live_server_url + url
super(WebDriver, self).visit(url)
def is_at(self, url, *args, **kwargs):
url = self._resolve_url(url, *args, **kwargs)
for x in range(10):
browser_url = self.url.rsplit('#')[0].rsplit('?')[0]
if browser_url.endswith(url):
return True
time.sleep(0.5)
return False
@pytest.fixture
def browser(request, live_server):
service_args = ['--no-proxy-server', '--noerrdialogs']
webdriver = WebDriver(wait_time=2, service_args=service_args)
webdriver.django_live_server_url = str(live_server)
webdriver.main_window_handle = webdriver.driver.current_window_handle
webdriver.driver.set_window_size(1280, 1024)
def fin():
for window_handle in webdriver.driver.window_handles:
if window_handle != webdriver.main_window_handle:
webdriver.driver.switch_to_window(window_handle)
time.sleep(0.5)
webdriver.driver.close()
webdriver.driver.switch_to_window(webdriver.main_window_handle)
webdriver.driver.close()
request.addfinalizer(fin)
return webdriver
@pytest.fixture
def pytestbdd_feature_base_dir():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'features')
@pytest.fixture
def facebook_user(request):
"""
A test email/pass for logging into the facebook auth dialog
"""
fb_url = 'https://graph.facebook.com'
fb_test_user_path = '/{app_id}/accounts/test-users'
fb_del_user_path = '/{user_id}'
fb_qs = '?access_token={app_id}|{app_secret}'
url = (fb_url + fb_test_user_path + fb_qs).format(
app_id=settings.FB_APP_ID,
app_secret=settings.FB_APP_SECRET,
)
response = requests.post(url)
user_data = response.json()
if 'error' in user_data:
pytest.fail('Facebook API error')
def fin():
url = (fb_url + fb_del_user_path + fb_qs).format(
app_id=settings.FB_APP_ID,
app_secret=settings.FB_APP_SECRET,
user_id=user_data['id']
)
requests.delete(url)
request.addfinalizer(fin)
return {
'email': user_data['email'],
'pass': user_data['password']
}
| mit | -3,229,707,179,660,637,700 | 28.677083 | 79 | 0.620218 | false |
repotvsupertuga/tvsupertuga.repository | plugin.video.TVsupertuga/resources/lib/plugins/b98tv.py | 1 | 9567 | """
Copyright (C) 2018 MuadDib
----------------------------------------------------------------------------
"THE BEER-WARE LICENSE" (Revision 42):
@tantrumdev wrote this file. As long as you retain this notice you can do
whatever you want with this stuff. Just Ask first when not released through
the tools and parser GIT. If we meet some day, and you think this stuff is
worth it, you can buy him a beer in return. - Muad'Dib
----------------------------------------------------------------------------
Version:
2018.7.2:
- Added Clear Cache function
- Minor update on fetch cache returns
2018.6.29:
- Added caching to primary menus (Cache time is 3 hours)
Examples:
<dir>
<title>Cartoon Series</title>
<meta>
<summary>One Click Play section of cartoon tv series</summary>
</meta>
<B98>serieslist/videos_categories/series</B98>
</dir>
<dir>
<title>Cartoon Studios</title>
<meta>
<summary>One Click Play section of cartoon tv series based on the animation studio that produced them</summary>
</meta>
<B98>serieslist/videos_categories/studios</B98>
</dir>
"""
import __builtin__
import base64,time
import json,re,requests,os,traceback,urlparse
import koding
import xbmc,xbmcaddon,xbmcgui
from koding import route
from resources.lib.plugin import Plugin
from resources.lib.util import dom_parser
from resources.lib.util.context import get_context_items
from resources.lib.util.xml import JenItem, JenList, display_list
from unidecode import unidecode
CACHE_TIME = 10800 # change to wanted cache time in seconds
addon_id = xbmcaddon.Addon().getAddonInfo('id')
addon_fanart = xbmcaddon.Addon().getAddonInfo('fanart')
addon_icon = xbmcaddon.Addon().getAddonInfo('icon')
next_icon = os.path.join(xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('path')), 'resources', 'media', 'next.png')
User_Agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
base_main_link = 'https://www.b98.tv/'
class B98TV(Plugin):
name = "b98tv"
priority = 200
def process_item(self, item_xml):
if "<B98>" in item_xml:
item = JenItem(item_xml)
if "serieslist" in item.get("B98", ""):
result_item = {
'label': item["title"],
'icon': item.get("thumbnail", addon_icon),
'fanart': item.get("fanart", addon_fanart),
'mode': "B98Series",
'url': item.get("B98", ""),
'folder': True,
'imdb': "0",
'content': "files",
'season': "0",
'episode': "0",
'info': {},
'year': "0",
'context': get_context_items(item),
"summary": item.get("summary", None)
}
elif "playtoon/" in item.get("B98", ""):
result_item = {
'label': item["title"],
'icon': item.get("thumbnail", addon_icon),
'fanart': item.get("fanart", addon_fanart),
'mode': "B98Play",
'url': item.get("B98", ""),
'folder': False,
'imdb': "0",
'content': "files",
'season': "0",
'episode': "0",
'info': {},
'year': "0",
'context': get_context_items(item),
"summary": item.get("summary", None)
}
result_item['fanart_small'] = result_item["fanart"]
return result_item
def clear_cache(self):
dialog = xbmcgui.Dialog()
if dialog.yesno(xbmcaddon.Addon().getAddonInfo('name'), "Clear B98.tv Plugin Cache?"):
koding.Remove_Table("b98_com_plugin")
@route(mode='B98Series', args=["url"])
def get_B98Main_Processor(url):
url = url.replace('serieslist/', '')
url = urlparse.urljoin(base_main_link, url)
xml = fetch_from_db(url)
if not xml:
xml = ""
try:
html = requests.get(url).content
item_list = dom_parser.parseDOM(html, 'div', attrs={'class': 'item col-lg-3 col-md-3 col-sm-12 '})
for content in item_list:
link = re.compile('href="(.+?)"',re.DOTALL).findall(content)[0]
icon, title = re.compile('img src="(.+?) alt="(.+?)"',re.DOTALL).findall(content)[0]
try:
link = link.replace(base_main_link,'')
title = replaceHTMLCodes(title)
if 'videos_categories' in link:
if 'Darkwing Duck' not in title: # Why Dandy? Why?
xml += "<dir>"\
" <title>%s</title>"\
" <meta>"\
" <summary>%s</summary>"\
" </meta>"\
" <B98>serieslist/%s</B98>"\
" <thumbnail>%s</thumbnail>"\
"</dir>" % (title,title,link,icon)
else:
xml += "<item>"\
" <title>%s</title>"\
" <meta>"\
" <summary>%s</summary>"\
" </meta>"\
" <B98>playtoon/%s</B98>"\
" <thumbnail>%s</thumbnail>"\
"</item>" % (title,title,link,icon)
except:
continue
try:
navi_link = re.compile('a class="next page-numbers" href="(.+?)"',re.DOTALL).findall(html)[0]
xml += "<dir>"\
" <title>Next Page >></title>"\
" <meta>"\
" <summary>Click here to see the next page of awesome content!</summary>"\
" </meta>"\
" <B98>serieslist/%s</B98>"\
" <thumbnail>%s</thumbnail>"\
"</dir>" % (navi_link,next_icon)
except:
pass
except:
pass
save_to_db(xml, url)
jenlist = JenList(xml)
display_list(jenlist.get_list(), jenlist.get_content_type())
@route(mode='B98Play', args=["url"])
def get_B98Play(url):
url = url.replace('playtoon/', '')
try:
url = urlparse.urljoin(base_main_link, url)
html = requests.get(url).content
vid_url = re.compile('file: "(.*?)"',re.DOTALL).findall(html)[0]
if 'http:' in vid_url:
vid_url = vid_url.replace('http:', 'https:')
ep_title = re.compile('title>(.*?)\|',re.DOTALL).findall(html)[0]
ep_icon = re.compile('og:image" content="(.*?)"',re.DOTALL).findall(html)[0]
vid_url = vid_url + '|User-Agent=' + User_Agent
xbmc.executebuiltin("PlayMedia(%s)" % (vid_url))
quit()
return
except:
pass
def save_to_db(item, url):
if not item or not url:
return False
try:
koding.reset_db()
koding.Remove_From_Table(
"b98_com_plugin",
{
"url": url
})
koding.Add_To_Table("b98_com_plugin",
{
"url": url,
"item": base64.b64encode(item),
"created": time.time()
})
except:
return False
def fetch_from_db(url):
koding.reset_db()
b98_plugin_spec = {
"columns": {
"url": "TEXT",
"item": "TEXT",
"created": "TEXT"
},
"constraints": {
"unique": "url"
}
}
koding.Create_Table("b98_com_plugin", b98_plugin_spec)
match = koding.Get_From_Table(
"b98_com_plugin", {"url": url})
if match:
match = match[0]
if not match["item"]:
return None
created_time = match["created"]
if created_time and float(created_time) + CACHE_TIME >= time.time():
match_item = match["item"]
try:
result = base64.b64decode(match_item)
except:
return None
return result
else:
return None
else:
return None
def replaceHTMLCodes(txt):
txt = re.sub("(&#[0-9]+)([^;^0-9]+)", "\\1;\\2", txt)
txt = txt.replace(""", "\"").replace("&", "&")
txt = txt.replace('‘','\'').replace('’','\'').replace('&','&').replace('…','....')
txt = txt.strip()
return txt
def replaceEscapeCodes(txt):
try:
import html.parser as html_parser
except:
import HTMLParser as html_parser
txt = html_parser.HTMLParser().unescape(txt)
return txt
def remove_non_ascii(text):
try:
text = text.decode('utf-8').replace(u'\xc2', u'A').replace(u'\xc3', u'A').replace(u'\xc4', u'A').replace(u'\xe2', u'a')
except:
pass
return unidecode(text)
| gpl-2.0 | -4,601,575,278,499,375,000 | 34.565056 | 133 | 0.464513 | false |
nagaozen/my-os-customizations | usr/share/gtksourceview-2.0/language-specs/convert.py | 1 | 17436 | #!/usr/bin/env python2
import xml.dom.minidom as dom
import cgi
default_styles = {
'Comment' : 'def:comment',
'String' : 'def:string',
'Preprocessor' : 'def:preprocessor',
'Keyword' : 'def:keyword',
'Data Type' : 'def:type',
'Decimal' : 'def:decimal',
'Specials' : 'def:specials',
'Function' : 'def:function',
'Base-N Integer' : 'def:base-n-integer',
'Floating Point' : 'def:floating-point',
'Floating point' : 'def:floating-point',
'Others' : None,
'Other' : None,
'Others 2' : None,
'Others 3' : None,
}
def escape_escape_char(ch):
if ch == '\\':
return '\\\\'
elif ch in ['@']:
return ch
raise RuntimeError("don't know how to escape '%s'" % (ch,))
def escape_regex(s):
return cgi.escape(s)
def normalize_id(id):
if id == "C#":
return "c-sharp"
elif id == ".desktop":
return "desktop"
elif id == ".ini":
return "ini"
elif id == "C++ Line Comment":
return "cpp-line-comment"
elif id == "Markup (inline)":
return "markup-inline"
elif id == "Markup (block)":
return "markup-block"
else:
return id.replace(', ', '-').replace('.', '-').replace('*', '-').replace(',', '-').replace(' ', '-').replace('/', '-').replace('#', '-').lower()
class LangFile(object):
def __init__(self, id, name, _name, section, _section, mimetypes, globs, filename):
object.__init__(self)
assert name or _name
assert section or _section
self.id = normalize_id(id or name or _name)
self.name = name
self._name = _name
self.section = section
self._section = _section
self.mimetypes = mimetypes
self.globs = globs
self.filename = filename
self.contexts = []
self.escape_char = None
def set_esc_char(self, char):
self.escape_char = char
def add_context(self, ctx):
self.contexts.append(ctx)
def format_header(self, indent):
string = '<?xml version="1.0" encoding="UTF-8"?>\n<language id="%s"' % (self.id,)
if self.name:
string += ' name="%s"' % (self.name,)
else:
string += ' _name="%s"' % (self._name,)
string += ' version="2.0"'
if self.section:
string += ' section="%s"' % (self.section,)
else:
string += ' _section="%s"' % (self._section,)
string += '>\n'
if self.mimetypes or self.globs:
string += indent + '<metadata>\n'
if self.mimetypes:
string += 2*indent + '<property name="mimetypes">%s</property>\n' % (cgi.escape(self.mimetypes),)
if self.globs:
string += 2*indent + '<property name="globs">%s</property>\n' % (cgi.escape(self.globs),)
string += indent + '</metadata>\n\n'
return string
def format_footer(self, indent):
return '</language>\n'
def format_styles(self, indent):
string = indent + "<styles>\n"
styles = {}
for ctx in self.contexts:
map_to = default_styles[ctx.style_name]
styles[ctx.style] = [ctx.style_name, map_to]
for s in styles:
id = s
name, map_to = styles[s]
if map_to:
string += indent*2 + '<style id="%s" _name="%s" map-to="%s"/>\n' % (id, name, map_to)
else:
string += indent*2 + '<style id="%s" _name="%s"/>\n' % (id, name)
string += indent + "</styles>\n\n"
return string
def format_contexts(self, indent):
string = indent + '<definitions>\n'
if self.escape_char and self.escape_char != '\\':
char = escape_escape_char(self.escape_char)
string += indent*2 + '<context id="generated-escape">\n'
string += indent*3 + '<match>%s.</match>\n' % (char,)
string += indent*2 + '</context>\n'
string += indent*2 + '<context id="generated-line-escape">\n'
string += indent*3 + '<start>%s$</start>\n' % (char,)
string += indent*3 + '<end>^</end>\n'
string += indent*2 + '</context>\n'
for ctx in self.contexts:
if self.escape_char:
if self.escape_char != '\\':
esc_ctx = 'generated-escape'
line_esc_ctx = 'generated-line-escape'
else:
esc_ctx = 'def:escape'
line_esc_ctx = 'def:line-continue'
else:
esc_ctx = None
line_esc_ctx = None
string += ctx.format(indent, esc_ctx, line_esc_ctx)
string += indent*2 + '<context id="%s">\n' % (self.id,)
string += indent*3 + '<include>\n'
for ctx in self.contexts:
string += indent*4 + '<context ref="%s"/>\n' % (ctx.id,)
string += indent*3 + '</include>\n'
string += indent*2 + '</context>\n'
string += indent + '</definitions>\n'
return string
def format(self, indent=' '):
string = self.format_header(indent)
string += self.format_styles(indent)
string += self.format_contexts(indent)
string += self.format_footer(indent)
return string
class Context(object):
def __init__(self, name, _name, style):
object.__init__(self)
assert (name or _name) and style
self.name = name
self._name = _name
self.style_name = style
self.style = style.replace(' ', '-').lower()
self.id = normalize_id(name or _name)
self.is_container = False
def format(self, indent, esc_ctx, line_esc_ctx):
print "Implement me: %s.format()" % (type(self).__name__,)
return indent*2 + '<context id="%s"/>\n' % (self.id)
def format_escape(self, indent, esc_ctx, line_esc_ctx):
string = ""
if self.is_container and esc_ctx is not None:
string += indent*3 + '<include>\n'
string += indent*4 + '<context ref="%s"/>\n' % (esc_ctx,)
string += indent*4 + '<context ref="%s"/>\n' % (line_esc_ctx,)
string += indent*3 + '</include>\n'
return string
class KeywordList(Context):
def __init__(self, name, _name, style, keywords, case_sensitive,
match_empty_string_at_beginning,
match_empty_string_at_end,
beginning_regex, end_regex):
Context.__init__(self, name, _name, style)
self.keywords = keywords
self.case_sensitive = case_sensitive # ???
self.match_empty_string_at_beginning = match_empty_string_at_beginning
self.match_empty_string_at_end = match_empty_string_at_end
self.beginning_regex = beginning_regex
self.end_regex = end_regex
def format(self, indent, esc_ctx, line_esc_ctx):
string = indent*2 + '<context id="%s" style-ref="%s">\n' % (self.id, self.style)
if self.beginning_regex:
string += indent*3 + '<prefix>%s</prefix>\n' % (escape_regex(self.beginning_regex),)
elif not self.match_empty_string_at_beginning:
string += indent*3 + '<prefix></prefix>\n'
if self.end_regex:
string += indent*3 + '<suffix>%s</suffix>\n' % (escape_regex(self.end_regex),)
elif not self.match_empty_string_at_end:
string += indent*3 + '<suffix></suffix>\n'
for kw in self.keywords:
string += indent*3 + '<keyword>%s</keyword>\n' % (escape_regex(kw),)
string += self.format_escape(indent, esc_ctx, line_esc_ctx)
string += indent*2 + '</context>\n'
return string
class PatternItem(Context):
def __init__(self, name, _name, style, pattern):
Context.__init__(self, name, _name, style)
assert pattern
self.pattern = pattern
def format(self, indent, esc_ctx, line_esc_ctx):
string = indent*2 + '<context id="%s" style-ref="%s">\n' % (self.id, self.style)
string += indent*3 + '<match>%s</match>\n' % (escape_regex(self.pattern),)
string += self.format_escape(indent, esc_ctx, line_esc_ctx)
string += indent*2 + '</context>\n'
return string
class LineComment(Context):
def __init__(self, name, _name, style, start):
Context.__init__(self, name, _name, style)
assert start
self.start = start
self.is_container = True
def format(self, indent, esc_ctx, line_esc_ctx):
string = indent*2 + '<context id="%s" style-ref="%s" end-at-line-end="true">\n' % (self.id, self.style)
string += indent*3 + '<start>%s</start>\n' % (escape_regex(self.start),)
string += self.format_escape(indent, esc_ctx, line_esc_ctx)
string += indent*2 + '</context>\n'
return string
class BlockComment(Context):
def __init__(self, name, _name, style, start, end):
Context.__init__(self, name, _name, style)
assert start and end
self.start = start
self.end = end
self.is_container = True
def format(self, indent, esc_ctx, line_esc_ctx):
string = indent*2 + '<context id="%s" style-ref="%s">\n' % (self.id, self.style)
string += indent*3 + '<start>%s</start>\n' % (escape_regex(self.start),)
string += indent*3 + '<end>%s</end>\n' % (escape_regex(self.end),)
string += self.format_escape(indent, esc_ctx, line_esc_ctx)
string += indent*2 + '</context>\n'
return string
class String(Context):
def __init__(self, name, _name, style, start, end, end_at_line_end):
Context.__init__(self, name, _name, style)
assert start and end
self.start = start
if end and end.endswith("\\n"):
end = end[:-2]
end_at_line_end = True
self.end = end
self.end_at_line_end = end_at_line_end
self.is_container = True
def format(self, indent, esc_ctx, line_esc_ctx):
string = indent*2 + '<context id="%s" style-ref="%s"' % (self.id, self.style)
if self.end_at_line_end:
string += ' end-at-line-end="true"'
string += '>\n'
if self.start:
string += indent*3 + '<start>%s</start>\n' % (escape_regex(self.start),)
if self.end:
string += indent*3 + '<end>%s</end>\n' % (escape_regex(self.end),)
string += self.format_escape(indent, esc_ctx, line_esc_ctx)
string += indent*2 + '</context>\n'
return string
class SyntaxItem(Context):
def __init__(self, name, _name, style, start, end):
Context.__init__(self, name, _name, style)
assert start and end
self.start = start
self.end = end
self.end_at_line_end = False
if end and end.endswith("\\n"):
self.end = end[:-2]
self.end_at_line_end = True
self.is_container = True
def format(self, indent, esc_ctx, line_esc_ctx):
string = indent*2 + '<context id="%s" style-ref="%s"' % (self.id, self.style)
if self.end_at_line_end:
string += ' end-at-line-end="true"'
string += '>\n'
if self.start:
string += indent*3 + '<start>%s</start>\n' % (escape_regex(self.start),)
if self.end:
string += indent*3 + '<end>%s</end>\n' % (escape_regex(self.end),)
string += self.format_escape(indent, esc_ctx, line_esc_ctx)
string += indent*2 + '</context>\n'
return string
def first_child(node):
child = node.firstChild
while child is not None and child.nodeType != dom.Node.ELEMENT_NODE:
child = child.nextSibling
return child
def next_sibling(node):
next = node.nextSibling
while next is not None and next.nodeType != dom.Node.ELEMENT_NODE:
next = next.nextSibling
return next
def parseLineComment(cur, name, _name, style):
child = first_child(cur)
assert child is not None and child.tagName == "start-regex"
return LineComment(name, _name, style, child.firstChild.nodeValue)
def parseBlockComment(cur, name, _name, style):
start_regex = None
end_regex = None
child = first_child(cur)
while child is not None:
if child.tagName == "start-regex":
start_regex = child.firstChild.nodeValue
elif child.tagName == "end-regex":
end_regex = child.firstChild.nodeValue
child = next_sibling(child)
assert start_regex is not None
assert end_regex is not None
return BlockComment(name, _name, style, start_regex, end_regex)
def parseString(cur, name, _name, style):
start_regex = None
end_regex = None
end_at_line_end = True
prop = cur.getAttribute("end-at-line-end")
if prop:
if prop in ["TRUE", "1"]:
end_at_line_end = True
else:
end_at_line_end = False
child = first_child(cur)
while child is not None:
if child.tagName == "start-regex":
start_regex = child.firstChild.nodeValue
elif child.tagName == "end-regex":
end_regex = child.firstChild.nodeValue
child = next_sibling(child)
assert start_regex is not None
assert end_regex is not None
return String(name, _name, style, start_regex, end_regex, end_at_line_end)
def parseKeywordList(cur, name, _name, style):
case_sensitive = True
match_empty_string_at_beginning = True
match_empty_string_at_end = True
beginning_regex = None
end_regex = None
keywords = []
prop = cur.getAttribute("case-sensitive")
if prop:
if prop in ["TRUE", "1"]:
case_sensitive = True
else:
case_sensitive = False
prop = cur.getAttribute("match-empty-string-at-beginning")
if prop:
if prop in ["TRUE", "1"]:
match_empty_string_at_beginning = True
else:
match_empty_string_at_beginning = False
prop = cur.getAttribute("match-empty-string-at-end")
if prop:
if prop in ["TRUE", "1"]:
match_empty_string_at_end = True
else:
match_empty_string_at_end = False
prop = cur.getAttribute("beginning-regex")
if prop:
beginning_regex = prop
prop = cur.getAttribute("end-regex")
if prop:
end_regex = prop
child = first_child(cur)
while child is not None:
if child.tagName == "keyword":
keywords.append(child.firstChild.nodeValue)
child = next_sibling(child)
assert keywords
return KeywordList(name, _name, style, keywords, case_sensitive,
match_empty_string_at_beginning,
match_empty_string_at_end,
beginning_regex, end_regex)
def parsePatternItem(cur, name, _name, style):
child = first_child(cur)
assert child is not None and child.tagName == "regex"
return PatternItem(name, _name, style, child.firstChild.nodeValue)
def parseSyntaxItem(cur, name, _name, style):
start_regex = None
end_regex = None
child = first_child(cur)
while child is not None:
if child.tagName == "start-regex":
start_regex = child.firstChild.nodeValue
elif child.tagName == "end-regex":
end_regex = child.firstChild.nodeValue
child = next_sibling(child)
assert start_regex is not None
assert end_regex is not None
return SyntaxItem(name, _name, style, start_regex, end_regex)
def parseTag(cur):
_name = None
name = None
_name = cur.getAttribute("_name")
name = cur.getAttribute("name")
assert name or _name
style = cur.getAttribute("style") or "Normal"
if cur.tagName == "line-comment":
ctx = parseLineComment(cur, name, _name, style)
elif cur.tagName == "block-comment":
ctx = parseBlockComment(cur, name, _name, style)
elif cur.tagName == "string":
ctx = parseString(cur, name, _name, style)
elif cur.tagName == "keyword-list":
ctx = parseKeywordList(cur, name, _name, style)
elif cur.tagName == "pattern-item":
ctx = parsePatternItem(cur, name, _name, style)
elif cur.tagName == "syntax-item":
ctx = parseSyntaxItem(cur, name, _name, style)
else:
print "Unknown tag: %s" % (cur.tagName,)
ctx = None
return ctx
def parse_file(filename):
doc = dom.parse(filename)
node = doc.documentElement
contexts = []
esc_char = None
assert node.tagName == "language"
lang_file = LangFile(node.getAttribute("id"),
node.getAttribute("name"),
node.getAttribute("_name"),
node.getAttribute("section"),
node.getAttribute("_section"),
node.getAttribute("mimetypes"),
node.getAttribute("globs"),
filename)
node = first_child(node)
assert node is not None
while node is not None:
if node.tagName == "escape-char":
lang_file.set_esc_char(node.firstChild.nodeValue)
else:
lang_file.add_context(parseTag(node))
node = next_sibling(node)
return lang_file
if __name__ == '__main__':
import sys
if not sys.argv[1:]:
print "usage: %s LANG_FILE" % (sys.argv[0])
sys.exit(1)
lang_file = parse_file(sys.argv[1])
sys.stdout.write(lang_file.format())
| gpl-3.0 | 1,176,061,598,190,677,800 | 32.725338 | 152 | 0.558385 | false |
qedsoftware/commcare-hq | corehq/ex-submodules/casexml/apps/case/templatetags/case_tags.py | 1 | 17357 | from functools import partial
import copy
import datetime
import numbers
import pytz
import json
import types
from django import template
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.utils.html import escape
from corehq.apps.products.models import SQLProduct
from couchdbkit import ResourceNotFound
from corehq.form_processor.interfaces.dbaccessors import LedgerAccessors
register = template.Library()
DYNAMIC_CASE_PROPERTIES_COLUMNS = 4
class CaseDisplayWrapper(object):
def __init__(self, case):
self.case = case
def actions(self):
actions = self.case.to_json()['actions']
return actions.reverse()
def to_full_dict(self):
"""
Include calculated properties that need to be available to the case
details display by overriding this method.
"""
json = self.case.to_json()
json['status'] = _('Closed') if self.case.closed else _('Open')
return json
def get_display_config(self):
return [
{
"layout": [
[
{
"expr": "name",
"name": _("Name"),
},
{
"expr": "opened_on",
"name": _("Opened On"),
"parse_date": True,
'is_phone_time': True,
},
{
"expr": "modified_on",
"name": _("Modified On"),
"parse_date": True,
"is_phone_time": True,
},
{
"expr": "closed_on",
"name": _("Closed On"),
"parse_date": True,
"is_phone_time": True,
},
],
[
{
"expr": "type",
"name": _("Case Type"),
"format": '<code>{0}</code>',
},
{
"expr": "user_id",
"name": _("Last Submitter"),
"process": 'doc_info',
},
{
"expr": "owner_id",
"name": _("Owner"),
"process": 'doc_info',
},
{
"expr": "_id",
"name": _("Case ID"),
},
],
],
}
]
def dynamic_properties(self):
# pop seen properties off of remaining case properties
dynamic_data = self.case.dynamic_case_properties()
# hack - as of commcare 2.0, external id is basically a dynamic property
# so also check and add it here
if self.case.external_id:
dynamic_data['external_id'] = self.case.external_id
return dynamic_data
@property
def related_cases_columns(self):
return [
{
'name': _('Status'),
'expr': "status"
},
{
'name': _('Case Type'),
'expr': "type",
},
{
'name': _('Date Opened'),
'expr': "opened_on",
'parse_date': True,
"is_phone_time": True,
},
{
'name': _('Date Modified'),
'expr': "modified_on",
'parse_date': True,
"is_phone_time": True,
}
]
@property
def related_type_info(self):
return None
class SupplyPointDisplayWrapper(CaseDisplayWrapper):
def to_full_dict(self):
from corehq.apps.locations.models import SQLLocation
data = super(SupplyPointDisplayWrapper, self).to_full_dict()
data.update({
'location_type': None,
'location_site_code': None,
'location_parent_name': None,
})
try:
location = SQLLocation.objects.get(location_id=self.case.location_id)
except (SQLLocation.DoesNotExist, AttributeError):
pass
else:
data['location_type'] = location.location_type_name
data['location_site_code'] = location.site_code
if location.parent:
data['location_parent_name'] = location.parent.name
return data
def get_display_config(self):
return [
{
"layout": [
[
{
"expr": "name",
"name": _("Name"),
},
{
"expr": "location_type",
"name": _("Type"),
},
{
"expr": "location_site_code",
"name": _("Code"),
},
#{
#"expr": "last_reported",
#"name": _("Last Reported"),
#},
],
[
{
"expr": "location_parent_name",
"name": _("Parent Location"),
},
{
"expr": "owner_id",
"name": _("Location"),
"process": "doc_info",
},
],
],
}
]
def get_wrapped_case(case):
from corehq.apps.commtrack import const
wrapper_class = {
const.SUPPLY_POINT_CASE_TYPE: SupplyPointDisplayWrapper,
}.get(case.type, CaseDisplayWrapper)
return wrapper_class(case)
def normalize_date(val):
# Can't use isinstance since datetime is a subclass of date.
if type(val) == datetime.date:
return datetime.datetime.combine(val, datetime.time.min)
return val
@register.simple_tag
def render_case(case, options):
"""
Uses options since Django 1.3 doesn't seem to support templatetag kwargs.
Change to kwargs when we're on a version of Django that does.
"""
from corehq.apps.hqwebapp.templatetags.proptable_tags import get_tables_as_rows, get_default_definition
wrapped_case = get_wrapped_case(case)
timezone = options.get('timezone', pytz.utc)
timezone = timezone.localize(datetime.datetime.utcnow()).tzinfo
_get_tables_as_rows = partial(get_tables_as_rows, timezone=timezone)
display = options.get('display') or wrapped_case.get_display_config()
show_transaction_export = options.get('show_transaction_export') or False
get_case_url = options['get_case_url']
data = copy.deepcopy(wrapped_case.to_full_dict())
default_properties = _get_tables_as_rows(data, display)
dynamic_data = wrapped_case.dynamic_properties()
for section in display:
for row in section['layout']:
for item in row:
dynamic_data.pop(item.get("expr"), None)
if dynamic_data:
dynamic_keys = sorted(dynamic_data.keys())
definition = get_default_definition(
dynamic_keys, num_columns=DYNAMIC_CASE_PROPERTIES_COLUMNS)
dynamic_properties = _get_tables_as_rows(dynamic_data, definition)
else:
dynamic_properties = None
the_time_is_now = datetime.datetime.utcnow()
tz_offset_ms = int(timezone.utcoffset(the_time_is_now).total_seconds()) * 1000
tz_abbrev = timezone.localize(the_time_is_now).tzname()
# ledgers
def _product_name(product_id):
try:
return SQLProduct.objects.get(product_id=product_id).name
except SQLProduct.DoesNotExist:
return (_('Unknown Product ("{}")').format(product_id))
ledger_map = LedgerAccessors(case.domain).get_case_ledger_state(case.case_id, ensure_form_id=True)
for section, product_map in ledger_map.items():
product_tuples = sorted(
(_product_name(product_id), product_map[product_id]) for product_id in product_map
)
ledger_map[section] = product_tuples
return render_to_string("case/partials/single_case.html", {
"default_properties": default_properties,
"default_properties_options": {
"style": "table"
},
"dynamic_properties": dynamic_properties,
"dynamic_properties_options": {
"style": "table"
},
"case": wrapped_case.case,
"case_actions": mark_safe(json.dumps(wrapped_case.actions())),
"timezone": timezone,
"tz_abbrev": tz_abbrev,
"case_hierarchy_options": {
"show_view_buttons": True,
"get_case_url": get_case_url,
"timezone": timezone
},
"ledgers": ledger_map,
"timezone_offset": tz_offset_ms,
"show_transaction_export": show_transaction_export,
"xform_api_url": reverse('single_case_forms', args=[case.domain, case.case_id]),
})
def get_inverse(val):
if isinstance(val, (datetime.datetime, datetime.date)):
return datetime.datetime.max - val
elif isinstance(val, numbers.Number):
return 10 ** 20
elif isinstance(val, (types.NoneType, bool)):
return not val
else:
raise Exception("%r has uninversable type: %s" % (val, type(val)))
def sortkey(child, type_info=None):
"""Return sortkey based on sort order defined in type_info, or use default
based on open/closed and opened_on/closed_on dates.
"""
type_info = type_info or {}
case = child['case']
if case.closed:
key = [1]
try:
for attr, direction in type_info[case.type]['closed_sortkeys']:
val = normalize_date(getattr(case, attr))
if direction.lower() == 'desc':
val = get_inverse(val)
key.append(val)
except KeyError:
key.append(datetime.datetime.max - case.closed_on)
else:
key = [0]
try:
for attr, direction in type_info[case.type]['open_sortkeys']:
val = normalize_date(getattr(case, attr))
if direction.lower() == 'desc':
val = get_inverse(val)
key.append(val)
except KeyError:
key.append(case.opened_on or datetime.datetime.min)
return key
def get_session_data(case, current_case, type_info):
# this logic should ideally be implemented in subclasses of
# CommCareCase
if type_info and case.type in type_info:
attr = type_info[case.type]['case_id_attr']
return {
attr: case.case_id,
'case_id': current_case.case_id
}
else:
return {
'case_id': case.case_id
}
TREETABLE_INDENT_PX = 19
def process_case_hierarchy(case_output, get_case_url, type_info):
current_case = case_output['case']
submit_url_root = reverse('receiver_post', args=[current_case.domain])
form_url_root = reverse('cloudcare_main', args=[current_case.domain, ''])
def process_output(case_output, depth=0):
for c in case_output['child_cases']:
process_output(c, depth=depth + 1)
case = case_output['case']
common_data = {
'indent_px': depth * TREETABLE_INDENT_PX,
'submit_url_root': submit_url_root,
'form_url_root': form_url_root,
'view_url': get_case_url(case.case_id),
'session_data': get_session_data(case, current_case, type_info)
}
data = type_info.get(case.type, {})
if 'description_property' in data:
data['description'] = getattr(
case, data['description_property'], None)
if 'edit_session_data' in data:
data['session_data'].update(data['edit_session_data'])
data.update(common_data)
case.edit_data = data
if 'child_type' in data and not case.closed:
child_type = data['child_type']
child_data = type_info.get(child_type, {})
child_data.update(common_data)
child_data.update({
"link_text": _("Add %(case_type)s") % {
'case_type': child_data.get('type_name', child_type)
},
"parent_node_id": case.case_id,
})
if 'create_session_data' in child_data:
child_data['session_data'].update(child_data['create_session_data'])
case.add_child_data = child_data
process_output(case_output)
def get_case_hierarchy(case, type_info):
def get_children(case, referenced_type=None, seen=None):
seen = seen or set()
ignore_types = type_info.get(case.type, {}).get("ignore_relationship_types", [])
if referenced_type and referenced_type in ignore_types:
return None
seen.add(case.case_id)
children = [
get_children(i.referenced_case, i.referenced_type, seen) for i in case.reverse_indices
if i.referenced_id not in seen
]
children = [c for c in children if c is not None]
# non-first-level descendants
descendant_types = []
for c in children:
descendant_types.extend(c['descendant_types'])
descendant_types = list(set(descendant_types))
children = sorted(children, key=partial(sortkey, type_info=type_info))
# set parent_case_id used by flat display
for c in children:
if not hasattr(c['case'], 'treetable_parent_node_id'):
c['case'].treetable_parent_node_id = case.case_id
child_cases = []
for c in children:
child_cases.extend(c['case_list'])
return {
'case': case,
'child_cases': children,
'descendant_types': list(set(descendant_types + [c['case'].type for c in children])),
'case_list': [case] + child_cases
}
return get_children(case)
def get_flat_descendant_case_list(case, get_case_url, type_info=None):
type_info = type_info or {}
hierarchy = get_case_hierarchy(case, type_info)
process_case_hierarchy(hierarchy, get_case_url, type_info)
return hierarchy['case_list']
@register.simple_tag
def render_case_hierarchy(case, options):
from corehq.apps.hqwebapp.templatetags.proptable_tags import get_display_data
wrapped_case = get_wrapped_case(case)
get_case_url = options.get('get_case_url')
timezone = options.get('timezone', pytz.utc)
columns = options.get('columns') or wrapped_case.related_cases_columns
show_view_buttons = options.get('show_view_buttons', True)
type_info = options.get('related_type_info', wrapped_case.related_type_info)
descendent_case_list = get_flat_descendant_case_list(
case, get_case_url, type_info=type_info
)
parent_cases = []
if case.indices:
# has parent case(s)
# todo: handle duplicates in ancestor path (bubbling up of parent-child
# relationships)
for idx in case.indices:
try:
parent_cases.append(idx.referenced_case)
except ResourceNotFound:
parent_cases.append(None)
for parent_case in parent_cases:
if parent_case:
parent_case.edit_data = {
'view_url': get_case_url(parent_case.case_id)
}
last_parent_id = parent_case.case_id
else:
last_parent_id = None
for c in descendent_case_list:
if not getattr(c, 'treetable_parent_node_id', None) and last_parent_id:
c.treetable_parent_node_id = last_parent_id
case_list = parent_cases + descendent_case_list
for c in case_list:
if not c:
continue
c.columns = []
case_dict = get_wrapped_case(c).to_full_dict()
for column in columns:
c.columns.append(get_display_data(
case_dict, column, timezone=timezone))
return render_to_string("case/partials/case_hierarchy.html", {
'current_case': case,
'domain': case.domain,
'case_list': case_list,
'columns': columns,
'num_columns': len(columns) + 1,
'show_view_buttons': show_view_buttons,
})
@register.simple_tag
def case_inline_display(case):
"""
Given a case id, make a best effort at displaying it.
"""
if case:
if case.opened_on:
ret = "%s (%s: %s)" % (case.name, _("Opened"), case.opened_on.date())
else:
ret = case.name
else:
ret = _("Empty Case")
return escape(ret)
| bsd-3-clause | 6,098,225,041,910,975,000 | 32.507722 | 107 | 0.520366 | false |
gsathya/bridgedb | lib/bridgedb/email/server.py | 1 | 18266 | # -*- coding: utf-8 ; test-case-name: bridgedb.test.test_email_server -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Nick Mathewson <[email protected]>
# Isis Lovecruft <[email protected]> 0xA3ADB67A2CDB8B35
# Matthew Finkel <[email protected]>
# please also see AUTHORS file
# :copyright: (c) 2007-2014, The Tor Project, Inc.
# (c) 2013-2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Servers which interface with clients and distribute bridges over SMTP."""
from __future__ import unicode_literals
import logging
import io
import socket
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.error import CannotListenError
from twisted.internet.task import LoopingCall
from twisted.mail import smtp
from twisted.mail.smtp import rfc822date
from twisted.python import failure
from zope.interface import implements
from bridgedb import __version__
from bridgedb import safelog
from bridgedb.crypto import getGPGContext
from bridgedb.email import autoresponder
from bridgedb.email import templates
from bridgedb.email import request
from bridgedb.parse import addr
from bridgedb.parse.addr import UnsupportedDomain
from bridgedb.parse.addr import canonicalizeEmailDomain
class MailServerContext(object):
"""Helper object that holds information used by email subsystem.
:ivar str username: Reject any RCPT TO lines that aren't to this
user. See the ``EMAIL_USERNAME`` option in the config file.
(default: ``'bridges'``)
    :ivar int maximumSize: Reject any incoming emails longer than
        this size (in bytes). (default: ``smtp.SMTP.MAX_LENGTH``)
:ivar int smtpPort: The port to use for outgoing SMTP.
:ivar str smtpServer: The IP address to use for outgoing SMTP.
:ivar str smtpFromAddr: Use this address in the raw SMTP ``MAIL FROM``
line for outgoing mail. (default: ``[email protected]``)
:ivar str fromAddr: Use this address in the email :header:`From:`
line for outgoing mail. (default: ``[email protected]``)
:ivar int nBridges: The number of bridges to send for each email.
:ivar gpgContext: A ``gpgme.GpgmeContext`` (as created by
:func:`bridgedb.crypto.getGPGContext`), or None if we couldn't create
a proper GPGME context for some reason.
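
    A context is typically created once at startup from the parsed config,
    the email distributor, and its schedule, and then handed to the delivery
    class. A minimal, hypothetical sketch::

        context = MailServerContext(config, distributor, schedule)
        SMTPIncomingDelivery.setContext(context)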
"""
def __init__(self, config, distributor, schedule):
"""Create a context for storing configs for email bridge distribution.
:type config: :class:`bridgedb.persistent.Conf`
:type distributor: :class:`bridgedb.Dist.EmailBasedDistributor`.
:param distributor: The distributor will handle getting the correct
bridges (or none) for a client for us.
:type schedule: :class:`bridgedb.schedule.ScheduledInterval`.
:param schedule: An interval-based scheduler, used to help the
:ivar:`distributor` know if we should give bridges to a client.
"""
self.config = config
self.distributor = distributor
self.schedule = schedule
self.maximumSize = smtp.SMTP.MAX_LENGTH
self.includeFingerprints = config.EMAIL_INCLUDE_FINGERPRINTS
self.nBridges = config.EMAIL_N_BRIDGES_PER_ANSWER
self.username = (config.EMAIL_USERNAME or "bridges")
self.hostname = socket.gethostname()
self.fromAddr = (config.EMAIL_FROM_ADDR or "[email protected]")
self.smtpFromAddr = (config.EMAIL_SMTP_FROM_ADDR or self.fromAddr)
self.smtpServerPort = (config.EMAIL_SMTP_PORT or 25)
self.smtpServerIP = (config.EMAIL_SMTP_HOST or "127.0.0.1")
self.domainRules = config.EMAIL_DOMAIN_RULES or {}
self.domainMap = config.EMAIL_DOMAIN_MAP or {}
self.canon = self.buildCanonicalDomainMap()
self.gpgContext = getGPGContext(config)
def buildCanonicalDomainMap(self):
"""Build a map for all email provider domains from which we will accept
emails to their canonical domain name.
.. note:: Be sure that ``MailServerContext.domainRules`` and
``MailServerContext.domainMap`` are set appropriately before calling
this method.
This method is automatically called during initialisation, and the
resulting domain map is stored as ``MailServerContext.canon``.
:rtype: dict
:returns: A dictionary which maps all domains and subdomains which we
accept emails from to their second-level, canonical domain names.
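
        For example, with the hypothetical settings ``EMAIL_DOMAIN_MAP =
        {'googlemail.com': 'gmail.com'}`` and ``EMAIL_DOMAINS = ['gmail.com']``
        (and no extra domain rules), the returned map would be::

            {'googlemail.com': 'gmail.com',
             'gmail.com': 'gmail.com'}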
"""
canon = self.domainMap
for domain, rule in self.domainRules.items():
if domain not in canon.keys():
canon[domain] = domain
for domain in self.config.EMAIL_DOMAINS:
canon[domain] = domain
return canon
class SMTPMessage(object):
"""Plugs into the Twisted Mail and receives an incoming message.
:ivar list lines: A list of lines from an incoming email message.
:ivar int nBytes: The number of bytes received thus far.
:ivar bool ignoring: If ``True``, we're ignoring the rest of this message
because it exceeded :ivar:`MailServerContext.maximumSize`.
:ivar canonicalFromSMTP: See :meth:`SMTPAutoresponder.runChecks`.
:ivar canonicalFromEmail: See :meth:`SMTPAutoresponder.runChecks`.
:ivar canonicalDomainRules: See :meth:`SMTPAutoresponder.runChecks`.
:type message: :api:`twisted.mail.smtp.rfc822.Message` or ``None``
:ivar message: The incoming email message.
:type responder: :class:`autoresponder.SMTPAutoresponder`
:ivar responder: A parser and checker for the incoming :ivar:`message`. If
it decides to do so, it will build a
:meth:`~autoresponder.SMTPAutoresponder.reply` email and
:meth:`~autoresponder.SMTPAutoresponder.send` it.
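
    Twisted feeds the incoming mail to this class line by line; a minimal,
    hypothetical sequence of calls (the message content is purely
    illustrative) looks like::

        message = SMTPMessage(context)
        for line in ["From: [email protected]", "", "get bridges"]:
            message.lineReceived(line)
        message.eomReceived()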
"""
implements(smtp.IMessage)
def __init__(self, context, canonicalFromSMTP=None):
"""Create a new SMTPMessage.
These are created automatically via
:class:`SMTPIncomingDelivery`.
:param context: The configured :class:`MailServerContext`.
:type canonicalFromSMTP: str or None
:param canonicalFromSMTP: The canonical domain which this message was
received from. For example, if ``'gmail.com'`` is the configured
canonical domain for ``'googlemail.com'`` and a message is
received from the latter domain, then this would be set to the
former.
"""
self.context = context
self.canon = context.canon
self.canonicalFromSMTP = canonicalFromSMTP
self.canonicalFromEmail = None
self.canonicalDomainRules = None
self.lines = []
self.nBytes = 0
self.ignoring = False
self.message = None
self.responder = autoresponder.SMTPAutoresponder()
self.responder.incoming = self
def lineReceived(self, line):
"""Called when we get another line of an incoming message."""
self.nBytes += len(line)
if self.nBytes > self.context.maximumSize:
self.ignoring = True
else:
self.lines.append(line)
if not safelog.safe_logging:
logging.debug("> %s", line.rstrip("\r\n"))
def eomReceived(self):
"""Tell the :ivar:`responder` to reply when we receive an EOM."""
if not self.ignoring:
self.message = self.getIncomingMessage()
self.responder.reply()
return defer.succeed(None)
def connectionLost(self):
"""Called if we die partway through reading a message."""
pass
def getIncomingMessage(self):
"""Create and parse an :rfc:`2822` message object for all :ivar:`lines`
received thus far.
:rtype: :api:`twisted.mail.smtp.rfc822.Message`
:returns: A ``Message`` comprised of all lines received thus far.
"""
rawMessage = io.StringIO()
for line in self.lines:
            rawMessage.write(unicode(line) + unicode('\n'))
rawMessage.seek(0)
return smtp.rfc822.Message(rawMessage)
class SMTPIncomingDelivery(smtp.SMTP):
"""Plugs into :class:`SMTPIncomingServerFactory` and handles SMTP commands
for incoming connections.
:type context: :class:`MailServerContext`
:ivar context: A context containing SMTP/Email configuration settings.
:ivar deferred: A :api:`deferred <twisted.internet.defer.Deferred>` which
will be returned when :meth:`reply` is called. Additional callbacks
may be set on this deferred in order to schedule additional actions
when the response is being sent.
:type fromCanonicalSMTP: str or ``None``
:ivar fromCanonicalSMTP: If set, this is the canonicalized domain name of
the address we received from incoming connection's ``MAIL FROM:``.
"""
implements(smtp.IMessageDelivery)
context = None
deferred = defer.Deferred()
fromCanonicalSMTP = None
@classmethod
def setContext(cls, context):
"""Set our :ivar:`context` to a new :class:`MailServerContext."""
cls.context = context
def receivedHeader(self, helo, origin, recipients):
"""Create the ``Received:`` header for an incoming email.
:type helo: tuple
:param helo: The lines received during SMTP client HELO.
:type origin: :api:`twisted.mail.smtp.Address`
:param origin: The email address of the sender.
:type recipients: list
:param recipients: A list of :api:`twisted.mail.smtp.User` instances.
"""
helo_ = ' helo={0}'.format(helo[0]) if helo[0] else ''
from_ = 'from %s ([%s]%s)' % (helo[0], helo[1], helo_)
by_ = 'by %s with BridgeDB (%s)' % (smtp.DNSNAME, __version__)
for_ = 'for %s; %s ' % (' '.join(map(str, recipients)), rfc822date())
return str('Received: %s\n\t%s\n\t%s' % (from_, by_, for_))
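    # Example of the header produced by receivedHeader() (added illustration;
    # the hostnames, address, timestamp, and version are hypothetical placeholders):
    #   Received: from gmail.com ([198.51.100.23] helo=gmail.com)
    #       by bridges.example.org with BridgeDB (0.x.y)
    #       for [email protected]; Tue, 02 Jan 2018 10:00:00 -0000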
def validateFrom(self, helo, origin):
"""Validate the ``MAIL FROM:`` address on the incoming SMTP connection.
This is done at the SMTP layer. Meaning that if a Postfix or other
email server is proxying emails from the outside world to BridgeDB,
        the :api:`origin.domain <twisted.mail.smtp.Address.domain>` will be
set to the local hostname. Therefore, if the SMTP ``MAIL FROM:``
domain name is our own hostname (as returned from
:func:`socket.gethostname`) or our own FQDN, allow the connection.
Otherwise, if the ``MAIL FROM:`` domain has a canonical domain in our
mapping (taken from :ivar:`context.canon <MailServerContext.canon>`, which
is taken in turn from the ``EMAIL_DOMAIN_MAP``), then our
:ivar:`fromCanonicalSMTP` is set to that domain.
:type helo: tuple
:param helo: The lines received during SMTP client HELO.
:type origin: :api:`twisted.mail.smtp.Address`
:param origin: The email address we received this message from.
:raises: :api:`twisted.mail.smtp.SMTPBadSender` if the
``origin.domain`` was neither our local hostname, nor one of the
canonical domains listed in :ivar:`context.canon`.
:rtype: :api:`twisted.mail.smtp.Address`
:returns: The ``origin``. We *must* return some non-``None`` data from
this method, or else Twisted will reply to the sender with a 503
error.
"""
try:
if ((origin.domain == self.context.hostname) or
(origin.domain == smtp.DNSNAME)):
self.fromCanonicalSMTP = origin.domain
else:
logging.debug("Canonicalizing client SMTP domain...")
canonical = canonicalizeEmailDomain(origin.domain,
self.context.canon)
logging.debug("Canonical SMTP domain: %r" % canonical)
self.fromCanonicalSMTP = canonical
except UnsupportedDomain as error:
logging.info(error)
raise smtp.SMTPBadSender(origin.domain)
except Exception as error:
logging.exception(error)
# This method **cannot** return None, or it'll cause a 503 error.
return origin
def validateTo(self, user):
"""Validate the SMTP ``RCPT TO:`` address for the incoming connection.
The local username and domain name to which this SMTP message is
addressed, after being stripped of any ``'+'`` aliases, **must** be
        identical to those in the email address set by our
``EMAIL_SMTP_FROM_ADDR`` configuration file option.
:type user: :api:`twisted.mail.smtp.User`
:param user: Information about the user this SMTP message was
addressed to.
:raises: A :api:`twisted.mail.smtp.SMTPBadRcpt` if any of the above
conditions weren't met.
:rtype: callable
:returns: A parameterless function which returns an instance of
:class:`SMTPMessage`.
"""
logging.debug("Validating SMTP 'RCPT TO:' email address...")
recipient = user.dest
ourAddress = smtp.Address(self.context.smtpFromAddr)
        if ourAddress.domain not in recipient.domain:
logging.debug(("Not our domain (%s) or subdomain, skipping"
" SMTP 'RCPT TO' address: %s")
% (ourAddress.domain, str(recipient)))
raise smtp.SMTPBadRcpt(str(recipient))
# The recipient's username should at least start with ours,
# but it still might be a '+' address.
if not recipient.local.startswith(ourAddress.local):
logging.debug(("Username doesn't begin with ours, skipping"
" SMTP 'RCPT TO' address: %s") % str(recipient))
raise smtp.SMTPBadRcpt(str(recipient))
# Ignore everything after the first '+', if there is one.
beforePlus = recipient.local.split('+', 1)[0]
if beforePlus != ourAddress.local:
raise smtp.SMTPBadRcpt(str(recipient))
return lambda: SMTPMessage(self.context, self.fromCanonicalSMTP)
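    # Illustrative outcomes of validateTo() (added example; all addresses are
    # hypothetical), assuming EMAIL_SMTP_FROM_ADDR = '[email protected]':
    #   '[email protected]'     -> accepted
    #   '[email protected]'  -> accepted (the '+' alias is stripped)
    #   '[email protected]'       -> rejected (different local part)
    #   '[email protected]'   -> rejected (not our domain or a subdomain of it)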
class SMTPIncomingDeliveryFactory(object):
"""Factory for :class:`SMTPIncomingDelivery`s.
This class is used to distinguish between different messages delivered
over the same connection. This can be used to optimize delivery of a
single message to multiple recipients, something which cannot be done by
:api:`IMessageDelivery <twisted.mail.smtp.IMessageDelivery>` implementors
due to their lack of information.
:ivar context: A :class:`MailServerContext` for storing configuration settings.
:ivar delivery: A :class:`SMTPIncomingDelivery` to deliver incoming
SMTP messages to.
"""
implements(smtp.IMessageDeliveryFactory)
context = None
delivery = SMTPIncomingDelivery
def __init__(self):
logging.debug("%s created." % self.__class__.__name__)
@classmethod
def setContext(cls, context):
"""Set our :ivar:`context` and the context for our :ivar:`delivery`."""
cls.context = context
cls.delivery.setContext(cls.context)
def getMessageDelivery(self):
"""Get a new :class:`SMTPIncomingDelivery` instance."""
return self.delivery()
class SMTPIncomingServerFactory(smtp.SMTPFactory):
"""Plugs into :api:`twisted.mail.smtp.SMTPFactory`; creates a new
    :class:`SMTPIncomingDelivery`, which handles response email automation,
    whenever we get an incoming connection on the SMTP port.
.. warning:: My :ivar:`context` isn't an OpenSSL context, as is used for
the :api:`twisted.mail.smtp.ESMTPSender`
:ivar context: A :class:`MailServerContext` for storing configuration settings.
:ivar deliveryFactory: A :class:`SMTPIncomingDeliveryFactory` for
producing :class:`SMTPIncomingDelivery`s.
:ivar domain: :api:`Our FQDN <twisted.mail.smtp.DNSNAME>`.
:ivar int timeout: The number of seconds to wait, after the last chunk of
data was received, before raising a
:api:`SMTPTimeoutError <twisted.mail.smtp.SMTPTimeoutError>` for an
incoming connection.
:ivar protocol: :api:`SMTP <twisted.mail.smtp.SMTP>`
"""
context = None
deliveryFactory = SMTPIncomingDeliveryFactory
def __init__(self, **kwargs):
smtp.SMTPFactory.__init__(self, **kwargs)
self.deliveryFactory = self.deliveryFactory()
@classmethod
def setContext(cls, context):
"""Set :ivar:`context` and :ivar:`deliveryFactory`.context."""
cls.context = context
cls.deliveryFactory.setContext(cls.context)
def buildProtocol(self, addr):
p = smtp.SMTPFactory.buildProtocol(self, addr)
self.deliveryFactory.transport = p.transport # XXX is this set yet?
p.factory = self
p.deliveryFactory = self.deliveryFactory
return p
def addServer(config, distributor, schedule):
"""Set up a SMTP server for responding to requests for bridges.
:type config: :class:`bridgedb.persistent.Conf`
:param config: A configuration object.
:type distributor: :class:`bridgedb.Dist.EmailBasedDistributor`
    :param distributor: A distributor which will handle database interactions, and
will decide which bridges to give to who and when.
:type schedule: :class:`bridgedb.schedule.ScheduledInterval`
:param schedule: The schedule. XXX: Is this even used?
"""
context = MailServerContext(config, distributor, schedule)
factory = SMTPIncomingServerFactory()
factory.setContext(context)
addr = config.EMAIL_BIND_IP or ""
port = config.EMAIL_PORT or 6725
try:
reactor.listenTCP(port, factory, interface=addr)
except CannotListenError as error: # pragma: no cover
logging.fatal(error)
raise SystemExit(error.message)
# Set up a LoopingCall to run every 30 minutes and forget old email times.
lc = LoopingCall(distributor.cleanDatabase)
lc.start(1800, now=False)
return factory
| bsd-3-clause | 4,093,551,767,474,609,700 | 41.282407 | 83 | 0.658217 | false |
mardiros/Caliop-PoC | caliop/caliop/views/api/config.py | 1 | 2025 | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from pyramid_jinja2 import renderer_factory
def includeme(config):
"""
Serve a static JSON based REST API.
"""
config.add_route('sessions', '/api/mock/sessions')
config.add_view('caliop.views.api.Sessions',
request_method=('POST', 'DELETE'),
route_name='sessions',
renderer='json')
config.add_route('threads', '/api/mock/threads')
config.add_view('caliop.views.api.Threads',
request_method=('GET', 'POST',),
route_name='threads',
renderer='json')
config.add_route('tagsToThreads', '/api/mock/threads/_tags')
config.add_view('caliop.views.api.TagsToThreads',
request_method=('PUT',),
route_name='tagsToThreads',
renderer='json')
config.add_route('thread', '/api/mock/threads/{thread_id}')
config.add_view('caliop.views.api.Thread',
request_method=('GET', 'PUT',),
route_name='thread',
renderer='json')
config.add_route('messages', '/api/mock/threads/{thread_id}/messages')
config.add_view('caliop.views.api.Messages',
request_method=('GET', 'POST',),
route_name='messages',
renderer='json')
config.add_route('users', '/api/mock/users')
config.add_view('caliop.views.api.Users',
request_method=('GET', 'POST',),
route_name='users',
renderer='json')
config.add_route('tags', '/api/mock/tags')
config.add_view('caliop.views.api.Tags',
request_method=('GET',),
route_name='tags',
renderer='json')
config.add_route('tagById', '/api/mock/tags/by_id/{tag_id}')
config.add_view('caliop.views.api.TagById',
request_method=('GET',),
route_name='tagById',
renderer='json')
config.add_route('tagByLabel', '/api/mock/tags/by_label/{tag_label}')
config.add_view('caliop.views.api.TagByLabel',
request_method=('GET',),
route_name='tagByLabel',
renderer='json')
| gpl-3.0 | 5,494,525,005,514,033,000 | 28.347826 | 74 | 0.602963 | false |
xlcteam/scoreBoard | scoreboard/urls.py | 1 | 1879 | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView, RedirectView
urlpatterns = patterns('scorebrd.views',
url(r'^$', 'index', name='index'),
# url(r'^login/', 'my_login', name='login'),
# url(r'^logout/', 'my_logout', name='logout'),
url(r'^results/live', 'results_live'),
url(r'^events/?$', 'events'),
url(r'^event/(?P<event_id>\d+)/?$', 'event', name="event"),
url(r'^event/new/?$', 'new_event', name='new_event'),
url(r'^competitions/?$', 'competitions'),
url(r'^competition/(?P<competition_id>\d+)/?$', 'competition', name="competition"),
url(r'^competition/new/?$', 'new_competition', name="new_competition"),
url(r'^groups/?$', 'groups'),
url(r'^group/(?P<group_id>\d+)/?$', 'group', name="group"),
url(r'^group/new/?$', 'new_group', name='new_group'),
url(r'^teams/?$', 'teams'),
url(r'^team/(?P<team_id>\d+)/?$', 'team', name="team"),
url(r'^team/new/?$', 'new_team', name='new_team'),
url(r'^matches/generate/?$', 'matches_generate_listing'),
url(r'^matches/generate/(?P<group_id>\d+)/?$', 'matches_generate'),
url(r'^match/play/(?P<match_id>\d+)/?$', 'match_play',
name='match_play'),
url(r'^match/save/(?P<match_id>\d+)/?$', 'match_save',
name='match_save'),
url(r'^results/?$', 'results'),
url(r'^results/live/?$', 'results_live', name="results_live"),
url(r'^results/group/(?P<group_id>\d+)/?$', 'results_group_view'),
url(r'^results/group/(?P<group_id>\d+)\.pdf/?$', 'results_group_pdf'),
url(r'^results/competition/(?P<competition_id>\d+)\.pdf/?$', 'results_competition_pdf'),
url(r'^results/event/(?P<event_id>\d+)\.pdf/?$', 'results_event_pdf'),
url(r'^results/team/(?P<team_id>\d+)/?$', 'results_team_view'),
url(r'^results/match/(?P<match_id>\d+)/?$', 'results_match_view'),
)
| bsd-3-clause | -1,357,999,164,565,140,200 | 43.738095 | 92 | 0.577967 | false |
ianmtaylor1/MCL | mcl/sparse.py | 1 | 8987 | """mcl/sparse.py
Code for performing the Markov Cluster Algorithm using scipy.sparse.spmatrix as
the underlying matrix. (AKA "sparse" matrices)"""
import numpy
import scipy.sparse as sparse
from datetime import datetime
_DEFAULT_ROW_THRESH=1E-14
_DEFAULT_CHECK_ITERATIONS=50
def _delta(A,B):
"""Computes the difference between two matrices"""
#return numpy.sqrt((A-B).power(2).sum())
return abs(A-B).sum()
def _maxdiff(A,B):
"""Computes the maximum difference of corresponding elements in A and B."""
return abs(A-B).max()
def _create_clusters(M,logger):
"""Interprets the idempotent matrix at the end of the MCL process
to produce the actual node clusters it represents."""
n_nodes = M.shape[0]
zero_thresh = 1/(n_nodes+0.5) #Stable values are all zero or of the form 1/n, 1 <= n <= n_nodes
# Node degrees: how many attractors each node is connected to
node_degrees = list(map((lambda x: int(x+0.5)),1/M.max(axis=1).toarray().flatten()))
# Get attractors
attractors = [x for x in range(n_nodes) if M[x,x] > zero_thresh]
attractor_degrees = [node_degrees[a] for a in attractors]
if logger is not None:
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
logger('{tm} Found {a} attractors'.format(tm=time,a=len(attractors)))
# Combine attractors into base of clusters. If there is a non-zero entry
# for an arc from one attractor to another, they're in the same cluster.
# Attractors can only be in clusters with attractors of the same degree (because
# all attractors in a cluster are connected to all other attractors in that
# cluster). Use this to boost performance.
clusters = [[a] for a,d in zip(attractors,attractor_degrees) if d==1]
partial_clusters = {d:[] for d in range(2,max(attractor_degrees)+1)}
for att,deg in zip(attractors,attractor_degrees):
if deg > 1: #We've already done degree 1
for i,clu in enumerate(partial_clusters[deg]):
if M[att,clu[0]] > zero_thresh:
clu.append(att)
if len(clu) == deg: # Check the cluster for completeness
clusters.append(clu)
partial_clusters[deg].pop(i)
break
else: # for -> else
# Because we're only looking at deg > 1, this never creates a
# "complete" cluster. We don't have to check the length.
partial_clusters[deg].append([att])
if logger is not None:
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
logger('{tm} Formed {c} clusters'.format(tm=time,c=len(clusters)))
    # Now do the rest of them. Each remaining node is mathematically guaranteed
# to go to at least one attractor. If there's a non-zero entry for an arc
# from a node to an attractor, then that node is in that attractor's cluster.
# SPARSE SPECIFIC: take advantage of the sparse matrix implementation by slicing
# an attractor's column and iterating only over all nonzero() rows.
M_csc_temp = sparse.csc_matrix(M)
for c in clusters:
possibles = M_csc_temp.getcol(c[0]).nonzero()[0]
c += [x for x in possibles if (M[x,x] <= zero_thresh) and (M[x,c[0]] > zero_thresh)]
return clusters
def _expand(M,e):
"""Expand the matrix by raising it to the e^th power"""
return M**e
def _inflate(M,r):
"""Inflate the matrix by raising each entry to the r^th power"""
return M.power(r)
def _make_stochastic(M):
"""Transforms a matrix into a row-stochastic matrix by dividing
each row by its sum."""
return sparse.diags(1/M.sum(axis=1).A1)*M
def _MCL_step(M,e,r):
"""Performs one iteration of the MCL algorithm.
M is assumed to be a square, row-stochastic matrix with float64
data type.
"""
temp = _make_stochastic(_inflate(_expand(M,e),r))
temp.eliminate_zeros()
return temp
def MCL(M,E,R,loop_weight=1,labels=None,thresh=None,logger=None):
"""Does the entire MCL process and returns clusters as a list of lists.
Parameters:
M - square (weighted) adjacency matrix for graph (type numpy.matrix or scipy.sparse.spmatrix)
E - iterable of parameters, e, to use in successive "expand" steps
R - iterable of parameters, r, to use in successive "inflate" steps
Optional:
loop_weight - Weight given to the loop edges that are added to each node
before MCL begins
labels - optional list of node labels. Column/row i of 'M' is labeled by
entry i of 'labels'. Affects how clusters are returned.
thresh - threshold for changes in successive steps. When the change is
below 'thresh', then the process stops.
logger - optional callable for logging steps of the process. Strings are
passed to this function detailing the status of the process.
Returns: a list of lists representing clusters in the input graph. If
'labels' is None, the elements will be indices of the matrix. If
'labels' is supplied, the elements will be the appropriate labels.
"""
# Check to see that inputs are valid
if M.shape[0] != M.shape[1]:
        raise Exception('Matrix must be square')
if (labels is not None) and (len(labels) != M.shape[0]):
raise Exception('Must be exactly one label per matrix column/row')
n_nodes = M.shape[0]
if thresh is None:
thresh = _DEFAULT_ROW_THRESH*n_nodes
if logger is not None:
iter = 0
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
logger('{tm} Start. nodes={n}, L1Thresh={thr:.1e}'.format(tm=time,n=n_nodes,thr=thresh))
    # Set up the matrix (use CSR format)
T = _make_stochastic(
sparse.csr_matrix(M)
+loop_weight*sparse.identity(n_nodes,dtype='float_',format='csr')
)
# Loop through the algorithm with the supplied parameters until equilibrium
check_deltas = [thresh+1]*_DEFAULT_CHECK_ITERATIONS
for e,r in zip(E,R):
T2 = _MCL_step(T,e,r)
if logger is not None:
iter += 1
time = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
l1delta = _delta(T,T2)
maxdiff = _maxdiff(T,T2)
logger('{tm} Iteration {i}. L1Delta={delta:.1e}, MaxDiff={md:.1e}'.format(tm=time,i=iter,delta=l1delta,md=maxdiff))
check_deltas.insert(0,_delta(T,T2))
check_deltas.pop()
if max(check_deltas) < thresh:
T = T2
break
T = T2
else:
raise Exception('Not enough iterations performed to reach equilibrium')
# Interpret the results to form the clusters
clusters = _create_clusters(T,logger)
if labels is not None:
return [[labels[x] for x in c] for c in clusters]
else:
return clusters
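# Illustrative usage of MCL() (added sketch, not part of the original module;
# the 3-node triangle graph below is hypothetical and uses the helpers
# create_matrix() and param_iter() defined further down in this file):
#
#     >>> A = create_matrix([(0, 1), (1, 2), (0, 2)])
#     >>> MCL(A, E=param_iter(tail=2), R=param_iter(tail=2))
#     [[0, 1, 2]]
#
# A fully connected triangle collapses into a single cluster containing all
# three nodes; weakly coupled regions of a larger graph come back as separate
# inner lists.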
def param_iter(start=[],tail=None):
"""A generator that first goes through each item in 'start', then
repeats 'tail' indefinitely."""
for x in start:
yield x
while tail is not None:
yield tail
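# For example (added illustration): param_iter([4, 3], 2) yields 4, 3, 2, 2, 2, ...
# i.e. a few custom leading parameters followed by a constant tail, which is the
# shape expected for the E and R arguments of MCL().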
def create_matrix(pairs,weights=None,labels=None,directed=False):
"""Creates a sparse adjacency matrix based on the values provided, for use in
the MCL process.
Parameters:
pairs - a list of 2-tuples for the edges.
weights - optional. List of weight values for the edges.
    labels - optional. A list of all the nodes in the graph this matrix represents.
      If None, values in pairs are assumed to be 0-based indices. If labels
      is provided, values in pairs should be listed in labels.
directed - optional. If False (default) mirrored edges will be created for every
      edge provided. If True, only the exact edges specified will be created.
if (weights is not None) and (len(pairs)!=len(weights)):
raise Exception('weights must be the same length as pairs')
# For every valid index k, an edge will be drawn from pairs[k][0] to pairs[k][1] with
# weight weights[k] (1, if no weights are provided). If directed==False, a
# corresponding edge will be created from pairs[k][1] to pairs[k][0].
if weights is None:
weights = [1.0]*len(pairs)
if labels is None:
matsize = max(max(a,b) for a,b in pairs)+1
row_idx = [i for i,_ in pairs]
col_idx = [j for _,j in pairs]
else:
matsize = len(labels)
label_dict = {x:i for i,x in enumerate(labels)}
try:
row_idx = [label_dict[a] for a,_ in pairs]
col_idx = [label_dict[b] for _,b in pairs]
except KeyError:
raise Exception('All values in pairs must be present in labels')
adj_mat = sparse.csr_matrix((weights,(row_idx,col_idx)),shape=(matsize,matsize))
if directed==False:
# Need to duplicate the edges. Add the matrix to its transpose and
# subtract the original diagonal.
orig_diag = sparse.diags(adj_mat.diagonal())
adj_mat += adj_mat.transpose() - orig_diag
return adj_mat | lgpl-3.0 | 5,609,223,975,875,018,000 | 45.092308 | 127 | 0.640036 | false |
digitalocean/netbox | netbox/extras/migrations/0051_migrate_customfields.py | 1 | 3951 | from django.db import migrations
from extras.choices import CustomFieldTypeChoices
def deserialize_value(field, value):
"""
Convert serialized values to JSON equivalents.
"""
    if field.type == CustomFieldTypeChoices.TYPE_INTEGER:
return int(value)
if field.type == CustomFieldTypeChoices.TYPE_BOOLEAN:
return bool(int(value))
if field.type == CustomFieldTypeChoices.TYPE_SELECT:
return field._choices.get(pk=int(value)).value
return value
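# Examples of the conversions performed above (illustrative values only):
#   integer field, serialized '42'  -> 42
#   boolean field, serialized '1'   -> True
#   select field, serialized PK     -> the .value of the referenced choice
#   any other field type            -> the serialized value, unchanged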
def migrate_customfield_defaults(apps, schema_editor):
"""
Copy old serialized defaults to native JSON types.
"""
CustomField = apps.get_model('extras', 'CustomField')
for customfield in CustomField.objects.exclude(default=''):
try:
if customfield.type == CustomFieldTypeChoices.TYPE_INTEGER:
value = int(customfield.default)
elif customfield.type == CustomFieldTypeChoices.TYPE_BOOLEAN:
value = customfield.default in ['true', 'yes', '1']
else:
value = customfield.default
except ValueError:
raise ValueError(
f'Invalid default value "{customfield.default}" found for {customfield.type} '
f'custom field {customfield.name}'
)
CustomField.objects.filter(pk=customfield.pk).update(default2=value)
def migrate_customfieldchoices(apps, schema_editor):
"""
Collect all CustomFieldChoices for each applicable CustomField, and save them locally as an array on
the CustomField instance.
"""
CustomField = apps.get_model('extras', 'CustomField')
CustomFieldChoice = apps.get_model('extras', 'CustomFieldChoice')
for cf in CustomField.objects.filter(type='select'):
cf.choices = [
cfc.value for cfc in CustomFieldChoice.objects.filter(field=cf).order_by('weight', 'value')
]
cf.save()
def migrate_customfieldvalues(apps, schema_editor):
"""
Copy data from CustomFieldValues into the custom_field_data JSON field on each model instance.
"""
CustomFieldValue = apps.get_model('extras', 'CustomFieldValue')
for cfv in CustomFieldValue.objects.prefetch_related('field').exclude(serialized_value=''):
model = apps.get_model(cfv.obj_type.app_label, cfv.obj_type.model)
# Read and update custom field value for each instance
# TODO: This can be done more efficiently once .update() is supported for JSON fields
cf_data = model.objects.filter(pk=cfv.obj_id).values('custom_field_data').first()
try:
cf_data['custom_field_data'][cfv.field.name] = deserialize_value(cfv.field, cfv.serialized_value)
except Exception as e:
print(f'{cfv.field.name} ({cfv.field.type}): {cfv.serialized_value} ({cfv.pk})')
raise e
model.objects.filter(pk=cfv.obj_id).update(**cf_data)
def fix_filter_logic_values(apps, schema_editor):
"""
Fix invalid values for CustomField.filter_logic (see #5376)
"""
CustomField = apps.get_model('extras', 'CustomField')
CustomField.objects.filter(filter_logic="integer").update(filter_logic="loose")
class Migration(migrations.Migration):
dependencies = [
('circuits', '0020_custom_field_data'),
('dcim', '0117_custom_field_data'),
('extras', '0050_customfield_changes'),
('ipam', '0038_custom_field_data'),
('secrets', '0010_custom_field_data'),
('tenancy', '0010_custom_field_data'),
('virtualization', '0018_custom_field_data'),
]
operations = [
migrations.RunPython(
code=migrate_customfield_defaults
),
migrations.RunPython(
code=migrate_customfieldchoices
),
migrations.RunPython(
code=migrate_customfieldvalues
),
migrations.RunPython(
code=fix_filter_logic_values
),
]
| apache-2.0 | 1,010,862,674,792,506,200 | 34.918182 | 109 | 0.644394 | false |
ZTH1970/alcide | alcide/utils.py | 1 | 2580 | from django.contrib.auth.models import Group
from django.conf import settings
from datetime import timedelta, datetime
from .middleware.request import get_request
__EPOCH = datetime(day=5,month=1,year=1970)
def __date_to_datetime(date):
return datetime(date.year, date.month, date.day)
def weeks_since_epoch(date):
days_since_epoch = (__date_to_datetime(date) - __EPOCH).days
return days_since_epoch // 7
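# For example (added illustration): weeks_since_epoch(date(1970, 1, 12)) == 1,
# since the epoch used here is Monday 1970-01-05 and weeks are counted with
# floor division.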
def weekday_ranks(date):
    '''Returns a pair (n, m): n is the 0-based rank of date's weekday counting
    from its first occurrence in the month, and m is -(k+1) where k is the
    0-based rank of that weekday counting from its last occurrence in the month.
    '''
n = 0
month = date.month
i = date - timedelta(days=7)
while i.month == month:
n += 1
i = i - timedelta(days=7)
m = -1
i = date + timedelta(days=7)
while i.month == month:
m -= 1
i = i + timedelta(days=7)
return n, m
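# For example (added illustration): date(2015, 1, 11) is the second Sunday of
# January 2015 and the third-from-last, so weekday_ranks() returns (1, -3).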
def is_super_user(user):
if not user or not user.is_authenticated():
return False
if user.is_superuser:
return True
super_group = None
try:
super_group = Group.objects.get(name='Super utilisateurs')
except:
return False
if super_group in user.groups.all():
return True
return False
def is_validator(user):
if is_super_user(user):
return True
if not user or not user.is_authenticated():
return False
validator_group = None
try:
validator_group = Group.objects.get(name='Administratifs')
except:
return False
if validator_group in user.groups.all():
return True
return False
def get_nir_control_key(nir):
try:
# Corse dpt 2A et 2B
minus = 0
if nir[6] in ('A', 'a'):
nir = [c for c in nir]
nir[6] = '0'
nir = ''.join(nir)
minus = 1000000
elif nir[6] in ('B', 'b'):
nir = [c for c in nir]
nir[6] = '0'
nir = ''.join(nir)
minus = 2000000
nir = int(nir) - minus
return (97 - (nir % 97))
except:
return None
def get_service_setting(setting_name, default_value=None):
from .cbv import HOME_SERVICE_COOKIE
request = get_request()
if not request:
return None
service = request.COOKIES.get(HOME_SERVICE_COOKIE)
if not service:
return None
if not hasattr(settings, 'SERVICE_SETTINGS'):
return None
return settings.SERVICE_SETTINGS.get(service, {}).get(setting_name) or default_value
| agpl-3.0 | 1,086,971,403,495,948,300 | 27.043478 | 88 | 0.597287 | false |
charanpald/wallhack | wallhack/clusterexp/ToyExample.py | 1 | 1353 |
"""
This is just a toy example on a made up graph which can be visualised.
"""
from apgl.graph import *
from exp.sandbox.IterativeSpectralClustering import *
from exp.sandbox.GraphIterators import *
numVertices = 14
numFeatures = 0
vList = VertexList(numVertices, numFeatures)
graph = SparseGraph(vList)
graph.addEdge(0, 1)
graph.addEdge(0, 2)
graph.addEdge(1, 2)
graph.addEdge(3, 4)
graph.addEdge(3, 5)
graph.addEdge(4, 5)
graph.addEdge(6, 7)
graph.addEdge(6, 8)
graph.addEdge(7, 7)
graph.addEdge(1, 4)
graph.addEdge(1, 6)
graph.addEdge(4, 9)
graph.addEdge(5, 9)
graph.addEdge(9, 10)
graph.addEdge(5, 10)
graph.addEdge(11, 0)
graph.addEdge(11, 1)
graph.addEdge(11, 2)
graph.addEdge(7, 12)
graph.addEdge(8, 12)
graph.addEdge(12, 13)
subgraphIndicesList = []
subgraphIndicesList.append(range(9))
subgraphIndicesList.append(range(11))
subgraphIndicesList.append(range(14))
k1 = 3
k2 = 5
clusterer = IterativeSpectralClustering(k1, k2)
#Test full computation of eigenvectors
graphIterator = IncreasingSubgraphListIterator(graph, subgraphIndicesList)
clustersList = clusterer.clusterFromIterator(graphIterator, True)
#clustersList = clusterer.cluster(graph, subgraphIndicesList, True)
print(clustersList)
#Seems to work fine and same in exact case, but not very interesting
#End clustering: array([1, 1, 1, 0, 0, 0, 2, 2, 2, 0, 0, 1, 2, 2])]
| gpl-3.0 | -4,704,334,724,956,656,000 | 22.327586 | 74 | 0.751663 | false |
mlperf/training_results_v0.7 | Google/benchmarks/dlrm/implementations/dlrm-research-TF-tpu-v4-512/dataloader.py | 1 | 9756 | # Lint as: python3
# Copyright 2020 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader for pre-processed Criteo data."""
import tensorflow.compat.v1 as tf
from REDACTED.dlrm import feature_config as fc
def rand_features(batch_size):
"""Emits random input features, used for testing."""
features = {}
pos_size = batch_size // 2
neg_size = batch_size - pos_size
features[fc.LABEL_FEATURE] = tf.concat([
tf.ones([pos_size, 1], dtype=tf.float32),
tf.zeros([neg_size, 1], dtype=tf.float32)
],
axis=0)
features["int-features"] = tf.random.uniform(
shape=(batch_size, len(fc.INT_FEATURES)), maxval=fc.FAKE_DATA_INT_MAX)
features["cat-features"] = tf.random.uniform(
shape=(batch_size, len(fc.CATEGORICAL_FEATURES)),
maxval=fc.FAKE_DATA_VOCAB_SIZE,
dtype=tf.int32)
return features
class CriteoTFRecordReader(object):
"""Input reader fn for TFRecords that have been serialized in batched form."""
def __init__(self,
file_path=None,
feature_config=None,
is_training=True,
use_cached_data=False,
use_synthetic_data=False,
params=None):
self._file_path = file_path
self._feature_config = feature_config
self._is_training = is_training
self._use_cached_data = use_cached_data
self._use_synthetic_data = use_synthetic_data
self._params = params
def __call__(self, params):
batch_size = params["batch_size"]
if self._use_synthetic_data:
ds = tf.data.Dataset.from_tensor_slices(rand_features(batch_size))
ds = ds.batch(batch_size, drop_remainder=True)
ds = ds.take(1).cache().repeat()
return ds
def _get_feature_spec():
feature_spec = {}
feature_spec[fc.LABEL_FEATURE] = tf.FixedLenFeature([
batch_size,
],
dtype=tf.float32)
for int_ft in fc.INT_FEATURES:
feature_spec[int_ft] = tf.FixedLenFeature([
batch_size,
],
dtype=tf.float32)
for cat_ft in fc.CATEGORICAL_FEATURES:
feature_spec[cat_ft] = tf.FixedLenFeature([], dtype=tf.string)
return feature_spec
def _parse_fn(serialized_example):
feature_spec = _get_feature_spec()
p_features = tf.parse_single_example(serialized_example, feature_spec)
features = {}
features[fc.LABEL_FEATURE] = tf.reshape(p_features[fc.LABEL_FEATURE],
(batch_size, 1))
int_features = []
for int_ft in fc.INT_FEATURES:
cur_feature = tf.reshape(p_features[int_ft], (batch_size, 1))
int_features.append(cur_feature)
features["int-features"] = tf.concat(int_features, axis=-1)
cat_features = []
tc_features = []
tbl_idxs_sorted = self._feature_config.get_table_idx_orderd_by_size()
for idx in range(len(fc.CATEGORICAL_FEATURES)):
# Add features from largest-vocab to smallest-vocab.
raw_tbl_idx = tbl_idxs_sorted[idx]
cat_feature_idx = raw_tbl_idx + 14
cat_feature = "categorical-feature-%d" % cat_feature_idx
# Decode from bytes to int32.
cat_ft_int32 = tf.io.decode_raw(p_features[cat_feature], tf.int32)
cat_ft_int32 = tf.reshape(cat_ft_int32, (batch_size, 1))
if idx < self._feature_config.get_num_tables_in_ec():
cat_features.append(cat_ft_int32)
else:
tc_features.append(cat_ft_int32)
features["cat-features"] = tf.concat(cat_features, axis=-1)
if tc_features:
features["tc-features"] = tf.concat(tc_features, axis=-1)
return features
filenames = tf.data.Dataset.list_files(self._file_path, shuffle=False)
filenames = filenames.shard(params["dataset_num_shards"],
params["dataset_index"])
# TODO(b/159039542): Temporary work-around to solve out-of-data crashing.
if self._is_training:
filenames = filenames.repeat()
ds = tf.data.TFRecordDataset(
filenames, buffer_size=64 * 1024 * 1024, num_parallel_reads=8)
ds = ds.map(_parse_fn, num_parallel_calls=8)
if not self._is_training:
num_dataset_samples = self._params["eval_steps"] * (
self._params["eval_batch_size"] // params["dataset_num_shards"])
num_dataset_batches = num_dataset_samples // batch_size
def _mark_as_padding(features):
"""Padding will be denoted with a label value of -1."""
features[fc.LABEL_FEATURE] = -1 * tf.ones(
(batch_size, 1), dtype=tf.float32)
return features
# 100 steps worth of padding.
padding_ds = ds.take(self._params["replicas_per_host"])
padding_ds = padding_ds.map(_mark_as_padding).repeat(100)
ds = ds.concatenate(padding_ds).take(num_dataset_batches)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
if self._use_cached_data:
ds = ds.take(100).cache().repeat()
return ds
class CriteoTsvReader(object):
"""Input reader fn for pre-processed Criteo data.
Raw Criteo data is assumed to be preprocessed in the following way:
1. Missing values are replaced with zeros.
2. Negative values are replaced with zeros.
3. Integer features are transformed by log(x+1) and are hence tf.float32.
4. Categorical data is bucketized and are hence tf.int32.
"""
def __init__(self,
file_path=None,
feature_config=None,
is_training=True,
distributed_eval=False,
parallelism=1,
use_cached_data=False,
use_synthetic_data=False):
self._file_path = file_path
self._feature_config = feature_config
self._is_training = is_training
self._distributed_eval = distributed_eval
self._parallelism = parallelism
self._use_cached_data = use_cached_data
self._use_synthetic_data = use_synthetic_data
def __call__(self, params):
batch_size = params["batch_size"]
if self._use_synthetic_data:
ds = tf.data.Dataset.from_tensor_slices(rand_features(batch_size))
ds = ds.batch(batch_size, drop_remainder=True)
ds = ds.take(1).cache().repeat()
return ds
@tf.function
def _parse_example_fn(example):
"""Parser function for pre-processed Criteo TSV records."""
label_defaults = [[0.0]]
int_defaults = [
[0.0] for _ in range(self._feature_config.get_num_dense_features())
]
categorical_defaults = [
[0] for _ in range(self._feature_config.get_num_sparse_features())
]
record_defaults = label_defaults + int_defaults + categorical_defaults
fields = tf.decode_csv(
example, record_defaults, field_delim="\t", na_value="-1")
num_labels = 1
num_dense = len(int_defaults)
features = {}
features[fc.LABEL_FEATURE] = tf.reshape(fields[0], [batch_size, 1])
int_features = []
for idx in range(num_dense):
int_features.append(fields[idx + num_labels])
features["int-features"] = tf.stack(int_features, axis=1)
cat_features = []
tc_features = []
      # Features for tables in EmbeddingCore are in cat_features; features for
      # tables in REDACTED are in tc_features. The order of the input data
      # follows the order of FLAG.vocab_sizes_embed, so we reorder the input
      # data with respect to the table sizes.
for idx, idx_by_size in enumerate(
self._feature_config.get_table_idx_orderd_by_size()):
if idx < self._feature_config.get_num_tables_in_ec():
cat_features.append(
tf.cast(
fields[idx_by_size + num_dense + num_labels], dtype=tf.int32))
else:
tc_features.append(
tf.cast(
fields[idx_by_size + num_dense + num_labels], dtype=tf.int32))
features["cat-features"] = tf.stack(cat_features, axis=1)
if tc_features:
features["tc-features"] = tf.stack(tc_features, axis=1)
return features
filenames = tf.data.Dataset.list_files(self._file_path, shuffle=False)
filenames = filenames.shard(params["dataset_num_shards"],
params["dataset_index"])
def make_dataset(ds_index):
ds = filenames.shard(self._parallelism, ds_index)
ds = ds.repeat(2)
ds = ds.interleave(
tf.data.TextLineDataset,
cycle_length=16,
block_length=batch_size // 8,
num_parallel_calls=8,
deterministic=False)
ds = ds.batch(batch_size, drop_remainder=True)
ds = ds.map(_parse_example_fn, num_parallel_calls=16)
return ds
ds_indices = tf.data.Dataset.range(self._parallelism)
ds = ds_indices.interleave(
make_dataset,
cycle_length=self._parallelism,
block_length=1,
num_parallel_calls=self._parallelism,
deterministic=False)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
if self._use_cached_data:
ds = ds.take(100).cache().repeat()
return ds
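# Illustrative construction (added sketch; the file pattern, feature_config
# object and parameter values below are assumptions, not part of this module):
#
#   reader = CriteoTsvReader(file_path='/data/criteo/train/*.tsv',
#                            feature_config=my_feature_config,
#                            is_training=True,
#                            parallelism=4)
#   ds = reader({'batch_size': 256,
#                'dataset_num_shards': 1,
#                'dataset_index': 0})
#
# The returned tf.data.Dataset yields dicts keyed by fc.LABEL_FEATURE,
# 'int-features', 'cat-features' (and 'tc-features' when some tables live
# outside the embedding core), each batched to the requested batch_size.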
| apache-2.0 | 6,009,022,825,742,480,000 | 36.961089 | 80 | 0.614494 | false |
raulperula/python_tutorials | tutorial-pyqt/src/example06.py | 1 | 2390 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'example6.ui'
#
# Created: Sun Jan 11 17:51:02 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(400, 300)
self.pushButtonSum = QtGui.QPushButton(Dialog)
self.pushButtonSum.setGeometry(QtCore.QRect(140, 190, 98, 27))
self.pushButtonSum.setObjectName(_fromUtf8("pushButtonSum"))
self.labelNumber1 = QtGui.QLabel(Dialog)
self.labelNumber1.setGeometry(QtCore.QRect(15, 50, 111, 20))
self.labelNumber1.setObjectName(_fromUtf8("labelNumber1"))
self.labelNumber2 = QtGui.QLabel(Dialog)
self.labelNumber2.setGeometry(QtCore.QRect(15, 90, 111, 20))
self.labelNumber2.setObjectName(_fromUtf8("labelNumber2"))
self.labelResult = QtGui.QLabel(Dialog)
self.labelResult.setGeometry(QtCore.QRect(55, 140, 81, 20))
self.labelResult.setObjectName(_fromUtf8("labelResult"))
self.lineEditNumber1 = QtGui.QLineEdit(Dialog)
self.lineEditNumber1.setGeometry(QtCore.QRect(140, 50, 113, 27))
self.lineEditNumber1.setObjectName(_fromUtf8("lineEditNumber1"))
self.lineEditNumber2 = QtGui.QLineEdit(Dialog)
self.lineEditNumber2.setGeometry(QtCore.QRect(140, 90, 113, 27))
self.lineEditNumber2.setObjectName(_fromUtf8("lineEditNumber2"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.pushButtonSum.setText(QtGui.QApplication.translate("Dialog", "Sum", None, QtGui.QApplication.UnicodeUTF8))
self.labelNumber1.setText(QtGui.QApplication.translate("Dialog", "First number", None, QtGui.QApplication.UnicodeUTF8))
self.labelNumber2.setText(QtGui.QApplication.translate("Dialog", "Second number", None, QtGui.QApplication.UnicodeUTF8))
self.labelResult.setText(QtGui.QApplication.translate("Dialog", "Result", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | 8,810,021,368,014,348,000 | 47.77551 | 128 | 0.7159 | false |
ywangd/stash | system/shui/pythonista_ui.py | 1 | 36021 | # coding: utf-8
from time import time
import six
import ui
from objc_util import on_main_thread, ObjCInstanceMethod, UIColor, create_objc_class, ObjCClass, ObjCInstance, ns
from ..shcommon import ON_IPAD, ON_IOS_8, sh_delay
from ..shcommon import K_CC, K_CD, K_HUP, K_HDN, K_CU, K_TAB, K_HIST, K_CZ, K_KB, CTRL_KEY_FLAG
from ..shscreens import DEFAULT_CHAR, ShChar
from .base import ShBaseUI, ShBaseTerminal, ShBaseSequentialRenderer
try:
from objc_util import *
except ImportError:
from .dummyobjc_util import *
NSMutableAttributedString = ObjCClass('NSMutableAttributedString')
UIFont = ObjCClass('UIFont')
BlackColor = UIColor.blackColor()
RedColor = UIColor.redColor()
GreenColor = UIColor.greenColor()
BrownColor = UIColor.brownColor()
BlueColor = UIColor.blueColor()
# BlueColor = UIColor.colorWithRed_green_blue_alpha_(0.3, 0.3, 1.0, 1.0)
MagentaColor = UIColor.magentaColor()
CyanColor = UIColor.cyanColor()
WhiteColor = UIColor.whiteColor()
GrayColor = UIColor.grayColor()
# GrayColor = UIColor.colorWithRed_green_blue_alpha_(0.5, 0.5, 0.5, 1.0)
YellowColor = UIColor.yellowColor()
# SmokeColor = UIColor.smokeColor()
SmokeColor = UIColor.colorWithRed_green_blue_alpha_(0.8, 0.8, 0.8, 1.0)
class ShVk(ui.View):
"""
The virtual keyboard container, which implements a swipe cursor positioning gesture
:type stash : StaSh
"""
def __init__(self, stash, name='vks', flex='wh'):
self.stash = stash
self.flex = flex
self.name = name
self.sv = ui.ScrollView(name, flex='wh')
super(ShVk, self).add_subview(self.sv)
self.sv.delegate = self
self.dx = 0
self.SCROLL_PER_CHAR = 20.0 # Number of pixels to scroll to move 1 character
def layout(self):
self.sv.content_size = (self.width + 1, self.height)
def add_subview(self, subview):
self.sv.add_subview(subview)
def remove_subview(self, subview):
self.sv.remove_subview(subview)
def scrollview_did_scroll(self, scrollview):
# integrate small scroll motions, but keep scrollview from actually moving
if not scrollview.decelerating:
self.dx -= scrollview.content_offset[0] / self.SCROLL_PER_CHAR
scrollview.content_offset = (0.0, 0.0)
offset = int(self.dx)
if offset:
self.dx -= offset
self.stash.mini_buffer.set_cursor(offset, whence=1)
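    # Illustrative behaviour (added note): a horizontal drag of roughly 60 px on
    # the key row accumulates 60 / SCROLL_PER_CHAR = 3 characters and repositions
    # the mini-buffer cursor by 3 places (relative move, whence=1), while the
    # scroll view itself is pinned back to content offset (0, 0) on every call.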
class ShUI(ShBaseUI, ui.View):
"""
UI using the pythonista ui module
"""
def __init__(self, *args, **kwargs):
ShBaseUI.__init__(self, *args, **kwargs)
self.is_editing = False
# Start constructing the view's layout
self.name = 'StaSh'
self.flex = 'WH'
self.background_color = 0.0
self.txts = ui.View(name='txts', flex='WH') # Wrapper view of output and input areas
self.add_subview(self.txts)
self.txts.background_color = 0.7
# TODO: The accessory keys can be moved to a separate class
self.vks = ShVk(self.stash, name='vks', flex='WT')
self.vks.sv.delegate = self.stash.user_action_proxy.sv_delegate
self.txts.add_subview(self.vks)
self.vks.background_color = 0.7
k_hspacing = 1
self.k_tab = ui.Button(name='k_tab', title=' Tab ', flex='TB')
self.vks.add_subview(self.k_tab)
self.k_tab.action = self._vk_tapped
self.k_tab.font = self.BUTTON_FONT
self.k_tab.border_width = 1
self.k_tab.border_color = 0.9
self.k_tab.corner_radius = 5
self.k_tab.tint_color = 'black'
self.k_tab.background_color = 'white'
self.k_tab.size_to_fit()
self.k_grp_0 = ShVk(self.stash, name='k_grp_0', flex='WT') # vk group 0
self.k_grp_0.sv.delegate = self._vk_tapped
self.vks.add_subview(self.k_grp_0)
self.k_grp_0.background_color = 0.7
self.k_grp_0.x = self.k_tab.width + k_hspacing
self.k_hist = ui.Button(name='k_hist', title=' H ', flex='RTB')
self.k_grp_0.add_subview(self.k_hist)
self.k_hist.action = self._vk_tapped
self.k_hist.font = self.BUTTON_FONT
self.k_hist.border_width = 1
self.k_hist.border_color = 0.9
self.k_hist.corner_radius = 5
self.k_hist.tint_color = 'black'
self.k_hist.background_color = 'white'
self.k_hist.size_to_fit()
self.k_hup = ui.Button(name='k_hup', title=' Up ', flex='RTB')
self.k_grp_0.add_subview(self.k_hup)
self.k_hup.action = self._vk_tapped
self.k_hup.font = self.BUTTON_FONT
self.k_hup.border_width = 1
self.k_hup.border_color = 0.9
self.k_hup.corner_radius = 5
self.k_hup.tint_color = 'black'
self.k_hup.background_color = 'white'
self.k_hup.size_to_fit()
self.k_hup.x = self.k_hist.width + k_hspacing
self.k_hdn = ui.Button(name='k_hdn', title=' Dn ', flex='RTB')
self.k_grp_0.add_subview(self.k_hdn)
self.k_hdn.action = self._vk_tapped
self.k_hdn.font = self.BUTTON_FONT
self.k_hdn.border_width = 1
self.k_hdn.border_color = 0.9
self.k_hdn.corner_radius = 5
self.k_hdn.tint_color = 'black'
self.k_hdn.background_color = 'white'
self.k_hdn.size_to_fit()
self.k_hdn.x = self.k_hup.x + self.k_hup.width + k_hspacing
self.k_CD = ui.Button(name='k_CD', title=' CD ', flex='RTB')
self.k_grp_0.add_subview(self.k_CD)
self.k_CD.action = self._vk_tapped
self.k_CD.font = self.BUTTON_FONT
self.k_CD.border_width = 1
self.k_CD.border_color = 0.9
self.k_CD.corner_radius = 5
self.k_CD.tint_color = 'black'
self.k_CD.background_color = 'white'
self.k_CD.size_to_fit()
self.k_CD.x = self.k_hdn.x + self.k_hdn.width + k_hspacing
self.k_CC = ui.Button(name='k_CC', title=' CC ', flex='RTB')
self.k_grp_0.add_subview(self.k_CC)
self.k_CC.action = self._vk_tapped
self.k_CC.font = self.BUTTON_FONT
self.k_CC.border_width = 1
self.k_CC.border_color = 0.9
self.k_CC.corner_radius = 5
self.k_CC.tint_color = 'black'
self.k_CC.background_color = 'white'
self.k_CC.size_to_fit()
self.k_CC.x = self.k_CD.x + self.k_CD.width + k_hspacing
# Kill line key
self.k_CU = ui.Button(name='k_CU', title=' CU ', flex='RTB')
self.k_grp_0.add_subview(self.k_CU)
self.k_CU.action = self._vk_tapped
self.k_CU.font = self.BUTTON_FONT
self.k_CU.border_width = 1
self.k_CU.border_color = 0.9
self.k_CU.corner_radius = 5
self.k_CU.tint_color = 'black'
self.k_CU.background_color = 'white'
self.k_CU.size_to_fit()
self.k_CU.x = self.k_CC.x + self.k_CC.width + k_hspacing
# BG key
self.k_CZ = ui.Button(name='k_CZ', title=' CZ ', flex='RTB')
self.k_grp_0.add_subview(self.k_CZ)
self.k_CZ.action = self._vk_tapped
self.k_CZ.font = self.BUTTON_FONT
self.k_CZ.border_width = 1
self.k_CZ.border_color = 0.9
self.k_CZ.corner_radius = 5
self.k_CZ.tint_color = 'black'
self.k_CZ.background_color = 'white'
self.k_CZ.size_to_fit()
self.k_CZ.x = self.k_CU.x + self.k_CU.width + k_hspacing
# End Editing key
self.k_KB = ui.Button(name='k_KB', title=' KB ', flex='RTB')
self.k_grp_0.add_subview(self.k_KB)
self.k_KB.action = self._vk_tapped
self.k_KB.font = self.BUTTON_FONT
self.k_KB.border_width = 1
self.k_KB.border_color = 0.9
self.k_KB.corner_radius = 5
self.k_KB.tint_color = 'black'
self.k_KB.background_color = 'white'
self.k_KB.size_to_fit()
self.k_KB.x = self.k_CZ.x + self.k_CZ.width + k_hspacing
self.k_swap = ui.Button(name='k_swap', title='..', flex='LTB')
self.vks.add_subview(self.k_swap)
# self.k_swap.action = self.stash.user_action_proxy.vk_tapped
self.k_swap.action = lambda sender: self.toggle_k_grp()
self.k_swap.font = self.BUTTON_FONT
self.k_swap.border_width = 1
self.k_swap.border_color = 0.9
self.k_swap.corner_radius = 5
self.k_swap.tint_color = 'black'
self.k_swap.background_color = 'white'
self.k_swap.size_to_fit()
self.k_swap.width -= 2
self.k_swap.x = self.vks.width - self.k_swap.width
self.k_grp_1 = ShVk(self.stash, name='k_grp_1', flex='WT') # vk group 1
self.k_grp_1.sv.delegate = self.stash.user_action_proxy.sv_delegate
self.vks.add_subview(self.k_grp_1)
self.k_grp_1.background_color = 0.7
self.k_grp_1.x = self.k_tab.width + k_hspacing
offset = 0
for i, sym in enumerate(self.vk_symbols):
if sym == ' ':
continue
if not ON_IPAD and i > 7:
break
k_sym = ui.Button(name='k_sym', title=' %s ' % sym, flex='RTB')
self.k_grp_1.add_subview(k_sym)
k_sym.action = lambda vk: self.stash.mini_buffer.feed(self.terminal.selected_range, vk.title.strip())
k_sym.font = self.BUTTON_FONT
k_sym.border_width = 1
k_sym.border_color = 0.9
k_sym.corner_radius = 5
k_sym.tint_color = 'black'
k_sym.background_color = 'white'
k_sym.size_to_fit()
k_sym.x = offset + k_hspacing * i
offset += k_sym.width
self.k_grp_0.width = self.vks.width - self.k_tab.width - self.k_swap.width - 2 * k_hspacing
self.k_grp_1.width = self.vks.width - self.k_tab.width - self.k_swap.width - 2 * k_hspacing
self.vks.height = self.k_hist.height
self.vks.y = self.vks.superview.height - (self.vks.height + 4)
self.k_grp_1.send_to_back()
self.on_k_grp = 0
self.terminal = ShTerminal(
self.stash,
self,
self.txts,
width=self.txts.width,
height=self.txts.height - (self.vks.height + 8)
)
def keyboard_frame_did_change(self, frame):
"""
This is needed to make sure the extra key row is not covered by the
keyboard frame when it pops up.
:param frame:
:return:
"""
if self.on_screen:
if frame[3] > 0: # when keyboard appears
self.vks.hidden = False
self.txts.height = self.height - frame[3]
# Leave space for the virtual key row
self.terminal.size = self.txts.width, self.txts.height - (self.vks.height + 8)
else: # when keyboard goes away
# hide the virtual key row as well
self.vks.hidden = True
self.txts.height = self.height
# Take all space as virtual key row is now hidden
self.terminal.size = self.txts.width, self.txts.height
# TODO: Scroll to end? may not be necessary
def show(self):
"""
Present the UI
"""
self.present("panel")
self.terminal.begin_editing()
def close(self):
ui.View.close(self)
# on_exit() will be called in will_close()
# TODO: check the above
def will_close(self):
"""
Save stuff here
"""
self.on_exit()
def toggle_k_grp(self):
if self.on_k_grp == 0:
self.k_grp_1.bring_to_front()
else:
self.k_grp_0.bring_to_front()
self.on_k_grp = 1 - self.on_k_grp
def history_present(self, history):
"""
Present a history popover.
:param history: history to present
:type history: ShHistory
"""
listsource = ui.ListDataSource(history.getlist())
listsource.action = self.history_popover_tapped
table = ui.TableView()
listsource.font = self.BUTTON_FONT
table.data_source = listsource
table.delegate = listsource
table.width = 300
table.height = 300
table.row_height = self.BUTTON_FONT[1] + 4
table.present('popover')
table.wait_modal()
def history_popover_tapped(self, sender):
"""
Called when a row in the history popover was tapped.
:param sender: sender of the event
:type sender: ui.TableView
"""
if sender.selected_row >= 0:
self.history_selected(sender.items[sender.selected_row], sender.selected_row)
def _vk_tapped(self, sender):
"""
Called when a key was tapped
:param sender: sender of the event
:type sender: ui.Button
"""
# resolve key
mapping = [
            # we cannot use a dict here because ui.Button is unhashable
            # instead, we use a pair of (key, value) and apply a linear search
            # a binary search may be more efficient, but come on, this is definitely not required here
(self.k_tab, K_TAB),
(self.k_hist, K_HIST),
(self.k_hup, K_HUP),
(self.k_hdn, K_HDN),
(self.k_CC, K_CC),
(self.k_CD, K_CD),
(self.k_CU, K_CU),
(self.k_CZ, K_CZ),
(self.k_KB, K_KB),
]
key = None
for k, v in mapping:
if sender is k:
key = v
if key is None:
raise ValueError("Unknown sender: " + repr(sender))
# call action
self.stash.user_action_proxy.vk_tapped(key)
# ObjC related stuff
UIFont = ObjCClass('UIFont')
# noinspection PyAttributeOutsideInit,PyUnusedLocal,PyPep8Naming
class ShTerminal(ShBaseTerminal):
"""
This is a wrapper class of the actual TextView that subclass the SUITextView.
The wrapper is used to encapsulate the objc calls so that it behaves more like
a regular ui.TextView.
"""
def __init__(self, stash, parent, superview, width, height):
# Create the actual TextView by subclass SUITextView
UIKeyCommand = ObjCClass('UIKeyCommand')
def kcDispatcher_(_self, _cmd, _sender):
key_cmd = ObjCInstance(_sender)
stash.user_action_proxy.kc_pressed(str(key_cmd.input()), key_cmd.modifierFlags())
def keyCommands(_self, _cmd):
key_commands = [
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('C',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('D',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('P',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('N',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('K',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('U',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('A',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('E',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('W',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('L',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('Z',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('[',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(']',
CTRL_KEY_FLAG,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('UIKeyInputUpArrow',
0,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('UIKeyInputDownArrow',
0,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('UIKeyInputLeftArrow',
0,
'kcDispatcher:'),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_('UIKeyInputRightArrow',
0,
'kcDispatcher:'),
]
commands = ns(key_commands)
return commands.ptr
self.kc_handlers = {
('C',
CTRL_KEY_FLAG): parent.controlCAction,
('D',
CTRL_KEY_FLAG): parent.controlDAction,
('P',
CTRL_KEY_FLAG): parent.controlPAction,
('N',
CTRL_KEY_FLAG): parent.controlNAction,
('K',
CTRL_KEY_FLAG): parent.controlKAction,
('U',
CTRL_KEY_FLAG): parent.controlUAction,
('A',
CTRL_KEY_FLAG): parent.controlAAction,
('E',
CTRL_KEY_FLAG): parent.controlEAction,
('W',
CTRL_KEY_FLAG): parent.controlWAction,
('L',
CTRL_KEY_FLAG): parent.controlLAction,
('Z',
CTRL_KEY_FLAG): parent.controlZAction,
('[',
CTRL_KEY_FLAG): parent.dummyAction,
(']',
CTRL_KEY_FLAG): parent.dummyAction,
('UIKeyInputUpArrow', 0): parent.arrowUpAction,
('UIKeyInputDownArrow', 0): parent.arrowDownAction,
('UIKeyInputLeftArrow', 0): parent.arrowLeftAction,
('UIKeyInputRightArrow', 0): parent.arrowRightAction,
}
_ShTerminal = create_objc_class('_ShTerminal', ObjCClass('SUITextView'), [keyCommands, kcDispatcher_])
self.is_editing = False
self.superview = superview
self._delegate_view = ui.TextView()
self._delegate_view.delegate = stash.user_action_proxy.tv_delegate
self.tvo = _ShTerminal.alloc().initWithFrame_(((0, 0), (width, height))).autorelease()
self.tvo.setAutoresizingMask_(1 << 1 | 1 << 4) # flex Width and Height
self.content_inset = (0, 0, 0, 0)
self.auto_content_inset = False
        # This setting helps prevent the textview from jumping back to the top
self.non_contiguous_layout = False
# Allow editing to the text attributes
# self.editing_text_attributes = True
ObjCInstance(self.superview).addSubview_(self.tvo)
self.delegate = self._delegate_view
# TextStorage
self.tso = self.tvo.textStorage()
# init baseclass and set attributes depending on settings
# we have to do this this late because setting a few of these attributes requires self.tvo to be set
ShBaseTerminal.__init__(self, stash, parent)
self.default_font = UIFont.fontWithName_size_('Menlo-Regular', self.font_size)
self.bold_font = UIFont.fontWithName_size_('Menlo-Bold', self.font_size)
self.italic_font = UIFont.fontWithName_size_('Menlo-Italic', self.font_size)
self.bold_italic_font = UIFont.fontWithName_size_('Menlo-BoldItalic', self.font_size)
self.autocapitalization_type = ui.AUTOCAPITALIZE_NONE
self.autocorrection_type = 1
self.spellchecking_type = 1
@property
def delegate(self):
return self._delegate_view.delegate
@delegate.setter
@on_main_thread
def delegate(self, value):
self.tvo.setDelegate_(ObjCInstance(value).delegate())
@property
def background_color(self):
return self._background_color
@background_color.setter
@on_main_thread
def background_color(self, value):
self._background_color = value
r, g, b, a = ui.parse_color(value)
self.tvo.setBackgroundColor_(UIColor.colorWithRed_green_blue_alpha_(r, g, b, 1))
@property
def text_font(self):
return self._text_font
@text_font.setter
@on_main_thread
def text_font(self, value):
name, size = self._text_font = value
self.tvo.setFont_(UIFont.fontWithName_size_(name, size))
@property
def indicator_style(self):
return self.tvo.indicatorStyle()
@indicator_style.setter
@on_main_thread
def indicator_style(self, value):
choices = {
'default': 0,
'black': 1,
'white': 2,
}
self.tvo.setIndicatorStyle_(choices[value])
@property
def text_color(self):
return self._text_color
@text_color.setter
@on_main_thread
def text_color(self, value):
self._text_color = value
r, g, b, a = ui.parse_color(value)
self.tvo.setTextColor_(UIColor.colorWithRed_green_blue_alpha_(r, g, b, 1))
@property
def tint_color(self):
return self._tint_color
@tint_color.setter
@on_main_thread
def tint_color(self, value):
self._tint_color = value
r, g, b, a = ui.parse_color(value)
self.tvo.setTintColor_(UIColor.colorWithRed_green_blue_alpha_(r, g, b, 1))
@property
def text(self):
return six.text_type(self.tvo.text())
@text.setter
@on_main_thread
def text(self, value):
self.tvo.setText_(value)
@property
def text_length(self):
return self.tvo.text().length()
@property
def attributed_text(self):
return self.tvo.attributedText()
@attributed_text.setter
@on_main_thread
def attributed_text(self, value):
self.tvo.setAttributedText_(value)
@property
def selected_range(self):
nsrange = self.tvo.selectedRange()
return nsrange.location, nsrange.location + nsrange.length
@selected_range.setter
@on_main_thread
def selected_range(self, rng):
"""
        Set the cursor selection range. Note it checks the current range first and
        only changes it if the new range is different. This is to avoid setting an
        unwanted cursor_synced flag. Without the check, the cursor would be repositioned
        with the same range; this turns on the cursor_synced flag BUT will NOT trigger
        the did_change_selection event (which is paired to cancel the cursor_synced
        flag).
"""
if self.selected_range != rng:
self.cursor_synced = True
self.tvo.setSelectedRange_((rng[0], rng[1] - rng[0]))
@property
def autocapitalization_type(self):
return self._autocapitalization_type
@autocapitalization_type.setter
@on_main_thread
def autocapitalization_type(self, value):
self._autocapitalization_type = value
self.tvo.performSelector_withObject_('setAutocapitalizationType:', value)
@property
def autocorrection_type(self):
return self._autocorrection_type
@autocorrection_type.setter
@on_main_thread
def autocorrection_type(self, value):
self._autocorrection_type = value
ObjCInstanceMethod(self.tvo, 'setAutocorrectionType:')(value)
@property
def spellchecking_type(self):
return self._spellchecking_type
@spellchecking_type.setter
@on_main_thread
def spellchecking_type(self, value):
self._spellchecking_type = value
self.tvo.performSelector_withObject_('setSpellCheckingType:', value)
@property
def content_inset(self):
return self._content_inset
@content_inset.setter
@on_main_thread
def content_inset(self, value):
self._content_inset = value
insetStructure = self.tvo.contentInset()
insetStructure.top, insetStructure.left, insetStructure.bottom, insetStructure.right = value
@property
def auto_content_inset(self):
return self._auto_content_inset
@auto_content_inset.setter
@on_main_thread
def auto_content_inset(self, value):
self._auto_content_inset = value
self.tvo.setAutomaticallyAdjustsContentInsetForKeyboard_(value)
@property
def non_contiguous_layout(self):
return self._non_contiguous_layout
@non_contiguous_layout.setter
@on_main_thread
def non_contiguous_layout(self, value):
self._non_contiguous_layout = value
self.tvo.layoutManager().setAllowsNonContiguousLayout_(value)
@property
def editing_text_attributes(self):
return self._editing_text_attributes
@editing_text_attributes.setter
@on_main_thread
def editing_text_attributes(self, value):
self._editing_text_attributes = value
self.tvo.setAllowsEditingTextAttributes_(value)
@on_main_thread
def scroll_range_to_visible(self, rng):
self.tvo.scrollRangeToVisible_(rng)
@property
def size(self):
size = self.tvo.size()
return size.width, size.height
@size.setter
@on_main_thread
def size(self, value):
"""
Set the width and height of the view
:param value: A tuple of (width, height)
"""
self.tvo.setSize_(value)
@property
def content_size(self):
size = self.tvo.contentSize()
return size.width, size.height
@property
def content_offset(self):
point = self.tvo.contentOffset()
return point.x, point.y
@property
def visible_rect(self):
rect = self.tvo.visibleRect()
return rect.size.width, rect.size.height, rect.origin.x, rect.origin.y
@on_main_thread
def scroll_to_end(self):
content_height = self.content_size[1]
# rect_height is the visible rect's height
# rect_y is the y location where the visible rect locates in the
# coordinate of content_size
_, rect_height, _, rect_y = self.visible_rect
# If the space below rect_y is more than the visible rect's height,
# or if the visible rect is over-scrolled, scroll to the last line.
if content_height - rect_y > rect_height or \
(content_height > rect_height > content_height - rect_y): # over-scroll
self.tvo.scrollRangeToVisible_((len(self.text), 0))
@on_main_thread
def begin_editing(self):
self.tvo.becomeFirstResponder()
@on_main_thread
def end_editing(self):
self.tvo.resignFirstResponder()
def set_focus(self):
self.begin_editing()
def lose_focus(self):
self.end_editing()
# noinspection PyCallingNonCallable
def kc_pressed(self, key, modifierFlags):
handler = self.kc_handlers.get((key, modifierFlags), None)
if handler:
handler()
def get_wh(self):
font_width, font_height = ui.measure_string(
'a',
font=('Menlo-Regular', self.stash.config.getint('display', 'TEXT_FONT_SIZE')))
w = int(self.stash.ui.width / font_width)
h = int(self.stash.ui.height / font_height)
return (w, h)
class ShSequentialRenderer(ShBaseSequentialRenderer):
"""
A specific renderer for `ShSequentialScreen`. It does its job by
building texts from the in-memory screen and insert them to the
UI terminal.
:param ShSequentialScreen screen: In memory screen
:param ShTerminal terminal: The real UI terminal
"""
FG_COLORS = {
'black': BlackColor,
'red': RedColor,
'green': GreenColor,
'brown': BrownColor,
'blue': BlueColor,
'magenta': MagentaColor,
'cyan': CyanColor,
'white': WhiteColor,
'gray': GrayColor,
'yellow': YellowColor,
'smoke': SmokeColor,
'default': WhiteColor,
}
BG_COLORS = {
'black': BlackColor,
'red': RedColor,
'green': GreenColor,
'brown': BrownColor,
'blue': BlueColor,
'magenta': MagentaColor,
'cyan': CyanColor,
'white': WhiteColor,
'gray': GrayColor,
'yellow': YellowColor,
'smoke': SmokeColor,
'default': BlackColor,
}
RENDER_INTERVAL = 0.1
def __init__(self, *args, **kwargs):
ShBaseSequentialRenderer.__init__(self, *args, **kwargs)
self.last_rendered_time = 0
self.render_thread = None
def _get_font(self, attrs):
if attrs.bold and attrs.italics:
return self.terminal.bold_italic_font
elif attrs.bold:
return self.terminal.bold_font
elif attrs.italics:
return self.terminal.italic_font
else:
return self.terminal.default_font
def _build_attributes(self, attrs):
return {
'NSColor': self.FG_COLORS.get(attrs.fg,
WhiteColor),
'NSBackgroundColor': self.BG_COLORS.get(attrs.bg,
BlackColor),
'NSFont': self._get_font(attrs),
'NSUnderline': 1 if attrs.underscore else 0,
'NSStrikethrough': 1 if attrs.strikethrough else 0,
}
def _build_attributed_string(self, chars):
"""
Build attributed text in a more efficient way than char by char.
It groups characters with the same attributes and apply the attributes
to them at once.
:param [ShChar] chars: A list of ShChar upon which the attributed text is built.
:rtype: object
"""
# Initialize a string with default attributes
attributed_text = NSMutableAttributedString.alloc().initWithString_attributes_(
''.join(char.data for char in chars),
self._build_attributes(DEFAULT_CHAR),
).autorelease()
prev_char = chars[0]
location = length = 0
for idx, curr_char in enumerate(chars):
length += 1
if not ShChar.same_style(prev_char, curr_char): # a group is found
if not ShChar.same_style(prev_char, DEFAULT_CHAR): # skip default attrs
attributed_text.setAttributes_range_(self._build_attributes(prev_char), (location, length - 1))
length = 1
location = idx
prev_char = curr_char
if idx == len(chars) - 1: # last char
if not ShChar.same_style(prev_char, DEFAULT_CHAR):
attributed_text.setAttributes_range_(self._build_attributes(prev_char), (location, length))
return attributed_text
def render(self, no_wait=False):
"""
Render the screen buffer to the UITextView. Normally the rendering process
is delayed to throttle the total attempts of rendering.
:param bool no_wait: Immediately render the screen without delay.
"""
# The last_rendered_time is useful to ensure that the first rendering
# is not delayed.
if time() - self.last_rendered_time > self.RENDER_INTERVAL or no_wait:
if self.render_thread is not None:
self.render_thread.cancel()
self._render()
else: # delayed rendering
if self.render_thread is None or not self.render_thread.isAlive():
self.render_thread = sh_delay(self._render, self.RENDER_INTERVAL)
# Do nothing if there is already a delayed rendering thread waiting
@on_main_thread
def _render(self):
# This must run on the main UI thread. Otherwise it crashes.
self.last_rendered_time = time()
# Lock screen to get atomic information
with self.screen.acquire_lock():
intact_left_bound, intact_right_bound = self.screen.get_bounds()
screen_buffer_length = self.screen.text_length
cursor_xs, cursor_xe = self.screen.cursor_x
renderable_chars = self.screen.renderable_chars
self.screen.clean()
# Specific code for ios 8 to fix possible crash
if ON_IOS_8:
tvo_texts = NSMutableAttributedString.alloc().initWithAttributedString_(self.terminal.tvo.attributedText()
).autorelease()
else:
tvo_texts = self.terminal.tso
tvo_texts.beginEditing() # batch the changes
# First remove any leading texts that are rotated out
if intact_left_bound > 0:
tvo_texts.replaceCharactersInRange_withString_((0, intact_left_bound), '')
tv_text_length = tvo_texts.length()
# Second (re)render any modified trailing texts
# When there are contents beyond the right bound, either on screen
# or on terminal, the contents need to be re-rendered.
if intact_right_bound < max(tv_text_length, screen_buffer_length):
if len(renderable_chars) > 0:
tvo_texts.replaceCharactersInRange_withAttributedString_(
(intact_right_bound,
tv_text_length - intact_right_bound),
self._build_attributed_string(renderable_chars)
)
else: # empty string, pure deletion
tvo_texts.replaceCharactersInRange_withString_(
(intact_right_bound,
tv_text_length - intact_right_bound),
''
)
if ON_IOS_8:
self.terminal.tvo.setAttributedText_(tvo_texts) # set the text
else:
tvo_texts.endEditing() # end of batched changes
# Set the cursor position. This makes terminal and main screen cursors in sync
self.terminal.selected_range = (cursor_xs, cursor_xe)
# Ensure cursor line is visible by scroll to the end of the text
self.terminal.scroll_to_end()
| mit | 1,184,869,620,436,011,500 | 36.718325 | 118 | 0.558313 | false |
Giswater/giswater_qgis_plugin | map_tools/draw_profiles.py | 1 | 77823 | """
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# -*- coding: utf-8 -*-
from qgis.core import QgsFeatureRequest, QgsVectorLayer, QgsProject, QgsReadWriteContext, QgsPrintLayout
from qgis.gui import QgsMapToolEmitPoint
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QListWidget, QListWidgetItem, QLineEdit, QFileDialog
from qgis.PyQt.QtXml import QDomDocument
from functools import partial
from decimal import Decimal
import matplotlib.pyplot as plt
import math
import os
import json
from .. import utils_giswater
from .parent import ParentMapTool
from ..ui_manager import DrawProfile
from ..ui_manager import LoadProfiles
class NodeData:
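    """ Container for the node and arc values used to draw one profile node """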
def __init__(self):
self.start_point = None
self.top_elev = None
self.ymax = None
self.z1 = None
self.z2 = None
self.cat_geom = None
self.geom = None
self.slope = None
self.elev1 = None
self.elev2 = None
self.y1 = None
self.y2 = None
self.node_id = None
self.elev = None
self.code = None
self.node_1 = None
self.node_2 = None
class DrawProfiles(ParentMapTool):
""" Button 43: Draw_profiles """
def __init__(self, iface, settings, action, index_action):
""" Class constructor """
# Call ParentMapTool constructor
super().__init__(iface, settings, action, index_action)
self.list_of_selected_nodes = []
self.nodes = []
self.rotation_vd_exist = False
def activate(self):
# Remove all selections on canvas
self.remove_selection()
# Get version of pgRouting
sql = "SELECT version FROM pgr_version()"
row = self.controller.get_row(sql)
if not row:
message = "Error getting pgRouting version"
self.controller.show_warning(message)
return
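        # Keep only the major version digit of pgRouting (e.g. '2' or '3')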
self.version = str(row[0][:1])
# Set dialog
self.dlg_draw_profile = DrawProfile()
self.load_settings(self.dlg_draw_profile)
self.dlg_draw_profile.setWindowFlags(Qt.WindowStaysOnTopHint)
# Set icons
self.set_icon(self.dlg_draw_profile.btn_add_start_point, "111")
self.set_icon(self.dlg_draw_profile.btn_add_end_point, "111")
self.set_icon(self.dlg_draw_profile.btn_add_additional_point, "111")
self.set_icon(self.dlg_draw_profile.btn_delete_additional_point, "112")
self.widget_start_point = self.dlg_draw_profile.findChild(QLineEdit, "start_point")
self.widget_end_point = self.dlg_draw_profile.findChild(QLineEdit, "end_point")
self.widget_additional_point = self.dlg_draw_profile.findChild(QListWidget, "list_additional_points")
self.composers_path = self.dlg_draw_profile.findChild(QLineEdit, "composers_path")
start_point = QgsMapToolEmitPoint(self.canvas)
end_point = QgsMapToolEmitPoint(self.canvas)
self.start_end_node = [None, None]
# Set signals
self.dlg_draw_profile.rejected.connect(self.manage_rejected)
self.dlg_draw_profile.btn_close.clicked.connect(self.manage_rejected)
self.dlg_draw_profile.btn_add_start_point.clicked.connect(partial(self.activate_snapping, start_point))
self.dlg_draw_profile.btn_add_end_point.clicked.connect(partial(self.activate_snapping, end_point))
self.dlg_draw_profile.btn_add_start_point.clicked.connect(partial(self.activate_snapping_node, self.dlg_draw_profile.btn_add_start_point))
self.dlg_draw_profile.btn_add_end_point.clicked.connect(partial(self.activate_snapping_node, self.dlg_draw_profile.btn_add_end_point))
self.dlg_draw_profile.btn_add_additional_point.clicked.connect(partial(self.activate_snapping, start_point))
self.dlg_draw_profile.btn_add_additional_point.clicked.connect(partial(self.activate_snapping_node, self.dlg_draw_profile.btn_add_additional_point))
self.dlg_draw_profile.btn_delete_additional_point.clicked.connect(self.delete_additional_point)
self.dlg_draw_profile.btn_save_profile.clicked.connect(self.save_profile)
self.dlg_draw_profile.btn_load_profile.clicked.connect(self.load_profile)
self.dlg_draw_profile.btn_draw.clicked.connect(self.execute_profiles)
self.dlg_draw_profile.btn_clear_profile.clicked.connect(self.clear_profile)
self.dlg_draw_profile.btn_export_pdf.clicked.connect(self.export_pdf)
self.dlg_draw_profile.btn_export_pdf.clicked.connect(self.save_rotation_vdefault)
self.dlg_draw_profile.btn_update_path.clicked.connect(self.set_composer_path)
# Plugin path
plugin_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Get qgis_composers_path
sql = "SELECT value FROM config_param_user WHERE parameter = 'qgis_composers_path'"
row = self.controller.get_row(sql)
utils_giswater.setWidgetText(self.dlg_draw_profile, self.composers_path, str(row[0]))
# Fill ComboBox cbx_template with templates *.qpt from ...giswater/templates
template_path = utils_giswater.getWidgetText(self.dlg_draw_profile, self.composers_path)
template_files = []
try:
template_files = os.listdir(template_path)
        except FileNotFoundError:
pass
self.files_qpt = [i for i in template_files if i.endswith('.qpt')]
self.dlg_draw_profile.cbx_template.clear()
self.dlg_draw_profile.cbx_template.addItem('')
for template in self.files_qpt:
self.dlg_draw_profile.cbx_template.addItem(str(template))
self.dlg_draw_profile.cbx_template.currentIndexChanged.connect(self.set_template)
self.layer_node = self.controller.get_layer_by_tablename("v_edit_node")
self.layer_arc = self.controller.get_layer_by_tablename("v_edit_arc")
self.list_of_selected_nodes = []
self.open_dialog(self.dlg_draw_profile)
def set_composer_path(self):
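        """ Ask for the composers folder, store it in config_param_user and reload the *.qpt template list """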
self.get_folder_dialog(self.dlg_draw_profile, 'composers_path')
template_path = utils_giswater.getWidgetText(self.dlg_draw_profile, self.composers_path)
sql = (f"UPDATE config_param_user "
f"SET value = '{template_path}' "
f"WHERE parameter = 'qgis_composers_path'")
self.controller.execute_sql(sql)
utils_giswater.setWidgetText(self.dlg_draw_profile, self.composers_path, str(template_path))
template_files = []
try:
template_files = os.listdir(template_path)
        except FileNotFoundError:
pass
self.files_qpt = [i for i in template_files if i.endswith('.qpt')]
self.dlg_draw_profile.cbx_template.clear()
self.dlg_draw_profile.cbx_template.addItem('')
for template in self.files_qpt:
self.dlg_draw_profile.cbx_template.addItem(str(template))
self.dlg_draw_profile.cbx_template.currentIndexChanged.connect(self.set_template)
def save_profile(self):
""" Save profile """
profile_id = self.dlg_draw_profile.profile_id.text()
start_point = self.widget_start_point.text()
end_point = self.widget_end_point.text()
# Check if all data are entered
if profile_id == '' or start_point == '' or end_point == '':
message = "Some data is missing"
self.controller.show_info_box(message, "Info")
return
# Check if id of profile already exists in DB
sql = (f"SELECT DISTINCT(profile_id) "
f"FROM anl_arc_profile_value "
f"WHERE profile_id = '{profile_id}'")
row = self.controller.get_row(sql)
if row:
message = "Selected 'profile_id' already exist in database"
self.controller.show_warning(message, parameter=profile_id)
return
list_arc = []
n = self.dlg_draw_profile.tbl_list_arc.count()
for i in range(n):
list_arc.append(str(self.dlg_draw_profile.tbl_list_arc.item(i).text()))
sql = ""
for i in range(n):
sql += (f"INSERT INTO anl_arc_profile_value (profile_id, arc_id, start_point, end_point) "
f" VALUES ('{profile_id}', '{list_arc[i]}', '{start_point}', '{end_point}');\n")
status = self.controller.execute_sql(sql)
if not status:
message = "Error inserting profile table, you need to review data"
self.controller.show_warning(message)
return
# Show message to user
message = "Values has been updated"
self.controller.show_info(message)
self.deactivate()
def load_profile(self):
""" Open dialog load_profiles.ui """
self.dlg_load = LoadProfiles()
self.load_settings(self.dlg_load)
self.dlg_load.rejected.connect(partial(self.close_dialog, self.dlg_load.rejected))
self.dlg_load.btn_open.clicked.connect(self.open_profile)
self.dlg_load.btn_delete_profile.clicked.connect(self.delete_profile)
sql = "SELECT DISTINCT(profile_id) FROM anl_arc_profile_value"
rows = self.controller.get_rows(sql, commit=True)
if rows:
for row in rows:
item_arc = QListWidgetItem(str(row[0]))
self.dlg_load.tbl_profiles.addItem(item_arc)
self.open_dialog(self.dlg_load)
self.deactivate()
def open_profile(self):
""" Open selected profile from dialog load_profiles.ui """
selected_list = self.dlg_load.tbl_profiles.selectionModel().selectedRows()
if len(selected_list) == 0:
message = "Any record selected"
self.controller.show_warning(message)
return
# Selected item from list
selected_profile = self.dlg_load.tbl_profiles.currentItem().text()
# Get data from DB for selected item| profile_id, start_point, end_point
sql = ("SELECT start_point, end_point"
" FROM anl_arc_profile_value"
" WHERE profile_id = '" + selected_profile + "'")
row = self.controller.get_row(sql)
if not row:
return
start_point = row['start_point']
end_point = row['end_point']
# Fill widgets of form draw_profile | profile_id, start_point, end_point
self.widget_start_point.setText(str(start_point))
self.widget_end_point.setText(str(end_point))
self.dlg_draw_profile.profile_id.setText(str(selected_profile))
# Get all arcs from selected profile
sql = ("SELECT arc_id"
" FROM anl_arc_profile_value"
" WHERE profile_id = '" + selected_profile + "'")
rows = self.controller.get_rows(sql, commit=True)
if not rows:
return
arc_id = []
for row in rows:
arc_id.append(str(row[0]))
# Select arcs of the shortest path
for element_id in arc_id:
sql = ("SELECT sys_type"
" FROM v_edit_arc"
" WHERE arc_id = '" + str(element_id) + "'")
row = self.controller.get_row(sql)
if not row:
return
# Select feature from v_edit_man_@sys_type
sys_type = str(row[0].lower())
sql = "SELECT parent_layer FROM cat_feature WHERE system_id = '" + sys_type.upper() + "' LIMIT 1"
row = self.controller.get_row(sql, log_sql=True, commit=True)
self.layer_feature = self.controller.get_layer_by_tablename(row[0])
aux = ""
for row in arc_id:
aux += "arc_id = '" + str(row) + "' OR "
aux = aux[:-3] + ""
# Select snapped features
selection = self.layer_feature.getFeatures(QgsFeatureRequest().setFilterExpression(aux))
self.layer_feature.selectByIds([a.id() for a in selection])
node_id = []
for element_id in arc_id:
sql = ("SELECT node_1, node_2"
" FROM arc"
" WHERE arc_id = '" + str(element_id) + "'")
row = self.controller.get_row(sql)
node_id.append(row[0])
node_id.append(row[1])
if not row:
return
# Remove duplicated nodes
singles_list = []
for element in node_id:
if element not in singles_list:
singles_list.append(element)
node_id = singles_list
# Select nodes of shortest path on layers v_edit_man_|feature
for element_id in node_id:
sql = ("SELECT sys_type"
" FROM v_edit_node"
" WHERE node_id = '" + str(element_id) + "'")
row = self.controller.get_row(sql)
if not row:
return
# Select feature from v_edit_man_@sys_type
sys_type = str(row[0].lower())
sql = "SELECT parent_layer FROM cat_feature WHERE system_id = '" + sys_type.upper() + "' LIMIT 1"
row = self.controller.get_row(sql, log_sql=True, commit=True)
self.layer_feature = self.controller.get_layer_by_tablename(row[0])
aux = ""
for row in node_id:
aux += "node_id = '" + str(row) + "' OR "
aux = aux[:-3] + ""
# Select snapped features
selection = self.layer_feature.getFeatures(QgsFeatureRequest().setFilterExpression(aux))
self.layer_feature.selectByIds([a.id() for a in selection])
# Select arcs of shortest path on v_edit_arc for ZOOM SELECTION
expr_filter = "\"arc_id\" IN ("
for i in range(len(arc_id)):
expr_filter += "'" + str(arc_id[i]) + "', "
expr_filter = expr_filter[:-2] + ")"
(is_valid, expr) = self.check_expression(expr_filter, True) #@UnusedVariable
if not is_valid:
return
# Build a list of feature id's from the previous result
# Select features with these id's
it = self.layer_arc.getFeatures(QgsFeatureRequest(expr))
self.id_list = [i.id() for i in it]
self.layer_arc.selectByIds(self.id_list)
# Center shortest path in canvas - ZOOM SELECTION
self.canvas.zoomToSelected(self.layer_arc)
# After executing of profile enable btn_draw
self.dlg_draw_profile.btn_draw.setDisabled(False)
# Clear list
list_arc = []
self.dlg_draw_profile.tbl_list_arc.clear()
# Load list of arcs
for i in range(len(arc_id)):
item_arc = QListWidgetItem(arc_id[i])
self.dlg_draw_profile.tbl_list_arc.addItem(item_arc)
list_arc.append(arc_id[i])
self.node_id = node_id
self.arc_id = arc_id
# Draw profile
self.paint_event(self.arc_id, self.node_id)
self.dlg_draw_profile.cbx_template.setDisabled(False)
self.dlg_draw_profile.btn_export_pdf.setDisabled(False)
self.dlg_draw_profile.title.setDisabled(False)
self.dlg_draw_profile.rotation.setDisabled(False)
self.dlg_draw_profile.scale_vertical.setDisabled(False)
self.dlg_draw_profile.scale_horizontal.setDisabled(False)
self.dlg_draw_profile.btn_update_path.setDisabled(False)
self.close_dialog(self.dlg_load)
self.rotation_vd_exist = True
def activate_snapping(self, emit_point):
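        """ Set the given emit-point tool as the active map tool and connect canvas signals for snapping """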
self.canvas.setMapTool(emit_point)
snapper = self.snapper_manager.get_snapper()
self.canvas.xyCoordinates.connect(self.mouse_move)
emit_point.canvasClicked.connect(partial(self.snapping_node, snapper))
def activate_snapping_node(self, widget):
# Create the appropriate map tool and connect the gotPoint() signal.
self.emit_point = QgsMapToolEmitPoint(self.canvas)
self.canvas.setMapTool(self.emit_point)
self.snapper = self.snapper_manager.get_snapper()
self.iface.setActiveLayer(self.layer_node)
self.canvas.xyCoordinates.connect(self.mouse_move)
# widget = clicked button
        # self.widget_start_point | self.widget_end_point : QLineEdit widgets
if str(widget.objectName()) == "btn_add_start_point":
self.widget_point = self.widget_start_point
if str(widget.objectName()) == "btn_add_end_point":
self.widget_point = self.widget_end_point
if str(widget.objectName()) == "btn_add_additional_point":
self.widget_point = self.widget_additional_point
self.emit_point.canvasClicked.connect(self.snapping_node)
def mouse_move(self, point):
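        """ Show a vertex marker while the mouse hovers over a snappable node """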
event_point = self.snapper_manager.get_event_point(point=point)
# Snapping
result = self.snapper_manager.snap_to_current_layer(event_point)
if self.snapper_manager.result_is_valid():
layer = self.snapper_manager.get_snapped_layer(result)
if layer == self.layer_node:
self.snapper_manager.add_marker(result, self.vertex_marker)
else:
self.vertex_marker.hide()
def snapping_node(self, point): # @UnusedVariable
# Get clicked point
event_point = self.snapper_manager.get_event_point(point=point)
# Snapping
result = self.snapper_manager.snap_to_current_layer(event_point)
if self.snapper_manager.result_is_valid():
# Check feature
layer = self.snapper_manager.get_snapped_layer(result)
if layer == self.layer_node:
# Get the point
snapped_feat = self.snapper_manager.get_snapped_feature(result)
element_id = snapped_feat.attribute('node_id')
self.element_id = str(element_id)
# Leave selection
if self.widget_point == self.widget_start_point or self.widget_point == self.widget_end_point:
self.widget_point.setText(str(element_id))
if self.widget_point == self.widget_additional_point:
                    # Check if node already exists in the list of additional points
                    # Clear the list, since only one additional point is allowed
self.widget_additional_point.clear()
item_arc = QListWidgetItem(str(self.element_id))
self.widget_additional_point.addItem(item_arc)
n = len(self.start_end_node)
if n <=2:
self.start_end_node.insert(1, str(self.element_id))
if n > 2:
self.start_end_node[1] = str(self.element_id)
self.exec_path()
self.layer_feature = self.layer_node
# widget = clicked button
        # self.widget_start_point | self.widget_end_point : QLineEdit widgets
# start_end_node = [0] : node start | start_end_node = [1] : node end
aux = ""
if str(self.widget_point.objectName()) == "start_point":
self.start_end_node[0] = self.widget_point.text()
aux = f"node_id = '{self.start_end_node[0]}'"
if str(self.widget_point.objectName()) == "end_point":
self.start_end_node[1] = self.widget_point.text()
aux = f"node_id = '{self.start_end_node[0]}' OR node_id = '{self.start_end_node[1]}'"
if str(self.widget_point.objectName()) == "list_sdditional_points":
            # After start_point and end_point, append the additional points stored in self.start_end_node
aux = f"node_id = '{self.start_end_node[0]}' OR node_id = '{self.start_end_node[1]}'"
for i in range(2, len(self.start_end_node)):
aux += f" OR node_id = '{self.start_end_node[i]}'"
# Select snapped features
selection = self.layer_feature.getFeatures(QgsFeatureRequest().setFilterExpression(aux))
self.layer_feature.selectByIds([k.id() for k in selection])
self.exec_path()
def paint_event(self, arc_id, node_id):
""" Parent function - Draw profiles """
# Clear plot
plt.gcf().clear()
        # arc_id, node_id: lists of arcs and nodes from the Dijkstra algorithm
self.set_parameters(arc_id, node_id)
self.fill_memory()
self.set_table_parameters()
# Start drawing
# Draw first | start node
self.draw_first_node(self.nodes[0])
# Draw nodes between first and last node
for i in range(1, self.n - 1):
self.draw_nodes(self.nodes[i], self.nodes[i - 1], i)
self.draw_ground()
# Draw last node
self.draw_last_node(self.nodes[self.n - 1], self.nodes[self.n - 2], self.n - 1)
# Set correct variable for draw ground (drawn centered)
self.first_top_x = self.first_top_x + self.nodes[self.n - 2].geom / 2
self.draw_ground()
self.draw_table_horizontals()
self.set_properties()
self.draw_coordinates()
self.draw_grid()
self.plot = plt
# If file profile.png exist overwrite
plugin_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
img_path = plugin_path + os.sep + "templates" + os.sep + "profile.png"
fig_size = plt.rcParams["figure.figsize"]
# Set figure width to 10.4 and height to 4.8
fig_size[0] = 10.4
fig_size[1] = 4.8
plt.rcParams["figure.figsize"] = fig_size
# Save profile with dpi = 300
plt.savefig(img_path, dpi=300)
def set_properties(self):
""" Set properties of main window """
# Set window name
self.win = plt.gcf()
self.win.canvas.set_window_title('Draw Profile')
# Hide axes
self.axes = plt.gca()
self.axes.set_axis_off()
# Set background color of window
self.fig1 = plt.figure(1)
self.fig1.tight_layout()
self.rect = self.fig1.patch
self.rect.set_facecolor('white')
# Set axes
x_min = round(self.nodes[0].start_point - self.fix_x - self.fix_x * Decimal(0.15))
x_max = round(self.nodes[self.n - 1].start_point + self.fix_x * Decimal(0.15))
self.axes.set_xlim([x_min, x_max])
# Set y-axes
        y_min = round(self.min_top_elev - self.z - self.height_row * Decimal(1.5))
        y_max = round(self.max_top_elev + self.height_row * Decimal(1.5))
        self.axes.set_ylim([y_min, y_max + 1])
def set_parameters(self, arc_id, node_id):
""" Get and calculate parameters and values for drawing """
self.list_of_selected_arcs = arc_id
self.list_of_selected_nodes = node_id
self.gis_length = [0]
self.start_point = [0]
# Get arcs between nodes (on shortest path)
self.n = len(self.list_of_selected_nodes)
# Get length (gis_length) of arcs and save them in separate list ( self.gis_length )
for arc_id in self.list_of_selected_arcs:
# Get gis_length from v_edit_arc
sql = (f"SELECT gis_length "
f"FROM v_edit_arc "
f"WHERE arc_id = '{arc_id}'")
row = self.controller.get_row(sql)
if row:
self.gis_length.append(row[0])
# Calculate start_point (coordinates) of drawing for each node
n = len(self.gis_length)
for i in range(1, n):
x = self.start_point[i - 1] + self.gis_length[i]
self.start_point.append(x)
i += 1
def fill_memory(self):
""" Get parameters from data base. Fill self.nodes with parameters postgres """
self.nodes.clear()
bad_nodes_id = []
# Get parameters and fill the nodes
for i, node_id in enumerate(self.node_id):
# parameters : list of parameters for one node
parameters = NodeData()
parameters.start_point = self.start_point[i]
# Get data top_elev ,y_max, elev, nodecat_id from v_edit_node
# Change elev to sys_elev
sql = (f"SELECT sys_top_elev AS top_elev, sys_ymax AS ymax, sys_elev, nodecat_id, code "
f"FROM v_edit_node "
f"WHERE node_id = '{node_id}'")
# query for nodes
# SELECT elevation AS top_elev, depth AS ymax, top_elev-depth AS sys_elev, nodecat_id, code"
row = self.controller.get_row(sql)
columns = ['top_elev', 'ymax', 'sys_elev', 'nodecat_id', 'code']
if row:
if row[0] is None or row[1] is None or row[2] is None or row[3] is None or row[4] is None:
bad_nodes_id.append(node_id)
# Check if we have all data for drawing
for x in range(len(columns)):
if row[x] is None:
sql = (f"SELECT value::decimal(12,3) "
f"FROM config_param_system "
f"WHERE parameter = '{columns[x]}_vd'")
result = self.controller.get_row(sql)
row[x] = result[0]
parameters.top_elev = row[0]
parameters.ymax = row[1]
parameters.elev = row[2]
nodecat_id = row[3]
parameters.code = row[4]
parameters.node_id = str(node_id)
# Get data z1, z2 ,cat_geom1 ,elev1 ,elev2 , y1 ,y2 ,slope from v_edit_arc
# Change to elevmax1 and elevmax2
# Geom1 from cat_node
sql = (f"SELECT geom1 "
f"FROM cat_node "
f"WHERE id = '{nodecat_id}'")
row = self.controller.get_row(sql)
columns = ['geom1']
if row:
if row[0] is None:
bad_nodes_id.append(node_id)
# Check if we have all data for drawing
for x in range(0, len(columns)):
if row[x] is None:
sql = (f"SELECT value::decimal(12,3) "
f"FROM config_param_system "
f"WHERE parameter = '{columns[x]}_vd'")
result = self.controller.get_row(sql)
row[x] = result[0]
parameters.geom = row[0]
# Set node_id in nodes
parameters.node_id = node_id
self.nodes.append(parameters)
n = 0
for element_id in self.arc_id:
sql = (f"SELECT z1, z2, cat_geom1, sys_elev1, sys_elev2, sys_y1 AS y1, sys_y2 AS y2, slope, node_1, node_2 "
f"FROM v_edit_arc "
f"WHERE arc_id = '{element_id}'")
row = self.controller.get_row(sql)
# TODO:: v_nodes -> query for arcs
# SELECT 0 AS z1, 0 AS z2 , dnom/1000, NULL as sys_elev1, NULL as sys_elev2, NULL as y1, NULL as y2, NULL as slope, node_1, node_2,
columns = ['z1','z2','cat_geom1', 'sys_elev1', 'sys_elev2', 'y1', 'y2', 'slope']
# Check if self.nodes[n] is out of range
if n >= len(self.nodes):
return
if row:
# Check if we have all data for drawing
if row[0] is None or row[1] is None or row[2] is None or row[3] is None or row[4] is None or \
row[5] is None or row[6] is None or row[7] is None:
bad_nodes_id.append(element_id)
for x in range(0, len(columns)):
if row[x] is None:
sql = (f"SELECT value::decimal(12,3) "
f"FROM config_param_system "
f"WHERE parameter = '{columns[x]}_vd'")
result = self.controller.get_row(sql)
row[x] = result[0]
self.nodes[n].z1 = row[0]
self.nodes[n].z2 = row[1]
self.nodes[n].cat_geom = row[2]
self.nodes[n].elev1 = row[3]
self.nodes[n].elev2 = row[4]
self.nodes[n].y1 = row[5]
self.nodes[n].y2 = row[6]
self.nodes[n].slope = row[7]
self.nodes[n].node_1 = row[8]
self.nodes[n].node_2 = row[9]
n += 1
if not bad_nodes_id:
return
message = "Some parameters are missing (Values Defaults used for)"
self.controller.show_info_box(message, "Info", str(bad_nodes_id))
def draw_first_node(self, node):
""" Draw first node """
if node.node_id == node.node_1:
z = node.z1
reverse = False
else:
z = node.z2
reverse = True
# Get superior points
s1x = -node.geom / 2
s1y = node.top_elev
s2x = node.geom / 2
s2y = node.top_elev
s3x = node.geom / 2
s3y = node.top_elev - node.ymax + z + node.cat_geom
# Get inferior points
i1x = -node.geom / 2
i1y = node.top_elev - node.ymax
i2x = node.geom / 2
i2y = node.top_elev - node.ymax
i3x = node.geom / 2
i3y = node.top_elev - node.ymax + z
# Create list points
xinf = [s1x, i1x, i2x, i3x]
yinf = [s1y, i1y, i2y, i3y]
xsup = [s1x, s2x, s3x]
ysup = [s1y, s2y, s3y]
row = self.controller.get_config('draw_profile_conf')
if row is not None:
row = json.loads(row[0])
if 'color' in row:
                # Draw lines according to the list of points
plt.plot(xinf, yinf, row['color'])
plt.plot(xsup, ysup, row['color'])
else:
plt.plot(xinf, yinf, 'black', zorder=100)
plt.plot(xsup, ysup, 'black', zorder=100)
self.first_top_x = 0
self.first_top_y = node.top_elev
# Draw fixed part of table
self.draw_fix_table(node.start_point, reverse)
# Save last points for first node
self.slast = [s3x, s3y]
self.ilast = [i3x, i3y]
# Save last points for first node
self.slast2 = [s3x, s3y]
self.ilast2 = [i3x, i3y]
def draw_fix_table(self, start_point, reverse):
""" Draw fixed part of table """
# DRAW TABLE - FIXED PART
# Draw fixed part of table
self.draw_marks(0)
# Vertical line [-1,0]
x = [start_point - self.fix_x * Decimal(0.2), start_point - self.fix_x * Decimal(0.2)]
y = [self.min_top_elev - 1 * self.height_row, self.min_top_elev - 6 * self.height_row]
plt.plot(x, y, 'black', zorder=100)
# Vertical line [-2,0]
x = [start_point - self.fix_x * Decimal(0.75), start_point - self.fix_x * Decimal(0.75)]
y = [self.min_top_elev - 2 * self.height_row, self.min_top_elev - 5 * self.height_row]
plt.plot(x, y, 'black', zorder=100)
# Vertical line [-3,0]
x = [start_point - self.fix_x, start_point - self.fix_x]
y = [self.min_top_elev - 1 * self.height_row, self.min_top_elev - 6 * self.height_row]
plt.plot(x, y, 'black', zorder=100)
# Fill the fixed part of table with data - draw text
# Called just with first node
self.data_fix_table(start_point, reverse)
def draw_marks(self, start_point):
""" Draw marks for each node """
# Vertical line [0,0]
x = [start_point, start_point]
y = [self.min_top_elev - 1 * self.height_row,
self.min_top_elev - 2 * self.height_row - Decimal(0.15) * self.height_row]
plt.plot(x, y, 'black', zorder=100)
# Vertical lines [0,0] - marks
x = [start_point, start_point]
y = [self.min_top_elev - Decimal(2.9) * self.height_row, self.min_top_elev - Decimal(3.15) * self.height_row]
plt.plot(x, y, 'black', zorder=100)
x = [start_point, start_point]
y = [self.min_top_elev - Decimal(3.9) * self.height_row, self.min_top_elev - Decimal(4.15) * self.height_row]
plt.plot(x, y, 'black', zorder=100)
x = [start_point, start_point]
y = [self.min_top_elev - Decimal(4.9) * self.height_row, self.min_top_elev - Decimal(5.15) * self.height_row]
plt.plot(x, y, 'black', zorder=100)
x = [start_point, start_point]
y = [self.min_top_elev - Decimal(5.9) * self.height_row, self.min_top_elev - Decimal(6.15) * self.height_row]
plt.plot(x, y, 'black', zorder=100)
def data_fix_table(self, start_point, reverse): #@UnusedVariable
""" FILL THE FIXED PART OF TABLE WITH DATA - DRAW TEXT """
c = (self.fix_x - self.fix_x * Decimal(0.2)) / 2
plt.text(-(c + self.fix_x * Decimal(0.2)),
self.min_top_elev - 1 * self.height_row - Decimal(0.45) * self.height_row, 'DIAMETER', fontsize=7.5,
horizontalalignment='center')
plt.text(-(c + self.fix_x * Decimal(0.2)),
self.min_top_elev - 1 * self.height_row - Decimal(0.80) * self.height_row, 'SLP. / LEN.', fontsize=7.5,
horizontalalignment='center')
c = (self.fix_x * Decimal(0.25)) / 2
plt.text(-(c + self.fix_x * Decimal(0.74)),
self.min_top_elev - Decimal(2) * self.height_row - self.height_row * 3 / 2, 'ORDINATES', fontsize=7.5,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
plt.text(-self.fix_x * Decimal(0.70), self.min_top_elev - Decimal(2.05) * self.height_row - self.height_row / 2,
'TOP ELEV', fontsize=7.5, verticalalignment='center')
plt.text(-self.fix_x * Decimal(0.70), self.min_top_elev - Decimal(3.05) * self.height_row - self.height_row / 2,
'Y MAX', fontsize=7.5, verticalalignment='center')
plt.text(-self.fix_x * Decimal(0.70), self.min_top_elev - Decimal(4.05) * self.height_row - self.height_row / 2,
'ELEV', fontsize=7.5, verticalalignment='center')
c = (self.fix_x - self.fix_x * Decimal(0.2)) / 2
plt.text(-(c + self.fix_x * Decimal(0.2)),
self.min_top_elev - Decimal(self.height_row * 5 + self.height_row / 2), 'CODE', fontsize=7.5,
horizontalalignment='center', verticalalignment='center')
# Fill table with values
self.fill_data(0, 0, reverse)
def draw_nodes(self, node, prev_node, index):
""" Draw nodes between first and last node """
if node.node_id == prev_node.node_2:
z1 = prev_node.z2
reverse = False
elif node.node_id == prev_node.node_1:
z1 = prev_node.z1
reverse = True
if node.node_id == node.node_1:
z2 = node.z1
elif node.node_id == node.node_2:
z2 = node.z2
# Get superior points
s1x = self.slast[0]
s1y = self.slast[1]
s2x = node.start_point - node.geom / 2
s2y = node.top_elev - node.ymax + z1 + prev_node.cat_geom
s3x = node.start_point - node.geom / 2
s3y = node.top_elev
s4x = node.start_point + node.geom / 2
s4y = node.top_elev
s5x = node.start_point + node.geom / 2
        s5y = node.top_elev - node.ymax + z2 + node.cat_geom
# Get inferior points
i1x = self.ilast[0]
i1y = self.ilast[1]
i2x = node.start_point - node.geom / 2
i2y = node.top_elev - node.ymax + z1
i3x = node.start_point - node.geom / 2
i3y = node.top_elev - node.ymax
i4x = node.start_point + node.geom / 2
i4y = node.top_elev - node.ymax
i5x = node.start_point + node.geom / 2
i5y = node.top_elev - node.ymax + z2
# Create list points
xinf = [i1x, i2x, i3x, i4x, i5x]
yinf = [i1y, i2y, i3y, i4y, i5y]
xsup = [s1x, s2x, s3x, s4x, s5x]
ysup = [s1y, s2y, s3y, s4y, s5y]
row = self.controller.get_config('draw_profile_conf')
if row is not None:
row = json.loads(row[0])
if 'color' in row:
                # Draw lines according to the list of points
plt.plot(xinf, yinf, row['color'])
plt.plot(xsup, ysup, row['color'])
else:
plt.plot(xinf, yinf, 'black', zorder=100)
plt.plot(xsup, ysup, 'black', zorder=100)
self.node_top_x = node.start_point
self.node_top_y = node.top_elev
self.first_top_x = prev_node.start_point
self.first_top_y = prev_node.top_elev
# DRAW TABLE-MARKS
self.draw_marks(node.start_point)
# Fill table
self.fill_data(node.start_point, index, reverse)
# Save last points before the last node
self.slast = [s5x, s5y]
self.ilast = [i5x, i5y]
# Save last points for draw ground
self.slast2 = [s3x, s3y]
self.ilast2 = [i3x, i3y]
def fill_data(self, start_point, indx, reverse=False):
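        """ Write the table values (top elev, ymax, elev, code, diameter, slope/length) for the node at position indx """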
# Fill top_elevation and node_id for all nodes
plt.annotate(' ' + '\n' + str(round(self.nodes[indx].top_elev, 2)) + '\n' + ' ',
xy=(Decimal(start_point), self.min_top_elev - Decimal(self.height_row * 2 + self.height_row / 2)),
fontsize=6, rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Draw node_id
plt.text(0 + start_point, self.min_top_elev - Decimal(self.height_row * 5 + self.height_row / 2),
self.nodes[indx].code, fontsize=7.5,
horizontalalignment='center', verticalalignment='center')
        # Manage variables elev and y (elev1, elev2, y1, y2) according to flow trace
if reverse:
# Fill y_max and elevation
# 1st node : y_max,y2 and top_elev, elev2
if indx == 0:
# # Fill y_max
plt.annotate(' ' + '\n' + str(round(self.nodes[0].ymax, 2)) + '\n' + str(round(self.nodes[0].y2, 2)),
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 3 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Fill elevation
plt.annotate(' ' + '\n' + str(round(self.nodes[0].elev, 2)) + '\n' + str(round(self.nodes[0].elev2, 2)),
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 4 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Last node : y_max,y1 and top_elev, elev1
elif indx == self.n - 1:
pass
# Fill y_max
plt.annotate(
str(round(self.nodes[indx - 1].y1, 2)) + '\n' + str(
round(self.nodes[indx].ymax, 2)) + '\n' + ' ',
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 3 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Fill elevation
plt.annotate(
str(round(self.nodes[indx - 1].elev1, 2)) + '\n' + str(
round(self.nodes[indx].elev, 2)) + '\n' + ' ',
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 4 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
else:
# Fill y_max
plt.annotate(
str(round(self.nodes[indx - 1].y1, 2)) + '\n' + str(
round(self.nodes[indx].ymax, 2)) + '\n' + str(
round(self.nodes[indx].y1, 2)),
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 3 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Fill elevation
plt.annotate(
str(round(self.nodes[indx - 1].elev1, 2)) + '\n' + str(
round(self.nodes[indx].elev, 2)) + '\n' + str(
round(self.nodes[indx].elev1, 2)),
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 4 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
else:
# Fill y_max and elevation
# 1st node : y_max,y2 and top_elev, elev2
if indx == 0:
# # Fill y_max
plt.annotate(' ' + '\n' + str(round(self.nodes[0].ymax, 2)) + '\n' + str(round(self.nodes[0].y1, 2)),
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 3 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Fill elevation
plt.annotate(' ' + '\n' + str(round(self.nodes[0].elev, 2)) + '\n' + str(round(self.nodes[0].elev1, 2)),
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 4 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Last node : y_max,y1 and top_elev, elev1
elif indx == self.n - 1:
pass
# Fill y_max
plt.annotate(
str(round(self.nodes[indx - 1].y2, 2)) + '\n' + str(round(self.nodes[indx].ymax, 2)) + '\n' + ' ',
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 3 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Fill elevation
plt.annotate(
str(round(self.nodes[indx - 1].elev2, 2)) + '\n' + str(
round(self.nodes[indx].elev, 2)) + '\n' + ' ',
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 4 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Nodes between 1st and last node : y_max,y1,y2 and top_elev, elev1, elev2
else:
# Fill y_max
plt.annotate(
str(round(self.nodes[indx - 1].y2, 2)) + '\n' + str(round(self.nodes[indx].ymax, 2)) + '\n' + str(
round(self.nodes[indx].y1, 2)),
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 3 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Fill elevation
plt.annotate(
str(round(self.nodes[indx - 1].elev2, 2)) + '\n' + str(
round(self.nodes[indx].elev, 2)) + '\n' + str(
round(self.nodes[indx].elev1, 2)),
xy=(Decimal(0 + start_point),
self.min_top_elev - Decimal(self.height_row * 4 + self.height_row / 2)), fontsize=6,
rotation='vertical', horizontalalignment='center', verticalalignment='center')
# Fill diameter and slope / length for all nodes except last node
if indx != self.n - 1:
# Draw diameter
center = self.gis_length[indx + 1] / 2
plt.text(center + start_point, self.min_top_elev - 1 * self.height_row - Decimal(0.45) * self.height_row,
round(self.nodes[indx].cat_geom, 2),
fontsize=7.5, horizontalalignment='center') # PUT IN THE MIDDLE PARAMETRIZATION
# Draw slope / length
slope = str(round((self.nodes[indx].slope * 100), 2))
length = str(round(self.gis_length[indx + 1], 2))
plt.text(center + start_point, self.min_top_elev - 1 * self.height_row - Decimal(0.8) * self.height_row,
slope + '%/' + length,
fontsize=7.5, horizontalalignment='center') # PUT IN THE MIDDLE PARAMETRIZATION
def draw_last_node(self, node, prev_node, index):
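        """ Draw last node """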
if node.node_id == prev_node.node_2:
z = prev_node.z2
reverse = False
else:
z = prev_node.z1
reverse = True
        # TODO: document the slast and ilast lists
s1x = self.slast[0]
s1y = self.slast[1]
s2x = node.start_point - node.geom / 2
s2y = node.top_elev - node.ymax + z + prev_node.cat_geom
s3x = node.start_point - node.geom / 2
s3y = node.top_elev
        s4x = node.start_point + node.geom / 2
s4y = node.top_elev
# Get inferior points
i1x = self.ilast[0]
i1y = self.ilast[1]
i2x = node.start_point - node.geom / 2
i2y = node.top_elev - node.ymax + z
i3x = node.start_point - node.geom / 2
i3y = node.top_elev - node.ymax
i4x = node.start_point + node.geom / 2
i4y = node.top_elev - node.ymax
# Create list points
xinf = [i1x, i2x, i3x, i4x]
yinf = [i1y, i2y, i3y, i4y]
xsup = [s1x, s2x, s3x, s4x, i4x]
ysup = [s1y, s2y, s3y, s4y, i4y]
row = self.controller.get_config('draw_profile_conf')
if row is not None:
row = json.loads(row[0])
if 'color' in row:
                # Draw lines according to the list of points
plt.plot(xinf, yinf, row['color'])
plt.plot(xsup, ysup, row['color'])
else:
plt.plot(xinf, yinf, 'black', zorder=100)
plt.plot(xsup, ysup, 'black', zorder=100)
self.first_top_x = self.slast2[0]
self.first_top_y = self.slast2[1]
self.node_top_x = node.start_point
self.node_top_y = node.top_elev
# DRAW TABLE
# DRAW TABLE-MARKS
self.draw_marks(node.start_point)
# Fill table
self.fill_data(node.start_point, index, reverse)
def set_table_parameters(self):
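        """ Compute min/max elevations and the dimensions of the fixed table used for drawing """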
# Search y coordinate min_top_elev ( top_elev- ymax)
self.min_top_elev = self.nodes[0].top_elev - self.nodes[0].ymax
for i in range(1, self.n):
if (self.nodes[i].top_elev - self.nodes[i].ymax) < self.min_top_elev:
self.min_top_elev = self.nodes[i].top_elev - self.nodes[i].ymax
# Search y coordinate max_top_elev
self.max_top_elev = self.nodes[0].top_elev
for i in range(1, self.n):
if self.nodes[i].top_elev > self.max_top_elev:
self.max_top_elev = self.nodes[i].top_elev
# Calculating dimensions of x-fixed part of table
self.fix_x = Decimal(0.15) * self.nodes[self.n - 1].start_point
# Calculating dimensions of y-fixed part of table
# Height y = height of table + height of graph
self.z = self.max_top_elev - self.min_top_elev
self.height_row = (self.z * Decimal(0.97)) / Decimal(5)
# Height of graph + table
self.height_y = self.z * 2
def draw_table_horizontals(self):
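        """ Draw the horizontal lines of the results table """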
self.set_table_parameters()
# DRAWING TABLE
# Draw horizontal lines
x = [self.nodes[self.n - 1].start_point, self.nodes[0].start_point - self.fix_x]
y = [self.min_top_elev - self.height_row, self.min_top_elev - self.height_row]
plt.plot(x, y, 'black',zorder=100)
x = [self.nodes[self.n - 1].start_point, self.nodes[0].start_point - self.fix_x]
y = [self.min_top_elev - 2 * self.height_row, self.min_top_elev - 2 * self.height_row]
plt.plot(x, y, 'black',zorder=100)
# Draw horizontal(shorter) lines
x = [self.nodes[self.n - 1].start_point, self.nodes[0].start_point - self.fix_x * Decimal(0.75)]
y = [self.min_top_elev - 3 * self.height_row, self.min_top_elev - 3 * self.height_row]
plt.plot(x, y, 'black',zorder=100)
x = [self.nodes[self.n - 1].start_point, self.nodes[0].start_point - self.fix_x * Decimal(0.75)]
y = [self.min_top_elev - 4 * self.height_row, self.min_top_elev - 4 * self.height_row]
plt.plot(x, y, 'black',zorder=100)
# Last two lines
x = [self.nodes[self.n - 1].start_point, self.nodes[0].start_point - self.fix_x]
y = [self.min_top_elev - 5 * self.height_row, self.min_top_elev - 5 * self.height_row]
plt.plot(x, y, 'black',zorder=100)
x = [self.nodes[self.n - 1].start_point, self.nodes[0].start_point - self.fix_x]
y = [self.min_top_elev - 6 * self.height_row, self.min_top_elev - 6 * self.height_row]
plt.plot(x, y, 'black',zorder=100)
def draw_coordinates(self):
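        """ Draw the profile axes and the horizontal guide lines with their elevation labels """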
start_point = self.nodes[self.n - 1].start_point
geom1 = self.nodes[self.n - 1].geom
        # Draw coordinates
x = [0, 0]
y = [self.min_top_elev - 1 * self.height_row, int(math.ceil(self.max_top_elev) + 1 )]
plt.plot(x, y, 'black',zorder=100)
x = [start_point,start_point]
y = [self.min_top_elev - 1 * self.height_row, int(math.ceil(self.max_top_elev) + 1 )]
plt.plot(x, y, 'black',zorder=100)
x = [0,start_point]
y = [int(math.ceil(self.max_top_elev) + 1 ),int(math.ceil(self.max_top_elev) + 1 )]
plt.plot(x, y, 'black',zorder=100)
# Loop till self.max_top_elev + height_row
y = int(math.ceil(self.min_top_elev - 1 * self.height_row))
x = int(math.floor(self.max_top_elev))
        if x % 2 == 0:
            x = x + 2
        else:
            x = x + 1
for i in range(y, x):
if i%2 == 0:
x1 = [0, start_point]
y1 = [i, i]
else:
i = i+1
x1 = [0, start_point]
y1 = [i, i]
plt.plot(x1, y1, 'lightgray', zorder=1)
# Values left y_ordinate_all
plt.text(0 - geom1 * Decimal(1.5), i,str(i), fontsize=7.5, horizontalalignment='right', verticalalignment='center')
def draw_grid(self):
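        """ Draw the vertical grid lines (every 50 units) and the axis value labels """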
# Values right y_ordinate_max
start_point = self.nodes[self.n-1].start_point
geom1 = self.nodes[self.n-1].geom
plt.annotate('P.C. '+str(round(self.min_top_elev - 1 * self.height_row,2)) + '\n' + ' ',
xy=(0 - geom1 * Decimal(1.5) , self.min_top_elev - 1 * self.height_row),
fontsize=6.5, horizontalalignment='right', verticalalignment='center')
# Values right x_ordinate_min
plt.annotate('0'+ '\n' + ' ',
xy=(0,int(math.ceil(self.max_top_elev) + 1 )),
fontsize=6.5, horizontalalignment='center')
# Values right x_ordinate_max
plt.annotate(str(round(start_point,2))+ '\n' + ' ',
xy=(start_point, int(math.ceil(self.max_top_elev) + 1 ) ),
fontsize=6.5, horizontalalignment='center')
# Loop from 0 to start_point(of last node)
x = int(math.floor(start_point))
        # Start after 0 (the first line is already drawn), stepping every 50
for i in range(50, x, 50):
x1 = [i, i]
y1 = [self.min_top_elev - 1 * self.height_row, int(math.ceil(self.max_top_elev) + 1 )]
plt.plot(x1, y1, 'lightgray',zorder=1 )
# values left y_ordinate_all
plt.text(0 - geom1 * Decimal(1.5), i, str(i), fontsize=6.5,
horizontalalignment='right', verticalalignment='center')
plt.text(start_point + geom1 * Decimal(1.5), i, str(i), fontsize=6.5,
horizontalalignment='left', verticalalignment='center')
# values right x_ordinate_all
plt.annotate(str(i) + '\n' + ' ', xy=(i, int(math.ceil(self.max_top_elev) + 1 )),
fontsize=6.5, horizontalalignment='center')
# TODO: Not used
def draw_arc(self):
x = [self.x, self.x2]
y = [self.y, self.y2]
x1 = [self.x1, self.x3]
y1 = [self.y1, self.y3]
plt.plot(x, y, 'black', zorder=100)
plt.plot(x1, y1, 'black', zorder=100)
def draw_ground(self):
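        """ Draw the ground line between node tops as a green dashed line with triangle markers """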
# Green triangle
plt.plot(self.first_top_x,self.first_top_y,'g^',linewidth=3.5)
plt.plot(self.node_top_x, self.node_top_y, 'g^',linewidth=3.5)
x = [self.first_top_x, self.node_top_x]
y = [self.first_top_y, self.node_top_y]
plt.plot(x, y, 'green', linestyle='dashed')
def shortest_path(self, start_point, end_point):
""" Calculating shortest path using dijkstra algorithm """
self.arc_id = []
self.node_id = []
self.rnode_id = []
self.rarc_id = []
rstart_point = None
sql = (f"SELECT rid "
f"FROM v_anl_pgrouting_node "
f"WHERE node_id = '{start_point}'")
row = self.controller.get_row(sql)
if row:
rstart_point = int(row[0])
rend_point = None
sql = (f"SELECT rid "
f"FROM v_anl_pgrouting_node "
f"WHERE node_id = '{end_point}'")
row = self.controller.get_row(sql)
if row:
rend_point = int(row[0])
# Check starting and end points | wait to select end_point
if rstart_point is None or rend_point is None:
return
# Clear list of arcs and nodes - preparing for new profile
sql = (f"SELECT * FROM public.pgr_dijkstra('SELECT id::integer, source, target, cost"
f" FROM v_anl_pgrouting_arc', {rstart_point}, {rend_point}, false")
if self.version == '2':
sql += ", false"
elif self.version == '3':
pass
else:
message = "You need to upgrade your version of pgRouting"
self.controller.show_info(message)
return
sql += ")"
rows = self.controller.get_rows(sql, commit=True)
for i in range(0, len(rows)):
if self.version == '2':
self.rnode_id.append(str(rows[i][1]))
self.rarc_id.append(str(rows[i][2]))
elif self.version == '3':
self.rnode_id.append(str(rows[i][2]))
self.rarc_id.append(str(rows[i][3]))
self.rarc_id.pop()
self.arc_id = []
self.node_id = []
for n in range(0, len(self.rarc_id)):
# convert arc_ids
sql = (f"SELECT arc_id "
f"FROM v_anl_pgrouting_arc "
f"WHERE id = '{self.rarc_id[n]}'")
row = self.controller.get_row(sql)
if row:
self.arc_id.append(str(row[0]))
for m in range(0, len(self.rnode_id)):
# convert node_ids
sql = (f"SELECT node_id "
f"FROM v_anl_pgrouting_node "
f"WHERE rid = '{self.rnode_id[m]}'")
row = self.controller.get_row(sql)
if row:
self.node_id.append(str(row[0]))
# Select arcs of the shortest path
for element_id in self.arc_id:
sql = (f"SELECT sys_type "
f"FROM v_edit_arc "
f"WHERE arc_id = '{element_id}'")
row = self.controller.get_row(sql)
if not row:
return
# Select feature of v_edit_man_@sys_type
sys_type = str(row[0].lower())
sql = f"SELECT parent_layer FROM cat_feature WHERE system_id = '{sys_type.upper()}' LIMIT 1"
row = self.controller.get_row(sql, log_sql=True, commit=True)
self.layer_feature = self.controller.get_layer_by_tablename(row[0])
aux = ""
for row in self.arc_id:
aux += "arc_id = '" + str(row) + "' OR "
aux = aux[:-3] + ""
# Select snapped features
selection = self.layer_feature.getFeatures(QgsFeatureRequest().setFilterExpression(aux))
self.layer_feature.selectByIds([a.id() for a in selection])
# Select nodes of shortest path on layers v_edit_man_|feature
for element_id in self.node_id:
sql = (f"SELECT sys_type "
f"FROM v_edit_node "
f"WHERE node_id = '{element_id}'")
row = self.controller.get_row(sql)
if not row:
return
# Select feature of v_edit_man_@sys_type
sys_type = str(row[0].lower())
sql = f"SELECT parent_layer FROM cat_feature WHERE system_id = '{sys_type.upper()}' LIMIT 1"
row = self.controller.get_row(sql, log_sql=True, commit=True)
self.layer_feature = self.controller.get_layer_by_tablename(row[0])
aux = ""
for row in self.node_id:
aux += f"node_id = '{row}' OR "
aux = aux[:-3] + ""
# Select snapped features
selection = self.layer_feature.getFeatures(QgsFeatureRequest().setFilterExpression(aux))
self.layer_feature.selectByIds([a.id() for a in selection])
        # Select arcs of shortest path on v_edit_arc for ZOOM SELECTION
expr_filter = '"arc_id" IN ('
for i in range(len(self.arc_id)):
expr_filter += f"'{self.arc_id[i]}', "
expr_filter = expr_filter[:-2] + ")"
(is_valid, expr) = self.check_expression(expr_filter, True) #@UnusedVariable
if not is_valid:
return
# Build a list of feature id's from the previous result
# Select features with these id's
it = self.layer_arc.getFeatures(QgsFeatureRequest(expr))
self.id_list = [i.id() for i in it]
self.layer_arc.selectByIds(self.id_list)
# Center shortest path in canvas - ZOOM SELECTION
self.canvas.zoomToSelected(self.layer_arc)
# Clear list
list_arc = []
self.dlg_draw_profile.tbl_list_arc.clear()
for i in range(len(self.arc_id)):
item_arc = QListWidgetItem(self.arc_id[i])
self.dlg_draw_profile.tbl_list_arc.addItem(item_arc)
list_arc.append(self.arc_id[i])
def execute_profiles(self):
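        """ Remove duplicated nodes, draw the profile and show the plot window maximized """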
# Remove duplicated nodes
singles_list = []
for element in self.node_id:
if element not in singles_list:
singles_list.append(element)
self.node_id = []
self.node_id = singles_list
self.paint_event(self.arc_id, self.node_id)
# Maximize window (after drawing)
self.plot.show()
mng = self.plot.get_current_fig_manager()
mng.window.showMaximized()
def execute_profiles_composer(self):
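        """ Redraw the profile without showing the plot window, so it can be placed in a composer """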
# Remove duplicated nodes
singles_list = []
for element in self.node_id:
if element not in singles_list:
singles_list.append(element)
self.node_id = []
self.node_id = singles_list
self.paint_event(self.arc_id, self.node_id)
def clear_profile(self):
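        """ Reset dialog widgets, lists and canvas selection to start a new profile """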
# Clear list of nodes and arcs
self.list_of_selected_nodes = []
self.list_of_selected_arcs = []
self.arcs = []
self.nodes = []
self.start_end_node = []
self.start_end_node = [None, None]
self.dlg_draw_profile.list_additional_points.clear()
self.dlg_draw_profile.btn_add_start_point.setDisabled(False)
self.dlg_draw_profile.btn_add_end_point.setDisabled(True)
self.dlg_draw_profile.btn_add_additional_point.setDisabled(True)
self.dlg_draw_profile.list_additional_points.setDisabled(True)
self.dlg_draw_profile.title.setDisabled(True)
self.dlg_draw_profile.rotation.setDisabled(True)
self.dlg_draw_profile.scale_vertical.setDisabled(True)
self.dlg_draw_profile.scale_horizontal.setDisabled(True)
self.dlg_draw_profile.btn_export_pdf.setDisabled(True)
self.dlg_draw_profile.cbx_template.setDisabled(True)
self.dlg_draw_profile.btn_update_path.setDisabled(True)
self.dlg_draw_profile.start_point.clear()
self.dlg_draw_profile.end_point.clear()
self.dlg_draw_profile.profile_id.clear()
self.rotation_vd_exist = False
        # Get data from DB for selected item: tbl_list_arc
self.dlg_draw_profile.tbl_list_arc.clear()
# Clear selection
self.remove_selection()
self.deactivate()
    def generate_composer(self):
        """ Create or reuse a print layout from the selected template and fill it """
# Check if template is selected
if str(self.dlg_draw_profile.cbx_template.currentText()) == "":
message = "You need to select a template"
self.controller.show_warning(message)
return
# Check if template file exists
plugin_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
template_path = ""
row = self.controller.get_config('qgis_composers_path')
if row:
template_path = f'{row[0]}{os.sep}{self.template}.qpt'
if not os.path.exists(template_path):
message = "File not found"
self.controller.show_warning(message, parameter=template_path)
return
# Check if composer exist
composers = self.get_composers_list()
index = self.get_composer_index(str(self.template))
# Composer not found
if index == len(composers):
# Create new composer with template selected in combobox(self.template)
template_file = open(template_path, 'rt')
template_content = template_file.read()
template_file.close()
document = QDomDocument()
document.setContent(template_content)
project = QgsProject.instance()
comp_view = QgsPrintLayout(project)
comp_view.loadFromTemplate(document, QgsReadWriteContext())
layout_manager = project.layoutManager()
layout_manager.addLayout(comp_view)
else:
comp_view = composers[index]
# Manage profile layout
self.manage_profile_layout(comp_view, plugin_path)
def manage_profile_layout(self, layout, plugin_path):
""" Manage profile layout """
if layout is None:
self.controller.log_warning("Layout not found")
return
# Get values from dialog
profile = plugin_path + os.sep + "templates" + os.sep + "profile.png"
title = self.dlg_draw_profile.title.text()
rotation = utils_giswater.getWidgetText(self.dlg_draw_profile, self.dlg_draw_profile.rotation, False, False)
rotation = 0 if rotation in (None, '', 'null') else int(rotation)
first_node = self.dlg_draw_profile.start_point.text()
end_node = self.dlg_draw_profile.end_point.text()
# Show layout
self.iface.openLayoutDesigner(layout)
# Set profile
picture_item = layout.itemById('profile')
picture_item.setPicturePath(profile)
# Zoom map to extent, rotation
map_item = layout.itemById('Mapa')
map_item.zoomToExtent(self.canvas.extent())
map_item.setMapRotation(rotation)
# Fill data in composer template
first_node_item = layout.itemById('first_node')
first_node_item.setText(str(first_node))
end_node_item = layout.itemById('end_node')
end_node_item.setText(str(end_node))
length_item = layout.itemById('length')
length_item.setText(str(self.start_point[-1]))
profile_title = layout.itemById('title')
profile_title.setText(str(title))
# Refresh items
layout.refresh()
layout.updateBounds()
def set_template(self):
template = self.dlg_draw_profile.cbx_template.currentText()
self.template = template[:-4]
def export_pdf(self):
""" Export PDF of selected template"""
# Generate Composer
self.execute_profiles_composer()
self.generate_composer()
def save_rotation_vdefault(self):
# Save vdefault value from rotation
tablename = "config_param_user"
rotation = utils_giswater.getWidgetText(self.dlg_draw_profile, self.dlg_draw_profile.rotation, False, False)
rotation = 0 if rotation in (None, '', 'null') else int(rotation)
if self.rotation_vd_exist:
sql = (f"UPDATE {tablename} "
f"SET value = '{rotation}' "
f"WHERE parameter = 'rotation_vdefault'")
else:
sql = (f"INSERT INTO {tablename} (parameter, value, cur_user) "
f"VALUES ('rotation_vdefault', '{rotation}', current_user)")
if sql:
self.controller.execute_sql(sql)
def manual_path(self, list_points):
""" Calculating shortest path using dijkstra algorithm """
self.arc_id = []
self.node_id = []
for i in range(0, (len(list_points)-1)):
# return
start_point = list_points[i]
end_point = list_points[i+1]
self.rnode_id = []
self.rarc_id = []
rstart_point = None
sql = (f"SELECT rid "
f"FROM v_anl_pgrouting_node "
f"WHERE node_id = '{start_point}'")
row = self.controller.get_row(sql)
if row:
rstart_point = int(row[0])
rend_point = None
sql = (f"SELECT rid "
f"FROM v_anl_pgrouting_node "
f"WHERE node_id = '{end_point}'")
row = self.controller.get_row(sql)
if row:
rend_point = int(row[0])
# Check starting and end points | wait to select end_point
if rstart_point is None or rend_point is None:
return
# Clear list of arcs and nodes - preparing for new profile
sql = (f"SELECT * FROM public.pgr_dijkstra('SELECT id::integer, source, target, cost "
f"FROM v_anl_pgrouting_arc', {rstart_point}, {rend_point}, false")
if self.version == '2':
sql += ", false"
elif self.version == '3':
pass
else:
message = "You need to upgrade your version of pgRouting"
self.controller.show_info(message)
return
sql += ")"
rows = self.controller.get_rows(sql, commit=True)
for i in range(0, len(rows)):
if self.version == '2':
self.rnode_id.append(str(rows[i][1]))
self.rarc_id.append(str(rows[i][2]))
elif self.version == '3':
self.rnode_id.append(str(rows[i][2]))
self.rarc_id.append(str(rows[i][3]))
self.rarc_id.pop()
for n in range(0, len(self.rarc_id)):
# Convert arc_ids
sql = (f"SELECT arc_id "
f"FROM v_anl_pgrouting_arc "
f"WHERE id = '{self.rarc_id[n]}'")
row = self.controller.get_row(sql)
if row:
self.arc_id.append(str(row[0]))
for m in range(0, len(self.rnode_id)):
# Convert node_ids
sql = (f"SELECT node_id "
f"FROM v_anl_pgrouting_node "
f"WHERE rid = '{self.rnode_id[m]}'")
row = self.controller.get_row(sql)
if row:
self.node_id.append(str(row[0]))
# Select arcs of the shortest path
for element_id in self.arc_id:
sql = (f"SELECT sys_type "
f"FROM v_edit_arc "
f"WHERE arc_id = '{element_id}'")
row = self.controller.get_row(sql)
if not row:
return
# Select feature of v_edit_man_@sys_type
sys_type = str(row[0].lower())
sql = f"SELECT parent_layer FROM cat_feature WHERE system_id = '{sys_type.upper()}' LIMIT 1"
row = self.controller.get_row(sql, log_sql=True, commit=True)
self.layer_feature = self.controller.get_layer_by_tablename(row[0])
aux = ""
for row in self.arc_id:
aux += f"arc_id = '{row}' OR "
aux = aux[:-3] + ""
# Select snapped features
selection = self.layer_feature.getFeatures(QgsFeatureRequest().setFilterExpression(aux))
self.layer_feature.selectByIds([a.id() for a in selection])
        # Select nodes of shortest path on layers v_edit_man_@feature
for element_id in self.node_id:
sql = (f"SELECT sys_type "
f"FROM v_edit_node "
f"WHERE node_id = '{element_id}'")
row = self.controller.get_row(sql)
if not row:
return
# Select feature of v_edit_man_@sys_type
sys_type = str(row[0].lower())
sql = f"SELECT parent_layer FROM cat_feature WHERE system_id = '{sys_type.upper()}' LIMIT 1"
row = self.controller.get_row(sql, log_sql=True, commit=True)
self.layer_feature = self.controller.get_layer_by_tablename(row[0])
aux = ""
for row in self.node_id:
aux += f"node_id = '{row}' OR "
aux = aux[:-3] + ""
# Select snapped features
selection = self.layer_feature.getFeatures(QgsFeatureRequest().setFilterExpression(aux))
self.layer_feature.selectByIds([a.id() for a in selection])
# Select nodes of shortest path on v_edit_arc for ZOOM SELECTION
expr_filter = '"arc_id" IN ('
for i in range(len(self.arc_id)):
expr_filter += f"'{self.arc_id[i]}', "
expr_filter = expr_filter[:-2] + ")"
(is_valid, expr) = self.check_expression(expr_filter, True) #@UnusedVariable
if not is_valid:
return
# Build a list of feature id's from the previous result
# Select features with these id's
it = self.layer_arc.getFeatures(QgsFeatureRequest(expr))
self.id_list = [i.id() for i in it]
self.layer_arc.selectByIds(self.id_list)
# Center shortest path in canvas - ZOOM SELECTION
self.canvas.zoomToSelected(self.layer_arc)
# Clear list
self.list_arc = []
self.dlg_draw_profile.tbl_list_arc.clear()
for i in range(len(self.arc_id)):
item_arc = QListWidgetItem(self.arc_id[i])
self.dlg_draw_profile.tbl_list_arc.addItem(item_arc)
self.list_arc.append(self.arc_id[i])
    def exec_path(self):
        """ Compute the path between the selected points and update dialog state """
self.rotation_vd_exist = False
        if self.start_end_node[0] is not None:
self.dlg_draw_profile.btn_add_end_point.setDisabled(False)
# Shortest path - if additional point doesn't exist
        if self.start_end_node[0] is not None and self.start_end_node[1] is not None:
if str(self.start_end_node[0]) == self.start_end_node[1]:
msg = "Start and End point must be different."
self.controller.show_info_box(msg, "Info")
disabled = True
self.dlg_draw_profile.tbl_list_arc.clear()
else:
disabled = False
self.shortest_path(str(self.start_end_node[0]), str(self.start_end_node[1]))
self.dlg_draw_profile.btn_add_additional_point.setDisabled(disabled)
self.dlg_draw_profile.list_additional_points.setDisabled(disabled)
self.dlg_draw_profile.title.setDisabled(disabled)
self.dlg_draw_profile.rotation.setDisabled(disabled)
self.dlg_draw_profile.scale_vertical.setDisabled(disabled)
self.dlg_draw_profile.scale_horizontal.setDisabled(disabled)
# Get rotation vdefaut if exist
row = self.controller.get_config('rotation_vdefault')
if row:
utils_giswater.setWidgetText(self.dlg_draw_profile, self.dlg_draw_profile.rotation, row[0])
self.rotation_vd_exist = True
else:
utils_giswater.setWidgetText(self.dlg_draw_profile, self.dlg_draw_profile.rotation, '0')
# After executing of path enable btn_draw and open_composer
self.dlg_draw_profile.btn_draw.setDisabled(disabled)
self.dlg_draw_profile.btn_save_profile.setDisabled(disabled)
self.dlg_draw_profile.btn_export_pdf.setDisabled(disabled)
self.dlg_draw_profile.cbx_template.setDisabled(disabled)
self.dlg_draw_profile.btn_update_path.setDisabled(disabled)
        if self.start_end_node[0] is not None and self.start_end_node[1] is not None:
self.dlg_draw_profile.btn_delete_additional_point.setDisabled(False)
# Manual path - if additional point exist
if len(self.start_end_node) > 2:
self.dlg_draw_profile.btn_add_start_point.setDisabled(True)
self.dlg_draw_profile.btn_add_end_point.setDisabled(True)
self.manual_path(self.start_end_node)
def delete_profile(self):
""" Delete profile """
selected_list = self.dlg_load.tbl_profiles.selectionModel().selectedRows()
if len(selected_list) == 0:
message = "Any record selected"
self.controller.show_warning(message)
return
# Selected item from list
selected_profile = self.dlg_load.tbl_profiles.currentItem().text()
message = "Are you sure you want to delete these profile?"
answer = self.controller.ask_question(message, "Delete profile", selected_profile)
if answer:
# Delete selected profile
sql = (f"DELETE FROM anl_arc_profile_value "
f"WHERE profile_id = '{selected_profile}'")
status = self.controller.execute_sql(sql)
if not status:
message = "Error deleting profile"
self.controller.show_warning(message)
return
else:
message = "Profile deleted"
self.controller.show_info(message)
# Refresh list of arcs
self.dlg_load.tbl_profiles.clear()
sql = "SELECT DISTINCT(profile_id) FROM anl_arc_profile_value"
rows = self.controller.get_rows(sql)
if rows:
for row in rows:
item_arc = QListWidgetItem(str(row[0]))
self.dlg_load.tbl_profiles.addItem(item_arc)
def delete_additional_point(self):
self.dlg_draw_profile.btn_delete_additional_point.setDisabled(True)
self.widget_additional_point.clear()
self.start_end_node.pop(1)
self.exec_path()
def remove_selection(self):
""" Remove selected features of all layers """
for layer in self.canvas.layers():
if type(layer) is QgsVectorLayer:
layer.removeSelection()
self.canvas.refresh()
def manage_rejected(self):
self.close_dialog(self.dlg_draw_profile)
self.remove_vertex()
def get_folder_dialog(self, dialog, widget):
""" Get folder dialog """
# Check if selected folder exists. Set default value if necessary
folder_path = utils_giswater.getWidgetText(dialog, widget)
if folder_path is None or folder_path == 'null' or not os.path.exists(folder_path):
folder_path = os.path.expanduser("~")
# Open dialog to select folder
os.chdir(folder_path)
file_dialog = QFileDialog()
file_dialog.setFileMode(QFileDialog.Directory)
message = "Select folder"
folder_path = file_dialog.getExistingDirectory(parent=None, caption=self.controller.tr(message),
directory=folder_path)
if folder_path:
utils_giswater.setWidgetText(dialog, widget, str(folder_path)) | gpl-3.0 | -8,684,525,205,711,037,000 | 40.17672 | 156 | 0.5526 | false |
sandwich-share/sandwich | sandwich/indexer.py | 1 | 3005 | import urllib
import socket
import sys
import os
import config
import sqlite3
import json
db = "index.db"
table = "local"
# crawl a directory and find all files and folders
def find_files():
index = []
for path, dirs, files in os.walk(config.shared_directory):
for f in files:
if not f.startswith('.') and (not os.path.split(path)[1].startswith('.') or path == "."+os.sep or path == '.'):
size = os.path.getsize(os.path.join(path,f))
if size >= 1000000000:
size = float(size) / 1000000000
m = "G"
elif size >= 1000000:
size = float(size) / 1000000
m = "M"
elif size >= 1000:
size = float(size) / 1000
m = "K"
else:
m = ""
                # Keep the decimals computed above; "%d" would truncate them to an integer
                index.append((os.path.relpath(path, config.shared_directory), f, "%s%s" % (round(size, 4), m)))
try:
os.remove(db)
except:
print "Database does not exist, creating it."
finally:
f = open(db, "w+")
f.close()
try:
con = sqlite3.connect(db)
cursor = con.cursor()
cursor.execute('''CREATE TABLE ''' + table + ''' (path text, filename text, size text)''')
cursor.executemany("INSERT INTO " + table + " VALUES (?,?,?)", index)
con.commit()
except:
for m in sys.exc_info():
print m
finally:
con.close()
# add a file and folder to the index
def add_file(path, filename, size):
try:
con = sqlite3.connect(db)
cursor = con.cursor()
cmd = "INSERT INTO " + table + " VALUES (?,?,?)"
cursor.execute(cmd, (path, filename, size))
con.commit()
except:
for m in sys.exc_info():
print m
finally:
con.close()
# remove a file from the index
def remove_file(path, filename):
try:
con = sqlite3.connect(db)
cursor = con.cursor()
cmd = "DELETE FROM " + table + " WHERE path=? AND filename=?"
cursor.execute(cmd, (path, filename))
con.commit()
except:
for m in sys.exc_info():
print m
finally:
con.close()
# finds a file with the given name in the database
def search(search_param, ip_address):
try:
con = sqlite3.connect(db)
cursor = con.cursor()
cmd = "SELECT * FROM " + table + " WHERE path LIKE ? OR filename LIKE ?"
results = []
search = '%' + str.replace(search_param, ' ', '%') + '%'
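        # e.g. search_param "holiday photos" becomes the LIKE pattern "%holiday%photos%"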
for res in cursor.execute(cmd, (search, search)):
results.append(res + ('http://' + ip_address + ":" + str(config.webapp) + '/files/' + str.replace(str(urllib.quote(res[0])) + '/', './', '') + urllib.quote(res[1]),))
return json.dumps(results)
#http://myip:serverport/files/path/filename
except:
for m in sys.exc_info():
print m
finally:
con.close()
| bsd-2-clause | 436,386,678,555,522,600 | 30.631579 | 178 | 0.518802 | false |
telminov/arduino-battle-server | control_server/project/urls.py | 1 | 1240 | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('core.urls')),
]
urlpatterns += static('img', document_root=settings.BASE_DIR + '/frontend/img')
urlpatterns += static('styles', document_root=settings.BASE_DIR + '/frontend/styles')
urlpatterns += static('dist', document_root=settings.BASE_DIR + '/frontend/dist')
urlpatterns += static('bower_components', document_root=settings.BASE_DIR + '/frontend/bower_components') | mit | -9,025,708,686,951,118,000 | 40.366667 | 105 | 0.716129 | false |
mohamedhagag/community-addons | hr_attendance_analysis/report/calendar_report.py | 1 | 3652 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011-15 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.report import report_sxw
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
class Parser(report_sxw.rml_parse):
def _get_day_of_week(self, day):
WEEKDAYS = {
0: _('Monday'),
1: _('Tuesday'),
2: _('Wednesday'),
3: _('Thursday'),
4: _('Friday'),
5: _('Saturday'),
6: _('Sunday'),
}
weekday = datetime.strptime(day, DEFAULT_SERVER_DATE_FORMAT).weekday()
return WEEKDAYS[weekday]
    def _get_month_name(self, day):
        MONTHS = {
            1: _('January'),
            2: _('February'),
            3: _('March'),
            4: _('April'),
            5: _('May'),
            6: _('June'),
            7: _('July'),
            8: _('August'),
            9: _('September'),
            10: _('October'),
            11: _('November'),
            12: _('December'),
        }
        month = datetime.strptime(day, DEFAULT_SERVER_DATE_FORMAT).month
        return MONTHS.get(month, '')
def _get_days_by_employee(self, employee_id):
form = self.localcontext['data']['form']
return form['days_by_employee'][str(employee_id)]
def _get_totals_by_employee(self, employee_id):
form = self.localcontext['data']['form']
return form['totals_by_employee'][str(employee_id)]
def _get_max_per_day(self):
form = self.localcontext['data']['form']
return form['max_number_of_attendances_per_day']
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'days_by_employee': self._get_days_by_employee,
'totals_by_employee': self._get_totals_by_employee,
'day_of_week': self._get_day_of_week,
'max_per_day': self._get_max_per_day,
'month_name': self._get_month_name,
})
report_sxw.report_sxw('report.attendance_analysis.calendar_report',
'attendance_analysis.calendar_report',
'attendance_analysis/report/calendar_report.mako',
parser=Parser)
| agpl-3.0 | 2,224,134,006,936,586,200 | 35.888889 | 79 | 0.547371 | false |
GretelF/semNets | semNetsTests/ViewTests.py | 1 | 1128 | from unittest import TestCase
from semNets.Topology import Topology
from semNets.Primitives import Node, Relation, RelationType, RelationAttributeType
from semNets.View import View
import json
def buildTopology():
t = Topology()
with open("TestData.json") as file:
net = json.load(file)
t = Topology()
t.load(net)
return t
class ViewTests(TestCase):
def test_basicViewOperations(self):
n = Node("penguin")
t = buildTopology()
t.insertNode(n)
r = Relation(RelationType("is_a"), n, Node("bird"))
t.insertRelation(r)
v = View(t)
with self.assertRaises(AssertionError):
v.includeNode(Node("beatle")) # does not exist in topology t
self.assertEqual(len(v.nodes), 0)
self.assertEqual(len(v.relations), 0)
v.includeNode(n)
v.includeRelation(r)
self.assertIn(n, v.nodes)
self.assertIn(r, v.relations)
v.mend()
self.assertIn(Node("bird"), v.nodes) # after mend the Node(name="bird") should be in the view, too
def test_expand(self):
t = buildTopology()
v = View(t)
v.includeNode(Node("bird"))
| mit | 6,986,505,701,506,567,000 | 21.117647 | 113 | 0.654255 | false |
messense/wechatpy | wechatpy/pay/api/coupon.py | 1 | 2788 | # -*- coding: utf-8 -*-
import random
from datetime import datetime
from wechatpy.pay.base import BaseWeChatPayAPI
class WeChatCoupon(BaseWeChatPayAPI):
def send(self, user_id, stock_id, op_user_id=None, device_info=None,
out_trade_no=None):
"""
        Send a coupon
        :param user_id: User openid under the official account
        :param stock_id: Coupon stock (batch) ID
        :param op_user_id: Optional, operator account, defaults to the merchant ID (mch_id)
        :param device_info: Optional, terminal device number assigned by WeChat Pay
        :param out_trade_no: Optional, merchant order number, must be unique; auto-generated by default
        :return: The returned result
"""
if not out_trade_no:
now = datetime.now()
out_trade_no = '{0}{1}{2}'.format(
self.mch_id,
now.strftime('%Y%m%d%H%M%S'),
random.randint(1000, 10000)
)
data = {
'appid': self.appid,
'coupon_stock_id': stock_id,
'openid': user_id,
'openid_count': 1,
'partner_trade_no': out_trade_no,
'op_user_id': op_user_id,
'device_info': device_info,
'version': '1.0',
'type': 'XML',
}
return self._post('mmpaymkttransfers/send_coupon', data=data)
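    # Hypothetical usage sketch, assuming a configured wechatpy WeChatPay client
    # that exposes this API as `client.coupon` (the values below are placeholders):
    #   client.coupon.send(user_id='<openid>', stock_id='<coupon stock id>')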
def query_stock(self, stock_id, op_user_id=None, device_info=None):
"""
        Query a coupon stock (batch)
        :param stock_id: Coupon stock (batch) ID
        :param op_user_id: Optional, operator account, defaults to the merchant ID (mch_id)
        :param device_info: Optional, terminal device number assigned by WeChat Pay
        :return: The returned result
"""
data = {
'appid': self.appid,
'coupon_stock_id': stock_id,
'op_user_id': op_user_id,
'device_info': device_info,
'version': '1.0',
'type': 'XML',
}
return self._post('mmpaymkttransfers/query_coupon_stock', data=data)
def query_coupon(self, coupon_id, user_id,
op_user_id=None, device_info=None):
"""
        Query coupon information
        :param coupon_id: Coupon ID
        :param user_id: User openid under the official account
        :param op_user_id: Optional, operator account, defaults to the merchant ID (mch_id)
        :param device_info: Optional, terminal device number assigned by WeChat Pay
        :return: The returned result
"""
data = {
'coupon_id': coupon_id,
'openid': user_id,
'appid': self.appid,
'op_user_id': op_user_id,
'device_info': device_info,
'version': '1.0',
'type': 'XML',
}
return self._post('promotion/query_coupon', data=data)
| mit | 3,161,162,863,871,766,000 | 28.585366 | 76 | 0.504946 | false |
wirefish/amber | amber/script.py | 1 | 2128 | import marshal
import builtins
import random
from tornado.ioloop import IOLoop
from . import events
def deny():
"""
A function to be called from within a can_* event handler if the receiver
denies the proposed action.
"""
raise events.ActionDenied()
def stop():
"""
A function that can be called from an event handler to stop further
processing of that event for that receiver.
"""
raise events.StopEvent()
def delay(delay, func, *args, **kwargs):
    """
    Schedule func(*args, **kwargs) to run after `delay` seconds on the current IOLoop.
    """
    IOLoop.current().call_later(delay, func, *args, **kwargs)
SCRIPT_GLOBALS = {
'__builtins__': builtins.__dict__,
# Standard python modules.
'random': random,
# Local imports.
'deny': deny,
'stop': stop,
'delay': delay,
}
def script_function(func):
"""
A decorator that adds a function to the script environment.
"""
SCRIPT_GLOBALS[func.__name__] = func
return func
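# Example of exposing a helper to scripts via the decorator above. The function
# name `shout` is hypothetical, not part of the real script API:
#
#   @script_function
#   def shout(text):
#       return text.upper()
#
# After registration, `shout` can be called from any Script's source code.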
"""
TODO:
Maybe we can extend the special comments to make loops and sequences easier:
#! async (sequence|random) [repeat(N)]
<stuff that happens immediately>
#! delay(N)
<delayed stuff>
#! delay(N)
<other delayed stuff>
And __call__ will need to return a timer object that can be cancelled when the
behavior changes state.
"""
class Script:
def __init__(self, src):
self.delay = None
self.code = compile(src, '<string>', 'exec')
# Look for special comments that modify how the script is run.
for line in src.splitlines():
if line.startswith('#!'):
tokens = line.split()
if tokens[1] == 'delay':
self.delay = int(tokens[2])
def __getstate__(self):
state = dict(self.__dict__)
state['code'] = marshal.dumps(self.code)
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.code = marshal.loads(self.code)
def __call__(self, env):
if self.delay:
IOLoop.current().call_later(self.delay, self._run, env)
else:
self._run(env)
def _run(self, env):
exec(self.code, SCRIPT_GLOBALS, env)
| bsd-3-clause | -3,450,994,995,555,589,600 | 22.910112 | 78 | 0.607143 | false |
caktus/django-sticky-uploads | example/example/settings.py | 1 | 5351 | # Django settings for example project.
import os
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
DEBUG = True
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_PATH, 'example.db'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '20qagh*vr36)5)t@4ni1g_kvroyp8qxdmhok&g_e_$9sy60#-u'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
# ... some options here ...
'context_processors': [
'django.contrib.auth.context_processors.auth',
]
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'stickyuploads',
'main',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
| bsd-3-clause | 6,936,067,417,082,235,000 | 31.23494 | 109 | 0.675388 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/security_rule.py | 1 | 5308 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
"""Network security rule.
:param id: Resource ID.
:type id: str
:param description: A description for this rule. Restricted to 140 chars.
:type description: str
:param protocol: Network protocol this rule applies to. Possible values
are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp', '*'
:type protocol: str or
~azure.mgmt.network.v2016_12_01.models.SecurityRuleProtocol
:param source_port_range: The source port or range. Integer or range
between 0 and 65535. Asterix '*' can also be used to match all ports.
:type source_port_range: str
:param destination_port_range: The destination port or range. Integer or
range between 0 and 65535. Asterix '*' can also be used to match all
ports.
:type destination_port_range: str
:param source_address_prefix: The CIDR or source IP range. Asterix '*' can
also be used to match all source IPs. Default tags such as
'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
this is an ingress rule, specifies where network traffic originates from.
:type source_address_prefix: str
:param destination_address_prefix: The destination address prefix. CIDR or
source IP range. Asterix '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet'
can also be used.
:type destination_address_prefix: str
:param access: The network traffic is allowed or denied. Possible values
are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny'
:type access: str or
~azure.mgmt.network.v2016_12_01.models.SecurityRuleAccess
:param priority: The priority of the rule. The value can be between 100
and 4096. The priority number must be unique for each rule in the
collection. The lower the priority number, the higher the priority of the
rule.
:type priority: int
:param direction: The direction of the rule. The direction specifies if
rule will be evaluated on incoming or outcoming traffic. Possible values
are: 'Inbound' and 'Outbound'. Possible values include: 'Inbound',
'Outbound'
:type direction: str or
~azure.mgmt.network.v2016_12_01.models.SecurityRuleDirection
:param provisioning_state: The provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'protocol': {'required': True},
'source_address_prefix': {'required': True},
'destination_address_prefix': {'required': True},
'access': {'required': True},
'direction': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
'access': {'key': 'properties.access', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'direction': {'key': 'properties.direction', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, protocol, source_address_prefix, destination_address_prefix, access, direction, id=None, description=None, source_port_range=None, destination_port_range=None, priority=None, provisioning_state=None, name=None, etag=None):
super(SecurityRule, self).__init__(id=id)
self.description = description
self.protocol = protocol
self.source_port_range = source_port_range
self.destination_port_range = destination_port_range
self.source_address_prefix = source_address_prefix
self.destination_address_prefix = destination_address_prefix
self.access = access
self.priority = priority
self.direction = direction
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| mit | -8,997,662,295,681,792,000 | 49.075472 | 245 | 0.654295 | false |
pvizeli/hassio | hassio/__main__.py | 1 | 1158 | """Main file for HassIO."""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import logging
import sys
import hassio.bootstrap as bootstrap
import hassio.core as core
_LOGGER = logging.getLogger(__name__)
# pylint: disable=invalid-name
if __name__ == "__main__":
bootstrap.initialize_logging()
if not bootstrap.check_environment():
exit(1)
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(thread_name_prefix="SyncWorker")
loop.set_default_executor(executor)
_LOGGER.info("Initialize Hassio setup")
config = bootstrap.initialize_system_data()
hassio = core.HassIO(loop, config)
bootstrap.migrate_system_env(config)
_LOGGER.info("Run Hassio setup")
loop.run_until_complete(hassio.setup())
_LOGGER.info("Start Hassio task")
loop.call_soon_threadsafe(loop.create_task, hassio.start())
loop.call_soon_threadsafe(bootstrap.reg_signal, loop, hassio)
_LOGGER.info("Run Hassio loop")
loop.run_forever()
_LOGGER.info("Cleanup system")
executor.shutdown(wait=False)
loop.close()
_LOGGER.info("Close Hassio")
sys.exit(hassio.exit_code)
| bsd-3-clause | 461,431,713,023,787,140 | 24.733333 | 66 | 0.702073 | false |
ChrisBeaumont/luigi | luigi/scheduler.py | 1 | 40532 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
"""
Abstract base class.
Note that the methods all take string arguments, not Task objects...
"""""
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
# at some point (in particular this would force users to replace all dashes with underscores in the config)
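    # For example, the ini key "disable-num-failures" (with dashes) is mapped onto
    # the disable_failures parameter below through its config_path argument.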
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
prune_done_tasks = parameter.BoolParameter(default=False)
record_task_history = parameter.BoolParameter(default=False)
visualization_graph = parameter.Parameter(default="svg", config_path=dict(section='scheduler', name='visualization-graph'))
prune_on_get_work = parameter.BoolParameter(default=False)
def fix_time(x):
# Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects
# Let's remove this function soon
if isinstance(x, datetime.datetime):
return time.mktime(x.timetuple())
else:
return x
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and fix_time(self.failures[0]) < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None):
self.id = task_id
        self.stakeholders = set()  # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
        self.workers = set()  # worker ids that can perform task - task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.scheduler_disable_time = None
self.runnable = False
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if (self.failures.first_failure_time is not None and
self.disable_hard_timeout):
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
return True
if self.failures.num_failures() >= self.disable_failures:
return True
return False
def can_disable(self):
return (self.disable_failures is not None or
self.disable_hard_timeout is not None)
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
If it's not an assistant having only tasks that are without
requirements.
We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
    Keep track of the current state and handle persistence.
The point of this class is to enable other ways to keep state, eg. by using a database
These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def dump(self):
state = (self._tasks, self._active_workers)
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(state, fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
    # Note: prone to crashes when old state is unpickled with updated code. TODO: some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from clean slate.")
return
self._tasks, self._active_workers = state
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
# Convert from old format
# TODO: this is really ugly, we need something more future-proof
# Every time we add an attribute to the Worker or Task class, this
# code needs to be updated
# Compatibility since 2014-06-02
for k, v in six.iteritems(self._active_workers):
if isinstance(v, float):
self._active_workers[k] = Worker(worker_id=k, last_active=v)
# Compatibility since 2015-05-28
if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
# If you load from an old format where Workers don't contain tasks.
for k, worker in six.iteritems(self._active_workers):
worker.tasks = set()
for task in six.itervalues(self._tasks):
for worker_id in task.workers:
self._active_workers[worker_id].tasks.add(task)
# Compatibility since 2015-04-28
if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)):
for t in six.itervalues(self._tasks):
t.disable_hard_timeout = None
else:
logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
# not sure why we have SUSPENDED, as it can never be set
if new_status == SUSPENDED:
new_status = PENDING
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None:
return
if new_status == FAILED and task.can_disable():
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
def fail_dead_worker_task(self, task, config, assistants):
# If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
def prune(self, task, config):
remove = False
# Mark tasks with no remaining active stakeholders for deletion
if not task.stakeholders:
if task.remove is None:
logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
"task in %s seconds", task.id, task.stakeholders, config.remove_delay)
task.remove = time.time() + config.remove_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time:
if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
self.re_enable(task, config)
# Remove tasks that have no stakeholders
if task.remove and time.time() > task.remove:
logger.info("Removing task %r (no connected stakeholders)", task.id)
remove = True
# Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
return remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
# remove workers from tasks
for task in self.get_active_tasks():
task.stakeholders.difference_update(delete_workers)
task.workers.difference_update(delete_workers)
def get_necessary_tasks(self):
necessary_tasks = set()
for task in self.get_active_tasks():
if task.status not in (DONE, DISABLED) or \
getattr(task, 'scheduler_disable_time', None) is not None:
necessary_tasks.update(task.deps)
necessary_tasks.add(task.id)
return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
:param config: an object of class "scheduler" or None (in which the global instance will be used)
:param resources: a dict of str->int constraints
:param task_history_override: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
if task.id not in necessary_tasks and self._state.prune(task, self._config):
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
worker_id = kwargs['worker']
self.update(worker_id)
task = self._state.get_task(task_id, setdefault=self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params))
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if not (task.status == RUNNING and status == PENDING):
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
self._update_task_history(task_id, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = self._retry_time(task, self._config)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
if expl is not None:
task.expl = expl
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, task):
"""
Return worker's rank function for task scheduling.
:return:
"""
return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _retry_time(self, task, config):
return time.time() + config.retry_delay
def get_work(self, host=None, assistant=False, **kwargs):
# TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest-priority node with no dependencies and available
# resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
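        # Illustrative scenario (added comment; the resource numbers are made up): with a
        # single resource {'db': 1}, a queued high-priority 'db' task that cannot start yet
        # still claims the resource in greedy_resources/greedy_workers, so this worker is
        # not handed another low-priority 'db' task that would starve the high-priority one.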
if self._config.prune_on_get_work:
self.prune()
worker_id = kwargs['worker']
# Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host})
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in self._state.get_active_workers())
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
upstream_status = self._upstream_status(task.id, upstream_table)
in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task.id, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
if self._state.has_task(dep_id):
dep = self._state.get_task(dep_id)
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack = task_stack + [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps]
upstream_status.append('') # to handle empty list
status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
def _serialize_task(self, task_id, include_deps=True):
task = self._state.get_task(task_id)
ret = {
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
for task in self._state.get_active_tasks():
serialized[task.id] = self._serialize_task(task.id)
return serialized
def _recurse_deps(self, task_id, serialized):
if task_id not in serialized:
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.warn('Missing task for id [%s]', task_id)
# try to infer family and params from task_id
try:
family, _, param_str = task_id.rstrip(')').partition('(')
params = dict(param.split('=') for param in param_str.split(', '))
except BaseException:
family, params = '', {}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'priority': 0,
}
else:
serialized[task_id] = self._serialize_task(task_id)
for dep in task.deps:
self._recurse_deps(dep, serialized)
def dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._recurse_deps(task_id, serialized)
return serialized
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
filter_func = lambda _: True
else:
terms = search.split()
filter_func = lambda t: all(term in t.id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def inverse_dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._traverse_inverse_deps(task_id, serialized)
return serialized
def _traverse_inverse_deps(self, task_id, serialized):
stack = [task_id]
serialized[task_id] = self._serialize_task(task_id)
while len(stack) > 0:
curr_id = stack.pop()
for task in self._state.get_active_tasks():
if curr_id in task.deps:
serialized[curr_id]["deps"].append(task.id)
if task.id not in serialized:
serialized[task.id] = self._serialize_task(task.id)
serialized[task.id]["deps"] = []
stack.append(task.id)
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str:
:return:
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
return {"taskId": task_id, "error": self._state.get_task(task_id).expl}
else:
return {"taskId": task_id, "error": ""}
def _update_task_history(self, task_id, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task_id, successful)
elif status == PENDING:
self._task_history.task_scheduled(task_id)
elif status == RUNNING:
self._task_history.task_started(task_id, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
| apache-2.0 | -8,370,775,439,895,190,000 | 39.090999 | 149 | 0.592421 | false |
GregJohnStewart/NumberGuesser | numberGuesser.py | 1 | 7208 | #!/usr/bin/python
"""
NumberGuesser
Guesses your number!
Author: Greg Stewart
Copyright 2014 Greg Stewart
Start: 7/15/14
Tries to guess your number, interacting with you via the Raspberry Pi's
16x2 CharLCDPlate
It uses a relatively simple algorithm to guess numbers:
Step 1:
Find a number that is larger than the user's number
It does this by starting at 10 and multiplying by 10 until the guess is
larger than the user's number
It also moves the lower bound up to the previous guess, since the user's
number is already known to be higher than that
Step 2:
Find the halfway point between the upper and lower bounds, and see if that is
the number. If it isn't, ask whether the guess is high or low. Update the
high/low bounds accordingly, and repeat until the number is reached.
Take the difference between the lower and upper bounds, then divide it by 2.
Add this to the value of the lower bound, and we have our next
guess, halfway between the lower and upper bounds.
If it is not the correct number, prompt whether the last guess was low or high.
Based on this, update the lower or upper bound as necessary.
Repeat until the desired number is reached.
"""
# print out program information
print "\n\nNumberGuesser\n\
Guesses a number that the user chooses\n\
Designed for use with the 16x2 Adafruit Character Plate\n\
\n\
Author: Greg Stewart\n\
Copyright 2014 Greg Stewart\n\
V 1.0\n\n"
#imports
print "Importing needed modules..."
import time
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
print " Done.\n"
print "Setting up custom functions..."
#time waster between choices, so as not to clog up the input system
def okay():
lcd.clear()
lcd.message("Okay")
time.sleep(.5)
lcd.message(".")
time.sleep(.5)
lcd.message(".")
time.sleep(.5)
lcd.message(".")
time.sleep(.5)
print " Done.\n"
print "Setting up Global variables..."
#
# Setup global variables
#
# Initialize the LCD plate.
lcd = Adafruit_CharLCDPlate(1)
# if we are going to be guessing
run = True
print " Done.\n"
print "Resetting Screen..."
# Clear display
lcd.clear()
lcd.backlight(lcd.OFF)
lcd.clear()
lcd.backlight(lcd.ON)
print " Done.\n"
#display beginnging informationals
print "Displaying welcome messages..."
lcd.message("NumberGuesser\nBy Greg Stewart")
time.sleep(2)
lcd.clear()
time.sleep(1)
lcd.message("Version:\n1.0")
time.sleep(2)
lcd.clear()
time.sleep(1)
lcd.message("Guesses your\nnumber!")
time.sleep(2)
lcd.clear()
time.sleep(1)
lcd.message("For \nScience!")
time.sleep(2)
print " Done.\n"
#loop for input, to see what to do next
# SELECT exits the program, anything else starts the game
print "Waiting for player to decide on a number..."
lcd.clear()
time.sleep(1)
start = False
chosen = False
tempTime = time.time()
messArr = (("Think of a \npositive integer"),
("To skip guessing\npress SELECT"),
("Press anything\nelse to begin."))
messCounter = 0
lcd.message(messArr[messCounter])
#loop for input
while not start:
if lcd.buttonPressed(lcd.SELECT):
start = True
print " Player chose to quit.\n"
elif lcd.buttonPressed(lcd.UP) or lcd.buttonPressed(lcd.DOWN) or lcd.buttonPressed(lcd.LEFT) or lcd.buttonPressed(lcd.RIGHT):
start = True
chosen = True
print " Player chose a number.\n"
elif (time.time() - tempTime) >= 3:
tempTime = time.time()
lcd.clear()
lcd.message(messArr[messCounter])
messCounter += 1
if messCounter > 2:
messCounter = 0
lcd.clear()
#if not just exiting, play the game
if run and chosen:
print "Begin Playing game:"
print "\tShowing rules..."
lcd.message("REMEMBER:")
time.sleep(2)
lcd.clear()
lcd.message("UP = YES\nDOWN = NO")
time.sleep(3)
lcd.clear()
lcd.message("RIGHT = Too High\nLEFT = Too Low")
time.sleep(3)
lcd.clear()
lcd.message("SELECT = exit")
time.sleep(3)
print "\t Done.\n"
playing = True
turnCount = 0
lastGuess = 10
highGuess = lastGuess
lowGuess = 0
print "\tBegin trying to find player's number..."
foundNum = False
inStep = True
answered = False
#find a multiple of 10 that is larger than the number we are looking for
while inStep and not foundNum:
print "\t\tHighGuess: {0}\n\
\t\tlowGuess: {1}\n\
\t\tlastGuess: {2}".format(highGuess,lowGuess,lastGuess)
turnCount += 1
answered = False
lcd.clear()
lcd.message("Is it:\n{0}".format(lastGuess))
while not answered:
if lcd.buttonPressed(lcd.UP):
foundNum = True
answered = True
elif lcd.buttonPressed(lcd.DOWN):
answered = True
elif lcd.buttonPressed(lcd.SELECT):
inStep = False
answered = True
run = False
print "\tPlayer chose to end the game."
if not foundNum and run:
okay()
answered = False
lcd.clear()
lcd.message("{0}\nlow or high?".format(lastGuess))
while not answered and not foundNum and run:
if lcd.buttonPressed(lcd.RIGHT):#too high
inStep = False
answered = True
highGuess = lastGuess
print "\tFound upper bounds: {0}".format(highGuess)
elif lcd.buttonPressed(lcd.LEFT):#too low
answered = True
lowGuess = lastGuess
lastGuess *= 10
highGuess = lastGuess
print "\tFound another lower bounds: {0}".format(lowGuess)
elif lcd.buttonPressed(lcd.SELECT):
inStep = False
answered = True
run = False
print "\tPlayer chose to end the game."
if not foundNum and run:
okay()
    #Find the halfway point between high and low and try to get closer
inStep = True
answered = False
while inStep and not foundNum and run:
print "\t\tHighGuess: {0}\n\
\t\tlowGuess: {1}\n\
\t\tlastGuess: {2}".format(highGuess,lowGuess,lastGuess)
lastGuess = lowGuess + ((highGuess - lowGuess)/2)
turnCount += 1
answered = False
lcd.clear()
lcd.message("Is it:\n{0}".format(lastGuess))
while not answered:
if lcd.buttonPressed(lcd.UP):
foundNum = True
answered = True
elif lcd.buttonPressed(lcd.DOWN):
answered = True
elif lcd.buttonPressed(lcd.SELECT):
inStep = False
answered = True
run = False
print "\tPlayer chose to end the game."
if not foundNum and run:
okay()
answered = False
lcd.clear()
lcd.message("{0}\nlow or high?".format(lastGuess))
while not answered and not foundNum and run:
if lcd.buttonPressed(lcd.RIGHT):#too high
answered = True
highGuess = lastGuess
print "\tFound another upper bounds: {0}".format(highGuess)
elif lcd.buttonPressed(lcd.LEFT):#too low
answered = True
lowGuess = lastGuess
print "\tFound another lower bounds: {0}".format(lastGuess)
elif lcd.buttonPressed(lcd.SELECT):
inStep = False
answered = True
run = False
print "\tPlayer chose to end the game."
if not foundNum and run:
okay()
if foundNum:
print "\tFound it! {0}".format(lastGuess)
print "\tNumber of guesses: {0}".format(turnCount)
print "\tDisplaying Stats..."
lcd.clear()
lcd.message("I guessed it!\n{0}".format(lastGuess))
time.sleep(3)
lcd.clear()
lcd.message("# of guesses:\n{0}".format(turnCount))
time.sleep(3)
print "\t Done."
print " Game Completed.\n"
print "Clearing the screen a final time and turning off the backlight..."
lcd.clear()
lcd.message("Goodbye!")
time.sleep(2)
lcd.backlight(lcd.OFF)
lcd.clear()
print " Done.\n"
print "Program End.\n"
| mit | -2,430,133,786,668,103,000 | 22.946844 | 126 | 0.691593 | false |
meawoppl/GA144Tools | FA18A_util.py | 1 | 4881 | from bitUtils import *
from bitarray import bitarray
import FA18A_functions
# Semantic Sugar
def opNeedsAddress(op):
return isinstance(op, (jumpOp, nextOp, ifOp, minusIfOp))
def loadNewWordAfterOp(op):
return isinstance(op, (returnOp, executeOp, jumpOp, callOp, nextOp, ifOp, minusIfOp))
# Map the 5-bit opcode of each function to its class
opCodeBitsToClass = { op.code : op for op in FA18A_functions.allOpList }
# lazy function to fetch an opCode class based on either:
# The opcode (int)
# The bitarray representation thereof
def getOp(rep):
if isinstance(rep, bitarray):
if rep.length() == 3:
rep = bitarray(rep + bitarray([False, False]))
return opCodeBitsToClass[ baToInt(rep[::-1]) ]
else:
return opCodeBitsToClass[rep]
slotToJumpSpan = [ (0, 9), (0, 7), (0, 2) ]
slotToInstSpan = [(13, 17), (8, 12), (3, 7), (0,2) ]
slotMasks = [bitarray( [False, True, False, True, False] ),
bitarray( [True, False, True, False, True ] ),
bitarray( [False, True, False, True, False] ),
bitarray( [True, False, True] )]
def encodeDecode(ba, slotNumber):
#print ba, slotMasks[ slotNumber ]
return ba ^ slotMasks[ slotNumber ]
def packInstructionsToBits(instrList, encode=False):
    # Pack instructions into the 18 bit word format.
# If the encode flag is set to true, return the xor of the word with 0x15555
# Excluding the address of a jump etc
slotNumberToBitLength = [5,5,5,3]
ba = bitarray()
for slotNumber, instruction in enumerate(instrList):
if slotNumber == 3: assert instruction.minBitCount() == 3, "Last op needs to be 3 bits."
# Lookup the maximum length that this instruction can be
instrLength = slotNumberToBitLength[slotNumber]
# Encode it into the bit array
instrBits = instruction.getBitRep(instrLength)
if encode:
print repr(ba), slotNumber
ba = encodeDecode(ba, slotNumber)
ba += instrBits
if instruction.requiresAddress:
            bitStart, bitStop = slotToJumpSpan[slotNumber]
addrLength = bitStop - bitStart
addressBits = intToBA(instrList[-1], addrLength)
# Add the three bits of padding if necessary
if slotNumber == 0:
addressBits = bitarray([False, False, False]) + addressBits
ba += addressBits
break
return ba
def unpackInstructionsFromBits(ba, decode=False):
ops = []
for slotNumber in range(4):
startBit, stopBit = slotToInstSpan[slotNumber]
opBits = ba[startBit:stopBit+1][::-1]
if decode:
opBits = encodeDecode(opBits, slotNumber)
opCode = getOp(opBits)
#print "Segment", slotNumber, opBits, opCode
ops.append(opCode)
# Decode the address as the last thing and break
if opCode.requiresAddress:
addressStart, addressStop = slotToJumpSpan[slotNumber]
address = baToInt( ba[addressStart:addressStop+1] )
ops.append(address)
break
return ops
def unpackInstructionsFromUI32(ui32, decode=False):
return unpackInstructionsFromBits( intToBA(ui32, 18), decode = decode )
def packInstructionsToUI32(ops, encode=False):
return baToInt( packInstructionsToBits(ops, encode = encode) )
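# Illustrative round trip (added comment; 'word' is a hypothetical 18-bit instruction word):
#   ops = unpackInstructionsFromUI32(word)     # e.g. a word taken from a ROM dump
#   packInstructionsToUI32(ops) == word        # expected to hold when pack/unpack stay symmetric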
def doJumpArithmetic(currentP, jumpz, jumpOpAddressSlot):
assert jumpOpAddressSlot in [0, 1, 2], "Jumpin Jesus!"
# Comments below from DB002-110705-G144A12.pdf:
# The destination address field simply replaces the low order bits of the current (incremented) value of P at the time the
# jump instruction executes. Thus the scope of a slot 2 jump is very limited while a slot 0 jump can reach any addressable
# destination, and can control P9 to explicitly enable or disable Extended Arithmetic Mode. Slot 1 and 2 jumps have no
# effect upon P9, so its selection is unchanged as these jumps are executed. In the F18A, slot 1 and 2 jumps additionally
# force P8 to zero. This means the destination of any slot 1 or 2 jump may never be in I/O space.
jumpBitLength = {0:10, 1:8, 2:3}[jumpOpAddressSlot]
p_ba = intToBA(currentP, 10)
j_ba = intToBA(jumpz, 10)
# print
# print "Jump From:", currentP, jumpz, jumpOpAddressSlot
# print "\tp", p_ba[::-1], currentP
# print "\tj", j_ba[::-1], jumpz
for bitNumber in range(jumpBitLength):
p_ba[bitNumber] = j_ba[bitNumber]
# Knock out bit 8 spec'ed in the above comment
if jumpOpAddressSlot in [1,2]:
p_ba[8] = 0
# print "\tf", p_ba[::-1], baToInt(p_ba)
# Return the new actual offset
return baToInt(p_ba)
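# Illustrative note (added; the calls below are sketches, not verified against hardware):
# a slot 0 jump can rewrite all 10 bits of P (including P9), while slot 1/2 jumps only
# replace the low 8 or 3 bits and also force P8 to 0, e.g.:
#   doJumpArithmetic(currentP, jumpz, 0)   # full 10-bit destination
#   doJumpArithmetic(currentP, jumpz, 2)   # only the low 3 bits of P change, P8 cleared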
# My best crack at what I think the multiport execute looks like
["@P", "ex", 0]
# opCodeToName = {code : func.__name__ for code, func in opCodeToC.items() }
| mit | 8,766,188,418,203,417,000 | 32.895833 | 126 | 0.654784 | false |
google/tink | testing/cross_language/aead_test.py | 1 | 7668 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cross-language tests for the Aead primitive.
These tests check some basic AEAD properties, and that all implementations can
interoperate with each other.
"""
# Placeholder for import for type annotations
from typing import Iterable, Text, Tuple
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import aead
from tink.proto import tink_pb2
from tink.testing import keyset_builder
from util import supported_key_types
from util import testing_servers
SUPPORTED_LANGUAGES = testing_servers.SUPPORTED_LANGUAGES_BY_PRIMITIVE['aead']
def setUpModule():
aead.register()
testing_servers.start('aead')
def tearDownModule():
testing_servers.stop()
# To test all implementations of AEAD, we simply try all available default key
# templates.
# Note that in order to test keys not covered by key templates, the parameter
# function would need to be rewritten to yield keysets instead of key template
# names.
def all_aead_key_template_names() -> Iterable[Text]:
"""Yields all AEAD key template names."""
for key_type in supported_key_types.AEAD_KEY_TYPES:
for key_template_name in supported_key_types.KEY_TEMPLATE_NAMES[key_type]:
yield key_template_name
class AeadPythonTest(parameterized.TestCase):
@parameterized.parameters(all_aead_key_template_names())
def test_encrypt_decrypt(self, key_template_name):
supported_langs = supported_key_types.SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME[
key_template_name]
self.assertNotEmpty(supported_langs)
key_template = supported_key_types.KEY_TEMPLATE[key_template_name]
# Take the first supported language to generate the keyset.
keyset = testing_servers.new_keyset(supported_langs[0], key_template)
supported_aeads = [
testing_servers.aead(lang, keyset) for lang in supported_langs
]
unsupported_aeads = [
testing_servers.aead(lang, keyset)
for lang in SUPPORTED_LANGUAGES
if lang not in supported_langs
]
for p in supported_aeads:
plaintext = (
b'This is some plaintext message to be encrypted using key_template '
b'%s using %s for encryption.'
% (key_template_name.encode('utf8'), p.lang.encode('utf8')))
associated_data = (
b'Some associated data for %s using %s for encryption.' %
(key_template_name.encode('utf8'), p.lang.encode('utf8')))
ciphertext = p.encrypt(plaintext, associated_data)
for p2 in supported_aeads:
output = p2.decrypt(ciphertext, associated_data)
self.assertEqual(output, plaintext)
for p2 in unsupported_aeads:
with self.assertRaises(
tink.TinkError,
msg='Language %s supports AEAD decrypt with %s unexpectedly' %
(p2.lang, key_template_name)):
p2.decrypt(ciphertext, associated_data)
for p in unsupported_aeads:
with self.assertRaises(
tink.TinkError,
msg='Language %s supports AEAD encrypt with %s unexpectedly' % (
p.lang, key_template_name)):
p.encrypt(b'plaintext', b'associated_data')
# If the implementations work fine for keysets with single keys, then key
# rotation should work if the primitive wrapper is implemented correctly.
# These wrappers do not depend on the key type, so it should be fine to always
# test with the same key type. Since the AEAD wrapper needs to treat keys
# with output prefix RAW differently, we also include such a template for that.
KEY_ROTATION_TEMPLATES = [
aead.aead_key_templates.AES128_CTR_HMAC_SHA256,
keyset_builder.raw_template(aead.aead_key_templates.AES128_CTR_HMAC_SHA256)
]
def key_rotation_test_cases(
) -> Iterable[Tuple[Text, Text, tink_pb2.KeyTemplate, tink_pb2.KeyTemplate]]:
for enc_lang in SUPPORTED_LANGUAGES:
for dec_lang in SUPPORTED_LANGUAGES:
for old_key_tmpl in KEY_ROTATION_TEMPLATES:
for new_key_tmpl in KEY_ROTATION_TEMPLATES:
yield (enc_lang, dec_lang, old_key_tmpl, new_key_tmpl)
class AeadKeyRotationTest(parameterized.TestCase):
@parameterized.parameters(key_rotation_test_cases())
def test_key_rotation(self, enc_lang, dec_lang, old_key_tmpl, new_key_tmpl):
# Do a key rotation from an old key generated from old_key_tmpl to a new
# key generated from new_key_tmpl. Encryption and decryption are done
# in languages enc_lang and dec_lang.
builder = keyset_builder.new_keyset_builder()
older_key_id = builder.add_new_key(old_key_tmpl)
builder.set_primary_key(older_key_id)
enc_aead1 = testing_servers.aead(enc_lang, builder.keyset())
dec_aead1 = testing_servers.aead(dec_lang, builder.keyset())
newer_key_id = builder.add_new_key(new_key_tmpl)
enc_aead2 = testing_servers.aead(enc_lang, builder.keyset())
dec_aead2 = testing_servers.aead(dec_lang, builder.keyset())
builder.set_primary_key(newer_key_id)
enc_aead3 = testing_servers.aead(enc_lang, builder.keyset())
dec_aead3 = testing_servers.aead(dec_lang, builder.keyset())
builder.disable_key(older_key_id)
enc_aead4 = testing_servers.aead(enc_lang, builder.keyset())
dec_aead4 = testing_servers.aead(dec_lang, builder.keyset())
self.assertNotEqual(older_key_id, newer_key_id)
# 1 encrypts with the older key. So 1, 2 and 3 can decrypt it, but not 4.
ciphertext1 = enc_aead1.encrypt(b'plaintext', b'ad')
self.assertEqual(dec_aead1.decrypt(ciphertext1, b'ad'), b'plaintext')
self.assertEqual(dec_aead2.decrypt(ciphertext1, b'ad'), b'plaintext')
self.assertEqual(dec_aead3.decrypt(ciphertext1, b'ad'), b'plaintext')
with self.assertRaises(tink.TinkError):
_ = dec_aead4.decrypt(ciphertext1, b'ad')
# 2 encrypts with the older key. So 1, 2 and 3 can decrypt it, but not 4.
ciphertext2 = enc_aead2.encrypt(b'plaintext', b'ad')
self.assertEqual(dec_aead1.decrypt(ciphertext2, b'ad'), b'plaintext')
self.assertEqual(dec_aead2.decrypt(ciphertext2, b'ad'), b'plaintext')
self.assertEqual(dec_aead3.decrypt(ciphertext2, b'ad'), b'plaintext')
with self.assertRaises(tink.TinkError):
_ = dec_aead4.decrypt(ciphertext2, b'ad')
# 3 encrypts with the newer key. So 2, 3 and 4 can decrypt it, but not 1.
ciphertext3 = enc_aead3.encrypt(b'plaintext', b'ad')
with self.assertRaises(tink.TinkError):
_ = dec_aead1.decrypt(ciphertext3, b'ad')
self.assertEqual(dec_aead2.decrypt(ciphertext3, b'ad'), b'plaintext')
self.assertEqual(dec_aead3.decrypt(ciphertext3, b'ad'), b'plaintext')
self.assertEqual(dec_aead4.decrypt(ciphertext3, b'ad'), b'plaintext')
# 4 encrypts with the newer key. So 2, 3 and 4 can decrypt it, but not 1.
ciphertext4 = enc_aead4.encrypt(b'plaintext', b'ad')
with self.assertRaises(tink.TinkError):
_ = dec_aead1.decrypt(ciphertext4, b'ad')
self.assertEqual(dec_aead2.decrypt(ciphertext4, b'ad'), b'plaintext')
self.assertEqual(dec_aead3.decrypt(ciphertext4, b'ad'), b'plaintext')
self.assertEqual(dec_aead4.decrypt(ciphertext4, b'ad'), b'plaintext')
if __name__ == '__main__':
absltest.main()
| apache-2.0 | -8,532,096,486,310,047,000 | 41.131868 | 79 | 0.71205 | false |
GitExl/WhackEd4 | src/whacked4/dehacked/table.py | 1 | 6880 | #!/usr/bin/env python
#coding=utf8
import copy
import math
import re
from dataclasses import dataclass
from typing import Dict, Optional, Set
@dataclass
class ThingFlag:
key: str
field: str
name: Optional[str]
index: Optional[int]
alias: Optional[str]
description: Optional[str]
@staticmethod
def from_item(key: str, values: Dict):
return ThingFlag(
key,
values.get('field', 'flags'),
values.get('name', None),
values.get('index', None),
values.get('alias', None),
values.get('description', None)
)
class Table(object):
"""
A table containing Dehacked entry objects.
"""
def __init__(self, entry_class, engine):
self.entries = []
self.entry_class = entry_class
self.offset = 0
self.engine = engine
self.flags: Dict[str, ThingFlag] = {}
self.names = None
def read_from_executable(self, count, f):
"""
Reads a number of entries from an executable.
"""
for _ in range(count):
self.entries.append(self.entry_class(self).read_from_executable(f))
def read_from_json(self, json):
"""
Reads this table's entries from a JSON object.
"""
unused_entry = self.entry_class(self)
unused_entry.unused = True
index = len(self.entries)
for json_entry in json:
# Start from a specific index.
if '_index' in json_entry:
next_index = json_entry['_index']
else:
next_index = index
# Add unused entries if needed.
if next_index > len(self.entries):
for _ in range(next_index - len(self.entries)):
self.entries.append(unused_entry.clone())
# Overwrite existing entry or add a new one.
if next_index < len(self.entries):
self.entries[next_index].from_json(json_entry)
self.entries[next_index].unused = False
else:
self.entries.append(self.entry_class(self).from_json(json_entry))
index = next_index + 1
def write_patch_data(self, source_table, f):
"""
        Writes this table's entries to a Dehacked patch file.
"""
for index, entry in enumerate(self.entries):
source_entry = source_table.entries[index]
# Write the current entry index if it returns any data to be written.
patch_str = entry.get_patch_string(source_entry)
if patch_str is not None:
f.write(entry.get_patch_header(index, offset=self.offset))
f.write(patch_str)
# Write just a header if only the entry's name has changed.
elif entry.name != source_table.entries[index].name:
f.write(entry.get_patch_header(index, offset=self.offset))
def apply_defaults(self, defaults):
for entry in self.entries:
entry.apply_defaults(defaults)
def flags_parse_string(self, field_key: str, value: any):
"""
Filters a thing's flags value.
Extended patches can use mnemonics for flag names, separated by plus signs.
@raise LookupError: if the value contains an unknown mnemonic.
"""
if not isinstance(value, set):
flag_parts = re.split(r"[,+| \t\f\r]+", str(value))
else:
flag_parts = value
out = set()
for flag_str in flag_parts:
flag_str = flag_str.strip()
# Flag is any number of bits.
if flag_str.isdigit():
keys = self._get_flag_keys_for_bits(field_key, int(flag_str))
out.update(keys)
# Flag is a mnemonic.
else:
if not self.engine.extended:
raise LookupError('Encountered thing flag key "{}" in a non-extended patch.'.format(flag_str))
flag = self.flags.get(flag_str)
if flag is None:
raise LookupError('Ignoring unknown thing flag key "{}".'.format(flag_str))
if flag.alias is not None:
original_flag = flag.alias
flag = self.flags.get(flag.alias)
if flag is None:
raise LookupError('Ignoring unknown thing flag alias "{}".'.format(original_flag))
out.add(flag_str)
return out
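    # Illustrative usage (added comment; the flag mnemonics below are hypothetical and
    # depend on the engine configuration that fills self.flags):
    #   table.flags_parse_string('flags', 'SOLID+SHOOTABLE')  ->  {'SOLID', 'SHOOTABLE'}
    #   table.flags_parse_string('flags', '6')                ->  keys whose bit index is 1 or 2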
def _get_flag_keys_for_bits(self, field_key: str, bits: int) -> Set[str]:
out = set()
for bit in range(0, 32):
mask = int(math.pow(2, bit))
if (bits & mask) == 0:
continue
for key, flag in self.flags.items():
if flag.field != field_key or flag.index is None or flag.index != bit:
continue
out.add(key)
break
return out
def flags_get_string(self, value: str):
"""
Returns a thing flags value as a string of mnemonics.
"""
if self.engine.extended:
return self._flags_get_string_extended(value)
else:
return self._flags_get_string_vanilla(value)
def _flags_get_string_extended(self, value: str):
"""
Returns a thing flags value as a string of extended engine mnemonics.
"""
out = []
for key in value:
if key not in self.flags:
raise LookupError('Unknown thing flag key "{}".'.format(key))
out.append(key)
if len(out) == 0:
return 0
return '+'.join(out)
def _flags_get_string_vanilla(self, value: str):
"""
Returns a thing flags value as a 32 bit integer bitfield.
"""
bits = 0
for key in value:
flag = self.flags.get(key)
if flag.index is None:
raise LookupError('Cannot write non-bitfield thing flag "{}" into a non-extended patch.'.format(key))
bits |= int(math.pow(2, flag.index))
return bits
def clone(self):
"""
Returns a clone of this table.
"""
dup = copy.copy(self)
dup_entries = []
for entry in self.entries:
dup_entries.append(entry.clone())
dup.entries = dup_entries
if self.names is not None:
dup.names = copy.copy(self.names)
return dup
def __repr__(self):
return '{}: {}'.format(self.entry_class, self.entries)
def __getitem__(self, index):
return self.entries[index]
def __setitem__(self, index, value):
self.entries[index] = value
def __len__(self):
return len(self.entries)
def __iter__(self):
return iter(self.entries)
| bsd-2-clause | 5,604,756,344,893,877,000 | 28.401709 | 117 | 0.542006 | false |
1337/yesterday-i-learned | leetcode/681m.py | 1 | 1570 | def nextClosestTime(input_time):
def get_digits(time):
return (
int(time[0]),
int(time[1]),
int(time[3]),
int(time[4]))
def is_valid_time(time):
hour = time[0] * 10 + time[1]
if hour > 23:
return False
minute = time[2] * 10 + time[3]
if minute > 59:
return False
return True
def time_diff(time1, time2):
time1_abs = (time1[0] * 10 + time1[1]) * 60 + time1[2] * 10 + time1[3]
time2_abs = (time2[0] * 10 + time2[1]) * 60 + time2[2] * 10 + time2[3]
if time1_abs <= time2_abs:
return time2_abs - time1_abs
time2_abs += 24 * 60 # Minutes
return time2_abs - time1_abs
current_diff = float('inf')
closest_time = None
input_time_digits = get_digits(input_time)
for char1 in input_time_digits:
for char2 in input_time_digits:
for char3 in input_time_digits:
for char4 in input_time_digits:
permute_time_digits = (char1, char2, char3, char4)
if is_valid_time(permute_time_digits):
diff = time_diff(input_time_digits, permute_time_digits)
if diff == 0:
continue
if diff < current_diff:
closest_time = permute_time_digits
current_diff = diff
    if closest_time is None:
        # all digits are identical, so every candidate equals the input; reuse it
        closest_time = input_time_digits
    return '%s%s:%s%s' % closest_time
print(nextClosestTime('19:34'))
print(nextClosestTime('23:59'))
| gpl-3.0 | 3,323,717,155,073,554,400 | 33.888889 | 80 | 0.498726 | false |
openSUSE/vdsm | vdsm/hooks.py | 1 | 5326 | #
# Copyright 2010-2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import utils
import glob
import os
import tempfile
import logging
import hashlib
from constants import P_VDSM_HOOKS, P_VDSM
class HookError(Exception): pass
def _scriptsPerDir(dir):
return [ s for s in glob.glob(P_VDSM_HOOKS + dir + '/*')
if os.access(s, os.X_OK) ]
def _runHooksDir(domxml, dir, vmconf={}, raiseError=True):
scripts = _scriptsPerDir(dir)
scripts.sort()
if not scripts:
return domxml
xmlfd, xmlname = tempfile.mkstemp()
try:
os.write(xmlfd, domxml or '')
os.close(xmlfd)
scriptenv = os.environ.copy()
scriptenv.update(vmconf.get('custom', {}))
if vmconf.get('vmId'):
scriptenv['vmId'] = vmconf.get('vmId')
ppath = scriptenv.get('PYTHONPATH', '')
scriptenv['PYTHONPATH'] = ':'.join(ppath.split(':') + [P_VDSM])
scriptenv['_hook_domxml'] = xmlname
for k, v in scriptenv.iteritems():
scriptenv[k] = unicode(v).encode('utf-8')
errorSeen = False
for s in scripts:
rc, out, err = utils.execCmd([s], sudo=False, raw=True,
env=scriptenv)
logging.info(err)
if rc != 0:
errorSeen = True
if rc == 2:
break
elif rc > 2:
logging.warn('hook returned unexpected return code %s', rc)
if errorSeen and raiseError:
raise HookError()
finalxml = file(xmlname).read()
finally:
os.unlink(xmlname)
return finalxml
def before_vm_start(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_start', vmconf=vmconf)
def after_vm_start(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_start', vmconf=vmconf, raiseError=False)
def before_vm_cont(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_cont', vmconf=vmconf)
def after_vm_cont(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_cont', vmconf=vmconf, raiseError=False)
def before_vm_pause(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_pause', vmconf=vmconf)
def after_vm_pause(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_pause', vmconf=vmconf, raiseError=False)
def before_vm_migrate_source(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_migrate_source', vmconf=vmconf)
def after_vm_migrate_source(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_migrate_source', vmconf=vmconf,
raiseError=False)
def before_vm_migrate_destination(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_migrate_destination', vmconf=vmconf)
def after_vm_migrate_destination(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_migrate_destination', vmconf=vmconf,
raiseError=False)
def before_vm_hibernate(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_hibernate', vmconf=vmconf)
def after_vm_hibernate(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_hibernate', vmconf=vmconf,
raiseError=False)
def before_vm_dehibernate(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_dehibernate', vmconf=vmconf)
def after_vm_dehibernate(domxml, vmconf={}):
    return _runHooksDir(domxml, 'after_vm_dehibernate', vmconf=vmconf,
raiseError=False)
def after_vm_destroy(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_destroy', vmconf=vmconf, raiseError=False)
def before_vdsm_start():
return _runHooksDir(None, 'before_vdsm_start', raiseError=False)
def after_vdsm_stop():
return _runHooksDir(None, 'after_vdsm_stop', raiseError=False)
def _getScriptInfo(path):
try:
with file(path) as f:
md5 = hashlib.md5(f.read()).hexdigest()
except:
md5 = ''
return {'md5': md5}
def _getHookInfo(dir):
def scripthead(script):
return script[len(P_VDSM_HOOKS) + len(dir) + 1:]
return dict([ (scripthead(script), _getScriptInfo(script))
for script in _scriptsPerDir(dir) ])
def installed():
res = {}
for dir in os.listdir(P_VDSM_HOOKS):
inf = _getHookInfo(dir)
if inf:
res[dir] = inf
return res
if __name__ == '__main__':
import sys
def usage():
print 'Usage: %s hook_name' % sys.argv[0]
sys.exit(1)
if len(sys.argv) >= 2:
globals()[sys.argv[1]](*sys.argv[2:])
else:
usage()
| gpl-2.0 | -4,334,075,190,277,434,400 | 31.084337 | 84 | 0.63988 | false |
yephper/django | tests/template_tests/filter_tests/test_striptags.py | 1 | 1681 | from django.template.defaultfilters import striptags
from django.test import SimpleTestCase
from django.utils.functional import lazystr
from django.utils.safestring import mark_safe
from ..utils import setup
class StriptagsTests(SimpleTestCase):
@setup({'striptags01': '{{ a|striptags }} {{ b|striptags }}'})
def test_striptags01(self):
output = self.engine.render_to_string(
'striptags01',
{
'a': '<a>x</a> <p><b>y</b></p>',
'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
},
)
self.assertEqual(output, 'x y x y')
@setup({'striptags02': '{% autoescape off %}{{ a|striptags }} {{ b|striptags }}{% endautoescape %}'})
def test_striptags02(self):
output = self.engine.render_to_string(
'striptags02',
{
'a': '<a>x</a> <p><b>y</b></p>',
'b': mark_safe('<a>x</a> <p><b>y</b></p>'),
},
)
self.assertEqual(output, 'x y x y')
class FunctionTests(SimpleTestCase):
def test_strip(self):
self.assertEqual(
striptags('some <b>html</b> with <script>alert("You smell")</script> disallowed <img /> tags'),
'some html with alert("You smell") disallowed tags',
)
def test_non_string_input(self):
self.assertEqual(striptags(123), '123')
def test_strip_lazy_string(self):
self.assertEqual(
striptags(lazystr('some <b>html</b> with <script>alert("Hello")</script> disallowed <img /> tags')),
'some html with alert("Hello") disallowed tags',
)
| bsd-3-clause | 7,574,818,189,147,422,000 | 32.306122 | 112 | 0.53837 | false |
joowani/quadriga | tests/conftest.py | 1 | 2110 | from __future__ import absolute_import, unicode_literals, division
import time
import mock
import pytest
from quadriga import QuadrigaClient
api_key = 'test_api_key'
api_secret = 'test_api_secret'
client_id = 'test_client_id'
nonce = 14914812560000
timeout = 123456789
signature = '6d39de3ac91dd6189993059be99068d2290d90207ab4aeca26dcbbccfef7b57d'
@pytest.fixture(autouse=True)
def patch_time(monkeypatch):
mock_time = mock.MagicMock()
mock_time.return_value = nonce // 10000
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture(autouse=True)
def logger():
mock_logger = mock.MagicMock()
def debug_called_with(message):
mock_logger.debug.assert_called_with(message)
mock_logger.debug_called_with = debug_called_with
return mock_logger
# noinspection PyShadowingNames
@pytest.fixture(autouse=True)
def response():
response = mock.MagicMock()
response.status_code = 200
return response
# noinspection PyShadowingNames
@pytest.fixture(autouse=True)
def session(response):
session = mock.MagicMock()
session.get.return_value = response
session.post.return_value = response
def get_called_with(endpoint, params=None):
session.get.assert_called_with(
url=QuadrigaClient.url + endpoint,
params=params,
timeout=timeout
)
session.get_called_with = get_called_with
def post_called_with(endpoint, payload=None):
payload = payload or {}
payload.update({
'key': api_key,
'nonce': nonce,
'signature': signature
})
session.post.assert_called_with(
url=QuadrigaClient.url + endpoint,
json=payload,
timeout=timeout
)
session.post_called_with = post_called_with
return session
# noinspection PyShadowingNames
@pytest.fixture(autouse=True)
def client(session, logger):
return QuadrigaClient(
api_key=api_key,
api_secret=api_secret,
client_id=client_id,
timeout=timeout,
session=session,
logger=logger
)
| mit | -8,523,418,013,636,718,000 | 23.823529 | 78 | 0.670142 | false |
afrendeiro/pipelines | lib/peaks_analysis.py | 1 | 9185 | #!/usr/env python
#############################################################################################
#
# This code produces plots of average signal and heatmaps around motifs under peaks.
# Also produces clusters of peaks, and outputs heatmap
#
#############################################################################################
"""
TODO: Adapt to allow running without --strand-specific!
"""
from argparse import ArgumentParser
from collections import OrderedDict
import cPickle as pickle
import HTSeq
import numpy as np
import os
import pandas as pd
import pybedtools
import re
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import sys
def main():
# Parse command-line arguments
parser = ArgumentParser()
# Global options
# positional arguments
parser.add_argument(dest='bam_file', type=str, help='Bam file location.')
parser.add_argument(dest='peak_file', type=str, help='Peak file location.')
parser.add_argument(dest='plots_dir', type=str, help='Directory to save plots to.')
# optional arguments
parser.add_argument('--duplicates', dest='duplicates', action='store_true')
parser.add_argument('--window-width', dest='window_width', type=int, default=2000)
parser.add_argument('--fragment-size', dest='fragment_size', type=int, default=1)
parser.add_argument('--strand-specific', dest='strand_specific', action='store_true')
parser.add_argument('--genome', dest='genome', type=str, default='hg19')
parser.add_argument('--n_clusters', dest='n_clusters', type=int, default=5)
args = parser.parse_args()
sample_name = re.sub("\..*", "", re.sub("\.bam", "", os.path.basename(args.bam_file)))
exportName = os.path.join(args.plots_dir, sample_name + "_coverage_%ibp" % args.window_width)
window_range = (-abs(args.window_width) / 2, abs(args.window_width) / 2)
# Loop through all samples, compute coverage in peak regions centered on motifs,
# save dicts with coverage and average profiles
# Load peak file from bed files, centered on motif
# TODO: modify pipeline so that slop is done here
# peaks = pybedtools.BedTool(os.path.join(peakFilePath, signal + ".motifStrand.bed")).slop(genome=genome, b=args.window_width/2)
peaks = bedToolsInterval2GenomicInterval(pybedtools.BedTool(args.peak_file))
# Filter peaks near chrm borders
for name, interval in peaks.iteritems():
if interval.length < args.window_width:
peaks.pop(name)
# Load bam
bamfile = HTSeq.BAM_Reader(args.bam_file)
# Get dataframe of signal coverage in bed regions, append to dict
cov = coverage(bamfile, peaks, args.fragment_size, strand_specific=args.strand_specific)
# Make multiindex dataframe
levels = [cov.keys(), ["+", "-"]]
labels = [[y for x in range(len(cov)) for y in [x, x]], [y for x in range(len(cov.keys())) for y in (0, 1)]]
index = pd.MultiIndex(labels=labels, levels=levels, names=["peak", "strand"])
df = pd.DataFrame(np.vstack(cov.values()), index=index)
df.columns = range(window_range[0], window_range[1])
# Save
pickle.dump(
df,
open(
os.path.join(
args.plots_dir,
"pickles",
sample_name + "_tssCoverage_%ibp.pickle" % args.window_width
),
'wb'
),
protocol=pickle.HIGHEST_PROTOCOL
)
# Compute averages
aveSignal = pd.DataFrame({"x": list(df.columns), # x axis
"average": df.apply(np.mean, axis=0), # both strands
"positive": df.ix[range(0, len(df), 2)].apply(np.mean, axis=0), # positive strand
"negative": df.ix[range(1, len(df), 2)].apply(np.mean, axis=0) # negative strand
})
# Plot average profiles by strand
aveSignal.plot("x", ["average", "positive", "negative"], subplots=True, sharex=True, colormap="Accent")
plt.savefig("{0}_averageProfile_{1}bp.pdf".format(exportName, args.window_width))
    # join strand signal (sum of the plus and minus strand profiles)
df = df.xs('+', level="strand") + df.xs('-', level="strand")
# Export as cdt
exportToJavaTreeView(
df.copy(),
os.path.join(
args.plots_dir,
"cdt",
sample_name + "_tssCoverage_%ibp_averageProfile.cdt" % args.window_width
)
)
# scale row signal to 0:1 (normalization)
dfNorm = df.apply(lambda x: (x - min(x)) / (max(x) - min(x)), axis=1)
# replace inf with 0s
dfNorm.replace([np.inf, -np.inf], 0, inplace=True)
# sort by absolute read amounts
order = dfNorm.sum(axis=1)
order.sort(inplace=True, ascending=False)
dfNorm = dfNorm.reindex([order.index])
# Export as cdt
exportToJavaTreeView(
dfNorm.copy(),
os.path.join(
args.plots_dir,
"cdt",
sample_name + "_tssCoverage_%ibp_averageProfile.normalized.cdt" % args.window_width
)
)
def bedToolsInterval2GenomicInterval(bedtool):
"""
Given a pybedtools.BedTool object, returns dictionary of HTSeq.GenomicInterval objects.
"""
intervals = OrderedDict()
for iv in bedtool:
if iv.strand == "+" or iv.strand == 0 or iv.strand == str(0):
intervals[iv.name] = HTSeq.GenomicInterval(iv.chrom, iv.start, iv.end, "+")
elif iv.strand == "-" or iv.strand == 0 or iv.strand == str(1):
intervals[iv.name] = HTSeq.GenomicInterval(iv.chrom, iv.start, iv.end, "-")
else:
intervals[iv.name] = HTSeq.GenomicInterval(iv.chrom, iv.start, iv.end)
return intervals
def coverage(bam, intervals, fragmentsize, orientation=True, duplicates=True, strand_specific=False):
"""
Gets read coverage in bed regions.
Returns dict of regionName:numpy.array if strand_specific=False, A dict of "+" and "-" keys with regionName:numpy.array
bam - HTSeq.BAM_Reader object. Must be sorted and indexed with .bai file!
intervals - dict with HTSeq.GenomicInterval objects as values
fragmentsize - integer
stranded - boolean
duplicates - boolean.
"""
chroms = ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrX']
# Loop through TSSs, get coverage, append to dict
cov = OrderedDict()
i = 0
for name, feature in intervals.iteritems():
if feature.chrom not in chroms:
continue
# if i % 1000 == 0:
# print(len(intervals) - i)
# Initialize empty array for this feature
if not strand_specific:
profile = np.zeros(feature.length, dtype=np.int8)
else:
profile = np.zeros((2, feature.length), dtype=np.int8)
# Fetch alignments in feature window
for aln in bam[feature]:
# check if duplicate
if not duplicates and aln.pcr_or_optical_duplicate:
continue
aln.iv.length = fragmentsize # adjust to size
# get position in relative to window
if orientation:
if feature.strand == "+" or feature.strand == ".":
start_in_window = aln.iv.start - feature.start - 1
end_in_window = aln.iv.end - feature.start - 1
else:
start_in_window = feature.length - abs(feature.start - aln.iv.end) - 1
end_in_window = feature.length - abs(feature.start - aln.iv.start) - 1
else:
start_in_window = aln.iv.start - feature.start - 1
end_in_window = aln.iv.end - feature.start - 1
# check fragment is within window; this is because of fragmentsize adjustment
if start_in_window < 0 or end_in_window > feature.length:
continue
# add +1 to all positions overlapped by read within window
if not strand_specific:
profile[start_in_window: end_in_window] += 1
else:
if aln.iv.strand == "+":
profile[0][start_in_window: end_in_window] += 1
elif aln.iv.strand == "-":
profile[1][start_in_window: end_in_window] += 1
# append feature profile to dict
cov[name] = profile
i += 1
return cov
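# Illustrative usage of coverage() (added comment; the file names are hypothetical):
#   bamfile = HTSeq.BAM_Reader("sample.bam")
#   peaks = bedToolsInterval2GenomicInterval(pybedtools.BedTool("peaks.bed"))
#   cov = coverage(bamfile, peaks, fragmentsize=1, strand_specific=True)
#   # each value is then a (2, region_length) array: row 0 = "+" strand, row 1 = "-" strand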
def exportToJavaTreeView(df, filename):
"""
Export cdt file of cluster to view in JavaTreeView.
df - pandas.DataFrame object with numeric data.
filename - string.
"""
cols = ["X" + str(x) for x in df.columns]
df.columns = cols
df["X"] = df.index
df["NAME"] = df.index
df["GWEIGHT"] = 1
df = df[["X", "NAME", "GWEIGHT"] + cols]
df.to_csv(filename, sep="\t", index=False)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("Program canceled by user!")
sys.exit(0)
| gpl-2.0 | -7,948,042,608,009,070,000 | 37.755274 | 210 | 0.589439 | false |
KodiColdkeys/coldkeys-addons | repository/plugin.video.mosthaunted/default.py | 1 | 5874 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# Most Haunted Addon by coldkeys
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Based on code from youtube addon
#
# Author: coldkeys
#------------------------------------------------------------
import os
import sys
import plugintools
import xbmc,xbmcaddon
from addon.common.addon import Addon
addonID = 'plugin.video.mosthaunted'
addon = Addon(addonID, sys.argv)
local = xbmcaddon.Addon(id=addonID)
icon = local.getAddonInfo('icon')
YOUTUBE_CHANNEL_ID_1 = "PLZE_drxHBVHrzRh96m-1pe8GrRYfcGS6w"
YOUTUBE_CHANNEL_ID_2 = "PLZE_drxHBVHrNiwfDIB_GmdsxLb5O97d_"
YOUTUBE_CHANNEL_ID_3 = "PLZE_drxHBVHoMkk9pZiPtdj3gJQHN1Dzf"
YOUTUBE_CHANNEL_ID_4 = "PLZE_drxHBVHrt_1wf3F1r9sQitzndjboN"
YOUTUBE_CHANNEL_ID_5 = "PLq-ap_BfvXn8UcrNKkrpCfCuNvrfMaJE1"
YOUTUBE_CHANNEL_ID_6 = "PLq-ap_BfvXn8cvRqg9xTOEwminXpZLa0O"
YOUTUBE_CHANNEL_ID_7 = "PLq-ap_BfvXn-bfXXWQZxrEoVtMR1cRDRT"
YOUTUBE_CHANNEL_ID_8 = "PLq-ap_BfvXn_Xt_53bn9dKoRhxNDycY6y"
YOUTUBE_CHANNEL_ID_9 = "PL67gu3YR5V8VySnFWcNxukylY57AmI_zh"
YOUTUBE_CHANNEL_ID_10 = "PL9PCUr0Stw6-EOhDeUvvj-6nH6ouPdYJl"
YOUTUBE_CHANNEL_ID_11 = "PLWEJFP6S9CF2c3F3-Ne3lo4fMVIav5LJP"
YOUTUBE_CHANNEL_ID_12 = "PL9PCUr0Stw6-EOhDeUvvj-6nH6ouPdYJl"
YOUTUBE_CHANNEL_ID_13 = "PLurs-HetJcxlfdOVEfoEYt9FhUEqFrolk"
YOUTUBE_CHANNEL_ID_14 = "PL3CD837F7930EF545"
YOUTUBE_CHANNEL_ID_15 = "PLxdHf9vy34LkaY7xrmvGviMkH4bLoDwh-"
YOUTUBE_CHANNEL_ID_16 = "UCLl5DuQifCYReW0b5IomM5w"
# Entry point
def run():
plugintools.log("docu.run")
# Get params
params = plugintools.get_params()
if params.get("action") is None:
main_list(params)
else:
action = params.get("action")
exec action+"(params)"
plugintools.close_item_list()
# Main menu
def main_list(params):
plugintools.log("docu.main_list "+repr(params))
plugintools.add_item(
#action="",
title="Seasons 1 -3",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_1+"/",
thumbnail=icon,
folder=True )
plugintools.add_item(
#action="",
title="Seasons 4 - 6",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_2+"/",
thumbnail=icon,
folder=True )
plugintools.add_item(
#action="",
title="Seasons 7 - 9",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_3+"/",
thumbnail=icon,
folder=True )
plugintools.add_item(
#action="",
title="Seasons 10 - 12",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_4+"/",
thumbnail=icon,
folder=True )
plugintools.add_item(
#action="",
title="Season 13",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_5+"/",
thumbnail=icon,
folder=True )
plugintools.add_item(
#action="",
title="Season 14",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_6+"/",
thumbnail=icon,
folder=True )
plugintools.add_item(
#action="",
title="Season 15 - The Live Series",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_7+"/",
thumbnail="special://home/addons/plugin.video.mosthaunted/resources/live.jpg",
folder=True )
plugintools.add_item(
#action="",
title="Season 16",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_8+"/",
thumbnail=icon,
folder=True )
plugintools.add_item(
#action="",
title="Season 17",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_9+"/",
thumbnail=icon,
folder=True )
plugintools.add_item(
#action="",
title="Most Haunted Live",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_10+"/",
thumbnail="special://home/addons/plugin.video.mosthaunted/resources/live.jpg",
folder=True )
plugintools.add_item(
#action="",
title="Most Haunted Unseen",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_11+"/",
thumbnail="special://home/addons/plugin.video.mosthaunted/resources/unseen.jpg",
folder=True )
plugintools.add_item(
#action="",
title="Best of Most Haunted Live",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_12+"/",
thumbnail="special://home/addons/plugin.video.mosthaunted/resources/live.jpg",
folder=True )
plugintools.add_item(
#action="",
title="Most Haunted Live The Search for Evil 2009",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_13+"/",
thumbnail="special://home/addons/plugin.video.mosthaunted/resources/live.jpg",
folder=True )
plugintools.add_item(
#action="",
title="Most Haunted Live - USA - Gettysburg",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_14+"/",
thumbnail="special://home/addons/plugin.video.mosthaunted/resources/live.jpg",
folder=True )
plugintools.add_item(
#action="",
title="NEW Most Haunted S1 2014",
url="plugin://plugin.video.youtube/playlist/"+YOUTUBE_CHANNEL_ID_15+"/",
thumbnail="special://home/addons/plugin.video.mosthaunted/resources/Most_Haunted.jpg",
folder=True )
plugintools.add_item(
#action="",
title="Most Haunted Videos",
url="plugin://plugin.video.youtube/channel/"+YOUTUBE_CHANNEL_ID_16+"/",
thumbnail="special://home/addons/plugin.video.mosthaunted/resources/Most_Haunted.jpg",
folder=True )
run()
| gpl-2.0 | 2,938,105,776,240,216,000 | 33.757396 | 94 | 0.621893 | false |
matejc/searx | searx/engines/swisscows.py | 1 | 3750 | """
Swisscows (Web, Images)
@website https://swisscows.ch
@provide-api no
@using-api no
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content
"""
from json import loads
from urllib import urlencode, unquote
import re
from lxml.html import fromstring
# engine dependent config
categories = ['general', 'images']
paging = True
language_support = True
# search-url
base_url = 'https://swisscows.ch/'
search_string = '?{query}&page={page}'
supported_languages_url = base_url
# regex
regex_json = re.compile(r'initialData: {"Request":(.|\n)*},\s*environment')
regex_json_remove_start = re.compile(r'^initialData:\s*')
regex_json_remove_end = re.compile(r',\s*environment$')
regex_img_url_remove_start = re.compile(r'^https?://i\.swisscows\.ch/\?link=')
# do search-request
def request(query, params):
if params['language'] == 'all':
ui_language = 'browser'
region = 'browser'
elif params['language'].split('-')[0] == 'no':
        region = 'nb-NO'
        # ui_language was left unset in this branch, which would raise a
        # NameError below; mirror the generic branch
        ui_language = params['language'].split('-')[0]
else:
region = params['language']
ui_language = params['language'].split('-')[0]
search_path = search_string.format(
query=urlencode({'query': query,
'uiLanguage': ui_language,
'region': region}),
page=params['pageno'])
# image search query is something like 'image?{query}&page={page}'
if params['category'] == 'images':
search_path = 'image' + search_path
params['url'] = base_url + search_path
return params
# get response from search-request
def response(resp):
results = []
json_regex = regex_json.search(resp.content)
# check if results are returned
if not json_regex:
return []
json_raw = regex_json_remove_end.sub('', regex_json_remove_start.sub('', json_regex.group()))
json = loads(json_raw)
# parse results
for result in json['Results'].get('items', []):
result_title = result['Title'].replace(u'\uE000', '').replace(u'\uE001', '')
# parse image results
if result.get('ContentType', '').startswith('image'):
img_url = unquote(regex_img_url_remove_start.sub('', result['Url']))
# append result
results.append({'url': result['SourceUrl'],
'title': result['Title'],
'content': '',
'img_src': img_url,
'template': 'images.html'})
# parse general results
else:
result_url = result['Url'].replace(u'\uE000', '').replace(u'\uE001', '')
result_content = result['Description'].replace(u'\uE000', '').replace(u'\uE001', '')
# append result
results.append({'url': result_url,
'title': result_title,
'content': result_content})
# parse images
for result in json.get('Images', []):
# decode image url
img_url = unquote(regex_img_url_remove_start.sub('', result['Url']))
# append result
results.append({'url': result['SourceUrl'],
'title': result['Title'],
'content': '',
'img_src': img_url,
'template': 'images.html'})
# return results
return results
# get supported languages from their site
def _fetch_supported_languages(resp):
supported_languages = []
dom = fromstring(resp.text)
options = dom.xpath('//div[@id="regions-popup"]//ul/li/a')
for option in options:
code = option.xpath('./@data-val')[0]
supported_languages.append(code)
return supported_languages
| agpl-3.0 | -4,480,584,742,850,424,000 | 29 | 97 | 0.5656 | false |
joaander/hoomd-blue | hoomd/data/local_access_cpu.py | 1 | 2996 | from hoomd.data.local_access import (
ParticleLocalAccessBase, BondLocalAccessBase, AngleLocalAccessBase,
DihedralLocalAccessBase, ImproperLocalAccessBase,
ConstraintLocalAccessBase, PairLocalAccessBase, _LocalSnapshot)
from hoomd.data.array import HOOMDArray
from hoomd import _hoomd
class ParticleLocalAccessCPU(ParticleLocalAccessBase):
_cpp_cls = _hoomd.LocalParticleDataHost
_array_cls = HOOMDArray
class BondLocalAccessCPU(BondLocalAccessBase):
_cpp_cls = _hoomd.LocalBondDataHost
_array_cls = HOOMDArray
class AngleLocalAccessCPU(AngleLocalAccessBase):
_cpp_cls = _hoomd.LocalAngleDataHost
_array_cls = HOOMDArray
class DihedralLocalAccessCPU(DihedralLocalAccessBase):
_cpp_cls = _hoomd.LocalDihedralDataHost
_array_cls = HOOMDArray
class ImproperLocalAccessCPU(ImproperLocalAccessBase):
_cpp_cls = _hoomd.LocalImproperDataHost
_array_cls = HOOMDArray
class ConstraintLocalAccessCPU(ConstraintLocalAccessBase):
_cpp_cls = _hoomd.LocalConstraintDataHost
_array_cls = HOOMDArray
class PairLocalAccessCPU(PairLocalAccessBase):
_cpp_cls = _hoomd.LocalPairDataHost
_array_cls = HOOMDArray
class LocalSnapshot(_LocalSnapshot):
"""Provides context manager access to HOOMD-blue CPU data buffers.
The interface of a `LocalSnapshot` is similar to that of the
`hoomd.Snapshot`. Data is MPI rank local so for MPI parallel simulations
only the data possessed by a rank is exposed. This means that users must
handle the domain decomposition directly. One consequence of this is that
access to ghost particle data is provided. A ghost particle is a particle
that is not owned by a rank, but nevertheless is required for operations
that use particle neighbors. Also, changing the global or local box within a
`LocalSnapshot` context manager is not allowed.
    Each property (e.g. ``data.particles.position``) grabs only the data for
    the regular (non-ghost) particles. A property can be prefixed with
    ``ghost_`` to grab the ghost particles in a read-only manner. Likewise,
    suffixing with ``_with_ghost`` grabs all data on the rank (regular and
    ghost particles) in a read-only array.
All array-like properties return a `hoomd.array.HOOMDArray` object which
prevents invalid memory accesses.
Note:
For the ``LocalAccess`` classes the affixed attributes mentioned above
are not shown. Also of interest, ghost data always come immediately
after the regular data.
"""
def __init__(self, state):
super().__init__(state)
self._particles = ParticleLocalAccessCPU(state)
self._bonds = BondLocalAccessCPU(state)
self._angles = AngleLocalAccessCPU(state)
self._dihedrals = DihedralLocalAccessCPU(state)
self._impropers = ImproperLocalAccessCPU(state)
self._pairs = PairLocalAccessCPU(state)
self._constraints = ConstraintLocalAccessCPU(state)
| bsd-3-clause | -2,310,734,870,337,004,000 | 36.924051 | 80 | 0.741322 | false |
everlof/RestKit-n-Django-Sample | Project_Django/cite/storage.py | 1 | 6739 | # This file has been shamelessly copied (MIT licence) from
# https://bitbucket.org/akoha/django-randomfilenamestorage
# Conversion to Python 3 by Alexander Nilsson
from errno import EEXIST
import ntpath
import os
import posixpath
import random
import string
from warnings import warn
from django.conf import settings
from django.core.files.storage import (Storage, FileSystemStorage,
locks, file_move_safe)
CHARACTERS = string.ascii_lowercase + string.digits
DEFAULT_LENGTH = 16
def random_string(length):
return ''.join(random.choice(CHARACTERS) for i in range(length))
def RandomFilenameMetaStorage(storage_class, length=None, uniquify_names=True):
class RandomFilenameStorage(storage_class):
def __init__(self, *args, **kwargs):
self.randomfilename_length = kwargs.pop('randomfilename_length',
length)
if self.randomfilename_length is None:
self.randomfilename_length = getattr(settings,
'RANDOM_FILENAME_LENGTH',
DEFAULT_LENGTH)
# Do not uniquify filenames by default.
self.randomfilename_uniquify_names = kwargs.pop('uniquify_names',
uniquify_names)
# But still try to tell storage_class not to uniquify filenames.
# This class will be the one that uniquifies.
try:
new_kwargs = dict(kwargs, uniquify_names=False)
super(RandomFilenameStorage, self).__init__(*args,
**new_kwargs)
except TypeError:
super(RandomFilenameStorage, self).__init__(*args, **kwargs)
def get_available_name(self, name, retry=True):
# All directories have forward slashes, even on Windows
name = name.replace(ntpath.sep, posixpath.sep)
dir_name, file_name = posixpath.split(name)
file_root, file_ext = posixpath.splitext(file_name)
# If retry is True and the filename already exists, keep
# on generating random filenames until the generated
# filename doesn't exist.
while True:
file_prefix = random_string(self.randomfilename_length)
# file_ext includes the dot.
name = posixpath.join(dir_name, file_prefix + file_ext)
if not retry or not self.exists(name):
return name
def _save(self, name, *args, **kwargs):
while True:
try:
return super(RandomFilenameStorage, self)._save(name,
*args,
**kwargs)
except OSError as e:
if e.errno == EEXIST:
# We have a safe storage layer
if not self.randomfilename_uniquify_names:
# A higher storage layer will rename
raise
# Attempt to get_available_name() without retrying.
try:
name = self.get_available_name(name,
retry=False)
except TypeError:
warn('Could not call get_available_name() '
'on %r with retry=False' % self)
name = self.get_available_name(name)
else:
raise
RandomFilenameStorage.__name__ = 'RandomFilename' + storage_class.__name__
return RandomFilenameStorage
class SafeFileSystemStorage(FileSystemStorage):
"""
Standard filesystem storage
Supports *uniquify_names*, like other safe storage classes.
Based on django.core.files.storage.FileSystemStorage.
"""
def __init__(self, *args, **kwargs):
self.uniquify_names = kwargs.pop('uniquify_names', True)
super(SafeFileSystemStorage, self).__init__(*args, **kwargs)
def _save(self, name, content):
full_path = self.path(name)
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
os.makedirs(directory)
elif not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
content.close()
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
fd = os.open(full_path,
(os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0)))
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
os.write(fd, chunk)
finally:
locks.unlock(fd)
os.close(fd)
except OSError as e:
if e.errno == EEXIST:
# Ooops, the file exists. We need a new file name.
if not self.uniquify_names:
raise
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if settings.FILE_UPLOAD_PERMISSIONS is not None:
os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)
return name
RandomFilenameFileSystemStorage = RandomFilenameMetaStorage(
storage_class=SafeFileSystemStorage,
)
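# Illustrative usage (a sketch, not part of the original module; the model and
# field names are made up):
#
#     from django.db import models
#
#     class Document(models.Model):
#         attachment = models.FileField(upload_to='documents',
#                                       storage=RandomFilenameFileSystemStorage())
#
# Uploaded files keep their extension but get a random 16-character stem by
# default (settings.RANDOM_FILENAME_LENGTH or the randomfilename_length kwarg
# controls the length).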
| mit | -733,618,468,414,824,200 | 42.477419 | 79 | 0.520255 | false |
rschwiebert/RingApp | ringapp/migrations/0032_suggestion.py | 1 | 1328 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-07 19:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('ringapp', '0031_auto_20150322_1626'),
]
operations = [
migrations.CreateModel(
name='Suggestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_type', models.SmallIntegerField(choices=[(0, 'ring'), (1, 'citation'), (1, 'theorem'), (1, 'citation')])),
('status', models.SmallIntegerField(choices=[(-2, 'need info'), (-1, 'declined'), (0, 'pending'), (1, 'accepted'), (2, 'withdrawn')], default=0)),
('name', models.CharField(blank=True, max_length=50, null=True)),
('description', models.CharField(blank=True, max_length=400, null=True)),
('citation', models.CharField(blank=True, max_length=100, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | -3,963,655,787,127,243,000 | 43.266667 | 162 | 0.606175 | false |
damomeen/nsi_connections | src/attribute_utils.py | 1 | 1210 | from time_utils import time_constrains
def prepare_nsi_attributes(connAttributes):
params = {}
params['gid'] = "NSI-REST service"
params['desc'] = connAttributes['description']
params['src'] = "%(src_domain)s:%(src_port)s" % connAttributes
params['dst'] = "%(dst_domain)s:%(dst_port)s" % connAttributes
params['srcvlan'] = int(connAttributes['src_vlan'])
params['dstvlan'] = int(connAttributes['dst_vlan'])
params['capacity'] = int(connAttributes['capacity'])
params['explicit_routes'] = connAttributes.get('explicit_routes')
start_time = connAttributes.get('start_time')
end_time = connAttributes.get('end_time')
params['start_sec'], params['end_sec'] = time_constrains(start_time, end_time)
return params
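# Illustrative input for prepare_nsi_attributes (the keys are the ones looked
# up above; the values are made up):
#
#     {'description': 'test circuit',
#      'src_domain': 'domainA', 'src_port': 'port-1', 'src_vlan': '100',
#      'dst_domain': 'domainB', 'dst_port': 'port-2', 'dst_vlan': '200',
#      'capacity': '1000', 'explicit_routes': None,
#      'start_time': None, 'end_time': None}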
def characterstics2attributes(characterstics):
attributes = {}
for characterstic in characterstics:
name = characterstic['name']
value = characterstic['value']
attributes[name] = value
return attributes
def status2characterstics(status):
characterstics = []
for name, value in status.items():
characterstics.append({'name':name, 'value':value})
return characterstics | apache-2.0 | -1,491,417,555,680,052,000 | 34.617647 | 82 | 0.664463 | false |
Islandman93/reinforcepy | reinforcepy/networks/dqn/theanolasagne/dqn_inits.py | 1 | 4658 | import lasagne
def create_NIPS(network_parms):
validate_parms(network_parms)
conv = get_lasagne_conv_layer()
# setup network layout
l_in = lasagne.layers.InputLayer([None] + network_parms.get('input_shape'))
l_hid1 = conv(l_in, 16, (8, 8), stride=network_parms.get('stride')[0])
l_hid2 = conv(l_hid1, 32, (4, 4), stride=network_parms.get('stride')[1])
l_hid3 = lasagne.layers.DenseLayer(l_hid2, 256)
l_out = lasagne.layers.DenseLayer(l_hid3, network_parms.get('output_num'), nonlinearity=lasagne.nonlinearities.linear)
return {'l_in': l_in, 'l_hid1': l_hid1, 'l_hid2': l_hid2, 'l_hid3': l_hid3, 'l_out': l_out}
def create_A3C(network_parms):
validate_parms(network_parms)
conv = get_lasagne_conv_layer()
# setup network layout
l_in = lasagne.layers.InputLayer(network_parms.get('input_shape'))
l_hid1 = conv(l_in, 16, (8, 8), stride=network_parms.get('stride')[0], untie_biases=network_parms.get('untie_biases'))
l_hid2 = conv(l_hid1, 32, (4, 4), stride=network_parms.get('stride')[1], untie_biases=network_parms.get('untie_biases'))
l_hid3 = lasagne.layers.DenseLayer(l_hid2, 256)
l_value = lasagne.layers.DenseLayer(l_hid3, 1, nonlinearity=lasagne.nonlinearities.linear)
l_policy = lasagne.layers.DenseLayer(l_hid3, network_parms.get('output_num'), nonlinearity=lasagne.nonlinearities.softmax)
return {'l_in': l_in, 'l_hid1': l_hid1, 'l_hid2': l_hid2, 'l_hid3': l_hid3, 'l_value': l_value, 'l_policy': l_policy}
def create_NIPS_sprag_init(network_parms):
validate_parms(network_parms)
conv = get_lasagne_conv_layer()
# setup network layout
l_in = lasagne.layers.InputLayer([None] + network_parms.get('input_shape'))
l_hid1 = conv(l_in, 16, (8, 8), stride=network_parms.get('stride')[0],
W=lasagne.init.Normal(.01),
b=lasagne.init.Constant(.1))
l_hid2 = conv(l_hid1, 32, (4, 4), stride=network_parms.get('stride')[1],
W=lasagne.init.Normal(.01),
b=lasagne.init.Constant(.1))
l_hid3 = lasagne.layers.DenseLayer(l_hid2, 256,
W=lasagne.init.Normal(.01),
b=lasagne.init.Constant(.1))
l_out = lasagne.layers.DenseLayer(l_hid3, network_parms.get('output_num'),
nonlinearity=lasagne.nonlinearities.linear,
W=lasagne.init.Normal(.01),
b=lasagne.init.Constant(.1))
return {'l_in': l_in, 'l_hid1': l_hid1, 'l_hid2': l_hid2, 'l_hid3': l_hid3, 'l_out': l_out}
def create_async_muupan_init(network_parms):
validate_parms(network_parms)
conv = get_lasagne_conv_layer()
# setup network layout
input_shape = network_parms.get('input_shape')
l_in = lasagne.layers.InputLayer(input_shape)
l_hid1 = conv(l_in, 16, (8, 8), stride=network_parms.get('stride')[0],
W=TorchInit((input_shape[1], 8, 8)),
b=TorchInit((input_shape[1], 8, 8)))
l_hid2 = conv(l_hid1, 32, (4, 4), stride=network_parms.get('stride')[1],
W=TorchInit((16, 4, 4)),
b=TorchInit((16, 4, 4)))
l_hid3 = lasagne.layers.DenseLayer(l_hid2, 256,
W=TorchInit((32, 4, 4)),
b=TorchInit((32, 4, 4)))
l_out = lasagne.layers.DenseLayer(l_hid3, network_parms.get('output_num'), nonlinearity=lasagne.nonlinearities.linear,
W=TorchInit(256),
b=TorchInit(256))
return {'l_in': l_in, 'l_hid1': l_hid1, 'l_hid2': l_hid2, 'l_hid3': l_hid3, 'l_out': l_out}
def get_lasagne_conv_layer():
import theano.tensor.signal.conv
from theano.sandbox.cuda import dnn
# if no dnn support use default conv
if not theano.config.device.startswith("gpu") or not dnn.dnn_available(): # code stolen from lasagne dnn.py
import lasagne.layers.conv
conv = lasagne.layers.conv.Conv2DLayer
else:
import lasagne.layers.dnn
conv = lasagne.layers.dnn.Conv2DDNNLayer
return conv
def validate_parms(network_parms):
network_parms.required(['input_shape', 'output_num', 'stride'])
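# Illustrative parameters (values are assumptions, not project defaults): the
# factories above expect a parameter object whose get()/required() calls
# resolve keys such as
#     input_shape=[4, 84, 84], stride=[4, 2], output_num=<number of actions>
# where stride[0] feeds the 8x8 conv layer and stride[1] the 4x4 conv layer.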
# inspired by https://github.com/muupan/async-rl/blob/master/init_like_torch.py
class TorchInit(lasagne.init.Initializer):
def __init__(self, fan_in):
import numpy as np
self.stdv = 1 / np.sqrt(np.prod(fan_in))
def sample(self, shape):
return lasagne.utils.floatX(lasagne.random.get_rng().uniform(low=-self.stdv, high=self.stdv, size=shape))
| gpl-3.0 | 1,424,664,429,930,978,600 | 39.155172 | 126 | 0.604766 | false |
iamaris/CDFCodes | pythia/SimulationMods/python/analyze.py | 1 | 2324 | #
# This is a primitive script to parse the output of cdfIntegrate, and
# to generate a set of files (one for each track) that contains some of
# the integration results.
#
# It is intended as an example from which more useful scripts can be
# generated.
#
import sys
import re
import Point
from IntegrationLeg import IntegrationLeg
from math import sqrt
def geantPathLengths(x):
f = open("output7.log")
lines = f.readlines()
for line in lines:
if isGEANTLine(line):
            parts = line.split()  # plain str.split; avoids the old 'string' module
            name = parts[4] + '/' + parts[5]
            pathLength = parts[6]  # was 'pars[6]', an undefined name
            x.append((name, pathLength))
def isIntegralLine(aString):
"""Return true if the aString is part of the integration output of
IntegrationControl"""
return aString[0:7]=="(CDF 1/"
def usage():
print "Usage: analyze <filename>"
print "<filename> must be the name of an output file created by cdfIntegrate"
if __name__ == "__main__":
# Get the name of the input file.
numArgs = len(sys.argv)
if (numArgs != 2):
usage()
sys.exit(0)
# Read the input file
filename = sys.argv[1]
try:
f = open(filename, "r")
except:
print "Could not open the file: %s, exiting" % (filename)
sys.exit(1)
# Read the input
int_lines = filter(isIntegralLine, f.readlines())
# Proceed with processing
# Crank out a set of files, one for each track, containing the following:
# Col 1: cylindrical radius of the point
# Col 2: integrated radiation lengths
# Col 3: name of the last material
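    # (An output line therefore looks like "<rho> <radlen> <material>", e.g.
    # "12.345 0.0123 SVX/LAYER0" -- the values and material name here are
    # made up.)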
ifile = 0
ofilename = filename + str(ifile)
print "Opening file: %s" % (ofilename)
sys.stdout = open(ofilename, "w")
previous_rho = 0.0 # hackery, used to detect when we've gone on to another track
for line in int_lines:
ip = IntegrationLeg()
ip.fromString(line)
rho = sqrt(ip.end().x()**2 +ip.end().y()**2)
if rho < previous_rho:
ifile = ifile + 1
ofilename = filename + str(ifile)
sys.stdout = sys.__stdout__
print "Opening file: %s" % (ofilename)
sys.stdout = open(ofilename, "w")
previous_rho = rho
print "%g %g %s" % (rho, ip.cumulative().radL(), ip.material())
| apache-2.0 | -1,385,004,450,452,219,100 | 30.405405 | 84 | 0.609725 | false |
hanak/hermes2d | python/examples/03.py | 1 | 1831 | #! /usr/bin/env python
# This example shows how to solve a first simple PDE:
# - load the mesh,
# - perform initial refinements
# - create a H1 space over the mesh
# - define weak formulation
# - initialize matrix solver
# - assemble and solve the matrix system
# - visualize the solution
#
# PDE: Poisson equation -Laplace u = CONST_F with homogeneous (zero)
# Dirichlet boundary conditions.
#
# You can change the constant right-hand side CONST_F, the
# initial polynomial degree P_INIT, and play with various initial
# mesh refinements at the beginning.
# Import modules
from hermes2d import Mesh, MeshView, H1Shapeset, PrecalcShapeset, H1Space, \
WeakForm, Solution, ScalarView, LinSystem, DummySolver
from hermes2d.forms import set_forms
from hermes2d.examples.c03 import set_bc
from hermes2d.examples import get_example_mesh
P_INIT = 5 # Uniform polynomial degree of mesh elements.
# Load the mesh file
mesh = Mesh()
mesh.load(get_example_mesh())
# Sample "manual" mesh refinement
#mesh.refine_element(0)
# Initialize the shapeset and the cache
shapeset = H1Shapeset()
pss = PrecalcShapeset(shapeset)
# Create an H1 space
space = H1Space(mesh, shapeset)
space.set_uniform_order(P_INIT)
set_bc(space)
space.assign_dofs()
# Initialize the weak formulation
wf = WeakForm(1)
set_forms(wf)
# Initialize the linear system and solver
solver = DummySolver()
sys = LinSystem(wf, solver)
sys.set_spaces(space)
sys.set_pss(pss)
# Assemble the stiffness matrix and solve the system
#sys.assemble()
#A = sys.get_matrix()
#b = sys.get_rhs()
#from scipy.sparse.linalg import cg
#x, res = cg(A, b)
#sln = Solution()
#sln.set_fe_solution(space, pss, x)
sln = Solution()
sys.assemble()
sys.solve_system(sln)
# Visualize the solution
sln.plot()
# Visualize the mesh
mesh.plot(space=space)
| gpl-2.0 | 3,010,814,516,752,970,000 | 24.430556 | 76 | 0.730748 | false |
sellberg/SACLA2016B8055 | scripts/07_angular_avg_std.py | 1 | 5748 | #!/home/software/SACLA_tool/bin/python2.7
import numpy as np
import h5py
import matplotlib
import matplotlib.pyplot as plt
import argparse
import time
import sys
from argparse import ArgumentParser
# -- parse
parser = ArgumentParser()
parser.add_argument("-r", "--run", type=int, dest="run", required=True, help="first run to process")
parser.add_argument("-n", "--n_shots", type=int, dest="n_shots", help="number of shots",default=100)
args = parser.parse_args()
run = args.run#448566#448571
n_shots=args.n_shots#100
# -- parameters
ADU_thr = [50,300]
hit_thr = 75 # threshold for hit rate
std_dx = 200 #range to zoom in around the center
std_thr = 80
# -- constants
defaultEphoton = 4920#@5000. # in eV
defaultWavelength = 2.52 # in A
defaultDetectorDistance = 0.216 # in m
defaultPixelSize = 50E-6 # in m
defaultSystemGain = 18.1497 # in electrons/ADU
xc = 1201#.582 # center in pixels
yc = 1201#.499 # center in pixels
center = [xc,yc]
# -- files and folders
file_folder = '/UserData/fperakis/2016_6/run%d/'%(run)
fig_folder = '/home/fperakis/2016_06/figs/'
save_folder = '/home/fperakis/2016_06/figs/analysed_data/'
src_folder = '/home/fperakis/2016_06/python_scripts/src' # src files
file_name = '%d.h5'%(run)
file_path = file_folder+file_name
# -- import source functions
sys.path.insert(0, src_folder)
from radial_profile import *
# -- import data
fh5 = h5py.File(file_path, 'r')
run_key = [ k for k in fh5.keys() if k.startswith('run_') ][0]
tags = fh5['/%s/detector_2d_assembled_1'%run_key].keys()[1:]
# -- image generator
num_im = len(tags)
img_gen = ( fh5['%s/detector_2d_assembled_1/%s/detector_data'%(run_key,tag) ].value for tag in tags )
num_im = len(tags)
mean_int = np.zeros(n_shots,dtype=float)
std_int = np.zeros(n_shots,dtype=float)
# -- average image
im1 = img_gen.next()
# -- make mask
mask = np.ones(im1.shape,dtype=bool)
mask[im1<ADU_thr[0]]=0
mask[im1>ADU_thr[1]]=0
im = np.array(im1)#*mask
im_bg,im_hit=np.array(im1),np.array(im1)
# -- loop
for i_shot in range(n_shots):
#for im_next in img_gen:
im_next = np.array(img_gen.next())
t1 = time.time()
mean_int[i_shot] = np.average(im_next.flatten())
im += im_next
if mean_int[i_shot]>hit_thr:
std_int[i_shot] = np.std(im_next[xc-std_dx:xc+std_dx,yc-std_dx:yc+std_dx].flatten())
if std_int[i_shot]<std_thr:
im_hit += im_next
else:
im_bg += im_next
print 'R.%d | S.%d/%.d | %.1f Hz'%(run,i_shot,n_shots,1.0/(time.time() - t1))
# -- hit rate
num_streaks = len(std_int[std_int>std_thr])
num_hits = float(len(mean_int[mean_int>hit_thr]))-num_streaks
hit_rate = num_hits/float(n_shots)
# -- normalise
im /= float(n_shots)
im_hit /= num_hits
im_bg /= (n_shots-num_hits)
im_corr = im_hit-im_bg
## -- histogram ADUs of mean image
bi,bf,db1 = -200,500,10#ADU_thr[0],ADU_thr[1],5#70,100,1#3e6,1e4
hy1,hx1 = np.histogram(im_corr,bins = np.arange(bi,bf,db1))
# -- histogram shots
bi,bf,db2 = 70.,100.,0.2#70,100,1#3e6,1e4
hy2,hx2 = np.histogram(mean_int,bins = np.arange(bi,bf,db2))
# -- histogram std
bi,bf,db3 = 50.,1000,10#70,100,1#3e6,1e4
hy3,hx3 = np.histogram(std_int,bins = np.arange(bi,bf,db3))
# -- angular integration
q,Iq_hit = radialProfile(np.array(im_hit,dtype=float), center, mask=mask, wavelength=defaultWavelength, detectorDistance=defaultDetectorDistance, pixelSize=defaultPixelSize)
q,Iq_bg = radialProfile(np.array(im_bg,dtype=float), center, mask=mask, wavelength=defaultWavelength, detectorDistance=defaultDetectorDistance, pixelSize=defaultPixelSize)
# -- subtract background
Iq = Iq_hit-Iq_bg
np.save(save_folder+'Iq_r%d.npy'%(run),np.array([q,Iq]))
print std_int
# -- plot
plt.figure(figsize=[15,8])
vmin,vmax=0,50#ADU_thr[1]
plt.subplot(2,3,1)
plt.imshow(im_corr,vmin=vmin,vmax=vmax)#,vmin=0,vmax=0.3)
plt.colorbar()
plt.title('r.%d'%(run))
plt.subplot(2,3,4)
plt.bar(hx1[:-1]-db1/2.,hy1,width = db1,color='green')
plt.axvline(x=ADU_thr[0],ls='--',color='gray')
plt.axvline(x=ADU_thr[1],ls='--',color='gray')
plt.yscale('log',nonposy='clip')
plt.xlabel('ADUs/shot')
plt.ylabel('number of pixels')
plt.title('Pixels histogram')
plt.subplot(2,3,2)
plt.bar(hx2[:-1]-db2/2.,hy2,width = db2)
plt.axvline(x=hit_thr,ls='--',color='gray',label='threshold=%d'%(hit_thr))
plt.yscale('log',nonposy='clip')
plt.xlabel('mean ADUs/shot')
plt.ylabel('number of shots')
#plt.title('Shots histogram')
plt.title('hit rate: %.2f percent'%(hit_rate*100))#mean: %.3f ADUS/pixel'%(total_mean))
plt.ylim([0.1,n_shots])
plt.legend(frameon=False)
plt.subplot(2,3,5)
plt.bar(hx3[:-1]-db3/2.,hy3,width = db3,color='red')
#plt.axvline(x=hit_thr,ls='--',color='gray',label='threshold=%d'%(hit_thr))
plt.yscale('log',nonposy='clip')
plt.xlabel('std ADUs/shot')
plt.ylabel('number of shots')
#plt.title('Shots histogram')
#plt.title('hit rate: %.2f percent'%(hit_rate*100))#mean: %.3f ADUS/pixel'%(total_mean))
plt.ylim([0.1,n_shots])
plt.legend(frameon=False)
'''
plt.subplot(2,3,5)
plt.plot(mean_int[:n_shots],'ro')
plt.axhline(y=hit_thr,ls='--',color='gray')
plt.title('hit rate: %.2f percent'%(hit_rate*100))#mean: %.3f ADUS/pixel'%(total_mean))
plt.xlabel('Shot number')
plt.ylabel('mean ADUS/shot')
'''
plt.subplot(2,3,3)
plt.plot(q,Iq_hit,label='hit')
plt.plot(q,Iq_bg,label='background')
plt.xlabel(r'$\rm Q_x (\AA^{-1}$)')
plt.ylabel(r'$\rm I_q$ (ADUs/shot)')
plt.legend(frameon=False)
plt.title('Angular average')
plt.xlim([0.05,0.55])
#plt.ylim([80,120])
plt.subplot(2,3,6)
plt.plot(q,Iq,label='hit-background')
plt.xlabel(r'$\rm Q_x (\AA^{-1}$)')
plt.ylabel(r'$\rm I_q$ (ADUs/shot)')
plt.legend(frameon=False)
plt.title('Angular average')
plt.xlim([0.05,0.55])
plt.ylim([10,50])
plt.tight_layout()
plt.savefig(fig_folder+'SAXSstd_run%d.png'%(run))
#plt.show()
| bsd-2-clause | -8,606,726,048,632,943,000 | 29.252632 | 173 | 0.6738 | false |
emsrc/pycornetto | lib/cornetto/cornet.py | 1 | 36978 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 by
# Erwin Marsi and Tilburg University
# This file is part of the Pycornetto package.
# Pycornetto is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# Pycornetto is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
the Cornet class which exposes the Cornetto xml database
"""
# BUGS/ISSUES:
# - inst.ask("slang +") takes forever (if no depth limit is set);
#   perhaps change search from recursive DFS to deque-based BFS?
# TODO:
# - deal with multi word units
# - write unit tests
# - remove code to circumvent bugs in the cornetto db
# - more code comments
# FEATURES:
# - optimization: for testing hypernym and meronym relations,
# a unidirectional BFS starting from the most specific/smallest part
# is probably faster than the current bidirectional BFS
# - option to supply xpath queries on xml
# - pprinted xml
__author__ = 'Erwin Marsi <[email protected]>'
__version__ = '0.6.1'
from collections import deque
from cornetto.parse import parse_cdb
from xml.etree.cElementTree import tostring
class Cornet(object):
"""
The Cornet class exposes the Cornetto xml database
Most public methods require input in the form of a shorthand for
specifying lexical units and relations, as described below.
B{Lexical units specifications}
A specification of lexical units consists of three parts, separated by a
single colon (':') character:
1. Spelling form (i.e. a word)
This can be any string without white space
2. Syntactic category (optional)
This can be any of 'noun', 'verb' or 'adj'.
3. A sense (optional)
        This is a number which distinguishes the particular word sense
Examples of valid lexical unit specifications are:
- slang:noun:1
- slang:noun
- slang::1
- slang
B{Relation specifications}
A specification of a relation consists of two parts:
1. Relation name (optional)
The name of a Wordnet relation between two synsets. See the
Cornetto documentation for the available relations. If not given,
all relations are tried. The special relation "SYNONYM" holds
between all members of the same synset. The relation name is not
case-sensitive; you can use lower case.
2. Depth (optional)
        A digit ('1' to '9') or the plus sign ('+'). This represents the
depth of the relations that are considered during search. In other
words, the maximal number of links allowed. If not given a default
value of 1 is used. The plus represents the system maximum
(currently 9).
A relation specification must have a name, a depth or both. Valid
relation specification include:
- HAS_HYPERONYM
- HAS_HYPERONYM1
- HAS_HYPERONYM+
- 3
- +
"""
# ------------------------------------------------------------------------------
# Public methods
# ------------------------------------------------------------------------------
_unit_separator = ":"
_handled_output_formats = ("spec", "xml", "raw")
_default_output_format = "spec"
_default_max_depth = 9
def __init__(self, cdb_lu=None, cdb_syn=None,
output_format=_default_output_format,
max_depth=_default_max_depth):
"""
Create a new Cornet instance
@keyword cdb_lu: an xml file(name) to read the lexical units from
@keyword cdb_syn: an xml file(name) to read the synsets from
        @keyword output_format: default output format
        @type output_format: string ('spec', 'xml', 'raw')
@keyword max_depth: a maximal depth between 1 and 9
@type max_depth: int
"""
if cdb_lu and cdb_syn:
self.open(cdb_lu, cdb_syn)
self.set_output_format(output_format)
self.set_max_depth(max_depth)
def open(self, cdb_lu, cdb_syn, verbose=False):
"""
Open and parse Cornetto database files
@param cdb_lu: xml definition of the lexical units
@type cdb_lu: file or filename
@param cdb_syn: xml definition of the synsets
@type cdb_syn: file or filename
@keyword verbose: verbose output during parsing
"""
( self._form2lu,
self._c_lu_id2lu,
self._c_sy_id2synset,
self._graph ) = parse_cdb(cdb_lu, cdb_syn, verbose)
def ask(self, query, format=None):
"""
Pose a query about lexical units to the Cornetto database
This supports three different types of queries:
1. Getting lexical units
If the query consists of only a lexical unit specification the
answer lists all lexical units which satisfy this
specification. See also L{get_lex_units}
2. Getting related lexical units
If the query consists of a lexical unit specification plus a
relation specification, the answer consists of all lexical
units related by the specified relation(s). See also
L{get_related_lex_units}
3. Testing relations between lexical units
If the query consists of a lexical unit specification, plus a
relation specification plus another lexical specification, the
answer is a path from the first to the second lexical unit(s)
along the specified relation(s). See also
L{test_lex_units_relation}
@param query: a specification
@type query: string
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
        @return: depends on the type of query and the output format
"""
from_spec, rel, to_spec = self._split_query(query)
if to_spec:
return self.test_lex_units_relation(from_spec, rel, to_spec, format)
elif rel:
return self.get_related_lex_units(from_spec, rel, format)
else:
return self.get_lex_units(from_spec, format)
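    # Illustrative queries, mirroring the doctests of the methods below:
    #   inst.ask("lamp")                        -> like get_lex_units("lamp")
    #   inst.ask("slang SYNONYM")               -> like get_related_lex_units(...)
    #   inst.ask("lamp HAS_HYPONYM gloeilamp")  -> like test_lex_units_relation(...),
    #       e.g. ['lamp:noun:2', 'HAS_HYPONYM', 'gloeilamp:noun:1']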
def get_lex_units(self, spec, format=None):
"""
Get all lexical units which satisfy this specification
>>> inst.get_lex_units("lamp")
['lamp:noun:3', 'lamp:noun:4', 'lamp:noun:1', 'lamp:noun:2']
>>> inst.get_lex_units("varen")
['varen:verb:3', 'varen:noun:1', 'varen:verb:1', 'varen:verb:2']
>>> inst.get_lex_units("varen:noun")
['varen:noun:1']
>>> inst.get_lex_units("varen:verb:3")
['varen:verb:3']
>>> inst.get_lex_units("varen:noun:3")
[]
@param spec: lexical unit specification
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
@rtype: list
@return: list of lexical units in requested output format
"""
form, cat, sense = self._split_unit_spec(spec)
formatter = self._get_lex_unit_formatter(format)
return [ formatter(lu)
for lu in self._form2lu.get(form, [])
if ( self._lu_has_cat(lu, cat) and
self._lu_has_sense(lu, sense) ) ]
def get_related_lex_units(self, lu_spec, rel_spec, format=None):
"""
For all specified lexical units,
find all lexical units related by the specified relation.
The search may be constrained by the setting of the maximum search depth;
see set_max_depth.
>>> pprint(inst.get_related_lex_units("slang", "SYNONYM"))
{'slang:noun:1': {'SYNONYM': {'serpent:noun:2': {}}},
'slang:noun:2': {},
'slang:noun:3': {'SYNONYM': {'pin:noun:2': {}, 'tang:noun:2': {}}},
'slang:noun:4': {'SYNONYM': {'groepstaal:noun:1': {},
'jargon:noun:1': {},
'kringtaal:noun:1': {}}},
'slang:noun:5': {'SYNONYM': {'muntslang:noun:1': {}}},
'slang:noun:6': {'SYNONYM': {'Slang:noun:1': {}}}}
>>> pprint(inst.get_related_lex_units("slang::1", "1"))
{'slang:noun:1': {'HAS_HOLO_MEMBER': {'slangegebroed:noun:1': {},
'slangengebroed:noun:2': {}},
'HAS_HYPERONYM': {'reptiel:noun:1': {}},
'HAS_HYPONYM': {'cobra:noun:1': {},
'gifslang:noun:1': {},
'hoedslang:noun:1': {},
'hydra:noun:2': {},
'lansslang:noun:1': {},
'lepelslang:noun:1': {},
'python:noun:2': {},
'ratelslang:noun:1': {},
'ringslang:noun:1': {},
'rolslang:noun:1': {},
'waterslang:noun:3': {},
'wurgslang:noun:1': {},
'zeeslang:noun:1': {}},
'HAS_MERO_PART': {'slangekop:noun:1': {},
'slangenkop:noun:1': {}},
'SYNONYM': {'serpent:noun:2': {}}}}
@param lu_spec: lexical unit(s) specification of source
@param rel_spec: relation(s) specification
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
@rtype: dict
@return: an hierachical dict structure with lexical units and relations as keys
"""
rel_name, depth = self._split_rel_spec(rel_spec)
if not rel_name:
if rel_spec == "+":
# silently change to max allowed search depth
depth = self._max_depth
elif depth > self._max_depth:
raise ValueError("requested search depth (%d) is larger than "
"maximum search depth (%d)" % (depth, self._max_depth))
lu_formatter = self._get_lex_unit_formatter(format)
rel_formatter = self._get_relation_formatter(format)
related_lus = {}
for from_lu in self.get_lex_units(lu_spec, format="raw"):
from_lu_repr = lu_formatter(from_lu)
related_lus[from_lu_repr] = \
self._search_related_lex_units(from_lu, rel_name, depth,
lu_formatter, rel_formatter, [from_lu])
return related_lus
def test_lex_units_relation(self, from_lu_spec, rel_spec, to_lu_spec, format=None):
"""
Test if certain relation(s) hold between certain lexical units by
searching for a a path from any of the source lexical units to any of
target lexical unit(s) along one or more of the specified relation(s)
>>> inst.test_lex_units_relation("lamp", "HAS_HYPONYM", "gloeilamp")
['lamp:noun:2', 'HAS_HYPONYM', 'gloeilamp:noun:1']
>>> inst.test_lex_units_relation("lamp", "HAS_HYPONYM2", "fotolamp")
['lamp:noun:2', 'HAS_HYPONYM', 'gloeilamp:noun:1', 'HAS_HYPONYM', 'fotolamp:noun:1']
>>> inst.test_lex_units_relation("lamp", "HAS_HYPONYM", "fotolamp")
[]
@param from_lu_spec: lexical unit specification of the source(s)
@param rel_spec: relation(s) specification
@param to_lu_spec: lexical unit specification of the target(s)
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
@return: list of lexical units and relations in requested output format,
possibly empty
@rtype: list
@warning: The result may not be the only shortest path.
"""
rel_name, depth = self._split_rel_spec(rel_spec)
from_lus = self.get_lex_units(from_lu_spec, format="raw")
to_lus = self.get_lex_units(to_lu_spec, format="raw")
pred, common_lu, succ = self._bidirectional_shortest_path(from_lus, to_lus, rel_name, depth)
path = self._reconstruct_path(pred, common_lu, succ, format)
return path
def get_synsets(self, spec, format=None):
"""
Get all synsets containing lexical units which satisfy a certain
specification.
>>> pprint(inst.get_synsets("slang"))
[['Slang:noun:1', 'slang:noun:6'],
['slang:noun:5', 'muntslang:noun:1'],
['slang:noun:1', 'serpent:noun:2'],
['slang:noun:2'],
['tang:noun:2', 'pin:noun:2', 'slang:noun:3'],
['jargon:noun:1', 'groepstaal:noun:1', 'kringtaal:noun:1', 'slang:noun:4']]
>>> pprint(inst.get_synsets("slang:noun:5"))
[['slang:noun:5', 'muntslang:noun:1']]
>>> pprint(inst.get_synsets("slang:noun:7"))
[]
@param spec: lexical unit specification
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
@return: list of synsets (lists) with lexical units in requested
output format
@rtype: list
"""
form, cat, sense = self._split_unit_spec(spec)
synsets = []
formatter = self._get_synset_formatter(format)
for lu in self._form2lu.get(form, []):
if ( self._lu_has_cat(lu, cat) and
self._lu_has_sense(lu, sense) ):
# Using new attribute added while parsing synonym relations
# to find the id's of all synsets to which this lu belongs.
# Alternatively, we could use the graph to find all lu's which
# are synonym to this lu.
for c_sy_id in lu.get("c_sy_id", "").split(","):
try:
sy = self._c_sy_id2synset[c_sy_id]
except KeyError:
# oops, there is no synset with this id
continue
synsets.append(formatter(sy))
return synsets
def get_related_synsets(self, lu_spec, rel_name=None, format=None):
# Not very useful. Remove this method?
# Or generalize to relation spec?
"""
For all synsets containing lexical units satisfying this specification
find the related synsets along this relation.
If no relation is given, all relations are considered.
>>> pprint(inst.get_related_synsets("lamp", "HAS_HYPERONYM"))
{'HAS_HYPERONYM': [['armatuur:noun:1', 'verlichtingsarmatuur:noun:1'],
['lamp:noun:2', 'licht:noun:13', 'lichtje:noun:1'],
['lichtbron:noun:1'],
['voorwerp:noun:1', 'ding:noun:1']]}
>>> pprint(inst.get_related_synsets("slang::1"))
{'HAS_HOLO_MEMBER': [['slangegebroed:noun:1', 'slangengebroed:noun:2']],
'HAS_HYPERONYM': [['reptiel:noun:1']],
'HAS_MERO_PART': [['slangekop:noun:1', 'slangenkop:noun:1']]}
@param lu_spec: lexical unit(s) specification of source
@param rel_name: relation name
@return: a dict with relations as keys and lists of synsets as values
@rtype: dict
@note: Parameter rel_name is a relation name, not a relation specification.
Search is thus not transitive.
"""
form, cat, sense = self._split_unit_spec(lu_spec)
if rel_name: rel_name = rel_name.upper()
syn_formatter = self._get_synset_formatter(format)
rel_formatter = self._get_relation_formatter(format)
related_syns = {}
# lazy and slow
for from_syn in self.get_synsets(lu_spec, format="raw"):
for rel in from_syn.find("wn_internal_relations") or []:
if self._rel_has_name(rel, rel_name):
to_syn_id = rel.get("target")
to_syn = self._c_sy_id2synset[to_syn_id]
syn_repr = syn_formatter(to_syn)
rel_repr = rel_formatter(rel)
related_syns.setdefault(rel_repr, []).append(syn_repr)
return related_syns
def get_lex_unit_by_id(self, c_lu_id, format=None):
"""
Get lexical unit by id
@param c_lu_id: Tvalue of the C{c_lu_id} attribute at C{<cdb_lu>} element
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
@rtype: string or None
@return: lexical unit in the requested output format or None
"""
formatter = self._get_lex_unit_formatter(format)
try:
return formatter(self._c_lu_id2lu[c_lu_id])
except KeyError:
return None
def get_synset_by_id(self, c_sy_id, format=None):
"""
Get synset by id
@param c_sy_id: value of the C{c_sy_id} attribute at C{<cdb_synset>} element
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
@return: set (list) of lexical units in the requested output format
@rtype: list or None
"""
formatter = self._get_synset_formatter(format)
try:
return formatter(self._c_sy_id2synset[c_sy_id])
except KeyError:
return None
def get_lex_unit_from_synset(self, c_sy_id, lemma, format=None): #added by Maarten van Gompel
"""Get a lexical unit based on a synset ID and a lemma"""
try:
synset = self._c_sy_id2synset[c_sy_id]
except KeyError:
return None
formatter = self._get_lex_unit_formatter(format)
c_lu_id = None
for syn in synset.find("synonyms") or []:
c_lu_id = syn.get("c_lu_id")
try:
lu = self._c_lu_id2lu[c_lu_id]
luform = self._get_lu_form(lu) #get form-spelling (lemma)
if luform == lemma:
#this one matches with the lemma we specified, return it!
return formatter(lu)
except KeyError:
# no lu with this id
continue
return None #nothing found
def all_common_subsumers(self, lu_spec1, lu_spec2,
rel_name="HAS_HYPERONYM", format=None):
"""
Finds all common subsumers of two lexical units over the given
        relation. The common subsumers are grouped according to the length of
the path (in edges) from the first lexical unit to the subsumer to the
second lexical unit.
>>> pprint(c.all_common_subsumers("kat", "hond"))
{2: ['huisdier:noun:1', 'zoogdier:noun:1'],
4: ['beest:noun:2', 'gedierte:noun:2', 'dier:noun:1'],
5: ['ziel:noun:3',
'homo sapiens:noun:1',
'sterveling:noun:1',
'mens:noun:1',
'mensenkind:noun:1'],
6: ['organisme:noun:2'],
8: ['wezen:noun:1', 'schepsel:noun:1', 'creatuur:noun:2'],
9: ['iets:noun:2'],
10: ['object:noun:3']}
@param lu_spec1: first lexical unit(s) specification
@param rel_name: relation name (not a specification)
@param lu_spec2: second lexical unit(s) specification
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
@return: a dict with path lenghts as key and lists of common subsumers
as values, possibly empty
@rtype: dict
@note: this method will only make sense for some relations
(typically HAS_HYPERONYM) but not for others
(e.g. SYNONYM)
"""
rel_name = rel_name.upper()
formatter = self._get_lex_unit_formatter(format)
lus1 = self.get_lex_units(lu_spec1, format="raw")
sucs1 = self._transitive_closure(lus1, rel_name)
        # Add lus themselves as successors with zero distance
        # This accounts for cases where lu1 equals lu2 or
# where one lu is a hyperonym of the other lu.
for lu in lus1:
sucs1[lu] = 0
lus2 = self.get_lex_units(lu_spec2, format="raw")
sucs2 = self._transitive_closure(lus2, rel_name)
# idem for lus2
for lu in lus2:
sucs2[lu] = 0
acs = dict()
for lu, dist in sucs1.items():
try:
sum_of_dist = dist + sucs2[lu]
except KeyError:
continue
acs.setdefault(sum_of_dist, []).append(formatter(lu))
return acs
def least_common_subsumers(self, lu_spec1, lu_spec2,
rel_name="HAS_HYPERONYM", format=None):
"""
Finds the least common subsumers of two lexical units over the given
        relation, that is, those common subsumers of which the length of
the path (in edges) from the first lexical unit to the subsumer to the
second lexical unit is minimal.
>>> c.least_common_subsumers("kat", "hond")
['huisdier:noun:1', 'zoogdier:noun:1']
@param lu_spec1: first lexical unit(s) specification
@param rel_name: relation name (not a specification)
@param lu_spec2: second lexical unit(s) specification
@keyword format: output format
@type format: 'spec', 'xml', 'raw'
@return: a lists of the least common subsumers, possibly empty
@rtype: list
@note: this method will only make sense for some relations
(typically HAS_HYPERONYM) but not for others
(e.g. SYNONYM)
"""
        # It might not be necessary to compute all common subsumers, i.e. the
        # transitive closure of both lu's, before we can decide for sure
# which sum of distances is the minimal one, but it is not easy to avoid.
# The reason is that one large distance of an lcs to lu1 may be compensated
# for by a small or zero distance to lu2.
# TODO: this point needs more explanation
acs = self.all_common_subsumers(lu_spec1, lu_spec2, rel_name, format)
if acs:
minimum = min(acs.keys())
return acs[minimum]
else:
return []
def set_output_format(self, format=_default_output_format):
"""
Change the default output format
@param format: output format
@type format: 'spec', 'xml', 'raw'
"""
if format in self._handled_output_formats:
self._output_format = format
else:
raise ValueError("unknown output format: " + format + " not in " +
self._handled_output_formats)
def set_max_depth(self, max_depth=_default_max_depth):
"""
Sets a limit on the maximal depth of searches for related lexical units
where no relation name is specified.
@param max_depth: a maximal depth between 1 and 9
@type max_depth: int
@note: The limit is only enforced on the public method, i.e. ask,
get_related_lex_units, and not on the private methods.
               Also note that this does not affect test_lex_units_relation.
"""
# because the bidirectional search seems to me much less sensitive
        # to deep searches, probably because it doesn't store all the paths
if 0 < max_depth < 10:
self._max_depth = max_depth
else:
raise ValueError("not a valid value for maximal depth: %s "
"(should be between 1 and 9 included)" % max_depth)
# ------------------------------------------------------------------------------
# Semi-private methods
# ------------------------------------------------------------------------------
# parsing specifications
def _split_query(self, query):
query = query.strip().split() + 3 * [""]
# relation are always in upper case
query[1] = query[1].upper()
return query[:3]
def _split_unit_spec(self, spec):
spec = spec + 2 * self._unit_separator
return spec.strip().split(self._unit_separator)[:3]
def _split_rel_spec(self, spec):
if spec[-1] in "123456789":
name, depth = spec[:-1], int(spec[-1])
elif spec[-1] == "+":
name, depth = spec[:-1], 9
else:
name, depth = spec, 1
return name.upper(), depth
# search
def _transitive_closure(self, lus, rel_name):
"""
Computes the transitive closure of a set of lexical units
over a certain relation. Returns a dict with successors as keys
and their distance (in edges) to the orginal lexical units.
"""
assert isinstance(lus, list), repr(lus) + " is not a list"
queue = lus
lus = dict.fromkeys(lus)
next_queue = []
# distance of lu in queue to original lus
distance = 0
successors= {}
while queue:
out_edges = self._graph.out_edges_iter(queue, data=True)
for from_lu, to_lu, edge in out_edges:
if ( self._rel_has_name(edge, rel_name) and
to_lu not in successors):
successors[to_lu] = distance + 1
# A lexical unit from the original lus may be reached, and
# is indeed a valid successor, but should not be added to
# the queue otherwise we run in an endless loop
if to_lu not in lus:
next_queue.append(to_lu)
queue, next_queue = next_queue, []
distance += 1
return successors
def _bidirectional_shortest_path(self, from_lus, to_lus, rel_name, depth):
# Does BFS from both source and target and meets in the middle
# Based on _bidirectional_pred_succ in networkx/path.py
# Returns (pred, succ, w) where
# pred is a dictionary of predecessors from w to the source, and
# succ is a dictionary of successors from w to the target.
# predecesssor and successors in search
# keys are lexical units, values are tuples of lexcial units and relations
pred = dict.fromkeys(from_lus, (None, None))
succ = dict.fromkeys(to_lus, (None, None))
# check trivial case where sources and targets intersect
for lu in from_lus:
if lu in succ:
return None, lu, None
# initialize fringes, start with forward
forward_fringe = list(from_lus)
reverse_fringe = list(to_lus)
level = 0
while forward_fringe and reverse_fringe and level != depth:
this_level = forward_fringe
forward_fringe = []
out_edges = self._graph.out_edges_iter(this_level, data=True)
for from_lu, to_lu, edge in out_edges:
if self._rel_has_name(edge, rel_name):
if to_lu not in pred: # prevent cycles
forward_fringe.append(to_lu)
# If there are multiple matching edges,
# the previous dict value may be overwritten,
# but we don't care because we are looking for *a* path
# instead of *all* paths.
pred[to_lu] = (from_lu, edge)
if to_lu in succ: return pred, to_lu, succ # found path
level += 1
if level == depth: break # max search depth reached
this_level = reverse_fringe
reverse_fringe = []
in_edges = self._graph.in_edges_iter(this_level, data=True)
for from_lu, to_lu, edge in in_edges:
if self._rel_has_name(edge, rel_name):
if from_lu not in succ:
# may replace existing relation
succ[from_lu] = (to_lu, edge)
reverse_fringe.append(from_lu)
if from_lu in pred: return pred, from_lu, succ # found path
level += 1
return None, None, None # no path found
def _reconstruct_path(self, pred, common_lu, succ, format=None):
lu_formatter = self._get_lex_unit_formatter(format)
rel_formatter = self._get_relation_formatter(format)
if not pred and not succ:
if common_lu:
# trivial path because source and target nodes intersect
return [lu_formatter(common_lu)]
else:
# no path found
return []
path = []
lu = common_lu
# from common lu to target lu
while lu is not None:
path.append(lu_formatter(lu))
lu, edge = succ[lu]
if edge is not None:
path.append(rel_formatter(edge))
# from source lu to common
lu, edge = pred[common_lu]
path.insert(0, rel_formatter(edge))
while lu is not None:
path.insert(0, lu_formatter(lu))
lu, edge = pred[lu]
if edge is not None:
path.insert(0, rel_formatter(edge))
return path
def _search_related_lex_units(self, from_lu, rel_name, depth, lu_formatter,
rel_formatter, path=[]):
from_lu_related = {}
if len(path) <= depth:
for from_lu, to_lu, edge in self._graph.out_edges_iter(from_lu, data=True):
if ( to_lu not in path and
self._rel_has_name(edge, rel_name)):
to_lu_related = \
self._search_related_lex_units(to_lu, rel_name, depth,
lu_formatter, rel_formatter,
path + [to_lu])
to_lu_repr = lu_formatter(to_lu)
rel_repr = rel_formatter(edge)
try:
from_lu_related[rel_repr][to_lu_repr] = to_lu_related
except KeyError:
from_lu_related[rel_repr] = {to_lu_repr: to_lu_related}
return from_lu_related
# lexical unit formatting
def _get_lex_unit_formatter(self, format=None):
if not format: format = self._output_format
if format == "spec":
return self._lu_to_spec
elif format == "xml":
return tostring
elif format == "raw":
return lambda lu: lu
else:
raise ValueError("unknown output format: " + format)
def _lu_to_spec(self, lu):
return self._unit_separator.join((
self._get_lu_form(lu),
self._get_lu_cat(lu),
self._get_lu_sense(lu) ))
# relation formatting
def _get_relation_formatter(self, format=None):
if not format: format = self._output_format
if format == "xml":
return tostring
elif format == "spec":
return self._rel_to_spec
elif format == "raw":
return lambda lu: lu
else:
raise ValueError("unknown output format: " + format)
def _rel_to_spec(self, edge):
return edge.get("relation")
def _rel_to_xml(self, edge):
return '<relation relation_name="%s">' % edge.get("relation")
# synset formatting
def _get_synset_formatter(self, format=None):
if not format: format = self._output_format
if format == "spec":
return self._synset_to_specs
elif format == "xml":
return tostring
elif format == "raw":
return lambda lu: lu
else:
raise ValueError("unknown output format: " + format)
def _synset_to_specs(self, synset):
specs = []
        # REMOVE-ME: a list of synonym lu id's, because cdb is still buggy and
# sometimes targets the same lu multiple times
seen_lu_ids = []
for syn in synset.find("synonyms") or []:
c_lu_id = syn.get("c_lu_id")
try:
lu = self._c_lu_id2lu[c_lu_id]
except KeyError:
# no lu with this id
continue
if c_lu_id not in seen_lu_ids:
specs.append(self._lu_to_spec(lu))
seen_lu_ids.append(c_lu_id)
return specs
# <cdb_lu> accessors
def _get_lu_form(self, lu):
try:
return lu.find("form").get("form-spelling", "")
except AttributeError:
# <form> not found
return ""
def _get_lu_cat(self, lu):
try:
return lu.find("form").get("form-cat", "")
except AttributeError:
# <form> not found
return ""
def _get_lu_sense(self, lu):
return lu.get("c_seq_nr", "")
def _lu_has_cat(self, lu, cat):
try:
# value of "form-cat" can be "noun"/"NOUN"
return not cat or lu.find("form").get("form-cat").lower() == cat
except AttributeError:
# <form> not found
return ""
def _lu_has_sense(self, lu, sense):
return not sense or lu.get("c_seq_nr") == sense
# <relations> accessors
def _get_rel_name(self, edge):
return edge.get("relation")
def _rel_has_name(self, edge, name):
return not name or edge.get("relation") == name
# Debugging code
# Parsing cdb takes a long time. Therefore debugging is much faster if we
# parse just once, like this:
#
# >>> import cornetto.cornet as cornet
# >>> cornet._parse()
#
# While debugging we inject the tables and graph in a Cornet instance:
#
# >>> reload(cornet); c = cornet._get_cornet_instance()
#
#def _parse(cdb_lu="cdb_lu_minimal.xml", cdb_syn="cdb_syn_minimal.xml"):
#global form2lu, c_lu_id2lu, c_sy_id2synset, graph
#form2lu, c_lu_id2lu, c_sy_id2synset, graph = parse_cdb(cdb_lu, cdb_syn, verbose=True)
#def _get_cornet_instance():
#c = Cornet()
#c._form2lu = form2lu
#c._c_lu_id2lu = c_lu_id2lu
#c._c_sy_id2synset = c_sy_id2synset
#c._graph = graph
#return c
#def _dump_multi_edges(c):
#edges = c._graph.edges_iter()
#while True:
#try:
#lu1, lu2 = edges.next()
#except StopIteration:
#break
#d = c._graph[lu1][lu2]
#if len(d) > 1:
#relations = [d2["relation"] for d2 in d.values()]
#print c._lu_to_spec(lu1), ",".join(relations), c._lu_to_spec(lu2)
#for i in range(len(d) - 1):
#edges.next()
| gpl-3.0 | 4,756,977,569,728,525,000 | 34.93586 | 101 | 0.521526 | false |
bwghughes/bankloader | bankloader/bankloader.py | 1 | 1182 | # -*- coding: utf-8 -*-
import hashlib
from decimal import Decimal, InvalidOperation
from dateutil.parser import parse
class InvalidTransaction(Exception):
pass
class RawTransaction(object):
date = None
description = None
amount = None
checksum = None
"""docstring for Transaction"""
def __init__(self, **kwargs):
self.kwargs = kwargs
self._determine_types()
def _determine_types(self):
"""
Will either be a date, a decimal, or a string
covering off when, what and amount. Uber basic
at the moment
#TODO: Make this less brittle
"""
try:
self.date = parse(self.kwargs.get('date'))
self.description = self.kwargs.get('description').strip()
self.amount = Decimal(self.kwargs.get('amount'))
self.checksum = hashlib.md5(u''.join(self.kwargs.values())
.encode('utf-8')).digest()
except (ValueError, InvalidOperation) as e:
raise InvalidTransaction("Can't make a transaction from {0} - {1}"
.format(self.kwargs, e))
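# Illustrative use (field values are made up):
#     t = RawTransaction(date='2016-01-05', description=' COFFEE SHOP ',
#                        amount='-2.50')
# parses the date with dateutil, strips the description, converts the amount
# to Decimal('-2.50') and stores an md5 digest of the raw values as checksum.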
| bsd-3-clause | -8,712,272,389,028,872,000 | 29.307692 | 78 | 0.567682 | false |
arkanister/minitickets | lib/utils/views/email/base.py | 1 | 3014 | # -*- coding: utf-8 -*-
from django.core.exceptions import ImproperlyConfigured
from django.http.response import HttpResponseRedirect
from django.template import loader
from django.utils.translation import ugettext as _
from .utils import HtmlEmailSender
from ..base import SmartView as View
class EmailTemplateRenderMixin(object):
email_template_name = None
def get_email_template_names(self):
if self.email_template_name is None:
raise ImproperlyConfigured(
_("EmailTemplateRenderMixin requires either a definition of "
"'template_name' or an implementation of 'get_email_template_names()'"))
return self.email_template_name
def get_email_context_data(self, **kwargs):
return kwargs
def render_email(self, **kwargs):
template = loader.get_template(self.get_email_template_names())
context = loader.Context(self.get_email_context_data(**kwargs))
return template.render(context)
class EmailMixin(EmailTemplateRenderMixin):
email_sender = HtmlEmailSender
email_subject = None
email_to = None
def get_email_subject(self):
return getattr(self, 'email_subject', None)
def get_email_to_list(self):
user_email = getattr(self.request.user, 'email', None)
email_to = getattr(self, 'email_to', None)
if email_to is not None and isinstance(email_to, basestring):
return [email_to]
elif email_to is not None and isinstance(email_to, (list, tuple)):
return email_to
elif email_to is None and user_email is not None:
return [user_email]
else:
raise ImproperlyConfigured(
_("EmailTemplateRenderMixin requires either a definition of "
"'email_to' or an implementation of 'get_email_to_list()'."))
def send_email(self, **kwargs):
""" Send email. """
self.email_sender(
subject=self.get_email_subject(),
to_email=self.get_email_to_list(),
content=self.render_email(**kwargs)
).send()
class EmailView(EmailMixin, View):
_default_template_messages = {
        'success': _('Email sent successfully!'),
'error': _('Failed to send mail!')
}
def get_success_url(self):
success_url = getattr(self, 'success_url', None)
if not success_url:
raise ImproperlyConfigured(
_("EmailView requires either a definition of "
"'success_url' or an implementation of 'get_success_url()'."))
return success_url
def get(self, request, *args, **kwargs):
try:
self.send_email()
self.messages.success(self.get_message('success'))
except Exception, e:
self.messages.error(self.get_message('error'))
if hasattr(self, 'send_email_error'):
return self.send_email_error(e.message)
return HttpResponseRedirect(self.get_success_url())
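# Illustrative sketch (not part of the original module): a minimal concrete
# view. The template path, subject and success URL below are assumptions.
class ExampleWelcomeEmailView(EmailView):
    email_template_name = 'emails/welcome.html'
    email_subject = _('Welcome!')
    success_url = '/'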
| apache-2.0 | 3,648,041,289,803,767,300 | 35.313253 | 90 | 0.627405 | false |
ingresso-group/pyticketswitch | pyticketswitch/utils.py | 1 | 6757 | import functools
import warnings
from datetime import date, datetime
from dateutil import parser
from decimal import Decimal
from pyticketswitch.exceptions import InvalidParametersError
def date_range_str(start_date, end_date):
"""Convert a set of dates to string readable by the API
Args:
start_date (datetime.date): the start of the date range.
end_date (datetime.date): the end of the date range.
Returns:
str: a date range in the format of "YYYYMMDD:YYYYMMDD".
Missing either or both dates is acceptable and will return
"YYYYMMDD:", ":YYYYMMDD", ":".
Raises:
InvalidParametersError: when a start_date or end_date is specified and
it is not a datetime.date object.
"""
if start_date and not isinstance(start_date, date):
raise InvalidParametersError("start_date is not a datetime instance")
if end_date and not isinstance(end_date, date):
raise InvalidParametersError("end_date is not a datetime instance")
if start_date:
start_date = start_date.strftime('%Y%m%d')
else:
start_date = ''
if end_date:
end_date = end_date.strftime('%Y%m%d')
else:
end_date = ''
if start_date or end_date:
date_range = '{}:{}'.format(start_date, end_date)
else:
date_range = ''
return date_range
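# Example (illustrative, not part of the original module):
#
#   date_range_str(date(2016, 1, 1), date(2016, 1, 31))  -> '20160101:20160131'
#   date_range_str(None, None)                           -> ''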
def isostr_to_datetime(date_str):
"""Convert an iso datetime string to a :py:class:`datetime.datetime` object.
Args:
date_str (str): the string to convert.
Returns:
:py:class:`datetime.datetime`: the python representation of the date
and time.
Raises:
ValueError: when the date_str is empty or None.
"""
if not date_str:
raise ValueError('{} is not a valid datetime string'.format(date_str))
dt = parser.parse(date_str)
return dt
def yyyymmdd_to_date(date_str):
"""Convert a YYYYMMDDD formated date to python :py:class:`datetime.date` object.
Args:
date_str (str): the string to convert.
Returns:
:py:class:`datetime.date`: the python representation of the date.
Raises:
ValueError: when the date_str is empty or None.
"""
if not date_str:
raise ValueError('{} is not a valid datetime string'.format(date_str))
date = datetime.strptime(date_str, '%Y%m%d')
if date:
return date.date()
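# Example (illustrative, not part of the original module):
#
#   yyyymmdd_to_date('20160401')  -> datetime.date(2016, 4, 1)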
def specific_dates_from_api_data(dates):
MONTHS = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4,
'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
return [
date(int(year.split('_')[1]), MONTHS.get(month), int(day.split('_')[1]))
for year, months in dates.items()
if year.startswith('year_')
for month, days in months.items()
for day, valid in days.items()
if valid is True
]
def bitmask_to_boolean_list(mask):
"""Convert a bitmask to boolean list
Args:
mask (int): the mask returned by the API
Returns:
list: list of booleans.
"""
of_length = max(1, mask.bit_length())
return [
bool(mask >> i & 1)
for i in range(of_length)
]
def bitmask_to_numbered_list(mask):
"""Convert a bitmask to a numbered list
Args:
mask (int): the mask returned by the API
Returns:
list: list of integers
"""
if mask is None:
return []
return [
i+1
for i in range(mask.bit_length() + 1)
if bool(mask >> i & 1)
]
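# Example (illustrative, not part of the original module): a mask of 6
# (binary 110) marks the second and third items.
#
#   bitmask_to_boolean_list(6)   -> [False, True, True]
#   bitmask_to_numbered_list(6)  -> [2, 3]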
def get_price(data, key):
"""Extracts a price as a float from some data
Args:
data (dict): the data containing the price
key (str): the key of the target price.
Returns:
float: the price as a float.
When the dictionary is missing the requested key, returns :obj:`None`
"""
price = data.get(key)
if price is not None:
price = float(price)
return price
def add_prices(*prices):
"""Adds two or more prices together
Args:
        *prices: Prices to add together. They can be a
:py:class:`float`, :py:class:`int`,
:py:class:`Decimal <decimal.Decimal>` or :py:class:`str`
Returns:
:class:`Decimal <decimal.Decimal>`, str, float or int.
The sum of the prices, using the most specific of these types that
is used in the input arguments, where specificity is in the order
- :py:class:`Decimal <decimal.Decimal>`
- :py:class:`str`
- :py:class:`float`
- :py:class:`int`
Raises:
TypeError: when fewer than 2 prices are provided
decimal.InvalidOperation: when the string representation
of an argument cannot be parsed as a decimal
"""
if len(prices) < 2:
raise TypeError(
'add_prices expected at least 2 arguments, got {}'.format(len(prices))
)
converted = [
Decimal(str(price)) if price is not None else None
for price in prices
]
combined = sum(converted)
if any(isinstance(price, Decimal) for price in prices):
return Decimal(combined)
if any(isinstance(price, str) for price in prices):
return str(combined)
if any(isinstance(price, float) for price in prices):
return float(combined)
return int(combined)
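# Example (illustrative, not part of the original module): the result type
# follows the most specific input type, as described in the docstring.
#
#   add_prices(Decimal('1.50'), 1)  -> Decimal('2.50')
#   add_prices('1.50', 1.0)         -> '2.50'
#   add_prices(1, 2)                -> 3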
def filter_none_from_parameters(params):
"""Removes parameters whos value is :obj:None
Args:
params (dict): dictionary of parameters to be passed to the API.
Returns:
        dict: the original parameters with any parameters whose value was
:obj:`None` removed.
"""
return {
key: value
for key, value in params.items()
if value is not None
}
def deprecation_warning(message, stacklevel=2):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn(message,
category=DeprecationWarning,
stacklevel=stacklevel)
warnings.simplefilter('default', DeprecationWarning) # reset filter
def deprecated(func):
"""Mark a function as deprecated and raise a warning
This decorator is used to mark functions as deprecated. It will result in
a warning being emitted when the function is called.
Args:
func: the function to be wrapped
Returns:
the wrapped function that raises a warning
"""
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
deprecation_warning(
"Call to deprecated function {}".format(func.__name__)
)
return func(*args, **kwargs)
# Mark the function as deprecated
wrapped_func.is_deprecated = True
return wrapped_func
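# Example (illustrative, not part of the original module): calling a wrapped
# function emits a DeprecationWarning via deprecation_warning().
#
#   @deprecated
#   def old_helper():
#       ...
#
#   old_helper()  # warns: "Call to deprecated function old_helper"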
| mit | -1,653,030,869,344,057,600 | 25.291829 | 84 | 0.613142 | false |
vandersonmota/t_dict | t_dict/t_dict.py | 1 | 1194 | # -*- coding: utf-8 -*-
from copy import deepcopy
from jsonpointer import resolve_pointer, set_pointer
#py2-py3
try:
from collections import MutableMapping
except ImportError:
from collections.abc import MutableMapping
class TDict(MutableMapping):
def __init__(self, d=None):
if d is None:
self.__d = {}
elif isinstance(d, self.__class__):
self.__d = deepcopy(d.__d)
else:
self.__d = deepcopy(d)
def __getitem__(self, key):
return self.__d[key]
def __setitem__(self, key, value):
self.__d[key] = value
def __delitem__(self, key):
del self.__d[key]
def __iter__(self):
return iter(self.__d)
def __len__(self):
return len(self.__d)
def find(self, path, default=None):
"""
Retrieves a single value using JSON-Pointer syntax
"""
result = resolve_pointer(self.__d, path, default)
if isinstance(result, dict):
result = TDict(result)
return result
def setin(self, path, value):
"""
Set a value using JSON-pointer syntax
"""
set_pointer(self, path, value)
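# Illustrative usage sketch (not part of the original module): reading and
# writing nested values with JSON-Pointer paths. The sample data is assumed.
def _example_usage():  # pragma: no cover - demonstration only
    td = TDict({'book': {'title': 'Dune'}})
    title = td.find('/book/title')  # -> 'Dune'
    td.setin('/book/year', 1965)    # -> td['book']['year'] == 1965
    return title, td['book']['year']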
| bsd-3-clause | 6,149,249,169,215,890,000 | 22.411765 | 59 | 0.554439 | false |
davesque/django-rest-framework-simplejwt | rest_framework_simplejwt/serializers.py | 1 | 4306 | from django.contrib.auth import authenticate
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions, serializers
from .settings import api_settings
from .state import User
from .tokens import RefreshToken, SlidingToken, UntypedToken
class PasswordField(serializers.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('style', {})
kwargs['style']['input_type'] = 'password'
kwargs['write_only'] = True
super().__init__(*args, **kwargs)
class TokenObtainSerializer(serializers.Serializer):
username_field = User.USERNAME_FIELD
default_error_messages = {
'no_active_account': _('No active account found with the given credentials')
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields[self.username_field] = serializers.CharField()
self.fields['password'] = PasswordField()
def validate(self, attrs):
authenticate_kwargs = {
self.username_field: attrs[self.username_field],
'password': attrs['password'],
}
try:
authenticate_kwargs['request'] = self.context['request']
except KeyError:
pass
self.user = authenticate(**authenticate_kwargs)
# Prior to Django 1.10, inactive users could be authenticated with the
# default `ModelBackend`. As of Django 1.10, the `ModelBackend`
# prevents inactive users from authenticating. App designers can still
# allow inactive users to authenticate by opting for the new
# `AllowAllUsersModelBackend`. However, we explicitly prevent inactive
# users from authenticating to enforce a reasonable policy and provide
# sensible backwards compatibility with older Django versions.
if self.user is None or not self.user.is_active:
raise exceptions.AuthenticationFailed(
self.error_messages['no_active_account'],
'no_active_account',
)
return {}
@classmethod
def get_token(cls, user):
raise NotImplementedError('Must implement `get_token` method for `TokenObtainSerializer` subclasses')
class TokenObtainPairSerializer(TokenObtainSerializer):
@classmethod
def get_token(cls, user):
return RefreshToken.for_user(user)
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
return data
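# Illustrative sketch (not part of the original module): the serializer is
# normally driven through a DRF view; the credentials below are assumptions.
#
#   serializer = TokenObtainPairSerializer(data={'username': 'alice', 'password': 's3cret'})
#   serializer.is_valid(raise_exception=True)
#   serializer.validated_data  # {'refresh': '<token>', 'access': '<token>'}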
class TokenObtainSlidingSerializer(TokenObtainSerializer):
@classmethod
def get_token(cls, user):
return SlidingToken.for_user(user)
def validate(self, attrs):
data = super().validate(attrs)
token = self.get_token(self.user)
data['token'] = str(token)
return data
class TokenRefreshSerializer(serializers.Serializer):
refresh = serializers.CharField()
def validate(self, attrs):
refresh = RefreshToken(attrs['refresh'])
data = {'access': str(refresh.access_token)}
if api_settings.ROTATE_REFRESH_TOKENS:
if api_settings.BLACKLIST_AFTER_ROTATION:
try:
# Attempt to blacklist the given refresh token
refresh.blacklist()
except AttributeError:
# If blacklist app not installed, `blacklist` method will
# not be present
pass
refresh.set_jti()
refresh.set_exp()
data['refresh'] = str(refresh)
return data
class TokenRefreshSlidingSerializer(serializers.Serializer):
token = serializers.CharField()
def validate(self, attrs):
token = SlidingToken(attrs['token'])
# Check that the timestamp in the "refresh_exp" claim has not
# passed
token.check_exp(api_settings.SLIDING_TOKEN_REFRESH_EXP_CLAIM)
# Update the "exp" claim
token.set_exp()
return {'token': str(token)}
class TokenVerifySerializer(serializers.Serializer):
token = serializers.CharField()
def validate(self, attrs):
UntypedToken(attrs['token'])
return {}
| mit | 2,090,570,235,381,383,000 | 28.902778 | 109 | 0.634231 | false |
8l/beri | cheritest/trunk/tests/tlb/test_tlb_user_mode.py | 2 | 1213 | #-
# Copyright (c) 2012 Robert M. Norton
# All rights reserved.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_tlb_user_mode(BaseBERITestCase):
@attr('tlb')
def test_flag_set(self):
self.assertRegisterEqual(self.MIPS.a5, 0xffffffffffffffff, "Failed to execute code at virtual address.")
| apache-2.0 | -7,301,654,046,864,827,000 | 38.129032 | 112 | 0.757626 | false |
drvinceknight/Nashpy | tests/unit/test_is_best_response.py | 1 | 1600 | """
Tests for the best response check
"""
import numpy as np
from nashpy.utils.is_best_response import (
is_best_response,
)
def test_is_best_response_example_1():
"""
This tests an example from the discussion documentation.
The second assert checks that the column player strategy is as expected.
"""
A = np.array(((0, -1, 1), (1, 0, -1), (-1, 1, 0)))
sigma_c = np.array((0, 1 / 2, 1 / 2))
sigma_r = np.array((0, 0, 1))
assert is_best_response(A=A, sigma_c=sigma_c, sigma_r=sigma_r) is True
assert is_best_response(A=-A.T, sigma_c=sigma_r, sigma_r=sigma_c) is False
def test_is_best_response_example_2():
"""
This tests an example from the discussion documentation.
The second assert checks that the column player strategy is as expected.
"""
A = np.array(((0, -1, 1), (1, 0, -1), (-1, 1, 0)))
sigma_c = np.array((0, 1 / 2, 1 / 2))
sigma_r = np.array((1 / 3, 1 / 3, 1 / 3))
assert is_best_response(A=A, sigma_c=sigma_c, sigma_r=sigma_r) is False
assert is_best_response(A=-A.T, sigma_c=sigma_r, sigma_r=sigma_c) is True
def test_is_best_response_example_3():
"""
This tests an example from the discussion documentation.
The second assert checks that the column player strategy is as expected.
"""
A = np.array(((0, -1, 1), (1, 0, -1), (-1, 1, 0)))
sigma_c = np.array((1 / 3, 1 / 3, 1 / 3))
sigma_r = np.array((1 / 3, 1 / 3, 1 / 3))
assert is_best_response(A=A, sigma_c=sigma_c, sigma_r=sigma_r) is True
assert is_best_response(A=-A.T, sigma_c=sigma_r, sigma_r=sigma_c) is True
| mit | 7,907,187,553,740,622,000 | 33.042553 | 78 | 0.6125 | false |
fpiotrow/caldav-tester-packaging | cdtdiagnose.py | 1 | 4722 | #!/usr/bin/env python
#
##
# Copyright (c) 2013-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
import datetime
import shutil
import sys
import argparse
from subprocess import Popen, PIPE
server_root = "/Applications/Server.app/Contents/ServerRoot"
os.environ["PATH"] = "%s/usr/bin:%s" % (server_root, os.environ["PATH"])
library_root = "/Library/Server/Calendar and Contacts"
directory_node = "/LDAPv3/127.0.0.1"
def cmd(args, input=None, raiseOnFail=True):
if input:
p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
result = p.communicate(input)
else:
p = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
result = p.communicate()
if raiseOnFail and p.returncode:
raise RuntimeError(result[1])
return result[0], p.returncode
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Gather CalDAVTester diagnostics.',
)
parser.add_argument('-d', '--directory', action='store',
help='Destination directory for diagnostics archive')
args = parser.parse_args()
print "Running CDT diagnostics due to test failure."
log = []
def error(message, e):
log.append("CDT diagnostic: %s" % (message,))
log.append(str(e))
print "\n".join(log)
sys.exit(1)
now = datetime.datetime.now()
now = now.replace(microsecond=0)
dirname = "cdtdiagnose-%s" % (now.strftime("%Y%m%d-%H%M%S"),)
if args.directory is not None:
if not os.path.isdir(args.directory):
print "Specified target directory path is invalid, using default."
else:
dirname = os.path.join(args.directory, dirname)
print "Saving diagnostic archive to: {}".format(dirname,)
try:
os.mkdir(dirname)
except Exception as e:
error("Could not create archive directory: '%s'" % (dirname,), e)
    # Copy CDT log file
server_path = "cdt.txt"
archive_path = os.path.join(dirname, os.path.basename(server_path))
try:
shutil.copy(server_path, archive_path)
except Exception as e:
error("Could not copy cdt results file: '%s' to '%s'" % (server_path, archive_path,), e)
# Copy serverinfo file
server_path = "scripts/server/serverinfo-caldav.xml"
archive_path = os.path.join(dirname, os.path.basename(server_path))
try:
shutil.copy(server_path, archive_path)
except Exception as e:
error("Could not copy server info file: '%s' to '%s'" % (server_path, archive_path,), e)
# Get server logs
logs_path = os.path.join(library_root, "Logs")
archive_path = os.path.join(dirname, "logs")
try:
shutil.copytree(logs_path, archive_path)
except Exception as e:
error("Could not copy server logs: '%s' to '%s'" % (logs_path, archive_path,), e)
# Get server config files
server_path = os.path.join(server_root, "etc", "caldavd")
archive_path = os.path.join(dirname, "etc")
try:
shutil.copytree(server_path, archive_path)
except Exception as e:
error("Could not copy server conf: '%s' to '%s'" % (server_path, archive_path,), e)
server_path = library_root
archive_path = os.path.join(dirname, "Library")
try:
shutil.copytree(server_path, archive_path)
except Exception as e:
error("Could not copy library items: '%s' to '%s'" % (server_path, archive_path,), e)
# Dump OD data
try:
results = ["*** Users"]
results.extend(cmd("dscl %s -readall Users" % (directory_node,))[0].splitlines())
results.append("\n\n*** Groups")
results.extend(cmd("dscl %s -readall Groups" % (directory_node,))[0].splitlines())
results.append("")
with open(os.path.join(dirname, "dscl_dump.txt"), "w") as f:
f.write("\n".join(results))
except Exception as e:
error("Could not dump OD data.", e)
# Now archive the diagnostics data
try:
archive_name = shutil.make_archive(dirname, "gztar", dirname)
except Exception as e:
error("Could not make diagnostics archive.", e)
print "Saved diagnostics to '%s'" % (archive_name,)
| apache-2.0 | -979,777,726,195,420,800 | 32.971223 | 96 | 0.642524 | false |
ikarus23/MifareClassicTool | tools/dump-file-converter/mfd2eml.py | 1 | 1516 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
########################################################################
#
# Copyright 2015 Gerhard Klostermeier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from sys import exit, argv
from binascii import hexlify
def main():
""" Convert a .mfd file (MIFARE Drump) to a .eml file (Proxmark3 emulator). """
# Are there enouth arguments?
if len(argv) != 3:
usage()
# Convert the file line by line.
with open(argv[1], 'rb') as mfdFile, open(argv[2], 'w') as emlFile:
while True:
bytes = mfdFile.read(16)
if not bytes:
break
chars = hexlify(bytes).decode('UTF-8')
emlFile.write(chars + '\n')
def usage():
""" Print the usage. """
print('Usage: ' + argv[0] + ' <mfd-file> <output-file-(eml)>')
    exit(1)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,973,126,354,513,189,000 | 28.72549 | 81 | 0.604881 | false |
OrangeTux/python-snake | snake/field.py | 1 | 1314 | class Field:
""" A field keeps track of all positions on a field and the content of
these posisions.
:param length: Length of the field.
:param height: The height of the field.
"""
def __init__(self, width=10, height=10):
        self.width = width
        self.height = height
self.field = self.create_field()
def create_field(self):
""" Creates a field based on the dimensions.
:return: Dict filled with (x, y) tuples as keys.
"""
        return {(x, y): None for x in range(self.width)
                for y in range(self.height)}
def set_cell_content(self, x, y, content):
""" Set content of a cell.
:param x: The x coordinate of the cell.
:param y: The y coordinate of the cell.
:param content: The content for the cell.
:raises: KeyError when coordinates are invalid.
"""
cell = (x, y)
if cell not in self.field:
raise KeyError
self.field[cell] = content
def get_cell_content(self, x, y):
""" Return content of a cell.
:param x: The x coordinate of the cell.
:param y: The y coordinate of the cell.
:raises: KeyError when coordinates are invalid.
"""
return self.field[(x, y)]
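# Illustrative usage sketch (not part of the original module); the 'snake'
# content value is an assumption.
def _example_usage():  # pragma: no cover - demonstration only
    field = Field(width=10, height=10)
    field.set_cell_content(2, 3, 'snake')
    return field.get_cell_content(2, 3)  # -> 'snake'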
| mit | 5,609,683,131,536,032,000 | 25.816327 | 74 | 0.560122 | false |
hastexo/edx-platform | common/lib/xmodule/xmodule/seq_module.py | 1 | 23976 | """
xModule implementation of a learning sequence
"""
# pylint: disable=abstract-method
import collections
import json
import logging
from datetime import datetime
from lxml import etree
from pkg_resources import resource_string
from pytz import UTC
from six import text_type
from web_fragments.fragment import Fragment
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from xblock.fields import Boolean, Integer, List, Scope, String
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import STUDENT_VIEW, XModule
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)
try:
import newrelic.agent
except ImportError:
newrelic = None # pylint: disable=invalid-name
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class SequenceFields(object):
has_children = True
completion_mode = XBlockCompletionMode.AGGREGATOR
# NOTE: Position is 1-indexed. This is silly, but there are now student
# positions saved on prod, so it's not easy to fix.
position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)
due = Date(
display_name=_("Due Date"),
help=_("Enter the date by which problems are due."),
scope=Scope.settings,
)
hide_after_due = Boolean(
display_name=_("Hide sequence content After Due Date"),
help=_(
"If set, the sequence content is hidden for non-staff users after the due date has passed."
),
default=False,
scope=Scope.settings,
)
is_entrance_exam = Boolean(
display_name=_("Is Entrance Exam"),
help=_(
"Tag this course module as an Entrance Exam. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
class ProctoringFields(object):
"""
Fields that are specific to Proctored or Timed Exams
"""
is_time_limited = Boolean(
display_name=_("Is Time Limited"),
help=_(
"This setting indicates whether students have a limited time"
" to view or interact with this courseware component."
),
default=False,
scope=Scope.settings,
)
default_time_limit_minutes = Integer(
display_name=_("Time Limit in Minutes"),
help=_(
"The number of minutes available to students for viewing or interacting with this courseware component."
),
default=None,
scope=Scope.settings,
)
is_proctored_enabled = Boolean(
display_name=_("Is Proctoring Enabled"),
help=_(
"This setting indicates whether this exam is a proctored exam."
),
default=False,
scope=Scope.settings,
)
exam_review_rules = String(
display_name=_("Software Secure Review Rules"),
help=_(
"This setting indicates what rules the proctoring team should follow when viewing the videos."
),
default='',
scope=Scope.settings,
)
is_practice_exam = Boolean(
display_name=_("Is Practice Exam"),
help=_(
"This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."
),
default=False,
scope=Scope.settings,
)
def _get_course(self):
"""
Return course by course id.
"""
return self.descriptor.runtime.modulestore.get_course(self.course_id) # pylint: disable=no-member
@property
def is_timed_exam(self):
"""
Alias the permutation of above fields that corresponds to un-proctored timed exams
to the more clearly-named is_timed_exam
"""
return not self.is_proctored_enabled and not self.is_practice_exam and self.is_time_limited
@property
def is_proctored_exam(self):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
return self.is_proctored_enabled
@property
def allow_proctoring_opt_out(self):
"""
Returns true if the learner should be given the option to choose between
taking a proctored exam, or opting out to take the exam without proctoring.
"""
return self._get_course().allow_proctoring_opt_out
@is_proctored_exam.setter
def is_proctored_exam(self, value):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
self.is_proctored_enabled = value
@XBlock.wants('proctoring')
@XBlock.wants('verification')
@XBlock.wants('milestones')
@XBlock.wants('credit')
@XBlock.needs('user')
@XBlock.needs('bookmarks')
class SequenceModule(SequenceFields, ProctoringFields, XModule):
"""
Layout module which lays out content in a temporal sequence
"""
js = {
'js': [resource_string(__name__, 'js/src/sequence/display.js')],
}
css = {
'scss': [resource_string(__name__, 'css/sequence/display.scss')],
}
js_module_name = "Sequence"
def __init__(self, *args, **kwargs):
super(SequenceModule, self).__init__(*args, **kwargs)
# If position is specified in system, then use that instead.
position = getattr(self.system, 'position', None)
if position is not None:
assert isinstance(position, int)
self.position = self.system.position
def get_progress(self):
''' Return the total progress, adding total done and total available.
(assumes that each submodule uses the same "units" for progress.)
'''
# TODO: Cache progress or children array?
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
def handle_ajax(self, dispatch, data): # TODO: bounds checking
''' get = request.POST instance '''
if dispatch == 'goto_position':
# set position to default value if either 'position' argument not
# found in request or it is a non-positive integer
position = data.get('position', u'1')
if position.isdigit() and int(position) > 0:
self.position = int(position)
else:
self.position = 1
return json.dumps({'success': True})
raise NotFoundError('Unexpected dispatch type')
@classmethod
def verify_current_content_visibility(cls, date, hide_after_date):
"""
Returns whether the content visibility policy passes
for the given date and hide_after_date values and
the current date-time.
"""
return (
not date or
not hide_after_date or
datetime.now(UTC) < date
)
def student_view(self, context):
context = context or {}
self._capture_basic_metrics()
banner_text = None
special_html_view = self._hidden_content_student_view(context) or self._special_exam_student_view()
if special_html_view:
masquerading_as_specific_student = context.get('specific_masquerade', False)
banner_text, special_html = special_html_view
if special_html and not masquerading_as_specific_student:
return Fragment(special_html)
else:
banner_text = self._gated_content_staff_banner()
return self._student_view(context, banner_text)
def _special_exam_student_view(self):
"""
Checks whether this sequential is a special exam. If so, returns
a banner_text or the fragment to display depending on whether
staff is masquerading.
"""
if self.is_time_limited:
special_exam_html = self._time_limited_student_view()
if special_exam_html:
banner_text = _("This exam is hidden from the learner.")
return banner_text, special_exam_html
def _hidden_content_student_view(self, context):
"""
Checks whether the content of this sequential is hidden from the
runtime user. If so, returns a banner_text or the fragment to
display depending on whether staff is masquerading.
"""
course = self._get_course()
if not self._can_user_view_content(course):
if course.self_paced:
banner_text = _("Because the course has ended, this assignment is hidden from the learner.")
else:
banner_text = _("Because the due date has passed, this assignment is hidden from the learner.")
hidden_content_html = self.system.render_template(
'hidden_content.html',
{
'self_paced': course.self_paced,
'progress_url': context.get('progress_url'),
}
)
return banner_text, hidden_content_html
def _gated_content_staff_banner(self):
"""
Checks whether the content is gated for learners. If so,
returns a banner_text depending on whether user is staff.
"""
milestones_service = self.runtime.service(self, 'milestones')
if milestones_service:
content_milestones = milestones_service.get_course_content_milestones(
self.course_id, self.location, 'requires'
)
banner_text = _('This subsection is unlocked for learners when they meet the prerequisite requirements.')
if content_milestones and self.runtime.user_is_staff:
return banner_text
def _can_user_view_content(self, course):
"""
Returns whether the runtime user can view the content
of this sequential.
"""
hidden_date = course.end if course.self_paced else self.due
return (
self.runtime.user_is_staff or
self.verify_current_content_visibility(hidden_date, self.hide_after_due)
)
def is_user_authenticated(self, context):
# NOTE (CCB): We default to true to maintain the behavior in place prior to allowing anonymous access access.
return context.get('user_authenticated', True)
def _student_view(self, context, banner_text=None):
"""
Returns the rendered student view of the content of this
sequential. If banner_text is given, it is added to the
content.
"""
display_items = self.get_display_items()
self._update_position(context, len(display_items))
fragment = Fragment()
params = {
'items': self._render_student_view_for_items(context, display_items, fragment),
'element_id': self.location.html_id(),
'item_id': text_type(self.location),
'position': self.position,
'tag': self.location.block_type,
'ajax_url': self.system.ajax_url,
'next_url': context.get('next_url'),
'prev_url': context.get('prev_url'),
'banner_text': banner_text,
'disable_navigation': not self.is_user_authenticated(context),
}
fragment.add_content(self.system.render_template("seq_module.html", params))
self._capture_full_seq_item_metrics(display_items)
self._capture_current_unit_metrics(display_items)
return fragment
def _update_position(self, context, number_of_display_items):
"""
Update the user's sequential position given the context and the
number_of_display_items
"""
position = context.get('position')
if position:
self.position = position
# If we're rendering this sequence, but no position is set yet,
# or exceeds the length of the displayable items,
# default the position to the first element
if context.get('requested_child') == 'first':
self.position = 1
elif context.get('requested_child') == 'last':
self.position = number_of_display_items or 1
elif self.position is None or self.position > number_of_display_items:
self.position = 1
def _render_student_view_for_items(self, context, display_items, fragment):
"""
Updates the given fragment with rendered student views of the given
display_items. Returns a list of dict objects with information about
the given display_items.
"""
is_user_authenticated = self.is_user_authenticated(context)
bookmarks_service = self.runtime.service(self, 'bookmarks')
context['username'] = self.runtime.service(self, 'user').get_current_user().opt_attrs.get(
'edx-platform.username')
display_names = [
self.get_parent().display_name_with_default,
self.display_name_with_default
]
contents = []
for item in display_items:
# NOTE (CCB): This seems like a hack, but I don't see a better method of determining the type/category.
item_type = item.get_icon_class()
usage_id = item.scope_ids.usage_id
if item_type == 'problem' and not is_user_authenticated:
log.info(
'Problem [%s] was not rendered because anonymous access is not allowed for graded content',
usage_id
)
continue
show_bookmark_button = False
is_bookmarked = False
if is_user_authenticated:
show_bookmark_button = True
is_bookmarked = bookmarks_service.is_bookmarked(usage_key=usage_id)
context['show_bookmark_button'] = show_bookmark_button
context['bookmarked'] = is_bookmarked
rendered_item = item.render(STUDENT_VIEW, context)
fragment.add_fragment_resources(rendered_item)
iteminfo = {
'content': rendered_item.content,
'page_title': getattr(item, 'tooltip_title', ''),
'type': item_type,
'id': text_type(usage_id),
'bookmarked': is_bookmarked,
'path': " > ".join(display_names + [item.display_name_with_default]),
}
contents.append(iteminfo)
return contents
def _locations_in_subtree(self, node):
"""
The usage keys for all descendants of an XBlock/XModule as a flat list.
Includes the location of the node passed in.
"""
stack = [node]
locations = []
while stack:
curr = stack.pop()
locations.append(curr.location)
if curr.has_children:
stack.extend(curr.get_children())
return locations
def _capture_basic_metrics(self):
"""
Capture basic information about this sequence in New Relic.
"""
if not newrelic:
return
newrelic.agent.add_custom_parameter('seq.block_id', unicode(self.location))
newrelic.agent.add_custom_parameter('seq.display_name', self.display_name or '')
newrelic.agent.add_custom_parameter('seq.position', self.position)
newrelic.agent.add_custom_parameter('seq.is_time_limited', self.is_time_limited)
def _capture_full_seq_item_metrics(self, display_items):
"""
Capture information about the number and types of XBlock content in
the sequence as a whole. We send this information to New Relic so that
we can do better performance analysis of courseware.
"""
if not newrelic:
return
# Basic count of the number of Units (a.k.a. VerticalBlocks) we have in
# this learning sequence
newrelic.agent.add_custom_parameter('seq.num_units', len(display_items))
# Count of all modules (leaf nodes) in this sequence (e.g. videos,
# problems, etc.) The units (verticals) themselves are not counted.
all_item_keys = self._locations_in_subtree(self)
newrelic.agent.add_custom_parameter('seq.num_items', len(all_item_keys))
# Count of all modules by block_type (e.g. "video": 2, "discussion": 4)
block_counts = collections.Counter(usage_key.block_type for usage_key in all_item_keys)
for block_type, count in block_counts.items():
newrelic.agent.add_custom_parameter('seq.block_counts.{}'.format(block_type), count)
def _capture_current_unit_metrics(self, display_items):
"""
Capture information about the current selected Unit within the Sequence.
"""
if not newrelic:
return
# Positions are stored with indexing starting at 1. If we get into a
# weird state where the saved position is out of bounds (e.g. the
# content was changed), avoid going into any details about this unit.
if 1 <= self.position <= len(display_items):
# Basic info about the Unit...
current = display_items[self.position - 1]
newrelic.agent.add_custom_parameter('seq.current.block_id', unicode(current.location))
newrelic.agent.add_custom_parameter('seq.current.display_name', current.display_name or '')
# Examining all items inside the Unit (or split_test, conditional, etc.)
child_locs = self._locations_in_subtree(current)
newrelic.agent.add_custom_parameter('seq.current.num_items', len(child_locs))
curr_block_counts = collections.Counter(usage_key.block_type for usage_key in child_locs)
for block_type, count in curr_block_counts.items():
newrelic.agent.add_custom_parameter('seq.current.block_counts.{}'.format(block_type), count)
def _time_limited_student_view(self):
"""
Delegated rendering of a student view when in a time
limited view. This ultimately calls down into edx_proctoring
pip installed djangoapp
"""
# None = no overridden view rendering
view_html = None
proctoring_service = self.runtime.service(self, 'proctoring')
credit_service = self.runtime.service(self, 'credit')
verification_service = self.runtime.service(self, 'verification')
# Is this sequence designated as a Timed Examination, which includes
# Proctored Exams
feature_enabled = (
proctoring_service and
credit_service and
self.is_time_limited
)
if feature_enabled:
user_id = self.runtime.user_id
user_role_in_course = 'staff' if self.runtime.user_is_staff else 'student'
course_id = self.runtime.course_id
content_id = self.location
context = {
'display_name': self.display_name,
'default_time_limit_mins': (
self.default_time_limit_minutes if
self.default_time_limit_minutes else 0
),
'is_practice_exam': self.is_practice_exam,
'allow_proctoring_opt_out': self.allow_proctoring_opt_out,
'due_date': self.due
}
# inject the user's credit requirements and fulfillments
if credit_service:
credit_state = credit_service.get_credit_state(user_id, course_id)
if credit_state:
context.update({
'credit_state': credit_state
})
# inject verification status
if verification_service:
verification_status, __ = verification_service.get_status(user_id)
context.update({
'verification_status': verification_status,
'reverify_url': verification_service.reverify_url(),
})
# See if the edx-proctoring subsystem wants to present
# a special view to the student rather
# than the actual sequence content
#
# This will return None if there is no
# overridden view to display given the
# current state of the user
view_html = proctoring_service.get_student_view(
user_id=user_id,
course_id=course_id,
content_id=content_id,
context=context,
user_role=user_role_in_course
)
return view_html
def get_icon_class(self):
child_classes = set(child.get_icon_class()
for child in self.get_children())
new_class = 'other'
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
class SequenceDescriptor(SequenceFields, ProctoringFields, MakoModuleDescriptor, XmlDescriptor):
"""
A Sequence's Descriptor object
"""
mako_template = 'widgets/sequence-edit.html'
module_class = SequenceModule
resources_dir = None
show_in_read_only_mode = True
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')],
}
js_module_name = "SequenceDescriptor"
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
children.append(child_block.scope_ids.usage_id)
except Exception as e:
log.exception("Unable to load child when parsing Sequence. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(e))
continue
return {}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('sequential')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
@property
def non_editable_metadata_fields(self):
"""
`is_entrance_exam` should not be editable in the Studio settings editor.
"""
non_editable_fields = super(SequenceDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(self.fields['is_entrance_exam'])
return non_editable_fields
def index_dictionary(self):
"""
Return dictionary prepared with module content and type for indexing.
"""
# return key/value fields in a Python dict object
# values may be numeric / string or dict
# default implementation is an empty dict
xblock_body = super(SequenceDescriptor, self).index_dictionary()
html_body = {
"display_name": self.display_name,
}
if "content" in xblock_body:
xblock_body["content"].update(html_body)
else:
xblock_body["content"] = html_body
xblock_body["content_type"] = "Sequence"
return xblock_body
class HighlightsFields(object):
"""Only Sections have summaries now, but we may expand that later."""
highlights = List(
help=_("A list summarizing what students should look forward to in this section."),
scope=Scope.settings
)
class SectionModule(HighlightsFields, SequenceModule):
"""Module for a Section/Chapter."""
class SectionDescriptor(HighlightsFields, SequenceDescriptor):
"""Descriptor for a Section/Chapter."""
module_class = SectionModule
| agpl-3.0 | 7,013,811,450,100,319,000 | 36.75748 | 117 | 0.611778 | false |
h2oai/h2o-dev | h2o-py/tests/testdir_algos/deeplearning/pyunit_cv_cars_deeplearning_medium.py | 1 | 4554 | from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def cv_cars_dl():
# read in the dataset and construct training set (and validation set)
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type of model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
# 2:multinomial
problem = random.sample(list(range(3)),1)[0]
# pick the predictors and the correct response column
predictors = ["displacement","power","weight","acceleration","year"]
if problem == 1:
response_col = "economy_20mpg"
cars[response_col] = cars[response_col].asfactor()
elif problem == 2:
response_col = "cylinders"
cars[response_col] = cars[response_col].asfactor()
else:
response_col = "economy"
print("Response column: {0}".format(response_col))
## cross-validation
# 1. basic
dl = H2ODeepLearningEstimator(nfolds=random.randint(3,10),fold_assignment="Modulo",hidden=[20,20],epochs=10)
dl.train(x=predictors, y=response_col, training_frame=cars)
# 2. check that cv metrics are different over repeated "Random" runs
nfolds = random.randint(3,10)
dl1 = H2ODeepLearningEstimator(nfolds=nfolds,fold_assignment="Random",hidden=[20,20],epochs=10)
dl1.train(x=predictors,y=response_col,training_frame=cars)
    dl2 = H2ODeepLearningEstimator(nfolds=nfolds,fold_assignment="Random",hidden=[20,20],epochs=10)
    # dl2 must also be trained before its cross-validation metrics can be compared with dl1's
    dl2.train(x=predictors,y=response_col,training_frame=cars)
try:
pyunit_utils.check_models(dl1, dl2, True)
assert False, "Expected models to be different over repeated Random runs"
except AssertionError:
assert True
# 3. folds_column
num_folds = random.randint(2,5)
fold_assignments = h2o.H2OFrame([[random.randint(0,num_folds-1)] for _ in range(cars.nrow)])
fold_assignments.set_names(["fold_assignments"])
cars = cars.cbind(fold_assignments)
dl = H2ODeepLearningEstimator(keep_cross_validation_models=True, keep_cross_validation_predictions=True,
hidden=[20, 20], epochs=10)
dl.train(x=predictors,y=response_col,training_frame=cars,fold_column="fold_assignments")
num_cv_models = len(dl._model_json['output']['cross_validation_models'])
assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
"{1}".format(num_folds, num_cv_models)
cv_model1 = h2o.get_model(dl._model_json['output']['cross_validation_models'][0]['name'])
cv_model2 = h2o.get_model(dl._model_json['output']['cross_validation_models'][1]['name'])
# 4. keep_cross_validation_predictions
cv_predictions = dl1._model_json['output']['cross_validation_predictions']
## boundary cases
# 1. nfolds = number of observations (leave-one-out cross-validation)
dl = H2ODeepLearningEstimator(nfolds=cars.nrow, fold_assignment="Modulo",hidden=[20,20],epochs=10)
dl.train(x=predictors,y=response_col,training_frame=cars)
# 2. nfolds = 0
dl = H2ODeepLearningEstimator(nfolds=0,hidden=[20,20],epochs=10)
dl.train(x=predictors,y=response_col,training_frame=cars)
# 3. cross-validation and regular validation attempted
dl = H2ODeepLearningEstimator(nfolds=random.randint(3,10),hidden=[20,20],epochs=10)
dl.train(x=predictors, y=response_col, training_frame=cars, validation_frame=cars)
## error cases
# 1. nfolds == 1 or < 0
try:
dl = H2ODeepLearningEstimator(nfolds=random.sample([-1,1], 1)[0],hidden=[20,20],epochs=10)
dl.train(x=predictors, y=response_col, training_frame=cars)
assert False, "Expected model-build to fail when nfolds is 1 or < 0"
except EnvironmentError:
assert True
# 2. more folds than observations
try:
dl = H2ODeepLearningEstimator(nfolds=cars.nrow+1,fold_assignment="Modulo",hidden=[20,20],epochs=10)
dl.train(x=predictors, y=response_col, training_frame=cars)
assert False, "Expected model-build to fail when nfolds > nobs"
except EnvironmentError:
assert True
# 3. fold_column and nfolds both specified
try:
dl = H2ODeepLearningEstimator(nfolds=3, hidden=[20, 20], epochs=10)
dl.train(x=predictors, y=response_col, fold_column="fold_assignments", training_frame=cars)
assert False, "Expected model-build to fail when fold_column and nfolds both specified"
except EnvironmentError:
assert True
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_cars_dl)
else:
cv_cars_dl()
| apache-2.0 | -6,992,844,085,351,824,000 | 38.947368 | 113 | 0.713878 | false |
SaintAttila/attila | attila/plugins.py | 1 | 8427 | """
Infrastructure for dealing with plugins.
"""
import pkg_resources
import warnings
from collections.abc import Mapping
from .exceptions import PluginExistsError, PluginNotFoundError, InvalidPluginError, verify_type
__author__ = 'Aaron Hosford'
__all__ = [
'PluginGroup',
'CONFIG_LOADERS',
'URL_SCHEMES',
'load_plugins',
'config_loader',
'url_scheme',
]
class PluginGroup(Mapping):
"""
A PluginGroup is a collection of plugins registered under the same entry point group name. It
supports both install-time and run-time registration of plugins, case-insensitive lookup by
name, and plugin type-checking.
"""
def __init__(self, name, value_type=None):
verify_type(name, str, non_empty=True)
if value_type is not None:
verify_type(value_type, type)
self._name = name
self._value_type = value_type
self._original_names = {}
self._registry = {}
def load(self, warn=True):
"""
Load any pre-registered plugins.
:param warn: Whether to warn if a registered plugin could not be loaded.
:return: None
"""
for entry_point in pkg_resources.iter_entry_points(group=self._name):
try:
self.register(entry_point.name, entry_point.load())
except Exception as exc:
if warn:
warnings.warn(str(exc))
def register(self, name, value):
"""
Register a new plugin.
:param name: The name of the plugin.
:param value: The plugin.
:return: None
"""
verify_type(name, str, non_empty=True)
if name.lower() in self._registry and self._registry[name.lower()] != value:
raise PluginExistsError("Another plugin by this name has already been registered: %s" % name)
if self._value_type is not None and not isinstance(value, self._value_type):
raise InvalidPluginError("Plugin %s is not a/an %s." % (name, self._value_type.__name__))
self._original_names[name] = name
self._registry[name.lower()] = value
def __getitem__(self, name):
if not isinstance(name, str):
return NotImplemented
if name.lower() not in self._registry:
raise PluginNotFoundError(name)
return self._registry[name.lower()]
def __setitem__(self, name, value):
if not isinstance(name, str):
return NotImplemented
self.register(name, value)
def __iter__(self):
return iter(self._original_names)
def __contains__(self, name):
if not isinstance(name, str):
return NotImplemented
return name.lower() in self._registry
def __len__(self):
return len(self._registry)
def get(self, name, default=None):
"""
Get the plugin by name.
:param name: The name of the plugin.
:param default: The default value if the plugin does not exist.
:return: The plugin, or the default.
"""
verify_type(name, str, non_empty=True)
return self._registry.get(name.lower(), default)
def plugin(self, name=NotImplemented, value=NotImplemented):
"""
A decorator for in-line registration of plugins.
Registering a plugin function under its own name to group PLUGIN_GROUP::
@PLUGIN_GROUP.plugin
def aptly_named_plugin_function(arg1, arg2):
...
Registering a plugin function to a different name to group PLUGIN_GROUP::
@PLUGIN_GROUP.plugin('BetterPluginName')
def less_aptly_named_plugin_function(arg1, arg2):
...
:param value: The value to be registered as a plugin.
:param name: The name to register the plugin under.
:return: The value, unchanged, after registration, or a parameter-free plugin decorator.
"""
assert name is not NotImplemented or value is not NotImplemented
if value is NotImplemented and not isinstance(name, str):
value = name
name = NotImplemented
if name is NotImplemented:
name = value.__name__
verify_type(name, str, non_empty=True)
if value is NotImplemented:
def registrar(obj):
"""
A parameter-free decorator for in-line registration of plugins.
:param obj: The value to be registered as a plugin.
:return: The value, unchanged, after registration.
"""
self.register(name, obj)
return obj
return registrar
self.register(name, value)
return value
CONFIG_LOADERS = PluginGroup('attila.config_loader')
URL_SCHEMES = PluginGroup('attila.url_scheme')
def load_plugins(warn=True):
"""
Load all registered Attila plugins.
Another package can register a plugin for use by attila by setting the entry_points parameter in
the other package's setup.py script. See http://stackoverflow.com/a/9615473/4683578 and/or
https://pythonhosted.org/setuptools/setuptools.use_html#dynamic-discovery-of-services-and-plugins
for an explanation of how plugins work in Python.
There are two distinct types of plugins that attila itself recognizes:
* Config Loaders: These are loaded from the plugin group 'attila.config_loaders', and must be
      either subclasses of the attila.abc.configurations.Configurable class, or a
      class method which accepts a configparser.ConfigParser and a section name as its arguments.
* Configured Objects: These are loaded from the plugin group 'attila.configured_object', and
must be *instances* of the attila.abc.configurations.Configurable class.
Each of these plugins is registered using the entry point name specified in the registering
package's setup.py. The registered plugins can then be accessed via the attila.plugins.iter_*()
and attila.plugins.get_*() methods.
The Configurable Types are all loaded before any Configured Objects are loaded, allowing the
    Configured Objects to be loaded, via the load_object() function, from a config section with a
Type parameter that refers to a Configurable Type.
"""
CONFIG_LOADERS.load(warn)
URL_SCHEMES.load(warn)
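# Illustrative sketch (not part of this module): how another distribution
# would pre-register a plugin at install time via setuptools entry points.
# The package, module and object names below are assumptions.
#
#   # in the other package's setup.py
#   setup(
#       ...,
#       entry_points={
#           'attila.config_loader': [
#               'MyLoader = mypackage.loaders:MyLoader',
#           ],
#       },
#   )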
def config_loader(name=NotImplemented, value=NotImplemented):
"""
A decorator for in-line registration of config loaders.
Registering a config loader function under its own name::
@config_loader
def aptly_named_config_loader(string):
...
Registering a config loader function under a different name::
@config_loader('BetterConfigLoaderName')
def less_aptly_named_config_loader(string):
...
Registering a config loader class under its own name::
@config_loader
class AptlyNamedConfigLoader(attila.abc.configurations.Configurable):
...
Registering a config loader class under a different name::
@config_loader('BetterConfigLoaderName')
class LessAptlyNamedConfigLoader(attila.abc.configurations.Configurable):
...
IMPORTANT NOTE: If you use this decorator, but your configuration loader
isn't available, check to make sure the module you used it in has been
imported; the decorator has to be executed as a statement before it takes
effect.
:param name: The name to register the plugin under.
:param value: The value to register as a plugin.
:return: The value, unchanged, after registration, or a parameter-free plugin decorator.
"""
return CONFIG_LOADERS.plugin(name, value)
def url_scheme(name=NotImplemented, value=NotImplemented):
"""
A decorator for in-line registration of URL schemes.
Registering a URL scheme function under its own name::
@url_scheme
def aptly_named_url_scheme(string):
...
Registering a URL scheme function under a different name::
@url_scheme('BetterURLSchemeName')
def less_aptly_named_url_scheme(string):
...
:param name: The name to register the plugin under.
:param value: The value to register as a plugin.
:return: The value, unchanged, after registration, or a parameter-free plugin decorator.
"""
return URL_SCHEMES.plugin(name, value)
| mit | -5,618,134,764,142,204,000 | 32.3083 | 105 | 0.647324 | false |
tkemmer/ball | data/Amber/converter/parmConverter.py | 2 | 1757 | #!/usr/bin/python
import sys
from amberparser import *
def printUsage():
print "\nUsage: ./parmConvert.py -i filename [-m file1 .. fileN] [-o output_file]\n"
print "paramConvert accepts the following parameters:"
print "\t--help Show this help and exit."
print "\t-p,--parm filename The pram*.dat file which should be processed"
print "\t-i,--in file1, .. , fileN The .in files that should be processed"
print "\t-m,--mod file1 .. fileN A list of frcmod files"
inputFile=""
inFiles=[]
modFiles=[]
outputFile=""
i = 1
while i < len(sys.argv):
p = sys.argv[i]
if p == "--help":
printUsage()
exit(0)
elif p == "-p" or p == "--parm":
i += 1
inputFile = sys.argv[i]
elif p == "-o" or p == "--output":
i += 1
outputFile = sys.argv[i]
elif p == "-i" or p == "--in":
i += 1
start = i
while i < len(sys.argv) and not sys.argv[i].startswith("-"):
i += 1
inFiles=sys.argv[start:i]
i -= 1
elif p == "-m" or p == "--mod":
i += 1
start = i
while i < len(sys.argv) and not sys.argv[i].startswith("-"):
i += 1
modFiles=sys.argv[start:i]
else:
print "Unknown parameter " + sys.argv[i]
printUsage()
exit(-1)
i += 1
if inputFile == "":
print "You must specify an input file!"
printUsage()
exit(-1)
if outputFile == "":
outputFile = inputFile.split(".")[0]
#try:
# input = open(inputFile)
#except:
# print "You must specify a valid input file!"
# exit(-1)
try:
output = open(outputFile, "w")
except:
print "Could not open " + outputFile + " for writing"
exit(-1)
#line = input.readline()
#output.write(";" + line)
parser = AmberParser()
param = parser.parse(inputFile)
inparser = InParser()
param2 = inparser.parse(inFiles)
param.write(output)
param2.write(output)
| lgpl-2.1 | 4,100,769,552,055,874,000 | 20.168675 | 85 | 0.611838 | false |
sam81/pysoundanalyser | pysoundanalyser/win_waveform_plot.py | 1 | 6198 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2017 Samuele Carcagno <[email protected]>
# This file is part of pysoundanalyser
# pysoundanalyser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pysoundanalyser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pysoundanalyser. If not, see <http://www.gnu.org/licenses/>.
from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals
import matplotlib
matplotlib.rcParams['path.simplify'] = False
from .pyqtver import*
if pyqtversion == 4:
from PyQt4 import QtGui, QtCore
from PyQt4.QtGui import QAction, QInputDialog
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
matplotlib.rcParams['backend'] = "Qt4Agg"
matplotlib.rcParams['backend.qt4'] = "PyQt4"
elif pyqtversion == -4:
from PySide import QtGui, QtCore
from PySide.QtGui import QAction, QInputDialog
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
matplotlib.rcParams['backend'] = "Qt4Agg"
matplotlib.rcParams['backend.qt4'] = "PySide"
elif pyqtversion == 5:
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QAction, QInputDialog
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
matplotlib.rcParams['backend'] = "Qt5Agg"
# Matplotlib Figure object
from matplotlib.figure import Figure
from matplotlib.widgets import Cursor
from matplotlib import font_manager
from .dialog_get_font import*
from .win_generic_plot import*
import numpy as np
class waveformPlot(genericPlot):
def __init__(self, parent, sound, prm):
genericPlot.__init__(self, parent, prm)
self.currLocale = self.parent().prm['data']['currentLocale']
#self.currLocale = prm['data']['currentLocale']
self.currLocale.setNumberOptions(self.currLocale.OmitGroupSeparator | self.currLocale.RejectGroupSeparator)
self.sound = sound
self.lineCol = pltColorFromQColor(self.prm['pref']['lineColor1'])
self.lineWidth = self.prm['pref']['line_width']
self.xAxisLabel = self.prm['pref']['waveform_x_axis_label']
self.yAxisLabel = self.prm['pref']['waveform_y_axis_label']
self.getData()
self.plotData()
self.setAxesLabels()
self.canvas.draw()
self.setWindowTitle(self.sound['label'] + ' [' + self.sound['chan'] +']')
def getData(self):
self.x = np.arange(len(self.sound['wave']))/self.sound['fs'] #self.sound['timeArray']
self.y = self.sound['wave']
def setAxesLabels(self):
self.axes.set_xlabel(self.xAxisLabel, color=self.axesLabelColor, fontproperties = self.labelFont)
self.axes.set_ylabel(self.yAxisLabel, color=self.axesLabelColor, fontproperties = self.labelFont)
def plotData(self):
self.line, = self.axes.plot(self.x, self.y, color=self.lineCol)
self.xminWidget.setText(self.currLocale.toString(self.axes.get_xlim()[0]))
self.xmaxWidget.setText(self.currLocale.toString(self.axes.get_xlim()[1]))
self.yminWidget.setText(self.currLocale.toString(self.axes.get_ylim()[0]))
self.ymaxWidget.setText(self.currLocale.toString(self.axes.get_ylim()[1]))
def createAdditionalMenus(self):
self.editLineWidthAction = QAction(self.tr('Line Width'), self)
self.editLineWidthAction.triggered.connect(self.onChangeLineWidth)
self.editLineColorAction = QAction(self.tr('Line Color'), self)
self.editLineColorAction.triggered.connect(self.onChangeLineColor)
def defineMenusLayout(self):
self.linePropertiesMenu.addAction(self.editLineWidthAction)
self.linePropertiesMenu.addAction(self.editMajorTickLengthAction)
self.linePropertiesMenu.addAction(self.editMajorTickWidthAction)
self.linePropertiesMenu.addAction(self.editMinorTickLengthAction)
self.linePropertiesMenu.addAction(self.editMinorTickWidthAction)
self.linePropertiesMenu.addAction(self.editGridLineWidthAction)
self.linePropertiesMenu.addAction(self.editSpinesLineWidthAction)
self.colorPropertiesMenu.addAction(self.editLineColorAction)
self.colorPropertiesMenu.addAction(self.editBackgroundColorAction)
self.colorPropertiesMenu.addAction(self.editCanvasColorAction)
self.colorPropertiesMenu.addAction(self.editAxesColorAction)
self.colorPropertiesMenu.addAction(self.editGridColorAction)
self.colorPropertiesMenu.addAction(self.editTickLabelColorAction)
self.colorPropertiesMenu.addAction(self.editAxesLabelColorAction)
self.labelPropertiesMenu.addAction(self.editXLabelAction)
self.labelPropertiesMenu.addAction(self.editYLabelAction)
self.labelPropertiesMenu.addAction(self.editLabelFontAction)
self.labelPropertiesMenu.addAction(self.editTickLabelFontAction)
def onChangeLineColor(self):
col = QColorDialog.getColor()
if col.isValid():
self.lineCol = pltColorFromQColor(col)
self.line.set_color(self.lineCol)
self.canvas.draw()
def onChangeLineWidth(self):
msg = self.tr('Line Width:')
value, ok = QInputDialog.getDouble(self, self.tr('Input Dialog'), msg, self.lineWidth, 0)
if ok:
self.lineWidth = value
self.line.set_linewidth(self.lineWidth)
self.canvas.draw()
| gpl-3.0 | 5,850,011,686,210,301,000 | 51.084034 | 125 | 0.729106 | false |
claudemuller/masfir | dirmuncher.py | 1 | 1217 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import enchant
class Dirmuncher:
delimeters = ['.', ' ']
def __init__(self, directory):
self.directory = directory
self.dictionary = enchant.Dict("en_US")
def getFiles(self):
result = {}
for dirname, dirnames, filenames in os.walk(self.directory):
# Get subdirectories
# for subdirname in dirnames:
# print(os.path.join(dirname, subdirname))
# Get filenames
# for filename in filenames:
# print(os.path.join(dirname, filename))
result[dirname] = filenames
return result
def process(self, filename):
name = []
for delimeter in self.delimeters:
for word in filename.split(delimeter):
if self.lookup(word):
name.append(word)
return name
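    # Illustrative behaviour sketch (hypothetical file name; actual matches depend
    # on the enchant en_US dictionary): process() splits the name on each delimiter
    # and keeps only the words the dictionary recognises, e.g.
    #   muncher.process("The.Quick.Fox.2012.mkv")  ->  ["The", "Quick", "Fox"]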
def lookup(self, word):
return self.dictionary.check(word)
if __name__ == "__main__":
muncher = Dirmuncher('movies')
terms = muncher.getFiles()
for directory, filenames in terms.items():
for filename in filenames:
print(muncher.process(filename))
| mit | 1,979,910,930,079,346,200 | 22.862745 | 68 | 0.564503 | false |
whtsky/WeRoBot | werobot/logger.py | 1 | 2894 | # -*- coding:utf-8 -*-
import six
import sys
import time
import logging
try:
import curses
assert curses
except ImportError:
curses = None
logger = logging.getLogger("WeRoBot")
def enable_pretty_logging(logger, level='info'):
"""
    Enable pretty (formatted) logging output according to the configuration.
    :param logger: the logger object to configure
    :param level: the logging level to set on the logger
"""
logger.setLevel(getattr(logging, level.upper()))
if not logger.handlers:
# Set up color if we are in a tty and curses is installed
color = False
if curses and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
            except Exception:
                # curses can fail on terminals without terminfo; fall back to plain output
                pass
channel = logging.StreamHandler()
channel.setFormatter(_LogFormatter(color=color))
logger.addHandler(channel)
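# Minimal usage sketch (hypothetical snippet, not part of the original module):
#   from werobot.logger import logger, enable_pretty_logging
#   enable_pretty_logging(logger, level='debug')
#   logger.info("timestamped output, colored when stderr is a tty")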
class _LogFormatter(logging.Formatter):
def __init__(self, color, *args, **kwargs):
logging.Formatter.__init__(self, *args, **kwargs)
self._color = color
if color:
fg_color = (
curses.tigetstr("setaf") or curses.tigetstr("setf") or ""
)
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = six.text_type(fg_color, "ascii")
self._colors = {
logging.DEBUG: six.text_type(
curses.tparm(fg_color, 4), "ascii"
), # Blue
logging.INFO: six.text_type(
curses.tparm(fg_color, 2), "ascii"
), # Green
logging.WARNING: six.text_type(
curses.tparm(fg_color, 3), "ascii"
), # Yellow
logging.ERROR: six.text_type(
curses.tparm(fg_color, 1), "ascii"
), # Red
}
self._normal = six.text_type(curses.tigetstr("sgr0"), "ascii")
def format(self, record):
try:
record.message = record.getMessage()
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = time.strftime(
"%y%m%d %H:%M:%S", self.converter(record.created)
)
prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
record.__dict__
if self._color:
prefix = (
self._colors.get(record.levelno, self._normal) + prefix +
self._normal
)
formatted = prefix + " " + record.message
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
formatted = formatted.rstrip() + "\n" + record.exc_text
return formatted.replace("\n", "\n ")
| mit | -362,991,311,726,822,200 | 30.955056 | 75 | 0.512307 | false |
3dfxsoftware/cbss-addons | account_invoice_global_discount/wizard/discount_wizard.py | 1 | 1981 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class DiscountWizard(osv.osv):
_name = "discount.wizard"
_description = "Discount Wizard"
_columns = {
'discount': fields.float('Discount percentage', required=True),
'period_from': fields.many2one('account.period', 'Start Period'),
'period_to': fields.many2one('account.period', 'End Period'),
}
    _defaults = {
'discount': lambda *args: 0
}
def apply_discount(self, cr, uid, data, context):
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
for invoice in invoice_obj.browse(cr, uid, data['ids'], context=context):
invoice_line_obj.write(cr, uid, [line.id for line in invoice.invoice_line], {'discount': data['form']['discount']}, context=context,)
return {}
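        # Shape of the wizard call data consumed above (hypothetical values):
        #   data = {'ids': [7, 8], 'form': {'discount': 10.0}}
        # i.e. every line of invoices 7 and 8 gets a 10% discount written to it.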
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 | -8,941,316,200,682,872,000 | 39.428571 | 145 | 0.610298 | false |
bollu/polymage | sandbox/apps/python/img_proc/campipe/polymage_campipe.py | 1 | 11582 | from __init__ import *
import sys
import subprocess
import numpy as np
from fractions import Fraction
sys.path.insert(0, ROOT)
from compiler import *
from constructs import *
def camera_pipe(pipe_data):
# Pipeline Variables
x = Variable(Int, "x")
y = Variable(Int, "y")
c = Variable(Int, "c")
v = Variable(Int, "v")
# Pipeline Parameters
R = Parameter(Int, "R") # image rows
C = Parameter(Int, "C") # image cols
kelvin = Parameter(Float, "colour_temp") # temperature
gamma = Parameter(Float, "gamma") # gamma value
contrast = Parameter(Float, "contrast") # colour contrast
# register in the dictionary
pipe_data['R'] = R
pipe_data['C'] = C
# Intervals:
# shifts in each dimension
ghost_x = 12
ghost_y = 16
# camera_pipe intervals :
# bounds for input image
row = Interval(Int, 0, R-1)
col = Interval(Int, 0, C-1)
# bounds for denoise function
ghost_zone_2x = Interval(Int, (ghost_x-4), (R-24-ghost_x+6) - 1)
ghost_zone_2y = Interval(Int, (ghost_y-4), (C-32-ghost_y+6) - 1)
# ghost zone without any offset(ghost }:))
ghost_zone_0x = Interval(Int, (ghost_x-0), (R-24-ghost_x+0) - 1)
ghost_zone_0y = Interval(Int, (ghost_y-0), (C-32-ghost_y+0) - 1)
# bounds for g_gr, r_r, b_b, g_gb
half_ghost_zone_2x = Interval(Int, ((ghost_x)//2) - 2, ((R-24-ghost_x)//2 + 2))
half_ghost_zone_2y = Interval(Int, ((ghost_y)//2) - 2, ((C-32-ghost_y)//2 + 2))
# bounds for g_r, g_b
half_ghost_zone_1x = Interval(Int, ((ghost_x)//2) - 1, ((R-24-ghost_x)//2 + 1))
half_ghost_zone_1y = Interval(Int, ((ghost_y)//2) - 1, ((C-32-ghost_y)//2 + 1))
# bounds for b_r, b_gr, r_gr, b_gb, r_gb, r_b
half_ghost_zone_0x = Interval(Int, ((ghost_x)//2) - 0, ((R-24-ghost_x)//2 + 0))
half_ghost_zone_0y = Interval(Int, ((ghost_y)//2) - 0, ((C-32-ghost_y)//2 + 0))
# bounds for colour channels
rgb = Interval(Int, 0, 2)
grbg = Interval(Int, 0, 3)
# bound for LUT
lut_range = Interval(Int, -32768, 32767)
# Image Inputs
matrix_3200 = Image(Float, "matrix_3200", [3, 4])
matrix_7000 = Image(Float, "matrix_7000", [3, 4])
img = Image(Short, "img", [R, C])
# Pipeline
# ========
# 1. Hot Pixel Suppression / Denoising
x_max = Max(img(x-2, y), img(x+2, y))
y_max = Max(img(x, y-2), img(x, y+2))
x_min = Min(img(x-2, y), img(x+2, y))
y_min = Min(img(x, y-2), img(x, y+2))
max_ = Max(x_max, y_max)
min_ = Min(x_min, y_min)
clamp = Min(max_, img(x, y))
clamp = Max(min_, clamp)
denoised = Function(([x, y], [ghost_zone_2x, ghost_zone_2y]), \
Short, "denoised")
denoised.defn = [clamp]
# 2. Deinterleave the Bayer Array
deinterleaved = \
Function(([c, x, y], [grbg, half_ghost_zone_2x, half_ghost_zone_2y]), \
Short, "deinterleaved")
deinterleaved.defn = [Case(Condition(c, '==', 0), denoised(2*x, 2*y)),
Case(Condition(c, '==', 1), denoised(2*x, 2*y+1)),
Case(Condition(c, '==', 2), denoised(2*x+1, 2*y)),
Case(Condition(c, '==', 3), denoised(2*x+1, 2*y+1))]
# 3. Perform Demosaicing on the Deinterleaved array
#
# Halide :
# "These are the values we already know from the input
# x_y = the value of channel x at a site in the input of channel y
# gb refers to green sites in the blue rows
# gr refers to green sites in the red rows.
# We'll give more convenient names to the four channels we know"
g_gr = Function(([x, y], [half_ghost_zone_2x, half_ghost_zone_2y]), \
Short, "g_gr")
g_gr.defn = [deinterleaved(0, x, y)]
r_r = Function(([x, y], [half_ghost_zone_2x, half_ghost_zone_2y]), \
Short, "r_r")
r_r.defn = [deinterleaved(1, x, y)]
b_b = Function(([x, y], [half_ghost_zone_2x, half_ghost_zone_2y]), \
Short, "b_b")
b_b.defn = [deinterleaved(2, x, y)]
g_gb = Function(([x, y], [half_ghost_zone_2x, half_ghost_zone_2y]), \
Short, "g_gb")
g_gb.defn = [deinterleaved(3, x, y)]
# Halide :
# "These are the ones we need to interpolate
# b_r, g_r, b_gr, r_gr, b_gb, r_gb, r_b, g_b
#
# First calculate green at the red and blue sites
#
# Try interpolating vertically and horizontally. Also compute
# differences vertically and horizontally. Use interpolation in
# whichever direction had the smallest difference."
def absd(a, b):
return Select(Condition(a, '>', b), a - b, b - a)
#
gv_r = (g_gb(x-1, y) + g_gb(x, y))/2
gvd_r = absd(g_gb(x-1, y), g_gb(x, y))
gh_r = (g_gr(x, y+1) + g_gr(x, y))/2
ghd_r = absd(g_gr(x, y+1), g_gr(x, y))
g_r = Function(([x, y], [half_ghost_zone_1x, half_ghost_zone_1y]), \
Short, "g_r")
g_r.defn = [Select(Condition(ghd_r, '<', gvd_r), gh_r, gv_r)]
#
gv_b = (g_gr(x+1, y) + g_gr(x, y))/2
gvd_b = absd(g_gr(x+1, y), g_gr(x, y))
gh_b = (g_gb(x, y-1) + g_gb(x, y))/2
ghd_b = absd(g_gb(x, y-1), g_gb(x, y))
g_b = Function(([x, y], [half_ghost_zone_1x, half_ghost_zone_1y]), \
Short, "g_b")
g_b.defn = [Select(Condition(ghd_b, '<', gvd_b), gh_b, gv_b)]
# Halide :
# "Next interpolate red at gr by first interpolating, then
# correcting using the error green would have had if we had
# interpolated it in the same way (i.e. add the second derivative
# of the green channel at the same place)."
correction = g_gr(x, y) - (g_r(x, y) + g_r(x, y-1))/2
r_gr = Function(([x, y], [half_ghost_zone_0x, half_ghost_zone_0y]), \
Short, "r_gr")
r_gr.defn = [correction + (r_r(x, y-1) + r_r(x, y))/2]
# Halide : "Do the same for other reds and blues at green sites"
correction = g_gr(x, y) - (g_b(x, y) + g_b(x-1, y))/2
b_gr = Function(([x, y], [half_ghost_zone_0x, half_ghost_zone_0y]), \
Short, "b_gr")
b_gr.defn = [correction + (b_b(x, y) + b_b(x-1, y))/2]
correction = g_gb(x, y) - (g_r(x, y) + g_r(x+1, y))/2
r_gb = Function(([x, y], [half_ghost_zone_0x, half_ghost_zone_0y]), \
Short, "r_gb")
r_gb.defn = [correction + (r_r(x, y) + r_r(x+1, y))/2]
correction = g_gb(x, y) - (g_b(x, y) + g_b(x, y+1))/2
b_gb = Function(([x, y], [half_ghost_zone_0x, half_ghost_zone_0y]), \
Short, "b_gb")
b_gb.defn = [correction + (b_b(x, y) + b_b(x, y+1))/2]
# Halide:
# "Now interpolate diagonally to get red at blue and blue at
# red. Hold onto your hats; this gets really fancy. We do the
# same thing as for interpolating green where we try both
# directions (in this case the positive and negative diagonals),
# and use the one with the lowest absolute difference. But we
# also use the same trick as interpolating red and blue at green
# sites - we correct our interpolations using the second
# derivative of green at the same sites."
correction = g_b(x, y) - (g_r(x, y) + g_r(x+1, y-1))/2
rp_b = correction + (r_r(x, y) + r_r(x+1, y-1))/2
rpd_b = absd(r_r(x, y), r_r(x+1, y-1))
correction = g_b(x, y) - (g_r(x, y-1) + g_r(x+1, y))/2
rn_b = correction + (r_r(x, y-1) + r_r(x+1, y))/2
rnd_b = absd(r_r(x, y-1), r_r(x+1, y))
r_b = Function(([x, y], [half_ghost_zone_0x, half_ghost_zone_0y]), \
Short, "r_b")
r_b.defn = [Select(Condition(rpd_b, '<', rnd_b), rp_b, rn_b)]
# Halide : "Same thing for blue at red"
correction = g_r(x, y) - (g_b(x, y) + g_b(x-1, y+1))/2;
bp_r = correction + (b_b(x, y) + b_b(x-1, y+1))/2;
bpd_r = absd(b_b(x, y), b_b(x-1, y+1));
correction = g_r(x, y) - (g_b(x, y+1) + g_b(x-1, y))/2;
bn_r = correction + (b_b(x, y+1) + b_b(x-1, y))/2;
bnd_r = absd(b_b(x, y+1), b_b(x-1, y));
b_r = Function(([x, y], [half_ghost_zone_0x, half_ghost_zone_0y]), \
Short, "b_r")
b_r.defn = [Select(Condition(bpd_r, '<', bnd_r), bp_r, bn_r)]
# 4. Interleave the resulting channels
def interleave_x(a, b, name):
out = Function(([x, y], [ghost_zone_0x, ghost_zone_0y]), \
Short, name)
out.defn = [Select(Condition((x%2), '==', 0), a(x/2, y), b(x/2, y))]
return out
def interleave_y(a, b, name):
out = Function(([x, y], [half_ghost_zone_0x, ghost_zone_0y]), \
Short, name)
out.defn = [Select(Condition((y%2), '==', 0), a(x, y/2), b(x, y/2))]
return out
red = interleave_x(interleave_y(r_gr, r_r, "red_x1"), \
interleave_y(r_b, r_gb, "red_x2"), \
"red")(x, y)
green = interleave_x(interleave_y(g_gr, g_r, "green_x1"), \
interleave_y(g_b, g_gb, "green_x2"), \
"green")(x, y)
blue = interleave_x(interleave_y(b_gr, b_r, "blue_x1"), \
interleave_y(b_b, b_gb, "blue_x2"), \
"blue")(x, y)
# 5. Colour Correction
#
# Halide :
# "Get a color matrix by linearly interpolating between two
# calibrated matrices using inverse kelvin."
alpha = (1.0/kelvin - 1.0/3200) / (1.0/7000 - 1.0/3200)
#def matrix(i, j):
# val = (matrix_3200(i, j) * alpha + matrix_7000(i, j) * (1 - alpha))
# val = Cast(Int, (val * 256.0)) # Halide : "Q8.8 fixed point"
# return val
mat = Function(([x, y], [rgb, grbg]), Int, "matrix", "const")
val = (matrix_3200(x, y) * alpha + matrix_7000(x, y) * (1 - alpha))
mat.defn = [Cast(Int, val * 256.0)]
r = mat(0, 3) + mat(0, 0) * red + mat(0, 1) * green + mat(0, 2) * blue
g = mat(1, 3) + mat(1, 0) * red + mat(1, 1) * green + mat(1, 2) * blue
b = mat(2, 3) + mat(2, 0) * red + mat(2, 1) * green + mat(2, 2) * blue
corrected = Function(([c, x, y], [rgb, ghost_zone_0x, ghost_zone_0y]), \
Short, "corrected")
corrected.defn = [ Case(Condition(c, '==', 2), r/256),
Case(Condition(c, '==', 1), g/256),
Case(Condition(c, '==', 0), b/256) ]
# 6. Apply Curve
#
# Halide :
# "copied from FCam"
def lut_value(anything):
xf = Cast(Float, anything/1024.0)
# clamp
xf = Min(1.0, xf)
xf = Max(0.0, xf)
g = Powf(xf, 1.0/gamma)
b = 2.0 - Powf(2.0, contrast/100.0)
a = 2.0 - 2.0*b
z = Select(Condition(g, '>', 0.5), \
1.0 - (a*(1.0-g)*(1.0-g) + b*(1.0-g)), \
a*g*g + b*g)
val = Cast(UChar, Min(Max(z*256.0, 0.0), 255.0))
return val
# compute this beforehand (its a LUT)
curve = Function(([v], [lut_range]), UChar, "curveLUT", "const")
curve.defn = [lut_value(v)]
# pick from LUT map
curved = Function(([c, x, y], [rgb, ghost_zone_0x, ghost_zone_0y]), \
UChar, "process")
# (1) compute only those out of inLUT on-the-fly
# inLUT = Condition(corrected(c, x, y), '<=', 65535) & \
# Condition(corrected(c, x, y), '>=', 0)
# curved.defn = [Select(inLUT, curve(corrected(c, x, y)), \
# lut_value(corrected(c, x, y)))]
# (2) with correct range [ -2^15 : 2^15 ]
curved.defn = [curve(corrected(c, x, y))]
# (3) compute everything on-the-fly
# curved.defn = [lut_value(corrected(c, x, y))]
return curved
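# Minimal driver sketch (hypothetical; the real application builds and compiles
# the pipeline elsewhere in this repo):
#   pipe_data = {}
#   live_out  = camera_pipe(pipe_data)           # final 'process' Function of the pipeline
#   R, C      = pipe_data['R'], pipe_data['C']   # row/col Parameters registered above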
| apache-2.0 | -480,251,529,535,593,900 | 37.098684 | 83 | 0.518218 | false |
Morphux/installer | pkgs/iproute_p2/iproute_p2.py | 1 | 2697 | ################################### LICENSE ####################################
# Copyright 2016 Morphux #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
##
# iproute_p2.py
# Created: 22/12/2016
# By: Louis Solofrizzo <[email protected]>
##
import os
class Iproute_P2:
conf_lst = {}
e = False
root_dir = ""
def init(self, c_lst, ex, root_dir):
self.conf_lst = c_lst
self.e = ex
self.root_dir = root_dir
self.config = {
"name": "iproute2", # Name of the package
"version": "4.7.0", # Version of the package
"size": 11, # Size of the installed package (MB)
"archive": "iproute2-4.7.0.tar.xz", # Archive name
"SBU": 0.2, # SBU (Compilation time)
"tmp_install": False, # Is this package part of the temporary install
"next": "kbd", # Next package to install
"after": False,
"configure": False,
"urls": [ # Url to download the package. The first one must be morphux servers
"https://install.morphux.org/packages/iproute2-4.7.0.tar.xz"
]
}
return self.config
def before(self):
self.e(["sed", "-i", "/ARPD/d", "Makefile"])
self.e(["sed", "-i", "s/arpd.8//", "man/man8/Makefile"])
self.e(["rm", "-v", "doc/arpd.sgml"])
return self.e(["sed", "-i", "s/m_ipt.o//", "tc/Makefile"])
def make(self):
return self.e(["make", "-j", self.conf_lst["cpus"]])
def install(self):
return self.e(["make", "DOCDIR=/usr/share/doc/iproute2-4.7.0", "install"])
| apache-2.0 | -2,530,462,135,269,676,000 | 43.213115 | 90 | 0.448276 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/khmer-1.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/extract-paired-reads.py | 1 | 4428 | #!/usr/bin/python2.7
#
# This script is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2014. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: [email protected]
#
# pylint: disable=invalid-name,missing-docstring
"""
Take a file containing a mixture of interleaved and orphaned reads, and
extract them into separate files (.pe and .se).
% scripts/extract-paired-reads.py <infile>
Reads FASTQ and FASTA input, retains format for output.
"""
import screed
import sys
import os.path
import textwrap
import argparse
import khmer
from khmer.file import check_file_status, check_space
from khmer.khmer_args import info
def is_pair(name1, name2):
if name1.endswith('/1') and name2.endswith('/2'):
subpart1 = name1.split('/')[0]
subpart2 = name2.split('/')[0]
if subpart1 == subpart2:
assert subpart1
return True
return False
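# Doctest-style illustration (hypothetical read names; the '/1' and '/2' suffixes
# follow the old-style Illumina pairing convention this function expects):
#   >>> is_pair('read17/1', 'read17/2')
#   True
#   >>> is_pair('read17/2', 'read18/1')
#   False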
def output_pair(read1, read2):
if hasattr(read1, 'accuracy'):
return "@%s\n%s\n+\n%s\n@%s\n%s\n+\n%s\n" % \
(read1.name, read1.sequence, read1.accuracy,
read2.name, read2.sequence, read2.accuracy)
else:
return ">%s\n%s\n>%s\n%s\n" % (read1.name, read1.sequence, read2.name,
read2.sequence)
def output_single(read):
if hasattr(read, 'accuracy'):
return "@%s\n%s\n+\n%s\n" % (read.name, read.sequence, read.accuracy)
else:
return ">%s\n%s\n" % (read.name, read.sequence)
def get_parser():
epilog = """
The output is two files, <input file>.pe and <input file>.se, placed in the
current directory. The .pe file contains interleaved and properly paired
sequences, while the .se file contains orphan sequences.
Many assemblers (e.g. Velvet) require that you give them either perfectly
interleaved files, or files containing only single reads. This script takes
files that were originally interleaved but where reads may have been
orphaned via error filtering, application of abundance filtering, digital
normalization in non-paired mode, or partitioning.
Example::
extract-paired-reads.py tests/test-data/paired.fq
"""
parser = argparse.ArgumentParser(
description='Take a mixture of reads and split into pairs and '
'orphans.', epilog=textwrap.dedent(epilog))
parser.add_argument('infile')
parser.add_argument('--version', action='version', version='%(prog)s '
+ khmer.__version__)
return parser
def main():
info('extract-paired-reads.py')
args = get_parser().parse_args()
check_file_status(args.infile)
infiles = [args.infile]
check_space(infiles)
outfile = os.path.basename(args.infile)
if len(sys.argv) > 2:
outfile = sys.argv[2]
single_fp = open(outfile + '.se', 'w')
paired_fp = open(outfile + '.pe', 'w')
print 'reading file "%s"' % args.infile
print 'outputting interleaved pairs to "%s.pe"' % outfile
print 'outputting orphans to "%s.se"' % outfile
last_record = None
last_name = None
n_pe = 0
n_se = 0
record = None
index = 0
for index, record in enumerate(screed.open(sys.argv[1])):
if index % 100000 == 0 and index > 0:
print '...', index
name = record['name'].split()[0]
if last_record:
if is_pair(last_name, name):
paired_fp.write(output_pair(last_record, record))
name, record = None, None
n_pe += 1
else:
single_fp.write(output_single(last_record))
n_se += 1
last_name = name
last_record = record
if last_record:
if is_pair(last_name, name):
paired_fp.write(output_pair(last_record, record))
name, record = None, None
n_pe += 1
else:
single_fp.write(output_single(last_record))
name, record = None, None
n_se += 1
if record:
single_fp.write(output_single(record))
n_se += 1
single_fp.close()
paired_fp.close()
if n_pe == 0:
raise Exception("no paired reads!? check file formats...")
print 'DONE; read %d sequences, %d pairs and %d singletons' % \
(index + 1, n_pe, n_se)
if __name__ == '__main__':
main()
| apache-2.0 | -8,108,609,103,664,629,000 | 28.918919 | 79 | 0.608853 | false |
mic4ael/indico | indico/modules/events/views.py | 1 | 12820 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import print_function, unicode_literals
import posixpath
from flask import current_app, render_template, request
from sqlalchemy.orm import load_only
from werkzeug.utils import cached_property
from indico.modules.admin.views import WPAdmin
from indico.modules.core.settings import social_settings
from indico.modules.events import Event
from indico.modules.events.layout import layout_settings, theme_settings
from indico.modules.events.layout.util import (build_menu_entry_name, get_css_url, get_menu_entry_by_name,
menu_entries_for_event)
from indico.modules.events.models.events import EventType
from indico.modules.events.util import serialize_event_for_json_ld
from indico.util.date_time import format_date
from indico.util.mathjax import MathjaxMixin
from indico.util.string import strip_tags, to_unicode, truncate
from indico.web.flask.util import url_for
from indico.web.views import WPDecorated, WPJinjaMixin
def _get_print_url(event, theme=None, theme_override=False):
view = theme if theme_override else None
if event.type_ == EventType.conference:
return url_for('timetable.timetable', event, print='1', view=view)
elif event.type_ == EventType.meeting:
show_date = request.args.get('showDate')
show_session = request.args.get('showSession')
detail_level = request.args.get('detailLevel')
if show_date == 'all':
show_date = None
if show_session == 'all':
show_session = None
        if detail_level in ('all', 'contribution'):
detail_level = None
return url_for('events.display', event, showDate=show_date, showSession=show_session, detailLevel=detail_level,
print='1', view=view)
elif event.type_ == EventType.lecture:
return url_for('events.display', event, print='1', view=view)
def render_event_header(event, conference_layout=False, theme=None, theme_override=False):
print_url = _get_print_url(event, theme, theme_override) if not conference_layout else None
show_nav_bar = event.type_ != EventType.conference or layout_settings.get(event, 'show_nav_bar')
themes = {tid: {'name': data['title'], 'user_visible': data.get('user_visible')}
for tid, data in theme_settings.get_themes_for(event.type_.name).viewitems()}
return render_template('events/header.html',
event=event, print_url=print_url, show_nav_bar=show_nav_bar, themes=themes, theme=theme)
def render_event_footer(event, dark=False):
location = event.venue_name
if event.room_name:
location = '{} ({})'.format(event.room_name, location)
description = '{}\n\n{}'.format(truncate(event.description, 1000), event.short_external_url).strip()
google_calendar_params = {
'action': 'TEMPLATE',
'text': event.title,
'dates': '{}/{}'.format(event.start_dt.strftime('%Y%m%dT%H%M%SZ'),
event.end_dt.strftime('%Y%m%dT%H%M%SZ')),
'details': description,
'location': location,
'trp': False,
'sprop': [event.external_url, 'name:indico']
}
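    # The 'dates' value uses Google Calendar's compact UTC form, e.g. a two-hour
    # event (hypothetical times) becomes "20200101T090000Z/20200101T110000Z".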
social_settings_data = social_settings.get_all()
show_social = social_settings_data['enabled'] and layout_settings.get(event, 'show_social_badges')
return render_template('events/footer.html',
event=event,
dark=dark,
social_settings=social_settings_data,
show_social=show_social,
google_calendar_params=google_calendar_params)
class WPEventAdmin(WPAdmin):
template_prefix = 'events/'
class WPEventBase(WPDecorated):
ALLOW_JSON = False
bundles = ('module_events.display.js',)
@property
def page_metadata(self):
metadata = super(WPEventBase, self).page_metadata
return {
'og': dict(metadata['og'], **{
'title': self.event.title,
'type': 'event',
'image': (self.event.logo_url if self.event.has_logo else
url_for('assets.image', filename='indico_square.png', _external=True)),
'description': self.event.description
}),
'json_ld': serialize_event_for_json_ld(self.event, full=True),
'keywords': self.event.keywords
}
def __init__(self, rh, event_, **kwargs):
assert event_ == kwargs.setdefault('event', event_)
self.event = event_
WPDecorated.__init__(self, rh, **kwargs)
start_dt_local = event_.start_dt_display.astimezone(event_.display_tzinfo)
end_dt_local = event_.end_dt_display.astimezone(event_.display_tzinfo)
dates = ' ({})'.format(to_unicode(format_date(start_dt_local, format='long')))
if start_dt_local.date() != end_dt_local.date():
if start_dt_local.year == end_dt_local.year and start_dt_local.month == end_dt_local.month:
dates = ' ({}-{})'.format(start_dt_local.day, to_unicode(format_date(end_dt_local, format='long')))
else:
dates = ' ({} - {})'.format(to_unicode(format_date(start_dt_local, format='long')),
to_unicode(format_date(end_dt_local, format='long')))
self.title = '{} {}'.format(strip_tags(self.event.title), dates)
page_title = kwargs.get('page_title')
if page_title:
self.title += ': {}'.format(strip_tags(page_title))
def _get_header(self):
raise NotImplementedError # must be overridden by meeting/lecture and conference WPs
class WPSimpleEventDisplayBase(MathjaxMixin, WPEventBase):
"""Base class for displaying something on a lecture/meeting page"""
def __init__(self, rh, event_, **kwargs):
self.event = event_
WPEventBase.__init__(self, rh, event_, **kwargs)
def _get_header(self):
return render_event_header(self.event)
def _get_footer(self):
return render_event_footer(self.event)
class WPSimpleEventDisplay(WPSimpleEventDisplayBase):
bundles = ('module_vc.js', 'module_vc.css', 'module_events.cloning.js')
def __init__(self, rh, conf, theme_id, theme_override=False):
WPSimpleEventDisplayBase.__init__(self, rh, conf)
self.theme_id = theme_id
self.theme_file_name = theme_id.replace('-', '_')
self.theme = theme_settings.themes[theme_id]
self.theme_override = theme_override
@property
def additional_bundles(self):
plugin = self.theme.get('plugin')
print_stylesheet = self.theme.get('print_stylesheet')
if plugin:
manifest = plugin.manifest
else:
manifest = current_app.manifest
return {
'screen': (manifest['themes_{}.css'.format(self.theme_file_name)],),
'print': ((manifest['themes_{}.print.css'.format(self.theme_file_name)],)
if print_stylesheet else ())
}
def _get_head_content(self):
return MathjaxMixin._get_head_content(self) + WPEventBase._get_head_content(self)
def get_extra_css_files(self):
custom_url = get_css_url(self.event)
return [custom_url] if custom_url else []
def _apply_decoration(self, body):
if request.args.get('frame') == 'no' or request.args.get('fr') == 'no' or request.args.get('print') == '1':
return render_template('events/display/print.html', content=body)
else:
return WPEventBase._apply_decoration(self, body)
def _get_header(self):
return render_event_header(self.event, theme=self.theme_id, theme_override=self.theme_override)
def _get_footer(self):
return render_event_footer(self.event, dark=True)
def _get_body(self, params):
attached_items = self.event.attached_items
folders = [folder for folder in attached_items.get('folders', []) if folder.title != 'Internal Page Files']
files = attached_items.get('files', [])
lectures = []
if self.event.series is not None and self.event.series.show_links:
lectures = (Event.query.with_parent(self.event.series)
.filter(Event.id != self.event.id)
.options(load_only('series_pos', 'id'))
.order_by(Event.series_pos)
.all())
plugin = self.theme.get('plugin')
tpl_name = self.theme['template']
tpl = ((plugin.name + tpl_name)
if (plugin and tpl_name[0] == ':')
else posixpath.join('events/display', tpl_name))
rv = render_template(tpl,
event=self.event,
category=self.event.category.title,
timezone=self.event.display_tzinfo,
theme_settings=self.theme.get('settings', {}),
theme_user_settings=layout_settings.get(self.event, 'timetable_theme_settings'),
files=files,
folders=folders,
lectures=lectures)
return rv.encode('utf-8')
class WPConferenceDisplayBase(WPJinjaMixin, MathjaxMixin, WPEventBase):
menu_entry_plugin = None
menu_entry_name = None
bundles = ('conferences.css',)
def __init__(self, rh, event_, **kwargs):
assert event_ == kwargs.setdefault('event', event_)
self.event = event_
kwargs['conf_layout_params'] = self._get_layout_params()
kwargs.setdefault('page_title', self.sidemenu_title)
WPEventBase.__init__(self, rh, event_, **kwargs)
def _get_layout_params(self):
bg_color = layout_settings.get(self.event, 'header_background_color').replace('#', '').lower()
text_color = layout_settings.get(self.event, 'header_text_color').replace('#', '').lower()
announcement = ''
if layout_settings.get(self.event, 'show_announcement'):
announcement = layout_settings.get(self.event, 'announcement')
return {
'menu': menu_entries_for_event(self.event),
'active_menu_item': self.sidemenu_option,
'bg_color_css': 'background: #{0}; border-color: #{0};'.format(bg_color) if bg_color else '',
'text_color_css': 'color: #{};'.format(text_color) if text_color else '',
'announcement': announcement
}
def get_extra_css_files(self):
theme_url = self._kwargs.get('css_url_override', get_css_url(self.event))
return [theme_url] if theme_url else []
def _get_header(self):
return render_event_header(self.event, conference_layout=True)
@cached_property
def sidemenu_entry(self):
if not self.menu_entry_name:
return None
name = build_menu_entry_name(self.menu_entry_name, self.menu_entry_plugin)
return get_menu_entry_by_name(name, self.event)
@cached_property
def sidemenu_option(self):
entry = self.sidemenu_entry
return entry.id if entry else None
@cached_property
def sidemenu_title(self):
entry = self.sidemenu_entry
return entry.localized_title if entry else ''
def _get_head_content(self):
return '\n'.join([
MathjaxMixin._get_head_content(self),
WPEventBase._get_head_content(self)
])
def _get_body(self, params):
return WPJinjaMixin._get_page_content(self, params)
def _apply_decoration(self, body):
self.logo_url = self.event.logo_url if self.event.has_logo else None
css_override_form = self._kwargs.get('css_override_form')
if css_override_form:
override_html = render_template('events/layout/css_preview_header.html',
event=self.event, form=css_override_form,
download_url=self._kwargs['css_url_override'])
body = override_html + body
return WPEventBase._apply_decoration(self, to_unicode(body))
class WPConferenceDisplay(WPConferenceDisplayBase):
menu_entry_name = 'overview'
def _get_body(self, params):
return render_template('events/display/conference.html', **self._kwargs)
def _get_footer(self):
return render_event_footer(self.event)
class WPAccessKey(WPJinjaMixin, WPDecorated):
template_prefix = 'events/'
def _get_body(self, params):
return self._get_page_content(params)
| mit | -9,060,796,817,758,610,000 | 41.171053 | 119 | 0.61092 | false |
Alexey-T/CudaText | app/cudatext.app/Contents/Resources/py/cuda_options_editor/cd_opts_dlg.py | 1 | 113409 | ''' Plugin for CudaText editor
Authors:
Andrey Kvichansky (kvichans on github.com)
Version:
'2.3.15 2021-04-02'
ToDo: (see end of file)
'''
import re, os, sys, json, collections, itertools, webbrowser, tempfile, html, pickle, time, datetime
from itertools import *
from pathlib import PurePath as PPath
from pathlib import Path as Path
def first_true(iterable, default=False, pred=None):return next(filter(pred, iterable), default) # 10.1.2. Itertools Recipes
import cudatext as app
import cudatext_cmd as cmds
import cudax_lib as apx
from .cd_plug_lib import *
d = dict
odict = collections.OrderedDict
#class odict(collections.OrderedDict): #py3.9 conflict
# def __init__(self, *args, **kwargs):
# if args:super().__init__(*args)
# elif kwargs:super().__init__(kwargs.items())
# def __repr__(self):
# return '{%s}' % (', '.join("'%s':%r" % (k,v) for k,v in self.items()))
pass; LOG = (-1== 1) or apx.get_opt('_opts_dlg_log',False) # Do or dont logging.
pass; from pprint import pformat
pass; pf=lambda d:pformat(d,width=150)
pass; pf80=lambda d:pformat(d,width=80)
pass; pf60=lambda d:pformat(d,width=60)
pass; ##!! waits correction
_ = get_translation(__file__) # I18N
MIN_API_VER = '1.0.168'
MIN_API_VER_4WR = '1.0.175' # vis
MIN_API_VER = '1.0.231' # listview has prop columns
MIN_API_VER = '1.0.236' # p, panel
MIN_API_VER = '1.0.237' # STATUSBAR_SET_CELL_HINT
VERSION = re.split('Version:', __doc__)[1].split("'")[1]
VERSION_V, \
VERSION_D = VERSION.split(' ')
MAX_HIST = apx.get_opt('ui_max_history_edits', 20)
CFG_JSON = app.app_path(app.APP_DIR_SETTINGS)+os.sep+'cuda_options_editor.json'
HTM_RPT_FILE= str(Path(tempfile.gettempdir()) / 'CudaText_option_report.html')
FONT_LST = ['default'] \
+ [font
for font in app.app_proc(app.PROC_ENUM_FONTS, '')
if not font.startswith('@')]
pass; #FONT_LST=FONT_LST[:3]
def load_definitions(defn_path_or_json)->list:
""" Return
[{ opt:'opt name'
, def:<def val>
, cmt:'full comment'
, frm:'bool'|'float'|'int'|'str'| # simple
'int2s'|'strs'|'str2s'| # list/dict
'font'|'font-e'| # font non-empty/can-empty
'#rgb'|'#rgb-e'| # color non-empty/can-empty
'hotk'|'file'|'json'|
'unk'
, lst:[str] for frm==ints
, dct:[(num,str)] for frm==int2s
, [(str,str)] for frm==str2s
, chp:'chapter/chapter'
, tgs:['tag',]
}]
"""
pass; #LOG and log('defn_path_or_json={}',(defn_path_or_json))
kinfs = []
lines = defn_path_or_json \
if str==type(defn_path_or_json) else \
defn_path_or_json.open(encoding='utf8').readlines()
if lines[0][0]=='[':
# Data is ready - SKIP parsing
json_bd = defn_path_or_json \
if str==type(defn_path_or_json) else \
defn_path_or_json.open(encoding='utf8').read()
kinfs = json.loads(json_bd, object_pairs_hook=odict)
for kinf in kinfs:
pass; #LOG and log('opt in kinf={}',('opt' in kinf))
if isinstance(kinf['cmt'], list):
kinf['cmt'] = '\n'.join(kinf['cmt'])
upd_cald_vals(kinfs, '+def')
for kinf in kinfs:
kinf['jdc'] = kinf.get('jdc', kinf.get('dct', []))
kinf['jdf'] = kinf.get('jdf', kinf.get('def', ''))
return kinfs
l = '\n'
#NOTE: parse_raw
reTags = re.compile(r' *\((#\w+,?)+\)')
reN2S = re.compile(r'^\s*(\d+): *(.+)' , re.M)
reS2S = re.compile(r'^\s*"(\w*)": *(.+)' , re.M)
# reLike = re.compile(r' *\(like (\w+)\)') ##??
reFldFr = re.compile(r'\s*Folders from: (.+)')
def parse_cmnt(cmnt, frm):#, kinfs):
tags= set()
mt = reTags.search(cmnt)
while mt:
tags_s = mt.group(0)
tags |= set(tags_s.strip(' ()').replace('#', '').split(','))
cmnt = cmnt.replace(tags_s, '')
mt = reTags.search(cmnt)
dctN= [[int(m.group(1)), m.group(2).rstrip(', ')] for m in reN2S.finditer(cmnt+l)]
dctS= [[ m.group(1) , m.group(2).rstrip(', ')] for m in reS2S.finditer(cmnt+l)]
lstF= None
mt = reFldFr.search(cmnt)
if mt:
from_short = mt.group(1)
from_dir = from_short if os.path.isabs(from_short) else os.path.join(app.app_path(app.APP_DIR_DATA), from_short)
pass; #LOG and log('from_dir={}',(from_dir))
if not os.path.isdir(from_dir):
log(_('No folder "{}" from\n{}'), from_short, cmnt)
else:
lstF = [d for d in os.listdir(from_dir)
if os.path.isdir(from_dir+os.sep+d) and d.upper()!='README' and d.strip()]
lstF = sorted(lstF)
pass; #LOG and log('lstF={}',(lstF))
frm,\
lst = ('strs' , lstF) if lstF else \
(frm , [] )
frm,\
dct = ('int2s', dctN) if dctN else \
('str2s', dctS) if dctS else \
(frm , [] )
return cmnt, frm, dct, lst, list(tags)
#def parse_cmnt
def jsstr(s):
return s[1:-1].replace(r'\"','"').replace(r'\\','\\')
reChap1 = re.compile(r' *//\[Section: +(.+)\]')
reChap2 = re.compile(r' *//\[(.+)\]')
reCmnt = re.compile(r' *//(.+)')
reKeyDV = re.compile(r' *"(\w+)" *: *(.+)')
reInt = re.compile(r' *(-?\d+)')
reFloat = re.compile(r' *(-?\d+\.\d+)')
reFontNm= re.compile(r'font\w*_name')
reHotkey= re.compile(r'_hotkey_')
reColor = re.compile(r'_color$')
chap = ''
pre_cmnt= ''
pre_kinf= None
cmnt = ''
for line in lines:
if False:pass
elif reChap1.match(line):
mt= reChap1.match(line)
chap = mt.group(1)
cmnt = ''
elif reChap2.match(line):
mt= reChap2.match(line)
chap = mt.group(1)
cmnt = ''
elif reCmnt.match(line):
mt= reCmnt.match(line)
cmnt += l+mt.group(1)
elif reKeyDV.match(line):
mt= reKeyDV.match(line)
key = mt.group(1)
dval_s = mt.group(2).rstrip(', ')
dfrm,dval= \
('bool', True ) if dval_s=='true' else \
('bool', False ) if dval_s=='false' else \
('float',float(dval_s)) if reFloat.match(dval_s) else \
('int', int( dval_s)) if reInt.match(dval_s) else \
('font', dval_s[1:-1] ) if reFontNm.search(key) else \
('hotk', dval_s[1:-1] ) if reHotkey.search(key) else \
('#rgb', dval_s[1:-1] ) if reColor.search(key) else \
('str', jsstr(dval_s)) if dval_s[0]=='"' and dval_s[-1]=='"' else \
('unk', dval_s )
dfrm,dval=('#rgb-e','' ) if dfrm=='#rgb' and dval=='' else \
(dfrm, dval )
pass; #LOG and log('key,dval_s,dfrm,dval={}',(key,dval_s,dfrm,dval))
cmnt = cmnt.strip(l) if cmnt else pre_cmnt
ref_frm = cmnt[:3]=='...'
pre_cmnt= cmnt if cmnt else pre_cmnt
pass; #LOG and log('ref_frm,pre_cmnt,cmnt={}',(ref_frm,pre_cmnt,cmnt))
cmnt = cmnt.lstrip('.'+l)
dfrm = 'font-e' if dfrm=='font' and _('Empty string is allowed') in cmnt else dfrm
kinf = odict()
kinfs += [kinf]
kinf['opt'] = key
kinf['def'] = dval
kinf['cmt'] = cmnt.strip()
kinf['frm'] = dfrm
if dfrm in ('int','str'):
cmnt,frm,\
dct,lst,tags = parse_cmnt(cmnt, dfrm)#, kinfs)
kinf['cmt'] = cmnt.strip()
if frm!=dfrm:
kinf['frm'] = frm
if dct:
kinf['dct'] = dct
if lst:
kinf['lst'] = lst
if tags:
kinf['tgs'] = tags
if dfrm=='font':
kinf['lst'] = FONT_LST
if dfrm=='font-e':
kinf['lst'] = [''] + FONT_LST
if chap:
kinf['chp'] = chap
if ref_frm and pre_kinf:
# Copy frm data from prev oi
pass; #LOG and log('Copy frm pre_kinf={}',(pre_kinf))
kinf[ 'frm'] = pre_kinf['frm']
if 'dct' in pre_kinf:
kinf['dct'] = pre_kinf['dct']
if 'lst' in pre_kinf:
kinf['lst'] = pre_kinf['lst']
pre_kinf= kinf.copy()
cmnt = ''
#for line
pass; #open(str(defn_path_or_json)+'.p.json', 'w').write(json.dumps(kinfs,indent=2))
upd_cald_vals(kinfs, '+def')
for kinf in kinfs:
kinf['jdc'] = kinf.get('jdc', kinf.get('dct', []))
kinf['jdf'] = kinf.get('jdf', kinf.get('def', ''))
return kinfs
#def load_definitions
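# Illustrative shape of one parsed entry (hypothetical option and values; the
# field set matches the docstring above, with 'jdc'/'jdf' appended at the end):
#   {'opt': 'tab_size', 'def': 4, 'cmt': 'Size of tabulation...', 'frm': 'int',
#    'chp': 'Editor', 'jdc': [], 'jdf': 4}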
def load_vals(opt_dfns:list, lexr_json='', ed_=None, full=False, user_json='user.json')->odict:
""" Create reformated copy (as odict) of
definitions data opt_dfns (see load_definitions)
If ed_ then add
'fval'
for some options
        If full==True then append options without definition
but only with
{ opt:'opt name'
, frm:'int'|'float'|'str'
, uval:<value from user.json>
, lval:<value from lexer*.json>
}}
Return
{'opt name':{ opt:'opt name', frm:
? , def:, cmt:, dct:, chp:, tgs:
? , uval:<value from user.json>
? , lval:<value from lexer*.json>
? , fval:<value from ed>
}}
"""
user_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+user_json
lexr_def_json = apx.get_def_setting_dir() +os.sep+lexr_json
lexr_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+lexr_json
user_vals = apx._json_loads(open(user_json , encoding='utf8').read(), object_pairs_hook=odict) \
if os.path.isfile(user_json) else {}
lexr_def_vals = apx._json_loads(open(lexr_def_json, encoding='utf8').read(), object_pairs_hook=odict) \
if os.path.isfile(lexr_def_json) else {}
lexr_vals = apx._json_loads(open(lexr_json , encoding='utf8').read(), object_pairs_hook=odict) \
if os.path.isfile(lexr_json) else {}
pass; #LOG and log('lexr_vals={}',(lexr_vals))
pass; #LOG and log('lexr_def_vals={}',(lexr_def_vals))
# Fill vals for defined opt
pass; #LOG and log('no opt={}',([oi for oi in opt_dfns if 'opt' not in oi]))
oinf_valed = odict([(oi['opt'], oi) for oi in opt_dfns])
for opt, oinf in oinf_valed.items():
if opt in lexr_def_vals: # Correct def-vals for lexer
oinf['dlx'] = True
oinf['def'] = lexr_def_vals[opt]
oinf['jdf'] = oinf['def']
if opt in user_vals: # Found user-val for defined opt
oinf['uval'] = user_vals[opt]
if opt in lexr_vals: # Found lexer-val for defined opt
oinf['lval'] = lexr_vals[opt]
if ed_ and opt in apx.OPT2PROP: # Found file-val for defined opt
fval = ed_.get_prop(apx.OPT2PROP[opt])
oinf['fval'] =fval
if full:
# Append item for non-defined opt
reFontNm = re.compile(r'font\w*_name')
def val2frm(val, opt=''):
pass; #LOG and log('opt,val={}',(opt,val))
return ('bool' if isinstance(val, bool) else
'int' if isinstance(val, int) else
'float' if isinstance(val, float) else
'json' if isinstance(val, (list, dict)) else
'hotk' if '_hotkey_' in val else
'font' if isinstance(val, str) and
reFontNm.search(val) else
'str')
for uop,uval in user_vals.items():
if uop in oinf_valed: continue
oinf_valed[uop] = odict(
[ ('opt' ,uop)
, ('frm' ,val2frm(uval,uop))
, ('uval' ,uval)
]+([('lval' ,lexr_vals[uop])] if uop in lexr_vals else [])
)
for lop,lval in lexr_vals.items():
if lop in oinf_valed: continue
oinf_valed[lop] = odict(
[ ('opt' ,lop)
, ('frm' ,val2frm(lval,lop))
, ('lval' ,lval)
])
upd_cald_vals(oinf_valed)
upd_cald_vals(oinf_valed, '+def') if lexr_def_vals else None # To update oi['jdf'] by oi['def']
return oinf_valed
#def load_vals
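# Illustrative shape of the returned odict (hypothetical values; keys are option
# names, each entry extends the definition with uval/lval/fval when present and
# the calculated '!'/'juvl'/'jlvl'/'jfvl' fields):
#   {'tab_size': {'opt': 'tab_size', 'frm': 'int', 'def': 4, 'uval': 2,
#                 '!': '!', 'juvl': 2, 'jlvl': '', 'jfvl': ''}, ...}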
def upd_cald_vals(ois, what=''):
# Fill calculated attrs
if '+def' in what:
for oi in [oi for oi in ois if 'dct' in oi]:
dct = oi['dct']
dval= oi['def']
dc = odict(dct)
pass; #LOG and log('dct={}',(dct))
oi['jdc'] = [f('({}) {}', vl, cm ) for vl,cm in dct]
oi['jdf'] = f('({}) {}', dval, dc[dval])
pass; #LOG and log('oi={}',(oi))
# Fill calculated attrs
if not what or '+clcd' in what:
for op, oi in ois.items():
oi['!'] = ('L' if oi.get('dlx') else '') \
+ ('+!!' if 'def' not in oi and 'lval' in oi else
'+!' if 'def' not in oi and 'uval' in oi else
'!!!' if 'fval' in oi
and oi['fval'] != oi.get('lval'
, oi.get('uval'
, oi.get( 'def'))) else
'!!' if 'lval' in oi else
'!' if 'uval' in oi else
'')
dct = odict(oi.get('dct', []))
oi['juvl'] = oi.get('uval', '') \
if not dct or 'uval' not in oi else \
f('({}) {}', oi['uval'], dct[oi['uval']])
oi['jlvl'] = oi.get('lval', '') \
if not dct or 'lval' not in oi else \
f('({}) {}', oi['lval'], dct[oi['lval']])
oi['jfvl'] = oi.get('fval', '') \
if not dct or 'fval' not in oi else \
f('({}) {}', oi['fval'], dct[oi['fval']])
#def upd_cald_vals
#class OptDt:
# """ Options infos to view/change in dlg.
# Opt getting is direct - by fields.
# Opt setting only by methods.
# """
#
# def __init__(self
# , keys_info=None # Ready data
# , path_raw_keys_info='' # default.json
# , path_svd_keys_info='' # To save parsed default.json
# , bk_sets=False # Create backup of settings before the first change
# ):
# self.defn_path = Path(path_raw_keys_info)
# self.bk_sets = bk_sets # Need to backup
# self.bk_files = {} # Created backup files
#
# self.opts_defn = {} # Meta-info for options: format, comment, dict/list of values, chapter, tags
# self.ul_opts = {} # Total options info for user+cur_lexer
# #def __init__
#
# #class OptDt
_SORT_NO = -1
_SORT_DN = 0
_SORT_UP = 1
_SORT_TSGN = {_SORT_NO:'', _SORT_UP:'↑', _SORT_DN:'↓'}
_SORT_NSGN = {-1:'', 0:'', 1:'²', 2:'³'}
_SORT_NSGN.update({n:str(1+n) for n in range(3,10)})
_sort_pfx = lambda to,num: '' if to==_SORT_NO else _SORT_TSGN[to]+_SORT_NSGN[num]+' '
_next_sort = lambda to: ((1 + 1+to) % 3) - 1
_inve_sort = lambda to: 1 - to
sorts_dflt = lambda cols: [[_SORT_NO, -1] for c in range(cols)]
sorts_sign = lambda sorts, col: _sort_pfx(sorts[col][0], sorts[col][1])
sorts_on = lambda sorts, col: sorts[col][0] != _SORT_NO
def sorts_turn(sorts, col, scam=''):
""" Switch one of sorts """
max_num = max(tn[1] for tn in sorts)
tn_col = sorts[col]
if 0:pass
elif 'c'==scam and tn_col[1]==max_num: # Turn col with max number
tn_col[0] = _next_sort(tn_col[0])
tn_col[1] = -1 if tn_col[0]==_SORT_NO else tn_col[1]
elif 'c'==scam: # Add new or turn other col
tn_col[0] = _next_sort(tn_col[0]) if -1==tn_col[1] else _inve_sort(tn_col[0])
tn_col[1] = max_num+1 if -1==tn_col[1] else tn_col[1]
else:#not scam: # Only col
for cl,tn in enumerate(sorts):
tn[0] = _next_sort(tn_col[0]) if cl==col else _SORT_NO
tn[1] = 0 if cl==col else -1
return sorts
#def sorts_turn
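# Worked example (hypothetical, three columns; the second item of each pair is
# the sort-key order, -1 meaning the column does not participate):
#   sorts = sorts_dflt(3)            # [[-1,-1], [-1,-1], [-1,-1]]   (no sorting)
#   sorts_turn(sorts, 1)             # col 1 becomes the only key:   [[-1,-1], [0,0], [-1,-1]]
#   sorts_turn(sorts, 1)             # its direction toggles:        [[-1,-1], [1,0], [-1,-1]]
#   sorts_turn(sorts, 0, scam='c')   # Ctrl adds col 0 as 2nd key:   [[0,1], [1,0], [-1,-1]]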
def sorts_sort(sorts, tdata):
""" Sort tdata (must contain only str) by sorts """
pass; #log('tdata={}',(tdata))
pass; #log('sorts={}',(sorts))
max_num = max(tn[1] for tn in sorts)
if -1==max_num: return tdata
def push(lst, v):
lst.append(v)
return lst
prep_str = lambda s,inv: (chr(0x10FFFF) # To move empty to bottom
if not s else
s
if not inv else
''.join(chr(0x10FFFF - ord(c)) for c in s) # 0x10FFFF from chr() doc
)
td_keys = [[r] for r in tdata]
for srt_n in range(1+max_num):
srt_ctn = first_true(((c,tn) for c,tn in enumerate(sorts)), None
,lambda ntn: ntn[1][1]==srt_n)
assert srt_ctn is not None
srt_c = srt_ctn[0]
inv = srt_ctn[1][0]==_SORT_UP
td_keys = [push(r, prep_str(r[0][srt_c], inv)) for r in td_keys]
td_keys.sort(key=lambda r: r[1:])
tdata = [r[0] for r in td_keys] # Remove appended cols
return tdata
#def sorts_sort
class OptEdD:
SCROLL_W= app.app_proc(app.PROC_GET_GUI_HEIGHT, 'scrollbar') if app.app_api_version()>='1.0.233' else 15
COL_SEC = 0
COL_NAM = 1
COL_OVR = 2
COL_DEF = 3
COL_USR = 4
COL_LXR = 5
COL_FIL = 6
COL_LEXR= _('Lexer')
COL_FILE= _('File "{}"')
COL_NMS = (_('Section'), _('Option'), '!', _('Default'), _('User'), COL_LEXR, COL_FILE)
COL_MWS = [ 70, 210, 25, 120, 120, 70, 50] # Min col widths
# COL_MWS = [ 70, 150, 25, 120, 120, 70, 50] # Min col widths
COL_N = len(COL_MWS)
CMNT_MHT= 60 # Min height of Comment
STBR_FLT= 10
STBR_ALL= 11
STBR_MSG= 12
STBR_H = apx.get_opt('ui_statusbar_height',24)
FILTER_C= _('&Filter')
NO_CHAP = _('_no_')
CHPS_H = f(_('Choose section to append in "{}".'
'\rHold Ctrl to add several sections.'
), FILTER_C).replace('&', '')
FLTR_H = _('Suitable options will contain all specified words.'
'\r Tips and tricks:'
'\r • Add "#" to search the words also in comments.'
'\r • Add "@sec" to show options from section with "sec" in name.'
'\r Several sections are allowed.'
'\r Click item in menu "Section..." with Ctrl to add it.'
'\r • To show only overridden options:'
'\r - Add "!" to show only User+Lexer+File.'
'\r - Add "!!" to show only Lexer+File'
'\r - Add "!!!" to show only File.'
'\r • Use "<" or ">" for word boundary.'
'\r Example: '
'\r size> <tab'
'\r selects "tab_size" but not "ui_tab_size" or "tab_size_x".'
'\r • Alt+L - Clear filter')
LOCV_C = _('Go to "{}" in user/lexer config file')
LOCD_C = _('Go to "{}" in default config file')
OPME_H = _('Edit JSON value')
TOOP_H = f(_('Close dialog and open user/lexer settings file'
'\rto edit the current option.'
'\rSee also menu command'
'\r {}'), f(LOCD_C, '<option>'))
LIFL_C = _('Instant filtering')
FULL_C = _('Show &all keys in user/lexer configs')
@staticmethod
def prep_sorts(sorts):
M = OptEdD
if len(sorts)==len(M.COL_NMS):
return sorts
return sorts_dflt(len(M.COL_NMS))
def __init__(self
, path_keys_info ='' # default.json or parsed data (file or list_of_dicts)
, subset ='' # To get/set from/to cuda_options_editor.json
, how ={} # Details to work
):
M,m = self.__class__,self
m.ed = ed
m.how = how
m.defn_path = Path(path_keys_info) if str==type(path_keys_info) else json.dumps(path_keys_info)
m.subset = subset
m.stores = get_hist('dlg'
, json.loads(open(CFG_JSON).read(), object_pairs_hook=odict)
if os.path.exists(CFG_JSON) else odict())
pass; #LOG and log('ok',())
# m.bk_sets = m.stores.get(m.subset+'bk_sets' , False)
m.lexr_l = app.lexer_proc(app.LEXER_GET_LEXERS, False)
m.lexr_w_l = [f('{} {}'
,'!!' if os.path.isfile(app.app_path(app.APP_DIR_SETTINGS)+os.sep+'lexer '+lxr+'.json') else ' '
, lxr)
for lxr in m.lexr_l]
m.cur_op = m.stores.get(m.subset+'cur_op' , '') # Name of current option
m.col_ws = m.stores.get(m.subset+'col_ws' , M.COL_MWS[:])
m.col_ws = m.col_ws if M.COL_N==len(m.col_ws) else M.COL_MWS[:]
m.h_cmnt = m.stores.get(m.subset+'cmnt_heght', M.CMNT_MHT)
m.sorts = m.stores.get(m.subset+'sorts' , [] ) # Def sorts is no sorts
m.live_fltr = m.stores.get(m.subset+'live_fltr' , False) # To filter after each change and no History
m.cond_hl = [s for s in m.stores.get(m.subset+'h.cond', []) if s] if not m.live_fltr else []
m.cond_s = '' if M.restart_cond is None else M.restart_cond # String filter
m.ops_only = [] # Subset to show (future)
m.sorts = M.prep_sorts(m.sorts)
m.lexr = m.ed.get_prop(app.PROP_LEXER_CARET)
m.all_ops = m.stores.get(m.subset+'all_ops' , False) # Show also options without definition
m.opts_defn = {} # Meta-info for options: format, comment, dict of values, chapter, tags
m.opts_full = {} # Show all options
m.chp_tree = {} # {'Ui':{ops:[], 'kids':{...}, 'path':'Ui/Tabs'}
m.pth2chp = {} # path-index for m.chp_tree
# Cache
m.SKWULFs = [] # Last filtered+sorted
m.cols = [] # Last info about listview columns
m.itms = [] # Last info about listview cells
# m.bk_files = {}
# m.do_file('backup-user') if m.bk_sets else 0
m.do_file('load-data')
m.for_ulf = 'u' # 'u' for User, 'l' for Lexer, 'f' for File
m.cur_op = m.cur_op if m.cur_op in m.opts_full else '' # First at start
m.cur_in = 0 if m.cur_op else -1
m.stbr = None # Handle for statusbar_proc
m.locate_on_exit = None
m.chng_rpt = [] # Report of all changes by user
m.apply_one = m.stores.get(m.subset+'apply_one', False) # Do one call OpsReloadAndApply on exit
m.apply_need= False # Need to call OpsReloadAndApply
m.auto4file = m.stores.get(m.subset+'auto4file', True) # Auto reset file value to over value def/user/lex
#def __init__
def stbr_act(self, tag=None, val='', opts={}):
M,m = self.__class__,self
if not m.stbr: return
app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_TEXT, tag=tag, value=str(val))
#def stbr_act
def do_file(self, what, data='', opts={}):
M,m = self.__class__,self
if False:pass
elif what=='load-data':
pass; #LOG and log('',)
m.opts_defn = load_definitions(m.defn_path)
pass; #LOG and log('m.opts_defn={}',pf([o for o in m.opts_defn]))
pass; #LOG and log('m.opts_defn={}',pf([o for o in m.opts_defn if '2s' in o['frm']]))
m.opts_full = load_vals(m.opts_defn
,lexr_json='lexer '+m.lexr+'.json'
,user_json=m.how.get('stor_json', 'user.json')
, ed_=m.ed, full=m.all_ops)
m.cur_op = m.cur_op if m.cur_op in m.opts_full else ''
pass; #LOG and log('m.opts_full={}',pf(m.opts_full))
m.do_file('build-chp-tree')
elif what=='build-chp-tree':
# Build chapter tree
m.chp_tree = odict(ops=list(m.opts_full.keys())
,kids=odict()
,path='') # {chp:{ops:[], kids:{...}, path:'c1/c2'}
m.pth2chp = {} # {path:chp}
for op,oi in m.opts_full.items():
chp_s = oi.get('chp', M.NO_CHAP)
chp_s = chp_s if chp_s else M.NO_CHAP
chp_node= m.chp_tree # Start root to move
kids = chp_node['kids']
path =''
for chp in chp_s.split('/'):
# Move along branch and create nodes if need
chp_node = kids.setdefault(chp, odict())
path += ('/'+chp) if path else chp
chp_node['path']= path
m.pth2chp[path] = chp_node
ops_l = chp_node.setdefault('ops', [])
ops_l += [op]
if not ('/'+chp_s).endswith('/'+chp): # not last
kids = chp_node.setdefault('kids', odict())
pass; #LOG and log('m.chp_tree=¶{}',pf60(m.chp_tree))
pass; #LOG and log('m.pth2chp=¶{}',pf60(m.pth2chp))
elif what == 'locate_to':
to_open = data['path']
find_s = data['find']
app.file_open(to_open) ##!!
pass; #log('to_open={}',(to_open))
pass; #log('ed.get_filename()={}',(ed.get_filename()))
m.ag.opts['on_exit_focus_to_ed'] = ed
# Locate
user_opt= app.app_proc(app.PROC_GET_FINDER_PROP, '') \
if app.app_api_version()>='1.0.248' else \
app.app_proc(app.PROC_GET_FIND_OPTIONS, '') # Deprecated
pass; #log('ed_to_fcs.get_filename()={}',(ed_to_fcs.get_filename()))
pass; #log('ed.get_filename()={}',(ed.get_filename()))
pass; #LOG and log('find_s={!r}',(find_s))
ed.cmd(cmds.cmd_FinderAction, chr(1).join(['findnext', find_s, '', 'fa'])) # f - From-caret, a - Wrap
if app.app_api_version()>='1.0.248':
app.app_proc(app.PROC_SET_FINDER_PROP, user_opt)
else:
app.app_proc(app.PROC_SET_FIND_OPTIONS, user_opt) # Deprecated
elif what in ('locate-def', 'locate-opt', 'goto-def', 'goto-opt', ):
if not m.cur_op:
m.stbr_act(M.STBR_MSG, _('Choose option to find in config file'))
return False
oi = m.opts_full[m.cur_op]
pass; #LOG and log('m.cur_op,oi={}',(m.cur_op,oi))
to_open = ''
if what in ('locate-opt', 'goto-opt'):
if 'uval' not in oi and m.for_ulf=='u':
m.stbr_act(M.STBR_MSG, f(_('No user value for option "{}"'), m.cur_op))
return False
if 'lval' not in oi and m.for_ulf=='l':
m.stbr_act(M.STBR_MSG, f(_('No lexer "{}" value for option "{}"'), m.lexr, m.cur_op))
return False
to_open = 'lexer '+m.lexr+'.json' if m.for_ulf=='l' else 'user.json'
to_open = app.app_path(app.APP_DIR_SETTINGS)+os.sep+to_open
else:
if 'def' not in oi:
m.stbr_act(M.STBR_MSG, f(_('No default for option "{}"'), m.cur_op))
return False
to_open = str(m.defn_path)
if not os.path.exists(to_open):
log('No file={}',(to_open))
return False
find_s = f('"{}"', m.cur_op)
if what in ('goto-def', 'goto-opt'):
m.locate_on_exit = d(path=to_open, find=find_s)
return True #
m.do_file('locate_to', d(path=to_open, find=find_s))
return False
#elif what=='set-dfns':
# m.defn_path = data
# m.do_file('load-data')
# return d(ctrls=odict(m.get_cnts('lvls')))
elif what=='set-lexr':
m.opts_full = load_vals(m.opts_defn
,lexr_json='lexer '+m.lexr+'.json'
,user_json=m.how.get('stor_json', 'user.json')
,ed_=m.ed, full=m.all_ops)
return d(ctrls=odict(m.get_cnts('lvls')))
elif what=='out-rprt':
if do_report(HTM_RPT_FILE, 'lexer '+m.lexr+'.json', m.ed):
webbrowser.open_new_tab('file://' +HTM_RPT_FILE)
app.msg_status(_('Opened browser with file ')+HTM_RPT_FILE)
return []
#def do_file
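    # Orientation note (a reading of the branches above, not a formal spec): do_file acts as a
    # small dispatcher; the verbs it handles are 'load-data', 'build-chp-tree', 'locate_to',
    # 'locate-def'/'locate-opt'/'goto-def'/'goto-opt', 'set-lexr' and 'out-rprt'.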
def _prep_opt(self, opts='', ind=-1, nm=None):
""" Prepare vars to show info about current option by
m.cur_op
m.lexr
Return
{} vi-attrs
{} en-attrs
{} val-attrs
{} items-attrs
"""
M,m = self.__class__,self
if opts=='key2ind':
opt_nm = nm if nm else m.cur_op
m.cur_in= index_1([m.SKWULFs[row][1] for row in range(len(m.SKWULFs))], opt_nm, -1)
return m.cur_in
if opts=='ind2key':
opt_in = ind if -1!=ind else m.ag.cval('lvls')
m.cur_op= m.SKWULFs[opt_in][1] if -1<opt_in<len(m.SKWULFs) else ''
return m.cur_op
if opts=='fid4ed':
if not m.cur_op: return 'lvls'
frm = m.opts_full[m.cur_op]['frm']
fid = 'eded' if frm in ('str', 'int', 'float') else \
'edcb' if frm in ('int2s', 'str2s', 'strs', 'font', 'font-e') else \
'edrf' if frm in ('bool',) else \
'brow' if frm in ('hotk', 'file', '#rgb', '#rgb-e') else \
                  'opjs' if frm in ('json',) else \
'lvls'
pass; #LOG and log('m.cur_op,frm,fid={}',(m.cur_op,frm,fid))
return fid
pass; #LOG and log('m.cur_op, m.lexr={}',(m.cur_op, m.lexr))
vis,ens,vas,its,bcl = {},{},{},{},{}
vis['edcl'] = vis['dfcl'] = False
bcl['edcl'] = bcl['dfcl'] = 0x20000000
# bcl['eded'] = bcl['dfvl'] = 0x20000000
ens['eded'] = ens['setd'] = False # All un=F
vis['eded'] = vis['edcb']=vis['edrf']=vis['edrt']=vis['brow']=vis['toop']=vis['opjs'] = False # All vi=F
vas['eded'] = vas['dfvl']=vas['cmnt']= '' # All ed empty
vas['edcb'] = -1
vas['edrf'] = vas['edrt'] = False
its['edcb'] = []
ens['dfvl'] = True
ens['tofi'] = m.cur_op in apx.OPT2PROP
if m.for_ulf=='l' and m.lexr not in m.lexr_l:
# Not selected lexer
vis['eded'] = True
ens['dfvl'] = False
return vis,ens,vas,its,bcl
if m.for_ulf=='f' and m.cur_op not in apx.OPT2PROP:
# No the option for File
vis['eded'] = True
ens['dfvl'] = False
return vis,ens,vas,its,bcl
if not m.cur_op:
# No current option
vis['eded'] = True
else:
# Current option
oi = m.opts_full[m.cur_op]
pass; #LOG and log('oi={}',(oi))
vas['dfvl'] = str(oi.get('jdf' , '')).replace('True', 'true').replace('False', 'false')
vas['uval'] = oi.get('uval', '')
vas['lval'] = oi.get('lval', '')
vas['fval'] = oi.get('fval', '')
vas['cmnt'] = oi.get('cmt' , '')
frm = oi['frm']
ulfvl_va = vas['fval'] \
if m.for_ulf=='f' else \
vas['lval'] \
if m.for_ulf=='l' else \
vas['uval'] # Cur val with cur state of "For lexer"
ens['eded'] = frm not in ('json', 'hotk', 'file')#, '#rgb', '#rgb-e')
ens['setd'] = frm not in ('json',) and ulfvl_va is not None
if False:pass
        elif frm in ('json',):
# vis['toop'] = True
vis['opjs'] = True
vis['eded'] = True
vas['eded'] = str(ulfvl_va)
elif frm in ('str', 'int', 'float'):
vis['eded'] = True
vas['eded'] = str(ulfvl_va)
elif frm in ('hotk', 'file', '#rgb', '#rgb-e'):
vis['eded'] = True
vis['brow'] = True
vas['eded'] = str(ulfvl_va)
vis['edcl'] = frm in ('#rgb', '#rgb-e')
vis['dfcl'] = frm in ('#rgb', '#rgb-e')
bcl['edcl'] = apx.html_color_to_int(ulfvl_va ) if frm in ('#rgb', '#rgb-e') and ulfvl_va else 0x20000000
bcl['dfcl'] = apx.html_color_to_int(vas['dfvl'] ) if frm in ('#rgb', '#rgb-e') and vas['dfvl'] else 0x20000000
elif frm in ('bool',):
vis['edrf'] = True
vis['edrt'] = True
vas['edrf'] = ulfvl_va is False
vas['edrt'] = ulfvl_va is True
elif frm in ('int2s', 'str2s'):
vis['edcb'] = True
ens['edcb'] = True
its['edcb'] = oi['jdc']
vas['edcb'] = index_1([k for (k,v) in oi['dct']], ulfvl_va, -1)
pass; #LOG and log('ulfvl_va, vas[edcb]={}',(ulfvl_va,vas['edcb']))
elif frm in ('strs','font','font-e'):
vis['edcb'] = True
ens['edcb'] = True
its['edcb'] = oi['lst']
vas['edcb'] = index_1(oi['lst'], ulfvl_va, -1)
pass; #LOG and log('ulfvl_va={}',(ulfvl_va))
pass; #LOG and log('vis={}',(vis))
pass; #LOG and log('ens={}',(ens))
pass; #LOG and log('vas={}',(vas))
pass; #LOG and log('its={}',(its))
return vis,ens,vas,its,bcl
#def _prep_opt
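    # A sketch of how _prep_opt is called elsewhere in this class (informal, derived from the code above):
    #   m._prep_opt('key2ind')            # -> row index of m.cur_op in the filtered table
    #   m._prep_opt('ind2key', ind=row)   # -> option name shown in table row `row`
    #   m._prep_opt('fid4ed')             # -> id of the edit control matching the option's format
    #   m._prep_opt()                     # -> (vis, ens, vas, its, bcl) dicts for the value controls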
def show(self
, title # For cap of dlg
):
M,m = self.__class__,self
def when_exit(ag):
pass; #LOG and log('',())
pass; #pr_ = dlg_proc_wpr(ag.id_dlg, app.DLG_CTL_PROP_GET, name='edch')
pass; #log('exit,pr_={}',('edch', {k:v for k,v in pr_.items() if k in ('x','y')}))
pass; #log('cols={}',(ag.cattr('lvls', 'cols')))
m.col_ws= [ci['wd'] for ci in ag.cattr('lvls', 'cols')]
m.stores[m.subset+'cmnt_heght'] = m.ag.cattr('cmnt', 'h')
if m.apply_one and m.apply_need:
ed.cmd(cmds.cmd_OpsReloadAndApply)
if m.locate_on_exit:
m.do_file('locate_to', m.locate_on_exit)
#def when_exit
repro_py = apx.get_opt('dlg_cuda_options.repro_py') # 'repro_dlg_opted.py'
m.dlg_min_w = 10 + sum(M.COL_MWS) + M.COL_N + M.SCROLL_W
m.dlg_w = 10 + sum(m.col_ws) + M.COL_N + M.SCROLL_W
m.dlg_h = 380 + m.h_cmnt +10 + M.STBR_H
# m.dlg_h = 270 + m.h_cmnt +10 + M.STBR_H
pass; #log('m.dlg_w,m.dlg_h={}',(m.dlg_w,m.dlg_h))
m.ag = DlgAgent(
form =dict(cap = title + f(' ({})', VERSION_V)
,resize = True
,w = m.dlg_w ,w_min=m.dlg_min_w
,h = m.dlg_h
,on_resize=m.do_resize
)
, ctrls=m.get_cnts()
, vals =m.get_vals()
, fid ='cond'
,options = ({
'gen_repro_to_file':repro_py, #NOTE: repro
} if repro_py else {})
)
        # Select on pre-show. Reason: Linux skips the selection event after show
m.ag._update_on_call(m.do_sele('lvls', m.ag))
m.stbr = app.dlg_proc(m.ag.id_dlg, app.DLG_CTL_HANDLE, name='stbr')
app.statusbar_proc(m.stbr, app.STATUSBAR_ADD_CELL , tag=M.STBR_ALL)
app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_SIZE , tag=M.STBR_ALL, value=40)
app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_ALIGN , tag=M.STBR_ALL, value='R')
app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_HINT , tag=M.STBR_ALL, value=_('Number of all options'))
app.statusbar_proc(m.stbr, app.STATUSBAR_ADD_CELL , tag=M.STBR_FLT)
app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_SIZE , tag=M.STBR_FLT, value=40)
app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_ALIGN , tag=M.STBR_FLT, value='R')
app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_HINT , tag=M.STBR_FLT, value=_('Number of shown options'))
app.statusbar_proc(m.stbr, app.STATUSBAR_ADD_CELL , tag=M.STBR_MSG)
app.statusbar_proc(m.stbr, app.STATUSBAR_SET_CELL_AUTOSTRETCH , tag=M.STBR_MSG, value=True)
m.stbr_act(M.STBR_ALL, len(m.opts_full))
m.stbr_act(M.STBR_FLT, len(m.opts_full))
stor_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+m.how.get('stor_json', 'user.json')
start_mtime = os.path.getmtime(stor_json) if os.path.exists(stor_json) else 0
m.ag.show(when_exit)
m.ag = None
# Save for next using
m.stores[m.subset+'cur_op'] = m.cur_op
m.stores[m.subset+'col_ws'] = m.col_ws
m.stores[m.subset+'sorts'] = m.sorts
if not m.live_fltr:
m.stores[m.subset+'h.cond'] = m.cond_hl
m.stores[m.subset+'all_ops'] = m.all_ops
set_hist('dlg', m.stores)
return start_mtime != (os.path.getmtime(stor_json) if os.path.exists(stor_json) else 0)
#def show
def get_cnts(self, what=''):
M,m = self.__class__,self
reNotWdChar = re.compile(r'\W')
def test_fltr(fltr_s, op, oi):
if not fltr_s: return True
pass; #LOG and log('fltr_s, op, oi[!]={}',(fltr_s, op, oi['!']))
if '!!!' in fltr_s and '!!!' not in oi['!']: return False
if '!!' in fltr_s and '!!' not in oi['!']: return False
pass; #LOG and log('skip !!',())
if '!' in fltr_s and '!' not in oi['!']: return False
pass; #LOG and log('skip !',())
text = op \
+ (' '+oi.get('cmt', '') if '#' in fltr_s else '')
text = text.upper()
fltr_s = fltr_s.replace('!', '').replace('#', '').upper()
if '<' in fltr_s or '>' in fltr_s:
text = '·' + reNotWdChar.sub('·', text) + '·'
fltr_s = ' ' + fltr_s + ' '
fltr_s = fltr_s.replace(' <', ' ·').replace('> ', '· ')
pass; #LOG and log('fltr_s, text={}',(fltr_s, text))
return all(map(lambda c:c in text, fltr_s.split()))
#def test_fltr
def get_tbl_cols(sorts, col_ws):
cnms = list(M.COL_NMS)
cnms[M.COL_FIL] = f(cnms[M.COL_FIL], m.ed.get_prop(app.PROP_TAB_TITLE))
cols = [d(nm=sorts_sign(sorts, c) + cnms[c]
,wd=col_ws[c]
,mi=M.COL_MWS[c]
) for c in range(M.COL_N)]
cols[M.COL_OVR]['al'] = 'C'
if m.how.get('hide_fil', False):
pos_fil = M.COL_NMS.index(M.COL_FILE)
cols[pos_fil]['vi'] = False
if m.how.get('hide_lex_fil', False):
pos_lex = M.COL_NMS.index(M.COL_LEXR)
pos_fil = M.COL_NMS.index(M.COL_FILE)
cols[pos_lex]['vi'] = False
cols[pos_fil]['vi'] = False
return cols
#def get_tbl_cols
def get_tbl_data(opts_full, cond_s, ops_only, sorts, col_ws):
# Filter table data
pass; #LOG and log('cond_s={}',(cond_s))
pass; #log('opts_full/tab_s={}',({o:oi for o,oi in opts_full.items() if o.startswith('tab_s')}))
chp_cond = ''
chp_no_c = False
if '@' in cond_s:
# Prepare to match chapters
chp_cond = ' '.join([mt.group(1) for mt in re.finditer(r'@([\w/]+)' , cond_s)]).upper() # @s+ not empty chp
chp_cond = chp_cond.replace(M.NO_CHAP.upper(), '').strip()
chp_no_c = '@'+M.NO_CHAP in cond_s
cond_s = re.sub( r'@([\w/]*)', '', cond_s) # @s* clear @ and cph
pass; #log('chp_cond, chp_no_c, cond_s={}',(chp_cond, chp_no_c, cond_s))
SKWULFs = [ (oi.get('chp','')
,op
,oi['!']
,str(oi.get('jdf' ,'')).replace('True', 'true').replace('False', 'false')
,str(oi.get('juvl','')).replace('True', 'true').replace('False', 'false')
,str(oi.get('jlvl','')).replace('True', 'true').replace('False', 'false')
,str(oi.get('jfvl','')).replace('True', 'true').replace('False', 'false')
,oi['frm']
)
for op,oi in opts_full.items()
# if (not chp_cond or chp_cond in oi.get('chp', '').upper())
if (not chp_cond or any((chp_cond in oi.get('chp', '').upper()) for chp_cond in chp_cond.split()))
and (not chp_no_c or not oi.get('chp', ''))
and (not cond_s or test_fltr(cond_s, op, oi))
and (not ops_only or op in ops_only)
]
# Sort table data
SKWULFs = sorts_sort(sorts, SKWULFs)
# Fill table
pass; #LOG and log('M.COL_NMS,col_ws,M.COL_MWS={}',(len(M.COL_NMS),len(col_ws),len(M.COL_MWS)))
cols = get_tbl_cols(sorts, col_ws)
itms = (list(zip([_('Section'),_('Option'), '', _('Default'), _('User'), _('Lexer'), _('File')], map(str, col_ws)))
#, [ (str(n)+':'+sc,k ,w ,dv ,uv ,lv ,fv) # for debug
#, [ (sc+' '+fm ,k ,w ,dv ,uv ,lv ,fv) # for debug
, [ (sc ,k ,w ,dv ,uv ,lv ,fv) # for user
for n,( sc ,k ,w ,dv ,uv ,lv ,fv, fm) in enumerate(SKWULFs) ]
)
return SKWULFs, cols, itms
#def get_tbl_data
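        # Note (a reading of the code above, not a spec): each SKWULFs row is a tuple
        #   (section, option key, where-set marks, default, user value, lexer value, file value, format),
        # and the table columns below render the first seven fields.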
if not what or '+lvls' in what:
m.SKWULFs,\
m.cols ,\
m.itms = get_tbl_data(m.opts_full, m.cond_s, m.ops_only, m.sorts, m.col_ws)
if 'stbr' in dir(m):
m.stbr_act(M.STBR_FLT, len(m.SKWULFs))
if '+cols' in what:
pass; #LOG and log('m.col_ws={}',(m.col_ws))
m.cols = get_tbl_cols(m.sorts, m.col_ws)
pass; #LOG and log('m.cols={}',(m.cols))
# Prepare [Def]Val data by m.cur_op
vis,ens,vas,its,bcl = m._prep_opt()
ed_s_c = _('>Fil&e:') if m.for_ulf=='f' else \
_('>L&exer:') if m.for_ulf=='l' else \
_('>Us&er:')
cnts = []
if '+cond' in what:
cnts += [0
,('cond',d(items=m.cond_hl))
][1:]
if '+cols' in what or '=cols' in what:
cnts += [0
,('lvls',d(cols=m.cols))
][1:]
if '+lvls' in what or '=lvls' in what:
cnts += [0
,('lvls',d(cols=m.cols, items=m.itms))
][1:]
        tofi_en = not m.how.get('only_for_ul', not ens['tofi'])        # Forbid to switch to File ops
if '+cur' in what:
cnts += [0
,('ed_s',d(cap=ed_s_c ,hint=m.cur_op ))
# ,('eded',d(vis=vis['eded'] ,sto=ens['eded'] ,color=bcl['eded'] ))
# ,('eded',d(vis=vis['eded'],ex0=not ens['eded'],sto=ens['eded'] ,color=bcl['eded'] ))
# ,('eded',d(vis=vis['eded'],en=ens['eded'] ,color=bcl['eded'] ))
,('eded',d(vis=vis['eded'],en=ens['eded'] ))
,('edcl',d(vis=vis['edcl'] ,color=bcl['edcl'] ))
,('edcb',d(vis=vis['edcb'] ,items=its['edcb'] ))
,('edrf',d(vis=vis['edrf'] ))
,('edrt',d(vis=vis['edrt'] ))
,('brow',d(vis=vis['brow'] ))
,('toop',d(vis=vis['toop'] ))
,('opjs',d(vis=vis['opjs'] ))
,('dfv_',d( hint=m.cur_op ))
,('dfvl',d( ))
# ,('dfvl',d( en=ens['dfvl'] ,color=bcl['dfvl'] ))
,('dfcl',d(vis=vis['dfcl'] ,color=bcl['dfcl'] ))
,('setd',d( en=ens['setd'] ))
,('tofi',d( en=tofi_en ))
][1:]
if what and cnts:
# Part info
return cnts
# Full dlg controls info #NOTE: cnts
edit_h = get_gui_height('edit')
cmnt_t = m.dlg_h-m.h_cmnt-5-M.STBR_H
tofi_c = m.ed.get_prop(app.PROP_TAB_TITLE)
co_tp = 'ed' if m.live_fltr else 'cb'
cnts = [0 #
# Hidden buttons
,('flt-',d(tp='bt' ,cap='&l' ,sto=False ,t=-99,l=0,w=44)) # &l
,('fltr',d(tp='bt' ,cap='' ,sto=False ,def_bt='1' ,t=-99,l=0,w=44)) # Enter
,('srt0',d(tp='bt' ,cap='&1' ,sto=False ,t=-99,l=0,w=44)) # &1
,('srt1',d(tp='bt' ,cap='&2' ,sto=False ,t=-99,l=0,w=44)) # &2
,('srt2',d(tp='bt' ,cap='&3' ,sto=False ,t=-99,l=0,w=44)) # &3
,('srt3',d(tp='bt' ,cap='&4' ,sto=False ,t=-99,l=0,w=44)) # &4
,('srt4',d(tp='bt' ,cap='&5' ,sto=False ,t=-99,l=0,w=44)) # &5
,('srt5',d(tp='bt' ,cap='&6' ,sto=False ,t=-99,l=0,w=44)) # &6
,('srt6',d(tp='bt' ,cap='&7' ,sto=False ,t=-99,l=0,w=44)) # &7
,('srt-',d(tp='bt' ,cap='&9' ,sto=False ,t=-99,l=0,w=44)) # &9
,('cws-',d(tp='bt' ,cap='&W' ,sto=False ,t=-99,l=0,w=44)) # &w
,('cpnm',d(tp='bt' ,cap='&C' ,sto=False ,t=-99,l=0,w=44)) # &c
,('erpt',d(tp='bt' ,cap='&O' ,sto=False ,t=-99,l=0,w=44)) # &o
,('apnw',d(tp='bt' ,cap='&Y' ,sto=False ,t=-99,l=0,w=44)) # &y
,('help',d(tp='bt' ,cap='&H' ,sto=False ,t=-99,l=0,w=44)) # &h
# Top-panel
,('ptop',d(tp='pn' ,h= 270 ,w=m.dlg_w ,ali=ALI_CL
,h_min=270 ))
# Menu
,('menu',d(tp='bt' ,tid='cond' ,l=-40-5,w= 40 ,p='ptop' ,cap='&=' ,a='LR' )) # &=
# Filter
,('chps',d(tp='bt' ,tid='cond' ,l=-270 ,r=-180 ,p='ptop' ,cap=_('+&Section…') ,hint=M.CHPS_H ,a='LR' )) # &s
,('flt_',d(tp='lb' ,tid='cond' ,l= 5 ,w= 70 ,p='ptop' ,cap='>'+M.FILTER_C+':' ,hint=M.FLTR_H )) # &f
,('cond',d(tp=co_tp,t= 5 ,l= 78 ,r=-270 ,p='ptop' ,items=m.cond_hl ,a='lR' )) #
#,('cond',d(tp='cb' ,t= 5 ,l= 78 ,r=-270 ,p='ptop' ,items=m.cond_hl ,a='lR' )) #
# Table of keys+values
,('lvls',d(tp='lvw',t= 35,h=160,l= 5 ,r= -5 ,p='ptop' ,items=m.itms,cols=m.cols ,grid='1' ,a='tBlR' )) #
# Editors for value
,('ed_s',d(tp='lb' ,t=210 ,l= 5 ,w= 70 ,p='ptop' ,cap=ed_s_c ,hint=m.cur_op ,a='TB' )) # &e
,('eded',d(tp='ed' ,tid='ed_s' ,l= 78 ,r=-270 ,p='ptop' ,vis=vis['eded'],ex0=not ens['eded'],a='TBlR' )) #
#,('eded',d(tp='ed' ,tid='ed_s' ,l= 78 ,r=-270 ,p='ptop' ,vis=vis['eded'],en=ens['eded'] ,a='TBlR' )) #
,('edcl',d(tp='clr',t=210-2 ,l= 210 ,r=-271 ,p='ptop' ,h=edit_h-4 ,vis=vis['edcl'],border=True ,a='TBlR' )) #
,('edcb',d(tp='cbr',tid='ed_s' ,l= 78 ,r=-270 ,p='ptop' ,items=its['edcb'] ,vis=vis['edcb'] ,a='TBlR' )) #
,('edrf',d(tp='ch' ,tid='ed_s' ,l= 78 ,w= 60 ,p='ptop' ,cap=_('f&alse') ,vis=vis['edrf'] ,a='TB' )) # &a
,('edrt',d(tp='ch' ,tid='ed_s' ,l= 140 ,w= 60 ,p='ptop' ,cap=_('t&rue') ,vis=vis['edrt'] ,a='TB' )) # &r
,('brow',d(tp='bt' ,tid='ed_s' ,l=-270 ,w= 90 ,p='ptop' ,cap=_('&...') ,vis=vis['brow'] ,a='TBLR' )) # &.
,('toop',d(tp='bt' ,tid='ed_s' ,l=-270 ,w= 90 ,p='ptop' ,cap=_('&GoTo') ,vis=vis['toop'],hint=M.TOOP_H ,a='TBLR' )) # &g
,('opjs',d(tp='bt' ,tid='ed_s' ,l=-270 ,w= 90 ,p='ptop' ,cap=_('E&dit') ,vis=vis['opjs'],hint=M.OPME_H ,a='TBLR' )) # &d
# View def-value
,('dfv_',d(tp='lb' ,tid='dfvl' ,l= 5 ,w= 70 ,p='ptop' ,cap=_('>Defa&ult:') ,hint=m.cur_op ,a='TB' )) # &u
#,('dfvl',d(tp='ed' ,t=235 ,l= 78 ,r=-270 ,p='ptop' ,en=False ,sto=False ,a='TBlR' )) #
,('dfvl',d(tp='ed' ,t=235 ,l= 78 ,r=-270 ,p='ptop' ,ex0=True ,sto=False ,a='TBlR' )) #
#,('dfvl',d(tp='ed' ,t=235 ,l= 78 ,r=-270 ,p='ptop' ,ro_mono_brd='1,0,1' ,sto=False ,a='TBlR' )) #
,('dfcl',d(tp='clr',t=235+1 ,l= 210 ,r=-271 ,p='ptop' ,h=edit_h-4 ,vis=vis['dfcl'],border=True ,a='TBlR' )) #
,('setd',d(tp='bt' ,tid='dfvl' ,l=-270 ,w= 90 ,p='ptop' ,cap=_('Rese&t') ,en=ens['setd'] ,a='TBLR' )) # &t
# For lexer/file
#,('to__',d(tp='lb' ,tid='ed_s' ,l=-170 ,w= 30 ,p='ptop' ,cap=_('>For:') ,a='TBLR' )) #
,('to__',d(tp='lb' ,tid='ed_s' ,l=-165 ,w= 30 ,p='ptop' ,cap=_('For:') ,a='TBLR' )) #
,('tolx',d(tp='ch' ,tid='ed_s' ,l=-140 ,w= 70 ,p='ptop' ,cap=_('Le&xer') ,a='TBLR' )) # &x
,('tofi',d(tp='ch' ,tid='ed_s' ,l=- 90 ,w= 70 ,p='ptop' ,cap=_('F&ile') ,hint=tofi_c ,en=tofi_en ,a='TBLR' )) # &i
,('lexr',d(tp='cbr',tid='dfvl' ,l=-165 ,w= 160 ,p='ptop' ,items=m.lexr_w_l ,a='TBLR' ))
# Comment
,('cmsp',d(tp='sp' ,y=cmnt_t-5 ,ali=ALI_BT,sp_lr=5 ))
,('cmnt',d(tp='me' ,t=cmnt_t ,h= m.h_cmnt
,h_min=M.CMNT_MHT ,ali=ALI_BT,sp_lrb=5 ,ro_mono_brd='1,1,1' ))
,('stbr',d(tp='sb' ,y=-M.STBR_H
,h= M.STBR_H ,ali=ALI_BT ))
][1:]
if 'mac'==get_desktop_environment():
cnts = [(cid,cnt) for cid,cnt in cnts if cnt.get('cap', '')[:3]!='srt']
cnts = odict(cnts)
if m.how.get('hide_fil', False):
for cid in ('tofi',):
cnts[cid]['vis'] = False
if m.how.get('hide_lex_fil', False):
for cid in ('to__', 'tolx', 'lexr', 'tofi'):
cnts[cid]['vis'] = False
for cnt in cnts.values():
if 'l' in cnt: cnt['l'] = m.dlg_w+cnt['l'] if cnt['l']<0 else cnt['l']
if 'r' in cnt: cnt['r'] = m.dlg_w+cnt['r'] if cnt['r']<0 else cnt['r']
if 'y' in cnt: cnt['y'] = m.dlg_h+cnt['y'] if cnt['y']<0 else cnt['y']
cnts['menu']['call'] = m.do_menu
cnts['chps']['call'] = m.do_menu
cnts['cpnm']['call'] = m.do_menu
cnts['erpt']['call'] = m.do_menu
cnts['apnw']['call'] = m.do_menu
cnts['flt-']['call'] = m.do_fltr
cnts['fltr']['call'] = m.do_fltr
if m.live_fltr:
cnts['cond']['call'] = m.do_fltr
cnts['lexr']['call'] = m.do_lxfi
cnts['tolx']['call'] = m.do_lxfi
cnts['tofi']['call'] = m.do_lxfi
cnts['lvls']['call'] = m.do_sele
cnts['lvls']['on_click_header'] = m.do_sort
cnts['srt0']['call'] = m.do_sort
cnts['srt1']['call'] = m.do_sort
cnts['srt2']['call'] = m.do_sort
cnts['srt3']['call'] = m.do_sort
cnts['srt4']['call'] = m.do_sort
cnts['srt5']['call'] = m.do_sort
cnts['srt6']['call'] = m.do_sort
cnts['srt-']['call'] = m.do_sort
cnts['cmsp']['call'] = m.do_cust
cnts['cws-']['call'] = m.do_cust
cnts['lvls']['on_click_dbl'] = m.do_dbcl #lambda idd,idc,data:print('on dbl d=', data)
cnts['setd']['call'] = m.do_setv
cnts['edcb']['call'] = m.do_setv
cnts['edrf']['call'] = m.do_setv
cnts['edrt']['call'] = m.do_setv
cnts['brow']['call'] = m.do_setv
cnts['toop']['call'] = m.do_setv
cnts['opjs']['call'] = m.do_setv
cnts['help']['call'] = m.do_help
return cnts
#def get_cnts
def get_vals(self, what=''):
M,m = self.__class__,self
m.cur_in = m._prep_opt('key2ind')
if not what or 'cur' in what:
vis,ens,vas,its,bcl = m._prep_opt()
if not what:
# all
return dict(cond=m.cond_s
,lvls=m.cur_in
,eded=vas['eded']
,edcb=vas['edcb']
,edrf=vas['edrf']
,edrt=vas['edrt']
,dfvl=vas['dfvl']
,cmnt=vas['cmnt']
,tolx=m.for_ulf=='l'
,tofi=m.for_ulf=='f'
,lexr=m.lexr_l.index(m.lexr) if m.lexr in m.lexr_l else -1
)
if '+' in what:
rsp = dict()
if '+lvls' in what:
rsp.update(dict(
lvls=m.cur_in
))
if '+cur' in what:
rsp.update(dict(
eded=vas['eded']
,edcb=vas['edcb']
,edrf=vas['edrf']
,edrt=vas['edrt']
,dfvl=vas['dfvl']
,cmnt=vas['cmnt']
))
if '+inlxfi' in what:
rsp.update(dict(
tolx=m.for_ulf=='l'
,tofi=m.for_ulf=='f'
))
pass; #LOG and log('rsp={}',(rsp))
return rsp
if what=='lvls':
return dict(lvls=m.cur_in
)
if what=='lvls-cur':
return dict(lvls=m.cur_in
,eded=vas['eded']
,edcb=vas['edcb']
,edrf=vas['edrf']
,edrt=vas['edrt']
,dfvl=vas['dfvl']
,cmnt=vas['cmnt']
)
if what=='cur':
return dict(eded=vas['eded']
,edcb=vas['edcb']
,edrf=vas['edrf']
,edrt=vas['edrt']
,dfvl=vas['dfvl']
,cmnt=vas['cmnt']
)
#def get_vals
def do_resize(self, ag):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
f_w = ag.fattr('w')
l_w = ag.cattr('lvls', 'w')
pass; #LOG and log('f_w,l_w={}',(f_w,l_w))
if f_w < m.dlg_min_w: return [] # fake event
m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
if f_w == m.dlg_min_w and m.col_ws!=M.COL_MWS:
return m.do_cust('cws-', ag)
sum_ws = sum(m.col_ws)
pass; #LOG and log('l_w,sum_ws={}',(l_w,sum_ws))
if sum_ws >= (l_w - M.COL_N - M.SCROLL_W):return [] # decrease dlg - need user choice
# Auto increase widths of def-val and user-val cols
extra = int((l_w - M.COL_N - M.SCROLL_W - sum_ws)/2)
pass; #LOG and log('extra={}',(extra))
pass; #LOG and log('m.col_ws={}',(m.col_ws))
m.col_ws[3] += extra
m.col_ws[4] += extra
pass; #LOG and log('m.col_ws={}',(m.col_ws))
return d(ctrls=m.get_cnts('+cols'))
#def do_resize
def do_cust(self, aid, ag, data=''):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
pass; #LOG and log('aid={}',(aid))
if False:pass
elif aid=='cmsp':
# Splitter moved
sp_y = ag.cattr('cmsp', 'y')
return []
##??
elif aid=='cws-':
# Set def col widths
m.col_ws = M.COL_MWS[:]
m.stores.pop(m.subset+'col_ws', None)
return d(ctrls=m.get_cnts('+cols'))
elif aid=='vali':
if dlg_valign_consts():
return d(ctrls=m.get_cnts())
return []
elif aid=='rslt':
# Restore dlg/ctrls sizes
fpr = ag.fattrs()
layout = data
m.col_ws = layout.get('col_ws', m.col_ws)
cmnt_h = layout.get('cmnt_h', ag.cattr('cmnt', 'h'))
dlg_h = layout.get('dlg_h' , fpr['h'])
dlg_w = layout.get('dlg_w' , fpr['w'])
return d(ctrls=
m.get_cnts('+cols')+
[('cmnt', d(h=cmnt_h))
,('stbr', d(y=dlg_h)) # Hack to push it at bottom (by Alex)
],form=d(
h=dlg_h
,w=dlg_w
))
elif aid=='svlt':
# Save dlg/ctrls sizes
m.col_ws = [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
layout = data
fpr = ag.fattrs()
layout['dlg_w'] = fpr['w']
layout['dlg_h'] = fpr['h']
layout['cmnt_h']= ag.cattr('cmnt', 'h')
layout['col_ws']= m.col_ws
#def do_cust
def do_menu(self, aid, ag, data=''):
pass; #LOG and log('aid={}',(aid))
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
scam = app.app_proc(app.PROC_GET_KEYSTATE, '')
if scam=='c' and aid=='menu':
return m.do_cust('vali', ag)
def wnen_menu(ag, tag):
pass; #LOG and log('tag={}',(tag))
if False:pass
elif tag[:3]=='ch:':
return m.do_fltr('chps', ag, tag[3:])
elif tag=='srt-':
return m.do_sort('', ag, -1)
elif tag[:3]=='srt':
return m.do_sort('', ag, int(tag[3]))
elif tag=='cws-':
return m.do_cust(tag, ag)
elif tag=='vali':
return m.do_cust(tag, ag)
# elif tag=='lubk':
# if app.ID_OK != app.msg_box(
# _('Restore user settings from backup copy?')
# , app.MB_OKCANCEL+app.MB_ICONQUESTION): return []
# return m.do_file('restore-user')
# elif tag=='llbk':
# if app.ID_OK != app.msg_box(
# f(_('Restore lexer "{}" settings from backup copy?'), m.lexr)
# , app.MB_OKCANCEL+app.MB_ICONQUESTION): return []
# return m.do_file('restore-lexr')
# elif tag=='dobk':
# m.stores[m.subset+'bk_sets'] = m.bk_sets = not m.bk_sets
# return []
# elif tag=='dfns':
# m.col_ws = [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
# new_file = app.dlg_file(True, m.defn_path.name, str(m.defn_path.parent), 'JSONs|*.json')
# if not new_file or not os.path.isfile(new_file): return []
# return m.do_file('set-dfns', new_file)
elif tag=='full':
m.col_ws = [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
m.all_ops = not m.all_ops
m.opts_full = load_vals(m.opts_defn
,lexr_json='lexer '+m.lexr+'.json'
,user_json=m.how.get('stor_json', 'user.json')
, ed_=m.ed, full=m.all_ops)
m.cur_op = m.cur_op if m.cur_op in m.opts_full else ''
m.do_file('build-chp-tree')
m.stbr_act(M.STBR_ALL, len(m.opts_full))
return d(ctrls=odict(m.get_cnts('+lvls +cur')))
if tag=='apex':
m.apply_one = not m.apply_one
m.stores[m.subset+'apply_one'] = m.apply_one
if tag=='apnw':
ed.cmd(cmds.cmd_OpsReloadAndApply)
if tag=='aufi':
m.auto4file = not m.auto4file
m.stores[m.subset+'auto4file'] = m.auto4file
if tag=='lifl':
m.stores[m.subset+'live_fltr'] = not m.stores.get(m.subset+'live_fltr' , False)
M.restart = True
M.restart_cond = ag.cval('cond')
return None # Close dlg
elif tag=='cpnm':
app.app_proc(app.PROC_SET_CLIP, m.cur_op)
elif tag=='erpt':
body = '\n'.join(m.chng_rpt)
                dlg_wrapper(_('Change log')        , 500+10 ,400+10,
[ dict(cid='body',tp='me' ,l=5,w=500 ,t=5,h=400, ro_mono_brd='1,0,0')]
, dict(body=body), focus_cid='body')
elif tag=='locv':
# m.do_file('locate-opt') # while wait core fix
if m.do_file('goto-opt'): return None # need close dlg
elif tag=='locd':
# m.do_file('locate-def') # while wait core fix
if m.do_file('goto-def'): return None # need close dlg
elif tag[:4] in ('rslt', 'rmlt', 'svlt'):
layouts_l = m.stores.get(m.subset+'layouts', []) # [{nm:Nm, dlg_h:H, dlg_w:W, ...}]
layouts_d = {lt['nm']:lt for lt in layouts_l}
lt_i = int(tag[4:]) if tag[:4] in ('rslt', 'rmlt') else -1
layout = layouts_l[lt_i] if lt_i>=0 else None
if 0:pass
elif tag[:4]=='rmlt':
if app.ID_OK != app.msg_box(
f(_('Remove layout "{}"?'), layout['nm'])
, app.MB_OKCANCEL+app.MB_ICONQUESTION): return []
del layouts_l[lt_i]
elif tag=='svlt':
nm_tmpl = _('#{}')
layout_nm = f(nm_tmpl
,first_true(itertools.count(1+len(layouts_d))
,pred=lambda n:f(nm_tmpl, n) not in layouts_d)) # First free #N after len()
while True:
pass; #LOG and log('layout_nm={!r}',(layout_nm))
layout_nm = app.dlg_input('Name to save current sizes of the dialog and controls', layout_nm)
if not layout_nm: return []
layout_nm = layout_nm.strip()
if not layout_nm: return []
if layout_nm in layouts_d and \
app.ID_OK != app.msg_box(
f(_('Name "{}" already used. Overwrite?'), layout_nm)
, app.MB_OKCANCEL+app.MB_ICONQUESTION): continue
break
layout = None
if layout_nm in layouts_d:
layout = layouts_d[layout_nm] # Overwrite
else:
layout = d(nm=layout_nm) # Create
layouts_l+=[layout]
# Fill
m.do_cust( 'svlt', ag, layout)
elif tag[:4]=='rslt':
return m.do_cust('rslt', ag, layout)
# Save
m.stores[m.subset+'layouts'] = layouts_l
return []
elif tag=='rprt':
m.do_file('out-rprt')
elif tag=='help':
return m.do_help('', ag)
return []
#def wnen_menu
pass; #LOG and log('',())
if aid=='chps':
def tree2menu(node, chp=''):
mn_l = [ d( tag='ch:'+ node['path']
, cap=f('{} ({})', chp, len(node['ops']))
, cmd=wnen_menu)
,d( cap='-')
] if chp else []
for chp,kid in node['kids'].items():
mn_l +=([d( cap=f('{} ({})', chp, len(kid['ops']))
, sub=tree2menu(kid, chp))
]
if 'kids' in kid else
[d( tag='ch:'+ kid['path']
, cap=f('{} ({})', chp, len(kid['ops']))
, cmd=wnen_menu)
]
)
return mn_l
#def tree2menu
mn_its = tree2menu(m.chp_tree)
ag.show_menu('chps', mn_its)
if aid=='apnw': return wnen_menu(ag, aid)
if aid=='cpnm': return wnen_menu(ag, aid)
if aid=='erpt': return wnen_menu(ag, aid)
if aid=='menu':
locv_c = f(M.LOCV_C, m.cur_op)
locd_c = f(M.LOCD_C, m.cur_op)
lts_l = m.stores.get(m.subset+'layouts', []) # [{nm:Nm, dlg_h:H, dlg_w:W, ...}]
            full_en  = not m.how.get('only_with_def', False)   # Forbid to switch to User+Lexer ops
live_fltr=m.stores.get(m.subset+'live_fltr' , False)
pass; #lts_l = [d(nm='Nm1'), d(nm='Nm2')]
mn_its = \
[ d(tag='cpnm' ,cap=_('&Copy option name') ,key='Alt+C'
),d( cap='-'
),d( cap=_('&Layout') ,sub=
[ d(tag='svlt' ,cap=_('&Save current layout...')
),d( cap='-'
)]+ (
[ d(tag='rslt'+str(nlt) ,cap=f(_('Restore layout "{}"'), lt['nm'])) for nlt, lt in enumerate(lts_l)
]+
[ d( cap=_('&Forget layout'),sub=
[ d(tag='rmlt'+str(nlt) ,cap=f(_('Forget layout "{}"...'), lt['nm'])) for nlt, lt in enumerate(lts_l)
])
] if lts_l else []) +
[ d( cap='-'
),d(tag='vali' ,cap=_('Adjust vertical alignments...')
),d(tag='cws-' ,cap=_('Set default columns &widths') ,key='Alt+W'
)]
),d( cap=_('&Table') ,sub=
[ d(tag='srt'+str(cn) ,cap=f(_('Sort by column "{}"'), cs.split()[0])
,ch=sorts_on(m.sorts, cn)
,key='Alt+'+str(1+cn))
for cn, cs in enumerate(M.COL_NMS)
]+
[ d( cap='-'
),d(tag='srt-' ,cap=_('Reset sorting') ,key='Alt+9'
)]
),d( cap=_('M&ore') ,sub=
[ d(tag='locv' ,cap=locv_c ,en=bool(m.cur_op)
),d(tag='locd' ,cap=locd_c ,en=bool(m.cur_op)
),d( cap='-'
),d(tag='erpt' ,cap=_('Show rep&ort of changes...') ,key='Alt+O'
),d(tag='apex' ,cap=_('Apply changes on exit') ,ch=m.apply_one
),d(tag='apnw' ,cap=_('Appl&y changes now') ,en=m.apply_need ,key='Alt+Y'
),d(tag='aufi' ,cap=_('Auto-update FILE options') ,ch=m.auto4file
),d( cap='-'
),d(tag='lifl' ,cap=M.LIFL_C ,ch=live_fltr
),d( cap='-'
),d(tag='full' ,cap=M.FULL_C ,ch=m.all_ops ,en=full_en
)]
),d( cap='-'
),d( tag='rprt' ,cap=_('Create HTML &report')
),d( cap='-'
),d( tag='help' ,cap=_('&Help...') ,key='Alt+H'
)]
pass; #LOG and log('mn_its=¶{}',pf(mn_its))
def add_cmd(its):
for it in its:
if 'sub' in it: add_cmd(it['sub'])
else: it['cmd']=wnen_menu
add_cmd(mn_its)
ag.show_menu(aid, mn_its)
return []
#def do_menu
def do_fltr(self, aid, ag, data=''):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
fid = ag.fattr('fid')
pass; #LOG and log('aid,fid={}',(aid,fid))
if aid=='fltr' and fid in ('dfvl', 'eded', 'edrf', 'edrt'):
# Imitate default button
return m.do_setv('setd' if fid in ('dfvl',) else
'setv' if fid in ('eded',) else
fid if fid in ('edrf', 'edrt') else
''
, ag)
if aid=='cond':
pass; #LOG and log('ag.cval(cond)={}',(ag.cval('cond')))
m.cond_s = ag.cval('cond')
fid = '' if m.live_fltr else 'lvls'
if aid=='fltr':
m.cond_s = ag.cval('cond')
m.cond_hl = add_to_history(m.cond_s, m.cond_hl) if m.cond_s and not m.live_fltr else m.cond_hl
fid = 'lvls'
if aid=='flt-':
m.cond_s = ''
fid = 'cond'
if aid=='chps':
# Append selected chapter as filter value
scam = app.app_proc(app.PROC_GET_KEYSTATE, '')
path = '@'+data
if path not in m.cond_s:
if scam!='c':
m.cond_s= re.sub(r'@([\w/]*)', '', m.cond_s).strip() # del old
m.cond_s = (m.cond_s+' '+path).strip() # add new
m.cond_hl = add_to_history(m.cond_s, m.cond_hl) if not m.live_fltr else m.cond_hl
fid = 'cond'
# Select old/new op
m.cur_op= m._prep_opt('ind2key')
ctrls = m.get_cnts('+lvls')
m.cur_in= m._prep_opt('key2ind')
if m.cur_in<0 and m.SKWULFs:
# Sel top if old hidden
m.cur_in= 0
m.cur_op= m._prep_opt('ind2key', ind=m.cur_in)
return d(ctrls=m.get_cnts('+cond =lvls +cur')
,vals =m.get_vals()
,form =d(fid=fid)
)
#def do_fltr
def do_sort(self, aid, ag, col=-1):
scam = app.app_proc(app.PROC_GET_KEYSTATE, '')
pass; #LOG and log('col,scam={}',(col,scam))
pass; #return []
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
if aid=='srt-' or col==-1:
m.sorts = sorts_dflt(len(M.COL_NMS))
else:
col = int(aid[3]) if aid[:3]=='srt' else col
pass; #LOG and log('?? m.sorts={}',(m.sorts))
m.sorts = sorts_turn(m.sorts, col, scam)
pass; #LOG and log('ok m.sorts={}',(m.sorts))
old_in = m._prep_opt('key2ind')
ctrls = m.get_cnts('+lvls')
if old_in==0:
# Set top if old was top
m.cur_in= 0
m.cur_op= m._prep_opt('ind2key', ind=m.cur_in)
else:
# Save old op
m.cur_in= m._prep_opt('key2ind')
return d(ctrls=m.get_cnts('=lvls +cur')
,vals =m.get_vals()
)
#def do_sort
def do_sele(self, aid, ag, data=''):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
pass; #LOG and log('data,m.cur_op,m.cur_in={}',(data,m.cur_op,m.cur_in))
m.cur_op= m._prep_opt('ind2key')
pass; #LOG and log('m.cur_op,m.cur_in={}',(m.cur_op,m.cur_in))
pass; #log('###m.get_cnts(+cur)={}',(m.get_cnts('+cur')))
return d(ctrls=odict(m.get_cnts('+cur'))
,vals = m.get_vals('cur')
)
#def do_sele
def do_lxfi(self, aid, ag, data=''):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
pass; #LOG and log('aid={}',(aid))
m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
if False:pass
elif aid in ('tolx', 'tofi'):
# Changed "For Lexer/File"
m.for_ulf = 'l' if aid=='tolx' and ag.cval('tolx') else \
'f' if aid=='tofi' and ag.cval('tofi') else \
'u'
fid = 'lexr' \
if m.for_ulf=='l' and m.lexr not in m.lexr_l else \
m._prep_opt('fid4ed')
return d(ctrls=m.get_cnts('+cur')
,vals =m.get_vals('+cur+inlxfi')
,form =d(fid=fid)
)
elif aid=='lexr':
# Change current lexer
lexr_n = ag.cval('lexr')
m.lexr = m.lexr_l[lexr_n] if lexr_n>=0 else ''
m.cur_op= m._prep_opt('ind2key')
m.do_file('load-data')
ctrls = m.get_cnts('+lvls')
m.cur_in= m._prep_opt('key2ind')
if m.cur_in<0 and m.SKWULFs:
# Sel top if old hidden
m.cur_in= 0
m.cur_op= m._prep_opt('ind2key', ind=m.cur_in)
elif m.cur_in<0:
m.cur_op= ''
return d(ctrls=m.get_cnts('=lvls +cur')
,vals =m.get_vals()#'+lvls +cur')
)
#def do_lxfi
def do_dbcl(self, aid, ag, data=''):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
pass; #LOG and log('data,m.cur_op,m.cur_in={}',(data,m.cur_op,m.cur_in))
m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
if aid!='lvls': return []
# Dbl-click on lvls cell
if sum(m.col_ws) > ag.cattr('lvls', 'w') - M.SCROLL_W:
# Has hor-scrolling
pass; #LOG and log('skip as h-scroll',())
return []
op_r = ag.cval('lvls')
op_c = next(filter( # next(filter())==first_true
lambda col_n_sw: col_n_sw[1]>data[0] # > x from click (x,y)
, enumerate(accumulate(m.col_ws)) # (n_col, sum(col<=n))
), [-1, -1
])[0]
pass; #LOG and log('op_r,op_c,m.cur_op,m.cur_in={}',(op_r,op_c,m.cur_op,m.cur_in))
pass; #LOG and log('op_r,op_c={}',(op_r,op_c))
if False:pass
elif op_c not in (M.COL_DEF,M.COL_USR,M.COL_LXR,M.COL_FIL):
return []
elif -1==op_r:
pass; #LOG and log('skip as no opt',())
return []
elif -1==op_c:
pass; #LOG and log('skip as miss col',())
return []
elif M.COL_DEF==op_c:
return d(form =d(fid='setd'))
elif M.COL_USR==op_c and m.for_ulf!='u':
# Switch to user vals
m.for_ulf = 'u'
elif M.COL_LXR==op_c and m.for_ulf!='l':
# Switch to lexer vals
m.for_ulf = 'l'
elif M.COL_FIL==op_c and m.for_ulf!='f':
            # Switch to file vals
m.for_ulf = 'f'
else:
return []
        pass;                   #LOG and log('op_r,op_c,m.for_ulf={}',(op_r,op_c,m.for_ulf))
return d(ctrls=m.get_cnts('+cur')
,vals =m.get_vals('+cur+inlxfi')
,form =d(fid=m._prep_opt('fid4ed'))
)
#def do_dbcl
def do_setv(self, aid, ag, data=''):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
pass; #LOG and log('aid,m.cur_op={}',(aid,m.cur_op))
if not m.cur_op: return []
m.col_ws= [ci['wd'] for ci in m.ag.cattr('lvls', 'cols')]
if aid=='toop':
# m.do_file('locate-opt') # while wait core fix
if m.do_file('goto-opt'): return None # need close dlg
return []
        trg     = 'lexer '+m.lexr+'.json' if m.for_ulf=='l' else 'user.json'
key4v = m.for_ulf+'val'
op = m.cur_op
oi = m.opts_full[op]
frm = oi['frm']
# if frm=='json':
# m.stbr_act(M.STBR_MSG, f(_('Edit {!r} to change value'), trg))
# return []
dval = oi.get( 'def')
uval = oi.get('uval')
lval = oi.get('lval')
fval = oi.get('fval')
ulfvl = oi.get(key4v ) #fval if m.for_ulf=='f' else lval if m.for_ulf=='l' else uval
jval = oi['jlvl'] if m.for_ulf=='l' else \
oi['juvl'] if m.for_ulf=='u' else \
oi['jfvl']
scam = app.app_proc(app.PROC_GET_KEYSTATE, '')
# Get new value
newv = None
erpt_s = ''
if False:pass
elif aid=='setd' and \
m.for_ulf=='f' and \
op in apx.OPT2PROP:
# Remove from file - set over def/user/lex val
newv = oi.get('lval', oi.get('uval', oi.get('def')))
if newv==ulfvl:
                m.stbr_act(M.STBR_MSG, _('No changes needed'))
return []
erpt_s = 'reset-f'
m.ed.set_prop(apx.OPT2PROP[op], newv)
elif aid=='setd' and \
ulfvl is not None and \
m.for_ulf!='f':
# Remove from user/lexer
if scam!='c' and \
app.ID_OK != app.msg_box(f(_('Remove {} option'
'\n {} = {!r}'
'\n?'), 'LEXER' if m.for_ulf=='l' else 'USER', op, jval)
, app.MB_OKCANCEL+app.MB_ICONQUESTION): return []
newv= None
elif aid=='brow' and frm in ('hotk', 'file', '#rgb', '#rgb-e'):
ulfvl_s = '' if ulfvl is None else ulfvl
m.stbr_act(M.STBR_MSG, f(_('Default value: "{}". Old value: "{}"'), dval, ulfvl_s))
if frm in ('#rgb', '#rgb-e'):
ulfvl_s = ulfvl_s if ulfvl_s else dval if frm=='#rgb' else '#fff'
newv = app.dlg_color(apx.html_color_to_int(ulfvl_s))
if newv is None: return []
newv = apx.int_to_html_color(newv)
else:
newv= (app.dlg_hotkey(op) if frm=='hotk' else
app.dlg_file(False, '', os.path.expanduser(ulfvl_s), '') if frm=='file' else None)
m.stbr_act(M.STBR_MSG, '')
if not newv: return []
elif aid=='opjs':
newv = edit_json_as_dict(op, ulfvl, dval, oi.get('cmt' , ''))
if newv is None: return []
elif aid=='setv': # Add/Set opt for user/lexer/file
            # Enter pressed in the edit field. Need to parse the string
newv = m.ag.cval('eded')
try:
newv = int(newv) if frm=='int' else \
float(newv) if frm=='float' else \
newv
except Exception as ex:
                app.msg_box(f(_('Incorrect value. It must be in the format: {}'), frm)
, app.MB_OK+app.MB_ICONWARNING)
return d(form=d(fid='eded'))
if frm=='#rgb' or frm=='#rgb-e' and newv: # Testing new val
try:
apx.html_color_to_int(newv)
except Exception as ex:
                    app.msg_box(f(_('Incorrect value. It must be in the format: {}'), '#RGB or #RRGGBB')
, app.MB_OK+app.MB_ICONWARNING)
return d(form=d(fid='eded'))
elif aid in ('edrf', 'edrt'): # Add/Set opt for user/lexer/file
newv = aid=='edrt'
newv = not newv if newv==ulfvl else newv
elif aid=='edcb': # Add/Set opt into user/lexer/file
pass; #LOG and log('oi={}',(oi))
vl_l = [k for k,v in oi.get('dct', [])] if 'dct' in oi else oi.get('lst', [])
pass; #LOG and log('vl_l={}',(vl_l))
pass; #LOG and log('m.ag.cval(edcb)={}',(m.ag.cval('edcb')))
newv = vl_l[m.ag.cval('edcb')]
pass; #LOG and log('newv={}',(newv))
# Use new value to change env
if newv is not None and newv==ulfvl:
            m.stbr_act(M.STBR_MSG, _('No changes needed'))
return []
if m.for_ulf=='f' and newv is not None and op in apx.OPT2PROP:
# Change for file
erpt_s = 'set-f'
ed.set_prop(apx.OPT2PROP[op], newv)
if m.for_ulf!='f':
# Change target file
pass; #LOG and log('?? do_erpt',())
erpt_s =('reset-u' if newv is None and m.for_ulf=='u' else
'reset-l' if newv is None and m.for_ulf=='l' else
'add-u' if ulfvl is None and m.for_ulf=='u' else
'add-l' if ulfvl is None and m.for_ulf=='l' else
'set-u' if m.for_ulf=='u' else
'set-l' if m.for_ulf=='l' else '')
pass; #LOG and log('?? set_opt',())
apx.set_opt(op
,newv
,apx.CONFIG_LEV_LEX if m.for_ulf=='l' else apx.CONFIG_LEV_USER
,ed_cfg =None
,lexer =m.lexr if m.for_ulf=='l' else None
,user_json=m.how.get('stor_json', 'user.json')
)
if not m.apply_one:
pass; #LOG and log('?? OpsReloadAndApply',())
ed.cmd(cmds.cmd_OpsReloadAndApply)
else:
m.apply_need = True
# Use new value to change dlg data
pass; #LOG and log('?? oi={}',(oi))
pass; #LOG and log('?? m.opts_full={}',pf(m.opts_full))
if False:pass
elif aid=='setd':
oi.pop(key4v, None) if m.for_ulf!='f' else 0
else:
pass; #LOG and log('key4v, newv={}',(key4v, newv))
oi[key4v] = newv
pass; #LOG and log('oi={}',(oi))
upd_cald_vals(m.opts_full)
pass; #LOG and log('oi={}',(oi))
jnewv = oi['jlvl'] if m.for_ulf=='l' else oi['juvl'] if m.for_ulf=='u' else oi['jfvl']
m.do_erpt(erpt_s, jnewv, jval)
pass; #LOG and log('ok oi={}',(oi))
pass; #LOG and log('ok m.opts_full={}',pf(m.opts_full))
pass; #LOG and log('?? get_cnts',())
if m.for_ulf!='f' and m.auto4file and op in apx.OPT2PROP:
            # Update FILE value to the new effective (overriding) value
newv = oi.get('lval', oi.get('uval', oi.get('def')))
if newv!=oi.get('fval'):
erpt_s = 'reset-f'
m.ed.set_prop(apx.OPT2PROP[op], newv)
oi['fval'] = newv
jval = oi['jfvl']
upd_cald_vals(m.opts_full)
jnewv = oi['jfvl']
m.do_erpt('auset-f', jnewv, jval)
pass; #LOG and log('m.get_vals(lvls-cur)={}',(m.get_vals('lvls-cur')))
return d(ctrls=m.get_cnts('+lvls+cur')
,vals =m.get_vals('lvls-cur')
)
#def do_setv
def do_erpt(self, what='', jnewv=None, joldv=None):
pass; #LOG and log('what, newv={}',(what, newv))
M,m = self.__class__,self
if 0==len(m.chng_rpt):
rpt = f('Starting to change options at {:%Y-%m-%d %H:%M:%S}', datetime.datetime.now())
m.chng_rpt += [rpt]
oi = m.opts_full[m.cur_op]
oldv= None
rpt = ''
if 0:pass
elif what=='reset-f':
rpt = f(_('Set FILE option to overridden value {!r}') ,jnewv)
elif what=='set-f':
rpt = f(_('Set FILE option to {!r}') ,jnewv)
elif what=='auset-f':
rpt = f(_('Auto-set FILE option to overridden value {!r}') ,jnewv)
elif what=='reset-l':
rpt = f(_('Remove LEXER {!r} option') ,m.lexr )
elif what=='set-l':
rpt = f(_('Set LEXER {!r} option to {!r}') ,m.lexr ,jnewv)
elif what=='add-l':
rpt = f(_('Add LEXER {!r} option {!r}') ,m.lexr ,jnewv)
elif what=='reset-u':
rpt = f(_('Remove USER option') )
elif what=='set-u':
rpt = f(_('Set USER option to {!r}') ,jnewv)
elif what=='add-u':
rpt = f(_('Add USER option {!r}') ,jnewv)
else:
return
rpt = f('{} (from {!r})', rpt, joldv) \
if what[:3]!='add' and joldv is not None else rpt
rpt = rpt.replace('True', 'true').replace('False', 'false')
rpt = m.cur_op + ': ' + rpt
rpt = f('{}. ', len(m.chng_rpt)) + rpt
# print(rpt)
m.stbr_act(M.STBR_MSG, rpt + _(' [Alt+O - all changes]'))
m.chng_rpt += [rpt]
#def do_erpt
def do_help(self, aid, ag, data=''):
M,m = self.__class__,self
m.stbr_act(M.STBR_MSG, '')
pass; #LOG and log('',())
dlg_wrapper('Help'
, 680+10, 500+10
, [d(cid='body', tp='me', l=5, t=5, w=680, h=500, ro_mono_brd='1,1,0')]
, d( body= #NOTE: help
f(
_( 'About "{fltr}"'
'\r '
)
+M.FLTR_H+
_('\r '
'\rOther tips.'
                    '\r  • Use ENTER to filter the table and to change or reset a value.'
'\r • Use double click on any cell in columns'
'\r "{c_usr}"'
'\r "{c_lxr}"'
'\r "{c_fil}"'
'\r to change "{in_lxr}" flag and to put focus on the value field.'
'\r • Use double click on any cell in column'
'\r "{c_def}"'
'\r to put focus on "{reset}".'
'\r • Clicking "{reset}" will ask for confirmation, for user/lexer options.'
'\r Hold Ctrl key to skip this confirmation.'
                    '\r  • Clicking on a column header sorts data in that column.'
                    '\r      Alt+# (# is 1..8) sorts the #-th column (not on macOS).'
                    '\r      Alt+9 resets sorting (not on macOS).'
                    '\r      Ctrl+Click allows sorting by several columns.'
                    '\r      Ctrl+Click on an already sorted column does a 2-state loop (down, up).'
                    '\r      Ctrl+Click on the already sorted column with the maximal sorting index '
                        '\r      does a 3-state loop (down, up, off).'
                    '\r  • Use option "{lifl}" to see an instant update of the list after'
                    '\r      each change in the filter field'
                    '\r      (otherwise you need to press Enter after each change).'
                    '\r      With this option, no history of the filter is kept'
                    '\r      (the filter combobox has an empty dropdown list).'
                    '\r  • If the current list line is scrolled out of view, '
                    '\r      you can still see the option name - in the tooltip'
                    '\r      of the "User" (Lexer/File) label near the value field.'
                    '\r  • A tooltip shows the file name (or tag name) when the cursor hovers over the checkbox "{tofi}".'
                    '\r  • Some plugins store their settings in user.json.'
                    '\r      So after a while, user.json contains options not present in default.json.'
                    '\r      To see all these keys, use option "{full}".'
'\r • Values in table column "!"'
'\r ! option is set in "user.json",'
'\r !! option is set in "lexer NNN.json",'
'\r !!! option is set for current file,'
'\r L default value is from "settings_default/lexer NNN.json",'
'\r + not CudaText standard option.'
) , c_usr=M.COL_NMS[M.COL_USR]
, c_lxr=M.COL_NMS[M.COL_LXR]
, c_fil=M.COL_NMS[M.COL_FIL].split()[0]
, c_def=M.COL_NMS[M.COL_DEF]
, fltr = ag.cattr('flt_', 'cap', live=False).replace('&', '').strip(':')
, in_lxr=ag.cattr('tolx', 'cap', live=False).replace('&', '')
, reset= ag.cattr('setd', 'cap', live=False).replace('&', '')
, tofi = ag.cattr('tofi', 'cap', live=False).replace('&', '')
, lifl = M.LIFL_C.replace('&', '')
, full = M.FULL_C.replace('&', '')
))
)
return []
#def do_help
restart = False
restart_cond= None
#class OptEdD
def edit_json_as_dict(op, uval, dval, cmnt4v):
""" Allow user to edit JSON value
"""
pass; #log("op, uval, dval={}",(op, uval, dval))
newv = None
def acts(aid, ag, data=''):
nonlocal newv
if False:pass
elif aid=='defv':
return d(vals=d(meme=json.dumps(dval, indent=2)),fid='meme')
elif aid=='undo':
return d(vals=d(meme=json.dumps(uval, indent=2)),fid='meme')
elif aid in ('test', 'okok'):
mejs = ag.cval('meme')
pass; #log("mejs={!r}",(mejs))
try:
jsvl = json.loads(mejs, object_pairs_hook=odict)
except Exception as ex:
warn = str(ex) + c10 + (c10.join('{:>3}|{}'.format(n+1, s.replace(' ','·'))
for n,s in enumerate(mejs.split(c10))))
return d(vals=d(cmnt=warn),fid='meme')
# app.msg_box(str(ex)
# +c10+(c10.join('{:>3}|{}'.format(n+1, s.replace(' ','·'))
# for n,s in enumerate(mejs.split(c10))))
# , app.MB_OK)
# return d(fid='meme')
if aid=='okok':
newv = jsvl
return None # Close
return d(vals=d(cmnt=cmnt4v),fid='meme')
#def acts
DlgAgent(
form =dict(cap = f(_('Edit JSON option ({})'), op)
,resize = True
,w = 510
,h = 400
)
, ctrls=[0
,('meme',d(tp='me' ,l= 5 ,w=500 ,t= 5 ,h=150 ,a='tBlR'))
,('cmnt',d(tp='me' ,l= 5 ,w=500 ,t=160 ,h=200 ,ro_mono_brd='1,1,1' ,a='TBlR'))
,('defv',d(tp='bt' ,l= 5 ,w=110 ,t=370 ,cap=_('Set &default') ,a='TB' ,call=acts ,en=(dval is not None)))
,('undo',d(tp='bt' ,l=120 ,w=110 ,t=370 ,cap=_('&Undo changes') ,a='TB' ,call=acts))
,('test',d(tp='bt' ,l=285 ,w= 70 ,t=370 ,cap=_('Chec&k') ,a='TBLR' ,call=acts))
,('cans',d(tp='bt' ,l=360 ,w= 70 ,t=370 ,cap=_('Cancel') ,a='TBLR' ,call=acts))
,('okok',d(tp='bt' ,l=435 ,w= 70 ,t=370 ,cap=_('OK') ,a='TBLR' ,call=acts ,def_bt=True))
][1:]
, vals =dict(meme=json.dumps(uval, indent=2)
,cmnt=cmnt4v)
, fid ='meme'
).show()
return newv
#def edit_json_as_dict
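# Assumed call pattern for edit_json_as_dict (the option name below is only an illustration):
#   new_val = edit_json_as_dict('auto_complete_commit_chars', uval, dval, cmnt4v)
#   if new_val is not None:   # None means the value was not changed (dialog closed without a valid OK)
#       ... store new_val as the option value ...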
class Command:
def dlg_cuda_options(self):
while True:
OptEdD.restart = False
self._dlg_opt()
if not OptEdD.restart: break
#def dlg_cuda_options
def _dlg_opt(self):
        if app.app_api_version()<MIN_API_VER: return app.msg_status(_('Need to update CudaText'))
defs_json = apx.get_opt('dlg_cuda_options.defs_json', 'default.json')
defs_json = defs_json if os.sep in defs_json else apx.get_def_setting_dir()+os.sep+defs_json
OptEdD(
path_keys_info=defs_json
, subset='df.'
).show(_('CudaText options'))
#def _dlg_opt
#class Command
def add_to_history(val:str, lst:list, max_len=MAX_HIST, unicase=False)->list:
""" Add/Move val to list head. """
lst_u = [ s.upper() for s in lst] if unicase else lst
val_u = val.upper() if unicase else val
if val_u in lst_u:
if 0 == lst_u.index(val_u): return lst
del lst[lst_u.index(val_u)]
lst.insert(0, val)
if len(lst)>max_len:
del lst[max_len:]
return lst
#def add_to_history
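# Minimal usage sketch for add_to_history (the values are illustrative):
#   hist = add_to_history('tab_size', ['word_wrap', 'tab_size'], max_len=10)
#   # hist is now ['tab_size', 'word_wrap'] - the value moves to the head and the list is trimmed to max_len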
RPT_HEAD = '''
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>CudaText options</title>
<style type="text/css">
td, th, body {
color: #000;
font-family: Verdana, Arial, Helvetica, sans-serif;
font-size: 12px;
}
table {
border-width: 1px;
border-spacing: 2px;
border-color: gray;
border-collapse:collapse;
}
table td, table th{
border-width: 1px;
padding: 1px;
border-style: solid;
border-color: gray;
}
pre {
margin: 0;
padding: 0;
}
td.nxt {
color: grey;
word-break: break-all;
}
td.win {
font-weight: bold;
word-break: break-all;
}
</style>
</head>
<body>
'''
RPT_FOOT = '''
</body>
</html>
'''
def do_report(fn, lex='', ed_=ed):
def hard_word_wrap(text, rmax):
reShift = re.compile(r'\s*')
reHeadTail = re.compile(r'(.{' + str(rmax) + r'}\S*)\s*(.*)')
src_lines = text.splitlines()
pass; #print('src_lines=',src_lines)
trg_lines = []
for line in src_lines:
pass; #print('line=', line, 'len=', len(line.rstrip()))
if len(line.rstrip()) <= rmax:
trg_lines.append(line)
continue
shift = reShift.match(line).group(0)
head, \
tail = reHeadTail.match(line).group(1, 2)
if not tail:
tail= line.split()[-1]
head= line[:-len(tail)]
pass; #print('head=', head, 'tail=', tail)
trg_lines.append(head)
trg_lines.append(shift+tail)
pass; #print('trg_lines=',trg_lines)
return '\n'.join(trg_lines)
#def hard_word_wrap
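    # A rough reading of hard_word_wrap: lines of <= rmax characters pass through unchanged;
    # longer lines are split at the first whitespace at or after column rmax, and the tail is
    # re-indented with the line's original leading whitespace
    # (e.g., an assumed call: hard_word_wrap(comment_text, 50)).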
# lex = ed_.get_prop(app.PROP_LEXER_CARET)
def_json = apx.get_def_setting_dir() +os.sep+'default.json'
usr_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+'user.json'
lex_json = app.app_path(app.APP_DIR_SETTINGS)+os.sep+lex if lex else ''
def_opts = apx._get_file_opts(def_json, {}, object_pairs_hook=collections.OrderedDict)
usr_opts = apx._get_file_opts(usr_json, {}, object_pairs_hook=collections.OrderedDict)
lex_opts = apx._get_file_opts(lex_json, {}, object_pairs_hook=collections.OrderedDict) if lex else None
def_opts = pickle.loads(pickle.dumps(def_opts)) # clone to pop
usr_opts = pickle.loads(pickle.dumps(usr_opts)) # clone to pop
lex_opts = pickle.loads(pickle.dumps(lex_opts)) if lex else {} # clone to pop
fil_opts = {op:ed_.get_prop(pr) for op,pr in apx.OPT2PROP.items()}
# fil_opts = get_ovrd_ed_opts(ed)
cmt_opts = {}
# Find Commentary for def opts in def file
# Rely: _commentary_ is some (0+) lines between opt-line and prev opt-line
def_body = open(def_json).read()
def_body = def_body.replace('\r\n', '\n').replace('\r', '\n')
def_body = def_body[def_body.find('{')+1:] # Cut head with start '{'
def_body = def_body.lstrip()
for opt in def_opts.keys():
pos_opt = def_body.find('"{}"'.format(opt))
cmt = def_body[:pos_opt].strip()
cmt = ('\n\n'+cmt).split('\n\n')[-1]
cmt = re.sub('^\s*//', '', cmt, flags=re.M)
cmt = cmt.strip()
cmt_opts[opt] = html.escape(cmt)
def_body= def_body[def_body.find('\n', pos_opt)+1:] # Cut the opt
with open(fn, 'w', encoding='utf8') as f:
f.write(RPT_HEAD)
f.write('<h4>High priority: editor options</h4>\n')
f.write('<table>\n')
f.write( '<tr>\n')
f.write( '<th>Option name</th>\n')
f.write( '<th>Value in<br>default</th>\n')
f.write( '<th>Value in<br>user</th>\n')
f.write( '<th>Value in<br>{}</th>\n'.format(lex)) if lex else None
f.write( '<th title="{}">Value for file<br>{}</th>\n'.format(ed_.get_filename()
, os.path.basename(ed_.get_filename())))
f.write( '<th>Comment</th>\n')
f.write( '</tr>\n')
for opt in fil_opts.keys():
winner = 'def'
winner = 'usr' if opt in usr_opts else winner
winner = 'lex' if opt in lex_opts else winner
winner = 'fil' if opt in fil_opts else winner
f.write( '<tr>\n')
f.write( '<td>{}</td>\n'.format(opt))
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='def' else 'nxt', def_opts.get(opt, '')))
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='usr' else 'nxt', usr_opts.get(opt, '')))
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='lex' else 'nxt', lex_opts.get(opt, ''))) if lex else None
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='fil' else 'nxt', fil_opts.get(opt, '')))
# f.write( '<td><pre>{}</pre></td>\n'.format(cmt_opts.get(opt, '')))
f.write( '<td><pre>{}</pre></td>\n'.format(hard_word_wrap(cmt_opts.get(opt, ''), 50)))
f.write( '</tr>\n')
def_opts.pop(opt, None)
usr_opts.pop(opt, None)
lex_opts.pop(opt, None) if lex else None
f.write('</table><br/>\n')
f.write('<h4>Overridden default options</h4>\n')
f.write('<table>\n')
f.write( '<tr>\n')
f.write( '<th width="15%">Option name</th>\n')
f.write( '<th width="20%">Value in<br>default</th>\n')
f.write( '<th width="20%">Value in<br>user</th>\n')
f.write( '<th width="10%">Value in<br>{}<br></th>\n'.format(lex)) if lex else None
f.write( '<th width="35%">Comment</th>\n')
f.write( '</tr>\n')
for opt in def_opts.keys():
winner = 'def'
winner = 'usr' if opt in usr_opts else winner
winner = 'lex' if opt in lex_opts else winner
winner = 'fil' if opt in fil_opts else winner
f.write( '<tr>\n')
f.write( '<td>{}</td>\n'.format(opt))
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='def' else 'nxt', def_opts.get(opt, '')))
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='usr' else 'nxt', usr_opts.get(opt, '')))
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='lex' else 'nxt', lex_opts.get(opt, ''))) if lex else None
f.write( '<td><pre>{}</pre></td>\n'.format(hard_word_wrap(cmt_opts.get(opt, ''), 50)))
f.write( '</tr>\n')
usr_opts.pop(opt, None)
lex_opts.pop(opt, None) if lex else None
f.write('</table><br/>\n')
f.write('<h4>Overridden user-only options</h4>')
f.write('<table>\n')
f.write( '<tr>\n')
f.write( '<th>Option name</th>\n')
f.write( '<th>Value in<br>user</th>\n')
f.write( '<th>Value in<br>{}</th>\n'.format(lex)) if lex else None
f.write( '<th>Comment</th>\n')
f.write( '</tr>\n')
for opt in usr_opts.keys():
winner = 'usr'
winner = 'lex' if opt in lex_opts else winner
f.write( '<tr>\n')
f.write( '<td>{}</td>\n'.format(opt))
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='usr' else 'nxt', usr_opts.get(opt, '')))
f.write( '<td class="{}">{}</td>\n'.format('win' if winner=='lex' else 'nxt', lex_opts.get(opt, ''))) if lex else None
f.write( '<td><pre>{}</pre></td>\n'.format(cmt_opts.get(opt, '')))
f.write( '</tr>\n')
lex_opts.pop(opt, None) if lex else None
for opt in lex_opts:
winner = 'lex'
f.write( '<tr>\n')
f.write( '<td>{}</td>\n'.format(opt))
f.write( '<td class="{}"></td> \n'.format('non'))
f.write( '<td class="{}">{}</td>\n'.format('win', lex_opts.get(opt, '')))
f.write( '<td><pre>{}</pre></td>\n'.format(cmt_opts.get(opt, '')))
f.write( '</tr>\n')
lex_opts.pop(opt, None)
f.write('</table><br/>\n')
f.write(RPT_FOOT)
return True
#def do_report(fn):
def index_1(cllc, val, defans=-1):
return cllc.index(val) if val in cllc else defans
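# For example (hypothetical values): index_1(['u', 'l', 'f'], 'l') == 1, while
# index_1(['u', 'l', 'f'], 'x') == -1 - i.e. list.index() with a default instead of a ValueError.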
if __name__ == '__main__' : # Tests
# To start the tests run in Console
# exec(open(path_to_the_file, encoding="UTF-8").read())
# app.app_log(app.LOG_CONSOLE_CLEAR, 'm')
# for smk in [smk for smk
# in sys.modules if 'cuda_options_editor.tests.test_options_editor' in smk]:
# del sys.modules[smk] # Avoid old module
# import cuda_options_editor.tests.test_options_editor
# import unittest
# suite = unittest.TestLoader().loadTestsFromModule( cuda_options_editor.tests.test_options_editor)
# unittest.TextTestRunner(verbosity=0).run(suite)
pass
'''
ToDo
[+][kv-kv][02apr17] History for cond
[-][kv-kv][02apr17] ? Chapters list and "chap" attr into kinfo
[-][kv-kv][02apr17] ? Tags list and "tag" attr into kinfo
[-][kv-kv][02apr17]     ?   Delimiter row in table
[ ][kv-kv][02apr17] "Need restart" in Comments
[+][kv-kv][02apr17] ? Calc Format by Def_val
[ ][kv-kv][02apr17] int_mm for min+max
[+][kv-kv][02apr17] VERS in Title
[+][at-kv][02apr17] 'enum' instead of 'enum_i'
[ ][kv-kv][02apr17] Save top row in table
[+][kv-kv][03apr17] Show stat in Chap-combo and tags check-list
[-][kv-kv][03apr17] ? Add chap "(No chapter)"
[-][kv-kv][03apr17] ? Add tag "#no_tag"
[+][kv-kv][03apr17] Call opts report
[+][at-kv][04apr17] Format 'font'
[-][at-kv][04apr17] ? FilterListView
[+][at-kv][04apr17] use new default.json
[-][kv-kv][04apr17] Testing for update user.json
[+][kv-kv][04apr17] Restore Sec and Tags
[+][kv-kv][04apr17] ro-combo hitory for Tags
[+][kv-kv][05apr17] Add "default" to fonts if def_val=="default"
[+][at-kv][05apr17] Preview for format=fontmay
[+][kv-kv][06apr17] Spec filter sign: * - to show only modified
[-][kv-kv][06apr17] Format color
[+][kv-kv][24apr17] Sort as Def or as User
[+][kv-kv][05may17] New type "list of str"
[ ][kv-kv][23jun17] ? Filter with tag (part of tag?). "smth #my"
[+][kv-kv][15mar18] ? Filter with all text=key+comment
[+][kv-kv][19mar18] ? First "+" to filter with comment
[-][kv-kv][19mar18] !! Point the fact if value is overed in ed
[?][kv-kv][20mar18] Allow to add/remove opt in user/lex
[?][kv-kv][21mar18] ? Allow to meta keys in user.json:
"_fif_LOG__comment":"Comment for fif_LOG"
[+][kv-kv][22mar18] Set conrol's tab_order to always work Alt+E for "Valu&e"
[ ][kv-kv][26mar18] Use 'editor' for comment
[+][kv-kv][26mar18] Increase w for one col when user increases w of dlg (if no h-scroll)
[+][kv-kv][13apr18] DClick on Def-col - focus to Reset
[-][kv-kv][16apr18] Open in tag for fmt=json
[?][kv-kv][23apr18] ? Show opt from cur line if ed(default.json)
[+][at-kv][03may18] Rework ask to confirm removing user/lex opt
[+][at-kv][04may18] Report to console all changes
[+][at-kv][05may18] Call OpsReloadAndApply
[+][kv-kv][05may18] Rework radio to checks (Linux bug: always set one of radio-buttons)
[-][kv-kv][05may18] Ask "Set also for current file?" if ops is ed.prop
[+][kv-kv][06may18] Menu command "Show changes"
[+][kv-kv][06may18] Show all file opt value. !!! only if val!=over-val
[+][kv-kv][06may18] Rework Sort
[+][kv-kv][14may18] Scale def col widths
[ ][at-kv][14may18] DClick over 1-2-3 is bad
[+][at-kv][14may18] Allow to refresh table on each changing of filter
[+][at-kv][15may18] Allow to extra sort cols with Ctrl+Click
[ ][kv-kv][04jun18] Cannot select section @Ui after selected @Ui/Tabs
[ ][kv-kv][16jun18] Have 2 filter control to instant and history. Switch by vis
[+][kv-kv][18jun18] More then one chap in filter. Append from menu if Ctrl holds
[+][at-kv][24apr19] Add types: rgb
[ ][at-kv][24apr19] Add type condition: int/float range
[+][kv-kv][25apr19] Hide cols "Lexer" and "File", controls []For and lexer list (by init opt)
[+][kv-kv][25apr19] Allow storing to a file other than user.json
[+][kv-kv][25apr19] Return 'was modified' from show()
''' | mpl-2.0 | 6,833,770,248,211,527,000 | 47.691151 | 141 | 0.420363 | false |
retr0h/ansible | test/TestPlayBook.py | 1 | 13823 |
# tests are fairly 'live' (but safe to run)
# set up authorized_keys for the logged-in user such
# that the user can log in as themselves before running tests
import unittest
import getpass
import ansible.playbook
import ansible.utils as utils
import ansible.callbacks as ans_callbacks
import os
import shutil
import ansible.constants as C
EVENTS = []
class TestCallbacks(object):
# using same callbacks class for both runner and playbook
def __init__(self):
pass
def set_playbook(self, playbook):
self.playbook = playbook
def on_no_hosts_remaining(self):
pass
def on_start(self):
EVENTS.append('start')
def on_skipped(self, host, item=None):
EVENTS.append([ 'skipped', [ host ]])
def on_import_for_host(self, host, filename):
EVENTS.append([ 'import', [ host, filename ]])
def on_error(self, host, msg):
EVENTS.append([ 'stderr', [ host, msg ]])
def on_not_import_for_host(self, host, missing_filename):
pass
def on_notify(self, host, handler):
EVENTS.append([ 'notify', [ host, handler ]])
def on_task_start(self, name, is_conditional):
EVENTS.append([ 'task start', [ name, is_conditional ]])
def on_failed(self, host, results, ignore_errors):
EVENTS.append([ 'failed', [ host, results, ignore_errors ]])
def on_ok(self, host, result):
# delete certain info from host_result to make test comparisons easier
host_result = result.copy()
for k in [ 'ansible_job_id', 'results_file', 'md5sum', 'delta', 'start', 'end' ]:
if k in host_result:
del host_result[k]
for k in host_result.keys():
if k.startswith('facter_') or k.startswith('ohai_'):
del host_result[k]
EVENTS.append([ 'ok', [ host, host_result ]])
def on_play_start(self, pattern):
EVENTS.append([ 'play start', [ pattern ]])
def on_async_ok(self, host, res, jid):
EVENTS.append([ 'async ok', [ host ]])
def on_async_poll(self, host, res, jid, clock):
EVENTS.append([ 'async poll', [ host ]])
def on_async_failed(self, host, res, jid):
EVENTS.append([ 'async failed', [ host ]])
def on_unreachable(self, host, msg):
EVENTS.append([ 'failed/dark', [ host, msg ]])
def on_setup(self):
pass
def on_no_hosts(self):
pass
class TestPlaybook(unittest.TestCase):
def setUp(self):
self.user = getpass.getuser()
self.cwd = os.getcwd()
self.test_dir = os.path.join(self.cwd, 'test')
self.stage_dir = self._prepare_stage_dir()
if os.path.exists('/tmp/ansible_test_data_copy.out'):
os.unlink('/tmp/ansible_test_data_copy.out')
if os.path.exists('/tmp/ansible_test_data_template.out'):
os.unlink('/tmp/ansible_test_data_template.out')
if os.path.exists('/tmp/ansible_test_messages.out'):
os.unlink('/tmp/ansible_test_messages.out')
if os.path.exists('/tmp/ansible_test_role_messages.out'):
os.unlink('/tmp/ansible_test_role_messages.out')
def _prepare_stage_dir(self):
stage_path = os.path.join(self.test_dir, 'test_data')
if os.path.exists(stage_path):
shutil.rmtree(stage_path, ignore_errors=False)
assert not os.path.exists(stage_path)
os.makedirs(stage_path)
assert os.path.exists(stage_path)
return stage_path
def _get_test_file(self, filename):
# get a file inside the test input directory
filename = os.path.join(self.test_dir, filename)
assert os.path.exists(filename)
return filename
def _get_stage_file(self, filename):
# get a file inside the test output directory
filename = os.path.join(self.stage_dir, filename)
return filename
def _run(self, test_playbook, host_list='test/ansible_hosts',
extra_vars=None):
''' run a module and get the localhost results '''
        # This ensures tests are independent of each other
global EVENTS
ansible.playbook.SETUP_CACHE.clear()
EVENTS = []
self.test_callbacks = TestCallbacks()
self.playbook = ansible.playbook.PlayBook(
playbook = test_playbook,
host_list = host_list,
module_path = 'library/',
forks = 1,
timeout = 5,
remote_user = self.user,
remote_pass = None,
extra_vars = extra_vars,
stats = ans_callbacks.AggregateStats(),
callbacks = self.test_callbacks,
runner_callbacks = self.test_callbacks
)
result = self.playbook.run()
return result
def test_playbook_vars(self):
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'test_playbook_vars', 'playbook.yml'),
host_list='test/test_playbook_vars/hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks
)
playbook.run()
def _test_playbook_undefined_vars(self, playbook, fail_on_undefined):
# save DEFAULT_UNDEFINED_VAR_BEHAVIOR so we can restore it in the end of the test
saved_undefined_var_behavior = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
C.DEFAULT_UNDEFINED_VAR_BEHAVIOR = fail_on_undefined
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'test_playbook_undefined_vars', playbook),
host_list='test/test_playbook_undefined_vars/hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks
)
actual = playbook.run()
C.DEFAULT_UNDEFINED_VAR_BEHAVIOR = saved_undefined_var_behavior
# if different, this will output to screen
print "**ACTUAL**"
print utils.jsonify(actual, format=True)
expected = {
"localhost": {
"changed": 0,
"failures": 0,
"ok": int(not fail_on_undefined) + 1,
"skipped": 0,
"unreachable": int(fail_on_undefined)
}
}
print "**EXPECTED**"
print utils.jsonify(expected, format=True)
assert utils.jsonify(expected, format=True) == utils.jsonify(actual, format=True)
#def test_playbook_undefined_vars1_ignore(self):
# self._test_playbook_undefined_vars('playbook1.yml', False)
#def test_playbook_undefined_vars1_fail(self):
# self._test_playbook_undefined_vars('playbook1.yml', True)
#def test_playbook_undefined_vars2_ignore(self):
# self._test_playbook_undefined_vars('playbook2.yml', False)
#def test_playbook_undefined_vars2_fail(self):
# self._test_playbook_undefined_vars('playbook2.yml', True)
def test_yaml_hosts_list(self):
# Make sure playbooks support hosts: [host1, host2]
# TODO: Actually run the play on more than one host
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'hosts_list.yml'),
host_list='test/ansible_hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks
)
play = ansible.playbook.Play(playbook, playbook.playbook[0], os.getcwd())
assert play.hosts == ';'.join(('host1', 'host2', 'host3'))
def test_playbook_hash_replace(self):
# save default hash behavior so we can restore it in the end of the test
saved_hash_behavior = C.DEFAULT_HASH_BEHAVIOUR
C.DEFAULT_HASH_BEHAVIOUR = "replace"
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'test_hash_behavior', 'playbook.yml'),
host_list='test/ansible_hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks
)
playbook.run()
filename = '/tmp/ansible_test_messages.out'
expected_lines = [
"goodbye: Goodbye World!"
]
self._compare_file_output(filename, expected_lines)
filename = '/tmp/ansible_test_role_messages.out'
expected_lines = [
"inside_a_role: Indeed!"
]
self._compare_file_output(filename, expected_lines)
# restore default hash behavior
C.DEFAULT_HASH_BEHAVIOUR = saved_hash_behavior
def test_playbook_hash_merge(self):
# save default hash behavior so we can restore it in the end of the test
saved_hash_behavior = C.DEFAULT_HASH_BEHAVIOUR
C.DEFAULT_HASH_BEHAVIOUR = "merge"
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'test_hash_behavior', 'playbook.yml'),
host_list='test/ansible_hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks
)
playbook.run()
filename = '/tmp/ansible_test_messages.out'
expected_lines = [
"goodbye: Goodbye World!",
"hello: Hello World!"
]
self._compare_file_output(filename, expected_lines)
filename = '/tmp/ansible_test_role_messages.out'
expected_lines = [
"goodbye: Goodbye World!",
"hello: Hello World!",
"inside_a_role: Indeed!"
]
self._compare_file_output(filename, expected_lines)
# restore default hash behavior
C.DEFAULT_HASH_BEHAVIOUR = saved_hash_behavior
def test_playbook_ignore_errors(self):
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'playbook-ignore-errors.yml'),
host_list='test/ansible_hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks
)
actual = playbook.run()
# if different, this will output to screen
print "**ACTUAL**"
print utils.jsonify(actual, format=True)
expected = {
"localhost": {
"changed": 1,
"failures": 1,
"ok": 1,
"skipped": 0,
"unreachable": 0
}
}
print "**EXPECTED**"
print utils.jsonify(expected, format=True)
assert utils.jsonify(expected, format=True) == utils.jsonify(actual,format=True)
def test_playbook_changed_when(self):
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'playbook-changed_when.yml'),
host_list='test/ansible_hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks
)
actual = playbook.run()
# if different, this will output to screen
print "**ACTUAL**"
print utils.jsonify(actual, format=True)
expected = {
"localhost": {
"changed": 3,
"failures": 0,
"ok": 6,
"skipped": 0,
"unreachable": 0
}
}
print "**EXPECTED**"
print utils.jsonify(expected, format=True)
assert utils.jsonify(expected, format=True) == utils.jsonify(actual,format=True)
def test_playbook_failed_when(self):
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'playbook-failed_when.yml'),
host_list='test/ansible_hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks
)
actual = playbook.run()
# if different, this will output to screen
print "**ACTUAL**"
print utils.jsonify(actual, format=True)
expected = {
"localhost": {
"changed": 2,
"failures": 1,
"ok": 2,
"skipped": 0,
"unreachable": 0
}
}
print "**EXPECTED**"
print utils.jsonify(expected, format=True)
assert utils.jsonify(expected, format=True) == utils.jsonify(actual,format=True)
def test_playbook_always_run(self):
test_callbacks = TestCallbacks()
playbook = ansible.playbook.PlayBook(
playbook=os.path.join(self.test_dir, 'playbook-always-run.yml'),
host_list='test/ansible_hosts',
stats=ans_callbacks.AggregateStats(),
callbacks=test_callbacks,
runner_callbacks=test_callbacks,
check=True
)
actual = playbook.run()
# if different, this will output to screen
print "**ACTUAL**"
print utils.jsonify(actual, format=True)
expected = {
"localhost": {
"changed": 4,
"failures": 0,
"ok": 4,
"skipped": 8,
"unreachable": 0
}
}
print "**EXPECTED**"
print utils.jsonify(expected, format=True)
assert utils.jsonify(expected, format=True) == utils.jsonify(actual,format=True)
def _compare_file_output(self, filename, expected_lines):
actual_lines = []
with open(filename) as f:
actual_lines = [l.strip() for l in f.readlines()]
actual_lines = sorted(actual_lines)
print "**ACTUAL**"
print actual_lines
print "**EXPECTED**"
print expected_lines
assert actual_lines == expected_lines
| gpl-3.0 | -3,527,965,832,506,031,000 | 32.879902 | 90 | 0.59987 | false |
sadig/DC2 | components/dc2-admincenter/dc2/admincenter/globals/webconst.py | 1 | 1413 | # -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
CSS_FILES = [
'/static/css/bootstrap/bootstrap.css',
'/static/css/admincenter/base.css',
'/static/css/bootstrap/bootstrap-responsive.css',
'/static/css/datatable/jquery.dataTables.css',
]
JS_LIBS = [
'/static/js/jquery/jquery.js',
'/static/js/bootstrap/bootstrap.js',
'/static/js/datatable/jquery.dataTables.js',
'/static/js/datatable/DT_bootstrap.js',
'/static/js/jplugins/jquery.rightClick.js',
'/static/js/jplugins/jquery.formparams.js',
'/static/js/admincenter/utils.js',
'/static/js/admincenter/main.js',
]
| gpl-2.0 | 7,963,510,408,333,447,000 | 37.162162 | 76 | 0.72238 | false |
ufal/neuralmonkey | neuralmonkey/tests/test_encoders_init.py | 1 | 5390 | #!/usr/bin/env python3.5
"""Test init methods of encoders."""
import unittest
import copy
from typing import Dict, List, Any, Iterable
from neuralmonkey.encoders.recurrent import SentenceEncoder
from neuralmonkey.encoders.sentence_cnn_encoder import SentenceCNNEncoder
from neuralmonkey.model.sequence import EmbeddedSequence
from neuralmonkey.vocabulary import Vocabulary
VOCABULARY = Vocabulary(["ich", "bin", "der", "walrus"])
INPUT_SEQUENCE = EmbeddedSequence("seq", VOCABULARY, "marmelade", 300)
SENTENCE_ENCODER_GOOD = {
"name": ["encoder"],
"vocabulary": [VOCABULARY],
"data_id": ["marmelade"],
"embedding_size": [20],
"rnn_size": [30],
"max_input_len": [None, 15],
"dropout_keep_prob": [0.5, 1.],
}
SENTENCE_ENCODER_BAD = {
"nonexistent": ["ahoj"],
"name": [None, 1],
"vocabulary": [0, None, "ahoj", dict()],
"data_id": [0, None, VOCABULARY],
"embedding_size": [-1, 0, "ahoj", 3.14, VOCABULARY, SentenceEncoder, None],
"rnn_size": [-1, 0, "ahoj", 3.14, VOCABULARY, SentenceEncoder, None],
"max_input_len": [-1, 0, "ahoj", 3.14, VOCABULARY, SentenceEncoder],
"dropout_keep_prob": [0.0, 0, -1.0, 2.0, "ahoj", VOCABULARY, None],
}
TRANSFORMER_ENCODER_GOOD = {
"name": ["transformer_encoder"],
"input_sequence": [INPUT_SEQUENCE],
"ff_hidden_size": [10],
"depth": [6],
"n_heads": [3],
"dropout_keep_prob": [0.5],
}
TRANSFORMER_ENCODER_BAD = {
"nonexistent": ["ahoj"],
"name": [None, 1],
"input_sequence": [0, None, VOCABULARY],
"ff_hidden_size": [-1, 0, "ahoj", 3.14, VOCABULARY, SentenceEncoder, None],
"depth": [-1, "ahoj", 3.14, SentenceEncoder, None],
"n_heads": [-1, "ahoj", 3.14, SentenceEncoder, None],
"dropout_keep_prob": [0.0, 0, -1.0, 2.0, "ahoj", VOCABULARY, None]
}
SENTENCE_CNN_ENCODER_GOOD = {
"name": ["cnn_encoder"],
"input_sequence": [INPUT_SEQUENCE],
"segment_size": [10],
"highway_depth": [11],
"rnn_size": [30],
"filters": [[(2, 10)], [(3, 20), (4, 10)]],
"dropout_keep_prob": [0.5, 1.],
"use_noisy_activations": [False]
}
SENTENCE_CNN_ENCODER_BAD = {
"nonexistent": ["ahoj"],
"name": [None, 1],
"input_sequence": [0, None, VOCABULARY],
"segment_size": [-1, 0, "ahoj", 3.14, VOCABULARY, None],
"highway_depth": [-1, "ahoj", 3.14, SentenceEncoder, None],
"rnn_size": [-1, 0, "ahoj", 3.14, VOCABULARY, SentenceEncoder, None],
"filters": ["ahoj", [], [(0, 0)], [(1, 2, 3)], [VOCABULARY, None],
[(None, None)]],
"dropout_keep_prob": [0.0, 0, -1.0, 2.0, "ahoj", VOCABULARY, None],
"use_noisy_activations": [None, SentenceEncoder]
}
def traverse_combinations(
params: Dict[str, List[Any]],
partial_params: Dict[str, Any]) -> Iterable[Dict[str, Any]]:
params = copy.copy(params)
if params:
pivot_key, values = params.popitem()
for val in values:
partial_params[pivot_key] = val
yield from traverse_combinations(params, partial_params)
else:
yield partial_params
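# Illustration (not part of the original tests): for a hypothetical input such as
#   traverse_combinations({"a": [1, 2], "b": [3]}, {})
# the generator yields the parameter combinations {"a": 1, "b": 3} and
# {"a": 2, "b": 3}; the *_GOOD dictionaries above are expanded the same way in
# the constructor tests below.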
class TestEncodersInit(unittest.TestCase):
def _run_constructors(self, encoder_type, good_params, bad_params):
good_index = 0
good_options = {par: value[good_index]
for par, value in good_params.items()}
name_suffix = 0
for key, bad_values in bad_params.items():
for value in bad_values:
options = copy.copy(good_options)
options[key] = value
if key != "name":
options["name"] = "{}_{}".format(options["name"],
name_suffix)
name_suffix += 1
try:
with self.assertRaises(Exception):
encoder_type(**options)
except Exception:
print("FAILED '{}', configuration: {}".format(
encoder_type, str(options)))
raise
for good_param_combo in traverse_combinations(good_params, {}):
try:
options = copy.copy(good_param_combo)
options["name"] = "{}_{}".format(options["name"], name_suffix)
name_suffix += 1
encoder_type(**options)
except Exception:
print("Good param combo FAILED: {}, configuration: {}".format(
encoder_type, str(options)))
raise
def test_sentence_encoder(self):
with self.assertRaises(Exception):
# pylint: disable=no-value-for-parameter
# on purpose, should fail
SentenceEncoder()
# pylint: enable=no-value-for-parameter
self._run_constructors(SentenceEncoder,
SENTENCE_ENCODER_GOOD,
SENTENCE_ENCODER_BAD)
def test_sentence_cnn_encoder(self):
with self.assertRaises(Exception):
# pylint: disable=no-value-for-parameter
# on purpose, should fail
SentenceCNNEncoder()
# pylint: enable=no-value-for-parameter
self._run_constructors(SentenceCNNEncoder,
SENTENCE_CNN_ENCODER_GOOD,
SENTENCE_CNN_ENCODER_BAD)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -7,045,819,577,563,692,000 | 32.6875 | 79 | 0.556215 | false |
HazyResearch/snorkel | snorkel/contrib/parser/spacy.py | 1 | 3108 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from collections import defaultdict
from snorkel.models import construct_stable_id
from snorkel.parser.parser import Parser, ParserConnection
class SpaCy(Parser):
'''
spaCy
https://spacy.io/
Minimal (buggy) implementation to show how alternate parsers can
be added to Snorkel.
Models for each target language needs to be downloaded using the
following command:
python -m spacy download en
'''
def __init__(self,lang='en'):
try:
import spacy
except:
raise Exception("spacy not installed. Use `pip install spacy`.")
super(SpaCy, self).__init__(name="spaCy")
self.model = spacy.load('en')
def connect(self):
return ParserConnection(self)
def parse(self, document, text):
'''
Transform spaCy output to match CoreNLP's default format
:param document:
:param text:
:return:
'''
text = text.encode('utf-8', 'error')
text = text.decode('utf-8')
doc = self.model(text)
assert doc.is_parsed
position = 0
for sent in doc.sents:
parts = defaultdict(list)
dep_order, dep_par, dep_lab = [], [], []
for token in sent:
parts['words'].append(str(token))
parts['lemmas'].append(token.lemma_)
parts['pos_tags'].append(token.tag_)
parts['ner_tags'].append(token.ent_type_)
parts['char_offsets'].append(token.idx)
dep_par.append(token.head)
dep_lab.append(token.dep_)
#dep_order.append(deps['dependent'])
# Add null entity array (matching null for CoreNLP)
parts['entity_cids'] = ['O' for _ in parts['words']]
parts['entity_types'] = ['O' for _ in parts['words']]
# Link the sentence to its parent document object
parts['document'] = document
parts['text'] = sent.text
# make char_offsets relative to start of sentence
abs_sent_offset = parts['char_offsets'][0]
parts['char_offsets'] = [
p - abs_sent_offset for p in parts['char_offsets']
]
parts['dep_parents'] = dep_par #sort_X_on_Y(dep_par, dep_order)
parts['dep_labels'] = dep_lab #sort_X_on_Y(dep_lab, dep_order)
parts['position'] = position
# Add full dependency tree parse to document meta
# TODO
# Assign the stable id as document's stable id plus absolute
# character offset
abs_sent_offset_end = (abs_sent_offset + parts['char_offsets'][-1] +
len(parts['words'][-1]))
parts['stable_id'] = construct_stable_id(
document, 'sentence', abs_sent_offset, abs_sent_offset_end
)
position += 1
yield parts
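# Rough usage sketch (an assumption, not taken from the Snorkel docs): an instance
# of this class is meant to be handed to a corpus parser wherever the default
# CoreNLP-backed parser would be used, e.g. something along the lines of
#
#   from snorkel.parser import CorpusParser
#   corpus_parser = CorpusParser(parser=SpaCy(lang='en'))
#
# Each `parts` dict yielded by parse() mirrors the CoreNLP keys (words, lemmas,
# pos_tags, ner_tags, char_offsets, dep_parents, dep_labels, ...) so downstream
# Snorkel code can consume it unchanged.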
| apache-2.0 | -7,778,223,192,332,906,000 | 32.782609 | 80 | 0.563707 | false |
webgeodatavore/pyqgis-samples | core/qgis-sample-QgsVectorLayer.py | 1 | 1183 | # coding: utf-8
import urlparse
from qgis.core import QgsVectorLayer
from qgis.utils import iface
layer = iface.activeLayer()
if layer.providerType() == 'virtual':
url_params = layer.source()[1:] # To remove the ? at the beginning
params_dict = urlparse.parse_qsl(url_params)
params = dict(params_dict)
print(params)
print(params['query'])
SELECT "resultats-definitifs-consultation-26-juin-2016".*,
"communes44".*, ST_PointOnSurface("communes44".geometry) as geom
FROM "communes44", "resultats-definitifs-consultation-26-juin-2016"
WHERE "communes44"."INSEE_COM" = "resultats-definitifs-consultation-26-juin-2016"."code_insee"
CASE
WHEN ("oui_p" > 50 AND "oui_p" <= 57.7) THEN color_rgb(247, 251, 255)
WHEN ("oui_p" > 57.7 AND "oui_p" <= 63.6) THEN color_rgb(114, 178, 215)
WHEN ("oui_p" > 63.6 AND "oui_p" <= 85.1) THEN color_rgb(8, 48, 107)
WHEN ("non_p" > 50 AND "non_p" <= 51.8) THEN color_rgb(254, 240, 217)
WHEN ("non_p" > 51.8 AND "non_p" <= 57.1) THEN color_rgb(252, 141, 89)
WHEN ("non_p" > 57.1 AND "non_p" <= 73.6) THEN color_rgb(179, 0, 0)
END
from qgis.gui import QgsTextAnnotationItem;iface.mapCanvas().findChildren(QgsTextAnnotationItem) | gpl-2.0 | -3,936,529,623,969,429,500 | 41.285714 | 96 | 0.688081 | false |
TimothyZhang/structer | structerui/hotkey.py | 1 | 3301 | # Copyright 2014 Timothy Zhang([email protected]).
#
# This file is part of Structer.
#
# Structer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Structer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Structer. If not, see <http://www.gnu.org/licenses/>.
import string
import wx
'''
Hotkey settings
'''
_keycode_map = {wx.WXK_NUMPAD0: '0',
wx.WXK_NUMPAD1: '1',
wx.WXK_NUMPAD2: '2',
wx.WXK_NUMPAD3: '3',
wx.WXK_NUMPAD4: '4',
wx.WXK_NUMPAD5: '5',
wx.WXK_NUMPAD6: '6',
wx.WXK_NUMPAD7: '7',
wx.WXK_NUMPAD8: '8',
wx.WXK_NUMPAD9: '9',
wx.WXK_INSERT: 'Insert',
wx.WXK_DELETE: 'Delete',
wx.WXK_UP: 'Up',
wx.WXK_DOWN: 'Down',
wx.WXK_LEFT: 'Left',
wx.WXK_RIGHT: 'Right',
wx.WXK_ESCAPE: 'Esc',
wx.WXK_TAB: 'Tab',
wx.WXK_BACK: 'Back',
wx.WXK_RETURN: 'Enter',
wx.WXK_NUMPAD_ENTER: 'Enter',
wx.WXK_F2: 'F2'}
def get_key_name(keycode):
ch = _keycode_map.get(keycode)
if ch is not None:
return ch
if 0 <= keycode < 256 and chr(keycode) in string.printable:
return chr(keycode).upper()
return 'Unknown'
def build_keystr(evt):
s = []
if evt.ControlDown():
s.append('Ctrl')
if evt.AltDown():
s.append('Alt')
if evt.ShiftDown():
s.append("Shift")
key = evt.GetKeyCode()
s.append(get_key_name(key))
return '+'.join(s)
def check(keydef, keystr):
# print 'check hotkey', keydef, keystr
if type(keydef) is tuple:
return keystr in keydef
return keydef == keystr
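# Minimal sketch of how these helpers are typically combined (the handler below is
# hypothetical, not part of this module): inside a wx key event handler one can do
#
#   def on_key_down(self, evt):
#       keystr = build_keystr(evt)         # e.g. "Ctrl+S"
#       if check(SAVE, keystr):
#           do_save()
#       elif check(CLOSE_DIALOG, keystr):  # tuple definitions match any member
#           close_dialog()
#
# A key definition below is either a single string or a tuple of accepted strings.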
CLOSE_DIALOG = 'Esc', 'Ctrl+Q'
CLOSE_EDITOR_FRAME = 'Esc', 'Ctrl+Q'
SAVE = 'Ctrl+S'
SHOW_EXPLORER = '`'
# grid
COPY = 'Ctrl+C'
COPY_TEXT = 'Ctrl+Alt+C'
PASTE = 'Ctrl+V'
PASTE_TEXT = 'Ctrl+Alt+V'
UNDO = 'Ctrl+Z'
REDO = 'Ctrl+Y'
FIND = 'Ctrl+F'
SELECT_ALL = 'Ctrl+A'
# list
LIST_INSERT_HEAD = 'Ctrl+,'
LIST_APPEND_TAIL = 'Ctrl+.'
LIST_INSERT = 'Ctrl+I', 'Ctrl+Insert'
LIST_DELETE = 'Ctrl+D', 'Ctrl+Delete'
LIST_SELECT_ROWS = 'Ctrl+R'
LIST_SELECT_COLS = 'Ctrl+L'
LIST_CUT = 'Ctrl+X'
LIST_INSERT_COPIED = 'Ctrl+Alt+I'
LIST_APPEND_COPIED_HEAD = 'Ctrl+Alt+,'
LIST_APPEND_COPIED_TAIL = 'Ctrl+Alt+.'
FLATTEN_EDITOR = 'Ctrl+K'
# cell
INSERT_FRONT = ','
INSERT_TAIL = '.'
CELL_BEGIN_EDIT = ' '
INCREASE = 'I'
DECREASE = 'D'
GOTO_REF = 'Ctrl+G'
RESET = 'Delete' # set to default
# explorer
EXPLORER_OPEN = 'Enter', 'Alt+Down'
EXPLORER_UP_LEVEL = 'Alt+Up'
EXPLORER_HISTORY_PREV = "Alt+Left" # , "Back" will interrupt text editing
EXPLORER_HISTORY_NEXT = "Alt+Right"
EXPLORER_RENAME = 'F2'
| gpl-3.0 | -6,358,792,627,314,684,000 | 24.789063 | 74 | 0.574977 | false |
TightSquad/HABIP | gui/axlisten_logger.py | 1 | 5466 | #!/usr/bin/env python
import axreader
import common
import groundComms
import time
import logging
if __name__ == "__main__":
LOG_IN_PATH = "/home/spex/axlisten.log"
LOG_DATA_PATH = "/home/spex/habip_data.log"
LOG_CMDACK_PATH = "/home/spex/habip_ack.log"
AX_INTERFACES = ["sm0"]
AX_SOURCES = ["W2RIT-11"]
AX_DESTINATIONS = ["W2RIT"]
# Axlisten parser
reader = axreader.axreader(filePath=LOG_IN_PATH, interfaces=AX_INTERFACES, sources=AX_SOURCES, destinations=AX_DESTINATIONS)
# HABIP Data logger
dataLogger = logging.getLogger("datalog")
dataLoggerHandler = logging.FileHandler(LOG_DATA_PATH)
dataLoggerFormatter = logging.Formatter("%(asctime)s,%(message)s",datefmt="%H:%M:%S")
dataLoggerHandler.setFormatter(dataLoggerFormatter)
dataLogger.addHandler(dataLoggerHandler)
dataLogger.setLevel(logging.INFO)
# HABIP Command Ack logger
ackLogger = logging.getLogger("acklog")
ackLoggerHandler = logging.FileHandler(LOG_CMDACK_PATH)
ackLoggerFormatter = logging.Formatter("%(asctime)s,%(message)s",datefmt="%H:%M:%S")
ackLoggerHandler.setFormatter(ackLoggerFormatter)
ackLogger.addHandler(ackLoggerHandler)
ackLogger.setLevel(logging.INFO)
# Then use logger.info(stringToAddHere) to add a string to a log file
# Main loop
while True:
# Get newly received packets from axreader
packets = reader.getNewData()
# Flag if received data
receivedData = False
        # List of values of HABIP sensors for telemetry data. 66 sensor data points
        # (indices 0-65, matching the habipSensors map below)
        habipSensorData = ["NULL"] * 66
# HABIP Sensors for telemetry data. Values are the index of the list the values are in
habipSensors = {
"B0:TB0": 0,
"B0:TB1": 1,
"B0:TE0": 2,
"B0:TE1": 3,
"B0:TD0": 4,
"B0:P0": 5,
"B0:P1": 6,
"B0:H": 7,
"B0:V": 8,
"B0:C": 9,
"B1:TB0": 10,
"B1:TB1": 11,
"B1:TE0": 12,
"B1:TE1": 13,
"B1:TD0": 14,
"B1:P0": 15,
"B1:P1": 16,
"B1:H": 17,
"B1:V": 18,
"B1:C": 19,
"B2:TB0": 20,
"B2:TB1": 21,
"B2:TE0": 22,
"B2:TE1": 23,
"B2:TD0": 24,
"B2:P0": 25,
"B2:P1": 26,
"B2:H": 27,
"B2:V": 28,
"B2:C": 29,
"B3:TB0": 30,
"B3:TB1": 31,
"B3:TE0": 32,
"B3:TE1": 33,
"B3:TD0": 34,
"B3:P0": 35,
"B3:P1": 36,
"B3:H": 37,
"B3:V": 38,
"B3:C": 39,
"B4:TB0": 40,
"B4:P0": 41,
"B4:PB": 42,
"B4:V": 43,
"B4:C": 44,
"B4:XGY": 45,
"B4:XAC": 46,
"B4:YGY": 47,
"B4:YAC": 48,
"B4:ZGY": 49,
"B4:ZAC": 50,
"B4:MS": 51,
"B4:MC": 52,
"B4:MV": 53,
"B4:MD": 54,
"B4:ME": 55,
"B5:TB0": 56,
"B5:TD0": 57,
"B5:P0": 58,
"B5:LAT": 59,
"B5:LON": 60,
"B5:TM": 61,
"B5:SPD": 62,
"B5:ALT": 63,
"B5:TBL": 64,
"B5:PBL": 65,
}
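        # Example of the telemetry payload this loop expects (assumed format,
        # reconstructed from the parsing below): semicolon-separated readings of
        # the form <board>:<sensor>:<value>, e.g.
        #   "B0:TB0:23.5;B0:P0:101.3;B4:XGY:0.02;B5:LAT:43.0845"
        # whereas command acknowledgement packets contain the substring "ACK".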
# Loop through newly received packets
for packet in packets:
# Get the data/command portion (relevant portion) of the received packet
packetDataString = packet.data
#print packet.data
#print "--------"
# If received command acknowledgement(s)
if "ACK" in packetDataString:
# Split the acknowledgements if received multiple
ackList = packetDataString.split(";")
# Add the acknowledgements to the command acknowledgement log file
for ack in ackList:
ackLogger.info(ack)
# Else received telemetry data
else:
# Received data in the latest packets
receivedData = True
# Split the data from the various censors (semicolon separated)
dataList = packetDataString.split(";")
# Loop through the sensor data received
for data in dataList:
# Data goes boardNum:sensorAbbrev:value
dataParts = data.split(":")
# Grab the boardNum:sensorAbbrev portion of the data
sensor = dataParts[0] + ":" + dataParts[1]
# Grab the value for the sensor
value = dataParts[2]
# Add the value to the sensor data list if it is a valid sensor
if sensor in habipSensors.keys():
habipSensorIndex = habipSensors[sensor]
habipSensorData[habipSensorIndex] = value
# Log data for all sensors if received any
if receivedData:
# Loop through the sensor data list and add the sensor values to the data log file, comma-separated
dataToStoreString = ""
for sensorData in habipSensorData:
dataToStoreString += sensorData + ","
dataLogger.info(dataToStoreString)
# Sleep a bit
common.msleep(1000)
| gpl-3.0 | 447,616,459,182,597,100 | 31.343195 | 128 | 0.502561 | false |
huggingface/transformers | tests/test_modeling_ibert.py | 1 | 30207 | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from torch import nn
from transformers import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertConfig,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
)
from transformers.models.ibert.modeling_ibert import (
IBertEmbeddings,
IntGELU,
IntLayerNorm,
IntSoftmax,
QuantAct,
QuantEmbedding,
QuantLinear,
create_position_ids_from_input_ids,
)
class IBertModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = IBertConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
quant_mode=True,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = IBertModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = IBertForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = IBertForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = IBertForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = IBertForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class IBertModelTest(ModelTesterMixin, unittest.TestCase):
test_pruning = False
test_torchscript = False
test_head_masking = False
test_resize_embeddings = False
all_model_classes = (
(
IBertForMaskedLM,
IBertModel,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertForMultipleChoice,
IBertForQuestionAnswering,
)
if is_torch_available()
else ()
)
def setUp(self):
self.model_tester = IBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=IBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
# I-BERT only supports absolute embedding
for type in ["absolute"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in IBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = IBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_create_position_ids_respects_padding_index(self):
"""Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
model = IBertEmbeddings(config=config)
input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
expected_positions = torch.as_tensor(
[[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]]
)
position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
def test_create_position_ids_from_inputs_embeds(self):
"""Ensure that the default position ids only assign a sequential . This is a regression
test for https://github.com/huggingface/transformers/issues/1761
The position ids should be masked with the embedding object's padding index. Therefore, the
first available non-padding position index is IBertEmbeddings.padding_idx + 1
"""
config = self.model_tester.prepare_config_and_inputs()[0]
embeddings = IBertEmbeddings(config=config)
inputs_embeds = torch.empty(2, 4, 30)
expected_single_positions = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
self.assertEqual(position_ids.shape, expected_positions.shape)
self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
# Override
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), QuantEmbedding)
model.set_input_embeddings(nn.Embedding(10, 10))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
# Override
def test_feed_forward_chunking(self):
pass # I-BERT does not support chunking
# Override
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
embed, embed_scaling_factor = wte(input_ids)
inputs["inputs_embeds"] = embed
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch
class IBertModelIntegrationTest(unittest.TestCase):
def test_quant_embedding(self):
weight_bit = 8
embedding = QuantEmbedding(2, 4, quant_mode=True, weight_bit=weight_bit)
embedding_weight = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]])
embedding.weight = nn.Parameter(embedding_weight)
expected_scaling_factor = embedding_weight.abs().max() / (2 ** (weight_bit - 1) - 1)
x, x_scaling_factor = embedding(torch.tensor(0))
y, y_scaling_factor = embedding(torch.tensor(1))
# scaling factor should follow the symmetric quantization rule
self.assertTrue(torch.allclose(x_scaling_factor, expected_scaling_factor, atol=1e-4))
self.assertTrue(torch.allclose(x_scaling_factor, expected_scaling_factor, atol=1e-4))
self.assertTrue(torch.allclose(y_scaling_factor, expected_scaling_factor, atol=1e-4))
# quantization error should not exceed the scaling factor
self.assertTrue(torch.allclose(x, embedding_weight[0], atol=expected_scaling_factor))
self.assertTrue(torch.allclose(y, embedding_weight[1], atol=expected_scaling_factor))
def test_quant_act(self):
def _test_range():
act = QuantAct(activation_bit, act_range_momentum, quant_mode=True)
# First pass
x = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]])
x_scaling_factor = torch.tensor(1.0)
y, y_scaling_factor = act(x, x_scaling_factor)
y_int = y / y_scaling_factor
# After the first pass, x_min and x_max should be initialized with x.min() and x.max()
expected_x_min, expected_x_max = x.min(), x.max()
self.assertTrue(torch.allclose(act.x_min, expected_x_min, atol=1e-4))
self.assertTrue(torch.allclose(act.x_max, expected_x_max, atol=1e-4))
# scaling factor should follow the symmetric quantization rule
expected_range = torch.max(expected_x_min.abs(), expected_x_max.abs())
expected_scaling_factor = expected_range / (2 ** (activation_bit - 1) - 1)
self.assertTrue(torch.allclose(y_scaling_factor, expected_scaling_factor, atol=1e-4))
# quantization error should not exceed the scaling factor
self.assertTrue(torch.allclose(x, y, atol=expected_scaling_factor))
# output should be integer
self.assertTrue(torch.allclose(y_int, y_int.round(), atol=1e-4))
# Second Pass
x = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]]) * 2
x_scaling_factor = torch.tensor(1.0)
y, y_scaling_factor = act(x, x_scaling_factor)
y_int = y / y_scaling_factor
# From the second pass, x_min and x_max should be updated with moving average
expected_x_min = expected_x_min * act_range_momentum + x.min() * (1 - act_range_momentum)
expected_x_max = expected_x_max * act_range_momentum + x.max() * (1 - act_range_momentum)
self.assertTrue(torch.allclose(act.x_min, expected_x_min, atol=1e-4))
self.assertTrue(torch.allclose(act.x_max, expected_x_max, atol=1e-4))
# scaling factor should follow the symmetric quantization rule
expected_range = torch.max(expected_x_min.abs(), expected_x_max.abs())
expected_scaling_factor = expected_range / (2 ** (activation_bit - 1) - 1)
self.assertTrue(torch.allclose(y_scaling_factor, expected_scaling_factor, atol=1e-4))
# quantization error should not exceed the scaling factor
x = x.clamp(min=-expected_range, max=expected_range)
self.assertTrue(torch.allclose(x, y, atol=expected_scaling_factor))
# output should be integer
self.assertTrue(torch.allclose(y_int, y_int.round(), atol=1e-4))
# Third pass, with eval()
act.eval()
x = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]]) * 3
# In eval mode, min/max and scaling factor must be fixed
self.assertTrue(torch.allclose(act.x_min, expected_x_min, atol=1e-4))
self.assertTrue(torch.allclose(act.x_max, expected_x_max, atol=1e-4))
self.assertTrue(torch.allclose(y_scaling_factor, expected_scaling_factor, atol=1e-4))
def _test_identity():
# test if identity and identity_scaling_factor are given
# should add the input values
act = QuantAct(activation_bit, act_range_momentum, quant_mode=True)
x = torch.tensor([[-1.0, -2.0, -3.0, -4.0], [5.0, 6.0, 7.0, 8.0]])
y = torch.tensor([[6.0, -7.0, 1.0, -2.0], [3.0, -4.0, -8.0, 5.0]])
x_scaling_factor = torch.tensor(1.0)
y_scaling_factor = torch.tensor(0.5)
z, z_scaling_factor = act(x, x_scaling_factor, y, y_scaling_factor)
z_int = z / z_scaling_factor
self.assertTrue(torch.allclose(x + y, z, atol=0.1))
self.assertTrue(torch.allclose(z_int, z_int.round(), atol=1e-4))
activation_bit = 8
act_range_momentum = 0.95
_test_range()
_test_identity()
def test_quant_linear(self):
def _test(per_channel):
linear_q = QuantLinear(2, 4, quant_mode=True, per_channel=per_channel, weight_bit=weight_bit)
linear_dq = QuantLinear(2, 4, quant_mode=False, per_channel=per_channel, weight_bit=weight_bit)
linear_weight = torch.tensor([[-1.0, 2.0, 3.0, -4.0], [5.0, -6.0, -7.0, 8.0]]).T
linear_q.weight = nn.Parameter(linear_weight)
linear_dq.weight = nn.Parameter(linear_weight)
q, q_scaling_factor = linear_q(x, x_scaling_factor)
q_int = q / q_scaling_factor
dq, dq_scaling_factor = linear_dq(x, x_scaling_factor)
if per_channel:
q_max = linear_weight.abs().max(dim=1).values
else:
q_max = linear_weight.abs().max()
expected_scaling_factor = q_max / (2 ** (weight_bit - 1) - 1)
# scaling factor should follow the symmetric quantization rule
self.assertTrue(torch.allclose(linear_q.fc_scaling_factor, expected_scaling_factor, atol=1e-4))
# output of the normal linear layer and the quantized linear layer should be similar
self.assertTrue(torch.allclose(q, dq, atol=0.5))
# output of the quantized linear layer should be integer
self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4))
weight_bit = 8
x = torch.tensor([[2.0, -5.0], [-3.0, 4.0]])
x_scaling_factor = torch.tensor([1.0])
_test(True)
_test(False)
def test_int_gelu(self):
gelu_q = IntGELU(quant_mode=True)
gelu_dq = nn.GELU()
x_int = torch.range(-10000, 10000, 1)
x_scaling_factor = torch.tensor(0.001)
x = x_int * x_scaling_factor
q, q_scaling_factor = gelu_q(x, x_scaling_factor)
q_int = q / q_scaling_factor
dq = gelu_dq(x)
# output of the normal GELU and the quantized GELU should be similar
self.assertTrue(torch.allclose(q, dq, atol=0.5))
# output of the quantized GELU layer should be integer
self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4))
def test_force_dequant_gelu(self):
x_int = torch.range(-10000, 10000, 1)
x_scaling_factor = torch.tensor(0.001)
x = x_int * x_scaling_factor
gelu_dq = IntGELU(quant_mode=False)
gelu_fdqs_dict = {
True: [
IntGELU(quant_mode=True, force_dequant="nonlinear"),
IntGELU(quant_mode=True, force_dequant="gelu"),
],
False: [
IntGELU(quant_mode=True, force_dequant="none"),
IntGELU(quant_mode=True, force_dequant="softmax"),
IntGELU(quant_mode=True, force_dequant="layernorm"),
],
}
dq, dq_scaling_factor = gelu_dq(x, x_scaling_factor)
for label, gelu_fdqs in gelu_fdqs_dict.items():
for gelu_fdq in gelu_fdqs:
q, q_scaling_factor = gelu_fdq(x, x_scaling_factor)
if label:
self.assertTrue(torch.allclose(q, dq, atol=1e-4))
else:
self.assertFalse(torch.allclose(q, dq, atol=1e-4))
def test_int_softmax(self):
output_bit = 8
softmax_q = IntSoftmax(output_bit, quant_mode=True)
softmax_dq = nn.Softmax()
# x_int = torch.range(-10000, 10000, 1)
def _test(array):
x_int = torch.tensor(array)
x_scaling_factor = torch.tensor(0.1)
x = x_int * x_scaling_factor
q, q_scaling_factor = softmax_q(x, x_scaling_factor)
q_int = q / q_scaling_factor
dq = softmax_dq(x)
# output of the normal Softmax and the quantized Softmax should be similar
self.assertTrue(torch.allclose(q, dq, atol=0.5))
# output of the quantized GELU layer should be integer
self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4))
# Output of the quantize Softmax should not exceed the output_bit
self.assertTrue(q.abs().max() < 2 ** output_bit)
array = [[i + j for j in range(10)] for i in range(-10, 10)]
_test(array)
array = [[i + j for j in range(50)] for i in range(-10, 10)]
_test(array)
array = [[i + 100 * j for j in range(2)] for i in range(-10, 10)]
_test(array)
def test_force_dequant_softmax(self):
output_bit = 8
array = [[i + j for j in range(10)] for i in range(-10, 10)]
x_int = torch.tensor(array)
x_scaling_factor = torch.tensor(0.1)
x = x_int * x_scaling_factor
softmax_dq = IntSoftmax(output_bit, quant_mode=False)
softmax_fdqs_dict = {
True: [
IntSoftmax(output_bit, quant_mode=True, force_dequant="nonlinear"),
IntSoftmax(output_bit, quant_mode=True, force_dequant="softmax"),
],
False: [
IntSoftmax(output_bit, quant_mode=True, force_dequant="none"),
IntSoftmax(output_bit, quant_mode=True, force_dequant="gelu"),
IntSoftmax(output_bit, quant_mode=True, force_dequant="layernorm"),
],
}
dq, dq_scaling_factor = softmax_dq(x, x_scaling_factor)
for label, softmax_fdqs in softmax_fdqs_dict.items():
for softmax_fdq in softmax_fdqs:
q, q_scaling_factor = softmax_fdq(x, x_scaling_factor)
if label:
self.assertTrue(torch.allclose(q, dq, atol=1e-4))
else:
self.assertFalse(torch.allclose(q, dq, atol=1e-4))
def test_int_layernorm(self):
output_bit = 8
# some random matrix
array = [[[i * j * j + j for j in range(5, 15)]] for i in range(-10, 10)]
x_int = torch.tensor(array)
x_scaling_factor = torch.tensor(0.1)
x = x_int * x_scaling_factor
ln_q = IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit)
ln_dq = nn.LayerNorm(x.shape[1:], 1e-5)
ln_q.weight = nn.Parameter(torch.ones(x.shape[1:]))
ln_q.bias = nn.Parameter(torch.ones(x.shape[1:]))
ln_dq.weight = nn.Parameter(torch.ones(x.shape[1:]))
ln_dq.bias = nn.Parameter(torch.ones(x.shape[1:]))
q, q_scaling_factor = ln_q(x, x_scaling_factor)
q_int = q / q_scaling_factor
dq = ln_dq(x)
# output of the normal LN and the quantized LN should be similar
self.assertTrue(torch.allclose(q, dq, atol=0.5))
# output of the quantized GELU layer should be integer
self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4))
def test_force_dequant_layernorm(self):
output_bit = 8
array = [[[i * j * j + j for j in range(5, 15)]] for i in range(-10, 10)]
x_int = torch.tensor(array)
x_scaling_factor = torch.tensor(0.1)
x = x_int * x_scaling_factor
ln_dq = IntLayerNorm(x.shape[1:], 1e-5, quant_mode=False, output_bit=output_bit)
ln_fdqs_dict = {
True: [
IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="nonlinear"),
IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="layernorm"),
],
False: [
IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="none"),
IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="gelu"),
IntLayerNorm(x.shape[1:], 1e-5, quant_mode=True, output_bit=output_bit, force_dequant="softmax"),
],
}
ln_dq.weight = nn.Parameter(torch.ones(x.shape[1:]))
ln_dq.bias = nn.Parameter(torch.ones(x.shape[1:]))
dq, dq_scaling_factor = ln_dq(x, x_scaling_factor)
for label, ln_fdqs in ln_fdqs_dict.items():
for ln_fdq in ln_fdqs:
ln_fdq.weight = nn.Parameter(torch.ones(x.shape[1:]))
ln_fdq.bias = nn.Parameter(torch.ones(x.shape[1:]))
q, q_scaling_factor = ln_fdq(x, x_scaling_factor)
if label:
self.assertTrue(torch.allclose(q, dq, atol=1e-4))
else:
self.assertFalse(torch.allclose(q, dq, atol=1e-4))
def quantize(self, model):
# Helper function that quantizes the given model
# Recursively convert all the `quant_mode` attributes as `True`
if hasattr(model, "quant_mode"):
model.quant_mode = True
elif type(model) == nn.Sequential:
for n, m in model.named_children():
self.quantize(m)
elif type(model) == nn.ModuleList:
for n in model:
self.quantize(n)
else:
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and mod != model:
self.quantize(mod)
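    # Usage note (illustrative): calling `self.quantize(model)` flips the
    # `quant_mode` flag of every submodule in place, so the same pretrained
    # checkpoint can be evaluated once in full precision and once quantized, as
    # the two integration tests below do.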
@slow
def test_inference_masked_lm(self):
# I-BERT should be "equivalent" to RoBERTa if not quantized
# Test coped from `test_modeling_roberta.py`
model = IBertForMaskedLM.from_pretrained("kssteven/ibert-roberta-base")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 11, 50265))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[33.8802, -4.3103, 22.7761], [4.6539, -2.8098, 13.6253], [1.8228, -3.6898, 8.8600]]]
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# I-BERT should be "similar" to RoBERTa if quantized
self.quantize(model)
output = model(input_ids)[0]
self.assertEqual(output.shape, expected_shape)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=0.1))
@slow
def test_inference_classification_head(self):
# I-BERT should be "equivalent" to RoBERTa if not quantized
# Test coped from `test_modeling_roberta.py`
model = IBertForSequenceClassification.from_pretrained("kssteven/ibert-roberta-large-mnli")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
output = model(input_ids)[0]
expected_shape = torch.Size((1, 3))
self.assertEqual(output.shape, expected_shape)
expected_tensor = torch.tensor([[-0.9469, 0.3913, 0.5118]])
self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
# I-BERT should be "similar" to RoBERTa if quantized
self.quantize(model)
output = model(input_ids)[0]
self.assertEqual(output.shape, expected_shape)
self.assertTrue(torch.allclose(output, expected_tensor, atol=0.1))
| apache-2.0 | 4,290,449,463,490,228,700 | 42.400862 | 117 | 0.612639 | false |
pypingou/pagure | pagure/api/issue.py | 1 | 47403 | # -*- coding: utf-8 -*-
"""
(c) 2015-2017 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <[email protected]>
"""
from __future__ import print_function, unicode_literals, absolute_import
import flask
import datetime
import logging
import arrow
from sqlalchemy.exc import SQLAlchemyError
import pagure.exceptions
import pagure.lib.query
from pagure.api import (
API,
api_method,
api_login_required,
api_login_optional,
APIERROR,
get_request_data,
get_page,
get_per_page,
)
from pagure.config import config as pagure_config
from pagure.utils import (
api_authenticated,
is_repo_committer,
urlpattern,
is_true,
)
from pagure.api.utils import (
_get_repo,
_check_token,
_get_issue,
_check_issue_tracker,
_check_ticket_access,
_check_private_issue_access,
)
_log = logging.getLogger(__name__)
def _check_link_custom_field(field, links):
"""Check if the value provided in the link custom field
is a link.
:param field : The issue custom field key object.
:param links : Value of the custom field.
:raises pagure.exceptions.APIERROR when invalid.
"""
if field.key_type == "link":
links = links.split(",")
for link in links:
link = link.replace(" ", "")
if not urlpattern.match(link):
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EINVALIDISSUEFIELD_LINK
)
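# For example (hypothetical values), a custom field whose key_type is "link"
# accepts a comma-separated list such as "https://example.org/a,https://example.org/b";
# any entry that does not match `urlpattern` raises EINVALIDISSUEFIELD_LINK.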
@API.route("/<repo>/new_issue", methods=["POST"])
@API.route("/<namespace>/<repo>/new_issue", methods=["POST"])
@API.route("/fork/<username>/<repo>/new_issue", methods=["POST"])
@API.route("/fork/<username>/<namespace>/<repo>/new_issue", methods=["POST"])
@api_login_required(acls=["issue_create"])
@api_method
def api_new_issue(repo, username=None, namespace=None):
"""
Create a new issue
------------------
Open a new issue on a project.
::
POST /api/0/<repo>/new_issue
POST /api/0/<namespace>/<repo>/new_issue
::
POST /api/0/fork/<username>/<repo>/new_issue
POST /api/0/fork/<username>/<namespace>/<repo>/new_issue
Input
^^^^^
+-------------------+--------+-------------+---------------------------+
| Key | Type | Optionality | Description |
+===================+========+=============+===========================+
| ``title`` | string | Mandatory | The title of the issue |
+-------------------+--------+-------------+---------------------------+
| ``issue_content`` | string | Mandatory | | The description of the |
| | | | issue |
+-------------------+--------+-------------+---------------------------+
| ``private`` | boolean| Optional | | Include this key if |
| | | | you want a private issue|
| | | | to be created |
+-------------------+--------+-------------+---------------------------+
| ``priority`` | string | Optional | | The priority to set to |
| | | | this ticket from the |
| | | | list of priorities set |
| | | | in the project |
+-------------------+--------+-------------+---------------------------+
| ``milestone`` | string | Optional | | The milestone to assign |
| | | | to this ticket from the |
| | | | list of milestones set |
| | | | in the project |
+-------------------+--------+-------------+---------------------------+
| ``tag`` | string | Optional | | Comma separated list of |
| | | | tags to link to this |
| | | | ticket from the list of |
| | | | tags in the project |
+-------------------+--------+-------------+---------------------------+
| ``assignee`` | string | Optional | | The username of the user|
| | | | to assign this ticket to|
+-------------------+--------+-------------+---------------------------+
Sample response
^^^^^^^^^^^^^^^
::
{
"issue": {
"assignee": null,
"blocks": [],
"close_status": null,
"closed_at": null,
"closed_by": null,
"comments": [],
"content": "This issue needs attention",
"custom_fields": [],
"date_created": "1479458613",
"depends": [],
"id": 1,
"milestone": null,
"priority": null,
"private": false,
"status": "Open",
"tags": [],
"title": "test issue",
"user": {
"fullname": "PY C",
"name": "pingou"
}
},
"message": "Issue created"
}
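    Sample request
    ^^^^^^^^^^^^^^
    A minimal sketch with ``python-requests`` (illustrative only: the host name and
    the token are placeholders, and it assumes an API token carrying the
    ``issue_create`` ACL sent in the ``Authorization: token <key>`` header):
    ::
        import requests
        resp = requests.post(
            "https://pagure.example.org/api/0/test/new_issue",
            headers={"Authorization": "token API_TOKEN_HERE"},
            data={"title": "test issue", "issue_content": "This issue needs attention"},
        )
        print(resp.json())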
"""
output = {}
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo, project_token=False)
user_obj = pagure.lib.query.get_user(
flask.g.session, flask.g.fas_user.username
)
if not user_obj:
raise pagure.exceptions.APIError(404, error_code=APIERROR.ENOUSER)
form = pagure.forms.IssueFormSimplied(
priorities=repo.priorities,
milestones=repo.milestones,
csrf_enabled=False,
)
if form.validate_on_submit():
title = form.title.data
content = form.issue_content.data
milestone = form.milestone.data or None
private = is_true(form.private.data)
priority = form.priority.data or None
assignee = get_request_data().get("assignee", "").strip() or None
tags = [
tag.strip()
for tag in get_request_data().get("tag", "").split(",")
if tag.strip()
]
try:
issue = pagure.lib.query.new_issue(
flask.g.session,
repo=repo,
title=title,
content=content,
private=private,
assignee=assignee,
milestone=milestone,
priority=priority,
tags=tags,
user=flask.g.fas_user.username,
)
flask.g.session.flush()
# If there is a file attached, attach it.
filestream = flask.request.files.get("filestream")
if filestream and "<!!image>" in issue.content:
new_filename = pagure.lib.query.add_attachment(
repo=repo,
issue=issue,
attachmentfolder=pagure_config["ATTACHMENTS_FOLDER"],
user=user_obj,
filename=filestream.filename,
filestream=filestream.stream,
)
# Replace the <!!image> tag in the comment with the link
# to the actual image
filelocation = flask.url_for(
"ui_ns.view_issue_raw_file",
repo=repo.name,
username=username,
filename="files/%s" % new_filename,
)
new_filename = new_filename.split("-", 1)[1]
url = "[](%s)" % (
new_filename,
filelocation,
filelocation,
)
issue.content = issue.content.replace("<!!image>", url)
flask.g.session.add(issue)
flask.g.session.flush()
flask.g.session.commit()
output["message"] = "Issue created"
output["issue"] = issue.to_json(public=True)
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
raise pagure.exceptions.APIError(400, error_code=APIERROR.EDBERROR)
else:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EINVALIDREQ, errors=form.errors
)
jsonout = flask.jsonify(output)
return jsonout
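# --- Editorial illustration (not part of pagure itself) ----------------------
# The docstring above describes the payload for creating an issue; the sketch
# below shows how a client *could* call this endpoint with the `requests`
# library. The base URL, project name and API token are hypothetical
# placeholders, and `requests` is assumed to be available on the client side.
def _example_create_issue_client():  # pragma: no cover - illustrative only
    import requests

    response = requests.post(
        "https://pagure.example.org/api/0/test/new_issue",
        headers={"Authorization": "token <your-api-token>"},
        data={
            "title": "test issue",
            "issue_content": "This issue needs attention",
            "tag": "easyfix,bug",
        },
    )
    return response.json()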
@API.route("/<namespace>/<repo>/issues")
@API.route("/fork/<username>/<repo>/issues")
@API.route("/<repo>/issues")
@API.route("/fork/<username>/<namespace>/<repo>/issues")
@api_login_optional()
@api_method
def api_view_issues(repo, username=None, namespace=None):
"""
List project's issues
---------------------
List issues of a project.
::
GET /api/0/<repo>/issues
GET /api/0/<namespace>/<repo>/issues
::
GET /api/0/fork/<username>/<repo>/issues
GET /api/0/fork/<username>/<namespace>/<repo>/issues
Parameters
^^^^^^^^^^
+---------------+---------+--------------+---------------------------+
| Key | Type | Optionality | Description |
+===============+=========+==============+===========================+
| ``status`` | string | Optional | | Filters the status of |
| | | | issues. Fetches all the |
| | | | issues if status is |
| | | | ``all``. Default: |
| | | | ``Open`` |
+---------------+---------+--------------+---------------------------+
| ``tags`` | string | Optional | | A list of tags you |
| | | | wish to filter. If |
| | | | you want to filter |
| | | | for issues not having |
| | | | a tag, add an |
| | | | exclamation mark in |
| | | | front of it |
+---------------+---------+--------------+---------------------------+
| ``assignee`` | string | Optional | | Filter the issues |
| | | | by assignee |
+---------------+---------+--------------+---------------------------+
| ``author`` | string | Optional | | Filter the issues |
| | | | by creator |
+---------------+---------+--------------+---------------------------+
| ``milestones``| list of | Optional | | Filter the issues |
| | strings | | by milestone |
+---------------+---------+--------------+---------------------------+
| ``priority`` | string | Optional | | Filter the issues |
| | | | by priority |
+---------------+---------+--------------+---------------------------+
| ``no_stones`` | boolean | Optional | | If true returns only the|
| | | | issues having no |
| | | | milestone, if false |
| | | | returns only the issues |
| | | | having a milestone |
+---------------+---------+--------------+---------------------------+
| ``since`` | string | Optional | | Filter the issues |
| | | | updated after this date.|
| | | | The date can either be |
| | | | provided as an unix date|
| | | | or in the format Y-M-D |
+---------------+---------+--------------+---------------------------+
| ``order`` | string | Optional | | Set the ordering of the |
| | | | issues. This can be |
| | | | ``asc`` or ``desc``. |
| | | | Default: ``desc`` |
+---------------+---------+--------------+---------------------------+
| ``page`` | int | Optional | | Specifies which |
| | | | page to return |
| | | | (defaults to: 1) |
+---------------+---------+--------------+---------------------------+
| ``per_page`` | int | Optional | | The number of issues |
| | | | to return per page. |
| | | | The maximum is 100. |
+---------------+---------+--------------+---------------------------+
Sample response
^^^^^^^^^^^^^^^
::
{
"args": {
"assignee": null,
"author": null,
"milestones": [],
"no_stones": null,
"order": null,
"priority": null,
"since": null,
"status": "Closed",
"tags": [
"0.1"
]
},
"total_issues": 1,
"issues": [
{
"assignee": null,
"blocks": ["1"],
"close_status": null,
"closed_at": null,
"closed_by": null,
"comments": [],
"content": "asd",
"custom_fields": [],
"date_created": "1427442217",
"depends": [],
"id": 4,
"last_updated": "1533815358",
"milestone": null,
"priority": null,
"private": false,
"status": "Fixed",
"tags": [
"0.1"
],
"title": "bug",
"user": {
"fullname": "PY.C",
"name": "pingou"
}
}
],
"pagination": {
"first": "http://localhost/api/0/test/issues?per_page=20&page=1",
"last": "http://localhost/api/0/test/issues?per_page=20&page=1",
"next": null,
"page": 1,
"pages": 1,
"per_page": 20,
"prev": null
},
}
"""
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo)
assignee = flask.request.args.get("assignee", None)
author = flask.request.args.get("author", None)
milestone = flask.request.args.getlist("milestones", None)
no_stones = flask.request.args.get("no_stones", None)
if no_stones is not None:
no_stones = is_true(no_stones)
priority = flask.request.args.get("priority", None)
since = flask.request.args.get("since", None)
order = flask.request.args.get("order", None)
status = flask.request.args.get("status", None)
tags = flask.request.args.getlist("tags")
tags = [tag.strip() for tag in tags if tag.strip()]
search_id = flask.request.args.get("query_id", None)
priority_key = None
if priority:
found = False
if priority in repo.priorities:
found = True
priority_key = int(priority)
else:
for key, val in repo.priorities.items():
if val.lower() == priority.lower():
priority_key = key
found = True
break
if not found:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EINVALIDPRIORITY
)
# Hide private tickets
private = False
# If user is authenticated, show him/her his/her private tickets
if api_authenticated():
private = flask.g.fas_user.username
# If user is repo committer, show all tickets included the private ones
if is_repo_committer(repo):
private = None
params = {
"session": flask.g.session,
"repo": repo,
"tags": tags,
"assignee": assignee,
"author": author,
"private": private,
"milestones": milestone,
"priority": priority_key,
"order": order,
"no_milestones": no_stones,
"search_id": search_id,
}
if status is not None:
if status.lower() == "all":
params.update({"status": None})
elif status.lower() == "closed":
params.update({"closed": True})
else:
params.update({"status": status})
else:
params.update({"status": "Open"})
updated_after = None
if since:
# Validate and convert the time
if since.isdigit():
# We assume its a timestamp, so convert it to datetime
try:
updated_after = arrow.get(int(since)).datetime
except ValueError:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.ETIMESTAMP
)
else:
# We assume datetime format, so validate it
try:
updated_after = datetime.datetime.strptime(since, "%Y-%m-%d")
except ValueError:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EDATETIME
)
params.update({"updated_after": updated_after})
page = get_page()
per_page = get_per_page()
params["count"] = True
issue_cnt = pagure.lib.query.search_issues(**params)
pagination_metadata = pagure.lib.query.get_pagination_metadata(
flask.request, page, per_page, issue_cnt
)
query_start = (page - 1) * per_page
query_limit = per_page
params["count"] = False
params["limit"] = query_limit
params["offset"] = query_start
issues = pagure.lib.query.search_issues(**params)
jsonout = flask.jsonify(
{
"total_issues": len(issues),
"issues": [issue.to_json(public=True) for issue in issues],
"args": {
"assignee": assignee,
"author": author,
"milestones": milestone,
"no_stones": no_stones,
"order": order,
"priority": priority,
"since": since,
"status": status,
"tags": tags,
},
"pagination": pagination_metadata,
}
)
return jsonout
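# --- Editorial illustration (not part of pagure itself) ----------------------
# A minimal sketch of paging through the endpoint above by following the
# ``pagination.next`` links returned in each response. The base URL and
# project name are hypothetical placeholders; `requests` is assumed to be
# available on the client side.
def _example_list_issues_client():  # pragma: no cover - illustrative only
    import requests

    url = "https://pagure.example.org/api/0/test/issues"
    params = {"status": "Open", "per_page": 20}
    issues = []
    while url:
        payload = requests.get(url, params=params).json()
        issues.extend(payload["issues"])
        url = payload["pagination"]["next"]  # null/None on the last page
        params = None  # the "next" URL already embeds the query string
    return issues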
@API.route("/<repo>/issue/<issueid>")
@API.route("/<namespace>/<repo>/issue/<issueid>")
@API.route("/fork/<username>/<repo>/issue/<issueid>")
@API.route("/fork/<username>/<namespace>/<repo>/issue/<issueid>")
@api_login_optional()
@api_method
def api_view_issue(repo, issueid, username=None, namespace=None):
"""
Issue information
-----------------
Retrieve information of a specific issue.
::
GET /api/0/<repo>/issue/<issue id>
GET /api/0/<namespace>/<repo>/issue/<issue id>
::
GET /api/0/fork/<username>/<repo>/issue/<issue id>
GET /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>
The identifier provided can be either the unique identifier or the
regular identifier used in the UI (for example ``24`` in
``/forks/user/test/issue/24``)
Sample response
^^^^^^^^^^^^^^^
::
{
"assignee": null,
"blocks": [],
"comments": [],
"content": "This issue needs attention",
"date_created": "1431414800",
"depends": ["4"],
"id": 1,
"private": false,
"status": "Open",
"tags": [],
"title": "test issue",
"user": {
"fullname": "PY C",
"name": "pingou"
}
}
"""
comments = is_true(flask.request.args.get("comments", True))
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo)
issue_id = issue_uid = None
try:
issue_id = int(issueid)
except (ValueError, TypeError):
issue_uid = issueid
issue = _get_issue(repo, issue_id, issueuid=issue_uid)
_check_private_issue_access(issue)
jsonout = flask.jsonify(issue.to_json(public=True, with_comments=comments))
return jsonout
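# --- Editorial illustration (not part of pagure itself) ----------------------
# The view above also honours a ``comments`` query argument (see the code
# right above), even though the docstring does not list it. A hedged client
# sketch, with a placeholder URL, fetching an issue without its comments:
def _example_get_issue_client():  # pragma: no cover - illustrative only
    import requests

    return requests.get(
        "https://pagure.example.org/api/0/test/issue/1",
        params={"comments": "false"},
    ).json()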
@API.route("/<repo>/issue/<issueid>/comment/<int:commentid>")
@API.route("/<namespace>/<repo>/issue/<issueid>/comment/<int:commentid>")
@API.route("/fork/<username>/<repo>/issue/<issueid>/comment/<int:commentid>")
@API.route(
"/fork/<username>/<namespace>/<repo>/issue/<issueid>/"
"comment/<int:commentid>"
)
@api_login_optional()
@api_method
def api_view_issue_comment(
repo, issueid, commentid, username=None, namespace=None
):
"""
Comment of an issue
--------------------
Retrieve a specific comment of an issue.
::
GET /api/0/<repo>/issue/<issue id>/comment/<comment id>
GET /api/0/<namespace>/<repo>/issue/<issue id>/comment/<comment id>
::
GET /api/0/fork/<username>/<repo>/issue/<issue id>/comment/<comment id>
GET /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>/comment/<comment id>
The identifier provided can be either the unique identifier or the
regular identifier used in the UI (for example ``24`` in
``/forks/user/test/issue/24``)
Sample response
^^^^^^^^^^^^^^^
::
{
"avatar_url": "https://seccdn.libravatar.org/avatar/...",
"comment": "9",
"comment_date": "2015-07-01 15:08",
"date_created": "1435756127",
"id": 464,
"parent": null,
"user": {
"fullname": "P.-Y.C.",
"name": "pingou"
}
}
""" # noqa: E501
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo)
issue_id = issue_uid = None
try:
issue_id = int(issueid)
except (ValueError, TypeError):
issue_uid = issueid
issue = _get_issue(repo, issue_id, issueuid=issue_uid)
_check_private_issue_access(issue)
comment = pagure.lib.query.get_issue_comment(
flask.g.session, issue.uid, commentid
)
if not comment:
raise pagure.exceptions.APIError(404, error_code=APIERROR.ENOCOMMENT)
output = comment.to_json(public=True)
output["avatar_url"] = pagure.lib.query.avatar_url_from_email(
comment.user.default_email, size=16
)
output["comment_date"] = comment.date_created.strftime("%Y-%m-%d %H:%M:%S")
jsonout = flask.jsonify(output)
return jsonout
@API.route("/<repo>/issue/<int:issueid>/status", methods=["POST"])
@API.route("/<namespace>/<repo>/issue/<int:issueid>/status", methods=["POST"])
@API.route(
"/fork/<username>/<repo>/issue/<int:issueid>/status", methods=["POST"]
)
@API.route(
"/fork/<username>/<namespace>/<repo>/issue/<int:issueid>/status",
methods=["POST"],
)
@api_login_required(acls=["issue_change_status", "issue_update"])
@api_method
def api_change_status_issue(repo, issueid, username=None, namespace=None):
"""
Change issue status
-------------------
Change the status of an issue.
::
POST /api/0/<repo>/issue/<issue id>/status
POST /api/0/<namespace>/<repo>/issue/<issue id>/status
::
POST /api/0/fork/<username>/<repo>/issue/<issue id>/status
POST /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>/status
Input
^^^^^
+------------------+---------+--------------+------------------------+
| Key | Type | Optionality | Description |
+==================+=========+==============+========================+
| ``close_status`` | string | Optional | The close status of |
| | | | the issue |
+------------------+---------+--------------+------------------------+
| ``status`` | string | Mandatory | The new status of the |
| | | | issue, can be 'Open' or|
| | | | 'Closed' |
+------------------+---------+--------------+------------------------+
Sample response
^^^^^^^^^^^^^^^
::
{
"message": "Successfully edited issue #1"
}
"""
output = {}
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo, project_token=False)
issue = _get_issue(repo, issueid)
open_access = repo.settings.get("open_metadata_access_to_all", False)
_check_ticket_access(issue, assignee=True, open_access=open_access)
status = pagure.lib.query.get_issue_statuses(flask.g.session)
form = pagure.forms.StatusForm(
status=status, close_status=repo.close_status, csrf_enabled=False
)
close_status = None
if form.close_status.raw_data:
close_status = form.close_status.data
new_status = form.status.data.strip()
if new_status in repo.close_status and not close_status:
close_status = new_status
new_status = "Closed"
form.status.data = new_status
if form.validate_on_submit():
try:
# Update status
message = pagure.lib.query.edit_issue(
flask.g.session,
issue=issue,
status=new_status,
close_status=close_status,
user=flask.g.fas_user.username,
)
flask.g.session.commit()
if message:
output["message"] = message
else:
output["message"] = "No changes"
if message:
pagure.lib.query.add_metadata_update_notif(
session=flask.g.session,
obj=issue,
messages=message,
user=flask.g.fas_user.username,
)
except pagure.exceptions.PagureException as err:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.ENOCODE, error=str(err)
)
except SQLAlchemyError: # pragma: no cover
flask.g.session.rollback()
raise pagure.exceptions.APIError(400, error_code=APIERROR.EDBERROR)
else:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EINVALIDREQ, errors=form.errors
)
jsonout = flask.jsonify(output)
return jsonout
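# --- Editorial illustration (not part of pagure itself) ----------------------
# A hedged sketch of closing a ticket through the endpoint above. Posting a
# status that belongs to the project's ``close_status`` list (here "Fixed",
# assuming the project defines it) is mapped by the view to status "Closed"
# with ``close_status`` set accordingly. URL and token are placeholders.
def _example_close_issue_client():  # pragma: no cover - illustrative only
    import requests

    response = requests.post(
        "https://pagure.example.org/api/0/test/issue/1/status",
        headers={"Authorization": "token <your-api-token>"},
        data={"status": "Fixed"},
    )
    return response.json()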
@API.route("/<repo>/issue/<int:issueid>/milestone", methods=["POST"])
@API.route(
"/<namespace>/<repo>/issue/<int:issueid>/milestone", methods=["POST"]
)
@API.route(
"/fork/<username>/<repo>/issue/<int:issueid>/milestone", methods=["POST"]
)
@API.route(
"/fork/<username>/<namespace>/<repo>/issue/<int:issueid>/milestone",
methods=["POST"],
)
@api_login_required(acls=["issue_update_milestone", "issue_update"])
@api_method
def api_change_milestone_issue(repo, issueid, username=None, namespace=None):
"""
Change issue milestone
----------------------
Change the milestone of an issue.
::
POST /api/0/<repo>/issue/<issue id>/milestone
POST /api/0/<namespace>/<repo>/issue/<issue id>/milestone
::
POST /api/0/fork/<username>/<repo>/issue/<issue id>/milestone
POST /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>/milestone
Input
^^^^^
+------------------+---------+--------------+------------------------+
| Key | Type | Optionality | Description |
+==================+=========+==============+========================+
| ``milestone`` | string | Optional | The new milestone of |
| | | | the issue, can be any |
| | | | of defined milestones |
| | | | or empty to unset the |
| | | | milestone |
+------------------+---------+--------------+------------------------+
Sample response
^^^^^^^^^^^^^^^
::
{
"message": "Successfully edited issue #1"
}
""" # noqa
output = {}
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo)
issue = _get_issue(repo, issueid)
open_access = repo.settings.get("open_metadata_access_to_all", False)
_check_ticket_access(issue, open_access=open_access)
form = pagure.forms.MilestoneForm(
milestones=repo.milestones.keys(), csrf_enabled=False
)
if form.validate_on_submit():
new_milestone = form.milestone.data or None
try:
# Update status
message = pagure.lib.query.edit_issue(
flask.g.session,
issue=issue,
milestone=new_milestone,
user=flask.g.fas_user.username,
)
flask.g.session.commit()
if message:
output["message"] = message
else:
output["message"] = "No changes"
if message:
pagure.lib.query.add_metadata_update_notif(
session=flask.g.session,
obj=issue,
messages=message,
user=flask.g.fas_user.username,
)
except pagure.exceptions.PagureException as err:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.ENOCODE, error=str(err)
)
except SQLAlchemyError: # pragma: no cover
flask.g.session.rollback()
raise pagure.exceptions.APIError(400, error_code=APIERROR.EDBERROR)
else:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EINVALIDREQ, errors=form.errors
)
jsonout = flask.jsonify(output)
return jsonout
@API.route("/<repo>/issue/<int:issueid>/comment", methods=["POST"])
@API.route("/<namespace>/<repo>/issue/<int:issueid>/comment", methods=["POST"])
@API.route(
"/fork/<username>/<repo>/issue/<int:issueid>/comment", methods=["POST"]
)
@API.route(
"/fork/<username>/<namespace>/<repo>/issue/<int:issueid>/comment",
methods=["POST"],
)
@api_login_required(acls=["issue_comment", "issue_update"])
@api_method
def api_comment_issue(repo, issueid, username=None, namespace=None):
"""
Comment to an issue
-------------------
Add a comment to an issue.
::
POST /api/0/<repo>/issue/<issue id>/comment
POST /api/0/<namespace>/<repo>/issue/<issue id>/comment
::
POST /api/0/fork/<username>/<repo>/issue/<issue id>/comment
POST /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>/comment
Input
^^^^^
+--------------+----------+---------------+---------------------------+
| Key | Type | Optionality | Description |
+==============+==========+===============+===========================+
| ``comment`` | string | Mandatory | | The comment to add to |
| | | | the issue |
+--------------+----------+---------------+---------------------------+
Sample response
^^^^^^^^^^^^^^^
::
{
"message": "Comment added"
}
"""
output = {}
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo, project_token=False)
issue = _get_issue(repo, issueid)
_check_private_issue_access(issue)
form = pagure.forms.CommentForm(csrf_enabled=False)
if form.validate_on_submit():
comment = form.comment.data
try:
# New comment
message = pagure.lib.query.add_issue_comment(
flask.g.session,
issue=issue,
comment=comment,
user=flask.g.fas_user.username,
)
flask.g.session.commit()
output["message"] = message
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
raise pagure.exceptions.APIError(400, error_code=APIERROR.EDBERROR)
else:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EINVALIDREQ, errors=form.errors
)
output["avatar_url"] = pagure.lib.query.avatar_url_from_email(
flask.g.fas_user.default_email, size=30
)
output["user"] = flask.g.fas_user.username
jsonout = flask.jsonify(output)
return jsonout
@API.route("/<repo>/issue/<int:issueid>/assign", methods=["POST"])
@API.route("/<namespace>/<repo>/issue/<int:issueid>/assign", methods=["POST"])
@API.route(
"/fork/<username>/<repo>/issue/<int:issueid>/assign", methods=["POST"]
)
@API.route(
"/fork/<username>/<namespace>/<repo>/issue/<int:issueid>/assign",
methods=["POST"],
)
@api_login_required(acls=["issue_assign", "issue_update"])
@api_method
def api_assign_issue(repo, issueid, username=None, namespace=None):
"""
Assign an issue
---------------
Assign an issue to someone.
::
POST /api/0/<repo>/issue/<issue id>/assign
POST /api/0/<namespace>/<repo>/issue/<issue id>/assign
::
POST /api/0/fork/<username>/<repo>/issue/<issue id>/assign
POST /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>/assign
Input
^^^^^
+--------------+----------+---------------+---------------------------+
| Key | Type | Optionality | Description |
+==============+==========+===============+===========================+
| ``assignee`` | string | Mandatory | | The username of the user|
| | | | to assign the issue to. |
+--------------+----------+---------------+---------------------------+
Sample response
^^^^^^^^^^^^^^^
::
{
"message": "Issue assigned"
}
"""
output = {}
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo)
issue = _get_issue(repo, issueid)
open_access = repo.settings.get("open_metadata_access_to_all", False)
_check_ticket_access(issue, assignee=True, open_access=open_access)
form = pagure.forms.AssignIssueForm(csrf_enabled=False)
if form.validate_on_submit():
assignee = form.assignee.data or None
# Create our metadata comment object
try:
# New comment
message = pagure.lib.query.add_issue_assignee(
flask.g.session,
issue=issue,
assignee=assignee,
user=flask.g.fas_user.username,
)
flask.g.session.commit()
if message:
pagure.lib.query.add_metadata_update_notif(
session=flask.g.session,
obj=issue,
messages=message,
user=flask.g.fas_user.username,
)
output["message"] = message
else:
output["message"] = "Nothing to change"
except pagure.exceptions.PagureException as err: # pragma: no cover
raise pagure.exceptions.APIError(
400, error_code=APIERROR.ENOCODE, error=str(err)
)
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
raise pagure.exceptions.APIError(400, error_code=APIERROR.EDBERROR)
jsonout = flask.jsonify(output)
return jsonout
@API.route("/<repo>/issue/<int:issueid>/subscribe", methods=["POST"])
@API.route(
"/<namespace>/<repo>/issue/<int:issueid>/subscribe", methods=["POST"]
)
@API.route(
"/fork/<username>/<repo>/issue/<int:issueid>/subscribe", methods=["POST"]
)
@API.route(
"/fork/<username>/<namespace>/<repo>/issue/<int:issueid>/subscribe",
methods=["POST"],
)
@api_login_required(acls=["issue_subscribe"])
@api_method
def api_subscribe_issue(repo, issueid, username=None, namespace=None):
"""
Subscribe to an issue
---------------------
Allows someone to subscribe to or unsubscribe from the notifications
related to an issue.
::
POST /api/0/<repo>/issue/<issue id>/subscribe
POST /api/0/<namespace>/<repo>/issue/<issue id>/subscribe
::
POST /api/0/fork/<username>/<repo>/issue/<issue id>/subscribe
POST /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>/subscribe
Input
^^^^^
+--------------+----------+---------------+---------------------------+
| Key | Type | Optionality | Description |
+==============+==========+===============+===========================+
| ``status`` | boolean | Mandatory | The intended subscription |
| | | | status. ``true`` for |
| | | | subscribing, ``false`` |
| | | | for unsubscribing. |
+--------------+----------+---------------+---------------------------+
Sample response
^^^^^^^^^^^^^^^
::
{
"message": "User subscribed",
"avatar_url": "https://image.png",
"user": "pingou"
}
""" # noqa
output = {}
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo)
issue = _get_issue(repo, issueid)
_check_private_issue_access(issue)
form = pagure.forms.SubscribtionForm(csrf_enabled=False)
if form.validate_on_submit():
status = is_true(form.status.data)
try:
# Toggle subscribtion
message = pagure.lib.query.set_watch_obj(
flask.g.session,
user=flask.g.fas_user.username,
obj=issue,
watch_status=status,
)
flask.g.session.commit()
output["message"] = message
user_obj = pagure.lib.query.get_user(
flask.g.session, flask.g.fas_user.username
)
output["avatar_url"] = pagure.lib.query.avatar_url_from_email(
user_obj.default_email, size=30
)
output["user"] = flask.g.fas_user.username
except SQLAlchemyError as err: # pragma: no cover
flask.g.session.rollback()
_log.exception(err)
raise pagure.exceptions.APIError(400, error_code=APIERROR.EDBERROR)
jsonout = flask.jsonify(output)
return jsonout
@API.route("/<repo>/issue/<int:issueid>/custom/<field>", methods=["POST"])
@API.route(
"/<namespace>/<repo>/issue/<int:issueid>/custom/<field>", methods=["POST"]
)
@API.route(
"/fork/<username>/<repo>/issue/<int:issueid>/custom/<field>",
methods=["POST"],
)
@API.route(
"/fork/<username>/<namespace>/<repo>/issue/<int:issueid>/custom/<field>",
methods=["POST"],
)
@api_login_required(acls=["issue_update_custom_fields", "issue_update"])
@api_method
def api_update_custom_field(
repo, issueid, field, username=None, namespace=None
):
"""
Update custom field
-------------------
Update or reset the content of a custom field associated to an issue.
::
POST /api/0/<repo>/issue/<issue id>/custom/<field>
POST /api/0/<namespace>/<repo>/issue/<issue id>/custom/<field>
::
POST /api/0/fork/<username>/<repo>/issue/<issue id>/custom/<field>
POST /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>/custom/<field>
Input
^^^^^
+------------------+---------+--------------+-------------------------+
| Key | Type | Optionality | Description |
+==================+=========+==============+=========================+
| ``value`` | string | Optional | The new value of the |
| | | | custom field of interest|
+------------------+---------+--------------+-------------------------+
Sample response
^^^^^^^^^^^^^^^
::
{
"message": "Custom field adjusted"
}
""" # noqa
output = {}
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo)
issue = _get_issue(repo, issueid)
open_access = repo.settings.get("open_metadata_access_to_all", False)
_check_ticket_access(issue, open_access=open_access)
fields = {k.name: k for k in repo.issue_keys}
if field not in fields:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EINVALIDISSUEFIELD
)
key = fields[field]
value = get_request_data().get("value")
if value:
_check_link_custom_field(key, value)
try:
message = pagure.lib.query.set_custom_key_value(
flask.g.session, issue, key, value
)
flask.g.session.commit()
if message:
output["message"] = message
pagure.lib.query.add_metadata_update_notif(
session=flask.g.session,
obj=issue,
messages=message,
user=flask.g.fas_user.username,
)
else:
output["message"] = "No changes"
except pagure.exceptions.PagureException as err:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.ENOCODE, error=str(err)
)
except SQLAlchemyError as err: # pragma: no cover
print(err)
flask.g.session.rollback()
raise pagure.exceptions.APIError(400, error_code=APIERROR.EDBERROR)
jsonout = flask.jsonify(output)
return jsonout
@API.route("/<repo>/issue/<int:issueid>/custom", methods=["POST"])
@API.route("/<namespace>/<repo>/issue/<int:issueid>/custom", methods=["POST"])
@API.route(
"/fork/<username>/<repo>/issue/<int:issueid>/custom", methods=["POST"]
)
@API.route(
"/fork/<username>/<namespace>/<repo>/issue/<int:issueid>/custom",
methods=["POST"],
)
@api_login_required(acls=["issue_update_custom_fields", "issue_update"])
@api_method
def api_update_custom_fields(repo, issueid, username=None, namespace=None):
"""
Update custom fields
--------------------
Update or reset the content of a collection of custom fields
associated to an issue.
::
POST /api/0/<repo>/issue/<issue id>/custom
POST /api/0/<namespace>/<repo>/issue/<issue id>/custom
::
POST /api/0/fork/<username>/<repo>/issue/<issue id>/custom
POST /api/0/fork/<username>/<namespace>/<repo>/issue/<issue id>/custom
Input
^^^^^
+------------------+---------+--------------+-----------------------------+
| Key | Type | Optionality | Description |
+==================+=========+==============+=============================+
| ``myfields`` | dict | Mandatory | A dictionary with the fields|
| | | | name as key and the value |
+------------------+---------+--------------+-----------------------------+
Sample payload
^^^^^^^^^^^^^^
::
{
"myField": "to do",
"myField_1": "test",
"myField_2": "done",
}
Sample response
^^^^^^^^^^^^^^^
::
{
"messages": [
{
"myField" : "Custom field myField adjusted to to do"
},
{
"myField_1": "Custom field myField_1 adjusted test (was: to do)"
},
{
"myField_2": "Custom field myField_1 adjusted to done (was: test)"
}
]
}
""" # noqa
output = {"messages": []}
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
_check_token(repo)
issue = _get_issue(repo, issueid)
open_access = repo.settings.get("open_metadata_access_to_all", False)
_check_ticket_access(issue, open_access=open_access)
fields = get_request_data()
if not fields:
raise pagure.exceptions.APIError(400, error_code=APIERROR.EINVALIDREQ)
repo_fields = {k.name: k for k in repo.issue_keys}
if not all(key in repo_fields.keys() for key in fields.keys()):
raise pagure.exceptions.APIError(
400, error_code=APIERROR.EINVALIDISSUEFIELD
)
for field in fields:
key = repo_fields[field]
value = fields.get(key.name)
if value:
_check_link_custom_field(key, value)
try:
message = pagure.lib.query.set_custom_key_value(
flask.g.session, issue, key, value
)
flask.g.session.commit()
if message:
output["messages"].append({key.name: message})
pagure.lib.query.add_metadata_update_notif(
session=flask.g.session,
obj=issue,
messages=message,
user=flask.g.fas_user.username,
)
else:
output["messages"].append({key.name: "No changes"})
except pagure.exceptions.PagureException as err:
raise pagure.exceptions.APIError(
400, error_code=APIERROR.ENOCODE, error=str(err)
)
except SQLAlchemyError as err: # pragma: no cover
print(err)
flask.g.session.rollback()
raise pagure.exceptions.APIError(400, error_code=APIERROR.EDBERROR)
jsonout = flask.jsonify(output)
return jsonout
@API.route("/<repo>/issues/history/stats")
@API.route("/<namespace>/<repo>/issues/history/stats")
@API.route("/fork/<username>/<repo>/issues/history/stats")
@API.route("/fork/<username>/<namespace>/<repo>/issues/history/stats")
@api_method
def api_view_issues_history_stats(repo, username=None, namespace=None):
"""
List project's statistical issues history.
------------------------------------------
Provides the number of opened issues over the last 6 months of the
project.
::
GET /api/0/<repo>/issues/history/stats
GET /api/0/<namespace>/<repo>/issues/history/stats
::
GET /api/0/fork/<username>/<repo>/issues/history/stats
GET /api/0/fork/<username>/<namespace>/<repo>/issues/history/stats
Sample response
^^^^^^^^^^^^^^^
::
{
"stats": {
...
"2017-09-19T13:10:51.041345": 6,
"2017-09-26T13:10:51.041345": 6,
"2017-10-03T13:10:51.041345": 6,
"2017-10-10T13:10:51.041345": 6,
"2017-10-17T13:10:51.041345": 6
}
}
"""
repo = _get_repo(repo, username, namespace)
_check_issue_tracker(repo)
stats = pagure.lib.query.issues_history_stats(flask.g.session, repo)
jsonout = flask.jsonify({"stats": stats})
return jsonout
| gpl-2.0 | -7,478,782,353,708,977,000 | 32.358902 | 91 | 0.477354 | false |
mpasternak/django-multiseek | test_project/test_app/tests.py | 1 | 25528 | # -*- encoding: utf-8 -*-
from __future__ import print_function
import json
import time
from builtins import str as text
import pytest
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from model_bakery import baker
from multiseek import logic
from multiseek.logic import (
AND,
CONTAINS,
EQUAL,
MULTISEEK_ORDERING_PREFIX,
MULTISEEK_REPORT_TYPE,
OR,
RANGE_OPS,
)
from multiseek.models import SearchForm
from multiseek.util import make_field
from multiseek.views import LAST_FIELD_REMOVE_MESSAGE
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.expected_conditions import alert_is_present
from selenium.webdriver.support.wait import WebDriverWait
from . import multiseek_registry
from .models import Author, Language
from .testutil import select_select2_autocomplete, wait_for_page_load
class wait_for_alert(object):
method_name = "until"
def __init__(self, browser):
self.browser = browser
def __enter__(self):
pass
def __exit__(self, *_):
wait = WebDriverWait(self.browser, 10)
method = getattr(wait, self.method_name)
method(alert_is_present())
class wait_until_no_alert(wait_for_alert):
method_name = "until_not"
FRAME = "frame-0"
FIELD = "field-0"
@pytest.mark.django_db
def test_client_picks_up_database_changes_direct(initial_data, client):
res = client.get("/multiseek/")
assert "english" in res.content.decode(res.charset)
n = Language.objects.all()[0]
n.name = "FOOBAR"
n.save()
res = client.get("/multiseek/")
assert "FOOBAR" in res.content.decode(res.charset)
@pytest.mark.django_db
def test_liveserver_picks_up_database_changes(multiseek_page):
n = Language.objects.all()[0]
n.name = "FOOBAR"
n.save()
with wait_for_page_load(multiseek_page.browser):
multiseek_page.browser.reload()
assert "FOOBAR" in multiseek_page.browser.html
@pytest.mark.django_db
def test_multiseek(multiseek_page):
field = multiseek_page.get_field(FIELD)
# On init, the first field will be selected
assert field["selected"] == multiseek_page.registry.fields[0].label
@pytest.mark.django_db
def test_liveserver_picks_up_database_changes_direct(
initial_data, browser, live_server
):
with wait_for_page_load(browser):
browser.visit(live_server.url)
assert "english" in browser.html
n = Language.objects.all()[0]
n.name = "FOOBAR"
n.save()
with wait_for_page_load(browser):
browser.reload()
assert "FOOBAR" in browser.html
@pytest.mark.django_db
def test_change_field(multiseek_page):
field = multiseek_page.get_field(FIELD)
field["type"].find_by_value(text(multiseek_registry.YearQueryObject.label)).click()
field = multiseek_page.get_field(FIELD)
assert field["inner_type"] == logic.RANGE
assert len(field["value"]) == 2
field["type"].find_by_value(
text(multiseek_registry.LanguageQueryObject.label)
).click()
field = multiseek_page.get_field(FIELD)
assert field["inner_type"] == logic.VALUE_LIST
field["type"].find_by_value(
text(multiseek_registry.AuthorQueryObject.label)
).click()
field = multiseek_page.get_field(FIELD)
assert field["inner_type"] == logic.AUTOCOMPLETE
@pytest.mark.django_db
def test_serialize_form(multiseek_page):
with wait_for_page_load(multiseek_page.browser):
multiseek_page.browser.reload()
frame = multiseek_page.get_frame("frame-0")
frame["add_field"].click()
frame["add_field"].click()
frame["add_field"].click()
frame["add_frame"].click()
frame["add_frame"].click()
for n in range(2, 5):
field = multiseek_page.get_field("field-%i" % n)
field["value_widget"].type("aaapud!")
field = multiseek_page.get_field("field-0")
field["type"].find_by_value(text(multiseek_registry.YearQueryObject.label)).click()
field = multiseek_page.get_field("field-0")
field["value_widget"][0].type("1999")
field["value_widget"][1].type("2000")
field = multiseek_page.get_field("field-1")
field["prev-op"].find_by_value("or").click()
field["type"].find_by_value(
text(multiseek_registry.LanguageQueryObject.label)
).click()
field = multiseek_page.get_field("field-1")
field["value_widget"].find_by_value(text(_(u"english"))).click()
expected = [
None,
{
u"field": u"Year",
u"operator": text(RANGE_OPS[0]),
u"value": u"[1999,2000]",
u"prev_op": None,
},
{
u"field": u"Language",
u"operator": text(EQUAL),
u"value": u"english",
u"prev_op": OR,
},
{
u"field": u"Title",
u"operator": text(CONTAINS),
u"value": u"aaapud!",
u"prev_op": AND,
},
{
u"field": u"Title",
u"operator": text(CONTAINS),
u"value": u"aaapud!",
u"prev_op": AND,
},
[
AND,
{
u"field": u"Title",
u"operator": text(CONTAINS),
u"value": u"aaapud!",
u"prev_op": None,
},
],
[
AND,
{
u"field": u"Title",
u"operator": text(CONTAINS),
u"value": u"",
u"prev_op": None,
},
],
]
serialized = multiseek_page.serialize()
assert serialized == expected
for n in range(1, 6):
field = multiseek_page.get_field("field-%i" % n)
field["close-button"].click()
time.sleep(2)
expected = [
None,
{
u"field": u"Year",
u"operator": u"in range",
u"value": u"[1999,2000]",
u"prev_op": None,
},
]
serialized = multiseek_page.serialize()
assert serialized == expected
@pytest.mark.django_db
def test_remove_last_field(multiseek_page):
assert Language.objects.count()
field = multiseek_page.get_field("field-0")
field["close-button"].click()
alert = multiseek_page.browser.get_alert()
assert alert.text == LAST_FIELD_REMOVE_MESSAGE
alert.accept()
@pytest.mark.django_db
def test_autocomplete_field(multiseek_page):
assert Language.objects.count()
field = multiseek_page.get_field(FIELD)
field["type"].find_by_value(
text(multiseek_registry.AuthorQueryObject.label)
).click()
element = multiseek_page.browser.find_by_css(".select2-container")
select_select2_autocomplete(multiseek_page.browser, element, "Smith")
got = multiseek_page.serialize()
expect = [
None,
make_field(
multiseek_registry.AuthorQueryObject,
text(EQUAL),
str(Author.objects.filter(last_name="Smith")[0].pk),
prev_op=None,
),
]
assert got == expect
@pytest.mark.django_db
def test_autocomplete_field_bug(multiseek_page):
"""We fill autocomplete field with NOTHING, then we submit the form,
then we reload the homepage, and by the time of writing, we see
HTTP error 500, which is not what we need..."""
field = multiseek_page.get_field(FIELD)
field["type"].find_by_value(
text(multiseek_registry.AuthorQueryObject.label)
).click()
multiseek_page.browser.find_by_id("sendQueryButton").click()
time.sleep(1)
with wait_for_page_load(multiseek_page.browser):
multiseek_page.browser.reload()
assert "Server Error (500)" not in multiseek_page.browser.html
@pytest.mark.django_db
def test_autocomplete_field_bug_2(multiseek_page):
"""We fill autocomplete field with NOTHING, then we submit the form,
then we reload the homepage, click the "add field button" and by the
time of writing, we get a javascript error."""
field = multiseek_page.get_field(FIELD)
field["type"].find_by_value(
text(multiseek_registry.AuthorQueryObject.label)
).click()
multiseek_page.browser.find_by_id("sendQueryButton").click()
time.sleep(1)
with wait_for_page_load(multiseek_page.browser):
multiseek_page.browser.reload()
multiseek_page.browser.find_by_id("add_field").click()
time.sleep(1)
selects = [
tag
for tag in multiseek_page.browser.find_by_tag("select")
if tag["id"] == "type"
]
assert len(selects[0].find_by_tag("option")) != 0
assert len(selects[1].find_by_tag("option")) != 0
@pytest.mark.django_db
def test_set_join(multiseek_page):
multiseek_page.browser.find_by_id("add_field").click()
multiseek_page.browser.execute_script(
"$('#field-1').multiseekField('prevOperation').val('or')"
)
ret = multiseek_page.browser.evaluate_script(
"$('#field-1').multiseekField('prevOperation').val()"
)
assert ret == "or"
multiseek_page.add_field(
FRAME,
text(multiseek_page.registry.fields[0].label),
text(multiseek_page.registry.fields[0].ops[0]),
"",
)
multiseek_page.browser.execute_script(
"$('#field-2').multiseekField('prevOperation').val('or')"
)
ret = multiseek_page.browser.evaluate_script(
"$('#field-2').multiseekField('prevOperation').val()"
)
assert ret == "or"
@pytest.mark.django_db
def test_set_frame_join(multiseek_page):
multiseek_page.browser.execute_script(
"""
$("#frame-0").multiseekFrame('addFrame');
$("#frame-0").multiseekFrame('addFrame', 'or');
"""
)
ret = multiseek_page.browser.evaluate_script(
"$('#frame-2').multiseekFrame('getPrevOperationValue')"
)
assert ret == "or"
@pytest.mark.django_db
def test_add_field_value_list(multiseek_page):
multiseek_page.add_field(
FRAME,
multiseek_registry.LanguageQueryObject.label,
multiseek_registry.LanguageQueryObject.ops[1],
text(_(u"polish")),
)
field = multiseek_page.get_field("field-1")
assert field["type"].value == text(multiseek_registry.LanguageQueryObject.label)
assert field["op"].value == text(multiseek_registry.LanguageQueryObject.ops[1])
assert field["value"] == text(_(u"polish"))
@pytest.mark.django_db
def test_add_field_autocomplete(multiseek_page):
multiseek_page.add_field(
FRAME,
multiseek_registry.AuthorQueryObject.label,
multiseek_registry.AuthorQueryObject.ops[1],
'[1,"John Smith"]',
)
value = multiseek_page.get_field_value("field-1")
assert value == "1"
@pytest.mark.django_db
def test_add_field_string(multiseek_page):
multiseek_page.add_field(
FRAME,
multiseek_registry.TitleQueryObject.label,
multiseek_registry.TitleQueryObject.ops[0],
"aaapud!",
)
field = multiseek_page.get_field_value("field-1")
assert field == "aaapud!"
@pytest.mark.django_db
def test_add_field_range(multiseek_page):
multiseek_page.add_field(
FRAME,
multiseek_registry.YearQueryObject.label,
multiseek_registry.YearQueryObject.ops[0],
"[1000, 2000]",
)
field = multiseek_page.get_field_value("field-1")
assert field == "[1000,2000]"
@pytest.mark.django_db
def test_refresh_bug(multiseek_page):
# There was a bug, that when you submit the form with "OR" operation,
# and then you refresh the page, the operation is changed to "AND"
frame = multiseek_page.get_frame("frame-0")
frame["add_field"].click()
field = multiseek_page.get_field("field-1")
field["prev-op"].find_by_value(text(_("or"))).click()
assert field["prev-op"].value == text(_("or"))
button = multiseek_page.browser.find_by_id("sendQueryButton")
button.click()
time.sleep(0.5)
multiseek_page.browser.reload()
field = multiseek_page.get_field("field-1")
assert field["prev-op"].value == text(_("or"))
@pytest.mark.django_db
def test_frame_bug(multiseek_page):
multiseek_page.browser.find_by_id("add_frame").click()
multiseek_page.browser.find_by_id("close-button").click()
multiseek_page.browser.find_by_id("sendQueryButton").click()
with multiseek_page.browser.get_iframe("if") as iframe:
assert "Server Error (500)" not in iframe.html
@pytest.mark.django_db
def test_date_field(multiseek_page):
field = multiseek_page.get_field("field-0")
field["type"].find_by_value(
text(multiseek_registry.DateLastUpdatedQueryObject.label)
).click()
field["op"].find_by_value(
text(multiseek_registry.DateLastUpdatedQueryObject.ops[6])
).click()
expected = [
None,
{
u"field": u"Last updated on",
u"operator": u"in range",
u"value": u'["",""]',
u"prev_op": None,
},
]
assert multiseek_page.serialize() == expected
field["op"].find_by_value(
text(multiseek_registry.DateLastUpdatedQueryObject.ops[3])
).click()
expected = [
None,
{
u"field": u"Last updated on",
u"operator": u"greater or equal to(female gender)",
u"value": u'[""]',
u"prev_op": None,
},
]
assert expected == multiseek_page.serialize()
@pytest.mark.django_db
def test_removed_records(multiseek_page, live_server, initial_data):
"""Try to remove a record by hand and check if that fact is properly
recorded."""
multiseek_page.browser.visit(live_server + "/multiseek/results")
assert "A book with" in multiseek_page.browser.html
assert "Second book" in multiseek_page.browser.html
multiseek_page.browser.execute_script("""$("a:contains('❌')").first().click()""")
time.sleep(1)
multiseek_page.browser.visit(live_server + "/multiseek/results")
assert "A book with" in multiseek_page.browser.html
assert "Second book" not in multiseek_page.browser.html
assert "1 record(s) has been removed manually" in multiseek_page.browser.html
multiseek_page.browser.execute_script("""$("a:contains('❌')").first().click()""")
time.sleep(1)
multiseek_page.browser.execute_script("""$("a:contains('❌')").first().click()""")
time.sleep(1)
multiseek_page.browser.visit(live_server + "/multiseek/results")
assert "A book with" in multiseek_page.browser.html
assert "Second book" not in multiseek_page.browser.html
assert "1 record(s) has been removed manually" in multiseek_page.browser.html
@pytest.mark.django_db
def test_form_save_anon_initial(multiseek_page):
# Without SearchForm objects, the formsSelector is invisible
elem = multiseek_page.browser.find_by_id("formsSelector")
assert not elem.visible
@pytest.mark.django_db
def test_form_save_anon_initial_with_data(multiseek_page):
baker.make(SearchForm, public=True)
multiseek_page.browser.reload()
elem = multiseek_page.browser.find_by_id("formsSelector")
assert elem.visible
@pytest.mark.django_db
def test_form_save_anon_form_save_anonymous(multiseek_page):
# Anonymous users cannot save forms:
assert len(multiseek_page.browser.find_by_id("saveFormButton")) == 0
@pytest.mark.django_db
def test_form_save_anon_bug(multiseek_page):
multiseek_page.browser.find_by_id("add_frame").click()
multiseek_page.browser.find_by_id("add_field").click()
field1 = multiseek_page.get_field("field-1")
field1["close-button"].click()
time.sleep(1)
selects = multiseek_page.browser.find_by_tag("select")
prevops = [x for x in selects if x["id"] == "prev-op"]
assert len(prevops) == 1
@pytest.mark.django_db
def test_public_report_types_secret_report_invisible(multiseek_page):
elem = multiseek_page.browser.find_by_name("_ms_report_type").find_by_tag("option")
assert len(elem) == 2
@pytest.mark.django_db
def test_logged_in_secret_report_visible(
multiseek_admin_page, admin_user, initial_data
):
elem = multiseek_admin_page.browser.find_by_name("_ms_report_type")
elem = elem.first.find_by_tag("option")
assert len(elem) == 3
@pytest.mark.django_db
def test_save_form_logged_in(multiseek_admin_page, initial_data):
assert multiseek_admin_page.browser.find_by_id("saveFormButton").visible
@pytest.mark.django_db
def test_save_form_server_error(multiseek_admin_page, initial_data):
NAME = "testowy formularz"
multiseek_admin_page.browser.execute_script(
"multiseek.SAVE_FORM_URL='/unexistent';"
)
browser = multiseek_admin_page.browser
# Save the form
multiseek_admin_page.save_form_as(NAME)
# ... a prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.browser.get_alert().accept()
time.sleep(1)
# ... followed shortly by an ERROR message!
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.browser.get_alert().accept()
WebDriverWait(browser, 10).until_not(alert_is_present())
# ... and the selector does NOT appear:
assert not multiseek_admin_page.browser.find_by_id("formsSelector").visible
# ... and the database is EMPTY too:
assert SearchForm.objects.all().count() == 0
@pytest.mark.django_db
def test_save_form_save(multiseek_admin_page, initial_data):
browser = multiseek_admin_page.browser
assert SearchForm.objects.all().count() == 0
# multiseek_admin_page.browser.reload()
with wait_for_alert(browser):
multiseek_admin_page.click_save_button()
with wait_until_no_alert(browser):
multiseek_admin_page.dismiss_alert()
# Cancelling should not display the next form
NAME = "testowy formularz"
# Save the form
multiseek_admin_page.save_form_as(NAME)
# ... a prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... followed shortly by a message that it was saved
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... and the name appears in the selector
assert multiseek_admin_page.count_elements_in_form_selector(NAME) == 1
# ... and in the database:
assert SearchForm.objects.all().count() == 1
# Save the form under THE SAME NAME
multiseek_admin_page.save_form_as(NAME)
# ... a prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... followed shortly by a message that it already exists in the database - overwrite?
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... followed shortly by a message that it was saved:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... and the name appears in the selector
assert multiseek_admin_page.count_elements_in_form_selector(NAME) == 1
# ... and there is still only one in the database
assert SearchForm.objects.all().count() == 1
# Save the form under THE SAME NAME but do NOT overwrite this time
multiseek_admin_page.save_form_as(NAME)
# ... a prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... followed shortly by a message that it already exists in the database - overwrite?
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... followed shortly by a message that it was SAVED
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... and there is still only one in the database
assert SearchForm.objects.all().count() == 1
# Check that it is public
assert SearchForm.objects.all()[0].public
# Overwrite the form as non-public
multiseek_admin_page.save_form_as(NAME)
# ... a prompt asking whether it should be public:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.dismiss_alert()
# ... followed shortly by a message that it already exists in the database - overwrite?
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... followed shortly by a message that it was saved:
WebDriverWait(browser, 10).until(alert_is_present())
with wait_until_no_alert(browser):
multiseek_admin_page.accept_alert()
# ... and it is now NOT public:
assert not SearchForm.objects.all()[0].public
@pytest.mark.django_db
def test_load_form(multiseek_admin_page, initial_data):
fld = make_field(
multiseek_admin_page.registry.fields[2],
multiseek_admin_page.registry.fields[2].ops[1],
json.dumps([2000, 2010]),
)
SearchForm.objects.create(
name="lol",
owner=User.objects.create(username="foo", password="bar"),
public=True,
data=json.dumps({"form_data": [None, fld]}),
)
multiseek_admin_page.load_form_by_name("lol")
field = multiseek_admin_page.extract_field_data(
multiseek_admin_page.browser.find_by_id("field-0")
)
assert field["selected"] == text(multiseek_admin_page.registry.fields[2].label)
assert field["value"][0] == 2000
assert field["value"][1] == 2010
# Check that after CANCELLING the select returns to its original value
elem = multiseek_admin_page.browser.find_by_id("formsSelector").first
elem.find_by_text("lol").click()
multiseek_admin_page.dismiss_alert()
elem = multiseek_admin_page.browser.find_by_id("formsSelector").find_by_tag(
"option"
)
assert elem[0].selected
@pytest.mark.django_db
def test_bug_2(multiseek_admin_page, initial_data):
f = multiseek_admin_page.registry.fields[0]
v = multiseek_admin_page.registry.fields[0].ops[0]
value = "foo"
field = make_field(f, v, value, OR)
form = [None, field, [OR, field, field, field], [OR, field, field, field]]
data = json.dumps({"form_data": form})
user = User.objects.create(username="foo", password="bar")
SearchForm.objects.create(name="bug-2", owner=user, public=True, data=data)
multiseek_admin_page.load_form_by_name("bug-2")
elements = multiseek_admin_page.browser.find_by_css("[name=prev-op]:visible")
for elem in elements:
if elem.css("visibility") != "hidden":
assert elem.value == logic.OR
@pytest.mark.django_db
def test_save_ordering_direction(multiseek_admin_page, initial_data):
elem = "input[name=%s1_dir]" % MULTISEEK_ORDERING_PREFIX
browser = multiseek_admin_page.browser
browser.find_by_css(elem).type(Keys.SPACE)
multiseek_admin_page.save_form_as("foobar")
# Should the dialog be public?
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
# Form saved successfully
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
multiseek_admin_page.reset_form()
multiseek_admin_page.load_form_by_name("foobar")
assert len(multiseek_admin_page.browser.find_by_css("%s:checked" % elem)) == 1
@pytest.mark.django_db
def test_save_ordering_box(multiseek_admin_page, initial_data):
elem = "select[name=%s0]" % MULTISEEK_ORDERING_PREFIX
browser = multiseek_admin_page.browser
select = browser.find_by_css(elem)
option = select.find_by_css('option[value="2"]')
assert not option.selected
option.click()
multiseek_admin_page.save_form_as("foobar")
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
multiseek_admin_page.reset_form()
multiseek_admin_page.load_form_by_name("foobar")
select = multiseek_admin_page.browser.find_by_css(elem)
option = select.find_by_css('option[value="2"]')
assert option.selected
@pytest.mark.django_db
def test_save_report_type(multiseek_admin_page, initial_data):
elem = "select[name=%s]" % MULTISEEK_REPORT_TYPE
select = multiseek_admin_page.browser.find_by_css(elem).first
option = select.find_by_css('option[value="1"]')
assert not option.selected
option.click()
multiseek_admin_page.save_form_as("foobar")
browser = multiseek_admin_page.browser
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
WebDriverWait(browser, 10).until(alert_is_present())
multiseek_admin_page.accept_alert()
WebDriverWait(browser, 10).until_not(alert_is_present())
multiseek_admin_page.reset_form()
time.sleep(1)
multiseek_admin_page.load_form_by_name("foobar")
select = multiseek_admin_page.browser.find_by_css(elem).first
option = select.find_by_css('option[value="1"]')
assert option.selected
| mit | -2,225,583,535,854,035,700 | 30.267485 | 87 | 0.655339 | false |
Xarrow/PyCharmWorkSpace | everything_sdk/pyeverything.py | 1 | 9309 | # -*- coding:utf-8 -*-
"""
Verion: 1.0
Author: zhangjian
Site: https://iliangqunru.bitcron.com/
File: pyeverything.py
Time: 2018/3/9
Adds new functionality: pyeverything.py
"""
import logging
import sys
from ctypes import windll, create_unicode_buffer, byref, WinDLL
level = logging.DEBUG
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
datefmt = '%Y-%m-%d %H:%M'
logging.basicConfig(level=level, format=format, datefmt=datefmt)
logger = logging.getLogger(__name__)
PY3 = False
if sys.version > '3':
PY3 = True
class BaseException(Exception):
def __init__(self, *args, **kwargs):
pass
class UnknownOSVersionException(BaseException):
"""UnknownOSVersion Exception"""
pass
class UnknowOperationSystemException(BaseException):
"""UnknowOperationSystemException"""
pass
from platform import architecture, platform
arch = architecture()
platform = platform()
logger.info("==> current os version is : (%s , %s)", platform, arch)
class Singleton(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs)
return cls._instance
class PyEverything(Singleton):
def __init__(self):
if not str(arch[1]).__contains__("Windows"):
raise UnknowOperationSystemException("Unknown Operation System , And Only Apply For Windows")
if str(arch[0]).__contains__("64"):
self.everything_dll = windll.LoadLibrary(r'Everything64.dll')
elif str(arch[0]).__contains__("32"):
self.everything_dll = windll.LoadLibrary(r'Everything32.dll')
else:
raise UnknownOSVersionException("Unknown OS Version")
def everything_clean_up(self) -> None:
"""The Everything_CleanUp function resets the result list and search state, freeing any allocated memory by the library."""
self.everything_dll.Everything_CleanUp()
@property
def everything_delete_run_history(self) -> int:
"""
The Everything_DeleteRunHistory function deletes all run history.Calling this function will clear all run history from memory and disk.
Return Value:
The function returns non-zero if run history is cleared.
The function returns 0 if an error occurred. To get extended error information, call Everything_GetLastError
"""
return self.everything_dll.Everything_DeleteRunHistory()
@property
def everything_get_last_error(self) -> int:
"""The Everything_GetLastError function retrieves the last-error code value."""
return self.everything_dll.Everything_GetLastError()
def everything_get_result_full_path_name_w(self, index: int, lp_string: str, n_max_count: int) -> None:
"""The Everything_GetResultFullPathName function retrieves the full path and file name of the visible result.
index
Zero based index of the visible result.
lpString [out]
Pointer to the buffer that will receive the text. If the string is as long or longer than the buffer, the string is truncated and terminated with a NULL character.
nMaxCount
Specifies the maximum number of characters to copy to the buffer, including the NULL character. If the text exceeds this limit, it is truncated.
"""
self.everything_dll.Everything_GetResultFullPathNameW(index, lp_string, n_max_count)
@property
def everything_get_num_file_results(self) -> int:
"""The Everything_GetNumFileResults function returns the number of visible file results.
Returns the number of file results.
"""
return self.everything_dll.Everything_GetNumFileResults()
@property
def everything_get_num_folder_results(self) -> int:
"""The Everything_GetNumFolderResults function returns the number of visible folder results.
Returns the number of folder results.
"""
return self.everything_dll.Everything_GetNumFolderResults()
@property
def everything_get_num_results(self) -> int:
"""The Everything_GetNumResults function returns the number of visible file and folder results.
Total number of results, including both files and folders.
"""
return self.everything_dll.Everything_GetNumResults()
def everything_get_result_file_name(self, index: int):
"""The Everything_GetResultFileName function retrieves the file name part only of the visible result."""
return self.everything_dll.Everything_GetResultFileNameW(index)
# Manipulating the search state
def everything_set_search_w(self, key_string: str) -> None:
"""The Everything_SetSearch function sets the search string for the IPC Query."""
self.everything_dll.Everything_SetSearchW(key_string)
def everything_set_match_path_w(self, enable: bool) -> None:
"""The Everything_SetMatchPath function enables or disables full path matching for the next call to
Everything_Query. """
self.everything_dll.Everything_SetMatchPath(enable)
def everything_set_match_case(self, enable: bool = False) -> None:
"""The Everything_SetMatchCase function enables or disables full path matching for the next call to Everything_Query.
搜素是否区分大小
enable
Specifies whether the search is case sensitive or insensitive.
If this parameter is TRUE, the search is case sensitive.
If the parameter is FALSE, the search is case insensitive.
"""
self.everything_dll.Everything_SetMatchCase(enable)
def everything_query_w(self, wait: bool = True) -> None:
"""The Everything_Query function executes an Everything IPC query with the current search state.
wait
Should the function wait for the results or return immediately.
Set this to FALSE to post the IPC Query and return immediately.
Set this to TRUE to send the IPC Query and wait for the results.
"""
self.everything_dll.Everything_QueryW(wait)
def __call__(self, *args, **kwargs):
pass
def __del__(self):
self.everything_dll.Everything_CleanUp()
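# Illustrative note (not from the original author): the wrapper above automates the usual
# Everything SDK call sequence. A minimal sketch of the raw calls, assuming the DLL is already
# loaded as `dll` and a Windows build of Everything is running:
#
#   buf = create_unicode_buffer(260)
#   dll.Everything_SetSearchW("*.py")          # set the search string
#   dll.Everything_QueryW(True)                # send the IPC query and wait for results
#   for i in range(dll.Everything_GetNumResults()):
#       dll.Everything_GetResultFullPathNameW(i, byref(buf), len(buf))
#       print(buf.value)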
from typing import List, Dict, Any
FileList = List[str]
HookData = Dict[str, Any]
def magix(str_buffer_size: int = 512, only_first: bool = False, match_case: bool = False, query_wait: bool = True):
"""magic box"""
def _decorator(func):
def wrapper(self, *args, **kwargs):
# allocate the unicode buffer that receives each result path
str_buffer = create_unicode_buffer(str_buffer_size)
# let the wrapped method set the search state (search string, flags, ...)
func(self, *args, **kwargs)
self.everything_set_match_case(match_case)
# run the IPC query (the core call)
self.everything_query_w(query_wait)
if only_first:
self.everything_get_result_full_path_name_w(index=0, lp_string=byref(str_buffer),
n_max_count=len(str_buffer))
return str_buffer.value
def gen():
for index in range(0, self.everything_get_num_results):
self.everything_get_result_full_path_name_w(index=index, lp_string=byref(str_buffer),
n_max_count=len(str_buffer))
yield str_buffer.value
return list(gen())
return wrapper
return _decorator
class SearchFile(PyEverything):
def __init__(self) -> None:
super().__init__()
@magix(str_buffer_size=100, )
def common_search(self, key_string: str):
"""common search
key_string -> key file which you want to search in the disk
"""
self.everything_set_search_w(key_string=key_string)
@magix(match_case=True)
def match_case_search(self, key_string):
"""是否区分大小写"""
self.everything_set_search_w(key_string=key_string)
pass
# def registry_hooks(self, hooks: list, hook_data: dict, **kwargs) -> HookData:
# """register hooks"""
# hook_data = hook_data or dict()
# hooks = hooks or list()
# if hooks:
# for hook in hooks:
# _hook_data = hook(**kwargs)
# if _hook_data is not None:
# # pass the collected value back to the caller
# hook_data[hook.__name__] = _hook_data
# return hook_data
#
@property
def files_search_nums(self) -> int:
return self.everything_get_num_results
@property
def files_search_file_nums(self) -> int:
return self.everything_get_num_file_results
@property
def files_search_folder_nums(self) -> int:
return self.everything_get_num_folder_results
if __name__ == '__main__':
search_file = SearchFile()
# search_file.test_registry_hooks()
common_search_rs = search_file.common_search(key_string="abc")
print(len(common_search_rs))
match_case_search_rs = search_file.match_case_search(key_string="abc")
print(len(match_case_search_rs))
# print(search_file.files_search_nums)
# print(search_file.files_search_file_nums)
# print(search_file.files_search_folder_nums)
| apache-2.0 | 8,502,296,471,336,731,000 | 34.593023 | 175 | 0.636938 | false |
sthyme/ZFSchizophrenia | ClusterBrainImages/sum_intensity_review_allRegion_thres.py | 1 | 6111 | import numpy as np
import tifffile
import os
import scipy
import dill
import pandas as pd
def load_object(filename):
f = open(filename, "rb")
obj = dill.load(f)
f.close()
return obj
## Directory of the folder containing activity images
file_dir="/n/schier_lab2/everyone/avereallyfullfusionperk_Yiqun_filtered_fixed/"
#file_dir="/n/schier_lab2/everyone/avereallyfullfusionstruct_Yiqun_filtered_fixed/"
#file_dir="/n/boslfs/LABS/schier_lab/everyone/aveupdatedreallyfullfusionstruct_Yiqun_filtered/"
#file_dir="/n/schier_lab2/users/yiqunwang/Summer Data/ReviewAnalysis/filtered_fused_and_nodup/"
## Load various masks
mask_noSC=load_object("../Masks/whole_brain_mask_no_SpinalCord")
mask=load_object("../Masks/whole_brain_mask")
all_masks=load_object("../Masks/all_masks_sym_all_good")
#mask_dien=all_masks['Diencephalon']
#mask_mesen=all_masks['Mesencephalon']
#mask_rhom=all_masks['Rhombencephalon']
#mask_telen=all_masks['Telencephalon']
## Parse image names to extract genotype information: gene name, hom/het
files=[name for name in os.listdir(file_dir)]
genes=np.array([name.split("_")[0] for name in files])
genos=np.array([name.split("_")[2] for name in files])
genos=np.array([''.join(i for i in j if not i.isdigit()) for j in genos])
labels=[genes[i]+"_"+genos[i] for i in np.arange(len(genes))]
## Preallocate dataframes (1 for each channel) to store sum of intensities in each image (rows) for each region (columns)
all_regions=list(all_masks.keys())
sum_G=pd.DataFrame(np.zeros((len(files)+1,len(all_regions))),index=["size"]+labels,columns=all_regions)
sum_R=pd.DataFrame(np.zeros((len(files)+1,len(all_regions))),index=["size"]+labels,columns=all_regions)
sum_both=pd.DataFrame(np.zeros((len(files)+1,len(all_regions))),index=["size"]+labels,columns=all_regions)
## Calculate the size of each brain region by summing up each region mask. Write the sums in the dataframes as a row
sum_G.loc["size",:]=[np.sum(all_masks[region_mask]) for region_mask in all_regions]
sum_R.loc["size",:]=[np.sum(all_masks[region_mask]) for region_mask in all_regions]
sum_both.loc["size",:]=[np.sum(all_masks[region_mask]) for region_mask in all_regions]
## Preallocate dataframes to store number of active pixels in each image for each region
sum_numG=pd.DataFrame(np.zeros((len(files)+1,len(all_regions))),index=["size"]+labels,columns=all_regions)
sum_numR=pd.DataFrame(np.zeros((len(files)+1,len(all_regions))),index=["size"]+labels,columns=all_regions)
sum_numboth=pd.DataFrame(np.zeros((len(files)+1,len(all_regions))),index=["size"]+labels,columns=all_regions)
sum_numG.loc["size",:]=[np.sum(all_masks[region_mask]) for region_mask in all_regions]
sum_numR.loc["size",:]=[np.sum(all_masks[region_mask]) for region_mask in all_regions]
sum_numboth.loc["size",:]=[np.sum(all_masks[region_mask]) for region_mask in all_regions]
#labels=[filename.split('_')[0] for filename in files]
#sum_G=pd.DataFrame(np.zeros((len(files),6)),index=labels,columns=['Brain','NoSpinalCord','Diencephalon','Mesencephalon','Rhombencephalon','Telencephalon'])
#sum_R=pd.DataFrame(np.zeros((len(files),6)),index=labels,columns=['Brain','NoSpinalCord','Diencephalon','Mesencephalon','Rhombencephalon','Telencephalon'])
#sum_both=pd.DataFrame(np.zeros((len(files),6)),index=labels,columns=['Brain','NoSpinalCord','Diencephalon','Mesencephalon','Rhombencephalon','Telencephalon'])
## set intensity threshold for calling active pixels.
thres=50
## Calculate region-wise sum of intensities and number of active pixels for each image
for i in np.arange(len(files)):
file_name=files[i]
label=labels[i]
print("summing intensities for "+label+"...")
brain_im=np.array(tifffile.imread(file_dir+file_name))
brain_R=brain_im[:,:,:,0]
brain_R=brain_R*(brain_R>=thres)
brain_G=brain_im[:,:,:,1]
brain_G=brain_G*(brain_G>=thres)
brain_both=np.max(np.array([brain_im[:,:,:,0],brain_im[:,:,:,1]]),axis=0)
#sum_G.loc[label,:]=[np.sum(brain_G*mask),np.sum(brain_G*mask_noSC),np.sum(brain_G*mask_dien),np.sum(brain_G*mask_mesen),np.sum(brain_G*mask_rhom),np.sum(brain_G*mask_telen)]
#sum_R.loc[label,:]=[np.sum(brain_R*mask),np.sum(brain_R*mask_noSC),np.sum(brain_R*mask_dien),np.sum(brain_R*mask_mesen),np.sum(brain_R*mask_rhom),np.sum(brain_R*mask_telen)]
#sum_both.loc[label,:]=[np.sum(brain_both*mask),np.sum(brain_both*mask_noSC),np.sum(brain_both*mask_dien),np.sum(brain_both*mask_mesen),np.sum(brain_both*mask_rhom),np.sum(brain_both*mask_telen)]
sum_G.loc[label,:]=[np.sum(brain_G*all_masks[region_mask]) for region_mask in all_regions]
sum_R.loc[label,:]=[np.sum(brain_R*all_masks[region_mask]) for region_mask in all_regions]
sum_both.loc[label,:]=[np.sum(brain_both*all_masks[region_mask]) for region_mask in all_regions]
sum_numG.loc[label,:]=[np.sum((brain_G>0)*all_masks[region_mask]) for region_mask in all_regions]
sum_numR.loc[label,:]=[np.sum((brain_R>0)*all_masks[region_mask]) for region_mask in all_regions]
sum_numboth.loc[label,:]=[np.sum((brain_both>0)*all_masks[region_mask]) for region_mask in all_regions]
## save the dataframes
sum_G.to_csv('/n/schier_lab2/users/yiqunwang/Summer Data/ReviewAnalysis/intensity_sum/all_regions_sum_perk_green_channel_PaperData_thres50.csv')
sum_R.to_csv('/n/schier_lab2/users/yiqunwang/Summer Data/ReviewAnalysis/intensity_sum/all_regions_sum_perk_red_channel_PaperData_thres50.csv')
sum_both.to_csv('/n/schier_lab2/users/yiqunwang/Summer Data/ReviewAnalysis/intensity_sum/all_regions_sum_perk_both_channels_PaperData_thres50.csv')
sum_numG.to_csv('/n/schier_lab2/users/yiqunwang/Summer Data/ReviewAnalysis/intensity_sum/all_regions_sum_nPix_perk_green_channel_PaperData_thres50.csv')
sum_numR.to_csv('/n/schier_lab2/users/yiqunwang/Summer Data/ReviewAnalysis/intensity_sum/all_regions_sum_nPix_perk_red_channel_PaperData_thres50.csv')
sum_numboth.to_csv('/n/schier_lab2/users/yiqunwang/Summer Data/ReviewAnalysis/intensity_sum/all_regions_sum_nPix_perk_both_channels_PaperData_thres50.csv')
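## Illustrative sanity check of the threshold/mask arithmetic used above (assumed toy values, not part of the pipeline):
##   tiny = np.array([[10, 60], [70, 40]]); region = np.array([[1, 1], [0, 1]])
##   thresholded = tiny * (tiny >= 50)          # -> [[0, 60], [70, 0]]
##   np.sum(thresholded * region)               # summed intensity inside the region: 60
##   np.sum((thresholded > 0) * region)         # number of active pixels inside the region: 1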
| mit | 1,075,696,351,922,056,700 | 63.709677 | 199 | 0.723777 | false |
teamshadi/mf-dataentry | project/settings.py | 1 | 3197 | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.11.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e((6x$9gbz6iq(j80+sj@g8us73s@d0#j&xyji=!s!%9r@mokv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["localhost","pmo.ffaprivatebank.com"]
# Application definition
INSTALLED_APPS = [
'app.apps.AppConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
DISABLE_EXPORT_TO_MF = False
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| mit | -2,286,854,437,003,504,000 | 24.576 | 91 | 0.686581 | false |
QLaboratory/QlabChallengerRepo | ai_challenger_scene/scale_layer.py | 1 | 3385 | from keras.layers.core import Layer
from keras.engine import InputSpec
from keras import backend as K
try:
from keras import initializations
except ImportError:
from keras import initializers as initializations
class Scale(Layer):
'''Learns a set of weights and biases used for scaling the input data.
The output is simply an element-wise multiplication of the input by a set of
learned weights, plus a set of learned biases:
out = in * gamma + beta,
where 'gamma' and 'beta' are the learned weights and biases.
# Arguments
axis: integer, axis along which to normalize in mode 0. For instance,
if your input tensor has shape (samples, channels, rows, cols),
set axis to 1 to normalize per feature map (channels axis).
momentum: momentum in the computation of the
exponential average of the mean and standard deviation
of the data, for feature-wise normalization.
weights: Initialization weights.
List of 2 Numpy arrays, with shapes:
`[(input_shape,), (input_shape,)]`
beta_init: name of initialization function for shift parameter
(see [initializations](../initializations.md)), or alternatively,
Theano/TensorFlow function to use for weights initialization.
This parameter is only relevant if you don't pass a `weights` argument.
gamma_init: name of initialization function for scale parameter (see
[initializations](../initializations.md)), or alternatively,
Theano/TensorFlow function to use for weights initialization.
This parameter is only relevant if you don't pass a `weights` argument.
'''
def __init__(self, weights=None, axis=-1, momentum = 0.9, beta_init='zero', gamma_init='one', **kwargs):
self.momentum = momentum
self.axis = axis
self.beta_init = initializations.get(beta_init)
self.gamma_init = initializations.get(gamma_init)
self.initial_weights = weights
super(Scale, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (int(input_shape[self.axis]),)
# Compatibility with TensorFlow >= 1.0.0
self.gamma = K.variable(self.gamma_init(shape), name='{}_gamma'.format(self.name))
self.beta = K.variable(self.beta_init(shape), name='{}_beta'.format(self.name))
#self.gamma = self.gamma_init(shape, name='{}_gamma'.format(self.name))
#self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))
self.trainable_weights = [self.gamma, self.beta]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def call(self, x, mask=None):
input_shape = self.input_spec[0].shape
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
out = K.reshape(self.gamma, broadcast_shape) * x + K.reshape(self.beta, broadcast_shape)
return out
def get_config(self):
config = {"momentum": self.momentum, "axis": self.axis}
base_config = super(Scale, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
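# Usage sketch (an illustration under assumed names, not part of the original file): this layer is
# typically inserted right after a BatchNormalization layer in Caffe-style ResNet/DenseNet ports,
# e.g.:
#
#   x = BatchNormalization(epsilon=1.1e-5, axis=-1, name='bn1')(x)
#   x = Scale(axis=-1, name='scale1')(x)
#
# For a single feature with gamma = 2.0 and beta = 0.5, an input value of 3.0 becomes
# 2.0 * 3.0 + 0.5 = 6.5 (out = in * gamma + beta).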
| mit | -5,671,898,261,230,597,000 | 45.676056 | 108 | 0.639586 | false |
andrew-szymanski/gae_django | jsonpickle/__init__.py | 1 | 10743 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Python library for serializing any arbitrary object graph into JSON.
It can take almost any Python object and turn the object into JSON.
Additionally, it can reconstitute the object back into Python.
>>> import jsonpickle
>>> from samples import Thing
Create an object.
>>> obj = Thing('A String')
>>> print obj.name
A String
Use jsonpickle to transform the object into a JSON string.
>>> pickled = jsonpickle.encode(obj)
>>> print pickled
{"py/object": "samples.Thing", "name": "A String", "child": null}
Use jsonpickle to recreate a Python object from a JSON string
>>> unpickled = jsonpickle.decode(pickled)
>>> str(unpickled.name)
'A String'
.. warning::
Loading a JSON string from an untrusted source represents a potential
security vulnerability. jsonpickle makes no attempt to sanitize the input.
The new object has the same type and data, but essentially is now a copy of
the original.
>>> obj == unpickled
False
>>> obj.name == unpickled.name
True
>>> type(obj) == type(unpickled)
True
If you will never need to load (regenerate the Python class from JSON), you can
pass in the keyword unpicklable=False to prevent extra information from being
added to JSON.
>>> oneway = jsonpickle.encode(obj, unpicklable=False)
>>> print oneway
{"name": "A String", "child": null}
"""
from jsonpickle.pickler import Pickler
from jsonpickle.unpickler import Unpickler
__version__ = '0.4.0'
__all__ = ('encode', 'decode')
SUPPORTED_BACKENDS = ('json',
'simplejson',
'demjson',
'django.util.simplejson')
class JSONPluginMgr(object):
"""The JSONPluginMgr handles encoding and decoding.
It tries these modules in this order:
simplejson, json, demjson
simplejson is a fast and popular backend and is tried first.
json comes with python2.6 and is tried second.
demjson is the most permissive backend and is tried last.
"""
def __init__(self):
## The names of backends that have been successfully imported
self._backend_names = []
## A dictionary mapping backend names to encode/decode functions
self._encoders = {}
self._decoders = {}
## Options to pass to specific encoders
json_opts = ((), {'sort_keys': True})
self._encoder_options = {
'json': json_opts,
'simplejson': json_opts,
'django.util.simplejson': json_opts,
}
## The exception class that is thrown when a decoding error occurs
self._decoder_exceptions = {}
## Whether we've loaded any backends successfully
self._verified = False
## Try loading simplejson and demjson
self.load_backend('simplejson', 'dumps', 'loads', ValueError)
self.load_backend('json', 'dumps', 'loads', ValueError)
self.load_backend('demjson', 'encode', 'decode', 'JSONDecodeError')
## Experimental support
self.load_backend('jsonlib', 'write', 'read', 'ReadError')
self.load_backend('yajl', 'dumps', 'loads', ValueError)
def _verify(self):
"""Ensures that we've loaded at least one JSON backend."""
if self._verified:
return
raise AssertionError('jsonpickle requires at least one of the '
'following:\n'
' python2.6, simplejson, or demjson')
def load_backend(self, name, encode_name, decode_name, decode_exc):
"""
Load a JSON backend by name.
This method loads a backend and sets up references to that
backend's encode/decode functions and exception classes.
:param encode_name: is the name of the backend's encode method.
The method should take an object and return a string.
:param decode_name: names the backend's method for the reverse
operation -- returning a Python object from a string.
:param decode_exc: can be either the name of the exception class
used to denote decoding errors, or it can be a direct reference
to the appropriate exception class itself. If it is a name,
then the assumption is that an exception class of that name
can be found in the backend module's namespace.
"""
try:
## Load the JSON backend
mod = __import__(name)
except ImportError:
return
try:
## Handle submodules, e.g. django.utils.simplejson
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
except AttributeError:
return
try:
## Setup the backend's encode/decode methods
self._encoders[name] = getattr(mod, encode_name)
self._decoders[name] = getattr(mod, decode_name)
except AttributeError:
self.remove_backend(name)
return
try:
if type(decode_exc) is str:
## This backend's decoder exception is part of the backend
self._decoder_exceptions[name] = getattr(mod, decode_exc)
else:
## simplejson uses the ValueError exception
self._decoder_exceptions[name] = decode_exc
except AttributeError:
self.remove_backend(name)
return
## Setup the default args and kwargs for this encoder
self._encoder_options[name] = ([], {})
## Add this backend to the list of candidate backends
self._backend_names.append(name)
## Indicate that we successfully loaded a JSON backend
self._verified = True
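## Illustrative call (an example, not part of the original module): a third-party backend
## that exposes dumps/loads and raises ValueError on bad input could be registered with
##   load_backend('ujson', 'dumps', 'loads', ValueError)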
def remove_backend(self, name):
"""Remove all entries for a particular backend."""
self._encoders.pop(name, None)
self._decoders.pop(name, None)
self._decoder_exceptions.pop(name, None)
self._encoder_options.pop(name, None)
if name in self._backend_names:
self._backend_names.remove(name)
self._verified = bool(self._backend_names)
def encode(self, obj):
"""
Attempt to encode an object into JSON.
This tries the loaded backends in order and passes along the last
exception if no backend is able to encode the object.
"""
self._verify()
for idx, name in enumerate(self._backend_names):
try:
optargs, optkwargs = self._encoder_options[name]
encoder_kwargs = optkwargs.copy()
encoder_args = (obj,) + tuple(optargs)
return self._encoders[name](*encoder_args, **encoder_kwargs)
except Exception:
if idx == len(self._backend_names) - 1:
raise
def decode(self, string):
"""
Attempt to decode an object from a JSON string.
This tries the loaded backends in order and passes along the last
exception if no backends are able to decode the string.
"""
self._verify()
for idx, name in enumerate(self._backend_names):
try:
return self._decoders[name](string)
except self._decoder_exceptions[name], e:
if idx == len(self._backend_names) - 1:
raise e
else:
pass # and try a more forgiving encoder, e.g. demjson
def set_preferred_backend(self, name):
"""
Set the preferred json backend.
If a preferred backend is set then jsonpickle tries to use it
before any other backend.
For example::
set_preferred_backend('simplejson')
If the backend is not one of the built-in jsonpickle backends
(json/simplejson, or demjson) then you must load the backend
prior to calling set_preferred_backend.
AssertionError is raised if the backend has not been loaded.
"""
if name in self._backend_names:
self._backend_names.remove(name)
self._backend_names.insert(0, name)
else:
errmsg = 'The "%s" backend has not been loaded.' % name
raise AssertionError(errmsg)
def set_encoder_options(self, name, *args, **kwargs):
"""
Associate encoder-specific options with an encoder.
After calling set_encoder_options, any calls to jsonpickle's
encode method will pass the supplied args and kwargs along to
the appropriate backend's encode method.
For example::
set_encoder_options('simplejson', sort_keys=True, indent=4)
set_encoder_options('demjson', compactly=False)
See the appropriate encoder's documentation for details about
the supported arguments and keyword arguments.
"""
self._encoder_options[name] = (args, kwargs)
# Initialize a JSONPluginMgr
json = JSONPluginMgr()
# Export specific JSONPluginMgr methods into the jsonpickle namespace
set_preferred_backend = json.set_preferred_backend
set_encoder_options = json.set_encoder_options
load_backend = json.load_backend
remove_backend = json.remove_backend
def encode(value, unpicklable=True, max_depth=None):
"""
Return a JSON formatted representation of value, a Python object.
The keyword argument 'unpicklable' defaults to True.
If set to False, the output will not contain the information
necessary to turn the JSON data back into Python objects.
The keyword argument 'max_depth' defaults to None.
If set to a non-negative integer then jsonpickle will not recurse
deeper than 'max_depth' steps into the object. Anything deeper
than 'max_depth' is represented using a Python repr() of the object.
>>> encode('my string')
'"my string"'
>>> encode(36)
'36'
>>> encode({'foo': True})
'{"foo": true}'
>>> encode({'foo': True}, max_depth=0)
'"{\\'foo\\': True}"'
>>> encode({'foo': True}, max_depth=1)
'{"foo": "True"}'
"""
j = Pickler(unpicklable=unpicklable,
max_depth=max_depth)
return json.encode(j.flatten(value))
def decode(string):
"""
Convert a JSON string into a Python object.
>>> str(decode('"my string"'))
'my string'
>>> decode('36')
36
"""
j = Unpickler()
return j.restore(json.decode(string))
| bsd-3-clause | -7,820,685,076,702,264,000 | 32.055385 | 79 | 0.613237 | false |
EOSIO/eos | tests/p2p_test_peers.py | 1 | 1555 | import subprocess
class P2PTestPeers:
#for testing with localhost
sshname = "testnet" # ssh name for executing remote commands
hosts = ["localhost"] # host list
ports = [8888] # eosiod listening port of each host
devs = ["lo0"] # network device of each host
#for testing with testnet2
#sshname = "testnet" # ssh name for executing remote commands
#hosts = ["10.160.11.101", "10.160.12.102", "10.160.13.103", "10.160.11.104", "10.160.12.105", "10.160.13.106", "10.160.11.107", "10.160.12.108", "10.160.13.109", "10.160.11.110", "10.160.12.111", "10.160.13.112", "10.160.11.113", "10.160.12.114", "10.160.13.115", "10.160.11.116", "10.160.12.117", "10.160.13.118", "10.160.11.119", "10.160.12.120", "10.160.13.121", "10.160.11.122"] # host list
#ports = [8888, 8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888,8888, 8888] # eosiod listening port of each host
#devs = ["ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3", "ens3"] # network device of each host
@staticmethod
def exec(remoteCmd, toprint=True):
for i in range(len(P2PTestPeers.hosts)):
remoteCmd2 = remoteCmd.replace("{dev}", P2PTestPeers.devs[i])
cmd = "ssh " + P2PTestPeers.sshname + "@" + P2PTestPeers.hosts[i] + ' ' + remoteCmd2
if toprint is True:
print("execute:" + cmd)
subprocess.call(cmd, shell=True)
| mit | -2,289,070,867,791,307,000 | 63.833333 | 399 | 0.615434 | false |
boh1996/LectioAPI | sync.py | 1 | 4414 | import database
from datetime import *
import error
from bson.objectid import ObjectId
db = database.db
def flatten ( l ):
out = []
if isinstance(l, (list, tuple)):
for item in l:
out.extend(flatten(item))
elif isinstance(l, (dict)):
for dictkey in l.keys():
out.extend(flatten(l[dictkey]))
elif isinstance(l, (str, int, unicode)):
if isinstance(l, int):
l = str(l)
out.append(l)
elif isinstance(l, datetime):
out.append(l.isoformat(' '))
elif isinstance(l, ObjectId):
out.append(str(l))
elif l is None:
out.append("")
else:
out.append(l)
return out
# Inserts the document if it doesn't exist, skips it if an identical match exists, and updates it if it exists in an older version
def sync ( table, query, document, unset=["_updated", "_id", "_created"] ):
existsing = table.find(query).limit(1)
if existsing.count() == 0:
document["_created"] = datetime.now()
document["_updated"] = datetime.now()
try:
_id = table.insert(document, manipulate=True)
except Exception, e:
error.log(__file__, False, str(e))
return {
"status" : True,
"action" : "created",
"_id" : _id
}
else:
existsing = existsing[0]
difference = None
unsettedRows = {}
_id = None
try:
for item in unset:
if item in existsing:
unsettedRows[item] = existsing[item]
existsing.pop(item, None)
if item in document:
document.pop(item, None)
existingRows = []
for row in document:
if row in existsing:
existingRows.append(existsing[row])
existingItems = []
documentItems = []
for key in document:
item = ""
try:
item = " ".join(flatten(document[key]))
except Exception, e:
error.log(__file__, False, str(e))
documentItems.append(item)
for row in existingRows:
row = " ".join(flatten(row))
existingItems.append(row)
difference = set(documentItems)-set(existingItems)
for row in existsing:
if not row in document:
document[row] = existsing[row]
if difference is None or len(difference) == 0:
return {
"status" : True,
"action" : "existsing",
"_id" : unsettedRows["_id"]
}
except Exception, e:
error.log(__file__, False, str(e))
for item in unsettedRows:
if unsettedRows[item] is not None:
document[item] = unsettedRows[item]
# Assign updated Time
document["_updated"] = datetime.now()
# If no created field, create it
if not "_created" in document:
document["_created"] = datetime.now()
# Update Table
try:
table.update(query, document, upsert=True)
_id = unsettedRows["_id"]
except Exception, e:
error.log(__file__, False, str(e))
return {
"status" : True,
"action" : "updated",
"difference" : difference,
"_id" : _id
}
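# Illustrative usage (assumed collection and document names, not from the original code):
# syncing a timetable entry into a pymongo collection, keyed by its Lectio id:
#
#   status = sync(db.lessons, {"lesson_id": lesson["lesson_id"]}, lesson)
#   if check_action_event(status):
#       for url in find_listeners("lesson", {"lesson_id": lesson["lesson_id"]}):
#           send_event(url, status["action"], lesson)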
# Checks whether an event should be fired, based on the status returned by sync()
def check_action_event ( status ):
if not "status" in status or not "action" in status:
return False
if status["status"] == False or status["action"] == "existsing":
return False
return True
'''
Retrieves a list of event listeners for the specific object type,
and where the data matches the query; a list of URLs is returned
'''
def find_listeners ( type, query ):
listeners = db.event_listeners.find({
"type" : type,
"query" : query
})
urls = []
for listener in listeners:
urls = urls + listener["urls"]
return urls
def find_general_listeners ( type ):
listeners = db.event_listeners.find({
"type" : type
})
urls = []
for listener in listeners:
urls = urls + listener["urls"]
return urls
def send_event ( url, event, data ):
pass
def find_deleted ( table, query, uniqueRows, current ):
deleted = []
existsing = table.find(query)
if existsing is None:
return deleted
for row in existsing:
found = False
for element in current:
if same(uniqueRows, element, row):
found = True
if not found:
table.remove({
"_id" : row["_id"]
})
deleted.append(row)
return deleted
def same ( uniqueRows, element1, element2 ):
same = True
for row in uniqueRows:
if not row in element1 and row in element2:
same = False
if not row in element2 and row in element1:
same = False
if row in element1 and row in element2:
if type(element2[row]) == type(element1[row]):
if not element2[row] == element1[row]:
same = False
else:
if not str(element2[row]) == str(element1[row]):
same = False
return same
| mit | -2,249,145,133,268,138,000 | 20.536585 | 112 | 0.645446 | false |
IRC-SPHERE/HyperStream | hyperstream/tools/splitter_from_stream/2016-10-06_v0.0.1.py | 1 | 2970 | # The MIT License (MIT)
# Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from hyperstream import TimeInterval, MIN_DATE
from hyperstream.tool import MultiOutputTool
from hyperstream.stream import StreamMetaInstance, AssetStream
import logging
from copy import deepcopy
class SplitterFromStream(MultiOutputTool):
"""
This version of the splitter assumes that the mapping exists as the last element of an (asset) stream
"""
def __init__(self, element):
super(SplitterFromStream, self).__init__(element=element)
def _execute(self, source, splitting_stream, interval, meta_data_id, output_plate_values):
if splitting_stream is None:
raise ValueError("Splitting stream required for this tool")
if isinstance(splitting_stream, AssetStream):
time_interval = TimeInterval(MIN_DATE, interval.end)
splitter = splitting_stream.window(time_interval, force_calculation=True).last()
else:
splitter = splitting_stream.window(interval, force_calculation=True).last()
if not splitter:
logging.debug("No assets found for source {} and splitter {}"
.format(source.stream_id, splitting_stream.stream_id))
return
mapping = splitter.value
for timestamp, value in source.window(interval, force_calculation=True):
if self.element not in value:
logging.debug("Mapping element {} not in instance".format(self.element))
continue
value = deepcopy(value)
meta_data = str(value.pop(self.element))
if meta_data not in mapping:
logging.warn("Unknown value {} for meta data {}".format(meta_data, self.element))
continue
plate_value = mapping[meta_data]
yield StreamMetaInstance((timestamp, value), (meta_data_id, plate_value))
| mit | -5,980,094,370,727,878,000 | 46.142857 | 104 | 0.69899 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.