code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects used within multiple extensions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import python_utils
class CustomizationArgSpec(python_utils.OBJECT):
"""Value object for a customization arg specification."""
def __init__(self, name, description, schema, default_value):
self.name = name
self.description = description
self.schema = schema
self.default_value = default_value
def to_dict(self):
"""Returns a dict representing this CustomizationArgSpec domain object.
Returns:
            dict. A dict mapping all fields of the CustomizationArgSpec instance.
"""
return {
'name': self.name,
'description': self.description,
'schema': self.schema,
'default_value': self.default_value
}
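# Illustrative usage sketch (not part of the original Oppia module); the field
# values below are invented for demonstration only.
if __name__ == '__main__':
    demo_spec = CustomizationArgSpec(
        name='placeholder',
        description='Text shown before the learner answers.',
        schema={'type': 'unicode'},
        default_value='')
    # to_dict() simply mirrors the four constructor fields.
    assert demo_spec.to_dict()['default_value'] == ''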
| prasanna08/oppia | extensions/domain.py | Python | apache-2.0 | 1,571 |
# -*- encoding: utf-8 -*-
"""
lunaport.dao.line
~~~~~~~~~~~~~~~~~
Storage interaction logic for line resource.
"""
import pprint
pp = pprint.PrettyPrinter(indent=4).pprint
from sqlalchemy import text, exc
from ..wsgi import app, db
from .. domain.line import LineBuilder, LineAdaptor
from exceptions import StorageError
class Filter(object):
params_allowed = {
        'name': (
            "AND name LIKE '%:name%'",),
}
cast_to_int = []
def __init__(self, **kw):
self.rule = []
self.q_params = {}
for p, v in kw.iteritems():
if p not in self.params_allowed.keys():
continue
elif isinstance(v, (unicode, basestring)):
self.rule.append(self.params_allowed[p][0])
self.q_params.update({p: v})
else:
raise StorageError('Wrong *{}* param type.'.format(p))
def cmpl_query(self):
sql_text = '\n' + ' '.join(self.rule)
return sql_text, self.q_params
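# Usage sketch (illustrative, not in the original file): only whitelisted kwargs
# are honoured, e.g. Filter(name=u'prod').cmpl_query() yields roughly
# ("\nAND name LIKE '%:name%'", {'name': u'prod'}) for appending to a base query.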
class Dao(object):
"""Interface for line storage"""
@classmethod
def insert(cls, ammo):
        raise NotImplementedError()
@classmethod
def get_single(cls, **kw):
        raise NotImplementedError()
@classmethod
def get_many(cls, **kw):
        raise NotImplementedError()
class RDBMS(Dao):
"""PostgreSQL wrapper, implementing line.dao interface"""
per_page_default = app.config.get('LINE_PER_PAGE_DEFAULT') or 10
per_page_max = app.config.get('LINE_PER_PAGE_MAX') or 100
select_join_part = '''
SELECT l.*,
dc.name AS dc_name
FROM line l,
dc dc
WHERE l.dc_id = dc.id'''
update_top = '''UPDATE line
SET
'''
update_bottom = '''
WHERE id = :id'''
@staticmethod
    def rdbms_call(q_text, q_params):
        return db.engine.connect().execute(text(q_text), **q_params)
@classmethod
def cmpl_update_sql(cls, line_diff):
rv = []
attr_allowed = [
'id',
'name',
'dc_name',
]
for attr in attr_allowed:
            if line_diff.get(attr) is not None:
if attr == 'dc_name':
rv.append('dc_id = (SELECT id FROM dc WHERE name = :dc_name)')
else:
rv.append('{} = :{}'.format(attr, attr))
return ''.join([
cls.update_top,
',\n'.join(rv),
cls.update_bottom,
])
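    # Illustrative example (not in the original file): for a diff like
    # {'id': 7, 'name': 'edge-01', 'dc_name': 'iva'} the composed statement is
    # roughly "UPDATE line SET id = :id, name = :name,
    # dc_id = (SELECT id FROM dc WHERE name = :dc_name) WHERE id = :id".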
@classmethod
def insert(cls, line):
kw = LineAdaptor.to_dict(line)
kw['dc_name'] = kw['dc']['name']
#pp(kw)
def query():
return cls.rdbms_call('''
INSERT INTO line
(
id,
name,
dc_id
)
VALUES (
:id,
:name,
(SELECT id FROM dc WHERE name = :dc_name)
)
returning id''', kw)
        err_duplicate = 'line:{} already exists'.format(kw.get('name'))
try:
pk_id = [r for r in query()].pop()[0]
except exc.IntegrityError as e:
if 'unique constraint "line_pkey"' in str(e):
raise StorageError(err_duplicate)
raise StorageError('Some kind of IntegrityError')
return pk_id
@classmethod
def get_single(cls, **kw):
if kw.get('line_id'):
query_params = {
'line_id': kw.get('line_id'),
}
rv = cls.rdbms_call(' '.join([cls.select_join_part, 'AND l.id = :line_id']), query_params)
row = rv.first()
if not row:
return None
t_kw = dict(zip(rv.keys(), row))
return LineBuilder.from_row(**t_kw)
@classmethod
def get_many(cls, **kw):
"""pagination"""
pagination_part = '\nORDER BY id DESC\nLIMIT :limit OFFSET :offset'
param_per_page = kw.get('per_page')
if param_per_page and (param_per_page <= cls.per_page_max):
per_page = param_per_page
else:
per_page = cls.per_page_default
page_num = kw.get('page')
        # Page numbers start from 1; page 0 and page 1 both mean the first
        # slice of the data set.
if page_num and isinstance(page_num, int) and (page_num >= 2):
offset = (page_num - 1) * per_page
next_page = page_num + 1
prev_page = page_num - 1
else:
offset = 0
next_page = 2
prev_page = None
query_params = {
'limit': per_page,
'offset': offset,
}
"""filtering"""
f = Filter(**kw)
filter_part, q_params_up = f.cmpl_query()
query_params.update(q_params_up)
rv = cls.rdbms_call(
''.join([cls.select_join_part, filter_part, pagination_part]),
query_params)
rows = rv.fetchall()
if len(rows) == 0:
return None, None, None, None
elif len(rows) < per_page: # last chunk of data
next_page = None
def create_dc(row):
t_kw = dict(zip(rv.keys(), row))
return LineBuilder.from_row(**t_kw)
return map(create_dc, rows), per_page, next_page, prev_page
@classmethod
def update_or_create(cls, line_entrie):
q_params = LineAdaptor.to_dict(line_entrie)
q_params['dc_name'] = q_params['dc']['name']
condition_part = '''
INSERT INTO line
(
id,
name,
dc_id
)
SELECT :id,
:name,
(SELECT id FROM dc WHERE name = :dc_name)
WHERE NOT EXISTS (SELECT 1 FROM line WHERE id=:id)'''
q_text = cls.cmpl_update_sql(q_params) + ';\n' + condition_part
try:
cls.rdbms_call(q_text + ' RETURNING id', q_params)
except exc.IntegrityError as e:
raise StorageError('Some kind of IntegrityError')
return cls.get_single(line_id=line_entrie.id)
| greggyNapalm/lunaport_server | lunaport_server/dao/line.py | Python | apache-2.0 | 5,948 |
from distutils.core import setup
import py2exe
packages = ['numpy']
includes = ['matplotlib.numerix.random_array',
'pytz.zoneinfo.UTC',
'scipy.misc.info']
setup(
name="SolarViz v0.1",
version="0.1",
description = "Solar data visualizer",
author="Stanislav Bobovych",
author_email="[email protected]",
options = { 'py2exe': { "includes" : ["matplotlib.backends", "matplotlib.backends.backend_qt4agg",
"matplotlib.figure", "pylab", "numpy", "matplotlib.numerix.fft",
"matplotlib.numerix.linear_algebra", "matplotlib.numerix.random_array",
"matplotlib.backends.backend_tkagg"] }},
# options = {'py2exe': {'optimize': 0, 'packages': packages,'includes': includes} },
windows = [
{'script': "main.py"}
#{'icon_resources': [(0, 'dcs.ico')], 'dest_base': 'DCS', 'script': main.py}
],
)
| sbobovyc/LabNotes | SolarViz/setup.py | Python | gpl-3.0 | 897 |
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class CloudzillaToFolder(SimpleCrypter):
__name__ = "CloudzillaToFolder"
__type__ = "crypter"
__version__ = "0.09"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?cloudzilla\.to/share/folder/(?P<ID>[\w^_]+)'
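    # Example match (illustrative): http://cloudzilla.to/share/folder/a1b2c3
    # where the folder id is captured by the named group 'ID'.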
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No",
"Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Cloudzilla.to folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
INFO_PATTERN = r'<span class="name" title="(?P<N>.+?)"'
OFFLINE_PATTERN = r'>File not found...<'
LINK_PATTERN = r'<a href="(.+?)" class="item_href">'
PASSWORD_PATTERN = r'<div id="pwd_protected">'
def check_errors(self):
m = re.search(self.PASSWORD_PATTERN, self.data)
if m is not None:
self.data = self.load(
self.pyfile.url, get={
'key': self.get_password()})
if re.search(self.PASSWORD_PATTERN, self.data):
self.retry(msg="Wrong password")
| rlindner81/pyload | module/plugins/crypter/CloudzillaToFolder.py | Python | gpl-3.0 | 1,409 |
# -*- coding: iso-latin-1 -*-
"""
systems specifics for sandcrawler
"""
__docformat__ = 'restructuredtext en'
__author__ = "Denis 'jawa' Pompilio"
__credits__ = "Denis 'jawa' Pompilio"
__license__ = "GPLv3"
__maintainer__ = "Denis 'jawa' Pompilio"
__email__ = "[email protected]"
__status__ = "Development"
import sys
import types
import collections
import sc_logs as LOG
import sc_common as scc
import fabric_wrp as fapi
class CallBacks():
""" class to store callbacks """
def __init__(self, trunk, parent, module, search_paths):
LOG.log_d("initialising Callbacks class %s" % (module))
self.trk = trunk
self.mom = parent
modlist = list()
generic_module = "generic_callbacks.%s" % (module)
LOG.log_d("generic module is %s" % (generic_module))
generic_imported = self.try_import(generic_module)
if generic_imported is not None:
LOG.log_d("generic module successfully imported")
modlist.extend([generic_imported])
# add users' callbacks_path prefix
for path in search_paths:
path = '.'.join(path.split('/'))
target_module = "callbacks.%s.%s" % (path, module)
specific_imported = self.try_import(target_module)
if specific_imported is not None:
LOG.log_d("%s successfully imported" % (target_module))
modlist.extend([specific_imported])
if not len(modlist):
raise ImportError("unable to import required callbacks: %s" % (
module))
LOG.log_d("importing methods and attributes from each module")
self.retriev_attrs_and_methods(modlist)
@staticmethod
def try_import(module):
""" try to import module """
# add generic_callbacks methods and attributes if found
try:
__import__(module)
return sys.modules[module]
except ImportError:
LOG.log_d("module '%s' not found" % (module))
return None
def retriev_attrs_and_methods(self, modules_list):
""" retrieve attributes and methods from each module """
for mod in modules_list:
LOG.log_d("importing from %s" % (mod.__name__))
for attr in dir(mod):
# import everything but builtins and privates
if attr[0:2] == "__":
continue
module_attr = getattr(mod, attr)
                # store the callback; if it is callable, bind it as a method so
                # its first argument (self) is this CallBacks instance
if isinstance(module_attr, collections.Callable):
setattr(self, attr,
types.MethodType(module_attr, self))
else:
setattr(self, attr, module_attr)
return True
def fapi_wrapper(method_name):
""" trying metaclass """
def _method(self, *argl, **argd):
# adding self.srv_ip as first argument for every fapi invocations
# all the fabric_wrp methods MUST take the ip as first argument
argl = list(argl)
argl.insert(0, self.srv_ip)
argl = tuple(argl)
return getattr(fapi, '%s' % method_name)(*argl,**argd)
return _method
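# Illustrative note (not in the original file): after Server.load_fapi() runs,
# a call such as server.fapi.run('uname -a') is forwarded by this wrapper as
# fapi.run(server.srv_ip, 'uname -a'); the method name 'run' is only an
# assumption for the example and must exist in fabric_wrp.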
class Server:
""" server class with os/distribs callbacks """
def __init__(self, srv_ip, load=True, systemtype=None):
LOG.log_d("initialising Server(%s, %s, %s)" % (srv_ip, load,
systemtype))
self.srv_ip = srv_ip
self.hostname = None
self.systemtype = systemtype
self.callbacks_paths = list()
self.fapi = scc.AttStr('fapi calls wrapper')
self.load_fapi()
if load:
LOG.log_d("guessing server's system type")
self.guess_system(systemtype)
def is_up(self, port = 22):
""" check if host is UP """
return scc.is_host_up(self.srv_ip, int(port))
def __getparent(self, callback_name):
""" get parent from callback name """
splitted = callback_name.split('.')
if len(splitted) == 1:
return None
parent = self
for member in splitted[:-1]:
if not hasattr(parent, member):
parent.load_callbacks(member)
parent = getattr(parent, member)
return parent
def load_callbacks(self, callback):
""" callbacks loader """
        if self.systemtype is None and not self.guess_system():
return False
# load callback specified by arg, split by dot and recurse load
callback_name = callback.split('.')[-1]
parent = self.__getparent(callback)
if parent is None or not hasattr(parent, 'trk'):
trunk = parent = self
else:
trunk = parent.trk
setattr(parent, callback_name, CallBacks(trunk, parent, callback,
self.callbacks_paths))
return True
def load_fapi(self):
"""
wrap methods from fabric_wrp module
wrapper assume that the first argument of each fapi methods is srv_ip
all invocations have to be done without the first argument (srv_ip)
"""
for attr in dir(fapi):
if attr[0:2] == "__" or attr == "with_statement":
continue
module_attr = getattr(fapi, attr)
if isinstance(module_attr, collections.Callable):
setattr(self.fapi, attr,
types.MethodType(fapi_wrapper(attr), self))
else:
setattr(self, attr, module_attr)
return True
def guess_system(self, systype = None):
""" guess on system of remote target """
if systype is None:
if not self.is_up():
return False
try:
sysinfos_mod = "callbacks.systems_infos"
__import__(sysinfos_mod)
except ImportError:
sysinfos_mod = "generic_callbacks.systems_infos"
__import__(sysinfos_mod)
LOG.log_d("guessing system using '%s'" % (sysinfos_mod))
guess = sys.modules[sysinfos_mod].sysguess
requisites = sys.modules[sysinfos_mod].check_prerequisites
requisites(self.srv_ip)
(self.hostname,
self.systemtype,
self.callbacks_paths) = guess(self.srv_ip)
LOG.log_d("system guessed as %s" % (self.systemtype))
else:
self.systemtype = systype
self.callbacks_paths.append(systype)
return True
| outini/sandcrawler | libraries/sc_systems.py | Python | gpl-3.0 | 6,623 |
import razorpay
import uuid
import time
from payments.models import Payment, Order
class RazorpayPayments:
def __init__(self, key, secret):
self.key = key
self.secret = secret
self.client = razorpay.Client(auth=(self.key, self.secret))
def create_invoice(self, customer, items):
receipt_id = str(uuid.uuid4().int)[-10:]
date = int(time.time())
data = {
"customer": customer,
"line_items": items,
"draft": "0",
"sms_notify": "1",
"email_notify": "1",
"date": date,
"receipt": receipt_id,
"type": "link",
"currency": "INR",
}
invoice = self.client.invoice.create(data=data)
return invoice
def fetch_invoices(self, invoice_id):
invoices = self.client.invoice.fetch(invoice_id)
return invoices
def fetch_orders(self, order_id):
orders = self.client.order.fetch(order_id)
return orders
def fetch_payment(self, payment_id):
invoices = self.client.payment.fetch(payment_id)
return invoices
@staticmethod
def save_payment(payment_entity):
payment = Payment(payment_id=payment_entity['id'])
payment.amount = payment_entity['amount']
payment.currency = payment_entity['currency']
payment.status = payment_entity['status']
payment.order_id = payment_entity['order_id']
payment.invoice_id = payment_entity['invoice_id']
payment.international = payment_entity['international']
payment.amount_refunded = payment_entity['amount_refunded']
payment.refund_status = payment_entity['refund_status']
payment.email = payment_entity['email']
payment.contact = payment_entity['contact']
payment.fee = payment_entity['fee']
payment.service_tax = payment_entity['tax']
payment.created_at = str(payment_entity['created_at'])
payment.save()
@staticmethod
def save_order(order_entity):
order = Order(order_id=order_entity['id'])
order.amount = order_entity['amount']
order.currency = order_entity['currency']
order.status = order_entity['status']
order.created_at = str(order_entity['created_at'])
order.save()
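# Illustrative usage sketch (not part of the original module). The key/secret
# values and the customer/line-item field names below are assumptions made for
# the example; they loosely follow Razorpay's invoice API and are not taken
# from this project.
if __name__ == "__main__":
    gateway = RazorpayPayments("rzp_test_key", "rzp_test_secret")
    customer = {"name": "Jane Doe", "contact": "9999999999",
                "email": "jane@example.com"}
    items = [{"name": "Conference ticket", "amount": 150000,
              "currency": "INR", "quantity": 1}]
    invoice = gateway.create_invoice(customer, items)
    print(invoice)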
| PyConPune/pune.pycon.org | payments/razorpay/razorpay_payments.py | Python | mit | 2,314 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import httplib
import json
import os
import shutil
import sys
import tempfile
import unittest
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'tools', 'android', 'loading'))
import options
from trace_test import test_server
from trace_test import webserver_test
OPTIONS = options.OPTIONS
class WebServerTestCase(unittest.TestCase):
def setUp(self):
OPTIONS.ParseArgs('', extra=[('--noisy', False)])
self._temp_dir = tempfile.mkdtemp()
self._server = webserver_test.WebServer(self._temp_dir, self._temp_dir)
def tearDown(self):
self.assertTrue(self._server.Stop())
shutil.rmtree(self._temp_dir)
def StartServer(self):
self._server.Start()
def WriteFile(self, path, file_content):
with open(os.path.join(self._temp_dir, path), 'w') as file_output:
file_output.write(file_content)
def Request(self, path):
host, port = self._server.Address().split(':')
connection = httplib.HTTPConnection(host, int(port))
connection.request('GET', path)
response = connection.getresponse()
connection.close()
return response
def testWebserverBasic(self):
self.WriteFile('test.html',
'<!DOCTYPE html><html><head><title>Test</title></head>'
'<body><h1>Test Page</h1></body></html>')
self.StartServer()
response = self.Request('test.html')
self.assertEqual(200, response.status)
response = self.Request('/test.html')
self.assertEqual(200, response.status)
response = self.Request('///test.html')
self.assertEqual(200, response.status)
def testWebserver404(self):
self.StartServer()
response = self.Request('null')
self.assertEqual(404, response.status)
self.assertEqual('text/html', response.getheader('content-type'))
def testContentType(self):
self.WriteFile('test.html',
'<!DOCTYPE html><html><head><title>Test</title></head>'
'<body><h1>Test Page</h1></body></html>')
self.WriteFile('blobfile',
'whatever')
self.StartServer()
response = self.Request('test.html')
self.assertEqual(200, response.status)
self.assertEqual('text/html', response.getheader('content-type'))
response = self.Request('blobfile')
self.assertEqual(500, response.status)
def testCustomResponseHeader(self):
self.WriteFile('test.html',
'<!DOCTYPE html><html><head><title>Test</title></head>'
'<body><h1>Test Page</h1></body></html>')
self.WriteFile('test2.html',
'<!DOCTYPE html><html><head><title>Test 2</title></head>'
'<body><h1>Test Page 2</h1></body></html>')
self.WriteFile(test_server.RESPONSE_HEADERS_PATH,
json.dumps({'test2.html': [['Cache-Control', 'no-store']]}))
self.StartServer()
response = self.Request('test.html')
self.assertEqual(200, response.status)
self.assertEqual('text/html', response.getheader('content-type'))
self.assertEqual(None, response.getheader('cache-control'))
response = self.Request('test2.html')
self.assertEqual(200, response.status)
self.assertEqual('text/html', response.getheader('content-type'))
self.assertEqual('no-store', response.getheader('cache-control'))
response = self.Request(test_server.RESPONSE_HEADERS_PATH)
self.assertEqual(200, response.status)
self.assertEqual('application/json', response.getheader('content-type'))
self.assertEqual(None, response.getheader('cache-control'))
if __name__ == '__main__':
unittest.main()
| danakj/chromium | tools/android/loading/trace_test/webserver_unittest.py | Python | bsd-3-clause | 3,781 |
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
env = Environment(loader=FileSystemLoader('templates'))
tmpl = env.get_template('broken.html')
print tmpl.render(seq=range(10))
| minixalpha/SourceLearning | jinja2/jinja2-2.0/examples/basic/debugger.py | Python | apache-2.0 | 205 |
# Copyright (C) weslowskij
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.scrollview import ScrollView
from Game import options
from Game.myLogHandler import infoLog
from Gui.MyScreenController import ScreenState
from Gui.Screens.myScreen import MyScreen
#from pianosimon import updateGame
#from Gui.Widgets.kivyNoteMap import KivyNoteMap
from Gui.Widgets.infoLogWidget import InfoLogWidget
from Gui.Widgets.kivyNoteMap import KivyNoteMap
import myglobals
import logging
logger = logging.getLogger(__name__)
hdlr = logging.FileHandler(__name__ + '.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.NOTSET)
class SelectNotes(object):
def build(self):
self._selectedBtn=[]
self.maxselectzed = 2
self.stream= None
self.speed=1.0
def selectBtn(self, instance):
#print "Button Selected 1 " + str(instance) +" " + str(instance.state)
self._selectedBtn.append(instance)
#instance.state = 'down'
while len(self._selectedBtn) > self.maxselectzed:
tmp = self._selectedBtn.pop(0)
tmp.state='normal'
for x in self._selectedBtn:
x.state='down'
#print "Button Selected 2" + str(self._selectedBtn)
#print "Button Selected 2" + str(instance) +" " + str(instance.playstream)
def play(self):
stream1= self.getStream()
if stream1 is None:
return
#print "SelectNotes playing " + str(stream1) + " " + str([indx,endx])
#print "playing at speed: " + str(self.speed)
self.output = myglobals.gameState.myMidiController.getMidiOut()
if type(stream1[0]) is list:
self.output.playpartMyStream(stream1 , 0 , len(stream1), startchord=stream1[0],speed=self.speed)
else:
self.output.playpartMyStream(stream1 , 0 , len(stream1), startnote=stream1[0],speed=self.speed)
def getStream(self):
if len(self._selectedBtn) <2:
if self.stream:
indx = 0
endx = len(self.stream)
stream = self.stream
else:
print "SelectNotes no Stream"
return
else:
# play hole chords only
stream1 = self._selectedBtn[0].myStreamNote.mystream
stream2 = self._selectedBtn[1].myStreamNote.mystream
start = self._selectedBtn[0].myStreamNote
end = self._selectedBtn[1].myStreamNote
if start.myoffset > end.myoffset:
start = self._selectedBtn[1].myStreamNote
end = self._selectedBtn[0].myStreamNote
#stream = self._selectedBtn[0].KivyNoteMap.noterating.map.inwort
# no mixing TODO
if stream1 != stream2:
print "SelectNotes different Streams"
return None
return stream1.buildAllInChordRange(start, end)
def getList(self):
if len(self._selectedBtn) <2:
if self.stream:
indx = 0
endx = len(self.stream)
stream = self.stream
else:
print "SelectNotes no Stream"
return
else:
# play hole chords only
stream1 = self._selectedBtn[0].myStreamNote.mystream
stream2 = self._selectedBtn[1].myStreamNote.mystream
start = self._selectedBtn[0].myStreamNote
end = self._selectedBtn[1].myStreamNote
if start.myoffset > end.myoffset:
start = self._selectedBtn[1].myStreamNote
end = self._selectedBtn[0].myStreamNote
#stream = self._selectedBtn[0].KivyNoteMap.noterating.map.inwort
# no mixing TODO
if stream1 != stream2:
print "SelectNotes different Streams"
return None
return stream1.buildAllInChordRangeList(start, end)
class AViewNotesScreen(MyScreen):
def __init__(self):
super(AViewNotesScreen,self).__init__()
self.btnbar = None
self.notesscroller=None
def buildbtnbar(self):
if self.btnbar is None:
self.btnbar=GridLayout(cols=7, size_hint=(1,0.2))
btnbar = self.btnbar
def changeStateCallback(instance):
myglobals.ScreenState.change_state(instance.mystate)
def callbackplay70(instance):
#main.console()
myglobals.gameState.console = True
btn70 = Button(text='Console')
btn70.bind(on_press=callbackplay70)
btnbar.add_widget(btn70)
def callbackplay80(instance):
#main.console()
myglobals.gameState.test()
btn80 = Button(text='test')
btn80.bind(on_press=callbackplay80)
btnbar.add_widget(btn80)
def callback14(instance):
ScreenState.mystate.dirty = True
ScreenState.updateGui()
btn14 = Button(text='update\nGui')
btn14.bind(on_press=callback14)
btnbar.add_widget(btn14)
def callbackplay(instance):
#print "replaying " + str(self.notesscroller._selectedBtn[0]) + " " + str(self.notesscroller._selectedBtn[1])
self.notesscroller.play()
btn4 = Button(text='Play\nselected\nChords')
btn4.speed = 1.0
btn4.bind(on_press=callbackplay)
btnbar.add_widget(btn4)
def callback(instance):
ScreenState.change_state_back()
btn = Button(text='Back')
btn.bind(on_press=callback)
btnbar.add_widget(btn)
btnOlgasDuty = Button(text='OlgasDuty')
btnOlgasDuty.mystate = "OlgasDuty"
btnOlgasDuty.bind(on_press=changeStateCallback)
btnbar.add_widget(btnOlgasDuty)
return btnbar
def build(self):
#self.btnbar=GridLayout(cols=7, size_hint=(1,0.2))
if not hasattr(self,"btnbar"):
self.btnbar=None
if self.btnbar is None:
self.btnbar = self.buildbtnbar()
print self.btnbar
self.widget = BoxLayout(orientation='vertical')
#l = Label(text=str(self.mystate))
self.logWidget = InfoLogWidget()
self.logWidget.build()
self.widget.add_widget(self.logWidget)
"""
self.logWidget = Label(size_hint=(1,0.1))
self.widget.add_widget(self.logWidget)
self.logWidget.text = "hallo"
infoLog.handlers.append(self.logWidget)
"""
if not(hasattr(self,"notesscroller")):
self.notesscroller = ScrollView(size_hint=(1,1-self.btnbar.size_hint_y))
if self.notesscroller is None:
print str(self.mystate) + "self.notesscroller is None"
self.notesscroller = ScrollView(size_hint=(1,1-self.btnbar.size_hint_y))
#self.notesscroller = ListView8(size_hint=(1,1-self.btnbar.size_hint_y))
#self.notesscroller = ListView8(size_hint=(1,1-self.btnbar.size_hint_y))
self.notesscroller.do_scroll_x=True
self.notesscroller.do_scroll_y=True
self.widget.add_widget(self.notesscroller)
#self.notesscroller = anotesscroller
#self.notesscroller.add_widget(self.notesscroller.widget)
self.widget.add_widget(self.btnbar)
l = Label(text=str(self.mystate),size_hint=(1,0.1))
self.widget.add_widget(l)
self.dirty=True
ScreenState.add_state(self)
def buildAll(self, astateName, anotesscroller):
self.mystate = astateName
self.notesscroller=anotesscroller
self.notesscroller = anotesscroller
self.widget = BoxLayout(orientation='vertical')
self.logWidget = InfoLogWidget()
self.logWidget.build()
self.widget.add_widget(self.logWidget)
"""
self.logWidget = Label(size_hint=(1,0.1))
self.widget.add_widget(self.logWidget)
infoLog.handlers.append(self.logWidget)
"""
"""
self.notesscroller = ScrollView(size_hint=(1,0.8))
self.notesscroller.do_scroll_x=True
self.notesscroller.do_scroll_y=True
self.widget.add_widget(self.notesscroller)
self.notesscroller = anotesscroller
self.notesscroller.add_widget(self.notesscroller.widget)
"""
self.widget.add_widget(self.notesscroller)
self.mystate=astateName
l = Label(text=str(self.mystate),size_hint=(1,0.1))
ScreenState.add_state(self)
#btnbar=GridLayout(cols=8, size_hint=(1,0.2))
#btnbar=GridLayout(cols=7, size_hint=(1,0.2))
self.btnbar = self.buildbtnbar()
self.widget.add_widget(self.btnbar)
self.widget.add_widget(l)
return self.widget
def updateGui(self):
if self.mystate == "LastError":
#ShowErrors.computeBestInputMap(5)
pass
self.notesscroller.updateGui()
def update(self):
#self.notesscroller.update()
# TODODone dont belong here (All Game states are Screens?)
# Done but some Screens needed State updates
#if myglobals.gameState:
if self.dirty:
#logger.info("update game")
#myglobals.gameState.updateGame(0)
#logger.info("update game done")
#logger.info("update screen")
if options.getOptionValue("update Gui"):
ScreenState.updateGui()
#logger.info("update screen done")
#else:
#myglobals.gameState = pianoSimonGame.PianoSimonGame()
# pianosimon.updateGame(0)
pass
| olga-weslowskij/olga.weslowskij | Gui/Screens/aViewNotesScreen.py | Python | gpl-2.0 | 10,493 |
# Collect the value after '=' from each line of foo.txt and write the values
# comma-separated to ids.csv.
with open('foo.txt') as infile, open('ids.csv', 'w') as outfile:
    for line in infile:
        outfile.write(line.split('=')[1].strip())
        outfile.write(',')
| siramix/phrasecraze | utils/getids.py | Python | gpl-3.0 | 158 |
# -*- coding: utf-8 -*-
"""
===========================================
Sportran graphic user interface
===========================================
Version: 0.0.2
Release state: Beta
Last update: 09/2019
Main Developer: Sebastiano Bisacchi
Other developer: Riccardo Bertossa
This file contains the GUI of the Sportran project developed at SISSA.
"""
# todo: Put an accurate description of the project?
from sportran_gui.interfaces import *
from sportran_gui.utils.custom_widgets import *
from sportran_gui.assets import ICON, METADATA, LANGUAGES, dev_state
# Verify that sportran is installed
try:
import sportran
except ImportError:
raise ImportError('Couldn\'t find sportran')
# Main app
class SportranGUI(Tk):
"""
This class is used to initialize all
the interfaces and to setup the multi frame functionality.
SportranGUI is a subclass of Tk that is the root window.
"""
# Class variables to store some main parameters
open_windows = []
frames = []
frame = None
root = None
container = None
home = FileManager
def __init__(self, *args, **kwargs):
Tk.__init__(self, *args, **kwargs)
SportranGUI.root = self
# Setup the default colors and font to use in the interface
self.option_add('*Font', '{} {}'.format(settings.FONT, settings.FONT_SIZE))
self.option_add('*Background', '{}'.format(settings.BG_COLOR))
self.option_add('*selectBackground', 'light blue')
self.option_add('*selectForeground', 'black')
self.show_software_info()
# Add the main window to the open windows
SportranGUI.open_windows.insert(0, self)
# Configure the window
window_icon = PhotoImage(master=self, data=ICON)
self.iconphoto(True, window_icon)
#self.tk.call('wm', 'iconphoto', self._w, window_icon)
#self.iconbitmap(bitmap=window_icon)
self.title('Sportran')
self.geometry('{}x{}+{}+{}'.format(settings.X_SIZE, settings.Y_SIZE, settings.X_SPACING, settings.Y_SPACING))
self.resizable(settings.X_RESIZE, settings.Y_RESIZE)
# Define the exit function
self.protocol('WM_DELETE_WINDOW', func=lambda: cu.secure_exit(SportranGUI))
# Creating the main frame
container = Frame(self)
SportranGUI.container = container
container.grid(row=0, column=0, sticky='nsew')
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
container.grid_rowconfigure(0, weight=10)
container.grid_columnconfigure(0, weight=10)
SportranGUI.root.grid_propagate(True)
# ## Setting up multiple window system ## #
self.topbar = TopBar(self, self, SportranGUI)
SportranGUI.frames = {}
frame_order = [FileManager, HeaderSelector, OtherVariables, FStarSelector, PStarSelector]
# Load and setup the interfaces
for n, F in enumerate(frame_order):
SportranGUI.frame = F(container, SportranGUI)
try:
SportranGUI.frame.set_next_frame(frame_order[n + 1])
except:
pass
try:
SportranGUI.frame.set_prev_frame(frame_order[n - 1])
except:
pass
SportranGUI.frames[F] = SportranGUI.frame
# Init the main interface
self.show_frame(SportranGUI.home)
@staticmethod
def show_software_info():
"""
This function displays some software info at the startup.
        The data displayed are taken from METADATA.
"""
print('------------------- Sportran GUI -------------------')
print('')
print('\t\t\tGUI version: {}'.format(METADATA['gui_version']))
print('\t\t\tSportran version: {}'.format(METADATA['version']))
print('\t\t\tDev state: {}'.format(dev_state))
#print('\t\t\tLast release: {}'.format(METADATA['release_date']))
print('\t\t\tDevelopers: {}'.format(METADATA['author']))
print('\t\t\tURL: {}'.format(METADATA['url']))
print('')
print('This software is an open-source project licensed under {}'.format(METADATA['license']))
print(METADATA['credits'])
print('')
print(METADATA['description']) # todo: Add other project infos
print('----------------------------------------------------------')
@staticmethod
def show_frame(frame):
"""
This function is used to display a frame.
:param frame: the frame to be displayed. Must be a Frame object.
"""
SportranGUI.container.grid_rowconfigure(0, weight=0)
SportranGUI.container.grid_columnconfigure(0, weight=0)
SportranGUI.frame = SportranGUI.frames[frame]
SportranGUI.frame.grid(row=0, column=0, sticky='nsew')
SportranGUI.container.grid_rowconfigure(0, weight=1)
SportranGUI.container.grid_columnconfigure(0, weight=1)
SportranGUI.frame.tkraise()
SportranGUI.frame.update_data()
SportranGUI.frame.update()
def run():
"""
    This function is called only once at startup; it loads the
    .ini file and starts the software.
"""
# Load data
cu.load_settings()
# Start the software
app = SportranGUI()
app.mainloop()
if __name__ == '__main__':
# Set the output method
cu.log.set_method('other')
run()
| lorisercole/thermocepstrum | sportran_gui/main.py | Python | gpl-3.0 | 5,464 |
# Copyright (c) 2019, Matt Layman and contributors
from contextlib import contextmanager
import inspect
from io import BytesIO, StringIO
import sys
import tempfile
import unittest
from unittest import mock
from tap.parser import Parser
try:
import yaml
from more_itertools import peekable # noqa
have_yaml = True
except ImportError:
have_yaml = False
@contextmanager
def captured_output():
if sys.version_info[0] < 3:
new_out, new_err = BytesIO(), BytesIO()
else:
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestParser(unittest.TestCase):
"""Tests for tap.parser.Parser"""
def test_finds_ok(self):
"""The parser extracts an ok line."""
parser = Parser()
line = parser.parse_line("ok - This is a passing test line.")
self.assertEqual("test", line.category)
self.assertTrue(line.ok)
self.assertTrue(line.number is None)
def test_finds_number(self):
"""The parser extracts a test number."""
parser = Parser()
line = parser.parse_line("ok 42 is the magic number.")
self.assertEqual("test", line.category)
self.assertEqual(42, line.number)
def test_finds_description(self):
parser = Parser()
line = parser.parse_line("ok 42 A passing test.")
self.assertEqual("test", line.category)
self.assertEqual("A passing test.", line.description)
def test_after_hash_is_not_description(self):
parser = Parser()
line = parser.parse_line("ok A description # Not part of description.")
self.assertEqual("test", line.category)
self.assertEqual("A description", line.description)
def test_finds_todo(self):
parser = Parser()
line = parser.parse_line("ok A description # TODO Not done")
self.assertEqual("test", line.category)
self.assertTrue(line.todo)
def test_finds_skip(self):
parser = Parser()
line = parser.parse_line("ok A description # SKIP for now")
self.assertEqual("test", line.category)
self.assertTrue(line.skip)
def test_finds_not_ok(self):
"""The parser extracts a not ok line."""
parser = Parser()
line = parser.parse_line("not ok - This is a failing test line.")
self.assertEqual("test", line.category)
self.assertFalse(line.ok)
self.assertTrue(line.number is None)
self.assertEqual("", line.directive.text)
def test_finds_directive(self):
"""The parser extracts a directive"""
parser = Parser()
test_line = "not ok - This line fails # TODO not implemented"
line = parser.parse_line(test_line)
directive = line.directive
self.assertEqual("test", line.category)
self.assertEqual("TODO not implemented", directive.text)
self.assertFalse(directive.skip)
self.assertTrue(directive.todo)
self.assertEqual("not implemented", directive.reason)
def test_unrecognizable_line(self):
"""The parser returns an unrecognizable line."""
parser = Parser()
line = parser.parse_line("This is not a valid TAP line. # srsly")
self.assertEqual("unknown", line.category)
def test_diagnostic_line(self):
"""The parser extracts a diagnostic line."""
text = "# An example diagnostic line"
parser = Parser()
line = parser.parse_line(text)
self.assertEqual("diagnostic", line.category)
self.assertEqual(text, line.text)
def test_bail_out_line(self):
"""The parser extracts a bail out line."""
parser = Parser()
line = parser.parse_line("Bail out! This is the reason to bail.")
self.assertEqual("bail", line.category)
self.assertEqual("This is the reason to bail.", line.reason)
def test_finds_version(self):
"""The parser extracts a version line."""
parser = Parser()
line = parser.parse_line("TAP version 13")
self.assertEqual("version", line.category)
self.assertEqual(13, line.version)
def test_errors_on_old_version(self):
"""The TAP spec dictates that anything less than 13 is an error."""
parser = Parser()
with self.assertRaises(ValueError):
parser.parse_line("TAP version 12")
def test_finds_plan(self):
"""The parser extracts a plan line."""
parser = Parser()
line = parser.parse_line("1..42")
self.assertEqual("plan", line.category)
self.assertEqual(42, line.expected_tests)
def test_finds_plan_with_skip(self):
"""The parser extracts a plan line containing a SKIP."""
parser = Parser()
line = parser.parse_line("1..42 # Skipping this test file.")
self.assertEqual("plan", line.category)
self.assertTrue(line.skip)
def test_ignores_plan_with_any_non_skip_directive(self):
"""The parser only recognizes SKIP directives in plans."""
parser = Parser()
line = parser.parse_line("1..42 # TODO will not work.")
self.assertEqual("unknown", line.category)
def test_parses_text(self):
sample = inspect.cleandoc(
u"""1..2
ok 1 A passing test
not ok 2 A failing test"""
)
parser = Parser()
lines = []
for line in parser.parse_text(sample):
lines.append(line)
self.assertEqual(3, len(lines))
self.assertEqual("plan", lines[0].category)
self.assertEqual("test", lines[1].category)
self.assertTrue(lines[1].ok)
self.assertEqual("test", lines[2].category)
self.assertFalse(lines[2].ok)
def test_parses_file(self):
sample = inspect.cleandoc(
"""1..2
ok 1 A passing test
not ok 2 A failing test"""
)
temp = tempfile.NamedTemporaryFile(delete=False)
temp.write(sample.encode("utf-8"))
temp.close()
parser = Parser()
lines = []
for line in parser.parse_file(temp.name):
lines.append(line)
self.assertEqual(3, len(lines))
self.assertEqual("plan", lines[0].category)
self.assertEqual("test", lines[1].category)
self.assertTrue(lines[1].ok)
self.assertIsNone(lines[1].yaml_block)
self.assertEqual("test", lines[2].category)
self.assertFalse(lines[2].ok)
def test_parses_yaml(self):
sample = inspect.cleandoc(
u"""TAP version 13
1..2
ok 1 A passing test
---
test: sample yaml
...
not ok 2 A failing test"""
)
parser = Parser()
lines = []
for line in parser.parse_text(sample):
lines.append(line)
if have_yaml:
converted_yaml = yaml.safe_load(u"""test: sample yaml""")
self.assertEqual(4, len(lines))
self.assertEqual(13, lines[0].version)
self.assertEqual(converted_yaml, lines[2].yaml_block)
self.assertEqual("test", lines[3].category)
self.assertIsNone(lines[3].yaml_block)
else:
self.assertEqual(7, len(lines))
self.assertEqual(13, lines[0].version)
for line_index in list(range(3, 6)):
self.assertEqual("unknown", lines[line_index].category)
self.assertEqual("test", lines[6].category)
def test_parses_mixed(self):
# Test that we can parse both a version 13 and earlier version files
# using the same parser. Make sure that parsing works regardless of
# the order of the incoming documents.
sample_version_13 = inspect.cleandoc(
u"""TAP version 13
1..2
ok 1 A passing version 13 test
---
test: sample yaml
...
not ok 2 A failing version 13 test"""
)
sample_pre_13 = inspect.cleandoc(
"""1..2
ok 1 A passing pre-13 test
not ok 2 A failing pre-13 test"""
)
parser = Parser()
lines = []
lines.extend(parser.parse_text(sample_version_13))
lines.extend(parser.parse_text(sample_pre_13))
if have_yaml:
self.assertEqual(13, lines[0].version)
self.assertEqual("A passing version 13 test", lines[2].description)
self.assertEqual("A failing version 13 test", lines[3].description)
self.assertEqual("A passing pre-13 test", lines[5].description)
self.assertEqual("A failing pre-13 test", lines[6].description)
else:
self.assertEqual(13, lines[0].version)
self.assertEqual("A passing version 13 test", lines[2].description)
self.assertEqual("A failing version 13 test", lines[6].description)
self.assertEqual("A passing pre-13 test", lines[8].description)
self.assertEqual("A failing pre-13 test", lines[9].description)
# Test parsing documents in reverse order
parser = Parser()
lines = []
lines.extend(parser.parse_text(sample_pre_13))
lines.extend(parser.parse_text(sample_version_13))
if have_yaml:
self.assertEqual("A passing pre-13 test", lines[1].description)
self.assertEqual("A failing pre-13 test", lines[2].description)
self.assertEqual(13, lines[3].version)
self.assertEqual("A passing version 13 test", lines[5].description)
self.assertEqual("A failing version 13 test", lines[6].description)
else:
self.assertEqual("A passing pre-13 test", lines[1].description)
self.assertEqual("A failing pre-13 test", lines[2].description)
self.assertEqual(13, lines[3].version)
self.assertEqual("A passing version 13 test", lines[5].description)
self.assertEqual("A failing version 13 test", lines[9].description)
def test_parses_yaml_no_end(self):
sample = inspect.cleandoc(
u"""TAP version 13
1..2
ok 1 A passing test
---
test: sample yaml
not ok 2 A failing test"""
)
parser = Parser()
lines = []
for line in parser.parse_text(sample):
lines.append(line)
if have_yaml:
converted_yaml = yaml.safe_load(u"""test: sample yaml""")
self.assertEqual(4, len(lines))
self.assertEqual(13, lines[0].version)
self.assertEqual(converted_yaml, lines[2].yaml_block)
self.assertEqual("test", lines[3].category)
self.assertIsNone(lines[3].yaml_block)
else:
self.assertEqual(6, len(lines))
self.assertEqual(13, lines[0].version)
for line_index in list(range(3, 5)):
self.assertEqual("unknown", lines[line_index].category)
self.assertEqual("test", lines[5].category)
def test_parses_yaml_more_complex(self):
sample = inspect.cleandoc(
u"""TAP version 13
1..2
ok 1 A passing test
---
message: test
severity: fail
data:
got:
- foo
expect:
- bar
output: |-
a multiline string
must be handled properly
even with | pipes
| here > and: there"""
)
parser = Parser()
lines = []
for line in parser.parse_text(sample):
lines.append(line)
if have_yaml:
converted_yaml = yaml.safe_load(
u'''
message: test
severity: fail
data:
got:
- foo
expect:
- bar
output: "a multiline string\\nmust be handled properly\\neven with | pipes\\n| here > and: there"''' # noqa
)
self.assertEqual(3, len(lines))
self.assertEqual(13, lines[0].version)
self.assertEqual(converted_yaml, lines[2].yaml_block)
else:
self.assertEqual(16, len(lines))
self.assertEqual(13, lines[0].version)
for line_index in list(range(3, 11)):
self.assertEqual("unknown", lines[line_index].category)
def test_parses_yaml_no_association(self):
sample = inspect.cleandoc(
u"""TAP version 13
1..2
ok 1 A passing test
# Diagnostic line
---
test: sample yaml
...
not ok 2 A failing test"""
)
parser = Parser()
lines = []
for line in parser.parse_text(sample):
lines.append(line)
self.assertEqual(8, len(lines))
self.assertEqual(13, lines[0].version)
self.assertIsNone(lines[2].yaml_block)
self.assertEqual("diagnostic", lines[3].category)
for line_index in list(range(4, 7)):
self.assertEqual("unknown", lines[line_index].category)
self.assertEqual("test", lines[7].category)
def test_parses_yaml_no_start(self):
sample = inspect.cleandoc(
u"""TAP version 13
1..2
ok 1 A passing test
test: sample yaml
...
not ok 2 A failing test"""
)
parser = Parser()
lines = []
for line in parser.parse_text(sample):
lines.append(line)
self.assertEqual(6, len(lines))
self.assertEqual(13, lines[0].version)
self.assertIsNone(lines[2].yaml_block)
for line_index in list(range(3, 5)):
self.assertEqual("unknown", lines[line_index].category)
self.assertEqual("test", lines[5].category)
def test_malformed_yaml(self):
self.maxDiff = None
sample = inspect.cleandoc(
u"""TAP version 13
1..2
ok 1 A passing test
---
test: sample yaml
\tfail: tabs are not allowed!
...
not ok 2 A failing test"""
)
yaml_err = inspect.cleandoc(
u"""
WARNING: Optional imports not found, TAP 13 output will be
ignored. To parse yaml, see requirements in docs:
https://tappy.readthedocs.io/en/latest/consumers.html#tap-version-13"""
)
parser = Parser()
lines = []
with captured_output() as (parse_out, _):
for line in parser.parse_text(sample):
lines.append(line)
if have_yaml:
self.assertEqual(4, len(lines))
self.assertEqual(13, lines[0].version)
with captured_output() as (out, _):
self.assertIsNone(lines[2].yaml_block)
self.assertEqual(
"Error parsing yaml block. Check formatting.", out.getvalue().strip()
)
self.assertEqual("test", lines[3].category)
self.assertIsNone(lines[3].yaml_block)
else:
self.assertEqual(8, len(lines))
self.assertEqual(13, lines[0].version)
for line_index in list(range(3, 7)):
self.assertEqual("unknown", lines[line_index].category)
self.assertEqual("test", lines[7].category)
self.assertEqual(yaml_err, parse_out.getvalue().strip())
def test_parse_empty_file(self):
temp = tempfile.NamedTemporaryFile(delete=False)
temp.close()
parser = Parser()
lines = []
for line in parser.parse_file(temp.name):
lines.append(line)
self.assertEqual(0, len(lines))
@mock.patch(
"tap.parser.sys.stdin",
StringIO(
u"""1..2
ok 1 A passing test
not ok 2 A failing test"""
),
)
def test_parses_stdin(self):
parser = Parser()
lines = []
for line in parser.parse_stdin():
lines.append(line)
self.assertEqual(3, len(lines))
self.assertEqual("plan", lines[0].category)
self.assertEqual("test", lines[1].category)
self.assertTrue(lines[1].ok)
self.assertEqual("test", lines[2].category)
self.assertFalse(lines[2].ok)
| mblayman/tappy | tap/tests/test_parser.py | Python | bsd-2-clause | 16,677 |
"""Test for upgrading Puppet to Puppet4
:Requirement: Puppet
:CaseAutomation: NotAutomated
:CaseLevel: System
:CaseComponent: ConfigurationManagement
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from robottelo.config import settings
from robottelo.decorators import (
run_in_one_thread,
skip_if_not_set,
stubbed,
tier4,
)
from robottelo.test import CLITestCase
@run_in_one_thread
class PuppetUpgradeTestCase(CLITestCase):
"""Implements Puppet test scenario"""
@classmethod
@skip_if_not_set('clients')
def setUpClass(cls):
super(PuppetUpgradeTestCase, cls).setUpClass()
cls.sat6_hostname = settings.server.hostname
@stubbed()
@tier4
def test_positive_puppet_upgrade(self):
"""Upgrade Satellite/client puppet versions
:id: fd311168-afda-49b6-ac5f-533c4fd411b5
:Steps:
1. register client (p3)
2. prepare puppet module
3. upgrade Satellite from p3 to p4
4. apply puppet module to p3 client
5. upgrade client from p3 to p4
6. apply puppet module to the client
7. register another client (p4)
8. apply puppet module to the client
:expectedresults: multiple asserts along the code that motd module was
applied
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier4
def test_positive_puppet_capsule_upgrade(self):
"""Upgrade standalone Capsule/client puppet versions
:id: 7e8e9047-d012-4fc5-9e6e-f11c1b05df5d
:Steps:
1. register p3 client to p3 Capsule
2. prepare puppet module
3. upgrade Capsule from p3 to p4
4. apply puppet module to p3 client
5. upgrade client from p3 to p4
6. apply puppet module to the p4 client
7. register another p4 client
8. apply puppet module to the p4 client
:expectedresults: multiple asserts along the code that motd module was
applied
:CaseAutomation: notautomated
:CaseLevel: System
"""
@stubbed()
@tier4
def test_positive_puppet_capsule_rolling_upgrade(self):
"""Upgrade by moving clients from old to new Capsule
:id: db23cd30-736f-11e7-87a6-c85b7636aebf
:Steps:
1. register p3 client to p3 Capsule
2. prepare puppet module
3. prepare new p4 Capsule
4. transition p3 client to the new p4 Capsule
5. apply puppet module to p3 client
6. upgrade client from p3 to p4
7. apply puppet module to the p4 client
8. register another p4 client
9. apply puppet module to the p4 client
:expectedresults: multiple asserts along the code that motd module was
applied
:CaseAutomation: notautomated
:CaseLevel: System
"""
| ldjebran/robottelo | tests/foreman/longrun/test_puppet_upgrade.py | Python | gpl-3.0 | 2,986 |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import base64
import hashlib
import json
import logging
import os
import passlib.utils as passutils
import six
import struct
import subprocess
import time
from heatclient.common import event_utils
from heatclient.exc import HTTPNotFound
from openstackclient.i18n import _
from tripleoclient import exceptions
_MIN_PASSWORD_SIZE = 25
_PASSWORD_NAMES = (
"OVERCLOUD_ADMIN_PASSWORD",
"OVERCLOUD_ADMIN_TOKEN",
"OVERCLOUD_CEILOMETER_PASSWORD",
"OVERCLOUD_CEILOMETER_SECRET",
"OVERCLOUD_CINDER_PASSWORD",
"OVERCLOUD_DEMO_PASSWORD",
"OVERCLOUD_GLANCE_PASSWORD",
"OVERCLOUD_HAPROXY_STATS_PASSWORD",
"OVERCLOUD_HEAT_PASSWORD",
"OVERCLOUD_HEAT_STACK_DOMAIN_PASSWORD",
"OVERCLOUD_NEUTRON_PASSWORD",
"OVERCLOUD_NOVA_PASSWORD",
"OVERCLOUD_RABBITMQ_PASSWORD",
"OVERCLOUD_REDIS_PASSWORD",
"OVERCLOUD_SAHARA_PASSWORD",
"OVERCLOUD_SWIFT_HASH",
"OVERCLOUD_SWIFT_PASSWORD",
"NEUTRON_METADATA_PROXY_SHARED_SECRET",
)
def generate_overcloud_passwords(output_file="tripleo-overcloud-passwords"):
"""Create the passwords needed for the overcloud
This will create the set of passwords required by the overcloud, store
them in the output file path and return a dictionary of passwords. If the
    file already exists, the existing passwords will be returned instead.
"""
passwords = {}
if os.path.isfile(output_file):
with open(output_file) as f:
passwords = dict(line.split('=') for line in f.read().splitlines())
for name in _PASSWORD_NAMES:
if not passwords.get(name):
passwords[name] = passutils.generate_password(
size=_MIN_PASSWORD_SIZE)
with open(output_file, 'w') as f:
for name, password in passwords.items():
f.write("{0}={1}\n".format(name, password))
return passwords
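# Usage sketch (illustrative, not part of the original module):
#   passwords = generate_overcloud_passwords('/tmp/demo-overcloud-passwords')
#   admin_password = passwords['OVERCLOUD_ADMIN_PASSWORD']
# Re-running with the same output file keeps existing entries and only fills
# in any missing passwords.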
def check_hypervisor_stats(compute_client, nodes=1, memory=0, vcpu=0):
"""Check the Hypervisor stats meet a minimum value
Check the hypervisor stats match the required counts. This is an
implementation of a command in TripleO with the same name.
:param compute_client: Instance of Nova client
:type compute_client: novaclient.client.v2.Client
:param nodes: The number of nodes to wait for, defaults to 1.
:type nodes: int
:param memory: The amount of memory to wait for in MB, defaults to 0.
:type memory: int
:param vcpu: The number of vcpus to wait for, defaults to 0.
:type vcpu: int
"""
statistics = compute_client.hypervisors.statistics().to_dict()
if all([statistics['count'] >= nodes,
statistics['memory_mb'] >= memory,
statistics['vcpus'] >= vcpu]):
return statistics
else:
return None
def wait_for_stack_ready(orchestration_client, stack_name, marker=None,
action='CREATE', verbose=False):
"""Check the status of an orchestration stack
Get the status of an orchestration stack and check whether it is complete
or failed.
:param orchestration_client: Instance of Orchestration client
:type orchestration_client: heatclient.v1.client.Client
:param stack_name: Name or UUID of stack to retrieve
:type stack_name: string
:param marker: UUID of the last stack event before the current action
:type marker: string
:param action: Current action to check the stack for COMPLETE
:type action: string
:param verbose: Whether to print events
:type verbose: boolean
"""
stack = get_stack(orchestration_client, stack_name)
if not stack:
return False
stack_name = stack.stack_name
while True:
events = event_utils.get_events(orchestration_client,
stack_id=stack_name, nested_depth=2,
event_args={'sort_dir': 'asc',
'marker': marker})
if len(events) >= 1:
# set marker to last event that was received.
marker = getattr(events[-1], 'id', None)
if verbose:
events_log = event_log_formatter(events)
print(events_log)
stack = get_stack(orchestration_client, stack_name)
stack_status = stack.stack_status
if stack_status == '%s_COMPLETE' % action:
print("Stack %(name)s %(status)s" % dict(
name=stack_name, status=stack_status))
return True
elif stack_status == '%s_FAILED' % action:
print("Stack %(name)s %(status)s" % dict(
name=stack_name, status=stack_status))
return False
time.sleep(5)
def event_log_formatter(events):
"""Return the events in log format."""
event_log = []
log_format = ("%(event_time)s "
"[%(rsrc_name)s]: %(rsrc_status)s %(rsrc_status_reason)s")
for event in events:
event_time = getattr(event, 'event_time', '')
log = log_format % {
'event_time': event_time.replace('T', ' '),
'rsrc_name': getattr(event, 'resource_name', ''),
'rsrc_status': getattr(event, 'resource_status', ''),
'rsrc_status_reason': getattr(event, 'resource_status_reason', '')
}
event_log.append(log)
return "\n".join(event_log)
def nodes_in_states(baremetal_client, states):
"""List the introspectable nodes with the right provision_states."""
nodes = baremetal_client.node.list(maintenance=False, associated=False)
return [node for node in nodes if node.provision_state in states]
def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
loops=10, sleep=1):
"""Wait for a given Provisioning state in Ironic
Updating the provisioning state is an async operation, we
need to wait for it to be completed.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param node_uuid: The Ironic node UUID
:type node_uuid: str
:param provision_state: The provisioning state name to wait for
:type provision_state: str
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
:raises exceptions.StateTransitionFailed: if node.last_error is set
"""
for _l in range(0, loops):
node = baremetal_client.node.get(node_uuid)
if node is None:
# The node can't be found in ironic, so we don't need to wait for
# the provision state
return
if node.provision_state == provision_state:
return
# node.last_error should be None after any successful operation
if node.last_error:
raise exceptions.StateTransitionFailed(
"Error transitioning node %(uuid)s to provision state "
"%(state)s: %(error)s. Now in state %(actual)s." % {
'uuid': node_uuid,
'state': provision_state,
'error': node.last_error,
'actual': node.provision_state
}
)
time.sleep(sleep)
raise exceptions.Timeout(
"Node %(uuid)s did not reach provision state %(state)s. "
"Now in state %(actual)s." % {
'uuid': node_uuid,
'state': provision_state,
'actual': node.provision_state
}
)
def wait_for_node_introspection(inspector_client, auth_token, inspector_url,
node_uuids, loops=220, sleep=10):
"""Check the status of Node introspection in Ironic inspector
Gets the status and waits for them to complete.
:param inspector_client: Ironic inspector client
:type inspector_client: ironic_inspector_client
:param node_uuids: List of Node UUID's to wait for introspection
:type node_uuids: [string, ]
:param loops: How many times to loop
:type loops: int
:param sleep: How long to sleep between loops
:type sleep: int
"""
log = logging.getLogger(__name__ + ".wait_for_node_introspection")
node_uuids = node_uuids[:]
for _l in range(0, loops):
for node_uuid in node_uuids:
status = inspector_client.get_status(
node_uuid,
base_url=inspector_url,
auth_token=auth_token)
if status['finished']:
log.debug("Introspection finished for node {0} "
"(Error: {1})".format(node_uuid, status['error']))
node_uuids.remove(node_uuid)
yield node_uuid, status
if not len(node_uuids):
            return
time.sleep(sleep)
if len(node_uuids):
log.error("Introspection didn't finish for nodes {0}".format(
','.join(node_uuids)))
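# Usage sketch (illustrative, not part of the original module): this is a
# generator, so it must be iterated to drive the polling, e.g.
#   for uuid, status in wait_for_node_introspection(
#           inspector_client, auth_token, inspector_url, node_uuids):
#       print(uuid, status['error'])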
def create_environment_file(path="~/overcloud-env.json",
control_scale=1, compute_scale=1,
ceph_storage_scale=0, block_storage_scale=0,
swift_storage_scale=0):
"""Create a heat environment file
Create the heat environment file with the scale parameters.
:param control_scale: Scale value for control roles.
:type control_scale: int
:param compute_scale: Scale value for compute roles.
:type compute_scale: int
:param ceph_storage_scale: Scale value for ceph storage roles.
:type ceph_storage_scale: int
:param block_storage_scale: Scale value for block storage roles.
:type block_storage_scale: int
:param swift_storage_scale: Scale value for swift storage roles.
:type swift_storage_scale: int
"""
env_path = os.path.expanduser(path)
with open(env_path, 'w+') as f:
f.write(json.dumps({
"parameter_defaults": {
"ControllerCount": control_scale,
"ComputeCount": compute_scale,
"CephStorageCount": ceph_storage_scale,
"BlockStorageCount": block_storage_scale,
"ObjectStorageCount": swift_storage_scale}
}))
return env_path
def set_nodes_state(baremetal_client, nodes, transition, target_state,
skipped_states=()):
"""Make all nodes available in the baremetal service for a deployment
For each node, make it available unless it is already available or active.
Available nodes can be used for a deployment and an active node is already
in use.
:param baremetal_client: Instance of Ironic client
:type baremetal_client: ironicclient.v1.client.Client
:param nodes: List of Baremetal Nodes
:type nodes: [ironicclient.v1.node.Node]
:param transition: The state to set for a node. The full list of states
can be found in ironic.common.states.
:type transition: string
:param target_state: The expected result state for a node. For example when
transitioning to 'manage' the result is 'manageable'
:type target_state: string
:param skipped_states: A set of states to skip, for example 'active' nodes
are already deployed and the state can't always be
changed.
:type skipped_states: iterable of strings
    Failed state transitions and timeouts are logged rather than raised;
    the UUID of each node that was processed is yielded back to the caller.
"""
log = logging.getLogger(__name__ + ".set_nodes_state")
for node in nodes:
if node.provision_state in skipped_states:
continue
        log.debug(
            "Setting provision state from '{0}' to '{1}' for Node {2}"
            .format(node.provision_state, transition, node.uuid))
baremetal_client.node.set_provision_state(node.uuid, transition)
try:
wait_for_provision_state(baremetal_client, node.uuid, target_state)
except exceptions.StateTransitionFailed as e:
log.error("FAIL: State transition failed for Node {0}. {1}"
.format(node.uuid, e))
except exceptions.Timeout as e:
log.error("FAIL: Timeout waiting for Node {0}. {1}"
.format(node.uuid, e))
yield node.uuid
def get_hiera_key(key_name):
"""Retrieve a key from the hiera store
    :param key_name: Name of the key to retrieve
    :type key_name: str
"""
command = ["hiera", key_name]
p = subprocess.Popen(command, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def get_config_value(section, option):
p = six.moves.configparser.ConfigParser()
p.read(os.path.expanduser("~/undercloud-passwords.conf"))
return p.get(section, option)
def get_overcloud_endpoint(stack):
for output in stack.to_dict().get('outputs', {}):
if output['output_key'] == 'KeystoneURL':
return output['output_value']
def get_service_ips(stack):
service_ips = {}
for output in stack.to_dict().get('outputs', {}):
service_ips[output['output_key']] = output['output_value']
return service_ips
__password_cache = None
def get_password(pass_name):
"""Retrieve a password by name, such as 'OVERCLOUD_ADMIN_PASSWORD'.
Raises KeyError if password does not exist.
"""
global __password_cache
if __password_cache is None:
__password_cache = generate_overcloud_passwords()
return __password_cache[pass_name]
def get_stack(orchestration_client, stack_name):
"""Get the ID for the current deployed overcloud stack if it exists.
Caller is responsible for checking if return is None
"""
try:
stack = orchestration_client.stacks.get(stack_name)
return stack
except HTTPNotFound:
pass
def remove_known_hosts(overcloud_ip):
"""For a given IP address remove SSH keys from the known_hosts file"""
known_hosts = os.path.expanduser("~/.ssh/known_hosts")
if os.path.exists(known_hosts):
command = ['ssh-keygen', '-R', overcloud_ip, '-f', known_hosts]
subprocess.check_call(command)
def create_cephx_key():
# NOTE(gfidente): Taken from
# https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/new.py#L21
key = os.urandom(16)
header = struct.pack("<hiih", 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key)
def run_shell(cmd):
return subprocess.call([cmd], shell=True)
def all_unique(x):
"""Return True if the collection has no duplications."""
return len(set(x)) == len(x)
def file_checksum(filepath):
"""Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string
"""
if not os.path.isfile(filepath):
raise ValueError("The given file {0} is not a regular "
"file".format(filepath))
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
while True:
fragment = f.read(65536)
if not fragment:
break
checksum.update(fragment)
return checksum.hexdigest()
def check_nodes_count(baremetal_client, stack, parameters, defaults):
"""Check if there are enough available nodes for creating/scaling stack"""
count = 0
if stack:
for param in defaults:
try:
current = int(stack.parameters[param])
except KeyError:
raise ValueError(
"Parameter '%s' was not found in existing stack" % param)
count += parameters.get(param, current)
else:
for param, default in defaults.items():
count += parameters.get(param, default)
# We get number of nodes usable for the stack by getting already
# used (associated) nodes and number of nodes which can be used
# (not in maintenance mode).
# Assumption is that associated nodes are part of the stack (only
# one overcloud is supported).
associated = len(baremetal_client.node.list(associated=True))
available = len(baremetal_client.node.list(associated=False,
maintenance=False))
ironic_nodes_count = associated + available
if count > ironic_nodes_count:
raise exceptions.DeploymentError(
"Not enough nodes - available: {0}, requested: {1}".format(
ironic_nodes_count, count))
else:
return True
def ensure_run_as_normal_user():
"""Check if the command runs under normal user (EUID!=0)"""
if os.geteuid() == 0:
raise exceptions.RootUserExecution(
'This command cannot run under root user.'
' Switch to a normal user.')
def capabilities_to_dict(caps):
"""Convert the Node's capabilities into a dictionary."""
if not caps:
return {}
return dict([key.split(':', 1) for key in caps.split(',')])
def dict_to_capabilities(caps_dict):
"""Convert a dictionary into a string with the capabilities syntax."""
return ','.join(["%s:%s" % (key, value)
for key, value in caps_dict.items()
if value is not None])
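# Example (illustrative, not from the original module): the two helpers above
# round-trip Ironic's capabilities string format, e.g.
#
#     capabilities_to_dict('profile:compute,boot_option:local')
#     # -> {'profile': 'compute', 'boot_option': 'local'}
#     dict_to_capabilities({'profile': 'compute', 'boot_option': 'local'})
#     # -> 'profile:compute,boot_option:local' (pair order may vary)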
def node_get_capabilities(node):
"""Get node capabilities."""
return capabilities_to_dict(node.properties.get('capabilities'))
def node_add_capabilities(bm_client, node, **updated):
"""Add or replace capabilities for a node."""
caps = node_get_capabilities(node)
caps.update(updated)
converted_caps = dict_to_capabilities(caps)
node.properties['capabilities'] = converted_caps
bm_client.node.update(node.uuid, [{'op': 'add',
'path': '/properties/capabilities',
'value': converted_caps}])
return caps
def assign_and_verify_profiles(bm_client, flavors,
assign_profiles=False, dry_run=False):
"""Assign and verify profiles for given flavors.
:param bm_client: ironic client instance
:param flavors: map flavor name -> (flavor object, required count)
:param assign_profiles: whether to allow assigning profiles to nodes
:param dry_run: whether to skip applying actual changes (only makes sense
if assign_profiles is True)
:returns: tuple (errors count, warnings count)
"""
log = logging.getLogger(__name__ + ".assign_and_verify_profiles")
predeploy_errors = 0
predeploy_warnings = 0
# nodes available for deployment and scaling (including active)
bm_nodes = {node.uuid: node
for node in bm_client.node.list(maintenance=False,
detail=True)
if node.provision_state in ('available', 'active')}
# create a pool of unprocessed nodes and record their capabilities
free_node_caps = {uu: node_get_capabilities(node)
for uu, node in bm_nodes.items()}
# TODO(dtantsur): use command-line arguments to specify the order in
# which profiles are processed (might matter for assigning profiles)
profile_flavor_used = False
for flavor_name, (flavor, scale) in flavors.items():
if not scale:
log.debug("Skipping verification of flavor %s because "
"none will be deployed", flavor_name)
continue
profile = flavor.get_keys().get('capabilities:profile')
# If there's only a single flavor, then it's expected for it to have
# no profile assigned.
if not profile and len(flavors) > 1:
predeploy_errors += 1
log.error(
'Error: The %s flavor has no profile associated', flavor_name)
log.error(
'Recommendation: assign a profile with openstack flavor '
'set --property "capabilities:profile"="PROFILE_NAME" %s',
flavor_name)
continue
profile_flavor_used = True
# first collect nodes with known profiles
assigned_nodes = [uu for uu, caps in free_node_caps.items()
if caps.get('profile') == profile]
required_count = scale - len(assigned_nodes)
if required_count < 0:
log.warning('%d nodes with profile %s won\'t be used '
'for deployment now', -required_count, profile)
predeploy_warnings += 1
required_count = 0
elif required_count > 0 and assign_profiles:
# find more nodes by checking XXX_profile capabilities that are
# set by ironic-inspector or manually
capability = '%s_profile' % profile
more_nodes = [
uu for uu, caps in free_node_caps.items()
                # use only nodes without a known profile
if not caps.get('profile') and
caps.get(capability, '').lower() in ('1', 'true') and
# do not assign profiles for active nodes
bm_nodes[uu].provision_state == 'available'
][:required_count]
assigned_nodes.extend(more_nodes)
required_count -= len(more_nodes)
for uu in assigned_nodes:
# make sure these nodes are not reused for other profiles
node_caps = free_node_caps.pop(uu)
# save profile for newly assigned nodes, but only if we
# succeeded in finding enough of them
if not required_count and not node_caps.get('profile'):
node = bm_nodes[uu]
if not dry_run:
node_add_capabilities(bm_client, node, profile=profile)
log.info('Node %s was assigned profile %s', uu, profile)
else:
log.debug('Node %s has profile %s', uu, profile)
if required_count > 0:
log.error(
"Error: only %s of %s requested ironic nodes are tagged "
"to profile %s (for flavor %s)",
scale - required_count, scale, profile, flavor_name
)
log.error(
"Recommendation: tag more nodes using ironic node-update "
"<NODE ID> replace properties/capabilities=profile:%s,"
"boot_option:local", profile)
predeploy_errors += 1
nodes_without_profile = [uu for uu, caps in free_node_caps.items()
if not caps.get('profile')]
if nodes_without_profile and profile_flavor_used:
predeploy_warnings += 1
log.warning(
"There are %d ironic nodes with no profile that will "
"not be used: %s", len(nodes_without_profile),
', '.join(nodes_without_profile)
)
return predeploy_errors, predeploy_warnings
def add_deployment_plan_arguments(parser):
"""Add deployment plan arguments (flavors and scales) to a parser"""
parser.add_argument('--control-scale', type=int,
help=_('New number of control nodes.'))
parser.add_argument('--compute-scale', type=int,
help=_('New number of compute nodes.'))
parser.add_argument('--ceph-storage-scale', type=int,
help=_('New number of ceph storage nodes.'))
parser.add_argument('--block-storage-scale', type=int,
help=_('New number of cinder storage nodes.'))
parser.add_argument('--swift-storage-scale', type=int,
help=_('New number of swift storage nodes.'))
parser.add_argument('--control-flavor',
help=_("Nova flavor to use for control nodes."))
parser.add_argument('--compute-flavor',
help=_("Nova flavor to use for compute nodes."))
parser.add_argument('--ceph-storage-flavor',
help=_("Nova flavor to use for ceph storage "
"nodes."))
parser.add_argument('--block-storage-flavor',
help=_("Nova flavor to use for cinder storage "
"nodes."))
parser.add_argument('--swift-storage-flavor',
help=_("Nova flavor to use for swift storage "
"nodes."))
def get_roles_info(parsed_args):
"""Get flavor name and scale for all deployment roles.
:returns: dict role name -> (flavor name, scale)
"""
return {
'control': (parsed_args.control_flavor, parsed_args.control_scale),
'compute': (parsed_args.compute_flavor, parsed_args.compute_scale),
'ceph-storage': (parsed_args.ceph_storage_flavor,
parsed_args.ceph_storage_scale),
'block-storage': (parsed_args.block_storage_flavor,
parsed_args.block_storage_scale),
'swift-storage': (parsed_args.swift_storage_flavor,
parsed_args.swift_storage_scale)
}
| jonjozwiak/openstack | director-examples/sahara-osp8/utils.py | Python | apache-2.0 | 25,989 |
import json
from splunkdj.tokens import TokenSafeString
def component_context(context, type, id, component_type, require_file, kwargs, tag="div", classes=""):
"""Returns a component template context constructed from the given args."""
options = { 'app': context['app_name'] }
options.update(kwargs)
options = dict((k, _encode_option_value(v)) for (k, v) in options.iteritems())
return {
"type": type,
"tag": tag,
"id": id,
"component_type": component_type,
"style": "display: none;" if component_type == "context" else "",
"require_file": require_file,
"options": json.dumps(options),
"raw_options": options,
"context": context
}
def _encode_option_value(value):
if isinstance(value, TokenSafeString):
return {'type': 'token_safe', 'value': value.value}
else:
return value
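# Example (illustrative, not from the original module; assumes TokenSafeString
# stores its constructor argument on .value): plain values pass through
# unchanged, while token-safe strings are wrapped so the client side knows
# not to re-escape them, e.g.
#
#     _encode_option_value('index=_internal')  # -> 'index=_internal'
#     _encode_option_value(TokenSafeString('$token$'))
#     # -> {'type': 'token_safe', 'value': '$token$'}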
| splunk/splunk-webframework | server/splunkdj/templatetags/tagutils.py | Python | apache-2.0 | 910 |
import re
import logging
import threading
from datetime import timedelta, datetime
from apscheduler.schedulers.background import BackgroundScheduler
import config
import utils.sms
import utils.system
import utils.network
from garden import Garden
from records import Records
from web.web_server import web_server
# One year in minutes = greatest task period
INFINITE_MINUTES = 60*24*365
# Seconds after which will be missed tasks forgotten.
# Smaller than shortest task period so it won't buffer in scheduler.
MISFIRE_GRACE_TIME = 4*60
class Gardener:
"""
Gardener manages garden according schedule and collected sensor data.
* Garden - Controls HW I/O - simple stateless servant for single thread use.
* sensors: temperature (TODO: water level, light density, ...)
* relays: pump, fan, fogger
* Records - Via scheduler collects and store sensors data + current garden state.
* Web server shows
* current garden state (TODO)
* light version of sensor data history
* next planned maintenance action (TODO)
* buttons for manipulation with garden
"""
def __init__(self):
self.garden = Garden()
self.records = Records(sensors=self.garden.sensors)
self.scheduler = BackgroundScheduler({
'apscheduler.executors.default':
{'class': 'apscheduler.executors.pool:ThreadPoolExecutor', 'max_workers': '1'}
}
)
def reschedule_job(self, job_id):
period_minutes = self.compute_period(job_id, self.garden.city_temperature.value)
last_job_run = self.garden.get_last_job_run(job_id)
next_job_run = max((self.get_asap_schedule(), last_job_run + timedelta(minutes=period_minutes)))
self.scheduler.reschedule_job(job_id, trigger='cron',
minute="*/{}".format(period_minutes), start_date=str(next_job_run))
def sensors_refresh(self):
old_temperature = self.garden.city_temperature.value
self.garden.sensors_refresh()
new_temperature = self.garden.city_temperature.value
if old_temperature != new_temperature and new_temperature:
self.reschedule_job('FOGGING')
self.reschedule_job('WATERING')
def send_sms_report(self):
message = 'I am alive.'
for sensor in self.garden.sensors:
message += " {}:{}".format(sensor.name, str(sensor.value))
message += " f:{}/{} w:{}/{}".format(self.get_job_period("FOGGING"),
self.garden.get_job_run_count("FOGGING"),
self.get_job_period("WATERING"),
self.garden.get_job_run_count("WATERING"))
utils.sms.send_sms(message)
def working_loop(self):
# shared cross threads
threading.gardener = self
# default schedule
cron_params = {'trigger': 'cron', 'misfire_grace_time': MISFIRE_GRACE_TIME}
self.scheduler.add_job(self.garden.watering, trigger='date')
self.scheduler.add_job(self.garden.watering, minute='*/20', id='WATERING', **cron_params)
self.scheduler.add_job(self.garden.fogging, minute='*/3', id='FOGGING', **cron_params)
# sensors maintenance
self.scheduler.add_job(self.sensors_refresh, minute='*/10', **cron_params)
self.scheduler.add_job(self.records.write_values, minute='*/10',
kwargs={'file': config.SensorData.FULL_FILE}, **cron_params)
self.scheduler.add_job(self.records.write_values, hour='*',
kwargs={'file': config.SensorData.WEB_FILE}, **cron_params)
self.scheduler.add_job(self.records.trim_records, week='*', # show on web only latest 30 days
kwargs={'file': config.SensorData.WEB_FILE, 'count': 24*7*4}, **cron_params)
self.scheduler.add_job(self.send_sms_report, hour='12', **cron_params)
# TODO: create more oxygen when high temperature via extra long pumping cycle?
# network maintenance
self.scheduler.add_job(utils.network.check_and_fix, hour='*',
kwargs={'address': config.RouterAddress, 'network': 'wlan0'}, **cron_params)
self.scheduler.add_job(utils.system.reboot, hour='0', **cron_params)
logging.info('Starting scheduler.')
self.scheduler.start()
# web server needs main thread for its signal handling
logging.info('Starting web server.')
web_server.run(**config.WebServer)
self.scheduler.shutdown()
def get_job_period(self, job_id):
trigger = self.scheduler.get_job(job_id).trigger
period = re.search(r"cron\[minute='\*/(\d+)'\]", str(trigger))
return int(period.group(1)) if period else 0
def get_job_next_run_time(self, job_id):
return self.scheduler.get_job(job_id).next_run_time
    def start_job(self, job_id):
        # FIXME upstream: left unimplemented. Minimal sketch (assumption): pull
        # the job's next run time forward so the scheduler fires it ASAP.
        self.scheduler.modify_job(job_id, next_run_time=self.get_asap_schedule())
@staticmethod
def get_asap_schedule():
return datetime.now() + timedelta(seconds=2)
@staticmethod
def compute_period(job_id, temperature):
if job_id == 'FOGGING':
            return int(2 * 4 * 60 / (temperature - 4) ** 1.5) if 4 < temperature < 27 else INFINITE_MINUTES
        elif job_id == 'WATERING':
            return int(4 * 24 * 60 / (temperature - 4) ** 2) if 4 < temperature < 27 else INFINITE_MINUTES
else:
assert 0
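# Example (illustrative, not from the original module): at a city temperature
# of 20 degrees the formulas above give
#     compute_period('FOGGING', 20)  -> int(2 * 4 * 60 / 16 ** 1.5) = 7 minutes
#     compute_period('WATERING', 20) -> int(4 * 24 * 60 / 16 ** 2)  = 22 minutes
# while any temperature outside (4, 27) disables the job via INFINITE_MINUTES.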
| Hejtman/ultraGarden | server/gardener.py | Python | gpl-2.0 | 5,537 |
##############################################################################
#
# Copyright (C) 2018-2020 Compassion CH (http://www.compassion.ch)
# @author: Théo Nikles <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, fields, _
from math import ceil
class PaymentOptionsForm(models.AbstractModel):
_name = "cms.form.payment.options"
_inherit = "cms.form"
form_buttons_template = "cms_form_compassion.modal_form_buttons"
form_id = "modal_payment_options"
_form_model = "recurring.contract.group"
payment_mode = fields.Selection(selection=[
# CO-3574 TODO activate LSV/DD with bank authorization form
# ("LSV", "LSV"),
# ("Postfinance Direct Debit", "Postfinance Direct Debit"),
("Permanent Order", "Permanent Order"),
], help="Don't forget to change your standing order accordingly. "
"Please contact us if you want to setup a Direct Debit or LSV "
"automatic withdrawal.")
payment_frequency = fields.Selection(selection=[
("1 month", "1 month"),
("2 month", "2 months"),
("3 month", "3 months"),
("4 month", "4 months"),
("6 month", "6 months"),
("1 year", "1 year"),
])
additional_amount = fields.Selection(selection=[
(8, "8"),
(0, "0")
])
bvr_reference = None
_form_model_fields = [
"advance_billing_months",
"recurring_unit",
"bvr_reference",
"payment_mode_id"
]
_form_fields_order = [
# "payment_mode", TODO remove comment when payment mode change is ready
"payment_frequency",
"additional_amount",
]
def _form_load_payment_mode(self, fname, field, value, **req_values):
payment = self.main_object.with_context(lang="en_US")\
.payment_mode_id.name
return next((payment for modes in self._fields["payment_mode"]
.selection if payment in modes), None)
def _form_load_payment_frequency(self, fname, field, value, **req_values):
group = self.main_object
return f"{group.advance_billing_months} {group.recurring_unit}"
def _form_load_additional_amount(self, fname, field, value, **req_values):
return self.additional_amount
@property
def form_title(self):
return _("Payment options")
@property
def submit_text(self):
return _("Save")
@property
def form_msg_success_updated(self):
return _("Payment options updated.")
def form_init(self, request, main_object=None, **kw):
form = super(PaymentOptionsForm, self).form_init(
request, main_object.sudo(), **kw
)
# Set default value
form.additional_amount = kw["total_amount"] if kw["total_amount"] in [8, 0] else 8
form.bvr_reference = kw["bvr_reference"]
return form
def _if_needed(self, dic):
"""
Update the dictionary only if needed. If values changes from stored
:param dic: the dic to check
:return: dic with non needed key removed
"""
res = {}
for key, val in dic.items():
if not self.main_object[key] == val:
res.update({key: val})
# manual check for payment_mode_id
if "payment_mode_id" in dic and dic["payment_mode_id"] == self.main_object["payment_mode_id"].id:
del res["payment_mode_id"]
return res
def form_extract_values(self, **request_values):
values = super(PaymentOptionsForm, self).form_extract_values(
**request_values
)
group_vals = {}
key = "payment_mode"
if key in values:
if values[key]:
payment_mode_id = self.env["account.payment.mode"]\
.with_context(lang="en_US").search([
("name", "=", values[key]),
])
group_vals.update({
"payment_mode_id": payment_mode_id.id,
})
# Handle BVR reference for Permanent Order
if values[key] == "Permanent Order":
if not self.bvr_reference:
raise ValueError(
"Permanent Order needs a BVR reference."
)
group_vals.update({
"bvr_reference": self.bvr_reference,
})
del values[key]
key = "payment_frequency"
if key in values:
if values[key]:
value, unit = values[key].split()
group_vals.update({
"advance_billing_months": value,
"recurring_unit": unit,
})
del values[key]
key = "additional_amount"
if key in values:
if values[key]:
contracts = self.main_object.mapped("contract_ids").filtered(
lambda c: c.state not in ["cancelled", "terminated"] and
c.type == "S"
)
amount = int(values[key])
amount_by_child = ceil(amount / len(contracts))
for contract in contracts:
amount_for_child = min(amount_by_child, amount)
if len(contract.contract_line_ids) == 1:
gen_product = self.env["product.template"].search(
[("default_code", "=", "fund_gen")]
)
contract_line = self.env["recurring.contract.line"]\
.create({
"contract_id": contract.id,
"amount": amount_for_child,
"quantity": 1,
"product_id": gen_product.id,
})
contract.contract_line_ids += contract_line
else:
for contract_line in contract.contract_line_ids:
if contract_line.amount != 42:
contract_line.write({
"amount": amount_for_child,
})
break
amount -= amount_for_child
del values[key]
return self._if_needed(group_vals)
class PaymentOptionsMultipleForm(models.AbstractModel):
_name = "cms.form.payment.options.multiple"
_inherit = "cms.form.payment.options"
form_id = "modal_payment_options_multiple"
_form_required_fields = [
"payment_mode",
"payment_frequency",
"additional_amount",
]
@property
def form_title(self):
return _("Merge payment groups")
@property
def form_msg_success_updated(self):
return _("Payment groups merged.")
@property
def _form_fieldsets(self):
return [
{
"id": "modal_payment_options_multiple",
"description": _(
"Note that merging is an operation that cannot be undone. "
"Your different groups will be fused together with one "
"payment method, payment frequency and additional amount."
),
"fields": [
"payment_mode",
"payment_frequency",
"additional_amount",
]
}
]
def form_extract_values(self, **request_values):
def filter_fun(c):
return c.state not in ["cancelled", "terminated"] and \
partner == c.mapped("partner_id")
selected_group = self.main_object
partner = selected_group.partner_id
# Select only groups with a sponsorship not cancelled nor terminated
groups = (
partner.contracts_fully_managed.filtered(filter_fun) +
partner.contracts_correspondant.filtered(filter_fun) +
partner.contracts_paid.filtered(filter_fun)
).mapped("group_id")
# Select ALL the contracts in the groups; some groups will be unlinked
contracts = groups.mapped("contract_ids")
for contract in contracts:
contract.write({"group_id": selected_group.id})
for group in groups:
if group != selected_group:
group.unlink()
return super(PaymentOptionsMultipleForm, self).form_extract_values(
**request_values
)
| CompassionCH/compassion-switzerland | website_compassion/forms/my_donations_form.py | Python | agpl-3.0 | 8,735 |
"""Flow based node and edge disjoint paths."""
import networkx as nx
from networkx.exception import NetworkXNoPath
# Define the default maximum flow function to use for the underlying
# maximum flow computations
from networkx.algorithms.flow import edmonds_karp
from networkx.algorithms.flow import preflow_push
from networkx.algorithms.flow import shortest_augmenting_path
default_flow_func = edmonds_karp
# Functions to build auxiliary data structures.
from .utils import build_auxiliary_node_connectivity
from .utils import build_auxiliary_edge_connectivity
from itertools import filterfalse as _filterfalse
__all__ = [
"edge_disjoint_paths",
"node_disjoint_paths",
]
def edge_disjoint_paths(
G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
):
"""Returns the edges disjoint paths between source and target.
Edge disjoint paths are paths that do not share any edge. The
number of edge disjoint paths between source and target is equal
to their edge connectivity.
Parameters
----------
G : NetworkX graph
s : node
Source node for the flow.
t : node
Sink node for the flow.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. The choice of the default function
may change from version to version and should not be relied on.
Default value: None.
cutoff : int
Maximum number of paths to yield. Some of the maximum flow
algorithms, such as :meth:`edmonds_karp` (the default) and
:meth:`shortest_augmenting_path` support the cutoff parameter,
and will terminate when the flow value reaches or exceeds the
cutoff. Other algorithms will ignore this parameter.
Default value: None.
auxiliary : NetworkX DiGraph
Auxiliary digraph to compute flow based edge connectivity. It has
to have a graph attribute called mapping with a dictionary mapping
node names in G and in the auxiliary digraph. If provided
it will be reused instead of recreated. Default value: None.
residual : NetworkX DiGraph
Residual network to compute maximum flow. If provided it will be
reused instead of recreated. Default value: None.
Returns
-------
paths : generator
A generator of edge independent paths.
Raises
------
NetworkXNoPath
If there is no path between source and target.
NetworkXError
If source or target are not in the graph G.
See also
--------
:meth:`node_disjoint_paths`
:meth:`edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
Examples
--------
    We use in this example the platonic icosahedral graph, which has
edge connectivity 5, thus there are 5 edge disjoint paths between any
pair of nodes.
>>> G = nx.icosahedral_graph()
>>> len(list(nx.edge_disjoint_paths(G, 0, 6)))
5
If you need to compute edge disjoint paths on several pairs of
nodes in the same graph, it is recommended that you reuse the
data structures that NetworkX uses in the computation: the
auxiliary digraph for edge connectivity, and the residual
network for the underlying maximum flow computation.
Example of how to compute edge disjoint paths among all pairs of
nodes of the platonic icosahedral graph reusing the data
structures.
>>> import itertools
>>> # You also have to explicitly import the function for
>>> # building the auxiliary digraph from the connectivity package
>>> from networkx.algorithms.connectivity import (
... build_auxiliary_edge_connectivity)
>>> H = build_auxiliary_edge_connectivity(G)
>>> # And the function for building the residual network from the
>>> # flow package
>>> from networkx.algorithms.flow import build_residual_network
>>> # Note that the auxiliary digraph has an edge attribute named capacity
>>> R = build_residual_network(H, 'capacity')
>>> result = {n: {} for n in G}
>>> # Reuse the auxiliary digraph and the residual network by passing them
>>> # as arguments
>>> for u, v in itertools.combinations(G, 2):
... k = len(list(nx.edge_disjoint_paths(G, u, v, auxiliary=H, residual=R)))
... result[u][v] = k
>>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
True
You can also use alternative flow algorithms for computing edge disjoint
paths. For instance, in dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better than
the default :meth:`edmonds_karp` which is faster for sparse
networks with highly skewed degree distributions. Alternative flow
functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> len(list(nx.edge_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
5
Notes
-----
This is a flow based implementation of edge disjoint paths. We compute
the maximum flow between source and target on an auxiliary directed
network. The saturated edges in the residual network after running the
maximum flow algorithm correspond to edge disjoint paths between source
and target in the original network. This function handles both directed
and undirected graphs, and can use all flow algorithms from NetworkX flow
package.
"""
if s not in G:
raise nx.NetworkXError(f"node {s} not in graph")
if t not in G:
raise nx.NetworkXError(f"node {t} not in graph")
if flow_func is None:
flow_func = default_flow_func
if auxiliary is None:
H = build_auxiliary_edge_connectivity(G)
else:
H = auxiliary
# Maximum possible edge disjoint paths
possible = min(H.out_degree(s), H.in_degree(t))
if not possible:
raise NetworkXNoPath
if cutoff is None:
cutoff = possible
else:
cutoff = min(cutoff, possible)
# Compute maximum flow between source and target. Flow functions in
# NetworkX return a residual network.
kwargs = dict(
capacity="capacity", residual=residual, cutoff=cutoff, value_only=True
)
if flow_func is preflow_push:
del kwargs["cutoff"]
if flow_func is shortest_augmenting_path:
kwargs["two_phase"] = True
R = flow_func(H, s, t, **kwargs)
if R.graph["flow_value"] == 0:
raise NetworkXNoPath
# Saturated edges in the residual network form the edge disjoint paths
# between source and target
cutset = [
(u, v)
for u, v, d in R.edges(data=True)
if d["capacity"] == d["flow"] and d["flow"] > 0
]
# This is equivalent of what flow.utils.build_flow_dict returns, but
# only for the nodes with saturated edges and without reporting 0 flows.
flow_dict = {n: {} for edge in cutset for n in edge}
for u, v in cutset:
flow_dict[u][v] = 1
# Rebuild the edge disjoint paths from the flow dictionary.
paths_found = 0
for v in list(flow_dict[s]):
if paths_found >= cutoff:
# preflow_push does not support cutoff: we have to
# keep track of the paths founds and stop at cutoff.
break
path = [s]
if v == t:
path.append(v)
yield path
continue
u = v
while u != t:
path.append(u)
try:
u, _ = flow_dict[u].popitem()
except KeyError:
break
else:
path.append(t)
yield path
paths_found += 1
def node_disjoint_paths(
G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
):
r"""Computes node disjoint paths between source and target.
Node disjoint paths are paths that only share their first and last
nodes. The number of node independent paths between two nodes is
equal to their local node connectivity.
Parameters
----------
G : NetworkX graph
s : node
Source node.
t : node
Target node.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The choice
of the default function may change from version to version and
should not be relied on. Default value: None.
cutoff : int
Maximum number of paths to yield. Some of the maximum flow
algorithms, such as :meth:`edmonds_karp` (the default) and
:meth:`shortest_augmenting_path` support the cutoff parameter,
and will terminate when the flow value reaches or exceeds the
cutoff. Other algorithms will ignore this parameter.
Default value: None.
auxiliary : NetworkX DiGraph
Auxiliary digraph to compute flow based node connectivity. It has
to have a graph attribute called mapping with a dictionary mapping
node names in G and in the auxiliary digraph. If provided
it will be reused instead of recreated. Default value: None.
residual : NetworkX DiGraph
Residual network to compute maximum flow. If provided it will be
reused instead of recreated. Default value: None.
Returns
-------
paths : generator
Generator of node disjoint paths.
Raises
------
NetworkXNoPath
If there is no path between source and target.
NetworkXError
If source or target are not in the graph G.
Examples
--------
    We use in this example the platonic icosahedral graph, which has
node connectivity 5, thus there are 5 node disjoint paths between any
pair of non neighbor nodes.
>>> G = nx.icosahedral_graph()
>>> len(list(nx.node_disjoint_paths(G, 0, 6)))
5
If you need to compute node disjoint paths between several pairs of
nodes in the same graph, it is recommended that you reuse the
data structures that NetworkX uses in the computation: the
auxiliary digraph for node connectivity and node cuts, and the
residual network for the underlying maximum flow computation.
Example of how to compute node disjoint paths reusing the data
structures:
>>> # You also have to explicitly import the function for
>>> # building the auxiliary digraph from the connectivity package
>>> from networkx.algorithms.connectivity import (
... build_auxiliary_node_connectivity)
>>> H = build_auxiliary_node_connectivity(G)
>>> # And the function for building the residual network from the
>>> # flow package
>>> from networkx.algorithms.flow import build_residual_network
>>> # Note that the auxiliary digraph has an edge attribute named capacity
>>> R = build_residual_network(H, 'capacity')
>>> # Reuse the auxiliary digraph and the residual network by passing them
>>> # as arguments
>>> len(list(nx.node_disjoint_paths(G, 0, 6, auxiliary=H, residual=R)))
5
You can also use alternative flow algorithms for computing node disjoint
paths. For instance, in dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better than
the default :meth:`edmonds_karp` which is faster for sparse
networks with highly skewed degree distributions. Alternative flow
functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> len(list(nx.node_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
5
Notes
-----
This is a flow based implementation of node disjoint paths. We compute
the maximum flow between source and target on an auxiliary directed
network. The saturated edges in the residual network after running the
maximum flow algorithm correspond to node disjoint paths between source
and target in the original network. This function handles both directed
and undirected graphs, and can use all flow algorithms from NetworkX flow
package.
See also
--------
:meth:`edge_disjoint_paths`
:meth:`node_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
"""
if s not in G:
raise nx.NetworkXError(f"node {s} not in graph")
if t not in G:
raise nx.NetworkXError(f"node {t} not in graph")
if auxiliary is None:
H = build_auxiliary_node_connectivity(G)
else:
H = auxiliary
mapping = H.graph.get("mapping", None)
if mapping is None:
raise nx.NetworkXError("Invalid auxiliary digraph.")
# Maximum possible edge disjoint paths
possible = min(H.out_degree(f"{mapping[s]}B"), H.in_degree(f"{mapping[t]}A"))
if not possible:
raise NetworkXNoPath
if cutoff is None:
cutoff = possible
else:
cutoff = min(cutoff, possible)
kwargs = dict(flow_func=flow_func, residual=residual, auxiliary=H, cutoff=cutoff)
# The edge disjoint paths in the auxiliary digraph correspond to the node
# disjoint paths in the original graph.
paths_edges = edge_disjoint_paths(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs)
for path in paths_edges:
# Each node in the original graph maps to two nodes in auxiliary graph
yield list(_unique_everseen(H.nodes[node]["id"] for node in path))
def _unique_everseen(iterable):
# Adapted from https://docs.python.org/3/library/itertools.html examples
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
seen = set()
seen_add = seen.add
for element in _filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
| SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/connectivity/disjoint_paths.py | Python | gpl-3.0 | 14,544 |
# -*- coding: utf-8 -*-
"""
Various i18n functions.
Helper functions for both the internal translation system
and for TranslateWiki-based translations.
By default messages are assumed to reside in a package called
'scripts.i18n'. In pywikibot 2.0, that package is not packaged
with pywikibot, and pywikibot 2.0 does not have a hard dependency
on any i18n messages. However, there are three user input questions
in pagegenerators which will use i18n messages if they can be loaded.
The default message location may be changed by calling
L{set_message_package} with a package name. The package must contain
an __init__.py, and a message bundle called 'pywikibot' containing
messages. See L{twntranslate} for more information on the messages.
"""
#
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import sys
import re
import locale
import json
import os
import pkgutil
from collections import defaultdict
from pywikibot import Error
from .plural import plural_rules
import pywikibot
from . import config2 as config
if sys.version_info[0] > 2:
basestring = (str, )
PLURAL_PATTERN = r'{{PLURAL:(?:%\()?([^\)]*?)(?:\)d)?\|(.*?)}}'
# Package name for the translation messages. The messages data must loaded
# relative to that package name. In the top of this package should be
# directories named after for each script/message bundle, and each directory
# should contain JSON files called <lang>.json
_messages_package_name = 'scripts.i18n'
# Flag to indicate whether translation messages are available
_messages_available = None
# Cache of translated messages
_cache = defaultdict(dict)
def set_messages_package(package_name):
"""Set the package name where i18n messages are located."""
global _messages_package_name
global _messages_available
_messages_package_name = package_name
_messages_available = None
def messages_available():
"""
Return False if there are no i18n messages available.
To determine if messages are available, it looks for the package name
set using L{set_messages_package} for a message bundle called 'pywikibot'
containing messages.
@rtype: bool
"""
global _messages_available
if _messages_available is not None:
return _messages_available
try:
__import__(_messages_package_name)
except ImportError:
_messages_available = False
return False
_messages_available = True
return True
def _altlang(code):
"""Define fallback languages for particular languages.
If no translation is available to a specified language, translate() will
try each of the specified fallback languages, in order, until it finds
one with a translation, with 'en' and '_default' as a last resort.
For example, if for language 'xx', you want the preference of languages
to be: xx > fr > ru > en, you let this method return ['fr', 'ru'].
This code is used by other translating methods below.
@param code: The language code
@type code: string
@return: language codes
@rtype: list of str
"""
# Akan
if code in ['ak', 'tw']:
return ['ak', 'tw']
# Amharic
if code in ['aa', 'ti']:
return ['am']
# Arab
if code in ['arc', 'arz', 'so']:
return ['ar']
if code == 'kab':
return ['ar', 'fr']
# Bulgarian
if code in ['cu', 'mk']:
return ['bg', 'sr', 'sh']
# Czech
if code in ['cs', 'sk']:
return ['cs', 'sk']
# German
if code in ['bar', 'frr', 'ksh', 'pdc', 'pfl']:
return ['de']
if code == 'lb':
return ['de', 'fr']
if code in ['als', 'gsw']:
return ['als', 'gsw', 'de']
if code == 'nds':
return ['nds-nl', 'de']
if code in ['dsb', 'hsb']:
return ['hsb', 'dsb', 'de']
if code == 'sli':
return ['de', 'pl']
if code == 'rm':
return ['de', 'it']
if code == 'stq':
return ['nds', 'de']
# Greek
if code in ['grc', 'pnt']:
return ['el']
# Esperanto
if code in ['io', 'nov']:
return ['eo']
# Spanish
if code in ['an', 'arn', 'ast', 'ay', 'ca', 'ext', 'lad', 'nah', 'nv', 'qu',
'yua']:
return ['es']
if code in ['gl', 'gn']:
return ['es', 'pt']
if code == 'eu':
return ['es', 'fr']
if code == 'cbk-zam':
return ['es', 'tl']
# Estonian
if code in ['fiu-vro', 'vro']:
return ['fiu-vro', 'vro', 'et']
if code == 'liv':
return ['et', 'lv']
# Persian (Farsi)
if code == 'ps':
return ['fa']
if code in ['glk', 'mzn']:
return ['glk', 'mzn', 'fa', 'ar']
# Finnish
if code == 'vep':
return ['fi', 'ru']
if code == 'fit':
return ['fi', 'sv']
# French
if code in ['bm', 'br', 'ht', 'kg', 'ln', 'mg', 'nrm', 'pcd',
'rw', 'sg', 'ty', 'wa']:
return ['fr']
if code == 'oc':
return ['fr', 'ca', 'es']
if code in ['co', 'frp']:
return ['fr', 'it']
# Hindi
if code in ['sa']:
return ['hi']
if code in ['ne', 'new']:
return ['ne', 'new', 'hi']
if code in ['bh', 'bho']:
return ['bh', 'bho']
# Indonesian and Malay
if code in ['ace', 'bug', 'bjn', 'id', 'jv', 'ms', 'su']:
return ['id', 'ms', 'jv']
if code == 'map-bms':
return ['jv', 'id', 'ms']
# Inuit languages
if code in ['ik', 'iu']:
return ['iu', 'kl']
if code == 'kl':
return ['da', 'iu', 'no', 'nb']
# Italian
if code in ['eml', 'fur', 'lij', 'lmo', 'nap', 'pms', 'roa-tara', 'sc',
'scn', 'vec']:
return ['it']
# Lithuanian
if code in ['bat-smg', 'sgs']:
return ['bat-smg', 'sgs', 'lt']
# Latvian
if code == 'ltg':
return ['lv']
# Dutch
if code in ['af', 'fy', 'li', 'pap', 'srn', 'vls', 'zea']:
return ['nl']
    if code == 'nds-nl':
return ['nds', 'nl']
# Polish
if code in ['csb', 'szl']:
return ['pl']
# Portuguese
if code in ['fab', 'mwl', 'tet']:
return ['pt']
# Romanian
if code in ['roa-rup', 'rup']:
return ['roa-rup', 'rup', 'ro']
if code == 'mo':
return ['ro']
# Russian and Belarusian
if code in ['ab', 'av', 'ba', 'bxr', 'ce', 'cv', 'inh', 'kk', 'koi', 'krc',
'kv', 'ky', 'lbe', 'lez', 'mdf', 'mhr', 'mn', 'mrj', 'myv',
'os', 'sah', 'tg', 'udm', 'uk', 'xal']:
return ['ru']
if code in ['kbd', 'ady']:
return ['kbd', 'ady', 'ru']
if code == 'tt':
return ['tt-cyrl', 'ru']
if code in ['be', 'be-x-old', 'be-tarask']:
return ['be', 'be-x-old', 'be-tarask', 'ru']
if code == 'kaa':
return ['uz', 'ru']
# Serbocroatian
if code in ['bs', 'hr', 'sh']:
return ['sh', 'hr', 'bs', 'sr', 'sr-el']
if code == 'sr':
return ['sr-el', 'sh', 'hr', 'bs']
# Tagalog
if code in ['bcl', 'ceb', 'ilo', 'pag', 'pam', 'war']:
return ['tl']
# Turkish and Kurdish
if code in ['diq', 'ku']:
return ['ku', 'ku-latn', 'tr']
if code == 'gag':
return ['tr']
if code == 'ckb':
return ['ku']
# Ukrainian
if code in ['crh', 'crh-latn']:
return ['crh', 'crh-latn', 'uk', 'ru']
if code in ['rue']:
return ['uk', 'ru']
# Chinese
if code in ['zh-classical', 'lzh', 'minnan', 'zh-min-nan', 'nan', 'zh-tw',
'zh', 'zh-hans']:
return ['zh', 'zh-hans', 'zh-tw', 'zh-cn', 'zh-classical', 'lzh']
if code in ['cdo', 'gan', 'hak', 'ii', 'wuu', 'za', 'zh-classical', 'lzh',
'zh-cn', 'zh-yue', 'yue']:
        return ['zh', 'zh-hans', 'zh-cn', 'zh-tw', 'zh-classical', 'lzh']
# Scandinavian languages
if code in ['da', 'sv']:
return ['da', 'no', 'nb', 'sv', 'nn']
if code in ['fo', 'is']:
return ['da', 'no', 'nb', 'nn', 'sv']
if code == 'nn':
return ['no', 'nb', 'sv', 'da']
if code in ['no', 'nb']:
return ['no', 'nb', 'da', 'nn', 'sv']
if code == 'se':
return ['sv', 'no', 'nb', 'nn', 'fi']
# Other languages
if code in ['bi', 'tpi']:
return ['bi', 'tpi']
if code == 'yi':
return ['he', 'de']
if code in ['ia', 'ie']:
return ['ia', 'la', 'it', 'fr', 'es']
if code == 'xmf':
return ['ka']
if code in ['nso', 'st']:
return ['st', 'nso']
if code in ['kj', 'ng']:
return ['kj', 'ng']
if code in ['meu', 'hmo']:
return ['meu', 'hmo']
    if code == 'as':
return ['bn']
# Default value
return []
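# Example (illustrative, not from the original module): the fallback chains
# defined above give e.g.
#     _altlang('als') -> ['als', 'gsw', 'de']
#     _altlang('frp') -> ['fr', 'it']
#     _altlang('xyz') -> []   (unknown codes only fall back to '_default'/'en'
#                              in the callers, not here)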
class TranslationError(Error, ImportError):
"""Raised when no correct translation could be found."""
# Inherits from ImportError, as this exception is now used
# where previously an ImportError would have been raised,
# and may have been caught by scripts as such.
pass
def _get_translation(lang, twtitle):
"""
Return message of certain twtitle if exists.
For internal use, don't use it directly.
"""
if twtitle in _cache[lang]:
return _cache[lang][twtitle]
message_bundle = twtitle.split('-')[0]
trans_text = None
filename = '%s/%s.json' % (message_bundle, lang)
try:
trans_text = pkgutil.get_data(
_messages_package_name, filename).decode('utf-8')
except (OSError, IOError): # file open can cause several exceptions
_cache[lang][twtitle] = None
return
transdict = json.loads(trans_text)
_cache[lang].update(transdict)
try:
return transdict[twtitle]
except KeyError:
return
def _extract_plural(code, message, parameters):
"""Check for the plural variants in message and replace them.
@param message: the message to be replaced
@type message: unicode string
@param parameters: plural parameters passed from other methods
@type parameters: int, basestring, tuple, list, dict
"""
plural_items = re.findall(PLURAL_PATTERN, message)
if plural_items: # we found PLURAL patterns, process it
if len(plural_items) > 1 and isinstance(parameters, (tuple, list)) and \
len(plural_items) != len(parameters):
raise ValueError("Length of parameter does not match PLURAL "
"occurrences.")
i = 0
for selector, variants in plural_items:
if isinstance(parameters, dict):
num = int(parameters[selector])
elif isinstance(parameters, basestring):
num = int(parameters)
elif isinstance(parameters, (tuple, list)):
num = int(parameters[i])
i += 1
else:
num = parameters
# TODO: check against plural_rules[code]['nplurals']
try:
index = plural_rules[code]['plural'](num)
except KeyError:
index = plural_rules['_default']['plural'](num)
except TypeError:
# we got an int, not a function
index = plural_rules[code]['plural']
repl = variants.split('|')[index]
message = re.sub(PLURAL_PATTERN, repl, message, count=1)
return message
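# Example (illustrative, not from the original module): the PLURAL tag is
# resolved before any normal %-formatting, e.g.
#     _extract_plural('en',
#                     'Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}.',
#                     {'num': 2})
#     # -> 'Bot: Changing %(num)s pages.'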
DEFAULT_FALLBACK = ('_default', )
def translate(code, xdict, parameters=None, fallback=False):
"""Return the most appropriate translation from a translation dict.
Given a language code and a dictionary, returns the dictionary's value for
key 'code' if this key exists; otherwise tries to return a value for an
alternative language that is most applicable to use on the wiki in
language 'code' except fallback is False.
The language itself is always checked first, then languages that
have been defined to be alternatives, and finally English. If none of
the options gives result, we just take the one language from xdict which may
not be always the same. When fallback is iterable it'll return None if no
code applies (instead of returning one).
For PLURAL support have a look at the twntranslate method
@param code: The language code
@type code: string or Site object
@param xdict: dictionary with language codes as keys or extended dictionary
with family names as keys containing language dictionaries or
a single (unicode) string. May contain PLURAL tags as
described in twntranslate
@type xdict: dict, string, unicode
@param parameters: For passing (plural) parameters
@type parameters: dict, string, unicode, int
@param fallback: Try an alternate language code. If it's iterable it'll
also try those entries and choose the first match.
@type fallback: boolean or iterable
"""
family = pywikibot.config.family
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
family = code.family.name
code = code.code
# Check whether xdict has multiple projects
if isinstance(xdict, dict):
if family in xdict:
xdict = xdict[family]
elif 'wikipedia' in xdict:
xdict = xdict['wikipedia']
# Get the translated string
if not isinstance(xdict, dict):
trans = xdict
elif not xdict:
trans = None
else:
codes = [code]
if fallback is True:
codes += _altlang(code) + ['_default', 'en']
elif fallback is not False:
codes += list(fallback)
for code in codes:
if code in xdict:
trans = xdict[code]
break
else:
if fallback is not True:
# this shouldn't simply return "any one" code but when fallback
# was True before 65518573d2b0, it did just that. When False it
# did just return None. It's now also returning None in the new
# iterable mode.
return
code = list(xdict.keys())[0]
trans = xdict[code]
if trans is None:
return # return None if we have no translation found
if parameters is None:
return trans
# else we check for PLURAL variants
trans = _extract_plural(code, trans, parameters)
if parameters:
try:
return trans % parameters
except (KeyError, TypeError):
# parameter is for PLURAL variants only, don't change the string
pass
return trans
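# Example (illustrative, not from the original module): with a plain language
# dictionary the lookup and fallback behave like
#     translate('de', {'en': 'page', 'de': 'Seite'}) -> 'Seite'
#     translate('de-ch', {'en': 'page'}, fallback=True) -> 'page'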
def twtranslate(code, twtitle, parameters=None, fallback=True):
"""
Translate a message.
The translations are retrieved from json files in messages_package_name.
fallback parameter must be True for i18n and False for L10N or testing
purposes.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: For passing parameters.
@param fallback: Try an alternate language code
@type fallback: boolean
"""
if not messages_available():
raise TranslationError(
'Unable to load messages package %s for bundle %s'
'\nIt can happen due to lack of i18n submodule or files. '
'Read https://mediawiki.org/wiki/PWB/i18n'
% (_messages_package_name, twtitle))
code_needed = False
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
lang = code.code
# check whether we need the language code back
elif isinstance(code, list):
lang = code.pop()
code_needed = True
else:
lang = code
# There are two possible failure modes: the translation dict might not have
# the language altogether, or a specific key could be untranslated. Both
# modes are caught with the KeyError.
langs = [lang]
if fallback:
langs += _altlang(lang) + ['en']
for alt in langs:
trans = _get_translation(alt, twtitle)
if trans:
break
else:
raise TranslationError(
'No English translation has been defined for TranslateWiki key'
' %r\nIt can happen due to lack of i18n submodule or files. '
'Read https://mediawiki.org/wiki/PWB/i18n' % twtitle)
# send the language code back via the given list
if code_needed:
code.append(alt)
if parameters:
return trans % parameters
else:
return trans
# Maybe this function should be merged with twtranslate
def twntranslate(code, twtitle, parameters=None):
r"""Translate a message with plural support.
Support is implemented like in MediaWiki extension. If the TranslateWiki
message contains a plural tag inside which looks like::
{{PLURAL:<number>|<variant1>|<variant2>[|<variantn>]}}
it takes that variant calculated by the plural_rules depending on the number
value. Multiple plurals are allowed.
As an examples, if we had several json dictionaries in test folder like:
en.json:
{
"test-plural": "Bot: Changing %(num)s {{PLURAL:%(num)d|page|pages}}.",
}
fr.json:
{
"test-plural": "Robot: Changer %(descr)s {{PLURAL:num|une page|quelques pages}}.",
}
and so on.
>>> from pywikibot import i18n
>>> i18n.set_messages_package('tests.i18n')
>>> # use a number
>>> str(i18n.twntranslate('en', 'test-plural', 0) % {'num': 'no'})
'Bot: Changing no pages.'
>>> # use a string
>>> str(i18n.twntranslate('en', 'test-plural', '1') % {'num': 'one'})
'Bot: Changing one page.'
>>> # use a dictionary
>>> str(i18n.twntranslate('en', 'test-plural', {'num':2}))
'Bot: Changing 2 pages.'
>>> # use additional format strings
>>> str(i18n.twntranslate('fr', 'test-plural', {'num': 1, 'descr': 'seulement'}))
'Robot: Changer seulement une page.'
>>> # use format strings also outside
>>> str(i18n.twntranslate('fr', 'test-plural', 10) % {'descr': 'seulement'})
'Robot: Changer seulement quelques pages.'
The translations are retrieved from i18n.<package>, based on the callers
import table.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: For passing (plural) parameters.
"""
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
code = code.code
# we send the code via list and get the alternate code back
code = [code]
trans = twtranslate(code, twtitle)
# get the alternate language code modified by twtranslate
lang = code.pop()
# check for PLURAL variants
trans = _extract_plural(lang, trans, parameters)
# we always have a dict for replacement of translatewiki messages
if parameters and isinstance(parameters, dict):
try:
return trans % parameters
except KeyError:
# parameter is for PLURAL variants only, don't change the string
pass
return trans
def twhas_key(code, twtitle):
"""
Check if a message has a translation in the specified language code.
The translations are retrieved from i18n.<package>, based on the callers
import table.
No code fallback is made.
@param code: The language code
@param twtitle: The TranslateWiki string title, in <package>-<key> format
"""
# If a site is given instead of a code, use its language
if hasattr(code, 'code'):
code = code.code
transdict = _get_translation(code, twtitle)
if transdict is None:
return False
return True
def twget_keys(twtitle):
"""
Return all language codes for a special message.
@param twtitle: The TranslateWiki string title, in <package>-<key> format
"""
# obtain the directory containing all the json files for this package
package = twtitle.split("-")[0]
mod = __import__(_messages_package_name, fromlist=[str('__file__')])
pathname = os.path.join(os.path.dirname(mod.__file__), package)
# build a list of languages in that directory
langs = [filename.partition('.')[0]
for filename in sorted(os.listdir(pathname))
if filename.endswith('.json')]
# exclude languages does not have this specific message in that package
# i.e. an incomplete set of translated messages.
return [lang for lang in langs
if lang != 'qqq' and
_get_translation(lang, twtitle)]
def input(twtitle, parameters=None, password=False, fallback_prompt=None):
"""
Ask the user a question, return the user's answer.
The prompt message is retrieved via L{twtranslate} and either uses the
config variable 'userinterface_lang' or the default locale as the language
code.
@param twtitle: The TranslateWiki string title, in <package>-<key> format
@param parameters: The values which will be applied to the translated text
@param password: Hides the user's input (for password entry)
@param fallback_prompt: The English prompt if i18n is not available.
@rtype: unicode string
"""
if not messages_available():
if not fallback_prompt:
raise TranslationError(
'Unable to load messages package %s for bundle %s'
% (_messages_package_name, twtitle))
else:
prompt = fallback_prompt
else:
code = config.userinterface_lang or \
locale.getdefaultlocale()[0].split('_')[0]
prompt = twtranslate(code, twtitle, parameters)
return pywikibot.input(prompt, password)
| emijrp/pywikibot-core | pywikibot/i18n.py | Python | mit | 21,745 |
import json
import threading
import unittest
import urllib2
import pytest
from django_comment_client.tests.mock_cs_server.mock_cs_server import MockCommentServiceServer
class MockCommentServiceServerTest(unittest.TestCase):
'''
A mock version of the Comment Service server that listens on a local
    port and responds with pre-defined messages.
'''
shard = 4
def setUp(self):
super(MockCommentServiceServerTest, self).setUp()
# This is a test of the test setup,
# so it does not need to run as part of the unit test suite
# You can re-enable it by commenting out the line below
pytest.skip()
# Create the server
server_port = 4567
self.server_url = 'http://127.0.0.1:%d' % server_port
# Start up the server and tell it that by default it should
# return this as its json response
self.expected_response = {'username': 'user100', 'external_id': '4'}
self.server = MockCommentServiceServer(port_num=server_port,
response=self.expected_response)
self.addCleanup(self.server.shutdown)
# Start the server in a separate daemon thread
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
def test_new_user_request(self):
"""
Test the mock comment service using an example
of how you would create a new user
"""
# Send a request
values = {'username': u'user100',
'external_id': '4', 'email': u'[email protected]'}
data = json.dumps(values)
headers = {'Content-Type': 'application/json', 'Content-Length': len(data), 'X-Edx-Api-Key': 'TEST_API_KEY'}
req = urllib2.Request(self.server_url + '/api/v1/users/4', data, headers)
# Send the request to the mock cs server
response = urllib2.urlopen(req)
# Receive the reply from the mock cs server
response_dict = json.loads(response.read())
# You should have received the response specified in the setup above
self.assertEqual(response_dict, self.expected_response)
| ahmedaljazzar/edx-platform | lms/djangoapps/django_comment_client/tests/mock_cs_server/test_mock_cs_server.py | Python | agpl-3.0 | 2,228 |
from django.conf.urls import patterns, url
from .views import HomeView
urlpatterns = [
url(r'^$', HomeView.as_view(), name='index'),
] | SafePodOrg/SafePodWeb | safepod_site/home/urls.py | Python | gpl-3.0 | 171 |
# This file is heavily inspired by django.utils.version
def get_version(version):
"""Return a PEP 440-compliant version number from VERSION."""
version = get_complete_version(version)
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|rc}N - for alpha, beta, and rc releases
main = get_main_version(version)
sub = ""
if version[3] != "final":
mapping = {"alpha": "a", "beta": "b", "rc": "rc", "dev": ".dev"}
sub = mapping[version[3]] + str(version[4])
return main + sub
def get_main_version(version=None):
"""Return main version (X.Y[.Z]) from VERSION."""
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
return ".".join(str(x) for x in version[:parts])
def get_complete_version(version=None):
"""
Return a tuple of the Wagtail version. If version argument is non-empty,
check for correctness of the tuple provided.
"""
if version is None:
from wagtail import VERSION as version
else:
assert len(version) == 5
assert version[3] in ("dev", "alpha", "beta", "rc", "final")
return version
def get_semver_version(version):
"Returns the semver version (X.Y.Z[-(alpha|beta)]) from VERSION"
main = ".".join(str(x) for x in version[:3])
sub = ""
if version[3] != "final":
sub = "-{}.{}".format(*version[3:])
return main + sub
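if __name__ == "__main__":
    # Quick illustrative check of the helpers above (version tuples are
    # arbitrary examples).
    assert get_version((2, 15, 1, "alpha", 1)) == "2.15.1a1"
    assert get_version((4, 0, 0, "final", 0)) == "4.0"
    assert get_semver_version((2, 15, 1, "alpha", 1)) == "2.15.1-alpha.1"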
| wagtail/wagtail | wagtail/utils/version.py | Python | bsd-3-clause | 1,478 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "sankey.link"
_path_str = "sankey.link.line"
_valid_props = {"color", "colorsrc", "width", "widthsrc"}
# color
# -----
@property
def color(self):
"""
Sets the color of the `line` around each `link`.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the `line` around each `link`.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the `line` around each `link`.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the `line` around each
`link`.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.link.Line`
color
Sets the color of the `line` around each `link`.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the `line` around each
`link`.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.link.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.link.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("widthsrc", None)
_v = widthsrc if widthsrc is not None else _v
if _v is not None:
self["widthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
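if __name__ == "__main__":
    # Illustrative sketch: build a sankey.link.Line directly and inspect its
    # JSON form (the color and width values are arbitrary examples).
    line = Line(color="rgba(0, 0, 0, 0.35)", width=0.5)
    print(line.to_plotly_json())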
| plotly/plotly.py | packages/python/plotly/plotly/graph_objs/sankey/link/_line.py | Python | mit | 7,566 |
#!/usr/bin/env python3
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
from optparse import OptionParser
import os
import sys
import configparser
import packagesimportutil
# append worker binary source path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
packagesimportutil.add_all_packages_under_automationworker_to_sys_path()
# since we are using the worker httpclient, some configuration values are expected
from worker import configuration3 as configuration
from worker import diydirs
def get_config_file_path():
return os.path.join(diydirs.DIY_STATE_PATH, "worker.conf")
def set_signature_enforcement_policy(node_locked):
config_file_path = get_config_file_path()
if os.path.isfile(config_file_path) is False:
sys.stderr.write("Could not find DIY automation worker configuration file\nPlease check if the DIY automation worker is registered for the specified workspace\n")
sys.exit(1)
worker_optional_section = configuration.WORKER_OPTIONAL_CONFIG_SECTION
enforce_runbook_signature_validation = configuration.ENFORCE_RUNBOOK_SIGNATURE_VALIDATION
config = configparser.ConfigParser()
config.read(config_file_path)
config.set(worker_optional_section, enforce_runbook_signature_validation, str(node_locked))
config_file_handle = open(config_file_path, 'w')
config.write(config_file_handle)
config_file_handle.close()
# Verify that the entry was made
config.read(config_file_path)
    assert config.get(worker_optional_section, enforce_runbook_signature_validation) == str(node_locked)
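# Illustrative direct call (a sketch; the module's entry point is expected to
# drive this from command-line options parsed with OptionParser):
#
#   set_signature_enforcement_policy(True)   # require signed runbooks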
| MSFTOSSMgmt/WPSDSCLinux | Providers/nxOMSAutomationWorker/automationworker/3.x/scripts/require_runbook_signature3.py | Python | mit | 1,687 |
# Created by Pearu Peterson, September 2002
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_less)
import pytest
from pytest import raises as assert_raises
from scipy.fftpack import ifft, fft, fftn, ifftn, rfft, irfft, fft2
from numpy import (arange, add, array, asarray, zeros, dot, exp, pi,
swapaxes, double, cdouble)
import numpy as np
import numpy.fft
from numpy.random import rand
# "large" composite numbers supported by FFTPACK
LARGE_COMPOSITE_SIZES = [
2**13,
2**5 * 3**5,
2**3 * 3**3 * 5**2,
]
SMALL_COMPOSITE_SIZES = [
2,
2*3*5,
2*2*3*3,
]
# prime
LARGE_PRIME_SIZES = [
2011
]
SMALL_PRIME_SIZES = [
29
]
def _assert_close_in_norm(x, y, rtol, size, rdt):
# helper function for testing
err_msg = "size: %s rdt: %s" % (size, rdt)
assert_array_less(np.linalg.norm(x - y), rtol*np.linalg.norm(x), err_msg)
def random(size):
return rand(*size)
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
return data
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n, dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w), x)/n
return y
def direct_dftn(x):
x = asarray(x)
for axis in range(len(x.shape)):
x = fft(x, axis=axis)
return x
def direct_idftn(x):
x = asarray(x)
for axis in range(len(x.shape)):
x = ifft(x, axis=axis)
return x
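# Note on the two helpers below: they mirror FFTPACK's packed storage for real
# transforms. direct_rdft returns [Re(y0), Re(y1), Im(y1), Re(y2), Im(y2), ...],
# omitting Im(y0) (always zero for real input) and, when n is even, Im(y_{n/2})
# (also zero); direct_irdft unpacks that layout into a full complex spectrum
# and applies the inverse DFT.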
def direct_rdft(x):
x = asarray(x)
n = len(x)
w = -arange(n)*(2j*pi/n)
r = zeros(n, dtype=double)
for i in range(n//2+1):
y = dot(exp(i*w), x)
if i:
r[2*i-1] = y.real
if 2*i < n:
r[2*i] = y.imag
else:
r[0] = y.real
return r
def direct_irdft(x):
x = asarray(x)
n = len(x)
x1 = zeros(n, dtype=cdouble)
for i in range(n//2+1):
if i:
if 2*i < n:
x1[i] = x[2*i-1] + 1j*x[2*i]
x1[n-i] = x[2*i-1] - 1j*x[2*i]
else:
x1[i] = x[2*i-1]
else:
x1[0] = x[0]
return direct_idft(x1).real
class _TestFFTBase(object):
def setup_method(self):
self.cdt = None
self.rdt = None
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], dtype=self.cdt)
y = fft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_dft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], dtype=self.cdt)
assert_array_almost_equal(fft(x),direct_dft(x))
def test_n_argument_real(self):
x1 = np.array([1,2,3,4], dtype=self.rdt)
x2 = np.array([1,2,3,4], dtype=self.rdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def _test_n_argument_complex(self):
x1 = np.array([1,2,3,4+1j], dtype=self.cdt)
x2 = np.array([1,2,3,4+1j], dtype=self.cdt)
y = fft([x1,x2],n=4)
assert_equal(y.dtype, self.cdt)
assert_equal(y.shape,(2,4))
assert_array_almost_equal(y[0],direct_dft(x1))
assert_array_almost_equal(y[1],direct_dft(x2))
def test_invalid_sizes(self):
assert_raises(ValueError, fft, [])
assert_raises(ValueError, fft, [[1,1],[2,2]], -5)
class TestDoubleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestSingleFFT(_TestFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
@pytest.mark.xfail(run=False, reason="single-precision FFT implementation is partially disabled, until accuracy issues with large prime powers are resolved")
def test_notice(self):
pass
class TestFloat16FFT(object):
def test_1_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft(x1, n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (4, ))
assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
def test_n_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
x2 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft([x1, x2], n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (2, 4))
assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
class _TestIFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = np.array([1,2,3,4+1j,1,2,3,4+2j], self.cdt)
y = ifft(x)
y1 = direct_idft(x)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4+0j,5], self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_definition_real(self):
x = np.array([1,2,3,4,1,2,3,4], self.rdt)
y = ifft(x)
assert_equal(y.dtype, self.cdt)
y1 = direct_idft(x)
assert_array_almost_equal(y,y1)
x = np.array([1,2,3,4,5], dtype=self.rdt)
assert_equal(y.dtype, self.cdt)
assert_array_almost_equal(ifft(x),direct_idft(x))
def test_random_complex(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.cdt)
x = random([size]).astype(self.cdt) + 1j*x
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = ifft(fft(x))
y2 = fft(ifft(x))
assert_equal(y1.dtype, self.cdt)
assert_equal(y2.dtype, self.cdt)
assert_array_almost_equal(y1, x)
assert_array_almost_equal(y2, x)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
x = (x + 1j*np.random.rand(size)).astype(self.cdt)
y = ifft(fft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = fft(ifft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, ifft, [])
assert_raises(ValueError, ifft, [[1,1],[2,2]], -5)
class TestDoubleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestSingleIFFT(_TestIFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class _TestRFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
x = np.array(t, dtype=self.rdt)
y = rfft(x)
y1 = direct_rdft(x)
assert_array_almost_equal(y,y1)
assert_equal(y.dtype, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, rfft, [])
assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
# See gh-5790
class MockSeries(object):
def __init__(self, data):
self.data = np.asarray(data)
def __getattr__(self, item):
try:
return getattr(self.data, item)
except AttributeError:
raise AttributeError(("'MockSeries' object "
"has no attribute '{attr}'".
format(attr=item)))
def test_non_ndarray_with_dtype(self):
x = np.array([1., 2., 3., 4., 5.])
xs = _TestRFFTBase.MockSeries(x)
expected = [1, 2, 3, 4, 5]
rfft(xs)
# Data should not have been overwritten
assert_equal(x, expected)
assert_equal(xs.data, expected)
def test_complex_input(self):
assert_raises(TypeError, rfft, np.arange(4, dtype=np.complex64))
class TestRFFTDouble(_TestRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
class TestRFFTSingle(_TestRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
class _TestIRFFTBase(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x1 = [1,2,3,4,1,2,3,4]
x1_1 = [1,2+3j,4+1j,2+3j,4,2-3j,4-1j,2-3j]
x2 = [1,2,3,4,1,2,3,4,5]
x2_1 = [1,2+3j,4+1j,2+3j,4+5j,4-5j,2-3j,4-1j,2-3j]
def _test(x, xr):
y = irfft(np.array(x, dtype=self.rdt))
y1 = direct_irdft(x)
assert_equal(y.dtype, self.rdt)
assert_array_almost_equal(y,y1, decimal=self.ndec)
assert_array_almost_equal(y,ifft(xr), decimal=self.ndec)
_test(x1, x1_1)
_test(x2, x2_1)
def test_random_real(self):
for size in [1,51,111,100,200,64,128,256,1024]:
x = random([size]).astype(self.rdt)
y1 = irfft(rfft(x))
y2 = rfft(irfft(x))
assert_equal(y1.dtype, self.rdt)
assert_equal(y2.dtype, self.rdt)
assert_array_almost_equal(y1, x, decimal=self.ndec,
err_msg="size=%d" % size)
assert_array_almost_equal(y2, x, decimal=self.ndec,
err_msg="size=%d" % size)
def test_size_accuracy(self):
# Sanity check for the accuracy for prime and non-prime sized inputs
if self.rdt == np.float32:
rtol = 1e-5
elif self.rdt == np.float64:
rtol = 1e-10
for size in LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES:
np.random.seed(1234)
x = np.random.rand(size).astype(self.rdt)
y = irfft(rfft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
y = rfft(irfft(x))
_assert_close_in_norm(x, y, rtol, size, self.rdt)
def test_invalid_sizes(self):
assert_raises(ValueError, irfft, [])
assert_raises(ValueError, irfft, [[1,1],[2,2]], -5)
def test_complex_input(self):
assert_raises(TypeError, irfft, np.arange(4, dtype=np.complex64))
# self.ndec is bogus; we should have an assert_array_approx_equal for number of
# significant digits
class TestIRFFTDouble(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.cdouble
self.rdt = np.double
self.ndec = 14
class TestIRFFTSingle(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.complex64
self.rdt = np.float32
self.ndec = 5
class Testfft2(object):
def setup_method(self):
np.random.seed(1234)
def test_regression_244(self):
"""FFT returns wrong result with axes parameter."""
# fftn (and hence fft2) used to break when both axes and shape were
# used
x = numpy.ones((4, 4, 2))
y = fft2(x, shape=(8, 8), axes=(-3, -2))
y_r = numpy.fft.fftn(x, s=(8, 8), axes=(-3, -2))
assert_array_almost_equal(y, y_r)
def test_invalid_sizes(self):
assert_raises(ValueError, fft2, [[]])
assert_raises(ValueError, fft2, [[1, 1], [2, 2]], (4, -3))
class TestFftnSingle(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float32))
assert_(y.dtype == np.complex64,
msg="double precision output with single precision")
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_size_accuracy_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_size_accuracy_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float32))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2000)
def test_definition_float16(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(np.array(x, np.float16))
assert_equal(y.dtype, np.complex64)
y_r = np.array(fftn(x), np.complex64)
assert_array_almost_equal_nulp(y, y_r)
@pytest.mark.parametrize('size', SMALL_COMPOSITE_SIZES + SMALL_PRIME_SIZES)
def test_float16_input_small(self, size):
x = np.random.rand(size, size) + 1j*np.random.rand(size, size)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 5e5)
@pytest.mark.parametrize('size', LARGE_COMPOSITE_SIZES + LARGE_PRIME_SIZES)
def test_float16_input_large(self, size):
x = np.random.rand(size, 3) + 1j*np.random.rand(size, 3)
y1 = fftn(x.real.astype(np.float16))
y2 = fftn(x.real.astype(np.float64)).astype(np.complex64)
assert_equal(y1.dtype, np.complex64)
assert_array_almost_equal_nulp(y1, y2, 2e6)
class TestFftn(object):
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
y = fftn(x)
assert_array_almost_equal(y, direct_dftn(x))
x = random((20, 26))
assert_array_almost_equal(fftn(x), direct_dftn(x))
x = random((5, 4, 3, 20))
assert_array_almost_equal(fftn(x), direct_dftn(x))
def test_axes_argument(self):
# plane == ji_plane, x== kji_space
plane1 = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
plane2 = [[10, 11, 12],
[13, 14, 15],
[16, 17, 18]]
plane3 = [[19, 20, 21],
[22, 23, 24],
[25, 26, 27]]
ki_plane1 = [[1, 2, 3],
[10, 11, 12],
[19, 20, 21]]
ki_plane2 = [[4, 5, 6],
[13, 14, 15],
[22, 23, 24]]
ki_plane3 = [[7, 8, 9],
[16, 17, 18],
[25, 26, 27]]
jk_plane1 = [[1, 10, 19],
[4, 13, 22],
[7, 16, 25]]
jk_plane2 = [[2, 11, 20],
[5, 14, 23],
[8, 17, 26]]
jk_plane3 = [[3, 12, 21],
[6, 15, 24],
[9, 18, 27]]
kj_plane1 = [[1, 4, 7],
[10, 13, 16], [19, 22, 25]]
kj_plane2 = [[2, 5, 8],
[11, 14, 17], [20, 23, 26]]
kj_plane3 = [[3, 6, 9],
[12, 15, 18], [21, 24, 27]]
ij_plane1 = [[1, 4, 7],
[2, 5, 8],
[3, 6, 9]]
ij_plane2 = [[10, 13, 16],
[11, 14, 17],
[12, 15, 18]]
ij_plane3 = [[19, 22, 25],
[20, 23, 26],
[21, 24, 27]]
ik_plane1 = [[1, 10, 19],
[2, 11, 20],
[3, 12, 21]]
ik_plane2 = [[4, 13, 22],
[5, 14, 23],
[6, 15, 24]]
ik_plane3 = [[7, 16, 25],
[8, 17, 26],
[9, 18, 27]]
ijk_space = [jk_plane1, jk_plane2, jk_plane3]
ikj_space = [kj_plane1, kj_plane2, kj_plane3]
jik_space = [ik_plane1, ik_plane2, ik_plane3]
jki_space = [ki_plane1, ki_plane2, ki_plane3]
kij_space = [ij_plane1, ij_plane2, ij_plane3]
x = array([plane1, plane2, plane3])
assert_array_almost_equal(fftn(x),
fftn(x, axes=(-3, -2, -1))) # kji_space
assert_array_almost_equal(fftn(x), fftn(x, axes=(0, 1, 2)))
assert_array_almost_equal(fftn(x, axes=(0, 2)), fftn(x, axes=(0, -1)))
y = fftn(x, axes=(2, 1, 0)) # ijk_space
assert_array_almost_equal(swapaxes(y, -1, -3), fftn(ijk_space))
y = fftn(x, axes=(2, 0, 1)) # ikj_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -1, -2),
fftn(ikj_space))
y = fftn(x, axes=(1, 2, 0)) # jik_space
assert_array_almost_equal(swapaxes(swapaxes(y, -1, -3), -3, -2),
fftn(jik_space))
y = fftn(x, axes=(1, 0, 2)) # jki_space
assert_array_almost_equal(swapaxes(y, -2, -3), fftn(jki_space))
y = fftn(x, axes=(0, 2, 1)) # kij_space
assert_array_almost_equal(swapaxes(y, -2, -1), fftn(kij_space))
y = fftn(x, axes=(-2, -1)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(1, 2)) # ji_plane
assert_array_almost_equal(fftn(plane1), y[0])
assert_array_almost_equal(fftn(plane2), y[1])
assert_array_almost_equal(fftn(plane3), y[2])
y = fftn(x, axes=(-3, -2)) # kj_plane
assert_array_almost_equal(fftn(x[:, :, 0]), y[:, :, 0])
assert_array_almost_equal(fftn(x[:, :, 1]), y[:, :, 1])
assert_array_almost_equal(fftn(x[:, :, 2]), y[:, :, 2])
y = fftn(x, axes=(-3, -1)) # ki_plane
assert_array_almost_equal(fftn(x[:, 0, :]), y[:, 0, :])
assert_array_almost_equal(fftn(x[:, 1, :]), y[:, 1, :])
assert_array_almost_equal(fftn(x[:, 2, :]), y[:, 2, :])
y = fftn(x, axes=(-1, -2)) # ij_plane
assert_array_almost_equal(fftn(ij_plane1), swapaxes(y[0], -2, -1))
assert_array_almost_equal(fftn(ij_plane2), swapaxes(y[1], -2, -1))
assert_array_almost_equal(fftn(ij_plane3), swapaxes(y[2], -2, -1))
y = fftn(x, axes=(-1, -3)) # ik_plane
assert_array_almost_equal(fftn(ik_plane1),
swapaxes(y[:, 0, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane2),
swapaxes(y[:, 1, :], -1, -2))
assert_array_almost_equal(fftn(ik_plane3),
swapaxes(y[:, 2, :], -1, -2))
y = fftn(x, axes=(-2, -3)) # jk_plane
assert_array_almost_equal(fftn(jk_plane1),
swapaxes(y[:, :, 0], -1, -2))
assert_array_almost_equal(fftn(jk_plane2),
swapaxes(y[:, :, 1], -1, -2))
assert_array_almost_equal(fftn(jk_plane3),
swapaxes(y[:, :, 2], -1, -2))
y = fftn(x, axes=(-1,)) # i_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, j, :]), y[i, j, :])
y = fftn(x, axes=(-2,)) # j_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[i, :, j]), y[i, :, j])
y = fftn(x, axes=(0,)) # k_line
for i in range(3):
for j in range(3):
assert_array_almost_equal(fft(x[:, i, j]), y[:, i, j])
y = fftn(x, axes=()) # point
assert_array_almost_equal(y, x)
def test_shape_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6]]
large_x1 = [[1, 2, 3, 0],
[4, 5, 6, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
y = fftn(small_x, shape=(4, 4))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, shape=(3, 4))
assert_array_almost_equal(y, fftn(large_x1[:-1]))
def test_shape_axes_argument(self):
small_x = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
large_x1 = array([[1, 2, 3, 0],
[4, 5, 6, 0],
[7, 8, 9, 0],
[0, 0, 0, 0]])
y = fftn(small_x, shape=(4, 4), axes=(-2, -1))
assert_array_almost_equal(y, fftn(large_x1))
y = fftn(small_x, shape=(4, 4), axes=(-1, -2))
assert_array_almost_equal(y, swapaxes(
fftn(swapaxes(large_x1, -1, -2)), -1, -2))
def test_shape_axes_argument2(self):
# Change shape of the last axis
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-1,), shape=(8,))
assert_array_almost_equal(y, fft(x, axis=-1, n=8))
# Change shape of an arbitrary axis which is not the last one
x = numpy.random.random((10, 5, 3, 7))
y = fftn(x, axes=(-2,), shape=(8,))
assert_array_almost_equal(y, fft(x, axis=-2, n=8))
# Change shape of axes: cf #244, where shape and axes were mixed up
x = numpy.random.random((4, 4, 2))
y = fftn(x, axes=(-3, -2), shape=(8, 8))
assert_array_almost_equal(y,
numpy.fft.fftn(x, axes=(-3, -2), s=(8, 8)))
def test_shape_argument_more(self):
x = zeros((4, 4, 2))
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
fftn(x, shape=(8, 8, 2, 1))
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
fftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
fftn([[1, 1], [2, 2]], (4, -3))
class TestIfftn(object):
dtype = None
cdtype = None
def setup_method(self):
np.random.seed(1234)
@pytest.mark.parametrize('dtype,cdtype,maxnlp',
[(np.float64, np.complex128, 2000),
(np.float32, np.complex64, 3500)])
def test_definition(self, dtype, cdtype, maxnlp):
x = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=dtype)
y = ifftn(x)
assert_equal(y.dtype, cdtype)
assert_array_almost_equal_nulp(y, direct_idftn(x), maxnlp)
x = random((20, 26))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
x = random((5, 4, 3, 20))
assert_array_almost_equal_nulp(ifftn(x), direct_idftn(x), maxnlp)
@pytest.mark.parametrize('maxnlp', [2000, 3500])
@pytest.mark.parametrize('size', [1, 2, 51, 32, 64, 92])
def test_random_complex(self, maxnlp, size):
x = random([size, size]) + 1j*random([size, size])
assert_array_almost_equal_nulp(ifftn(fftn(x)), x, maxnlp)
assert_array_almost_equal_nulp(fftn(ifftn(x)), x, maxnlp)
def test_invalid_sizes(self):
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[1, 0\]\) specified"):
ifftn([[]])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[4, -3\]\) specified"):
ifftn([[1, 1], [2, 2]], (4, -3))
class FakeArray(object):
def __init__(self, data):
self._data = data
self.__array_interface__ = data.__array_interface__
class FakeArray2(object):
def __init__(self, data):
self._data = data
def __array__(self):
return self._data
class TestOverwrite(object):
"""Check input overwrite behavior of the FFT functions."""
real_dtypes = [np.float32, np.float64]
dtypes = real_dtypes + [np.complex64, np.complex128]
fftsizes = [8, 16, 32]
def _check(self, x, routine, fftsize, axis, overwrite_x):
x2 = x.copy()
for fake in [lambda x: x, FakeArray, FakeArray2]:
routine(fake(x2), fftsize, axis, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not overwrite_x:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes,
fftsize, overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
self._check(data, routine, fftsize, axis,
overwrite_x=overwrite_x)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_fft_ifft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = (np.complex128, np.complex64)
self._check_1d(fft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(ifft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
@pytest.mark.parametrize('dtype', real_dtypes)
@pytest.mark.parametrize('fftsize', fftsizes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), -1),
((16, 2), 0),
((2, 16), 1)])
def test_rfft_irfft(self, dtype, fftsize, overwrite_x, shape, axes):
overwritable = self.real_dtypes
self._check_1d(irfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
self._check_1d(rfft, dtype, shape, axes, overwritable,
fftsize, overwrite_x)
def _check_nd_one(self, routine, dtype, shape, axes, overwritable_dtypes,
overwrite_x):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
def fftshape_iter(shp):
if len(shp) <= 0:
yield ()
else:
for j in (shp[0]//2, shp[0], shp[0]*2):
for rest in fftshape_iter(shp[1:]):
yield (j,) + rest
if axes is None:
part_shape = shape
else:
part_shape = tuple(np.take(shape, axes))
for fftshape in fftshape_iter(part_shape):
self._check(data, routine, fftshape, axes,
overwrite_x=overwrite_x)
if data.ndim > 1:
self._check(data.T, routine, fftshape, axes,
overwrite_x=overwrite_x)
@pytest.mark.parametrize('dtype', dtypes)
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('shape,axes', [((16,), None),
((16,), (0,)),
((16, 2), (0,)),
((2, 16), (1,)),
((8, 16), None),
((8, 16), (0, 1)),
((8, 16, 2), (0, 1)),
((8, 16, 2), (1, 2)),
((8, 16, 2), (0,)),
((8, 16, 2), (1,)),
((8, 16, 2), (2,)),
((8, 16, 2), None),
((8, 16, 2), (0, 1, 2))])
def test_fftn_ifftn(self, dtype, overwrite_x, shape, axes):
overwritable = (np.complex128, np.complex64)
self._check_nd_one(fftn, dtype, shape, axes, overwritable,
overwrite_x)
self._check_nd_one(ifftn, dtype, shape, axes, overwritable,
overwrite_x)
| aeklant/scipy | scipy/fftpack/tests/test_basic.py | Python | bsd-3-clause | 29,856 |
import numpy as np
import pandas as pd
arts = pd.DataFrame()  # placeholder: load the artworks dataset into this DataFrame before running the steps below
# 1. Clean the dates so you only see numbers by using string manipulations
arts["execution_date"] = arts["execution_date"].str.findall(r"([0-9]+)").str[0]
arts["execution_date"] = arts["execution_date"].astype(float)
arts.head()
# 1. If a year is lower than 100, it refers to the 1900s. For example, 78 is actually 1978, and that needs to be fixed too.
arts["execution_date"] = arts["execution_date"].apply(lambda x: 1900 + x if x < 100 else x)
arts.head()
# 2. Get the average execution year per artist.
arts.groupby("artist_name").mean().head()
# 3. Get the average execution year per category.
arts.groupby("category").mean().head()
# 4. Get the number of artworks per artist. Which artist is the most prolific?
artworks_by_artist = arts.groupby("artist_name")[["title"]].aggregate(np.count_nonzero)
artworks_by_artist.sort("title", ascending=False).head()
# 5. Get the number of artworks per category. Which category has the highest number?
artworks_by_category = arts.groupby("category")[["title"]].aggregate(np.count_nonzero)
artworks_by_category.sort("title", ascending=False).head()
# 6. Get the average length of artworks titles per category and artist.
arts['title_length'] = arts['title'].str.len()
length_by_category = arts.groupby("category")[["title_length"]].aggregate(np.mean)
length_by_category.sort("title_length", ascending=False).head()
# 6. Get the year with the highest production.
artworks_by_year = arts.groupby("execution_date")[["title"]].aggregate(np.count_nonzero)
artworks_by_year.sort("title", ascending=False).head()
# 8. Get the approximate period of production for each artist. If an artist painted from 1970 to 1990, the period is 20.
period_min = arts.groupby("artist_name")[['execution_date']].aggregate(np.min)
period_max = arts.groupby("artist_name")[['execution_date']].aggregate(np.max)
(period_max - period_min).sort_values("execution_date", ascending=False).head()
| versae/DH2304 | data/arts2.py | Python | mit | 1,974 |
# -*- coding: utf-8 -*-
import json
import pytz
import uuid
import http.client
from base64 import standard_b64decode
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.timezone import make_aware
from github import Github
from bakery.cookies.exceptions import (InvalidRepositoryError,
InvalidContentFileEncoding)
def _github_setup():
"""
Sets up the server-wide Github authentication for the project and returns
an authorized `Github object from PyGithub
<http://jacquev6.github.io/PyGithub/github.html>`_ which can be used to
list users, repos, etc.
"""
credentials = getattr(settings, 'GITHUB_CREDENTIALS', None)
if credentials is None:
raise ImproperlyConfigured('No GITHUB_CREDENTIALS set')
# Verify that only allowed keys are passed
allowed_keys = set(['login_or_token', 'password', 'client_id', 'client_secret'])
given_keys = set(credentials.keys())
forbidden_keys = given_keys - allowed_keys
if given_keys - allowed_keys:
raise ImproperlyConfigured('Invalid keys in GITHUB_CREDENTIALS: '
'{0}'.format(','.join(forbidden_keys)))
if 'password' in credentials and not 'login_or_token' in credentials:
raise ImproperlyConfigured('You need to define the login_or_token to '
                                   'use password authentication in GITHUB_CREDENTIALS')
if 'client_secret' in credentials and not 'client_id' in credentials:
raise ImproperlyConfigured('You need to define the client_id to '
                                   'use client_secret authentication in GITHUB_CREDENTIALS')
if 'client_id' in credentials and not 'client_secret' in credentials:
raise ImproperlyConfigured('You need to define the client_secret to '
                                   'use client_id authentication in GITHUB_CREDENTIALS')
return Github(**credentials)
#: Server-wide authenticated GitHub state
github_setup = _github_setup()
def get_repo_from_url(url, gh_setup=github_setup):
"""
Given an URL like (ssh://)[email protected]/user/repo.git or any other url
that defines the root of a repository, this function returns the PyGithub
resource describing that object.
One can use :func:`get_cookie_data_from_repo` or
:func:`get_mapping_file_from_repo` to get further information about that
repository such as the content of the ``cookiecutter.json`` file.
:param str url: The root URL to a github repository
:param gh_setup: If not the server-wide authentiaction :data:`github_setup`
should be used, this parameter can be set to another, e.g. user
authenticated PyGithub object
:return: Returns an instance of a ``PyGithub.Repository``.
:raises: ``InvalidRepositoryError`` if the given URL does not match a known
GitHub URL.
"""
if '[email protected]' in url:
identifier = '[email protected]'
elif 'https://github.com/' in url:
identifier = 'https://github.com'
else:
raise InvalidRepositoryError('{0} is not a valid GitHub URL'.format(url))
index = url.index(identifier)
length = len(identifier)
start = length + index + 1 # +1 for separator after identifier
full_name = url[start:]
if full_name.endswith('.git'):
full_name = full_name[:-4] # strip .git
return get_repo_from_full_name(full_name, gh_setup)
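# Illustrative sketch: both URL styles below resolve to the same repository
# object (the repository named here is only an example):
#
#   repo = get_repo_from_url('[email protected]:audreyr/cookiecutter.git')
#   repo = get_repo_from_url('https://github.com/audreyr/cookiecutter')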
def get_repo_from_full_name(full_name, gh_setup=github_setup):
"""
Returns a PyGithub.Repository by a given full_name (<user>/<repo_name>)
"""
repository = gh_setup.get_repo(full_name)
return repository
def get_cookie_data_from_repo(repo):
"""
Given a ``PyGithub.Repository`` instance construct a dict holding the
following information:
* ``name`` -- Repository name
* ``owner_name`` -- The owner name of the repository
* ``url`` -- The HTTP URL to view the repository in a browser
* ``description`` -- A brief description about the repository
* ``last_change`` -- A timezone aware timestamp of the last modification
on the repository
* ``mapping`` -- The content of the ``cookiecutter.json`` file (or similar)
* ``backend`` -- ``github``
* ``repo_watchers`` -- Amount of repository watchers
    * ``repo_forks`` -- Amount of repository forks
* ``participants`` -- List of participant usernames
* ``language`` -- The main language used in the repository
* ``homepage`` -- The repository homepage
* ``clone_urls`` -- A Dict of urls one can use to clone the repository
* _``owner`` -- A dict with information about the owner of the repository
* ``username`` -- The user- or login name (required)
* ``email`` -- The email address of the user
* ``name`` -- The full name
        * ``is_organization`` -- If the repository is owned by an organization: ``True``
* ``profile_url`` -- The HTTP URL to view the user in a browser
:param repo: A ``PyGithub.Repository`` instance
:return dict: The dict containing Cookie and BakeryUser information
:raises: ``InvalidRepositoryError`` if no mapping file can be found in the
given repository
:raises: ``InvalidContentFileEncoding`` if the content of the given file
cannot be parsed.
"""
mapping_file = get_mapping_file_from_repo(repo)
content = get_content_from_content_file(mapping_file)
owner = repo.owner
email = owner.email
if not email:
email = '{0}@localhost.invalid'.format(uuid.uuid4().hex)
owner_data = {
'username': owner.login,
'email': email,
'name': owner.name,
'is_organization': owner.type == "Organization",
'profile_url': owner.html_url,
}
# TODO: I think this should not fail like that :-/
try:
participants = ', '.join(user.login for user in repo.get_contributors())
except http.client.BadStatusLine:
participants = None
data = {
'name': repo.name,
'owner_name': repo.owner.login,
'url': repo.html_url,
'description': repo.description,
'last_change': make_aware(repo.updated_at, pytz.UTC),
'mapping': content,
'backend': 'github',
'repo_watchers': repo.watchers,
'repo_forks': repo.forks,
'participants': participants,
'language': repo.language,
'homepage': repo.homepage,
'clone_urls': {
'ssh': repo.ssh_url,
'git': repo.git_url,
},
'_owner': owner_data,
}
return data
def filter_repo(repo, filters):
contents = repo.get_contents('/')
if contents:
candidates = {}
for rd in contents.raw_data:
if rd['type'] != 'file':
continue
for key, filter in filters.items():
if filter(rd[key]):
candidates[rd['name']] = rd
return candidates
def get_mapping_file_from_repo(repo):
"""
Finds a ``cookiecutter.json`` or another JSON file in the repository root
and treat it as the mapping file.
The candidate selection works as follows:
#. All files ending with ``.json`` on the root-level will be added to a candidate set.
    #. If no candidates have been found, raise ``InvalidRepositoryError``
#. If more than one candidate has been found:
#. if there is a ``cookiecutter.json`` in the candidate list, use it
#. Otherwise raise ``InvalidRepositoryError``
    #. If there is exactly one ``JSON`` file: use it
#. If a mapping_file has been found, open it as a ``PyGithub.ContentFile``
and return the content_file
:raises: ``InvalidRepositoryError`` if there was no way to
deterministically find the mapping file.
"""
candidates = filter_repo(repo, {'name': lambda val: val.endswith('.json')})
if not candidates:
raise InvalidRepositoryError('No JSON mapping file found!')
if len(candidates) > 1:
mapping_file = candidates.get('cookiecutter.json', None)
if mapping_file is None:
raise InvalidRepositoryError('Cannot decide for a mapping file! '
                'Multiple files found: {0}'.format(', '.join(candidates.keys())))
else:
mapping_file = list(candidates.values())[0]
return repo.get_contents('/' + mapping_file['name'])
def decode_file(content_file):
"""
Given a ``PyGithub.ContentFile`` this function will decode the file's data.
:return dict: Returns a raw decoded string.
:raises: ``InvalidContentFileEncoding`` raised if not suitable decoding
is defined.
"""
decoded = None
if content_file.encoding == 'base64':
decoded = standard_b64decode(content_file.content).decode('utf-8')
if decoded is None:
raise InvalidContentFileEncoding(
'Encoding {0} cannot be decoded'.format(content_file.encoding))
return decoded
def get_content_from_content_file(content_file):
"""
Given a ``PyGithub.ContentFile`` this function will decode the file's data
and loads it's JSON content.
:return dict: Returns a ``dict`` with the JSON content
:raises: ``InvalidContentFileEncoding`` raised if not suitable decoding
is defined.
"""
return json.loads(decode_file(content_file))
def fork_repository(user, repo):
"""
Forks the repository ``repo`` to the user ``user``.
:return: Returns an instance of the newly forked ``PyGithub.Repository``.
"""
return user.create_fork(repo)
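# End-to-end sketch of how these helpers fit together (illustrative; requires
# valid GITHUB_CREDENTIALS in the Django settings, and the repository name is
# only an example):
#
#   repo = get_repo_from_full_name('audreyr/cookiecutter-pypackage')
#   data = get_cookie_data_from_repo(repo)  # dict described above
#   mapping = data['mapping']               # parsed cookiecutter.json content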
| muffins-on-dope/bakery | bakery/utils/vcs/gh.py | Python | bsd-3-clause | 9,617 |
from moviepy.editor import ImageSequenceClip
import argparse
def main():
parser = argparse.ArgumentParser(description='Create driving video.')
parser.add_argument(
'image_folder',
type=str,
default='',
help='Path to image folder. The video will be created from these images.'
)
parser.add_argument(
'--fps',
type=int,
default=60,
help='FPS (Frames per second) setting for the video.')
args = parser.parse_args()
video_file = args.image_folder + '.mp4'
print("Creating video {}, FPS={}".format(video_file, args.fps))
clip = ImageSequenceClip(args.image_folder, fps=args.fps)
clip.write_videofile(video_file)
if __name__ == '__main__':
main()
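# Example invocation (illustrative): build run1.mp4 at 48 frames per second
# from the images found in ./run1:
#
#   python video.py run1 --fps 48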
| twildhage/BehavioralCloning | video.py | Python | mit | 750 |
# Import the Blender-Python API module
import bpy
# Import other useful modules
import mathutils
import math
import random
import sys
# Helper Method to rotate an object so that it points at a target
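# (It works by pitching about X and yawing about Z so that the object's local
# -Z axis, the direction Blender cameras and spot lamps look along, points at
# targetLoc.)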
def pointObjectToTarget(obj, targetLoc):
dx = targetLoc.x - obj.location.x;
dy = targetLoc.y - obj.location.y;
dz = targetLoc.z - obj.location.z;
xRad = math.atan2(dz, math.sqrt(dy**2 + dx**2)) + math.pi/2;
zRad = math.atan2(dy, dx) - math.pi/2;
obj.rotation_euler = mathutils.Euler((xRad, 0, zRad), 'XYZ');
# Helper method to create a random surface (resembling spilled liquid)
# by blitting Gaussians and truncating the result
def createRandomSurfaceMap(xBinsNum, yBinsNum):
sigmaX = 1/3.2;
sigmaY = 1/3.2;
elevationMap = [[0 for x in range(0,xBinsNum)] for y in range(0,yBinsNum)];
for y in range (0, yBinsNum ):
for x in range(0,xBinsNum):
xc = 2*(x-xBinsNum/2+0.5)/xBinsNum;
yc = 2*(y-yBinsNum/2+0.5)/yBinsNum;
z = random.random()*math.pow(math.exp(-0.5*(math.pow(xc/sigmaX,2)+math.pow(yc/sigmaY,2))),0.7);
if z > 0.25:
z = 0.25;
elevationMap[y][x] = z;
return(elevationMap);
# Helper method to create an elevation map (resembling a stretched cloth)
# by blitting elongated Gaussians at random positions/orientations
def createRandomGaussianBlobsMap(xBinsNum, yBinsNum):
# create a 2D list and fill it with 0
elevation = [[0 for x in range(0,xBinsNum)] for y in range(0,yBinsNum)];
# allocate memory
fx = [0 for x in range(0, xBinsNum)];
fy = [0 for y in range(0, yBinsNum)];
fySinTheta = [0 for y in range(0, yBinsNum)];
fyCosTheta = [0 for y in range(0, yBinsNum)];
peakPos = mathutils.Vector((0.0, 0.0));
positionSigma = mathutils.Vector((1/3.0, 1/3.0));
dx = 1.0/xBinsNum;
dy = 1.0/yBinsNum;
# medium + small elongated bumps
mediumBumpsNum = 200;
smallBumpsNum = 100;
for bumpIndex in range(1,mediumBumpsNum+smallBumpsNum):
# give some feedback regarding the progress
print('Generating gaussian blob #{} of {}'.format(bumpIndex, mediumBumpsNum+smallBumpsNum));
sys.stdout.flush()
# randomize Gaussian sigmas
bumpSigmaX = (0.16+random.random())*0.03;
if bumpIndex > mediumBumpsNum:
bumpSigmaX = (0.12+random.random())*0.03;
bumpSigmaY = (6.0 + 2*random.random())*bumpSigmaX;
# randomize Gaussian position around main radius
randomRadius = random.gauss(0.9, 0.35);
if (randomRadius < 0.0):
continue;
randomTheta = random.random()*2.0*math.pi + 2.0*math.pi;
randomXpos = randomRadius * math.cos(randomTheta);
randomYpos = randomRadius * math.sin(randomTheta);
xc = peakPos.x + randomXpos;
yc = peakPos.y + randomYpos;
# this choice of Gaussian orientation results in an elevation map resembling a stretched cloth
gaussianOrientaton = randomTheta - math.pi/2.0 + random.gauss(0, math.pi/60);
sinTheta = math.sin(gaussianOrientaton);
cosTheta = math.cos(gaussianOrientaton);
# precompute some stuff
for y in range(0, yBinsNum):
fy[y] = 2*(y-yBinsNum/2+0.5)/yBinsNum - yc;
fySinTheta[y] = fy[y] * sinTheta;
fyCosTheta[y] = fy[y] * cosTheta;
# blit the Gaussian
for x in range(0, xBinsNum):
fx[x] = 2*(x-xBinsNum/2+0.5)/xBinsNum - xc;
fxCosTheta = fx[x] * cosTheta;
fxSinTheta = fx[x] * sinTheta;
for y in range(0, yBinsNum):
xx = fxCosTheta - fySinTheta[y];
yy = fxSinTheta + fyCosTheta[y];
elevation[y][x] += math.exp(-0.5*(math.pow(xx/bumpSigmaX,2.0) + math.pow(yy/bumpSigmaY,2.0)));
# normalize elevation to 1.0
    maxElevation = max(max(row) for row in elevation);
    minElevation = min(min(row) for row in elevation);
maxElevation = max([maxElevation, -minElevation]);
rows = len(elevation);
cols = len(elevation[0]);
for y in range(0, rows):
for x in range(0, cols):
elevation[y][x] *= 1.0/maxElevation;
# return computed elevation map
return(elevation);
# Class for managing basscene components
class sceneManager:
# ---- Method to initialize the SceneManager object -----
def __init__(self, params):
if ('erasePreviousScene' in params) and (params['erasePreviousScene'] == True):
# Remove objects from previous scene
self.erasePreviousContents();
# Set the scene name
if 'name' in params:
bpy.context.scene.name = params['name'];
# Set the unit system to Metric and the unit scale to 1.0 cm
bpy.context.screen.scene.unit_settings.system = 'METRIC';
if 'sceneUnitScale' in params:
bpy.context.screen.scene.unit_settings.scale_length = params['sceneUnitScale'];
# set the grid spacing and the number of grid lines
if ('sceneGridSpacing' in params) and ('sceneGridLinesNum' in params):
self.setGrid(params['sceneGridSpacing'], params['sceneGridLinesNum']);
# set the size of the displayed grid lines
bpy.context.scene.tool_settings.normal_size = params['sceneGridLinesNum'];
# Set rendering params
# exposure boost
bpy.data.worlds[0].exposure = 1.0;
# contrast boost
bpy.data.worlds[0].color_range = 1;
# Set rendering resolution
bpy.context.scene.render.resolution_x = params['sceneWidthInPixels'];
bpy.context.scene.render.resolution_y = params['sceneHeightInPixels'];
# Set rendering quality (highest possible)
bpy.context.scene.render.resolution_percentage = 100;
bpy.context.scene.render.use_antialiasing = True
bpy.context.scene.render.use_full_sample = True
# Set image format
bpy.context.scene.render.image_settings.file_format = 'TIFF'
bpy.context.scene.render.image_settings.quality = 100;
bpy.context.scene.render.image_settings.color_mode = 'RGB';
# Set BLENDER as the rendering engine
bpy.context.scene.render.engine = 'BLENDER_RENDER';
# Set CYCLES as the rendering engine
#bpy.context.scene.render.engine = 'CYCLES';
bpy.context.scene.cycles.samples = 100;
bpy.context.scene.cycles.film_exposure = 5;
# Generate a transparent material (used to bypass collada issue with Blender area lights)
params = {'name' : 'transparent material',
'diffuse_shader' : 'LAMBERT',
'diffuse_intensity' : 1.0,
'diffuse_color' : mathutils.Vector((1.0, 1.0, 1.0)),
'specular_shader' : 'WARDISO',
'specular_intensity': 1.0,
'specular_color' : mathutils.Vector((1.0, 1.0, 1.0)),
'alpha' : 0.0,
};
self.transparentMaterial = self.generateMaterialType(params);
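    # Illustrative sketch of the params dict this class expects (key names are
    # taken from the code above; the values are arbitrary examples):
    #
    #   sceneParams = {'erasePreviousScene': True,
    #                  'name': 'demo scene',
    #                  'sceneUnitScale': 0.01,        # 1 Blender unit = 1 cm
    #                  'sceneGridSpacing': 0.10,
    #                  'sceneGridLinesNum': 20,
    #                  'sceneWidthInPixels': 1024,
    #                  'sceneHeightInPixels': 768}
    #   theScene = sceneManager(sceneParams)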
# ---- Method to erase a previous scene ----------------
def erasePreviousContents(self):
print('Erasing previous scene components')
self.unlinkAllObjects();
self.removeAllMeshes();
self.removeAllLamps();
self.removeAllCameras();
self.removeAllMaterials();
self.removeAllObjects();
# Method to remove a single oject from the current scene
def removeObjectFromScene(self, object):
# Remove the object from the scene
print('Removing object "{}", from old scene ("{}")'.format(object.name, bpy.context.scene.name));
bpy.data.objects.remove(object);
# Method to remove all objects from the current scene
def removeAllObjects(self):
for object in bpy.data.objects:
self.removeObjectFromScene(object);
def unlinkObjectFromScene(self, object):
# Check to see if the object is in the scene, and if it is, unlink it from the scene
if object.name in bpy.context.scene.objects:
bpy.context.scene.objects.unlink(object);
print('Unlinking object "{}", from old scene ("{}")'.format(object.name, bpy.context.scene.name));
# Method to unlink all objects from the current scene
def unlinkAllObjects(self):
for object in bpy.data.objects:
# we can unlink an object only when in OBJECT mode
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
self.unlinkObjectFromScene(object);
# Method to remove all mesh data
def removeAllMeshes(self):
for mesh in bpy.data.meshes:
print('Clearing all users for mesh "{}"'.format(mesh.name));
mesh.user_clear();
print('Removing mesh "{}", from old scene ("{}")'.format(mesh.name, bpy.context.scene.name));
bpy.data.meshes.remove(mesh);
# Method to remove all lamp data
def removeAllLamps(self):
for lamp in bpy.data.lamps:
print('Clearing all users for lamp "{}"'.format(lamp.name));
lamp.user_clear();
print('Removing lamp "{}", from old scene ("{}")'.format(lamp.name, bpy.context.scene.name));
bpy.data.lamps.remove(lamp);
# Method to remove all camera data
def removeAllCameras(self):
for camera in bpy.data.cameras:
print('Clearing all users for camera "{}"'.format(camera.name));
camera.user_clear();
print('Removing camera "{}", from old scene ("{}")'.format(camera.name, bpy.context.scene.name));
bpy.data.cameras.remove(camera);
# Method to remove all material data
def removeAllMaterials(self):
for material in bpy.data.materials:
print('Clearing all users for material "{}"'.format(material.name));
material.user_clear();
print('Removing material "{}", from old scene ("{}")'.format(material.name, bpy.context.scene.name));
bpy.data.materials.remove(material);
# ---- Method to set the grid spacing and the number of grid lines ----
def setGrid(self, gridSpacing, gridLinesNum):
# Search all Blender windows to find the grid_space parameter
# print('Will search {} Blender windows'.format(len(bpy.data.screens)));
foundGridParam = False;
for screen in bpy.data.screens:
# print(' Screen {} has {} areas'.format(screen.name, len(screen.areas)));
# Loop through all areas to find one who's type is 'VIEW_3D'
# Such an area is a subclass of SpaceView3D, in which grid params are defined
for area in screen.areas:
# print(' Area is of type {}'.format(area.type));
if area.type == 'VIEW_3D':
# search through the spaces to find spaces with type VIEW_3D
# print(' Will search {} spaces in current area'.format(len(area.spaces)))
for space in area.spaces:
if space.type == 'VIEW_3D':
#print(' >> Setting grid scale')
foundGridParam = True;
space.grid_scale = gridSpacing;
space.grid_lines = gridLinesNum;
if not(foundGridParam):
print('Did not find any "VIEW_3D" space in which the grid is defined');
# Method to generate a camera type
def generateCameraType(self, params):
# generate a camera type
theCameraType = bpy.data.cameras.new('CAMERA');
# configure the camera type
theCameraType.type = 'PERSP' ; # perspective camera
theCameraType.angle_x = params['fieldOfViewInDegrees']/180*math.pi;
if 'widthToHeightAspectRatio' in params:
print('before camera sensor: {} x {}; image resolution: {} x {}; horiz FOV = {}'.format(theCameraType.sensor_width, theCameraType.sensor_height, bpy.data.scenes[0].render.resolution_x, bpy.data.scenes[0].render.resolution_y, theCameraType.angle_x));
aspectRatio = theCameraType.sensor_width / theCameraType.sensor_height;
theCameraType.sensor_height = theCameraType.sensor_width / params['widthToHeightAspectRatio'];
bpy.data.scenes[0].render.resolution_x = params['pixelSamplesAlongWidth'];
bpy.data.scenes[0].render.resolution_y = bpy.data.scenes[0].render.resolution_x / params['widthToHeightAspectRatio'];
print('after camera sensor: {} x {}; image resolution: {} x {}; horiz FOV = {}'.format(theCameraType.sensor_width, theCameraType.sensor_height, bpy.data.scenes[0].render.resolution_x, bpy.data.scenes[0].render.resolution_y, theCameraType.angle_x));
theCameraType.clip_start = params['clipRange'][0];
theCameraType.clip_end = params['clipRange'][1];
theCameraType.draw_size = params['drawSize']; # apparent size of Camera object in 3D View
theCameraType.show_limits = True; # draw clipping range and focus point
return(theCameraType);
# Method to add a camera object to the current scene
def addCameraObject(self,params):
# generate the container object
theCameraObject = bpy.data.objects.new(params['name'], params['cameraType']);
theCameraObject.show_name = params['showName'];
# position the camera object
theCameraObject.location = params['location'];
# rotate the camera object so that it looks at the desired position
pointObjectToTarget(theCameraObject, params['lookAt']);
# link the camera object to the current scene (if not linked, the camera is not functional)
bpy.context.screen.scene.objects.link(theCameraObject);
return(theCameraObject);
# Method to generate an area lamp type
def generateAreaLampType(self, params):
# generate a lamp type
theLampType = bpy.data.lamps.new(params['name'], 'AREA');
# configure the lamp type
theLampType.energy = 1;
theLampType.color = params['color'];
theLampType.use_specular = True;
theLampType.use_diffuse = True;
theLampType.distance = params['fallOffDistance']; # falloff distance (1/2 intensity) in cm
theLampType.shape = 'RECTANGLE';
theLampType.size = params['width1'];
theLampType.size_y = params['width2']
return(theLampType);
# Method to generate a directional lamp type
def generateDirectionalLampType(self, params):
# generate a lamp type
theLampType = bpy.data.lamps.new(params['name'], 'SUN');
# configure the lamp type
return(theLampType);
# Method to add a lamp object to the current scene
def addLampObject(self,params):
# generate a lamp object
theLampObject = bpy.data.objects.new(params['name'], params['model']);
theLampObject.show_name = params['showName'];
# position the lamp object
theLampObject.location = params['location'];
# rotate the lamp object so that it looks at the desired position
pointObjectToTarget(theLampObject, params['lookAt']);
# link the lamp object to the current scene (if not linked, the lamp is not functional)
bpy.context.screen.scene.objects.link(theLampObject);
# Check whether we are adding a Blender area lamp object ...
if params['model'].type == 'AREA':
# add a transparent planar Quad at the same xyz coords, which RT3 will transform into an area light
quadParams = {'name' : '{}-geomObject'.format(params['name']),
'scaling' : mathutils.Vector((params['model'].size, params['model'].size_y, 1)),
'rotation' : mathutils.Vector((0, 0, 0)),
'location' : params['location'],
'material' : self.transparentMaterial,
'flipNormal' : True,
};
quadOBJ = self.addPlanarQuad(quadParams);
# rotate the lamp object so that it looks at the desired position
pointObjectToTarget(quadOBJ, params['lookAt']);
# rename the underlying mesh so RT3 can access it
bpy.data.meshes[quadOBJ.data.name].name = '{}'.format(params['name']);
print('Area light mesh name for RT3: {}'.format(bpy.data.meshes[quadOBJ.data.name].name));
# Method to add a matte material
def generateMaterialType(self, params):
theMaterialType = bpy.data.materials.new(params['name']);
# Options for diffuse shaders: Minnaert, Fresnel, Toon, Oren-Nayar, Lambert
theMaterialType.diffuse_shader = params['diffuse_shader'];
theMaterialType.diffuse_intensity = params['diffuse_intensity'];
theMaterialType.diffuse_color = params['diffuse_color'];
# Options for specular shaders: CookTorr, Phong, Blinn, Toon, WardIso
theMaterialType.specular_shader = params['specular_shader'];
theMaterialType.specular_intensity = params['specular_intensity'];
theMaterialType.specular_color = params['specular_color'];
# Transparency options
theMaterialType.ambient = 1;
theMaterialType.alpha = params['alpha'];
theMaterialType.use_transparency = True;
theMaterialType.transparency_method = 'RAYTRACE';
#theMaterialType.raytrace_mirror.depth = 5;
#theMaterialType.raytrace_mirror.use = True;
return(theMaterialType);
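    # Illustrative parameters (assumed values; the shader names follow the Blender-internal options
    # listed in the comments above):
    #   matteGray = sceneUtils.generateMaterialType({'name' : 'matteGray',
    #       'diffuse_shader' : 'LAMBERT', 'diffuse_intensity' : 1.0, 'diffuse_color' : (0.5, 0.5, 0.5),
    #       'specular_shader' : 'WARDISO', 'specular_intensity' : 0.0, 'specular_color' : (1, 1, 1),
    #       'alpha' : 1.0});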
# Method to add a cube at a specified location, rotation with specified scaling and material
def addCube(self, params):
bpy.ops.mesh.primitive_cube_add();
theCube = bpy.context.active_object;
theCube.name = params['name'];
theCube.rotation_euler = params['rotation'];
theCube.scale = params['scaling'];
theCube.location = params['location'];
# attach a material
theCube.data.materials.append(params['material']);
# return the generated object
return(theCube);
# Method to add a cylinder with a desired scale, rotation, and location
def addCylinder(self,params):
# Create cylinder
bpy.ops.mesh.primitive_cylinder_add(vertices=128, radius=1, depth=1, end_fill_type='NGON');
theCylinder = bpy.context.active_object;
theCylinder.name = params['name'];
theCylinder.rotation_euler = params['rotation'];
theCylinder.scale = params['scaling'];
theCylinder.location = params['location'];
# attach a material
theCylinder.data.materials.append(params['material']);
# return the generated object
return(theCylinder);
# Method to add a sphere with a desired scale, and location
def addSphere(self,params):
# Create sphere
if 'subdivisions' in params:
subdivisionsNum = params['subdivisions'];
else:
subdivisionsNum = 5;
bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=subdivisionsNum, size=1);
theSphere = bpy.context.active_object;
theSphere.name = params['name'];
theSphere.scale = params['scaling'];
theSphere.location = params['location'];
# attach a material
theSphere.data.materials.append(params['material']);
return(theSphere);
def addPlanarQuad(self, params):
if 'vertices' in params:
# Generate the mesh for the plane.
vertices = params['vertices'];
else:
vertices = [(-0.5, -0.5, 0),(0.5, -0.5, 0),(0.5, 0.5, 0),(-0.5, 0.5, 0)];
        # edges: pairs, each pair containing two indices to the vertices list
edges = [];
# faces: list of N-tuples (N >= 3) containing indices to the vertices list
if params['flipNormal'] == True:
faces = [(3,2,1,0)];
else:
faces = [(0,1,2,3)];
theMesh = bpy.data.meshes.new('{}-mesh'.format(params['name']));
theMesh.from_pydata(vertices, edges, faces);
theMesh.validate();
theMesh.update();
# generate encapsulating plane object
thePlanarQuad = bpy.data.objects.new(params['name'], theMesh);
thePlanarQuad.name = params['name'];
thePlanarQuad.scale = params['scaling'];
thePlanarQuad.rotation_euler = params['rotation'];
thePlanarQuad.location = params['location'];
# attach a material
if 'material' in params:
thePlanarQuad.data.materials.append(params['material']);
# link the plane to the scene
bpy.context.scene.objects.link(thePlanarQuad);
# the normal will be shown only in EditMode in the 3D View viewport
thePlanarQuad.data.show_normal_face = True;
return(thePlanarQuad);
# Method to add a room. Note: if you want to make openings in the room
# using the boreOut method, the room must have a specified 'wallThickness'.
def addRoom(self, roomParams):
roomLocation = roomParams['roomLocation'];
roomWidth = roomParams['roomWidth'];
roomDepth = roomParams['roomDepth'];
roomHeight = roomParams['roomHeight'];
# the floor plane
params = { 'name' : roomParams['floorName'],
'scaling' : mathutils.Vector((roomWidth, roomDepth, 0.1)),
'rotation' : mathutils.Vector((0, 0, 0)),
'location' : mathutils.Vector((roomLocation.x, roomLocation.y, roomLocation.z)),
'material' : roomParams['floorMaterialType'],
'flipNormal' : False,
};
if ('wallThickness' in roomParams):
params['scaling'].x = roomWidth/2;
params['scaling'].y = roomDepth/2;
params['scaling'].z = roomParams['wallThickness'];
if params['location'].z < 0:
params['location'].z += roomParams['wallThickness'];
else:
params['location'].z -= roomParams['wallThickness'];
floorPlane = self.addCube(params);
else:
floorPlane = self.addPlanarQuad(params);
# the backwall
params = { 'name' : roomParams['backWallName'],
'scaling' : mathutils.Vector((roomWidth, roomHeight, 0.1)),
'rotation' : mathutils.Vector((math.pi/2, 0, 0)),
'location' : mathutils.Vector((roomLocation.x, roomLocation.y+roomDepth/2, roomLocation.z+roomHeight/2)),
'material' : roomParams['backWallMaterialType'],
'flipNormal' : False,
};
if ('wallThickness' in roomParams):
params['scaling'].x = roomWidth/2;
params['scaling'].y = roomHeight/2;
params['scaling'].z = roomParams['wallThickness'];
if params['location'].y < 0:
params['location'].y += roomParams['wallThickness'];
else:
params['location'].y -= roomParams['wallThickness'];
backWallPlane = self.addCube(params);
else:
backWallPlane = self.addPlanarQuad(params);
# the left wall
params = { 'name' : roomParams['leftWallName'],
'scaling' : mathutils.Vector((roomHeight, roomDepth, 0.1)),
'rotation' : mathutils.Vector((0,math.pi/2,0)),
'location' : mathutils.Vector((roomLocation.x-roomWidth/2, roomLocation.y, roomLocation.z+roomHeight/2)),
'material' : roomParams['leftWallMaterialType'],
'flipNormal' : False,
};
if ('wallThickness' in roomParams):
params['scaling'].x = roomHeight/2;
params['scaling'].y = roomDepth/2;
params['scaling'].z = roomParams['wallThickness'];
if params['location'].x < 0:
params['location'].x += roomParams['wallThickness'];
else:
params['location'].x -= roomParams['wallThickness'];
leftWallPlane = self.addCube(params);
else:
leftWallPlane = self.addPlanarQuad(params);
# the right wall
params = { 'name' : roomParams['rightWallName'],
'scaling' : mathutils.Vector((roomHeight, roomDepth, 0.1)),
'rotation' : mathutils.Vector((0,-math.pi/2,0)),
'location' : mathutils.Vector((roomLocation.x+roomWidth/2, roomLocation.y, roomLocation.z+roomHeight/2)),
'material' : roomParams['rightWallMaterialType'],
'flipNormal' : False,
};
if ('wallThickness' in roomParams):
params['scaling'].x = roomHeight/2;
params['scaling'].y = roomDepth/2;
params['scaling'].z = roomParams['wallThickness'];
if params['location'].x < 0:
params['location'].x += roomParams['wallThickness'];
else:
params['location'].x -= roomParams['wallThickness'];
rightWallPlane = self.addCube(params);
else:
rightWallPlane = self.addPlanarQuad(params);
# the frontwall
params = { 'name' : roomParams['frontWallName'],
'scaling' : mathutils.Vector((roomWidth, roomHeight, 0.1)),
'rotation' : mathutils.Vector((-math.pi/2, 0, 0)),
'location' : mathutils.Vector((roomLocation.x, roomLocation.y-roomDepth/2, roomLocation.z+roomHeight/2)),
'material' : roomParams['frontWallMaterialType'],
'flipNormal' : False,
};
if ('wallThickness' in roomParams):
params['scaling'].x = roomWidth/2;
params['scaling'].y = roomHeight/2;
params['scaling'].z = roomParams['wallThickness'];
if params['location'].y < 0:
params['location'].y += roomParams['wallThickness'];
else:
params['location'].y -= roomParams['wallThickness'];
frontWallPlane = self.addCube(params);
else:
frontWallPlane = self.addPlanarQuad(params);
# the ceiling plane
params = { 'name' : roomParams['ceilingName'],
'scaling' : mathutils.Vector((roomWidth, roomDepth, 0.1)),
'rotation' : mathutils.Vector((math.pi, 0, 0)),
'location' : mathutils.Vector((roomLocation.x, roomLocation.y, roomLocation.z+roomHeight)),
'material' : roomParams['ceilingMaterialType'],
'flipNormal' : False,
};
if ('wallThickness' in roomParams):
params['scaling'].x = roomWidth/2;
params['scaling'].y = roomDepth/2;
params['scaling'].z = roomParams['wallThickness'];
if params['location'].z < 0:
params['location'].z += roomParams['wallThickness'];
else:
params['location'].z -= roomParams['wallThickness'];
ceilingPlane = self.addCube(params);
else:
ceilingPlane = self.addPlanarQuad(params);
        # Generate a dictionary with the room's surface planes
surfacesDict = {
'floorPlane' : floorPlane,
'backWallPlane' : backWallPlane,
'leftWallPlane' : leftWallPlane,
'rightWallPlane' : rightWallPlane,
'frontWallPlane' : frontWallPlane,
'ceilingPlane' : ceilingPlane,
}
# and return it
return(surfacesDict);
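    # Illustrative call (assumed names and materials; 'wallThickness' is only needed if openings
    # will later be bored out of the walls with boreOut):
    #   surfaces = sceneUtils.addRoom({'roomLocation' : mathutils.Vector((0, 0, 0)),
    #       'roomWidth' : 60, 'roomDepth' : 60, 'roomHeight' : 30, 'wallThickness' : 1.0,
    #       'floorName' : 'floor', 'floorMaterialType' : matteGray,
    #       'backWallName' : 'backWall', 'backWallMaterialType' : matteGray,
    #       'leftWallName' : 'leftWall', 'leftWallMaterialType' : matteGray,
    #       'rightWallName' : 'rightWall', 'rightWallMaterialType' : matteGray,
    #       'frontWallName' : 'frontWall', 'frontWallMaterialType' : matteGray,
    #       'ceilingName' : 'ceiling', 'ceilingMaterialType' : matteGray});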
# Method to generate a mesh object from an elevation map
def addElevationMapObject(self, params):
# compute vertices
vertices = [];
numX = params['xBinsNum'];
numY = params['yBinsNum'];
elevation = params['elevationMap'];
for y in range (0, numY):
for x in range(0,numX):
xc = 2*(x-(numX-1.5)/2)/(numX-2);
yc = 2*(y-(numY-1.5)/2)/(numY-2);
vertices.append((xc, yc, elevation[y][x]));
# Fill faces array.
# Each item in the face array contains 4 indices that refer to items in the vertices array.
count = 0;
faces = [];
for i in range (0, numY*(numX-1)):
if count < numX-1:
A = i; # first vertex
B = i+1; # second vertex
C = (i+numX)+1; # third vertex
D = (i+numX); # fourth vertex
face = (A,B,C,D);
faces.append(face);
count = count + 1
else:
count = 0
#create mesh and object
theRandomSurfaceMesh = bpy.data.meshes.new('{}-mesh'.format(params['name']));
theRandomSurfaceObject = bpy.data.objects.new(params['name'], theRandomSurfaceMesh);
# set object location and scale
theRandomSurfaceObject.location = params['location'];
theRandomSurfaceObject.scale = params['scale'];
theRandomSurfaceObject.rotation_euler = params['rotation'];
#create mesh from python data
theRandomSurfaceMesh.from_pydata(vertices,[],faces)
theRandomSurfaceMesh.update()
# subdivide modifier
#theRandomSurfaceObject.modifiers.new("subd", type='SUBSURF')
#theRandomSurfaceObject.modifiers['subd'].levels = 3;
# smooth the mesh's polygons
for polygon in theRandomSurfaceMesh.polygons:
polygon.use_smooth = True
# attach a material
theRandomSurfaceObject.data.materials.append(params['material']);
# link it to current scene to make it visible
bpy.context.scene.objects.link(theRandomSurfaceObject);
# return the generated object
return(theRandomSurfaceObject);
# Method to subtract (boring out) one geometric object from another
def boreOut(self, targetObject, boringObject, hideBoringObject):
        # Deselect all objects
bpy.ops.object.select_all(action='DESELECT')
# make the target object active and select it
bpy.context.scene.objects.active = targetObject;
targetObject.select = True;
# add a modifier to the target object
objectModifiersNum = len(bpy.context.object.modifiers);
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers[objectModifiersNum].object = boringObject;
bpy.context.object.modifiers[objectModifiersNum].operation = 'DIFFERENCE';
# apply modifier
bpy.ops.object.modifier_apply(apply_as='DATA', modifier=bpy.context.object.modifiers[0].name);
if hideBoringObject:
# unlink the boring object from the scene so it is not visible
self.unlinkObjectFromScene(boringObject);
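    # Illustrative use (assumed objects): bore a window-shaped cube out of the back wall returned
    # by addRoom, hiding the boring object afterwards:
    #   windowCube = sceneUtils.addCube({'name' : 'window', 'scaling' : mathutils.Vector((10, 10, 3)),
    #       'rotation' : mathutils.Vector((math.pi/2, 0, 0)), 'location' : mathutils.Vector((0, 30, 15)),
    #       'material' : sceneUtils.transparentMaterial});
    #   sceneUtils.boreOut(surfaces['backWallPlane'], windowCube, True);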
# Method to export a collada file for the current 3D scene
def exportToColladaFile(self, filePath):
# Get scene
currentScene = bpy.data.scenes[0];
fileName = '{}/{}.dae'.format(filePath, currentScene.name);
#The transrotloc option is necessary for RT3 to successfully parse the collada file
bpy.ops.wm.collada_export(filepath=fileName, export_transformation_type_selection='transrotloc');
| tlian7/RenderToolbox3 | Utilities/BlenderPython/SceneUtilsV1.py | Python | mit | 32,125 |
#!/usr/bin/env python
# -*-coding: utf-8 -*-
"""
__init__.py
~~~~~~~~~~~
Configurations for iiify
:copyright: (c) 2015 by AUTHORS
:license: see LICENSE for more details
"""
import os
import types
import configparser
path = os.path.dirname(os.path.realpath(__file__))
approot = os.path.abspath(os.path.join(path, os.pardir))
def getdef(self, section, option, default_value):
try:
return self.get(section, option)
except:
return default_value
config = configparser.ConfigParser()
config.read('%s/settings.cfg' % path)
config.getdef = types.MethodType(getdef, config)
HOST = config.getdef('server', 'host', '0.0.0.0')
PORT = int(config.getdef('server', 'port', 8080))
DEBUG = bool(int(config.getdef('server', 'debug', 1)))
CRT = config.getdef('ssl', 'crt', '')
KEY = config.getdef('ssl', 'key', '')
options = {'debug': DEBUG, 'host': HOST, 'port': PORT}
if CRT and KEY:
options['ssl_context'] = (CRT, KEY)
# Enable CORS to allow cross-domain loading of tilesets from this server
# Especially useful for SeaDragon viewers running locally
cors = bool(int(config.getdef('server', 'cors', 0)))
media_root = config.getdef('media', 'root', 'media')
if not os.path.isabs(media_root):
media = os.path.join(approot, media_root)
if not os.path.exists(media_root):
os.makedirs(media_root)
cache_root = config.getdef('cache', 'root', 'cache')
if not os.path.isabs(cache_root):
cache = os.path.join(approot, cache_root)
if not os.path.exists(cache_root):
os.makedirs(cache_root)
cache_expr = config.getdef('cache', 'expr', 100)
| mekarpeles/iiify | niiifty/configs/__init__.py | Python | gpl-3.0 | 1,589 |
###################################################################################################\
# Diffcalc utilities
###################################################################################################
###################################################################################################\
# Installing
###################################################################################################
#1- Download from: https://github.com/DiamondLightSource/diffcalc/archive/v2.1.zip
#2- Extract the contents to {script}/Lib/diffcalc
#3- Download http://central.maven.org/maven2/gov/nist/math/jama/1.0.3/jama-1.0.3.jar
# to the extensions folder.
#4- On {script}/Lib/diffcalc/diffcalc/gdasupport/you.py, the line " wl.asynchronousMoveTo(1)"
# must be commented for the energy not to move when the library is loaded.
###################################################################################################\
# Library loading and Hardware setup
###################################################################################################
#1- Create a MotorGroup with the diffractometer motors
# e.g. 'sixc', containing mu, delta, gam, eta, chi, phi motors (gam = nu)
# or 'fivec', containing delta, gam, eta, chi, phi motors
# or 'fourc', containing delta, eta, chi, phi motors
#2- Create positioner to read/set the energy in kEv, e.g. named 'en'
#3- Execute: run("diffutils")
#4- Execute: setup_diff(sixc, en)
###################################################################################################\
# API
###################################################################################################
# Orientation commands defined in https://github.com/DiamondLightSource/diffcalc#id19 are
# defined here with identical signatures, and so are the constraint commands.
# Motion command names were changed because the original names can collide with other globals:
# hklci, hklca, hklwh, hklget, hklmv and hklsim(hkl).
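#
# Example session (illustrative only - 'sixc' and 'en' are assumed device names and must match the
# motor group and energy positioner defined in the local configuration):
#   run("diffutils")
#   setup_diff(sixc, en)                           # map the motor group and energy into diffcalc
#   newub('sample'); setlat('cubic', 1, 1, 1, 90, 90, 90)
#   con('qaz', 90); con('a_eq_b'); con('mu', 0)    # constrain the remaining degrees of freedom
#   hklsim([1.0, 0.0, 0.0])                        # simulate the motor positions for (1 0 0)
#   hklmv([1.0, 0.0, 0.0])                         # and actually move there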
from __future__ import absolute_import
import traceback
import os
import Jama.Matrix
diffcalc_path = os.path.abspath(expand_path("{script}/Lib/diffcalc"))
if not diffcalc_path in sys.path:
sys.path.append(diffcalc_path)
import diffcalc
import math
from diffcalc import settings
from diffcalc.hkl.you.geometry import YouGeometry,SixCircle, FiveCircle, FourCircle, YouPosition
from diffcalc.hardware import HardwareAdapter
from diffcalc.ub.persistence import UBCalculationJSONPersister, UbCalculationNonPersister
from diffcalc.gdasupport.minigda.scannable import ScannableBase, ScannableGroup
#from diffcalc.gdasupport.minigda import command
import diffcalc.hkl.you.calc as you_calc
import ch.psi.pshell.device.PositionerConfig as PositionerConfig
import ch.psi.pshell.device.RegisterConfig as RegisterConfig
import ch.psi.pshell.device.Register as Register
_difcalc_names = {}
#
# Disable error handling designed for interactive use
#diffcalc.util.DEBUG = True
# Disable console bold characters
diffcalc.util.COLOURISE_TERMINAL_OUTPUT = False
###################################################################################################
# Device mapping to difcalc
###################################################################################################
class PositionerScannable(ScannableBase):
def __init__(self, positioner, name = None):
self.positioner = positioner
self.name = positioner.name if name is None else name
self.inputNames = [self.name]
self.outputFormat = ['% 6.4f']
self.level = 3
def isBusy(self):
return self.positioner.state == State.Busy
def waitWhileBusy(self):
self.positioner.waitReady(-1)
def asynchronousMoveTo(self, new_position):
#print "Moving " , self.name, " to: ", new_position
self.positioner.moveAsync(float(new_position), -1)
def getPosition(self):
return self.positioner.getPosition()
def _get_diffcalc_axis_names():
nu_name=diffcalc.hkl.you.constraints.NUNAME
return ("mu", "delta", nu_name, "eta", "chi", "phi")
class PositionerScannableGroup(ScannableGroup):
def __init__(self, name, motors, diffcalc_axis_names=None):
self.name = name
global _difcalc_names
_difcalc_names = {}
positioners = []
if diffcalc_axis_names is None:
if len(motors) == 6: diffcalc_axis_names = _get_diffcalc_axis_names()
            elif len(motors) == 5: diffcalc_axis_names = ("delta", "gam", "eta", "chi", "phi")
            elif len(motors) == 4: diffcalc_axis_names = ("delta", "eta", "chi", "phi")
self.diffcalc_axis_names = diffcalc_axis_names
for i in range(len(motors)):
_difcalc_names[motors[i]] = diffcalc_axis_names[i]
exec('self.' + diffcalc_axis_names[i] + ' = PositionerScannable(' + motors[i].name + ', "' +diffcalc_axis_names[i] + '")')
exec('positioners.append(self.' + diffcalc_axis_names[i] + ')' )
#for m in motors:
# exec('self.' + m.name + ' = PositionerScannable(' + m.name + ', "' + m.name + '")')
# exec('positioners.append(self.' + m.name + ')' )
ScannableGroup.__init__(self, self.name, positioners)
class MotorGroupScannable(PositionerScannableGroup):
def __init__(self, motor_group, diffcalc_axis_names=None, simultaneous_move=False):
self.simultaneous_move = simultaneous_move
self.motor_group = motor_group
PositionerScannableGroup.__init__(self, motor_group.name, motor_group.motors, diffcalc_axis_names)
self.motor_group.restoreSpeedAfterMove = self.simultaneous_move
#Make sync moves (default implementation trigger each motor individually)
def asynchronousMoveTo(self, position):
if self.simultaneous_move:
position = [(float('nan') if v is None else v) for v in position]
self.motor_group.write(position)
else:
PositionerScannableGroup.asynchronousMoveTo(self, position)
class ScannableAdapter(HardwareAdapter):
def __init__(self, diffractometer, energy, energy_multiplier_to_kev=1):
self.diffractometer = diffractometer
self.energy = energy
self.energy_multiplier_to_kev = energy_multiplier_to_kev
input_names = diffractometer.getInputNames()
HardwareAdapter.__init__(self, input_names)
#Returns the current physical POSITIONS
def get_position(self):
"""
pos = getDiffractometerPosition() -- returns the current physical
diffractometer position as a list in degrees
"""
return self.diffractometer.getPosition()
#returns energy in kEv
def get_energy(self):
"""energy = get_energy() -- returns energy in kEv (NOT eV!) """
multiplier = self.energy_multiplier_to_kev
energy = self.energy.getPosition() * multiplier
if energy is None:
raise DiffcalcException("Energy has not been set")
return energy
def get_motor(self,name):
global _motor_group
global _difcalc_names
for m in _difcalc_names.keys():
if _difcalc_names[m] == name:
return m
for m in _motor_group.motors:
if m.name == name:
return m
raise Exception("Invalid axis name: " + str(name))
def get_lower_limit(self, name):
'''returns lower limits by axis name. Limit may be None if not set
'''
m = self.get_motor(name)
ret = m.getMinValue()
        if ret is not None and math.isnan(ret): ret = None  # NaN means the limit is not set
return ret
def get_upper_limit(self, name):
'''returns upper limit by axis name. Limit may be None if not set
'''
m = self.get_motor(name)
ret = m.getMaxValue()
        if ret is not None and math.isnan(ret): ret = None  # NaN means the limit is not set
return ret
def set_lower_limit(self, name, value):
"""value may be None to remove limit"""
if value is None: value = float("NaN")
m = self.get_motor(name)
m.config.minValue =value
def set_upper_limit(self, name, value):
"""value may be None to remove limit"""
if value is None: value = float("NaN")
m = self.get_motor(name)
m.config.maxValue =value
def is_axis_value_within_limits(self, axis_name, value):
m = self.get_motor(axis_name)
upper = self.get_upper_limit(axis_name)
lower = self.get_lower_limit(axis_name)
if (upper is None) or (math.isnan(upper)): upper = sys.float_info.max
if (lower is None) or (math.isnan(lower)): lower = -sys.float_info.max
return lower <= value <= upper
@property
def name(self):
return self.diffractometer.getName()
class MotorGroupAdapter(ScannableAdapter):
def __init__(self, diffractometer, energy, energy_multiplier_to_kev=1, diffcalc_axis_names=None, simultaneous_move=False):
self.diffractometer = MotorGroupScannable(diffractometer, diffcalc_axis_names, simultaneous_move)
self.energy = PositionerScannable(energy)
self.energy.level = 3
ScannableAdapter.__init__(self, self.diffractometer, self.energy, energy_multiplier_to_kev)
class Wavelength(RegisterBase):
def doRead(self):
try:
return get_wavelength().getPosition()
except:
return None
def doWrite(self, val):
get_wavelength().asynchronousMoveTo(val)
###################################################################################################
# HKL Pseudo-devices
###################################################################################################
class HklPositoner (PositionerBase):
def __init__(self, name, index, hkl_group):
PositionerBase.__init__(self, name, PositionerConfig())
self.setParent(hkl_group)
self.index = index
def isReady(self):
return PositionerBase.isReady(self) and self.getParent().isReady()
def doRead(self):
return self.getParent()._setpoint[self.index]
def doWrite(self, value):
#print "Setting " , self.getName(), "to: ", value
pos = [None, None, None]
pos[self.index] = value
self.getParent().write(pos)
def doReadReadback(self):
if java.lang.Thread.currentThread() != self.getParent()._updating_thread:
self.getParent().update()
return self.getParent()._readback[self.index]
class HklGroup(RegisterBase, Register.RegisterArray):
def __init__(self, name):
RegisterBase.__init__(self, name, RegisterConfig())
self.hkl=get_hkl()
self.h, self.k, self.l = HklPositoner("h", 0, self), HklPositoner("k", 1, self), HklPositoner("l", 2, self)
add_device(self.h, True)
add_device(self.k, True)
add_device(self.l, True)
self._setpoint = self.doRead()
self._updating = False
def getSize(self):
return 3
def doRead(self):
try:
self._readback = self.hkl.getPosition()
self._updating_thread = java.lang.Thread.currentThread()
self.h.update()
self.k.update()
self.l.update()
except:
#traceback.print_exc()
self._readback = (None, None, None)
finally:
self._updating_thread = None
return self._readback
def doWrite(self, pos):
self._setpoint = None if (pos is None) else [(None if v is None else float(v)) for v in pos]
#print "Moving to: " + str(pos)
self.hkl.asynchronousMoveTo(pos)
def sim(self, pos):
return self.hkl.simulateMoveTo(pos)
###################################################################################################
# System setup
###################################################################################################
you = None
dc, ub, hardware, hkl = None, None, None, None
_motor_group = None
def setup_diff(diffractometer= None, energy= None, diffcalc_axis_names = None, geometry=None, persist_ub=True, simultaneous_move=False):
"""
configure diffractometer. Display configuration if no parameter is given
diffractometer: Diffraction motor group
energy: Positioner having energy in kev
geometry: YouGeometry extension. If none, uses default
diffcalc_axis_names: if None use defaults:
- mu, delta, gam, eta, chi, phi (six circle)
                         - delta, gam, eta, chi, phi (five circle)
- delta, eta, chi, phi (four circle)
"""
global you, dc, ub, hardware, hkl, _motor_group
if diffractometer is not None:
_motor_group = diffractometer
you = None
if geometry is not None:
settings.geometry = geometry
elif diffcalc_axis_names is not None:
class CustomGeometry(YouGeometry):
def __init__(self):
self.all_axis_names = _get_diffcalc_axis_names()
self.my_axis_names = diffcalc_axis_names
fixed_constraints = {}
for axis in self.all_axis_names:
if not axis in self.my_axis_names:
fixed_constraints[axis] = 0
YouGeometry.__init__(self, diffractometer.name, fixed_constraints)
def physical_angles_to_internal_position(self, physical_angle_tuple):
pos=[]
index = 0
for axis in self.all_axis_names:
pos.append(physical_angle_tuple[index] if (axis in self.my_axis_names) else 0)
index = index+1
pos.append("DEG")#units
return YouPosition(*pos)
def internal_position_to_physical_angles(self, internal_position):
pos = internal_position.clone()
pos.changeToDegrees()
pos = pos.totuple()
ret = []
for i in range (len(self.all_axis_names)):
if self.all_axis_names[i] in self.my_axis_names:
ret.append(pos[i])
return tuple(ret)
settings.geometry = CustomGeometry()
elif len(diffractometer.motors) == 6:
settings.geometry = SixCircle()
elif len(diffractometer.motors) == 5:
settings.geometry = FiveCircle()
elif len(diffractometer.motors) == 4:
settings.geometry = FourCircle()
else:
raise Exception("Invalid motor group")
settings.hardware = MotorGroupAdapter(diffractometer, energy, 1, diffcalc_axis_names, simultaneous_move)
if persist_ub:
settings.persistence_path = os.path.abspath(expand_path("{config}/diffcalc"))
if not os.path.exists(settings.persistence_path):
os.makedirs(settings.persistence_path)
print "UB calculations persistence path: " + settings.persistence_path
settings.ubcalc_persister = UBCalculationJSONPersister(settings.persistence_path)
else:
print "UB calculations are not persisteds"
settings.ubcalc_persister = UbCalculationNonPersister()
settings.axes_scannable_group = settings.hardware.diffractometer
settings.energy_scannable = settings.hardware.energy
settings.ubcalc_strategy = you_calc.YouUbCalcStrategy()
settings.angles_to_hkl_function = you_calc.youAnglesToHkl
from diffcalc.gdasupport import you
reload(you)
# These must be imported AFTER the settings have been configured
from diffcalc.dc import dcyou as dc
from diffcalc.ub import ub
from diffcalc import hardware
from diffcalc.hkl.you import hkl
add_device(HklGroup("hkl_group"), True)
add_device(Wavelength("wavelength", 6), True)
hkl_group.polling = 250
wavelength.polling = 250
if settings.hardware is not None:
print "Diffractometer defined with:"
print " \t" + "Motor group: " + str(settings.hardware.diffractometer.name)
print " \t" + "Energy: " + str(settings.hardware.energy.name)
print "\nDiffcalc axis names:"
for m in _difcalc_names.keys():
print " \t Motor " + m.name + " = Axis " + _difcalc_names[m]
else:
print "Diffractometer is not defined\n"
print
def setup_axis(motor = None, min=None, max=None, cut=None):
"""
configure axis range and cut.
displays ranges if motor is None
"""
if motor is not None:
name = get_axis_name(motor)
if min is not None: hardware.setmin(name, min)
if max is not None: hardware.setmax(name, max)
if cut is not None: hardware.setcut(name, cut)
else:
print "Axis range configuration:"
hardware.hardware()
print
###################################################################################################
# Access functions
###################################################################################################
def get_diff():
return settings.hardware.diffractometer
def get_energy():
return settings.hardware.energy
def get_adapter():
return settings.hardware
def get_motor_group():
return _motor_group
def get_wavelength():
return you.wl
def get_hkl():
return you.hkl
def get_axis_name(motor):
if is_string(motor):
motor = get_adapter().get_motor(motor)
return _difcalc_names[motor]
###################################################################################################
# Orientation Commands
###################################################################################################
# State
def newub(name):
"""
start a new ub calculation name
"""
try:
rmub(name)
except:
pass
try:
return ub.newub(name)
finally:
save_exp_context()
def loadub(name_or_num):
"""
load an existing ub calculation
"""
try:
return ub.loadub(name_or_num)
finally:
save_exp_context()
def lastub():
"""
load the last used ub calculation
"""
try:
return ub.lastub()
finally:
save_exp_context()
def listub():
"""
list the ub calculations available to load
"""
return ub.listub()
def rmub(name_or_num):
"""
remove existing ub calculation
"""
return ub.rmub(name_or_num)
def saveubas(name):
"""
save the ub calculation with a new name
"""
try:
return ub.saveubas(name)
finally:
save_exp_context()
# Lattice
def setlat(name=None, *args):
"""
set lattice parameters (Angstroms and Deg)
setlat -- interactively enter lattice parameters (Angstroms and Deg)
setlat name a -- assumes cubic
setlat name a b -- assumes tetragonal
setlat name a b c -- assumes ortho
setlat name a b c gamma -- assumes mon/hex with gam not equal to 90
setlat name a b c alpha beta gamma -- arbitrary
"""
return ub.setlat(name, *args)
def c2th(hkl, en=None):
"""
calculate two-theta angle for reflection
"""
return ub.c2th(hkl, en)
def hklangle(hkl1, hkl2):
"""
calculate angle between [h1 k1 l1] and [h2 k2 l2] crystal planes
"""
return ub.hklangle(hkl1, hkl2)
# Reference (surface)
def setnphi(xyz = None):
"""
sets or displays (xyz=None) n_phi reference
"""
return ub.setnphi(xyz)
def setnhkl(hkl = None):
"""
sets or displays (hkl=None) n_hkl reference
"""
return ub.setnhkl(hkl)
# Reflections
def showref():
"""
shows full reflection list
"""
return ub.showref()
def addref(*args):
"""
Add reflection
addref -- add reflection interactively
addref [h k l] {'tag'} -- add reflection with current position and energy
addref [h k l] (p1, .., pN) energy {'tag'} -- add arbitrary reflection
"""
return ub.addref(*args)
def editref(idx):
"""
interactively edit a reflection (idx is tag or index numbered from 1)
"""
return ub.editref(idx)
def delref(idx):
"""
deletes a reflection (idx is tag or index numbered from 1)
"""
return ub.delref(idx)
def clearref():
"""
deletes all the reflections
"""
return ub.clearref()
def swapref(idx1=None, idx2=None):
"""
swaps two reflections
swapref -- swaps first two reflections used for calculating U matrix
swapref {num1 | 'tag1'} {num2 | 'tag2'} -- swaps two reflections
"""
return ub.swapref(idx1, idx2)
# Crystal Orientations
def showorient():
"""
shows full list of crystal orientations
"""
#TODO: Workaround of bug on Diffcalc (str_lines needs parameter)
if ub.ubcalc._state.orientlist:
print '\n'.join(ub.ubcalc._state.orientlist.str_lines(None))
return
return ub.showorient()
def addorient(*args):
"""
addorient -- add crystal orientation interactively
addorient [h k l] [x y z] {'tag'} -- add crystal orientation in laboratory frame
"""
return ub.addorient(*args)
def editorient(idx):
"""
interactively edit a crystal orientation (idx is tag or index numbered from 1)
"""
    return ub.editorient(idx)
def delorient(idx):
"""
deletes a crystal orientation (idx is tag or index numbered from 1)
"""
    return ub.delorient(idx)
def clearorient():
"""
deletes all the crystal orientations
"""
return ub.clearorient()
def swaporient(idx1=None, idx2=None):
"""
    swaps two crystal orientations
swaporient -- swaps first two crystal orientations used for calculating U matrix
swaporient {num1 | 'tag1'} {num2 | 'tag2'} -- swaps two crystal orientations
"""
return ub.swaporient(idx1, idx2)
# UB Matrix
def showub():
"""
show the complete state of the ub calculation
NOT A DIFFCALC COMMAND
"""
return ub.ub()
def checkub():
"""
show calculated and entered hkl values for reflections
"""
return ub.checkub()
def setu(U=None):
"""
manually set U matrix
setu -- set U matrix interactively
setu [[..][..][..]] -- manually set U matrix
"""
return ub.setu(U)
def setub(UB=None):
"""
manually set UB matrix
setub -- set UB matrix interactively
setub [[..][..][..]] -- manually set UB matrix
"""
return ub.setub(UB)
def getub():
"""
returns current UB matrix
NOT A DIFFCALC COMMAND
"""
return None if ub.ubcalc._UB is None else ub.ubcalc._UB.tolist()
def calcub(idx1=None, idx2=None):
"""
(re)calculate u matrix
calcub -- (re)calculate U matrix from the first two reflections and/or orientations.
calcub idx1 idx2 -- (re)calculate U matrix from reflections and/or orientations referred by indices and/or tags idx1 and idx2.
"""
return ub.calcub(idx1, idx2)
def trialub(idx=1):
"""
(re)calculate u matrix using one reflection only
    Use index or tag idx. Default: use the first reflection.
"""
return ub.trialub(idx)
def refineub(*args):
"""
refine unit cell dimensions and U matrix to match diffractometer angles for a given hkl value
refineub -- interactively
refineub [h k l] {pos}
"""
return ub.refineub(*args)
def fitub(*args):
"""
fitub ref1, ref2, ref3... -- fit UB matrix to match list of provided reference reflections.
"""
return ub.fitub(*args)
def addmiscut(angle, xyz=None):
"""
apply miscut to U matrix using a specified miscut angle in degrees and a rotation axis (default: [0 1 0])
"""
return ub.addmiscut(angle, xyz)
def setmiscut(angle, xyz=None):
"""
manually set U matrix using a specified miscut angle in degrees and a rotation axis (default: [0 1 0])
"""
return ub.setmiscut(angle, xyz)
###################################################################################################
# Motion Commands
###################################################################################################
#Constraints
def con(*args):
"""
list or set available constraints and values
con -- list available constraints and values
con <name> {val} -- constrains and optionally sets one constraint
con <name> {val} <name> {val} <name> {val} -- clears and then fully constrains
"""
try:
ret = hkl.con(*args)
finally:
save_exp_context()
return ret
def uncon(name):
"""
remove constraint
"""
try:
ret = hkl.uncon(name)
finally:
save_exp_context()
return ret
# HKL
def allhkl(_hkl, wavelength=None):
"""
print all hkl solutions ignoring limits
"""
return hkl.allhkl(_hkl, wavelength)
#Hardware
def setmin(axis, val=None):
"""
set lower limits used by auto sector code (nan to clear)
"""
name = get_axis_name(axis)
try:
hardware.setmin(name, val)
finally:
save_exp_context()
def setmax(axis, val=None):
"""
set upper limits used by auto sector code (nan to clear)
"""
name = get_axis_name(axis)
try:
return hardware.setmax(name, val)
finally:
save_exp_context()
def setcut(axis, val):
"""
sets cut angle
"""
name = get_axis_name(axis)
try:
return hardware.setcut(name, val)
finally:
save_exp_context()
###################################################################################################
# Motion commands: not standard Diffcalc names
###################################################################################################
def hklci(positions, energy=None):
"""
converts positions of motors to reciprocal space coordinates (H K L)
"""
return dc.angles_to_hkl(positions, energy)
def hklca(hkl, energy=None):
"""
converts reciprocal space coordinates (H K L) to positions of motors.
"""
return dc.hkl_to_angles(hkl[0], hkl[1], hkl[2], energy)
def hklwh():
"""
prints the current reciprocal space coordinates (H K L) and positions of motors.
"""
hkl = hklget()
print "HKL: " + str(hkl)
for m in _difcalc_names.keys():
print _difcalc_names[m] + " [" + m.name + "] :" + str(m.take())
def hklget():
"""
get current hkl position
"""
return hkl_group.read()
def hklmv(hkl):
"""
move to hkl position
"""
hkl_group.write(hkl)
def hklsim(hkl):
"""
simulates moving diffractometer
"""
return hkl_group.sim(hkl)
###################################################################################################
# HKL Combined Scan
###################################################################################################
def hklscan(vector, readables,latency = 0.0, passes = 1, **pars):
"""
HKL Scan:
Args:
vector(list of lists): HKL values to be scanned
readables(list of Readable): Sensors to be sampled on each step.
latency(float, optional): settling time for each step before readout, defaults to 0.0.
passes(int, optional): number of passes
pars(keyworded variable length arguments, optional): scan optional named arguments:
- title(str, optional): plotting window name.
- hidden(bool, optional): if true generates no effects on user interface.
- before_read (function, optional): callback on each step, before sampling. Arguments: positions, scan
- after_read (function, optional): callback on each step, after sampling. Arguments: record, scan.
- before_pass (function, optional): callback before each scan pass execution. Arguments: pass_num, scan.
- after_pass (function, optional): callback after each scan pass execution. Arguments: pass_num, scan.
            - Additional arguments defined by set_exec_pars.
Returns:
ScanResult object.
"""
readables=to_list(string_to_obj(readables))
pars["initial_move"] = False
scan = ManualScan([h,k,l], readables ,vector[0], vector[-1], [len(vector)-1] * 3, dimensions = 1)
if not "domain_axis" in pars.keys():
pars["domain_axis"] = "Index"
processScanPars(scan, pars)
scan.start()
try:
for pos in vector:
#print "Writing ", pos
hkl_group.write(pos)
time.sleep(0.1) #Make sure is busy
get_motor_group().update()
get_motor_group().waitReady(-1)
time.sleep(latency)
hkl_group.update()
if scan.before_read: scan.before_read(pos,scan)
scan.append ([h.take(), k.take(), l.take()], [h.getPosition(), k.getPosition(), l.getPosition()], [readable.read() for readable in readables ])
if scan.after_read: scan.after_read(scan.currentRecord,scan)
finally:
scan.end()
return scan.result
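# Illustrative use of hklscan (the 'diode' readable is an assumption): scan l from 1.0 to 1.3 at
# constant h and k, sampling one sensor per step:
#   vector = [[1.0, 1.0, 1.0 + 0.1 * i] for i in range(4)]
#   result = hklscan(vector, [diode], latency=0.1, title="l scan")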
def get_constraints():
constraints={}
from diffcalc.hkl.you.constraints import valueless_constraints
all_constraints=hkl.hklcalc.constraints.all
for name in all_constraints:
if not hkl.hklcalc.constraints.is_constraint_fixed(name):
value = hkl.hklcalc.constraints.get_constraint(name)
if name in valueless_constraints:
constraints[name] = None
elif value is not None:
constraints[name] = value
return constraints
def set_constraints(constraints):
for name in constraints.keys():
try:
value = constraints[name]
if value is None:
con(name)
else:
con(name, value)
except:
print sys.exc_info()[1]
def get_limits():
limits={}
for name in settings.hardware.get_axes_names():
axis = {}
axis["lower_limit"] = settings.hardware.get_lower_limit(name)
axis["upper_limit"] = settings.hardware.get_upper_limit(name)
axis["cut"] = settings.hardware.get_cuts()[name]
limits[name]=axis
return limits
def set_limits(limits):
for name in limits.keys():
try:
axis = limits[name]
if axis.get("lower_limit") is not None: setmin (name, axis["lower_limit"])
if axis.get("upper_limit") is not None: setmax (name, axis["upper_limit"])
if axis.get("cut") is not None: setcut (name, axis["cut"])
except:
print sys.exc_info()[1]
def get_exp_context():
context = {}
try:
context["limits"] = get_limits()
except:
context["limits"] = None
try:
context["constraints"] = get_constraints()
except:
context["constraints"] = None
try:
context["ub"] = ub.ubcalc._state.name
except:
context["ub"] = None
return context
def set_exp_context(context):
try:
if context.get("limits") is not None:
set_limits(context["limits"])
except:
print sys.exc_info()[1]
try:
if context.get("constraints") is not None:
set_constraints(context["constraints"])
except:
print sys.exc_info()[1]
try:
if context.get("ub") is not None:
loadub(str(context["ub"]))
except:
print sys.exc_info()[1]
EXPERIMENT_CONTEXT_FILE = expand_path("{context}/diff_exp_context.json")
def save_exp_context():
"""
Saves experiment context (constraints, ub and hw limits)
"""
try:
c = get_exp_context()
with open(EXPERIMENT_CONTEXT_FILE, 'w') as json_file:
json.dump(c, json_file)
except:
print "Cannot save experiment context: ", sys.exc_info()[1]
def load_exp_context():
"""
Loads experiment context (constraints, ub and hw limits)
"""
try:
with open(EXPERIMENT_CONTEXT_FILE) as json_file:
c = json.load(json_file)
set_exp_context(c)
except:
print "Cannot load experiment context: ", sys.exc_info()[1]
###################################################################################################
# Experiment context
###################################################################################################
def test_diffcalc():
print "Start test"
energy.move(20.0)
delta.config.maxSpeed = 50.0
delta.speed = 50.0
delta.move(1.0)
#Setup
setup_diff(sixc, energy)
setup_axis('gam', 0, 179)
setup_axis('delta', 0, 179)
setup_axis('delta', min=0)
setup_axis('phi', cut=-180.0)
setup_axis()
#Orientation
listub()
# Create a new ub calculation and set lattice parameters
newub('test')
setlat('cubic', 1, 1, 1, 90, 90, 90)
# Add 1st reflection (demonstrating the hardware adapter)
settings.hardware.wavelength = 1
c2th([1, 0, 0]) # energy from hardware
settings.hardware.position = 0, 60, 0, 30, 0, 0
addref([1, 0, 0])# energy and position from hardware
    # Add 2nd reflection (this time without the hardware adapter)
c2th([0, 1, 0], 12.39842)
addref([0, 1, 0], [0, 60, 0, 30, 0, 90], 12.39842)
# check the state
showub()
checkub()
#Constraints
con('qaz', 90)
con('a_eq_b')
con('mu', 0)
con()
#Motion
print hklci((0., 60., 0., 30., 0., 0.))
print hklca((1, 0, 0))
sixc.write([0, 60, 0, 30, 90, 0])
print "sixc=" , sixc.position
wavelength.write(1.0)
print "wavelength = ", wavelength.read()
lastub()
setu ([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
showref()
swapref(1,2)
hklwh()
hklsim([0.0,1.0,1.0])
hklmv([0.0,1.0,1.0])
#Scans
lscan(l, [sin], 1.0, 1.5, 0.1)
ascan([k,l], [sin], [1.0, 1.0], [1.2, 1.3], [0.1, 0.1], zigzag=True, parallel_positioning = False)
vector = [[1.0,1.0,1.0], [1.0,1.0,1.1], [1.0,1.0,1.2], [1.0,1.0,1.4]]
hklscan(vector, [sin, arr], 0.9) | paulscherrerinstitute/pshell | src/main/assembly/script/Lib/diffutils.py | Python | gpl-3.0 | 34,642 |
try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.forms import BooleanField
from django.utils.crypto import salted_hmac
from django.utils.hashcompat import md5_constructor
def security_hash(request, form, *args):
"""
Calculates a security hash for the given Form instance.
This creates a list of the form field names/values in a deterministic
order, pickles the result with the SECRET_KEY setting, then takes an md5
hash of that.
"""
import warnings
warnings.warn("security_hash is deprecated; use form_hmac instead",
PendingDeprecationWarning)
data = []
for bf in form:
# Get the value from the form data. If the form allows empty or hasn't
# changed then don't call clean() to avoid trigger validation errors.
if form.empty_permitted and not form.has_changed():
value = bf.data or ''
else:
value = bf.field.clean(bf.data) or ''
if isinstance(value, basestring):
value = value.strip()
data.append((bf.name, value))
data.extend(args)
data.append(settings.SECRET_KEY)
# Use HIGHEST_PROTOCOL because it's the most efficient. It requires
# Python 2.3, but Django requires 2.4 anyway, so that's OK.
pickled = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)
return md5_constructor(pickled).hexdigest()
def form_hmac(form):
"""
Calculates a security hash for the given Form instance.
"""
data = []
for bf in form:
# Get the value from the form data. If the form allows empty or hasn't
# changed then don't call clean() to avoid trigger validation errors.
if form.empty_permitted and not form.has_changed():
value = bf.data or ''
else:
value = bf.field.clean(bf.data) or ''
if isinstance(value, basestring):
value = value.strip()
data.append((bf.name, value))
pickled = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)
key_salt = 'django.contrib.formtools'
return salted_hmac(key_salt, pickled).hexdigest()
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/contrib/formtools/utils.py | Python | bsd-3-clause | 2,158 |
#!/usr/bin/env python
import os
import re
from collections import OrderedDict
def read_sample_name(line_iter, clean_fn, program_name):
"""
Consumes lines from the provided line_iter and parses those lines
as a header for the picard base distribution file. The header
file is assumed to contain a line with both 'INPUT' and
'BaseDistributionByCycle'.
If the header parses correctly, the sample name is returned. If
the header does not parse correctly, None is returned.
"""
try:
while True:
new_line = next(line_iter)
new_line = new_line.strip()
if program_name in new_line and "INPUT" in new_line:
# Pull sample name from input
fn_search = re.search(r"INPUT=?\s*(\[?[^\s]+\]?)", new_line, flags=re.IGNORECASE)
if fn_search:
s_name = os.path.basename(fn_search.group(1).strip("[]"))
s_name = clean_fn(s_name)
return s_name
except StopIteration:
return None
def read_histogram(self, program_key, program_name, headers, formats):
"""
Reads a Picard HISTOGRAM file.
Args:
self: the Picard QC module
program_key: the key used to find the program (ex. picard/quality_by_cycle)
program_name: the program key in the header to find the I/INPUT line
headers: the list of expected headers for the histogram
formats: the list of methods to apply to re-format each field (on a given row)
"""
all_data = OrderedDict()
assert len(formats) == len(headers)
# Go through logs and find Metrics
for f in self.find_log_files(program_key, filehandles=True):
self.add_data_source(f, section="Histogram")
lines = iter(f["f"])
# read through the header of the file to obtain the
# sample name
clean_fn = lambda n: self.clean_s_name(n, f)
s_name = read_sample_name(lines, clean_fn, program_name)
if s_name is None:
continue
sample_data = OrderedDict()
try:
# skip to the histogram
line = next(lines)
while not line.startswith("## HISTOGRAM"):
line = next(lines)
# check the header
line = next(lines)
if headers != line.strip().split("\t"):
continue
# slurp the data
line = next(lines).rstrip()
while line:
fields = line.split("\t")
assert len(fields) == len(headers)
for i in range(len(fields)):
fields[i] = formats[i](fields[i])
sample_data[fields[0]] = OrderedDict(zip(headers, fields))
line = next(lines).rstrip()
except StopIteration:
pass
# append the data
if sample_data:
all_data[s_name] = sample_data
data = self.ignore_samples(all_data)
# Write data to file
self.write_data_file(data, "picard_histogram")
return data
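# Illustrative call from a Picard submodule (the module key, program name, headers and formats are
# assumptions and must match the metrics file actually being parsed):
#   data = read_histogram(self, "picard/quality_by_cycle", "MeanQualityByCycle",
#                         ["CYCLE", "MEAN_QUALITY"], [int, float])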
| ewels/MultiQC | multiqc/modules/picard/util.py | Python | gpl-3.0 | 3,082 |
# encoding=utf-8
__author__ = 'Q-Whai'
# Blog: http://blog.csdn.net/lemon_tree12138
# 2016/2/25
# version: 0.0.1
| William-Hai/SimpleDemo-python | enum/__init__.py | Python | gpl-3.0 | 115 |
class Address(object):
class InvalidFormat(Exception):
pass
@staticmethod
def sanity_check(host, port):
if not isinstance(host, str):
raise Address.InvalidFormat('Host must be a string, got %s' % host)
if not isinstance(port, (int, long)):
raise Address.InvalidFormat('Port must be an integer, got %s' % port)
if port <= 0:
raise Address.InvalidFormat('Port must be a positive integer, got %s' % port)
@staticmethod
def from_string(*args, **kw):
if kw or len(args) != 1 or not isinstance(args[0], str) or not len(args[0].split(':')) == 2:
raise Address.InvalidFormat('from_string expects "host:port" string.')
host, port = args[0].split(':')
try:
port = int(port)
except ValueError:
raise Address.InvalidFormat('Port must be an integer, got %s' % port)
Address.sanity_check(host, port)
return Address(host, port)
@staticmethod
def from_pair(*args, **kw):
if kw or len(args) != 2 or not isinstance(args[0], str) or not isinstance(args[1], (int, long)):
raise Address.InvalidFormat('from_pair expects host, port as input!')
Address.sanity_check(args[0], args[1])
return Address(args[0], args[1])
@staticmethod
def from_tuple(*args, **kw):
if kw or len(args) != 1 or len(args[0]) != 2:
raise Address.InvalidFormat('from_tuple expects (host, port) tuple as input!')
host, port = args[0]
Address.sanity_check(host, port)
return Address(host, port)
@staticmethod
def from_address(*args, **kw):
if kw or len(args) != 1 or not isinstance(args[0], Address):
raise Address.InvalidFormat('from_address expects an address as input!')
return Address(args[0].host, args[0].port)
@staticmethod
def parse(*args, **kw):
for parser in [Address.from_string, Address.from_pair, Address.from_address, Address.from_tuple]:
try:
return parser(*args, **kw)
except Address.InvalidFormat:
continue
raise Address.InvalidFormat('Could not parse input: args=%s kw=%s' % (
repr(args), repr(kw)))
def __init__(self, host, port):
self._host = host
self._port = port
@property
def host(self):
return self._host
@property
def port(self):
return self._port
| foursquare/commons-old | src/python/twitter/common/rpc/address.py | Python | apache-2.0 | 2,255 |
#!/usr/bin/env python3
import sys
import ptnet.net
if __name__ == '__main__' :
n = ptnet.net.Net (True)
n.read (sys.stdin, 'pt1')
n.stubbornify ()
n.cont2plain ()
n.write (sys.stdout, 'pt1')
# vi:ts=4:sw=4:et:
| cesaro/cunf | scripts/stubbornify.py | Python | gpl-3.0 | 233 |
## @package csnLinuxCommon
# Definition of LinuxCommon csnCompiler.Compiler.
# \ingroup compiler
import csnCompiler
import platform
import csnUtility
class LinuxCommon(csnCompiler.Compiler):
""" Abstract Linux compiler. """
def __init__(self):
csnCompiler.Compiler.__init__(self)
#self.basicFields.append("kdevelopProjectFolder")
def GetCompileFlags(self):
return ["-fPIC"]
def IsForPlatform(self, _WIN32, _NOT_WIN32):
return (((not csnUtility.IsWindowsPlatform()) and _NOT_WIN32) # Unix match
or (csnUtility.IsWindowsPlatform() and _WIN32) # Cygwin match
or (not _WIN32 and not _NOT_WIN32)) # Nothing demanded
def GetOutputSubFolder(self, _configuration = "${CMAKE_CFG_INTDIR}"):
"""
Returns the folder where the compiler places binaries for _configuration.
        The default value for _configuration returns the output folder of the current configuration
        for storing binaries.
"""
return "bin/%s" % (_configuration)
def GetBuildSubFolder(self, _projectType, _projectName):
return "%s/%s/%s" % (_projectType, self._configurationName, _projectName)
def GetThirdPartySubFolder(self):
return self._configurationName
def GetThirdPartyCMakeParameters(self):
return [
"-D", "CMAKE_BUILD_TYPE=" + self._configurationName,
"-D", "CMAKE_C_FLAGS=-fPIC",
"-D", "CMAKE_CXX_FLAGS=-fPIC"
]
def GetProjectCMakeParameters(self):
return []
def GetAllowedConfigurations(self):
return ["Debug", "Release"]
def TargetIs32Bits(self):
return platform.architecture()[0]=="32bit"
def TargetIs64Bits(self):
return platform.architecture()[0]=="64bit"
def TargetIsMac(self):
return csnUtility.IsMacPlatform()
def TargetIsLinux(self):
return csnUtility.IsLinuxPlatform()
| msteghofer/CSnake | src/csnLinuxCommon.py | Python | bsd-3-clause | 1,983 |
import os
import sys
import pkg_resources
from path_helpers import path
import jinja2
config_template = '''
data_dir = .
[plugins]
# directory containing microdrop plugins
directory = plugins
[microdrop.gui.experiment_log_controller]
notebook_directory = notebooks
[microdrop.gui.dmf_device_controller]
device_directory = devices
'''
launcher_template = '''
REM Change into [parent directory of batch file][1].
REM
REM [1]: http://stackoverflow.com/questions/16623780/how-to-get-windows-batchs-parent-folder
cd %~dp0
REM Launch MicroDrop
{{ py_exe }} -m microdrop.microdrop -c %~dp0microdrop.ini
'''
def parse_args(args=None):
'''Parses arguments, returns (options, args).'''
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Create portable MicroDrop settings '
'directory.')
parser.add_argument('output_dir', type=path)
args = parser.parse_args()
return args
def main(output_dir):
output_dir = path(output_dir)
if not output_dir.isdir():
output_dir.makedirs_p()
elif list(output_dir.files()):
raise IOError('Output directory exists and is not empty.')
config_path = output_dir.joinpath('microdrop.ini')
with config_path.open('wb') as output:
template = jinja2.Template(config_template)
config_str = template.render(output_dir=output_dir.name)
output.write(config_str)
py_exe = path(sys.executable).abspath()
launcher_path = output_dir.joinpath('microdrop.bat')
with launcher_path.open('wb') as output:
template = jinja2.Template(launcher_template)
launcher_str = template.render(working_dir=output_dir.abspath(),
py_exe=py_exe,
config_path=config_path.abspath())
output.write(launcher_str)
print 'Start MicroDrop with the following:'
print '\n %s' % launcher_path.abspath()
if __name__ == '__main__':
args = parse_args()
main(args.output_dir)
| wheeler-microfluidics/microdrop | microdrop/bin/create_portable_config.py | Python | bsd-3-clause | 2,107 |
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
import edi
from tests.libtesting.fixtures.configfiles import empty_config_file
def test_command_line_interface_setup(empty_config_file):
parser = edi._setup_command_line_interface()
assert 'embedded development infrastructure' in parser.description
args = parser.parse_args(['-v', 'lxc', 'configure', 'some-container', empty_config_file])
assert args.command_name == 'lxc'
assert str(args.config_file.name) == str(empty_config_file)
assert args.container_name == 'some-container'
assert args.sub_command_name == 'configure'
assert args.verbose == True
| erickeller/edi | tests/test_command_line_interface.py | Python | lgpl-3.0 | 1,344 |
"""
dj-stripe TransferReversal model tests
"""
from copy import deepcopy
from unittest.mock import PropertyMock, patch
import pytest
from django.test.testcases import TestCase
from djstripe.models import TransferReversal
from djstripe.models.connect import Transfer
from djstripe.settings import djstripe_settings
from . import (
FAKE_BALANCE_TRANSACTION_II,
FAKE_STANDARD_ACCOUNT,
FAKE_TRANSFER,
FAKE_TRANSFER_WITH_1_REVERSAL,
IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
AssertStripeFksMixin,
)
pytestmark = pytest.mark.django_db
class TestTransferReversalStr:
@patch.object(Transfer, "_attach_objects_post_save_hook")
@patch(
"stripe.Account.retrieve",
return_value=deepcopy(FAKE_STANDARD_ACCOUNT),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
@patch(
"stripe.BalanceTransaction.retrieve",
return_value=deepcopy(FAKE_BALANCE_TRANSACTION_II),
autospec=True,
)
@patch(
"stripe.Transfer.retrieve_reversal",
autospec=True,
return_value=deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL),
)
def test___str__(
self,
transfer_reversal_retrieve_mock,
balance_transaction_retrieve_mock,
account_retrieve_mock,
transfer__attach_object_post_save_hook_mock,
):
transfer_reversal = TransferReversal.sync_from_stripe_data(
deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0])
)
assert str(f"{transfer_reversal.transfer}") == str(transfer_reversal)
class TestTransfer(AssertStripeFksMixin, TestCase):
@patch.object(Transfer, "_attach_objects_post_save_hook")
@patch(
"stripe.Account.retrieve",
return_value=deepcopy(FAKE_STANDARD_ACCOUNT),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
@patch(
"stripe.BalanceTransaction.retrieve",
return_value=deepcopy(FAKE_BALANCE_TRANSACTION_II),
autospec=True,
)
@patch(
"stripe.Transfer.retrieve_reversal",
autospec=True,
return_value=deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0]),
)
def test_sync_from_stripe_data(
self,
transfer_reversal_retrieve_mock,
balance_transaction_retrieve_mock,
account_retrieve_mock,
transfer__attach_object_post_save_hook_mock,
):
transfer_reversal = TransferReversal.sync_from_stripe_data(
deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0])
)
balance_transaction_retrieve_mock.assert_not_called()
transfer_reversal_retrieve_mock.assert_not_called()
assert (
transfer_reversal.balance_transaction.id
== FAKE_TRANSFER["balance_transaction"]["id"]
)
assert (
transfer_reversal.transfer.id
== FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0]["transfer"]["id"]
)
self.assert_fks(transfer_reversal, expected_blank_fks="")
@patch.object(Transfer, "_attach_objects_post_save_hook")
@patch(
"stripe.Account.retrieve",
return_value=deepcopy(FAKE_STANDARD_ACCOUNT),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
@patch(
"stripe.BalanceTransaction.retrieve",
return_value=deepcopy(FAKE_BALANCE_TRANSACTION_II),
autospec=True,
)
@patch(
"stripe.Transfer.retrieve_reversal",
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
return_value=deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL),
)
def test_api_retrieve(
self,
transfer_reversal_retrieve_mock,
balance_transaction_retrieve_mock,
account_retrieve_mock,
transfer__attach_object_post_save_hook_mock,
):
transfer_reversal = TransferReversal.sync_from_stripe_data(
deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0])
)
transfer_reversal.api_retrieve()
transfer_reversal_retrieve_mock.assert_called_once_with(
id=FAKE_TRANSFER_WITH_1_REVERSAL["id"],
nested_id=FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0]["id"],
api_key=djstripe_settings.STRIPE_SECRET_KEY,
expand=["balance_transaction", "transfer"],
stripe_account=None,
)
@patch.object(Transfer, "_attach_objects_post_save_hook")
# we are returning any value for the Transfer.objects.get as we only need to avoid the Transfer.DoesNotExist error
@patch(
"djstripe.models.connect.Transfer.objects.get",
return_value=deepcopy(FAKE_TRANSFER),
)
@patch(
"stripe.Transfer.create_reversal",
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
return_value=deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL),
)
def test__api_create(
self,
transfer_reversal_create_mock,
transfer_get_mock,
transfer__attach_object_post_save_hook_mock,
):
TransferReversal._api_create(
id=FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0]["transfer"]["id"]
)
transfer_reversal_create_mock.assert_called_once_with(
id=FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0]["transfer"]["id"],
api_key=djstripe_settings.STRIPE_SECRET_KEY,
)
@patch(
"stripe.Transfer.list_reversals", autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED
)
def test_api_list(self, transfer_reversal_list_mock):
p = PropertyMock(return_value=deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL))
type(transfer_reversal_list_mock).auto_paging_iter = p
TransferReversal.api_list(
id=FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0]["transfer"]["id"]
)
transfer_reversal_list_mock.assert_called_once_with(
id=FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0]["transfer"]["id"],
api_key=djstripe_settings.STRIPE_SECRET_KEY,
)
def test_is_valid_object(self):
assert TransferReversal.is_valid_object(
deepcopy(FAKE_TRANSFER_WITH_1_REVERSAL["reversals"]["data"][0])
)
| pydanny/dj-stripe | tests/test_transfer_reversal.py | Python | bsd-3-clause | 6,148 |
#!/usr/bin/env python
#coding:utf-8
import root
import j
| tonghuashuai/ODockerUI | view/_url.py | Python | mit | 60 |
class JWTValidationError(Exception):
pass
class JWTAudienceError(JWTValidationError):
pass
| uc-cdis/cdis-python-utils | cdispyutils/auth/errors.py | Python | apache-2.0 | 101 |
from datetime import datetime
from hashlib import sha1
from xml.sax.saxutils import escape as xml_escape
from .common import CHARSET
def xml_template(template, data):
"""
return template % data but with proper escaping
"""
escaped_data = {}
for key, value in data.items():
escaped_data[key] = xml_escape(value)
return template % escaped_data
class OFXExporter(object):
HEADER = """ENCODING:UTF-8
OFXHEADER:100
DATA:OFXSGML
VERSION:211
SECURITY:NONE
CHARSET:UTF-8
COMPRESSION:NONE
OLDFILEUID:NONE
NEWFILEUID:NONE
<OFX>
<BANKMSGSRSV1>
<STMTTRNRS>
<TRNUID>0</TRNUID>
<STMTRS>
<CURDEF>EUR</CURDEF>
<BANKACCTFROM>
<BANKID>info.hoffmann-christian.gnucash-export</BANKID>
<ACCTID>%(acctid)s</ACCTID>
<ACCTTYPE>CHECKING</ACCTTYPE>
</BANKACCTFROM>
<BANKTRANLIST>
<DTSTART>%(dtstart)s</DTSTART>
<DTEND>%(dtend)s</DTEND>"""
TRANSACTION = """
<STMTTRN>
<TRNTYPE>%(trntype)s</TRNTYPE>
<DTPOSTED>%(dtposted)s</DTPOSTED>
<DTUSER>%(dtuser)s</DTUSER>
<TRNAMT>%(trnamt)s</TRNAMT>
<FITID>%(fitid)s</FITID>
<NAME>%(name)s</NAME>
<MEMO>%(name)s</MEMO>
</STMTTRN>"""
FOOTER = """
</BANKTRANLIST>
</STMTRS>
</STMTTRNRS>
</BANKMSGSRSV1>
</OFX>
"""
def __init__(self):
self.transactions = []
def set_account(self, name):
self.account_name = name
self.account_id = sha1(name).hexdigest()
def add_transaction(self, guid, unixtime, memo, value):
self.transactions.append((guid, unixtime, memo, value))
def unixtime2ofx(self, unixtime):
dt = datetime.fromtimestamp(unixtime)
return dt.strftime("%Y%m%d%H%M%S")
def generate(self, reverse=False):
earliest_tx = None
latest_tx = None
transactions = ""
for guid, unixtime, memo, amount in self.transactions:
if reverse:
if amount[0] == '-':
amount = amount[1:]
else:
amount = '-' + amount
ofxdate = self.unixtime2ofx(unixtime)
transaction_type = "CREDIT"
if amount[0] == '-':
transaction_type = "DEBIT"
transactions += xml_template(self.TRANSACTION, {
'trntype': transaction_type,
'fitid': guid,
'dtposted': ofxdate,
'dtuser': ofxdate,
'trnamt': amount,
'name': memo
})
if not earliest_tx or earliest_tx > unixtime:
earliest_tx = unixtime
if not latest_tx or latest_tx < unixtime:
latest_tx = unixtime
header = xml_template(self.HEADER, {
'acctid': self.account_id,
'dtstart': self.unixtime2ofx(earliest_tx),
'dtend': self.unixtime2ofx(latest_tx)
})
footer = self.FOOTER % {
}
return (header + transactions + footer).encode(CHARSET)
| hoffie/gnucash-ofx-export | gce/ofx.py | Python | gpl-2.0 | 3,118 |
import datetime
import logging
from random import normalvariate
from django.core.management.base import BaseCommand
from dbag import autodiscover, dbag_manager
from dbag.models import Metric, DataSample
logger = logging.getLogger(__name__)
class Command(BaseCommand):
args = ''
help = 'Fake 60 days worth of data for all metrics'
def handle(self, *args, **kwargs):
autodiscover()
metrics = Metric.objects.filter(do_collect=True)
logger.info("Faking 60 days of data samples for %s Metrics", metrics.count())
for metric in metrics:
self._get_samples_for_metric(dbag_manager, metric)
def _get_samples_for_metric(self, manager, metric):
"""
Get 60 days worth of samples going back in time for the given metric.
"""
utc_now = datetime.datetime.utcnow()
one_day = datetime.timedelta(days=1)
start_day = utc_now - datetime.timedelta(days=60)
previous_sample = None
day_counter = utc_now
while day_counter > start_day:
previous_sample = self._fake_sample(manager, metric, day_counter, previous_sample)
day_counter = day_counter - one_day
def _fake_sample(self, manager, metric, utc_timestamp, seed_sample):
"""
If a sample for the given day doesn't exist, fake it.
"""
existing_sample = metric.get_sample_for_day(utc_timestamp)
if existing_sample:
return existing_sample
if seed_sample is None:
# Let's try collecting an actual sample to give our data some meaning
return metric.collect_data_sample(manager)
new_value = normalvariate(seed_sample.value, 1)
return DataSample.objects.create(
metric=metric, utc_timestamp=utc_timestamp, value=new_value)
| winhamwr/dbag | dbag/management/commands/dbag_fake_metrics.py | Python | mit | 1,830 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate and display thumbnails and selection widgets for dealing with image galleries"""
# Copyright 2004 St James Software
#
# This file is part of jToolkit.
#
# jToolkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# jToolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jToolkit; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
from jToolkit.widgets import table
from jToolkit.widgets import widgets
from jToolkit.web import server
from jToolkit import attachments
def ImportPIL():
"""Import PIL"""
import Image
Image.preinit()
return Image
imageformats = {'jpg': 'image/jpeg', 'jpeg': 'image/jpeg', 'png': 'image/png', 'gif': 'image/gif', 'bmp': 'image/bmp'}
# TODO: move this out of being a fully-blown server class
def ThumbnailServer(baseclass):
class ThumbnailServer(baseclass):
"""the Server that serves the thumbnail pages"""
def opensharedfolder(self, folderpath):
"""opens the shared folder and remembers the direct path"""
if not hasattr(self, "sharedfolders"):
self.sharedfolders = {}
if folderpath in self.sharedfolders:
return self.sharedfolders[folderpath]
else:
import os
logondetails, folder = folderpath.split("@", 1)
if "/" in logondetails:
username, password = logondetails.split("/", 1)
os.system("net use %s %s /user:%s" % (folder, password, username))
else:
os.system("net use %s %s" % (folder, logondetails))
self.sharedfolders[folderpath] = folder
return folder
def getinstancefolder(self, foldername):
"""returns a folder name configured on the instance"""
if hasattr(self.instance, foldername):
folder = getattr(self.instance, foldername)
if "@" in folder:
folder = self.opensharedfolder(folder)
return folder
else:
return None
def getpage(self, pathwords, session, argdict):
"""return a page that will be sent to the user"""
gallerydir = self.getinstancefolder("gallerydir")
attachmentsdir = self.getinstancefolder("attachmentsdir")
if pathwords == ['gallery.htm']:
gtable = ThumbnailSelector(gallerydir, 'gallery', columns=3)
submit = widgets.Input({'value':'finished','type':'submit'})
return widgets.Form([gtable, submit])
elif pathwords == ['selectimage.htm']:
return ThumbnailSelectorPage(session, gallerydir, 'gallery', columns=3)
elif pathwords[:1] == ['gallery']:
pathwords = pathwords[1:]
if KnownImageType(pathwords[-1]):
if len(pathwords) >= 2 and pathwords[-2] == 'thumbnails':
imagefilename = pathwords[:-2] + [pathwords[-1]]
imagefilename = os.path.join(gallerydir, *imagefilename)
ThumbFromImage(imagefilename)
pic = widgets.PlainContents(None)
pic.sendfile_path = os.path.join(gallerydir, *pathwords)
pic.content_type = getcontenttype(pathwords[-1])
return pic
if pathwords[-1].endswith(".html"):
imagefilename = pathwords[-1]
imagefilename = imagefilename[:imagefilename.rfind(".html")]
image = widgets.Image(imagefilename)
backlink = "javascript:history.back(1)"
backtext = widgets.Link(backlink, "Back")
imagepage = widgets.Page(imagefilename, [backtext, "<br/>", image])
return imagepage
elif pathwords[:1] == ['attachment']:
pathwords = pathwords[1:]
if len(pathwords) >= 2 and pathwords[-2] == 'thumbnails':
imagefilename = pathwords[:-2] + [pathwords[-1]]
imagefilename = os.path.join(attachmentsdir, *imagefilename)
ThumbFromImage(imagefilename)
imagename = argdict.get('name', '')
pic = widgets.PlainContents(None)
pic.sendfile_path = os.path.join(attachmentsdir, *pathwords)
pic.content_type = getcontenttype(imagename)
return pic
return super(ThumbnailServer, self).getpage(pathwords, session, argdict)
return ThumbnailServer
class ThumbnailGallery(table.TableLayout):
"""
a gallery of thumbnails
"""
def __init__(self, folder, baseurl, rows=None, columns=None):
"""
folder is the path of the images
    baseurl is the url to serve the images on the server (absolute or relative to the current page)
if rows is defined then the appropriate number of columns will be made
if columns is defined then the appropriate number of rows will be made
"""
table.TableLayout.__init__(self)
#first create the thumbnails
thumbs = ThumbsFromFolder(folder)
if rows is not None:
columns = len(thumbs)/rows
if columns is not None:
rows = len(thumbs)/columns
thumbstack = thumbs
for row in range(rows):
for col in range(columns):
image,thumb = thumbstack.pop()
#assemble this part of the path into the right format for an image src
thumbsrc = '/'.join([baseurl] + list(os.path.split(thumb)))
imagesrc = '/'.join([baseurl] + list(os.path.split(image)))
pic = widgets.Image(thumbsrc)
link = widgets.Link(imagesrc, [pic, image], {'target':'jlogbookimagepreview'})
self.setcell(row,col,table.TableCell(link))
class ThumbnailSelector(table.TableLayout):
"""
a gallery widget where thumbnails can be selected
"""
def __init__(self, folder, baseurl, rows=None, columns=None):
"""
folder is the path of the images
    baseurl is the url to serve the images on the server (absolute or relative to the current page)
if rows is defined then the appropriate number of columns will be made
if columns is defined then the appropriate number of rows will be made
"""
table.TableLayout.__init__(self, {'border':1,
'style':"border-collapse: collapse;",
                                      'width':'100%',
'cellspacing':10,
'cellpadding':10})
#first create the thumbnails, and make them relative to the folder
if not folder.endswith(os.sep):
folder += os.sep
thumbs = [(image.replace(folder, "", 1), thumb.replace(folder, "", 1)) for image,thumb in ThumbsFromFolder(folder)]
if rows is not None:
columns = (len(thumbs)+rows-1)/rows
if columns is not None:
rows = (len(thumbs)+columns-1)/columns
checkboxcounter = 0
for row in range(rows):
for col in range(columns):
checkboxcounter += 1
thumbid = row*columns+col
if thumbid >= len(thumbs):
continue
image,thumb = thumbs[thumbid]
#assemble this part of the path into the right format for an image src
thumbsrc = '/'.join([baseurl] + list(os.path.split(thumb)))
imagesrc = '/'.join([baseurl] + list(os.path.split(image)))
pic = widgets.Image(thumbsrc,{'border':1})
link = widgets.Link(imagesrc, [pic, image], {'target':'jlogbookimagepreview'})
checkbox = widgets.Input({'type':'checkbox',
'value':"%s"%image,
'name':"checkbox%i"%checkboxcounter,
'thumbsrc':thumbsrc})
self.setcell(row,col,table.TableCell([checkbox,link],{'valign':'bottom'}))
class ThumbnailSelectorPage(widgets.Page):
"""
  This creates a page that can be opened from another window to select thumbnails and return them to the main form
"""
def __init__(self, session, folder, baseurl, rows=None, columns=None):
"""
constructs the thumbnail selector page
"""
selector = ThumbnailSelector(folder, baseurl, rows, columns)
javascript = widgets.Script("text/javascript", """
function updatemainform(thumbnailform)
{
var attachmentnum=0;
for (i=0;i<thumbnailform.elements.length;i++)
{
if (thumbnailform.elements[i].checked)
{
attachmentnum += 1;
//for each checked item create a new hidden input element in the calling form, just above the button
//note: selectbutton was stored in this window in the onclick method of the button
var checkbox = thumbnailform.elements[i];
var imagename = checkbox.value;
var thumbsrc = checkbox.getAttribute('thumbsrc');
var newhtml = "<input type=hidden"+
" name='"+(window.categoryname + attachmentnum)+
"' value='" + imagename + "'>";
//now insert a thumbnail (we get the src for the thumbnail from the checkbox)
newhtml += "<img src='" + thumbsrc + "'> " + imagename + " ";
//append a link to remove the paragraph containing the thumbnail
newhtml += "<a href='#' onclick='this.parentNode.parentNode.removeChild(this.parentNode);return false'>" +
"%s</a>";
selectbutton.addattachment(newhtml);
}
}
}
""" % session.localize("remove"))
submit = widgets.Input({'value':'Finished','type':'button','onclick':'updatemainform(this.form) ; window.close()'})
selectorform = widgets.Form([selector,
widgets.PlainContents('<br>'),
widgets.ContentWidget('center',submit)])
widgets.Page.__init__(self, contents=[javascript, selectorform])
class ImageAttachmentsWidget(attachments.MultiAttachmentsWidget):
"""
an attachments field that contains image and selects them from the thumbnail gallery
"""
def __init__(self, session, name, rowid, multiattachment, mode, folder):
#create a link that spawns file inputs
self.folder = folder
attachments.MultiAttachmentsWidget.__init__(self, session, name, rowid, multiattachment, mode)
def getlink(self, attachmentnum, attachment):
"""
gets the link to the attachment
(overrides MultiAttachmentsWidget)
"""
attachmentlink = self.multiattachment.geturl(self.rowid, self.name, attachmentnum)
#create the thumbnail
image = attachment.fullstoredpath()
folder = os.path.dirname(image)
if not folder.endswith(os.sep):
folder += os.sep
thumb = getthumbnailpath(image)
image = image.replace(folder, "", 1)
thumb = thumb.replace(folder, "", 1)
thumbsrc = '/'.join(['attachment'] + list(os.path.split(thumb))) + '?name=%s' % attachment.filename
pic = widgets.Image(thumbsrc,{'border':1})
link = widgets.Link(attachmentlink, [pic,attachment.filename], {'target':'attachmentpage'})
if self.mode in ("add", "modify"):
removefield = widgets.Input({'type':'hidden', 'name': "%s.remove%d" % (self.name, attachmentnum), 'value':''})
javascriptcall = "MarkRemoved(this,\'%s\'); return false" % (self.name)
removelink = self.getremovelink()
link = widgets.Paragraph([link, removefield, removelink])
return link
def buildwidget(self):
"""gets the contents of the widget..."""
links = self.getlinks()
if self.mode in ("add", "modify"):
javascript = widgets.Script("text/javascript", '', newattribs={'src':'js/attachments.js'})
addlink = self.getselectbutton()
return [javascript, links, addlink]
elif self.mode == "view":
if len(links):
thumbnaillinks = []
for link in links:
thumbnaillinks.extend([link,"<br>"])
return thumbnaillinks
else:
return self.session.localize('(no attachment)')
elif self.mode == "filter":
options = [('', ''),
('no', self.session.localize('No')),
('yes', self.session.localize('Yes'))]
return widgets.Select({'name': self.name}, options)
def getselectbutton(self):
"""returns a button that lets the user select the images to be attached..."""
javascript = widgets.Script("text/javascript","""
function addattachment(attachmenthtml)
{
var newparagraph = document.createElement("p");
newparagraph.innerHTML = attachmenthtml;
this.parentNode.insertBefore(newparagraph, this);
}
function PopupThumbnailSelectorPage(selectbutton){
selectbutton.addattachment = addattachment;
selectorwindow = window.open("selectimage.htm", "selectimages", "width=350,height=500,scrollbars=1");
selectorwindow.selectbutton = selectbutton;
selectorwindow.categoryname = \"%s\";
}
""" % self.name)
input = widgets.Link('#', 'add images',
{'onclick':'PopupThumbnailSelectorPage(this);return false'})
layer = widgets.Division(input)
return widgets.PlainContents([javascript,layer])
def getext(filename):
"""returns the filename's extension without the ."""
return os.path.splitext(filename)[1].replace(os.extsep, '', 1)
def KnownImageType(image):
return getext(image).lower() in imageformats
def getcontenttype(image):
return imageformats[getext(image).lower()]
def ThumbsFromFolder(folder):
"""
Create a collection of thumbnail images from the images in folder
Returns a list of (image,thumbnail) pairs where image and thumbnail are
  paths to the original image and the created thumbnail
"""
#make sure the folder exists
if not os.path.isdir(folder):
raise IOError("Image Folder is not a valid directory: %s" % folder)
#read images in the folder into a list
images = [os.path.join(folder,image) for image in os.listdir(folder) if KnownImageType(image)]
thumbs = []
for image in images:
thumbs.append((image, getthumbnailpath(image)))
return thumbs
def getthumbnailpath(imagepath, thumbnaildir=None):
"""find the path the thumbnail should be in..."""
if thumbnaildir is None:
thumbnaildir = os.path.join(os.path.dirname(imagepath), "thumbnails")
return os.path.join(thumbnaildir, os.path.basename(imagepath))
def ThumbFromImage(image, size = (50,50), thumbnaildir=None):
"""
Create a thumbnail from an image path
Thumbnails will be saved in basedir/thumbnails if no other is specified
Returns a (image,thumbnail) pair where image and thumbnail are
  paths to the original image and the created thumbnail
"""
Image = ImportPIL()
if not os.path.exists(image):
raise IOError("image does not exist: %s" % image)
#make sure there's a folder for the thumbnail
if thumbnaildir is None:
thumbnaildir = os.path.join(os.path.dirname(image), "thumbnails")
if not os.path.isdir(thumbnaildir):
os.mkdir(thumbnaildir)
thumbnail = os.path.join(thumbnaildir, os.path.basename(image))
  # check whether an up-to-date thumbnail already exists for this image
  if os.path.exists(thumbnail):
    imagemtime = os.stat(image)[os.path.stat.ST_MTIME]
    thumbmtime = os.stat(thumbnail)[os.path.stat.ST_MTIME]
if imagemtime <= thumbmtime:
return thumbnail
#resize and save the image
im = Image.open(image)
im.thumbnail(size,Image.ANTIALIAS)
im.save(thumbnail,im.format)
return thumbnail
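# Usage sketch (paths are hypothetical): create a 50x50 thumbnail next to an image
# and get back the thumbnail path, or build (image, thumbnail) pairs for a folder:
#   thumb = ThumbFromImage("/data/gallery/sunset.jpg")
#   pairs = ThumbsFromFolder("/data/gallery")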
| cc-archive/jtoolkit | jToolkit/widgets/thumbgallery.py | Python | gpl-2.0 | 15,507 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferCortex
Gaffer.Metadata.registerNode(
GafferCortex.ObjectReader,
"description",
"""
Loads objects from disk using the readers provided by
the Cortex project. In most cases it is preferable to
use a dedicated SceneReader or ImageReader instead of
this node.
""",
plugs = {
"fileName" : [
"description",
"""
The file to load.
""",
"nodule:type", "",
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"path:leaf", True,
"path:valid", True,
"path:bookmarks", "cortex",
"fileSystemPath:extensions", " ".join( IECore.Reader.supportedExtensions() ),
"fileSystemPath:extensionsLabel", "Show only supported files",
],
"out" : [
"description",
"""
The loaded object. Note that the ObjectToScene node may
be used to convert this for use with the GafferScene
module.
""",
"plugValueWidget:type", "",
]
}
)
| hradec/gaffer | python/GafferCortexUI/ObjectReaderUI.py | Python | bsd-3-clause | 2,749 |
# (c) Nelen & Schuurmans, see LICENSE.rst.
from qgis.core import QgsProject
from ThreeDiToolbox.tool_commands.custom_command_base import CustomCommandBase
from ThreeDiToolbox.utils import constants
from ThreeDiToolbox.utils.predictions import Predictor
from ThreeDiToolbox.utils.user_messages import messagebar_message
from ThreeDiToolbox.utils.user_messages import pop_up_question
from ThreeDiToolbox.views.modify_schematisation_dialogs import (
PredictCalcPointsDialogWidget,
)
import logging
logger = logging.getLogger(__name__)
class CustomCommand(CustomCommandBase):
"""
command to predict the threedicore calculation points based on
calculation type, geometry and the attribute dist_calc_points
The results will be written to the database table v2_calculation_point.
When running the command, the table must be empty!
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.iface = kwargs.get("iface")
self.ts_datasources = kwargs.get("ts_datasources")
self.tool_dialog_widget = None
def run(self):
self.show_gui()
def show_gui(self):
self.tool_dialog_widget = PredictCalcPointsDialogWidget(command=self)
self.tool_dialog_widget.exec_() # block execution
def run_it(self, db_set, db_type):
"""
:param db_set: dict of database settings. Expected keywords:
'host': '',
'port': '',
'name': '',
'username': '',
'password': '',
'schema': '',
'database': '',
'db_path': ,
:param db_type: 'spatialite' or 'postgres'
"""
predictor = Predictor(db_type)
uri = predictor.get_uri(**db_set)
calc_pnts_lyr = predictor.get_layer_from_uri(
uri, constants.TABLE_NAME_CALC_PNT, "the_geom"
)
self.connected_pnts_lyr = predictor.get_layer_from_uri(
uri, constants.TABLE_NAME_CONN_PNT, "the_geom"
)
predictor.start_sqalchemy_engine(db_set)
if not self.fresh_start(predictor):
return
default_epsg_code = 28992
epsg_code = predictor.get_epsg_code() or default_epsg_code
logger.info(
"[*] Using epsg code {} to build the calc_type_dict".format(epsg_code)
)
predictor.build_calc_type_dict(epsg_code=epsg_code)
transform = None
# spatialites are in WGS84 so we need a transformation
if db_type == "spatialite":
transform = "{epsg_code}:4326".format(epsg_code=epsg_code)
succces, features = predictor.predict_points(
output_layer=calc_pnts_lyr, transform=transform
)
if succces:
msg = "Predicted {} calculation points".format(len(features))
level = 3
QgsProject.instance().addMapLayer(calc_pnts_lyr)
else:
msg = (
"Predicted calculation points failed! "
'Are you sure the table "v2_calculation_point" '
"is empty?".format(len(features))
)
level = 1
messagebar_message("Finished", msg, level=level, duration=12)
cp_succces, cp_features = predictor.fill_connected_pnts_table(
calc_pnts_lyr=calc_pnts_lyr, connected_pnts_lyr=self.connected_pnts_lyr
)
if cp_succces:
cp_msg = "Created {} connected points template".format(len(cp_features))
cp_level = 3
QgsProject.instance().addMapLayer(self.connected_pnts_lyr)
else:
cp_msg = "Creating connected points failed!"
cp_level = 1
messagebar_message("Finished", cp_msg, level=cp_level, duration=12)
logger.info("Done predicting calcualtion points.\n" + msg)
def fresh_start(self, predictor):
"""
        Check whether we start off fresh or not. That is, if the
        calculation and connected points have been calculated before,
        the stale data will be removed from the database after
        the user has confirmed to do so.
:param predictor: utils.predictions.Predictor instance
:returns True if we start fresh. In this case all database
tables are empty. False otherwise
"""
fresh = True
are_empty = []
table_names = [constants.TABLE_NAME_CALC_PNT, constants.TABLE_NAME_CONN_PNT]
for tn in table_names:
are_empty.append(predictor.threedi_db.table_is_empty(tn))
if not all(are_empty):
fresh = False
question = (
"Calculation point and connected point tables are not "
"empty! Do you want to delete all their contents?"
)
if pop_up_question(question, "Warning"):
predictor.threedi_db.delete_from(constants.TABLE_NAME_CONN_PNT)
predictor.threedi_db.delete_from(constants.TABLE_NAME_CALC_PNT)
fresh = True
return fresh
| nens/threedi-qgis-plugin | tool_commands/predict_calc_points/command.py | Python | gpl-3.0 | 5,075 |
import os, sys
# - good = works
# - main = useful
# - extra = not very useful
# - bad = doesn't work
EXTRA = ['console', 'helloworld']
BAD = ['shoutcast']
def scan():
all = set(f for f in os.listdir('.') if os.path.isdir(f))
bad = set(BAD)
good = all - bad
extra = set(EXTRA)
main = good - extra
return locals()
plugins = scan()
def parse(argv):
if len(argv) == 1:
return plugins['all']
return plugins[argv[1]]
if __name__ == '__main__':
names = list(parse(sys.argv))
names.sort()
print((' '.join(names)))
| Zarokka/exaile | plugins/list.py | Python | gpl-2.0 | 568 |
from ctypes import *
from ctypes import util
from rubicon.objc import *
######################################################################
# FOUNDATION
foundation = cdll.LoadLibrary(util.find_library('Foundation'))
foundation.NSMouseInRect.restype = c_bool
foundation.NSMouseInRect.argtypes = [NSPoint, NSRect, c_bool]
# NSArray.h
NSArray = ObjCClass('NSArray')
NSMutableArray = ObjCClass('NSMutableArray')
# NSBundle.h
NSBundle = ObjCClass('NSBundle')
# NSCursor.h
NSCursor = ObjCClass('NSCursor')
# NSDictionary.h
NSDictionary = ObjCClass('NSDictionary')
# NSDocument.h
NSDocument = ObjCClass('NSDocument')
# NSDocumentController.h
NSDocumentController = ObjCClass('NSDocumentController')
# NSEvent.h
NSAlphaShiftKeyMask = 1 << 16
NSShiftKeyMask = 1 << 17
NSControlKeyMask = 1 << 18
NSAlternateKeyMask = 1 << 19
NSCommandKeyMask = 1 << 20
NSNumericPadKeyMask = 1 << 21
NSHelpKeyMask = 1 << 22
NSFunctionKeyMask = 1 << 23
NSDeviceIndependentModifierFlagsMask = 0xffff0000
# NSFileWrapper.h
NSFileWrapper = ObjCClass('NSFileWrapper')
# NSNumber.h
NSNumber = ObjCClass('NSNumber')
# NSSavePanel.h
NSSavePanel = ObjCClass('NSSavePanel')
NSFileHandlingPanelOKButton = 1
# NSOpenPanel.h
NSOpenPanel = ObjCClass('NSOpenPanel')
# NSScreen.h
NSScreen = ObjCClass('NSScreen')
# NSURL.h
NSURL = ObjCClass('NSURL')
# NSURLRequest.h
NSURLRequest = ObjCClass('NSURLRequest')
# NSFont.h
NSFont = ObjCClass('NSFont')
| pybee-attic/toga-cocoa | toga_cocoa/libs/foundation.py | Python | bsd-3-clause | 1,443 |
from django.contrib import admin # noqa
from junction.conferences import service
from .models import ScheduleItem, ScheduleItemType
@admin.register(ScheduleItem)
class SchduleItemAdmin(admin.ModelAdmin):
list_filter = ("type", "room")
def get_queryset(self, request):
qs = super(SchduleItemAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
moderators = service.list_conference_moderator(user=request.user)
return qs.filter(conference__in=[m.conference for m in moderators])
@admin.register(ScheduleItemType)
class SchduleItemTypeAdmin(admin.ModelAdmin):
pass
| pythonindia/junction | junction/schedule/admin.py | Python | mit | 648 |
#!/usr/bin/env python3
"""
There is a filter_gff3_by_id_list.py script, but it doesn't currently have support for
features which share IDs (like segmented CDS) and I need something quick. The use
case here is that you have a list of mRNA IDs and this will export them along with their
child and parent features.
WARNING: This script very much assumes a feature-type-sorted order where the gene appears
first, then mRNA, then any child features of that.
"""
import argparse
import os
import sys
import biocodegff
def main():
parser = argparse.ArgumentParser('Filter the genes of a GFF3 file by mRNA child IDs')
## output file to be written
parser.add_argument('-i', '--input_gff3', type=str, required=True, help='GFF3 file of source molecules' )
parser.add_argument('-l', '--id_list', type=str, required=True, help='List file of mRNA IDs to keep' )
parser.add_argument('-o', '--output_file', type=str, required=False, help='Optional output file path (else STDOUT)' )
args = parser.parse_args()
## output will either be a file or STDOUT
fout = sys.stdout
if args.output_file is not None:
fout = open(args.output_file, 'wt')
ids_to_keep = list()
for line in open(args.id_list):
line = line.rstrip()
if len(line) > 2:
ids_to_keep.append(line)
fout.write("##gff-version 3\n")
current_gene_lines = list()
current_gene_id = None
keep = False
for line in open(args.input_gff3):
line = line.rstrip()
cols = line.split("\t")
if len(cols) != 9:
continue
# grab the ID and Parent columns if any
id = biocodegff.column_9_value(cols[8], 'ID')
parent = biocodegff.column_9_value(cols[8], 'Parent')
type = cols[2]
if type == 'gene':
# purge the current gene, if any
if len(current_gene_lines) > 1:
for li in current_gene_lines:
fout.write("{0}\n".format(li) )
# reset
current_gene_lines = list()
current_gene_lines.append( line )
current_gene_id = id
else:
if type == 'mRNA':
if id in ids_to_keep:
keep = True
else:
keep = False
            if keep == True:
                current_gene_lines.append(line)
    # write out the final gene block too, if it has any kept mRNAs
    if len(current_gene_lines) > 1:
        for li in current_gene_lines:
            fout.write("{0}\n".format(li) )
if __name__ == '__main__':
main()
| zctea/biocode | sandbox/jorvis/hack.filter_gff3_genes_by_mRNA_ids.py | Python | gpl-3.0 | 2,464 |
# Blogger.com Related Posts Service (http://brps.appspot.com/)
#
# Copyright (C) 2008, 2009 Yu-Jie Lin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For posts
import logging
import sets
import simplejson as json
import urllib
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import db
from brps import util
from brps.util import json_str_sanitize
# Since Google doesn't support disjunction queries on labels,
# we need to limit the number of label queries
MAX_LABEL_QUERIES = 20
MAX_POSTS = 10
# Post cache time in seconds
POST_CACHE_TIME = 3600
LABEL_QUERY_RESULT_CACHE_TIME = 86400
# In seconds
UPDATE_INTERVAL = 86400
# The summary endpoint is revealed in this post
# http://groups.google.com/group/bloggerDev/t/214ac9a9f8800935
BASE_API_URL = 'http://www.blogger.com/feeds/%d/posts/summary'
POST_FETCH_URL = BASE_API_URL + '/%d?alt=json&v=2'
POST_QUERY_URL = BASE_API_URL + '?category=%s&max-results=100&alt=json&v=2'
class PrivateBlogError(Exception):
pass
class Post(db.Model):
"""Post data model"""
blog_id = db.IntegerProperty()
post_id = db.IntegerProperty()
last_updated = db.DateTimeProperty()
relates = db.TextProperty()
def _get_relates(self):
"""Gets related posts"""
if self.relates:
return json.loads(self.relates.encode('latin-1'))
def _set_relates(self, new_relates):
"""Sets related posts"""
if isinstance(new_relates, (str, unicode)):
self.relates = new_relates
else:
self.relates = db.Text(json.dumps(new_relates, separators=(',', ':')), encoding='latin-1')
_relates_ = property(_get_relates, _set_relates)
def get(blog_id, post_id):
"""Returns post from memcache or datastore
This method also updates if data is too old"""
if post_id:
key_name = 'b%dp%d' % (blog_id, post_id)
p = Post.get_by_key_name(key_name)
if not p:
return None
# Check if need to update
if util.td_seconds(p.last_updated) > UPDATE_INTERVAL:
labels = get_labels(blog_id, post_id)
relates = {'entry': []}
if labels:
relates = get_relates(blog_id, post_id, labels)
p = db.run_in_transaction(transaction_update_relates, blog_id, post_id,
relates)
return p
return None
def get_related_list(blog_id, post_id):
if post_id:
key_name = 'b%dp%dl' % (blog_id, post_id)
rlist = memcache.get(key_name)
if rlist is None:
p = get(blog_id, post_id)
if not p:
return None
rlist = p._relates_['entry']
memcache.add(key_name, rlist, POST_CACHE_TIME)
return rlist
return None
def add(blog_id, post_id):
"""Adds new post to db"""
logging.debug('Adding %d, %d' % (blog_id, post_id))
p = get(blog_id, post_id)
if p:
return p
labels = get_labels(blog_id, post_id)
relates = {'entry': []}
if isinstance(labels, list):
relates = get_relates(blog_id, post_id, labels)
p = db.run_in_transaction(transaction_add_post, blog_id, post_id, relates)
return p
def get_labels(blog_id, post_id):
"""Gets labels of a blog post"""
labels = memcache.get('b%dp%dlabels' % (blog_id, post_id))
if labels is not None:
logging.debug('Fetching labels for %d, %d from memcache' % \
(blog_id, post_id))
return labels
logging.debug('Fetching labels for %d, %d' % (blog_id, post_id))
f = urlfetch.fetch(POST_FETCH_URL % (blog_id, post_id))
if f.status_code == 200:
p_json = json.loads(json_str_sanitize(f.content))
entry = p_json['entry']
labels = []
if 'category' in entry:
labels += [cat['term'] for cat in entry['category']]
    # Save it for 5 minutes in case this post has too many labels to query
memcache.set('b%dp%dlabels' % (blog_id, post_id), labels, 300)
return labels
elif f.status_code == 401:
raise PrivateBlogError
logging.warning('Unable to fetch labels: %d' % f.status_code)
# FIXME should raise exception and get client a better understanding.
return []
def get_relates(blog_id, post_id, labels):
"""Gets a list of realted posts of a blog post"""
logging.debug('Fetching relates for %d' % blog_id)
# Nice Google: Disjunctions not supported yet
# %7C = '|'
# cat_query = urllib.quote('|'.join(labels))
s_post_id = str(post_id)
s_labels = sets.Set(labels)
len_labels = len(labels)
_entries = []
e_id_check = []
for label in labels[:MAX_LABEL_QUERIES]:
entries = memcache.get('b%dl%s-2' % (blog_id, label))
if entries is not None:
logging.debug('Got label %s from memcache' % label)
else:
logging.debug('Querying label %s' % label)
f = urlfetch.fetch(POST_QUERY_URL % (blog_id,
urllib.quote(label.encode('utf-8'))))
if f.status_code == 200:
json_content = f.content
p_json = json.loads(json_str_sanitize(json_content))
# Clean up, remove unnecessary data
entries = {}
if 'feed' in p_json and 'entry' in p_json['feed']:
for entry in p_json['feed']['entry']:
# entry['id']['$t']
_id = int(entry['id']['$t'].rsplit('-', 1)[1])
_title = entry['title']['$t']
_link = ''
for l in entry['link']:
if l['rel'] == 'alternate':
_link = l['href']
break
_labels = [cat['term'] for cat in entry['category']]
entries[_id] = {'title': _title, 'link': _link, 'labels': _labels}
memcache.set('b%dl%s-2' % (blog_id, label), entries,
LABEL_QUERY_RESULT_CACHE_TIME)
else:
# Something went wrong when querying label for posts
logging.debug('Error on querying label %s, %d' % (label,
f.status_code))
continue
for e_id in entries:
if e_id == post_id or e_id in e_id_check:
# Same post skip or we already have this post
continue
entry = entries[e_id]
match_count = len(s_labels & sets.Set(entry['labels']))
if not match_count:
# No label is matched
continue
_entries.append((float(match_count) / len_labels, entry['title'],
entry['link']))
e_id_check.append(e_id)
if _entries:
_entries.sort()
_entries.reverse()
_entries = _entries[:MAX_POSTS]
    # XXX Truncate the score: simplejson cannot serialize Decimal, so it is stored
    # as a short string instead. Not ideal, because score becomes a str, but it is
    # the easiest workaround. 1/20 = 0.05, so two digits are enough; this should
    # give a 10-20% space saving.
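    # Worked example of the score above: a candidate post sharing 3 of this post's
    # 12 labels gets score 3/12 = 0.25, serialized below as the string '0.25'.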
_entries = [('%.2f' % e[0], e[1], e[2]) for e in _entries]
# jsonize the result
entries_json = {'entry': [dict(zip(('score', 'title', 'link'), entry))\
for entry in _entries]}
else:
entries_json = {'entry': []}
return entries_json
def transaction_add_post(blog_id, post_id, relates):
"""Transaction function to add a new post"""
post = Post(key_name='b%dp%d' % (blog_id, post_id))
post.blog_id = blog_id
post.post_id = post_id
post._relates_ = relates
post.last_updated = util.now()
post.put()
return post
def transaction_update_relates(blog_id, post_id, relates):
"""Transaction function to update related posts of a post"""
post = Post.get_by_key_name('b%dp%d' % (blog_id, post_id))
post._relates_ = relates
post.last_updated = util.now()
post.put()
return post
| livibetter-backup/brps | src/brps/post.py | Python | gpl-3.0 | 7,920 |
# *****************************************************************************
# Copyright (c) 2016 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Lokesh Haralakatta - Initial Contribution
# *****************************************************************************
import ibmiotf.application
from ibmiotf import *
from nose.tools import *
class TestApplication:
appClient=None
httpClient=None
@classmethod
def setup_class(self):
appConfFile="application.conf"
options = ibmiotf.application.ParseConfigFile(appConfFile)
self.org = options['auth-key'][2:8]
self.deviceType = options['type']
self.deviceId = options['id']
self.authToken = options['auth-token']
self.authKey = options['auth-key']
self.appClient = ibmiotf.application.Client(options)
self.httpClient = ibmiotf.application.HttpClient(options)
self.appClient.connect()
assert_true(self.appClient.subscribeToDeviceEvents())
assert_true(self.appClient.subscribeToDeviceStatus())
assert_true(self.appClient.subscribeToDeviceCommands())
@classmethod
def teardown_class(self):
self.appClient.disconnect()
def testQuickStartInstance(self):
client = ibmiotf.application.Client({})
assert_is_instance(client , ibmiotf.application.Client)
assert_equals(client.organization,"quickstart")
client = ibmiotf.application.Client({"org": "quickstart", "type": "standalone","id": "MyFirstDevice"})
hclient = ibmiotf.application.HttpClient({"org": "quickstart", "type": "standalone","id": "MyFirstDevice"})
assert_is_instance(client , ibmiotf.application.Client)
assert_is_instance(hclient , ibmiotf.application.HttpClient)
assert_equals(client.organization,"quickstart")
assert_equals(client.clientId , "a:quickstart:MyFirstDevice")
assert_false(client.subscribeToDeviceEvents())
assert_false(client.subscribeToDeviceStatus())
assert_false(client.subscribeToDeviceCommands())
commandData={'rebootDelay' : 50}
assert_false(client.publishCommand(self.deviceType, self.deviceId, "reboot", "json", commandData))
def testApplicationClientInstance(self):
client = ibmiotf.application.Client({"org": self.org, "type": self.deviceType, "id": self.deviceId,
"auth-method": "token", "auth-token": self.authToken, "auth-key":self.authKey})
assert_is_instance(client , ibmiotf.application.Client)
assert_equals(client.clientId , "A:"+self.org+":"+self.deviceId)
@raises(Exception)
def testMissingAuthToken1(self):
with assert_raises(ConfigurationException) as e:
ibmiotf.application.Client({"org": self.org, "type": self.deviceType, "id": self.deviceId,
"auth-method": "token", "auth-token": None, "auth-key":self.authKey})
assert_equal(e.exception.msg, 'Missing required property for API key based authentication: auth-token')
@raises(Exception)
def testMissingAuthToken2(self):
with assert_raises(ConfigurationException) as e:
ibmiotf.application.Client({"org": self.org, "type": self.deviceType, "id": self.deviceId,
"auth-method": "token", "auth-key":self.authKey})
assert_equal(e.exception.msg, 'Missing required property for API key based authentication: auth-token')
@raises(Exception)
def testMissingConfigFile(self):
appConfFile="InvalidFile.out"
with assert_raises(ConfigurationException) as e:
ibmiotf.application.ParseConfigFile(appConfFile)
assert_equal(e.exception.msg, 'Error reading device configuration file')
@raises(Exception)
def testInvalidConfigFile(self):
appConfFile="nullValues.conf"
with assert_raises(AttributeError) as e:
ibmiotf.application.ParseConfigFile(appConfFile)
assert_equal(e.exception, AttributeError)
@raises(Exception)
def testNotAuthorizedConnect(self):
client = ibmiotf.application.Client({"org": self.org, "type": self.deviceType, "id": self.deviceId,
"auth-method": "token", "auth-token": "MGhUxxxxxxxx6keG(l", "auth-key":self.authKey})
with assert_raises(ConnectionException) as e:
client.connect()
assert_equal(e.exception, ConnectionException)
assert_equal(e.exception.msg,'Not authorized')
@raises(Exception)
def testMissingMessageEncoder(self):
        with assert_raises(MissingMessageDecoderException) as e:
myData={'name' : 'foo', 'cpu' : 60, 'mem' : 50}
self.appClient.publishEvent(self.deviceType,self.deviceId,"missingMsgEncode", "jason", myData)
assert_equals(e.exception, MissingMessageEncoderException)
def testPublishEvent(self):
def appEventPublishCallback():
print("Application Publish Event done!!!")
myData={'name' : 'foo', 'cpu' : 60, 'mem' : 50}
assert(self.appClient.publishEvent(self.deviceType,self.deviceId,"testPublishEvent", "json", myData, on_publish=appEventPublishCallback))
assert(self.appClient.publishEvent(self.deviceType,self.deviceId,"testPublishEvent", "xml", myData, on_publish=appEventPublishCallback))
def testPublishOverHTTPs(self):
myData={'name' : 'foo', 'cpu' : 60, 'mem' : 50}
assert_equals(self.httpClient.publishEvent(self.deviceType,self.deviceId,"testPublishEventHTTPs", myData),200)
assert_equals(self.httpClient.publishEvent(self.deviceType,self.deviceId,"testPublishEventHTTPs", myData,"xml"),200)
myCMD={'command':'Reboot'}
assert_equals(self.httpClient.publishCommand(self.deviceType,self.deviceId,"testPublishCMDHTTPQS", myCMD),200)
assert_equals(self.httpClient.publishCommand(self.deviceType,self.deviceId,"testPublishCMDHTTPQS", myCMD,"xml"),200)
def testPublishOverHTTPQS(self):
hclient = ibmiotf.application.HttpClient({"org": "quickstart", "type": "standalone","id": "MyFirstDevice"})
myData={'name' : 'foo', 'cpu' : 60, 'mem' : 50}
assert_equals(hclient.publishEvent(self.deviceType,self.deviceId,"testPublishEventHTTPQS", myData),200)
assert_equals(hclient.publishEvent(self.deviceType,self.deviceId,"testPublishEventHTTPQS", myData,"xml"),200)
myCMD={'command':'Reboot'}
assert_equals(hclient.publishCommand(self.deviceType,self.deviceId,"testPublishCMDHTTPQS", myCMD),200)
assert_equals(hclient.publishCommand(self.deviceType,self.deviceId,"testPublishCMDHTTPQS", myCMD,"xml"),200)
@raises(Exception)
def testMissingMessageEncoderForPublishCommand(self):
        with assert_raises(MissingMessageDecoderException) as e:
commandData={'rebootDelay' : 50}
self.appClient.publishCommand(self.deviceType, self.deviceId, "reboot", "jason", commandData)
assert_equals(e.exception, MissingMessageEncoderException)
def testPublishCommand(self):
def appCmdPublishCallback():
print("Application Publish Command done!!!")
commandData={'rebootDelay' : 50}
assert_true(self.appClient.publishCommand(self.deviceType, self.deviceId, "reboot", "json", commandData, on_publish=appCmdPublishCallback))
| Lokesh-K-Haralakatta/iot-python | test/applicationTest.py | Python | epl-1.0 | 7,665 |
"""
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
__docformat__ = 'restructedtext en'
import cPickle
import gzip
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out, W=None, b=None):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
if W is None:
self.W = theano.shared(
value=numpy.zeros(
(n_in, n_out),
dtype=theano.config.floatX
),
name='W_reg',
borrow=True
)
else:
self.W = theano.shared(W, name='W_reg', borrow=True)
# initialize the biases b as a vector of n_out 0s
if b is None:
self.b = theano.shared(
value=numpy.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b_reg',
borrow=True
)
else:
self.b = theano.shared(b, name='b_reg', borrow=True)
# symbolic expression for computing the matrix of class-membership
# probabilities
# Where:
# W is a matrix where column-k represent the separation hyperplane for
# class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of
# hyperplane-k
# the following line has been modified since the original implementation is unstable
# self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
self.p_y_given_x = self.__my_softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose
# probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def __my_softmax(self, x):
return T.exp(x) / (T.exp(x).sum(1, keepdims=True))
def load_data(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
#############
# LOAD DATA #
#############
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
print '... loading data'
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
# train_set, valid_set, test_set format: tuple(input, target)
    # input is a numpy.ndarray of 2 dimensions (a matrix)
    # in which each row corresponds to an example. target is a
    # numpy.ndarray of 1 dimension (a vector) that has the same length as
    # the number of rows in the input. It should give the target
    # to the example with the same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets ous get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
dataset='mnist.pkl.gz',
batch_size=600):
"""
Demonstrate stochastic gradient descent optimization of a log-linear
model
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a
# minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# Each MNIST image has size 28*28
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by
# the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
###############
# TRAIN MODEL #
###############
print '... training the model'
# early-stopping parameters
    patience = 5000  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
                                  # minibatches before checking the network
# on the validation set; in this case we
# check every epoch
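    # Illustrative numbers (assuming the standard MNIST split of 50,000
    # training examples and the default batch_size of 600): n_train_batches
    # is 83, so validation_frequency = min(83, 2500) = 83 and the model is
    # validated once per epoch; patience is counted in minibatch iterations.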
best_validation_loss = numpy.inf
test_score = 0.
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
# improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print(
(
' epoch %i, minibatch %i/%i, test error of'
' best model %f %%'
) %
(
epoch,
minibatch_index + 1,
n_train_batches,
test_score * 100.
)
)
# save the best model
with open('best_model.pkl', 'w') as f:
cPickle.dump(classifier, f)
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(
(
'Optimization complete with best validation score of %f %%,'
            ' with test performance %f %%'
)
% (best_validation_loss * 100., test_score * 100.)
)
    print 'The code ran for %d epochs, with %f epochs/sec' % (
epoch, 1. * epoch / (end_time - start_time))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.1fs' % ((end_time - start_time)))
def predict():
"""
An example of how to load a trained model and use it
to predict labels.
"""
# load the saved model
classifier = cPickle.load(open('best_model.pkl'))
# compile a predictor function
predict_model = theano.function(
inputs=[classifier.input],
outputs=classifier.y_pred)
    # We can test it on some examples from the test set
dataset = 'mnist.pkl.gz'
datasets = load_data(dataset)
test_set_x, test_set_y = datasets[2]
test_set_x = test_set_x.get_value()
predicted_values = predict_model(test_set_x[:10])
print ("Predicted values for the first 10 examples in test set:")
print predicted_values
if __name__ == '__main__':
sgd_optimization_mnist()
| ZiweiXU/CAPTCHA_recog | logistic_sgd.py | Python | gpl-3.0 | 17,569 |
# -*- coding: utf-8 -*-
#
# Sentry documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 20 16:21:42 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, 'src'))
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '_themes'))
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry.conf.server'
# TODO(dcramer): this is to allow autodoc support
from django.conf import settings
settings.SENTRY_CACHE = 'sentry.cache.django.DjangoCache'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinxtogithub']
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sentry'
copyright = u'2010-2015, the Sentry Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __import__('pkg_resources').get_distribution('sentry').version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'kr'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sentrydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
'index', 'Sentry.tex', u'Sentry Documentation',
u'Functional Software Inc.', 'manual'
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sentry', u'Sentry Documentation',
[u'Functional Software Inc.'], 1)
]
if os.environ.get('SENTRY_FEDERATED_DOCS') != '1':
sys.path.insert(0, os.path.abspath('_sentryext'))
import sentryext
sentryext.activate()
| imankulov/sentry | docs/conf.py | Python | bsd-3-clause | 7,824 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" This file uses the fpga_interchange to create a very simple FPGA design.
This design targets the 7-series FPGA line, and the physical netlist is
suitable for an Artix 50T class fabric.
To test this flow:
- Invoke this script to output the logical netlist, physical netlist, and a
small XDC file to set the IOSTANDARD's on the ports.
- Use RapidWright's interchange branch to create a DCP using the entry point
com.xilinx.rapidwright.interchange.PhysicalNetlistToDcp
Example:
export RAPIDWRIGHT_PATH=~/RapidWright
$RAPIDWRIGHT_PATH/scripts/invoke_rapidwright.sh \
com.xilinx.rapidwright.interchange.PhysicalNetlistToDcp \
test.netlist test.phys test.xdc test.dcp
"""
import argparse
from fpga_interchange.interchange_capnp import Interchange, write_capnp_file
from fpga_interchange.logical_netlist import Library, Cell, Direction, CellInstance, LogicalNetlist
from fpga_interchange.physical_netlist import PhysicalNetlist, PhysicalBelPin, \
Placement, PhysicalPip, PhysicalSitePin, PhysicalSitePip, \
chain_branches, chain_pips, PhysicalNetType, PhysicalCellType
def example_logical_netlist():
hdi_primitives = Library('hdi_primitives')
cell = Cell('FDRE')
cell.add_port('D', Direction.Input)
cell.add_port('C', Direction.Input)
cell.add_port('CE', Direction.Input)
cell.add_port('R', Direction.Input)
cell.add_port('Q', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('IBUF')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('OBUF')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('BUFG')
cell.add_port('I', Direction.Input)
cell.add_port('O', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('VCC')
cell.add_port('P', Direction.Output)
hdi_primitives.add_cell(cell)
cell = Cell('GND')
cell.add_port('G', Direction.Output)
hdi_primitives.add_cell(cell)
top = Cell('top')
top.add_port('i', Direction.Input)
top.add_port('clk', Direction.Input)
top.add_port('o', Direction.Output)
top.add_cell_instance('ibuf', 'IBUF')
top.add_cell_instance('obuf', 'OBUF')
top.add_cell_instance('clk_ibuf', 'IBUF')
top.add_cell_instance('clk_buf', 'BUFG')
top.add_cell_instance('ff', 'FDRE')
top.add_cell_instance('VCC', 'VCC')
top.add_cell_instance('GND', 'GND')
top.add_net('i')
top.connect_net_to_cell_port('i', 'i')
top.connect_net_to_instance('i', 'ibuf', 'I')
top.add_net('i_buf')
top.connect_net_to_instance('i_buf', 'ibuf', 'O')
top.connect_net_to_instance('i_buf', 'ff', 'D')
top.add_net('o_buf')
top.connect_net_to_instance('o_buf', 'ff', 'Q')
top.connect_net_to_instance('o_buf', 'obuf', 'I')
top.add_net('o')
top.connect_net_to_instance('o', 'obuf', 'O')
top.connect_net_to_cell_port('o', 'o')
top.add_net('clk')
top.connect_net_to_cell_port('clk', 'clk')
top.connect_net_to_instance('clk', 'clk_ibuf', 'I')
top.add_net('clk_ibuf')
top.connect_net_to_instance('clk_ibuf', 'clk_ibuf', 'O')
top.connect_net_to_instance('clk_ibuf', 'clk_buf', 'I')
top.add_net('clk_buf')
top.connect_net_to_instance('clk_buf', 'clk_buf', 'O')
top.connect_net_to_instance('clk_buf', 'ff', 'C')
top.add_net('GLOBAL_LOGIC1')
top.connect_net_to_instance('GLOBAL_LOGIC1', 'VCC', 'P')
top.connect_net_to_instance('GLOBAL_LOGIC1', 'ff', 'CE')
top.add_net('GLOBAL_LOGIC0')
top.connect_net_to_instance('GLOBAL_LOGIC0', 'GND', 'G')
top.connect_net_to_instance('GLOBAL_LOGIC0', 'ff', 'R')
work = Library('work')
work.add_cell(top)
logical_netlist = LogicalNetlist(
name='top',
top_instance_name='top',
top_instance=CellInstance(
cell_name='top',
view='netlist',
property_map={},
),
property_map={},
libraries={
'work': work,
'hdi_primitives': hdi_primitives,
})
return logical_netlist
def example_physical_netlist():
phys_netlist = PhysicalNetlist(part='xc7a50tfgg484-1')
ibuf_placement = Placement(
cell_type='IBUF', cell_name='ibuf', site='IOB_X0Y12', bel='INBUF_EN')
ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='PAD', cell_pin='I')
ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(ibuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y12', site_type='IOB33')
obuf_placement = Placement(
cell_type='OBUF', cell_name='obuf', site='IOB_X0Y11', bel='OUTBUF')
obuf_placement.add_bel_pin_to_cell_pin(bel_pin='IN', cell_pin='I')
obuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(obuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y11', site_type='IOB33')
clk_ibuf_placement = Placement(
cell_type='IBUF',
cell_name='clk_ibuf',
site='IOB_X0Y24',
bel='INBUF_EN')
clk_ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='PAD', cell_pin='I')
clk_ibuf_placement.add_bel_pin_to_cell_pin(bel_pin='OUT', cell_pin='O')
phys_netlist.add_placement(clk_ibuf_placement)
phys_netlist.add_site_instance(site_name='IOB_X0Y24', site_type='IOB33')
clk_buf_placement = Placement(
cell_type='BUFG',
cell_name='clk_buf',
site='BUFGCTRL_X0Y0',
bel='BUFG')
clk_buf_placement.add_bel_pin_to_cell_pin(bel_pin='I0', cell_pin='I')
clk_buf_placement.add_bel_pin_to_cell_pin(bel_pin='O', cell_pin='O')
phys_netlist.add_placement(clk_buf_placement)
phys_netlist.add_site_instance(site_name='BUFGCTRL_X0Y0', site_type='BUFG')
ff_placement = Placement(
cell_type='FDRE', cell_name='ff', site='SLICE_X1Y12', bel='AFF')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='SR', cell_pin='R')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='D', cell_pin='D')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='Q', cell_pin='Q')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='CE', cell_pin='CE')
ff_placement.add_bel_pin_to_cell_pin(bel_pin='CK', cell_pin='C')
phys_netlist.add_placement(ff_placement)
phys_netlist.add_site_instance(site_name='SLICE_X1Y12', site_type='SLICEL')
i_root = chain_branches((PhysicalBelPin('IOB_X0Y12', 'PAD', 'PAD'),
PhysicalBelPin('IOB_X0Y12', 'INBUF_EN', 'PAD')))
phys_netlist.add_physical_net(net_name='i', sources=[i_root], stubs=[])
i_buf_root = chain_branches(
(PhysicalBelPin('IOB_X0Y12', 'INBUF_EN', 'OUT'),
PhysicalSitePip('IOB_X0Y12', 'IUSED', '0'),
PhysicalBelPin('IOB_X0Y12', 'I', 'I'),
PhysicalSitePin('IOB_X0Y12', 'I')) +
chain_pips('LIOI3_X0Y11', ('LIOI_IBUF0', 'LIOI_I0', 'LIOI_ILOGIC0_D',
'IOI_ILOGIC0_O', 'IOI_LOGIC_OUTS18_1')) +
(PhysicalPip('IO_INT_INTERFACE_L_X0Y12',
'INT_INTERFACE_LOGIC_OUTS_L_B18',
'INT_INTERFACE_LOGIC_OUTS_L18'),
PhysicalPip('INT_L_X0Y12', 'LOGIC_OUTS_L18', 'EE2BEG0'),
PhysicalPip('INT_L_X2Y12', 'EE2END0', 'BYP_ALT0'),
PhysicalPip('INT_L_X2Y12', 'BYP_ALT0', 'BYP_L0'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_BYP0', 'CLBLL_L_AX'),
PhysicalSitePin('SLICE_X1Y12', 'AX'),
PhysicalBelPin('SLICE_X1Y12', 'AX', 'AX'),
PhysicalSitePip('SLICE_X1Y12', 'AFFMUX', 'AX'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'D')))
phys_netlist.add_physical_net(
net_name='i_buf', sources=[i_buf_root], stubs=[])
o_buf_root = chain_branches(
(PhysicalBelPin('SLICE_X1Y12', 'AFF', 'Q'),
PhysicalBelPin('SLICE_X1Y12', 'AQ', 'AQ'),
PhysicalSitePin('SLICE_X1Y12', 'AQ'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_L_AQ', 'CLBLL_LOGIC_OUTS0'),
PhysicalPip('INT_L_X2Y12', 'LOGIC_OUTS_L0', 'SL1BEG0'),
PhysicalPip('INT_L_X2Y11', 'SL1END0', 'WW2BEG0'),
PhysicalPip('INT_L_X0Y11', 'WW2END0', 'IMUX_L34')) +
chain_pips('LIOI3_X0Y11', ('IOI_IMUX34_0', 'IOI_OLOGIC1_D1',
'LIOI_OLOGIC1_OQ', 'LIOI_O1')) +
(
PhysicalSitePin('IOB_X0Y11', 'O'),
PhysicalBelPin('IOB_X0Y11', 'O', 'O'),
PhysicalSitePip('IOB_X0Y11', 'OUSED', '0'),
PhysicalBelPin('IOB_X0Y11', 'OUTBUF', 'IN'),
))
phys_netlist.add_physical_net(
net_name='o_buf', sources=[o_buf_root], stubs=[])
o_root = chain_branches((PhysicalBelPin('IOB_X0Y11', 'OUTBUF', 'OUT'),
PhysicalBelPin('IOB_X0Y11', 'PAD', 'PAD')))
phys_netlist.add_physical_net(net_name='o', sources=[o_root], stubs=[])
clk_root = chain_branches((PhysicalBelPin('IOB_X0Y24', 'PAD', 'PAD'),
PhysicalBelPin('IOB_X0Y24', 'INBUF_EN', 'PAD')))
phys_netlist.add_physical_net(net_name='clk', sources=[clk_root], stubs=[])
clk_ibuf_root = chain_branches(
(PhysicalBelPin('IOB_X0Y24', 'INBUF_EN', 'OUT'),
PhysicalSitePip('IOB_X0Y24', 'IUSED', '0'),
PhysicalBelPin('IOB_X0Y24', 'I', 'I'),
PhysicalSitePin('IOB_X0Y24', 'I')) +
chain_pips('LIOI3_X0Y23', ('LIOI_IBUF0', 'LIOI_I0', 'LIOI_ILOGIC0_D',
'IOI_ILOGIC0_O', 'LIOI_I2GCLK_TOP0')) +
(PhysicalPip('HCLK_CMT_X8Y26', 'HCLK_CMT_CCIO3',
'HCLK_CMT_MUX_CLK_13'),
PhysicalPip('CLK_HROW_BOT_R_X60Y26', 'CLK_HROW_CK_IN_L13',
'CLK_HROW_BOT_R_CK_BUFG_CASCO0'),
PhysicalPip('CLK_BUFG_BOT_R_X60Y48', 'CLK_BUFG_BOT_R_CK_MUXED0',
'CLK_BUFG_BUFGCTRL0_I0'),
PhysicalSitePin('BUFGCTRL_X0Y0', 'I0'),
PhysicalBelPin('BUFGCTRL_X0Y0', 'I0', 'I0'),
PhysicalBelPin('BUFGCTRL_X0Y0', 'BUFG', 'I0')))
phys_netlist.add_physical_net(
net_name='clk_ibuf', sources=[clk_ibuf_root], stubs=[])
clk_buf_root = chain_branches(
(PhysicalBelPin('BUFGCTRL_X0Y0', 'BUFG', 'O'),
PhysicalBelPin('BUFGCTRL_X0Y0', 'O', 'O'),
PhysicalSitePin('BUFGCTRL_X0Y0', 'O'),
PhysicalPip('CLK_BUFG_BOT_R_X60Y48', 'CLK_BUFG_BUFGCTRL0_O',
'CLK_BUFG_CK_GCLK0'),
PhysicalPip(
'CLK_BUFG_REBUF_X60Y38',
'CLK_BUFG_REBUF_R_CK_GCLK0_TOP',
'CLK_BUFG_REBUF_R_CK_GCLK0_BOT',
forward=False)) + chain_pips('CLK_HROW_BOT_R_X60Y26', (
'CLK_HROW_R_CK_GCLK0', 'CLK_HROW_CK_MUX_OUT_L2',
'CLK_HROW_CK_HCLK_OUT_L2', 'CLK_HROW_CK_BUFHCLK_L2')) + (
PhysicalPip('HCLK_R_X12Y26', 'HCLK_CK_BUFHCLK2',
'HCLK_LEAF_CLK_B_BOT4'),
PhysicalPip('INT_R_X3Y12', 'GCLK_B4', 'GCLK_B4_WEST'),
PhysicalPip('INT_L_X2Y12', 'GCLK_L_B4', 'CLK_L0'),
PhysicalPip('CLBLL_L_X2Y12', 'CLBLL_CLK0', 'CLBLL_L_CLK'),
PhysicalSitePin('SLICE_X1Y12', 'CLK'),
PhysicalBelPin('SLICE_X1Y12', 'CLK', 'CLK'),
PhysicalSitePip('SLICE_X1Y12', 'CLKINV', 'CLK'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'CK'),
))
phys_netlist.add_physical_net(
net_name='clk_buf', sources=[clk_buf_root], stubs=[])
const0 = chain_branches((
PhysicalBelPin('SLICE_X1Y12', 'SRUSEDGND', '0'),
PhysicalSitePip('SLICE_X1Y12', 'SRUSEDMUX', '0'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'SR'),
))
phys_netlist.add_physical_net(
net_name='GLOBAL_LOGIC0',
sources=[
const0,
],
stubs=[],
net_type=PhysicalNetType.Gnd)
const1 = chain_branches((
PhysicalBelPin('SLICE_X1Y12', 'CEUSEDVCC', '1'),
PhysicalSitePip('SLICE_X1Y12', 'CEUSEDMUX', '1'),
PhysicalBelPin('SLICE_X1Y12', 'AFF', 'CE'),
))
phys_netlist.add_physical_net(
net_name='GLOBAL_LOGIC1',
sources=[const1],
stubs=[],
net_type=PhysicalNetType.Vcc)
phys_netlist.add_physical_cell(
cell_name='ibuf', cell_type=PhysicalCellType.Port)
phys_netlist.add_physical_cell(
cell_name='obuf', cell_type=PhysicalCellType.Port)
return phys_netlist
def example_xdc():
return """\
set_property IOSTANDARD LVCMOS33 [get_ports]
"""
def main():
parser = argparse.ArgumentParser(
description=
"Create an example netlist, suitable for use with Vivado 2019.2")
parser.add_argument('--schema_dir', required=True)
parser.add_argument('--logical_netlist', required=True)
parser.add_argument('--physical_netlist', required=True)
parser.add_argument('--xdc', required=True)
args = parser.parse_args()
interchange = Interchange(args.schema_dir)
logical_netlist = example_logical_netlist()
logical_netlist_capnp = logical_netlist.convert_to_capnp(interchange)
phys_netlist = example_physical_netlist()
phys_netlist_capnp = phys_netlist.convert_to_capnp(interchange)
with open(args.logical_netlist, 'wb') as f:
write_capnp_file(logical_netlist_capnp, f)
with open(args.physical_netlist, 'wb') as f:
write_capnp_file(phys_netlist_capnp, f)
with open(args.xdc, 'w') as f:
f.write(example_xdc())
if __name__ == "__main__":
main()
| SymbiFlow/python-fpga-interchange | tests/example_netlist.py | Python | isc | 13,854 |
import os
# Django settings for bettercachedemo project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'demo.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
CACHE_BACKEND = 'locmem:///'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATICFILES_ROOT.
# Example: "http://static.lawrence.com/", "http://example.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# A list of locations of additional static files
STATICFILES_DIRS = ()
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 't6nup$dkukc(@+p*a!l)^2pfa7%&&ant@660iulg&lapx9u+z2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'bettercachedemo.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'bettercache',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request':{
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
BETTERCACHE_LOCAL_POSTCHECK = 60
| ironfroggy/django-better-cache | bettercachedemo/settings.py | Python | mit | 4,983 |
import json
import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.stream.hls import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://news\.now\.com/home/live"
))
class NowNews(Plugin):
epg_re = re.compile(r'''epg.getEPG\("(\d+)"\);''')
api_url = "https://hkt-mobile-api.nowtv.now.com/09/1/getLiveURL"
backup_332_api = "https://d7lz7jwg8uwgn.cloudfront.net/apps_resource/news/live.json"
backup_332_stream = "https://d3i3yn6xwv1jpw.cloudfront.net/live/now332/playlist.m3u8"
def _get_streams(self):
res = self.session.http.get(self.url)
m = self.epg_re.search(res.text)
channel_id = m and m.group(1)
if channel_id:
log.debug("Channel ID: {0}".format(channel_id))
if channel_id == "332":
# there is a special backup stream for channel 332
bk_res = self.session.http.get(self.backup_332_api)
bk_data = self.session.http.json(bk_res)
if bk_data and bk_data["backup"]:
log.info("Using backup stream for channel 332")
return HLSStream.parse_variant_playlist(self.session, self.backup_332_stream)
api_res = self.session.http.post(self.api_url,
headers={"Content-Type": 'application/json'},
data=json.dumps(dict(channelno=channel_id,
mode="prod",
audioCode="",
format="HLS",
callerReferenceNo="20140702122500")))
data = self.session.http.json(api_res)
for stream_url in data.get("asset", {}).get("hls", {}).get("adaptive", []):
return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = NowNews
| amurzeau/streamlink-debian | src/streamlink/plugins/nownews.py | Python | bsd-2-clause | 2,076 |
# -*- coding: utf-8 -*-
#
# Django Extra Views documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 6 03:11:50 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import re
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"Django Extra Views"
copyright = u"2013, Andrew Ingram"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
with open("../extra_views/__init__.py", "rb") as f:
# The full version, including alpha/beta/rc tags.
release = str(re.search('__version__ = "(.+?)"', f.read().decode("utf-8")).group(1))
# The short X.Y version.
version = release.rpartition(".")[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if on_rtd:
html_theme = "default"
else:
html_theme = "nature"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "DjangoExtraViewsdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"DjangoExtraViews.tex",
u"Django Extra Views Documentation",
u"Andrew Ingram",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"djangoextraviews",
u"Django Extra Views Documentation",
[u"Andrew Ingram"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"DjangoExtraViews",
u"Django Extra Views Documentation",
u"Andrew Ingram",
"DjangoExtraViews",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
| AndrewIngram/django-extra-views | docs/conf.py | Python | mit | 8,483 |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AbstractVersionHandler(object):
@abc.abstractmethod
def get_node_processes(self):
return
@abc.abstractmethod
def get_plugin_configs(self):
return
@abc.abstractmethod
def configure_cluster(self, cluster):
return
@abc.abstractmethod
def start_cluster(self, cluster):
return
@abc.abstractmethod
def validate(self, cluster):
return
@abc.abstractmethod
def scale_cluster(self, cluster, instances):
return
@abc.abstractmethod
def decommission_nodes(self, cluster, instances):
return
@abc.abstractmethod
def validate_scaling(self, cluster, existing, additional):
return
@abc.abstractmethod
def get_edp_engine(self, cluster, job_type):
return
def get_edp_job_types(self):
return []
def get_edp_config_hints(self, job_type):
return {}
@abc.abstractmethod
def get_open_ports(self, node_group):
return
def on_terminate_cluster(self, cluster):
pass
| henaras/sahara | sahara/plugins/vanilla/abstractversionhandler.py | Python | apache-2.0 | 1,693 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Format management.
Creating new formats
--------------------
A new format named 'foo-bar' corresponds to Python module
'tracetool/format/foo_bar.py'.
A format module should provide a docstring, whose first non-empty line will be
considered its short description.
All formats must generate their contents through the 'tracetool.out' routine.
Format functions
----------------
All the following functions are optional, and no output will be generated if
they do not exist.
======== =======================================================================
Function Description
======== =======================================================================
begin Called to generate the format-specific file header.
end Called to generate the format-specific file footer.
nop Called to generate the per-event contents when the event is disabled or
the selected backend is 'nop'.
======== =======================================================================
"""
__author__ = "Lluís Vilanova <[email protected]>"
__copyright__ = "Copyright 2012, Lluís Vilanova <[email protected]>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "[email protected]"
import pkgutil
import tracetool
def get_list():
"""Get a list of (name, description) pairs."""
res = []
for _, modname, _ in pkgutil.iter_modules(tracetool.format.__path__):
module = tracetool.try_import("tracetool.format." + modname)
# just in case; should never fail unless non-module files are put there
if not module[0]:
continue
module = module[1]
doc = module.__doc__
if doc is None:
doc = ""
doc = doc.strip().split("\n")[0]
name = modname.replace("_", "-")
res.append((name, doc))
return res
def exists(name):
"""Return whether the given format exists."""
if len(name) == 0:
return False
name = name.replace("-", "_")
return tracetool.try_import("tracetool.format." + name)[1]
def _empty(events):
pass
def generate_begin(name, events):
"""Generate the header of the format-specific file."""
if not exists(name):
raise ValueError("unknown format: %s" % name)
name = name.replace("-", "_")
func = tracetool.try_import("tracetool.format." + name,
"begin", _empty)[1]
func(events)
def generate_end(name, events):
"""Generate the footer of the format-specific file."""
if not exists(name):
raise ValueError("unknown format: %s" % name)
name = name.replace("-", "_")
func = tracetool.try_import("tracetool.format." + name,
"end", _empty)[1]
func(events)
| yingted/qemu | scripts/tracetool/format/__init__.py | Python | gpl-2.0 | 2,859 |
from yapsy.IPlugin import IPlugin
from scanner.util import check_call
class FallbackScsv(IPlugin):
def description(self):
return "TLS_FALLBACK_SCSV"
def check(self, url, host_info):
host_info[self.description()] = None
if host_info["ssl checked"].get():
return None
else:
output = check_call([
"openssl", "s_client", "-connect", "{}:443".format(url),
"-fallback_scsv", "-no_tls1_2",
])
if "alert inappropriate fallback" in output:
host_info[self.description()] = True
return True
host_info[self.description()] = False
return False
| kotnik/scanner | scanner/checks/fallback_scsv.py | Python | mit | 713 |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Embeds standalone JavaScript snippets in C++ code.
The script requires the OverridesView file from WebKit that lists the known
mobile devices to be passed in as the only argument. The list of known devices
will be written to a C-style string to be parsed with JSONReader.
"""
import optparse
import os
import sys
import cpp_source
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--directory', type='string', default='.',
help='Path to directory where the cc/h files should be created')
options, args = parser.parse_args()
devices = '['
file_name = args[0]
inside_list = False
with open(file_name, 'r') as f:
for line in f:
if not inside_list:
if 'WebInspector.OverridesUI._phones = [' in line or \
'WebInspector.OverridesUI._tablets = [' in line:
inside_list = True
else:
if line.strip() == '];':
inside_list = False
continue
devices += line.strip()
devices += ']'
cpp_source.WriteSource('mobile_device_list',
'chrome/test/chromedriver/chrome',
options.directory, {'kMobileDevices': devices})
if __name__ == '__main__':
sys.exit(main())
| s20121035/rk3288_android5.1_repo | external/chromium_org/chrome/test/chromedriver/embed_mobile_devices_in_cpp.py | Python | gpl-3.0 | 1,418 |
import sys
try:
import pep8
except ImportError:
pep8 = None
def pep8_run(args, paths, config_file=None, stream=None):
'''Programmatically run ``pep8``.
    Returns a 2-element tuple with a string message and an exit code.
'''
args.remove('--pep8')
if pep8:
stream = stream or sys.stderr
stream.write('Running pep8\n')
pep8style = pep8.StyleGuide(paths=paths, config_file=config_file)
options = pep8style.options
report = pep8style.check_files()
if options.statistics:
report.print_statistics()
if options.benchmark:
report.print_benchmark()
if options.testsuite and not options.quiet:
report.print_results()
if report.total_errors:
msg = str(report.total_errors) + '\n' if options.count else ''
return msg, 1
return 'OK', 0
return 'pep8 not installed', 1
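# Usage sketch (argument values are illustrative): the '--pep8' flag is
# expected to be present in ``args`` and is removed before the check runs.
#
#   msg, code = pep8_run(['--pep8'], paths=['mypackage'])
#   if msg:
#       print(msg)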
| Ghost-script/dyno-chat | kickchat/apps/pulsar/apps/test/pep.py | Python | gpl-2.0 | 928 |
# -*- coding: utf-8 -*-
# © 2015 Grupo ESOC Ingeniería de Servicios, S.L. - Jairo Llopis.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
"""Test default values for models."""
from openerp.tests.common import TransactionCase
from .base import MailInstalled
class PersonCase(TransactionCase):
"""Test ``res.partner`` when it is a person."""
context = {"default_is_company": False}
model = "res.partner"
def setUp(self):
super(PersonCase, self).setUp()
self.good_values = {
"firstname": u"Núñez",
"lastname": u"Fernán",
}
self.good_values["name"] = "%s %s" % (self.good_values["lastname"],
self.good_values["firstname"])
if "default_is_company" in self.context:
self.good_values["is_company"] = self.context["default_is_company"]
self.values = self.good_values.copy()
def tearDown(self):
self.record = (self.env[self.model]
.with_context(self.context)
.create(self.values))
for key, value in self.good_values.iteritems():
self.assertEqual(
self.record[key],
value,
"Checking key %s" % key)
super(PersonCase, self).tearDown()
def test_no_name(self):
"""Name is calculated."""
del self.values["name"]
def test_wrong_name_value(self):
"""Wrong name value is ignored, name is calculated."""
self.values["name"] = u"BÄD"
def test_wrong_name_context(self):
"""Wrong name context is ignored, name is calculated."""
del self.values["name"]
self.context["default_name"] = u"BÄD"
def test_wrong_name_value_and_context(self):
"""Wrong name value and context is ignored, name is calculated."""
self.values["name"] = u"BÄD1"
self.context["default_name"] = u"BÄD2"
class CompanyCase(PersonCase):
"""Test ``res.partner`` when it is a company."""
context = {"default_is_company": True}
def setUp(self):
super(CompanyCase, self).setUp()
self.good_values.update(lastname=self.values["name"], firstname=False)
self.values = self.good_values.copy()
class UserCase(PersonCase, MailInstalled):
"""Test ``res.users``."""
model = "res.users"
context = {"default_login": "[email protected]"}
def tearDown(self):
# Cannot create users if ``mail`` is installed
if self.mail_installed():
# Skip tests
super(PersonCase, self).tearDown()
else:
# Run tests
super(UserCase, self).tearDown()
| Antiun/partner-contact | partner_firstname/tests/test_create.py | Python | agpl-3.0 | 2,704 |
import argparse
import gzip
import io
import os
import subprocess
from collections import Counter
import progressbar
def convert_and_filter_topk(args):
""" Convert to lowercase, count word occurrences and save top-k words to a file """
counter = Counter()
data_lower = os.path.join(args.output_dir, "lower.txt.gz")
print("\nConverting to lowercase and counting word occurrences ...")
with io.TextIOWrapper(
io.BufferedWriter(gzip.open(data_lower, "w+")), encoding="utf-8"
) as file_out:
# Open the input file either from input.txt or input.txt.gz
_, file_extension = os.path.splitext(args.input_txt)
if file_extension == ".gz":
file_in = io.TextIOWrapper(
io.BufferedReader(gzip.open(args.input_txt)), encoding="utf-8"
)
else:
file_in = open(args.input_txt, encoding="utf-8")
for line in progressbar.progressbar(file_in):
line_lower = line.lower()
counter.update(line_lower.split())
file_out.write(line_lower)
file_in.close()
# Save top-k words
print("\nSaving top {} words ...".format(args.top_k))
top_counter = counter.most_common(args.top_k)
vocab_str = "\n".join(word for word, count in top_counter)
vocab_path = "vocab-{}.txt".format(args.top_k)
vocab_path = os.path.join(args.output_dir, vocab_path)
with open(vocab_path, "w+") as file:
file.write(vocab_str)
print("\nCalculating word statistics ...")
total_words = sum(counter.values())
print(" Your text file has {} words in total".format(total_words))
print(" It has {} unique words".format(len(counter)))
top_words_sum = sum(count for word, count in top_counter)
word_fraction = (top_words_sum / total_words) * 100
print(
" Your top-{} words are {:.4f} percent of all words".format(
args.top_k, word_fraction
)
)
print(' Your most common word "{}" occurred {} times'.format(*top_counter[0]))
last_word, last_count = top_counter[-1]
print(
' The least common word in your top-k is "{}" with {} times'.format(
last_word, last_count
)
)
for i, (w, c) in enumerate(reversed(top_counter)):
if c > last_count:
print(
' The first word with {} occurrences is "{}" at place {}'.format(
c, w, len(top_counter) - 1 - i
)
)
break
return data_lower, vocab_str
def build_lm(args, data_lower, vocab_str):
print("\nCreating ARPA file ...")
lm_path = os.path.join(args.output_dir, "lm.arpa")
subargs = [
os.path.join(args.kenlm_bins, "lmplz"),
"--order",
str(args.arpa_order),
"--temp_prefix",
args.output_dir,
"--memory",
args.max_arpa_memory,
"--text",
data_lower,
"--arpa",
lm_path,
"--prune",
*args.arpa_prune.split("|"),
]
if args.discount_fallback:
subargs += ["--discount_fallback"]
subprocess.check_call(subargs)
# Filter LM using vocabulary of top-k words
print("\nFiltering ARPA file using vocabulary of top-k words ...")
filtered_path = os.path.join(args.output_dir, "lm_filtered.arpa")
subprocess.run(
[
os.path.join(args.kenlm_bins, "filter"),
"single",
"model:{}".format(lm_path),
filtered_path,
],
input=vocab_str.encode("utf-8"),
check=True,
)
# Quantize and produce trie binary.
print("\nBuilding lm.binary ...")
binary_path = os.path.join(args.output_dir, "lm.binary")
subprocess.check_call(
[
os.path.join(args.kenlm_bins, "build_binary"),
"-a",
str(args.binary_a_bits),
"-q",
str(args.binary_q_bits),
"-v",
args.binary_type,
filtered_path,
binary_path,
]
)
def main():
parser = argparse.ArgumentParser(
description="Generate lm.binary and top-k vocab for DeepSpeech."
)
parser.add_argument(
"--input_txt",
help="Path to a file.txt or file.txt.gz with sample sentences",
type=str,
required=True,
)
parser.add_argument(
"--output_dir", help="Directory path for the output", type=str, required=True
)
parser.add_argument(
"--top_k",
help="Use top_k most frequent words for the vocab.txt file. These will be used to filter the ARPA file.",
type=int,
required=True,
)
parser.add_argument(
"--kenlm_bins",
help="File path to the KENLM binaries lmplz, filter and build_binary",
type=str,
required=True,
)
parser.add_argument(
"--arpa_order",
help="Order of k-grams in ARPA-file generation",
type=int,
required=True,
)
parser.add_argument(
"--max_arpa_memory",
help="Maximum allowed memory usage for ARPA-file generation",
type=str,
required=True,
)
parser.add_argument(
"--arpa_prune",
help="ARPA pruning parameters. Separate values with '|'",
type=str,
required=True,
)
parser.add_argument(
"--binary_a_bits",
help="Build binary quantization value a in bits",
type=int,
required=True,
)
parser.add_argument(
"--binary_q_bits",
help="Build binary quantization value q in bits",
type=int,
required=True,
)
parser.add_argument(
"--binary_type",
help="Build binary data structure type",
type=str,
required=True,
)
parser.add_argument(
"--discount_fallback",
help="To try when such message is returned by kenlm: 'Could not calculate Kneser-Ney discounts [...] rerun with --discount_fallback'",
action="store_true",
)
args = parser.parse_args()
data_lower, vocab_str = convert_and_filter_topk(args)
build_lm(args, data_lower, vocab_str)
# Delete intermediate files
os.remove(os.path.join(args.output_dir, "lower.txt.gz"))
os.remove(os.path.join(args.output_dir, "lm.arpa"))
os.remove(os.path.join(args.output_dir, "lm_filtered.arpa"))
if __name__ == "__main__":
main()
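# Example invocation (all values are illustrative; the KenLM binaries must be
# built separately and their directory passed via --kenlm_bins):
#
#   python generate_lm.py --input_txt corpus.txt.gz --output_dir lm/ \
#       --top_k 500000 --kenlm_bins /path/to/kenlm/build/bin/ \
#       --arpa_order 5 --max_arpa_memory "85%" --arpa_prune "0|0|1" \
#       --binary_a_bits 255 --binary_q_bits 8 --binary_type trie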
| googleinterns/deepspeech-reconstruction | data/lm/generate_lm.py | Python | apache-2.0 | 6,452 |
from deterministic_encryption_utils.encryption.EncryptionExtensions import FileSaltProvider, FilenameSaltProvider
from deterministic_encryption_utils.encryption.extensions import * # Necessary for finding subclasses
class ExtensionRegistry(object):
def __init__(self):
self.fileSaltProviders = ExtensionRegistry.__getProviders(FileSaltProvider)
self.filenameSaltProviders = ExtensionRegistry.__getProviders(FilenameSaltProvider)
def getFileSaltProvider(self, identifier):
if identifier not in self.fileSaltProviders.keys():
raise ValueError("The given identifier is invalid.")
return self.fileSaltProviders.get(identifier)
def getFileSaltProviderIds(self):
return self.fileSaltProviders.keys()
def getFilenameSaltProvider(self, identifier):
if identifier not in self.filenameSaltProviders.keys():
raise ValueError("The given identifier is invalid.")
return self.filenameSaltProviders.get(identifier)
def getFilenameSaltProviderIds(self):
return self.filenameSaltProviders.keys()
@staticmethod
def __getProviders(cls):
providers = dict()
for c in ExtensionRegistry.__findAllSubclasses(cls):
provider = c()
providers[provider.getId()] = provider
return providers
@staticmethod
def __findAllSubclasses(cls):
subclasses = set()
for s in cls.__subclasses__():
subclasses.add(s)
for g in ExtensionRegistry.__findAllSubclasses(s):
subclasses.add(g)
return subclasses
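# Usage sketch (the provider id below is hypothetical; real ids come from the
# FileSaltProvider/FilenameSaltProvider subclasses discovered in the
# extensions package):
#
#   registry = ExtensionRegistry()
#   print(registry.getFileSaltProviderIds())
#   provider = registry.getFileSaltProvider('some-provider-id')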
| seiferma/deterministic_encryption_utils | deterministic_encryption_utils/encryption/extensions/ExtensionRegistry.py | Python | mit | 1,633 |
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script generates the files featureconfig.h and featureconfig.c.
#
from __future__ import print_function
import time, string
import inspect, sys, os
# find featuredefs.py
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..'))
import featuredefs
if len(sys.argv) != 4:
print("Usage: {} DEFFILE HPPFILE CPPFILE".format(sys.argv[0]), file=sys.stderr)
exit(2)
deffilename, hfilename, cfilename = sys.argv[1:5]
print("Reading definitions from " + deffilename + "...")
defs = featuredefs.defs(deffilename)
print("Done.")
print("Writing " + hfilename + "...")
hfile = open(hfilename, 'w');
hfile.write("""/*
WARNING: This file was autogenerated by
%s on %s
Do not modify it or your changes will be overwritten!
Modify features.def instead.
*/
#ifndef _FEATURECONFIG_HPP
#define _FEATURECONFIG_HPP
""" % (sys.argv[0], time.asctime()))
# handle implications
hfile.write('/* Handle implications */')
implication_template = string.Template("""
// $feature implies $implied
#if defined($feature) && !defined($implied)
#define $implied
#endif
""")
for feature, implied in defs.implications:
hfile.write(implication_template.substitute(feature=feature, implied=implied))
# output warnings if internal features are set manually
hfile.write('/* Warn when derived switches are specified manually */')
derivation_template = string.Template("""
// $feature equals $expr
#ifdef $feature
#warning $feature is a derived switch and should not be set manually!
#elif $cppexpr
#define $feature
#endif
""")
for feature, expr, cppexpr in defs.derivations:
hfile.write(derivation_template.substitute(feature=feature, cppexpr=cppexpr, expr=expr))
# write footer
# define external FEATURES and NUM_FEATURES
hfile.write("""
extern const char* FEATURES[];
extern const int NUM_FEATURES;
#endif /* of _FEATURECONFIG_HPP */""")
hfile.close()
print("Done.")
print("Writing " + cfilename + "...")
cfile = open(cfilename, 'w');
# handle requirements
cfile.write("""/*
WARNING: This file was autogenerated by
{script}
on
{date}
Do not modify it or your changes will be overwritten!
Modify features.def instead.
*/
/* config.hpp includes config-features.hpp and myconfig.hpp */
#include "config.hpp"
""".format(script=sys.argv[0], date=time.asctime()))
cfile.write('/* Handle requirements */')
requirement_string = """
// {feature} requires {expr}
#if defined({feature}) && !({cppexpr})
#error Feature {feature} requires {expr}
#endif
"""
for feature, expr, cppexpr in defs.requirements:
cfile.write(
requirement_string.format(
feature=feature, cppexpr=cppexpr, expr=expr))
cfile.write("""
/* Feature list */
const char* FEATURES[] = {
""")
feature_string = """
#ifdef {feature}
"{feature}",
#endif
"""
for feature in defs.externals.union(defs.features, defs.derived):
cfile.write(feature_string.format(feature=feature))
cfile.write("""
};
const int NUM_FEATURES = sizeof(FEATURES)/sizeof(char*);
""")
cfile.close()
print("Done.")
| Marcello-Sega/espresso | src/core/gen_featureconfig.py | Python | gpl-3.0 | 3,818 |
import socket
import threading
import struct
import logging
import string
from SocketServer import UDPServer, ThreadingMixIn, BaseRequestHandler
from core.responder.fingerprinter.RAPLANMANPackets import *
mitmf_logger = logging.getLogger("mitmf")
class LANFingerprinter():
def start(self, options):
global args; args = options #For now a quick hack to make argparse's namespace object available to all
try:
mitmf_logger.debug("[LANFingerprinter] online")
server = ThreadingUDPServer(("0.0.0.0", 138), Browser)
t = threading.Thread(name="LANFingerprinter", target=server.serve_forever)
t.setDaemon(True)
t.start()
except Exception, e:
mitmf_logger.error("[LANFingerprinter] Error starting on port 138: {}:".format(e))
class ThreadingUDPServer(ThreadingMixIn, UDPServer):
allow_reuse_address = 1
def server_bind(self):
UDPServer.server_bind(self)
class Browser(BaseRequestHandler):
def handle(self):
try:
request, socket = self.request
if args.analyze:
ParseDatagramNBTNames(request,self.client_address[0])
            BecomeBackup(request,self.client_address[0])
except Exception:
pass
def NBT_NS_Role(data):
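    # Map the NetBIOS service-type suffix bytes (the tail of the encoded name)
    # to a human-readable role description.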
    Role = {
        "\x41\x41\x00":"Workstation/Redirector Service.",
        "\x42\x4c\x00":"Domain Master Browser. This name is likely a domain controller or a homegroup.",
"\x42\x4d\x00":"Domain controller service. This name is a domain controller.",
"\x42\x4e\x00":"Local Master Browser.",
"\x42\x4f\x00":"Browser Election Service.",
"\x43\x41\x00":"File Server Service.",
"\x41\x42\x00":"Browser Service.",
}
if data in Role:
return Role[data]
else:
return "Service not known."
def Decode_Name(nbname):
#From http://code.google.com/p/dpkt/ with author's permission.
try:
if len(nbname) != 32:
return nbname
l = []
for i in range(0, 32, 2):
l.append(chr(((ord(nbname[i]) - 0x41) << 4) |
((ord(nbname[i+1]) - 0x41) & 0xf)))
return filter(lambda x: x in string.printable, ''.join(l).split('\x00', 1)[0].replace(' ', ''))
except Exception, e:
mitmf_logger.debug("[LANFingerprinter] Error parsing NetBIOS name: {}".format(e))
return "Illegal NetBIOS name"
def WorkstationFingerPrint(data):
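    # The two bytes are the OS major/minor version fields from the browser
    # announcement (e.g. \x06\x01 -> NT 6.1, i.e. Windows 7/Server 2008R2).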
    Role = {
        "\x04\x00" :"Windows 95",
        "\x04\x10" :"Windows 98",
        "\x04\x90" :"Windows ME",
        "\x05\x00" :"Windows 2000",
        "\x05\x01" :"Windows XP",
"\x05\x02" :"Windows 2003",
"\x06\x00" :"Windows Vista/Server 2008",
"\x06\x01" :"Windows 7/Server 2008R2",
}
if data in Role:
return Role[data]
else:
return False
def PrintServerName(data, entries):
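    # Each entry in the RAP NetServerEnum answer is 26 bytes: a 16-byte server
    # name followed by version and server-type fields.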
if entries == 0:
pass
else:
entrieslen = 26*entries
chunks, chunk_size = len(data[:entrieslen]), entrieslen/entries
ServerName = [data[i:i+chunk_size] for i in range(0, chunks, chunk_size) ]
l =[]
for x in ServerName:
if WorkstationFingerPrint(x[16:18]):
l.append(x[:16].replace('\x00', '')+'| OS:%s'%(WorkstationFingerPrint(x[16:18])))
else:
l.append(x[:16].replace('\x00', ''))
return l
def ParsePacket(Payload):
PayloadOffset = struct.unpack('<H',Payload[51:53])[0]
StatusCode = Payload[PayloadOffset-4:PayloadOffset-2]
if StatusCode == "\x00\x00":
EntriesNum = struct.unpack('<H',Payload[PayloadOffset:PayloadOffset+2])[0]
ParsedNames = PrintServerName(Payload[PayloadOffset+4:], EntriesNum)
return ParsedNames
else:
return None
def RAPThisDomain(Client,Domain):
try:
l =[]
for x in range(1):
PDC = RapFinger(Client,Domain,"\x00\x00\x00\x80")
if PDC is not None:
l.append('[LANFingerprinter]')
l.append('Domain detected on this network:')
for x in PDC:
l.append(' -'+x)
SQL = RapFinger(Client,Domain,"\x04\x00\x00\x00")
if SQL is not None:
l.append('SQL Server detected on Domain {}:'.format(Domain))
for x in SQL:
l.append(' -'+x)
WKST = RapFinger(Client,Domain,"\xff\xff\xff\xff")
if WKST is not None:
l.append('Workstations/Servers detected on Domain {}:'.format(Domain))
for x in WKST:
l.append(' -'+x)
else:
pass
return '\n'.join(l)
except:
pass
def RapFinger(Host,Domain, Type):
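    # Minimal anonymous SMB conversation: Negotiate, Session Setup AndX,
    # Tree Connect to IPC$, then a RAP NetServerEnum request whose answer is
    # parsed by ParsePacket().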
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((Host,445))
s.settimeout(0.3)
h = SMBHeader(cmd="\x72",mid="\x01\x00")
n = SMBNegoData()
n.calculate()
packet0 = str(h)+str(n)
buffer0 = longueur(packet0)+packet0
s.send(buffer0)
data = s.recv(1024)
##Session Setup AndX Request, Anonymous.
if data[8:10] == "\x72\x00":
head = SMBHeader(cmd="\x73",mid="\x02\x00")
t = SMBSessionData()
t.calculate()
final = t
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
s.send(buffer1)
data = s.recv(1024)
##Tree Connect IPC$.
if data[8:10] == "\x73\x00":
head = SMBHeader(cmd="\x75",flag1="\x08", flag2="\x01\x00",uid=data[32:34],mid="\x03\x00")
t = SMBTreeConnectData(Path="\\\\"+Host+"\\IPC$")
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
s.send(buffer1)
data = s.recv(1024)
##Rap ServerEnum.
if data[8:10] == "\x75\x00":
head = SMBHeader(cmd="\x25",flag1="\x08", flag2="\x01\xc8",uid=data[32:34],tid=data[28:30],pid=data[30:32],mid="\x04\x00")
t = SMBTransRAPData(Data=RAPNetServerEnum3Data(ServerType=Type,DetailLevel="\x01\x00",TargetDomain=Domain))
t.calculate()
packet1 = str(head)+str(t)
buffer1 = longueur(packet1)+packet1
s.send(buffer1)
data = s.recv(64736)
##Rap ServerEnum, Get answer and return what we're looking for.
if data[8:10] == "\x25\x00":
s.close()
return ParsePacket(data)
except:
return None
def BecomeBackup(data, Client):
try:
DataOffset = struct.unpack('<H',data[139:141])[0]
BrowserPacket = data[82+DataOffset:]
if BrowserPacket[0] == "\x0b":
ServerName = BrowserPacket[1:]
Domain = Decode_Name(data[49:81])
Name = Decode_Name(data[15:47])
Role = NBT_NS_Role(data[45:48])
if args.analyze:
Message1=RAPThisDomain(Client,Domain)
mitmf_logger.warning(Message1)
mitmf_logger.warning("[LANFingerprinter] Datagram Request from {} | Hostname: {} via the {} wants to become a Local Master Browser Backup on this domain: {}.".format(Client, Name,Role,Domain))
except:
pass
try:
Domain = Decode_Name(data[49:81])
Name = Decode_Name(data[15:47])
Role1 = NBT_NS_Role(data[45:48])
Role2 = NBT_NS_Role(data[79:82])
if Role2 == "Domain controller service. This name is a domain controller." or Role2 == "Browser Election Service." or Role2 == "Local Master Browser.":
if args.analyze:
Message1=RAPThisDomain(Client,Domain)
mitmf_logger.warning(Message1)
mitmf_logger.warning('[LANFingerprinter] Datagram Request from: {} | Hostname: {} via the {} to {} | Service: {}'.format(Client, Name, Role1, Domain, Role2))
except:
pass
def ParseDatagramNBTNames(data,Client):
try:
Domain = Decode_Name(data[49:81])
Name = Decode_Name(data[15:47])
Role1 = NBT_NS_Role(data[45:48])
Role2 = NBT_NS_Role(data[79:82])
if Role2 == "Domain controller service. This name is a domain controller." or Role2 == "Browser Election Service." or Role2 == "Local Master Browser.":
if args.analyze:
Message1=RAPThisDomain(Client,Domain)
mitmf_logger.warning(Message1)
mitmf_logger.warning('[LANFingerprinter] Datagram Request from: {} | Hostname: {} via the {} to {} | Service: {}'.format(Client, Name, Role1, Domain, Role2))
except:
pass | lucap91/mitmf | core/responder/fingerprinter/LANFingerprinter.py | Python | gpl-3.0 | 8,947 |
_is_python2 = False
try:
""" Running on python 3.x """
from configparser import ConfigParser
except ImportError:
""" Running on python 2.6+ """
import ConfigParser
_is_python2 = True
if _is_python2:
config = ConfigParser.SafeConfigParser()
else:
config = ConfigParser()
def get_value(filepath, section, option):
    """Gets a value from the configuration file.

    :param filepath: string with the path of the configuration file
    :param section: string with the section containing the value
    :param option: string with the option containing the value
    :return: a string with the wanted value
    :raise KeyError: if the section/option pair is missing
"""
config.read(filepath)
if config.has_section(section) and config.has_option(section, option):
if _is_python2:
value = config.get(section, option)
else:
value = config[section][option]
return value
else:
raise KeyError("Error reading configuration option '[{0}] [{1}]'.".format(section, option))
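# Example usage (hypothetical file/section/option names):
#   host = get_value('/etc/voomon/voomon.ini', 'database', 'host')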
| aalves/voomon | voomon/utils/config/myconfig.py | Python | gpl-2.0 | 984 |
#!/usr/bin/env python
__author__ = 'Brendan'
from gimpfu import *
import random
import sys
from BasicOperations import *
import traceback
def Generative():
try:
img,layer=initalize(2000,2000)
finalize(img)
except: #if program fails, print traceback information to gimp error console
tb=traceback.format_exc()
pdb.gimp_message(tb)
register(
"python_fu_GenerativeScript",
"GenerativeScript",
"GenerativeScript",
"Brendan Degrafft",
"Brendan Degrafft",
"2016",
"Generative",
"", # Create a new image, don't work on an existing one
[],
[],
Generative, menu="<Image>/Generative")
main()
| bdegrafft/GGIMPLib | new.py | Python | gpl-3.0 | 677 |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import subprocess
import sys
import os.path as p
import glob
PY_MAJOR, PY_MINOR = sys.version_info[ 0 : 2 ]
if not ( ( PY_MAJOR == 2 and PY_MINOR >= 6 ) or
( PY_MAJOR == 3 and PY_MINOR >= 3 ) or
PY_MAJOR > 3 ):
sys.exit( 'YouCompleteMe requires Python >= 2.6 or >= 3.3; '
'your version of Python is ' + sys.version )
DIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) )
DIR_OF_OLD_LIBS = p.join( DIR_OF_THIS_SCRIPT, 'python' )
def CheckCall( args, **kwargs ):
try:
subprocess.check_call( args, **kwargs )
except subprocess.CalledProcessError as error:
sys.exit( error.returncode )
def Main():
build_file = p.join( DIR_OF_THIS_SCRIPT, 'third_party', 'ycmd', 'build.py' )
if not p.isfile( build_file ):
sys.exit(
'File {0} does not exist; you probably forgot to run:\n'
'\tgit submodule update --init --recursive\n'.format( build_file ) )
CheckCall( [ sys.executable, build_file ] + sys.argv[ 1: ] )
# Remove old YCM libs if present so that YCM can start.
old_libs = (
glob.glob( p.join( DIR_OF_OLD_LIBS, '*ycm_core.*' ) ) +
glob.glob( p.join( DIR_OF_OLD_LIBS, '*ycm_client_support.*' ) ) +
glob.glob( p.join( DIR_OF_OLD_LIBS, '*clang*.*') ) )
for lib in old_libs:
os.remove( lib )
if __name__ == "__main__":
Main()
| sunchuanleihit/vimrc | sources_non_forked/YouCompleteMe/install.py | Python | mit | 1,500 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import fixtures
import testscenarios
from testtools.matchers import Equals, FileExists, Not
from tests import integration
class GodepsPluginTestCase(testscenarios.WithScenarios,
integration.TestCase):
scenarios = [
('no GOBIN', dict(set_gobin=False)),
('with GOBIN', dict(set_gobin=True)),
]
def _assert_bcrypt_output(self, *, binary):
hash_command = [binary, 'hash', '10', 'password']
output = subprocess.check_output(hash_command)
check_hash_command = [binary, 'check', output, 'password']
output = subprocess.check_output(check_hash_command)
self.assertThat(output.decode('UTF-8').strip(' \n'), Equals('Equal'))
def test_stage(self):
if self.set_gobin:
gobin = 'gobin'
self.useFixture(fixtures.EnvironmentVariable('GOBIN', gobin))
self.run_snapcraft('stage', 'godeps')
binary = os.path.join(self.stage_dir, 'bin', 'bcrypt')
self.assertThat(binary, FileExists())
self._assert_bcrypt_output(binary=binary)
def test_stage_with_go_packages(self):
if self.set_gobin:
gobin = 'gobin'
self.useFixture(fixtures.EnvironmentVariable('GOBIN', gobin))
self.run_snapcraft('stage', 'godeps-with-go-packages')
binary = os.path.join(self.stage_dir, 'bin', 'only-main')
self.assertThat(binary, FileExists())
self.assertThat(
os.path.join(self.stage_dir, 'bin', 'bcrypt'),
Not(FileExists()))
self._assert_bcrypt_output(binary=binary)
| elopio/snapcraft | tests/integration/plugins/test_godeps_plugin.py | Python | gpl-3.0 | 2,298 |
#!/usr/bin/env python
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Generate and send Ethernet packets to specified interfaces.
# Collect data from interfaces.
# Analyse dumps for packets with special cookie in UDP payload.
#
import argparse
import itertools
import json
import logging
import os
import re
import shutil
import signal
import socket
import subprocess
import sys
import time
import traceback
import logging.handlers
from scapy import config as scapy_config
scapy_config.logLevel = 40
scapy_config.use_pcap = True
import scapy.all as scapy
from scapy.utils import rdpcap
class ActorFabric(object):
@classmethod
def getInstance(cls, config):
if config.get('action') not in ('listen', 'generate'):
raise Exception(
'Wrong config, you need define '
'valid action instead of {0}'.format(config.get('action')))
if config['action'] in ('listen',):
return Listener(config)
elif config['action'] in ('generate',):
return Sender(config)
class ActorException(Exception):
def __init__(self, logger, message='', level='error'):
getattr(logger, level, logger.error)(message)
super(ActorException, self).__init__(message)
class Actor(object):
def __init__(self, config=None):
self.config = {
'src_mac': None,
'src': '198.18.1.1',
'dst': '198.18.1.2',
'sport': 31337,
'dport': 31337,
'cookie': "Nailgun:",
'pcap_dir': "/var/run/pcap_dir/",
'duration': 5,
'repeat': 1
}
if config:
self.config.update(config)
self.logger.debug("Running with config: %s", json.dumps(self.config))
self._execute(["modprobe", "8021q"])
self.iface_down_after = {}
self.viface_remove_after = {}
def _define_logger(self, filename=None,
appname='netprobe', level=logging.DEBUG):
logger = logging.getLogger(appname)
logger.setLevel(level)
syslog_formatter = logging.Formatter(
'{appname}: %(message)s'.format(appname=appname)
)
syslog_handler = logging.handlers.SysLogHandler('/dev/log')
syslog_handler.setFormatter(syslog_formatter)
logger.addHandler(syslog_handler)
# A syslog handler should be always. But a file handler is the option.
# If you don't want it you can keep 'filename' variable as None to skip
# this handler.
if filename:
file_formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s'
)
file_handler = logging.FileHandler(filename)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
return logger
def _execute(self, command, expected_exit_codes=(0,)):
self.logger.debug("Running command: %s" % " ".join(command))
env = os.environ
env["PATH"] = "/bin:/usr/bin:/sbin:/usr/sbin"
p = subprocess.Popen(command, shell=False,
env=env, stdout=subprocess.PIPE)
output, _ = p.communicate()
if p.returncode not in expected_exit_codes:
raise ActorException(
self.logger,
"Command exited with error: %s: %s" % (" ".join(command),
p.returncode)
)
return output.split('\n')
def _viface_by_iface_vid(self, iface, vid):
return (self._try_viface_create(iface, vid) or "%s.%d" % (iface, vid))
def _iface_name(self, iface, vid=None):
if vid:
return self._viface_by_iface_vid(iface, vid)
return iface
def _look_for_link(self, iface, vid=None):
viface = None
if vid:
viface = self._viface_by_iface_vid(iface, vid)
command = ['ip', 'link']
r = re.compile(ur"(\d+?):\s+((?P<viface>[^:@]+)@)?(?P<iface>[^:]+?):"
".+?(?P<state>UP|DOWN|UNKNOWN).*$")
for line in self._execute(command):
m = r.search(line)
if m:
md = m.groupdict()
if (iface == md.get('iface') and
viface == md.get('viface') and md.get('state')):
return (iface, viface, md.get('state'))
# If we are here we aren't able to say if iface with vid is up
raise ActorException(
self.logger,
"Cannot find interface %s with vid=%s" % (iface, vid)
)
def _try_iface_up(self, iface, vid=None):
if vid and not self._try_viface_create(iface, vid):
# if viface does not exist we raise exception
raise ActorException(
self.logger,
"Vlan %s on interface %s does not exist" % (str(vid), iface)
)
self.logger.debug("Checking if interface %s with vid %s is up",
iface, str(vid))
_, _, state = self._look_for_link(iface, vid)
return (state == 'UP')
    def _iface_up(self, iface, vid=None):
        """Brings the interface (with optional vlan vid) up."""
if vid and not self._try_viface_create(iface, vid):
# if viface does not exist we raise exception
raise ActorException(
self.logger,
"Vlan %s on interface %s does not exist" % (str(vid), iface)
)
set_iface = self._iface_name(iface, vid)
self.logger.debug("Brining interface %s with vid %s up",
set_iface, str(vid))
self._execute([
"ip",
"link", "set",
"dev", set_iface,
"up"])
    def _ensure_iface_up(self, iface, vid=None):
        """Ensures that the interface (with optional vlan vid) is up."""
if not self._try_iface_up(iface, vid):
# if iface is not up we try to bring it up
self._iface_up(iface, vid)
if self._try_iface_up(iface, vid):
# if iface was down and we have brought it up
# we should mark it to be brought down after probing
self.iface_down_after[self._iface_name(iface, vid)] = True
else:
# if viface is still down we raise exception
raise ActorException(
self.logger,
"Can not bring interface %s with vid %s up" % (iface,
str(vid))
)
def _ensure_iface_down(self, iface, vid=None):
set_iface = self._iface_name(iface, vid)
if self.iface_down_after.get(set_iface, False):
# if iface with vid have been marked to be brought down
# after probing we try to bring it down
self.logger.debug("Brining down interface %s with vid %s",
iface, str(vid))
self._execute([
"ip",
"link", "set",
"dev", set_iface,
"down"])
self.iface_down_after.pop(set_iface)
def _try_viface_create(self, iface, vid):
"""Tries to find vlan interface on iface with VLAN_ID=vid and return it
:returns: name of vlan interface if it exists or None
"""
self.logger.debug("Checking if vlan %s on interface %s exists",
str(vid), iface)
with open("/proc/net/vlan/config", "r") as f:
for line in f:
m = re.search(ur'(.+?)\s+\|\s+(.+?)\s+\|\s+(.+?)\s*$', line)
if m and m.group(2) == str(vid) and m.group(3) == iface:
return m.group(1)
def _viface_create(self, iface, vid):
"""Creates VLAN interface with VLAN_ID=vid on interface iface
:returns: None
"""
self.logger.debug("Creating vlan %s on interface %s", str(vid), iface)
self._execute([
"ip",
"link", "add",
"link", iface,
"name", self._viface_by_iface_vid(iface, vid),
"type", "vlan",
"id", str(vid)])
def _ensure_viface_create(self, iface, vid):
"""Ensures that vlan interface exists. If it does not already
exist, then we need it to be created. It also marks newly created
vlan interface to remove it after probing procedure.
"""
if not self._try_viface_create(iface, vid):
# if viface does not exist we try to create it
self._viface_create(iface, vid)
if self._try_viface_create(iface, vid):
# if viface had not existed and have been created
# we mark it to be removed after probing procedure
self.viface_remove_after[
self._viface_by_iface_vid(iface, vid)
] = True
else:
# if viface had not existed and still does not
# we raise exception
raise ActorException(
self.logger,
"Can not create vlan %d on interface %s" % (vid, iface)
)
def _ensure_viface_remove(self, iface, vid):
viface = self._viface_by_iface_vid(iface, vid)
if self.viface_remove_after.get(viface, False):
# if viface have been marked to be removed after probing
# we try to remove it
self.logger.debug("Removing vlan %s on interface %s",
str(vid), iface)
self._execute([
"ip",
"link", "del",
"dev", viface])
self.viface_remove_after.pop(viface)
def _parse_vlan_list(self, vlan_string):
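        # Accepts a comma-separated list of ids and inclusive ranges,
        # e.g. "10,100-102" -> [10, 100, 101, 102]; ids must be in 0..4094.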
self.logger.debug("Parsing vlan list: %s", vlan_string)
validate = lambda x: (x >= 0) and (x < 4095)
chunks = vlan_string.split(",")
vlan_list = []
for chunk in chunks:
delim = chunk.find("-")
try:
if delim > 0:
left = int(chunk[:delim])
right = int(chunk[delim + 1:])
if validate(left) and validate(right):
vlan_list.extend(xrange(left, right + 1))
else:
raise ValueError
else:
vlan = int(chunk)
if validate(vlan):
vlan_list.append(vlan)
else:
raise ValueError
except ValueError:
raise ActorException(self.logger, "Incorrect vlan: %s" % chunk)
self.logger.debug("Parsed vlans: %s", str(vlan_list))
return vlan_list
def _ensure_viface_create_and_up(self, iface, vid):
self._ensure_viface_create(iface, vid)
self._ensure_iface_up(iface, vid)
def _ensure_viface_down_and_remove(self, iface, vid):
self._ensure_iface_down(iface, vid)
self._ensure_viface_remove(iface, vid)
def _iface_vlan_iterator(self):
for iface, vlan_list in self.config['interfaces'].iteritems():
            # Variables iface and vlan_list are taken from the decoded JSON,
            # and json.load converts all string data to Python unicode strings.
# We use these variables in logging messages later.
# CentOS 6.4 uses Python 2.6 and logging module 0.5.0.5 which has
# a bug with converting unicode strings to message in
# SysLogHandler. So we need to convert all unicode to plain
# strings to avoid syslog message corruption.
for vlan in self._parse_vlan_list(str(vlan_list)):
yield (str(iface), vlan)
def _iface_iterator(self):
for iface in self.config['interfaces']:
yield iface
def _log_ifaces(self, prefix="Current interfaces"):
self.logger.debug("%s: ", prefix)
for line in self._execute(['ip', 'address']):
self.logger.debug(line.rstrip())
class Sender(Actor):
def __init__(self, config=None):
self.logger = self._define_logger('/var/log/netprobe_sender.log',
'netprobe_sender')
super(Sender, self).__init__(config)
self.logger.info("=== Starting Sender ===")
self._log_ifaces("Interfaces just before sending probing packages")
def run(self):
try:
self._run()
except Exception as e:
self.logger.error("An internal error occured: %s\n%s", str(e),
traceback.format_exc())
def _get_iface_mac(self, iface):
path = '/sys/class/net/{iface}/address'.format(iface=iface)
with open(path, 'r') as address:
return address.read().strip('\n')
def _run(self):
for iface, vlan in self._iface_vlan_iterator():
self._ensure_iface_up(iface)
self._send_packets()
self._log_ifaces("Interfaces just after sending probing packages")
for iface in self._iface_iterator():
self._ensure_iface_down(iface)
self._log_ifaces("Interfaces just after ensuring them down in sender")
self.logger.info("=== Sender Finished ===")
def _send_packets(self):
start_time = time.time()
for iface, vlan in itertools.cycle(self._iface_vlan_iterator()):
self.logger.debug("Sending packets: iface=%s vlan=%s",
iface, str(vlan))
for _ in xrange(self.config['repeat']):
self._sendp(iface, vlan)
if time.time() - start_time >= self.config['duration']:
break
def _sendp(self, iface, vlan):
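        # Build one broadcast probe frame: Ethernet (optionally 802.1Q tagged)
        # / IP / UDP whose payload is "<cookie><iface> <uid>", so listeners can
        # tell which remote node and interface the frame came from.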
try:
data = str(''.join((self.config['cookie'], iface, ' ',
self.config['uid'])))
p = scapy.Ether(src=self._get_iface_mac(iface),
dst="ff:ff:ff:ff:ff:ff")
if vlan > 0:
p = p / scapy.Dot1Q(vlan=vlan)
p = p / scapy.IP(src=self.config['src'], dst=self.config['dst'])
p = p / scapy.UDP(sport=self.config['sport'],
dport=self.config['dport']) / data
scapy.sendp(p, iface=iface)
except socket.error as e:
self.logger.error("Socket error: %s, %s", e, iface)
class Listener(Actor):
def __init__(self, config=None):
self.logger = self._define_logger('/var/log/netprobe_listener.log',
'netprobe_listener')
super(Listener, self).__init__(config)
self.logger.info("=== Starting Listener ===")
self._log_ifaces("Interfaces just before starting listerning "
"for probing packages")
self.pidfile = self.addpid('/var/run/net_probe')
self.neighbours = {}
self._define_pcap_dir()
def addpid(self, piddir):
pid = os.getpid()
if not os.path.exists(piddir):
os.mkdir(piddir)
pidfile = os.path.join(piddir, str(pid))
with open(pidfile, 'w') as fo:
fo.write('')
return pidfile
def _define_pcap_dir(self):
if os.path.exists(self.config['pcap_dir']):
shutil.rmtree(self.config['pcap_dir'])
os.mkdir(self.config['pcap_dir'])
def run(self):
try:
self._run()
except Exception as e:
self.logger.error("An internal error occured: %s\n%s", str(e),
traceback.format_exc())
def _run(self):
sniffers = set()
listeners = []
for iface in self._iface_iterator():
self._ensure_iface_up(iface)
if iface not in sniffers:
listeners.append(self.get_probe_frames(iface))
listeners.append(self.get_probe_frames(iface, vlan=True))
sniffers.add(iface)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((self.config.get('ready_address', 'localhost'),
self.config.get('ready_port', 31338)))
except socket.error as e:
self.logger.error("Socket error: %s", e)
else:
self.logger.debug("Listener threads have been launched. "
"Reporting READY.")
msg = "READY"
total_sent = 0
while total_sent < len(msg):
sent = s.send(msg[total_sent:])
if sent == 0:
raise ActorException(
self.logger,
"Socket broken. Cannot send %s status." % msg
)
total_sent += sent
s.shutdown(socket.SHUT_RDWR)
s.close()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
            self.logger.debug("Interruption signal caught")
        except SystemExit:
            self.logger.debug("TERM signal caught")
for listener in listeners:
# terminate and flush pipes
listener.terminate()
out, err = listener.communicate()
if err and listener.returncode:
                self.logger.error('Listener: %s', err)
elif err:
self.logger.warning('Listener: %s', err)
self.logger.debug('Start reading dumped information.')
self.read_packets()
self._log_ifaces("Interfaces just before ensuring interfaces down")
for iface in self._iface_iterator():
self._ensure_iface_down(iface)
self._log_ifaces(
"Interfaces just after ensuring them down in listener")
with open(self.config['dump_file'], 'w') as fo:
fo.write(json.dumps(self.neighbours))
os.unlink(self.pidfile)
self.logger.info("=== Listener Finished ===")
def read_packets(self):
for iface in self._iface_iterator():
filenames = ['{0}.pcap'.format(iface),
'vlan_{0}.pcap'.format(iface)]
for filename in filenames:
self.read_pcap_file(iface, filename)
def read_pcap_file(self, iface, filename):
try:
pcap_file = os.path.join(self.config['pcap_dir'], filename)
for pkt in rdpcap(pcap_file):
self.fprn(pkt, iface)
except Exception:
            self.logger.exception("Can't read pcap file %s", pcap_file)
def fprn(self, p, iface):
if scapy.Dot1Q in p:
vlan = p[scapy.Dot1Q].vlan
else:
vlan = 0
        self.logger.debug("Caught packet: vlan=%s len=%s payload=%s",
str(vlan), p[scapy.UDP].len, p[scapy.UDP].payload)
received_msg, _ = p[scapy.UDP].extract_padding(p[scapy.UDP].load)
decoded_msg = received_msg.decode()
riface, uid = decoded_msg[len(self.config["cookie"]):].split(' ', 1)
self.neighbours[iface].setdefault(vlan, {})
if riface not in self.neighbours[iface][vlan].setdefault(uid, []):
self.neighbours[iface][vlan][uid].append(riface)
def get_probe_frames(self, iface, vlan=False):
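        # Spawn a tcpdump process that writes UDP probes addressed to our
        # destination port (optionally vlan tagged) into a pcap file, which is
        # parsed later by read_packets().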
if iface not in self.neighbours:
self.neighbours[iface] = {}
filter_string = 'udp and dst port {0}'.format(self.config['dport'])
filename = '{0}.pcap'.format(iface)
if vlan:
filter_string = '{0} {1}'.format('vlan and', filter_string)
filename = '{0}_{1}'.format('vlan', filename)
pcap_file = os.path.join(self.config['pcap_dir'], filename)
return subprocess.Popen(
['tcpdump', '-i', iface, '-w', pcap_file, filter_string],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# -------------- main ---------------
def define_parser():
config_examples = """
Config file examples:
Capture frames config file example is:
{"action": "listen", "interfaces": {"eth0": "1-4094"},
"dump_file": "/var/tmp/net-probe-dump-eth0"}
Simple frame generation config file example is:
{"action": "generate", "uid": "aaa-bb-cccccc",
"interfaces": { "eth0": "1-4094"}}
Full frame generation config file example is:
{ "action": "generate",
"uid": "aaa-bb-cccccc", "cookie": "Some cookie",
"src_mac": "11:22:33:44:55:66",
"src": "10.0.0.1", "dst": "10.255.255.255",
"sport": 4056, "dport": 4057,
"interfaces": {
"eth0": "10, 15, 20, 201-210, 301-310, 1000-2000",
"eth1": "1-4094"
}
}
"""
parser = argparse.ArgumentParser(epilog=config_examples)
parser.add_argument(
'-c', '--config', dest='config', action='store', type=str,
help='config file', default=None
)
return parser
def define_subparsers(parser):
subparsers = parser.add_subparsers(
dest="action", help='actions'
)
listen_parser = subparsers.add_parser(
'listen', help='listen for probe packets'
)
listen_parser.add_argument(
'-i', '--interface', dest='interface', action='store', type=str,
help='interface to listen on', required=True
)
listen_parser.add_argument(
'-v', '--vlans', dest='vlan_list', action='store', type=str,
help='vlan list to send tagged packets ("100,200-300")', required=True
)
listen_parser.add_argument(
'-k', '--cookie', dest='cookie', action='store', type=str,
help='cookie string to insert into probe packets payload',
default='Nailgun:'
)
listen_parser.add_argument(
'-o', '--file', dest='dump_file', action='store', type=str,
help='file to dump captured packets', default=None
)
listen_parser.add_argument(
'-a', '--address', dest='ready_address', action='store', type=str,
help='address to report listener ready state', default='localhost'
)
listen_parser.add_argument(
'-p', '--port', dest='ready_port', action='store', type=int,
help='port to report listener ready state', default=31338
)
generate_parser = subparsers.add_parser(
'generate', help='generate and send probe packets'
)
generate_parser.add_argument(
'-i', '--interface', dest='interface', action='store', type=str,
help='interface to send packets from', required=True
)
generate_parser.add_argument(
'-v', '--vlans', dest='vlan_list', action='store', type=str,
help='vlan list to send tagged packets ("100,200-300")', required=True
)
generate_parser.add_argument(
'-k', '--cookie', dest='cookie', action='store', type=str,
help='cookie string to insert into probe packets payload',
default='Nailgun:'
)
generate_parser.add_argument(
'-u', '--uid', dest='uid', action='store', type=str,
help='uid to insert into probe packets payload', default='1'
)
generate_parser.add_argument(
'-d', '--duration', dest='duration', type=int, default=5,
help='Amount of time to generate network packets. In seconds',
)
generate_parser.add_argument(
'-r', '--repeat', dest='repeat', type=int, default=1,
help='Amount of packets sended in one iteration.',
)
def term_handler(signum, sigframe):
sys.exit()
def main():
signal.signal(signal.SIGTERM, term_handler)
parser = define_parser()
params, other_params = parser.parse_known_args()
config = {}
if params.config:
# if config file is set then we discard all other
# command line parameters
try:
if params.config == '-':
fo = sys.stdin
else:
fo = open(params.config, 'r')
config = json.load(fo)
fo.close()
except IOError:
print("Can not read config file %s" % params.config)
exit(1)
except ValueError as e:
print("Can not parse config file: %s" % str(e))
exit(1)
else:
define_subparsers(parser)
params, other_params = parser.parse_known_args()
if params.action == 'listen':
config['action'] = 'listen'
config['interfaces'] = {}
config['interfaces'][params.interface] = params.vlan_list
config['cookie'] = params.cookie
config['ready_address'] = params.ready_address
config['ready_port'] = params.ready_port
if params.dump_file:
config['dump_file'] = params.dump_file
else:
                config['dump_file'] = "/var/tmp/net-probe-dump-%s" % \
                    params.interface
elif params.action == 'generate':
config['action'] = 'generate'
config['interfaces'] = {}
config['interfaces'][params.interface] = params.vlan_list
config['uid'] = params.uid
config['cookie'] = params.cookie
config['duration'] = params.duration
config['repeat'] = params.repeat
actor = ActorFabric.getInstance(config)
actor.run()
if __name__ == "__main__":
main()
| zhaochao/fuel-web | network_checker/network_checker/net_check/api.py | Python | apache-2.0 | 25,801 |
"""CGI/FastCGI handler for Mapnik OGC WMS Server.
Requires 'jon' module.
"""
from os import environ
from tempfile import gettempdir
environ['PYTHON_EGG_CACHE'] = gettempdir()
import sys
from jon import cgi
from ogcserver.common import Version
from ogcserver.configparser import SafeConfigParser
from ogcserver.wms111 import ExceptionHandler as ExceptionHandler111
from ogcserver.wms130 import ExceptionHandler as ExceptionHandler130
from ogcserver.exceptions import OGCException, ServerConfigurationError
class Handler(cgi.DebugHandler):
def __init__(self, home_html=None):
conf = SafeConfigParser()
conf.readfp(open(self.configpath))
# TODO - be able to supply in config as well
self.home_html = home_html
self.conf = conf
if not conf.has_option_with_value('server', 'module'):
raise ServerConfigurationError('The factory module is not defined in the configuration file.')
try:
mapfactorymodule = __import__(conf.get('server', 'module'))
except ImportError:
raise ServerConfigurationError('The factory module could not be loaded.')
if hasattr(mapfactorymodule, 'WMSFactory'):
self.mapfactory = getattr(mapfactorymodule, 'WMSFactory')()
else:
raise ServerConfigurationError('The factory module does not have a WMSFactory class.')
if conf.has_option('server', 'debug'):
self.debug = int(conf.get('server', 'debug'))
else:
self.debug = 0
def process(self, req):
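        # Dispatch an OGC request: normalise parameter names, load the service
        # handler module, validate the parameters and call the matching request
        # method; any failure is rendered as a version-appropriate exception
        # report.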
base = False
if not req.params:
base = True
reqparams = lowerparams(req.params)
if self.conf.has_option_with_value('service', 'baseurl'):
onlineresource = '%s' % self.conf.get('service', 'baseurl')
else:
# if there is no baseurl in the config file try to guess a valid one
onlineresource = 'http://%s%s?' % (req.environ['HTTP_HOST'], req.environ['SCRIPT_NAME'])
try:
if not reqparams.has_key('request'):
raise OGCException('Missing request parameter.')
request = reqparams['request']
del reqparams['request']
if request == 'GetCapabilities' and not reqparams.has_key('service'):
raise OGCException('Missing service parameter.')
if request in ['GetMap', 'GetFeatureInfo']:
service = 'WMS'
else:
service = reqparams['service']
if reqparams.has_key('service'):
del reqparams['service']
try:
ogcserver = __import__('ogcserver.' + service)
except:
raise OGCException('Unsupported service "%s".' % service)
ServiceHandlerFactory = getattr(ogcserver, service).ServiceHandlerFactory
servicehandler = ServiceHandlerFactory(self.conf, self.mapfactory, onlineresource, reqparams.get('version', None))
if reqparams.has_key('version'):
del reqparams['version']
if request not in servicehandler.SERVICE_PARAMS.keys():
raise OGCException('Operation "%s" not supported.' % request, 'OperationNotSupported')
ogcparams = servicehandler.processParameters(request, reqparams)
try:
requesthandler = getattr(servicehandler, request)
except:
raise OGCException('Operation "%s" not supported.' % request, 'OperationNotSupported')
# stick the user agent in the request params
# so that we can add ugly hacks for specific buggy clients
ogcparams['HTTP_USER_AGENT'] = req.environ['HTTP_USER_AGENT']
response = requesthandler(ogcparams)
except:
version = reqparams.get('version', None)
if not version:
version = Version()
else:
version = Version(version)
if version >= '1.3.0':
eh = ExceptionHandler130(self.debug,base,self.home_html)
else:
eh = ExceptionHandler111(self.debug,base,self.home_html)
response = eh.getresponse(reqparams)
req.set_header('Content-Type', response.content_type)
req.set_header('Content-Length', str(len(response.content)))
req.write(response.content)
def traceback(self, req):
reqparams = lowerparams(req.params)
version = reqparams.get('version', None)
if not version:
version = Version()
else:
version = Version(version)
if version >= '1.3.0':
eh = ExceptionHandler130(self.debug)
else:
eh = ExceptionHandler111(self.debug)
response = eh.getresponse(reqparams)
req.set_header('Content-Type', response.content_type)
req.set_header('Content-Length', str(len(response.content)))
req.write(response.content)
def lowerparams(params):
reqparams = {}
for key, value in params.items():
reqparams[key.lower()] = value
return reqparams | pbabik/OGCServer | ogcserver/cgiserver.py | Python | bsd-3-clause | 5,124 |
# mopitwail.py 0.1
# piratesec.de
#!/usr/bin/python
import subprocess
import datetime
import time
from time import sleep
import os
import sys
import random
# Mail-Imports
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Tweet-Imports
import tweepy
import tweetpony
# GPIO-Import
import RPi.GPIO as GPIO
import time
# --------- Config ---------
GPIO.setmode(GPIO.BCM)
GPIO_PIR = 7
GPIO.setup(GPIO_PIR,GPIO.IN)
curr = 0
prev = 0
api = tweetpony.API(consumer_key = "XXX", consumer_secret = "XXX", access_token = "XXX", access_token_secret = "XXX")
tweettext = "Intruder detected"
# --------- Functions ---------
def takepic():
print "taking pic..."
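    # The filename relies on fswebcam expanding the strftime-style tokens
    # itself; the same pattern is rebuilt below to locate the saved picture.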
grab_cam = subprocess.Popen("sudo fswebcam -r 1920x1080 -d /dev/video0 -q /home/pi/projects/tweepic/pictures/%d.%m.%Y-%H:%M.jpg", shell=True)
grab_cam.wait()
# prep the pic.
todays_date = datetime.datetime.today()
image_name = todays_date.strftime('%d.%m.%Y-%H:%M')
image_path = '/home/pi/projects/tweepic/pictures/' + image_name + '.jpg'
pic = image_path
print "pic taken."
return todays_date, image_path, pic
def tweeter(todays_date, image_path):
print "tweeting pic and this text: %s" % tweettext
try:
api.update_status_with_media(status = (tweettext, todays_date.strftime('%d.%m.%Y-%H:%M')), media= image_path)
        print "tweeted."
except tweetpony.APIError as err:
print "Twitter error #%i: %s" % (err.code, err.description)
# del_img = subprocess.Popen("sudo rm -rf " + image_path, shell=True)
# del_img.wait()
time.sleep(1)
def mailer(pic):
print "mailing pic to: XXX"
sender = "XXX"
empfaenger = "XXX"
s = smtplib.SMTP('XXX')
s.starttls()
s.ehlo()
s.login('XXX', 'XXX')
msg = MIMEMultipart('alternative')
msg['Subject'] = 'Intruder detected'
msg['From'] = "XXX"
msg['To'] = empfaenger
text = "Pic of intruder"
fp = open(pic, 'rb')
img = MIMEImage(fp.read())
fp.close()
msg.attach(img)
msg['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S +0000",time.gmtime())
msg['User-Agent'] = "PyLucid (Python v2.7)"
s.sendmail(sender, empfaenger, msg.as_string())
print "pic mailed."
# --------- Main ---------
try:
while GPIO.input(GPIO_PIR)==1:
curr = 0
print "Scanning..."
while True :
curr = GPIO.input(GPIO_PIR)
# MOTION DETECTED
if curr==1 and prev==0:
print "Motion detected! taking 3 pics"
count = 0
while count < 3:
todays_date, image_path, pic = takepic() # take pic
tweeter(todays_date, image_path) # tweet
mailer(pic) # mail
count += 1
time.sleep(15)
# Record previous state
prev=1
elif curr==0 and prev==1:
prev=0
print "Scanning..."
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
GPIO.cleanup()
| nullx31/mopitwail.py | mopitwail.py | Python | mit | 3,087 |
from pyautomate import trackers, hash_
states = ''
def get_initial_state():
trackers['last compiled'] = lambda: hash_('file.f')
print(trackers['last compiled'].has_changed)
return 'state'
| timdiels/pyautomate | test/texttest/stateless_automaton/filesystem_changed/file/has_changed/auto.py | Python | gpl-3.0 | 203 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Example of the EnumerateInstances operation. The execution
arguments may be either internal defaults if less than the full set of
arguments is supplied on the command line or the supplied command
line arguments.
The command line arguments are:
- server_url: scheme, server name, (optional) port
- username
- password
- namespace
- classname
This simple example allows both http and https
requests but does not allow verification of server cert or
mutual authentication
This example demonstrates executing the connection,
operation request, displaying results and handling exceptions.
"""
from __future__ import print_function
import sys
from pywbem import WBEMConnection, Error, CIMError
# default connection attributes. Used if not all arguments are
# supplied on the command line.
USERNAME = 'blah'
PASSWORD = 'blah'
TEST_CLASS = 'CIM_ComputerSystem'
TEST_NAMESPACE = 'root/cimv2'
SERVER_URL = 'http://localhost'
def execute_request(server_url, creds, namespace, classname):
""" Open a connection with the server_url and creds, and
enumerate instances defined by the functions namespace and
classname arguments.
Displays either the error return or the mof for instances
returned.
"""
print('Requesting url=%s, ns=%s, class=%s' % \
(server_url, namespace, classname))
try:
# Create a connection
CONN = WBEMConnection(server_url, creds,
default_namespace=namespace,
no_verification=True)
#Issue the request to EnumerateInstances on the defined class
INSTANCES = CONN.EnumerateInstances(classname)
#Display of characteristics of the result object
print('instances type=%s len=%s' % (type(INSTANCES),
len(INSTANCES)))
#display the mof output
for inst in INSTANCES:
print('path=%s\n' % inst.path)
print(inst.tomof())
# handle any exception
except Error as err:
# If CIMError, display CIMError attributes
if isinstance(err, CIMError):
print('Operation Failed: CIMError: code=%s, Description=%s' % \
(err.status_code_name, err.status_description))
else:
print ("Operation failed: %s" % err)
sys.exit(1)
def main():
""" Get arguments and call the execution function"""
if len(sys.argv) < 6:
        print("Usage: %s server_url username password namespace classname"
              % sys.argv[0])
print('Using internal defaults')
server_url = SERVER_URL
namespace = TEST_NAMESPACE
username = USERNAME
password = PASSWORD
classname = TEST_CLASS
else:
print('Get from input')
server_url = sys.argv[1]
namespace = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
classname = sys.argv[5]
# create the credentials tuple for WBEMConnection
creds = (username, password)
# call the method to execute the request and display results
execute_request(server_url, creds, namespace, classname)
return 0
if __name__ == '__main__':
sys.exit(main())
| zenoss/pywbem | examples/enuminstances.py | Python | lgpl-2.1 | 3,334 |
# Generated by Django 1.11.4 on 2017-08-31 00:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0102_convert_muted_topic"),
]
operations = [
migrations.RemoveField(
model_name="userprofile",
name="muted_topics",
),
]
| rht/zulip | zerver/migrations/0103_remove_userprofile_muted_topics.py | Python | apache-2.0 | 339 |
# missileExplosiveDmgBonus
#
# Used by:
# Skills named like: Missiles (5 of 7)
# Skill: Rockets
# Skill: Torpedoes
type = "passive"
def handler(fit, skill, context):
fit.modules.filteredChargeBoost(lambda mod: mod.charge.requiresSkill(skill),
"explosiveDamage", skill.getModifiedItemAttr("damageMultiplierBonus") * skill.level)
| Ebag333/Pyfa | eos/effects/missileexplosivedmgbonus.py | Python | gpl-3.0 | 370 |
#! /usr/bin/python
'''
This program is a demo of how to generate an ICMPv6 'Packet Too Big'
message and IPv6 fragmented packets.
This program can also be used as NUT side of v6LC.4.1.4.
'''
from __future__ import print_function
import sys
import getopt
import time
from scapy.all import *
def main():
    opts, _ = getopt.getopt(sys.argv[1:], "l:r:", ["local=", "remote="])
for opt, arg in opts:
if opt in ("-r", "--remote"):
remote_host = arg
elif opt in ("-l", "--local"):
local_host = arg
print("local: %s and remote: %s." % (local_host, remote_host))
pmtu = 1280
# 1 - send/recv echo req/reply
payload = "A" * 64
base = IPv6(src=local_host, dst=remote_host)
extension=ICMPv6EchoRequest(data=payload)
packet = base/extension
echo_reply = sr1(packet)
    if echo_reply is None:
        print("no echo reply is available")
        sys.exit(0)
    elif not echo_reply.haslayer(IPv6):
        echo_reply.show()
        sys.exit(0)
echo_reply.show()
payload = "A" * (pmtu - 40 - 8)
base = IPv6(src=local_host, dst=remote_host)
extension=ICMPv6EchoRequest(data=payload)
packet = base/extension
echo_reply = sr1(packet)
    if echo_reply is None:
        print("no echo reply is available")
        sys.exit(0)
    elif not echo_reply.haslayer(IPv6):
        echo_reply.show()
        sys.exit(0)
echo_reply.show()
# 2 - send 'packet too big' packet
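    # Build an ICMPv6 "Packet Too Big" error that advertises mtu=1280 and
    # embeds the start of the offending echo reply, truncated so the whole
    # error message stays within the minimum IPv6 MTU.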
    time.sleep(0.05)
echo_reply_c = echo_reply.copy()
payload=raw(echo_reply_c)
base = IPv6(src=local_host, dst=remote_host)
extension=ICMPv6PacketTooBig(mtu=pmtu)
packet=base/extension/payload[:1184]
send(packet)
# 3 - send fragmented packets
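    # fragment6() expects the Fragment extension header to be present already;
    # it then splits the oversized echo request into fragments no larger than
    # the 1280-byte path MTU advertised above.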
    # sleep 0.5s to let the new MTU take effect on the remote host
    time.sleep(0.5)
packets = fragment6(base / IPv6ExtHdrFragment() / ICMPv6EchoRequest(data='A'*1452), pmtu)
send(packets)
if __name__ == "__main__":
main()
| jijianwen/Learn | Linux/network/scapy/pkt_too_big.py | Python | gpl-2.0 | 1,950 |
"""The syslog component."""
| aronsky/home-assistant | homeassistant/components/syslog/__init__.py | Python | apache-2.0 | 28 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('truco', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Perfiles',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('telefono', models.IntegerField()),
('usuario', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.DeleteModel(
name='Login',
),
]
| emmanuel-santos/GEM | old/truco/migrations/0002_auto_20140910_1306.py | Python | mit | 833 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import os.path
import sys
from trac.admin import IAdminCommandProvider, IAdminPanelProvider
from trac.api import IEnvironmentSetupParticipant
from trac.config import ListOption
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.util import as_bool, is_path_below
from trac.util.html import tag
from trac.util.text import breakable_path, normalize_whitespace, print_table, \
printerr, printout
from trac.util.translation import _, ngettext, tag_
from trac.versioncontrol import DbRepositoryProvider, InvalidRepository, \
NoSuchChangeset, RepositoryManager, is_default
from trac.versioncontrol.cache import CachedRepository
from trac.web.chrome import Chrome, add_notice, add_warning
class VersionControlAdmin(Component):
"""trac-admin command provider for version control administration."""
implements(IAdminCommandProvider, IEnvironmentSetupParticipant,
IPermissionRequestor)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('changeset added', '<repos> <rev> [rev] [...]',
"""Notify trac about changesets added to a repository
This command should be called from a post-commit hook. It will
trigger a cache update and notify components about the addition.
""",
self._complete_repos, self._do_changeset_added)
yield ('changeset modified', '<repos> <rev> [rev] [...]',
"""Notify trac about changesets modified in a repository
This command should be called from a post-revprop hook after
revision properties like the commit message, author or date
have been changed. It will trigger a cache update for the given
revisions and notify components about the change.
""",
self._complete_repos, self._do_changeset_modified)
yield ('repository list', '',
'List source repositories',
None, self._do_list)
yield ('repository resync', '<repos> [rev]',
"""Re-synchronize trac with repositories
When [rev] is specified, only that revision is synchronized.
Otherwise, the complete revision history is synchronized. Note
that this operation can take a long time to complete.
If synchronization gets interrupted, it can be resumed later
using the `sync` command.
<repos> must be the repository name, not the repository path.
Use `list` to see a list of repository names and associated
paths. To synchronize all repositories, specify "*" for
<repos>. The default repository can be specified
using "(default)".
""",
self._complete_repos, self._do_resync)
yield ('repository sync', '<repos> [rev]',
"""Resume synchronization of repositories
It works like `resync`, except that it doesn't clear the already
synchronized changesets, so it's a better way to resume an
interrupted `resync`.
See `resync` help for detailed usage.
""",
self._complete_repos, self._do_sync)
def get_reponames(self):
rm = RepositoryManager(self.env)
return [reponame or '(default)'
for reponame in rm.get_all_repositories()]
def _complete_repos(self, args):
if len(args) == 1:
return self.get_reponames()
def _do_changeset_added(self, reponame, first_rev, *revs):
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
errors = rm.notify('changeset_added', reponame, (first_rev,) + revs)
for error in errors:
printerr(error)
return 2 if errors else 0
def _do_changeset_modified(self, reponame, first_rev, *revs):
if is_default(reponame):
reponame = ''
rm = RepositoryManager(self.env)
errors = rm.notify('changeset_modified', reponame, (first_rev,) + revs)
for error in errors:
printerr(error)
return 2 if errors else 0
def _do_list(self):
rm = RepositoryManager(self.env)
values = []
for reponame, info in sorted(rm.get_all_repositories().iteritems()):
alias = ''
if 'alias' in info:
alias = info['alias'] or '(default)'
values.append((reponame or '(default)', info.get('type', ''),
alias, info.get('dir', '')))
print_table(values, [_('Name'), _('Type'), _('Alias'), _('Directory')])
def _sync(self, reponame, rev, clean):
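        """Synchronize the given repository (or all repositories when
        `reponame` is '*') with the cache; `clean` discards the cached
        history first.
        """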
rm = RepositoryManager(self.env)
if reponame == '*':
if rev is not None:
raise TracError(_('Cannot synchronize a single revision '
'on multiple repositories'))
repositories = rm.get_real_repositories()
else:
if is_default(reponame):
reponame = ''
repos = rm.get_repository(reponame)
if repos is None:
raise TracError(_("Repository \"%(repo)s\" doesn't exist",
repo=reponame or '(default)'))
repositories = [repos]
for repos in sorted(repositories, key=lambda r: r.reponame):
pretty_name = repos.reponame or '(default)'
if not isinstance(repos, CachedRepository):
printout(_("%(reponame)s is not a cached repository.",
reponame=pretty_name))
elif rev is not None:
repos.sync_changeset(rev)
printout(_('%(rev)s resynced on %(reponame)s.', rev=rev,
reponame=pretty_name))
else:
printout(_('Resyncing repository history for %(reponame)s... ',
reponame=pretty_name))
repos.sync(self._sync_feedback, clean=clean)
for cnt, in self.env.db_query(
"SELECT count(rev) FROM revision WHERE repos=%s",
(repos.id,)):
printout(ngettext('%(num)s revision cached.',
'%(num)s revisions cached.', num=cnt))
printout(_('Done.'))
def _sync_feedback(self, rev):
sys.stdout.write(' [%s]\r' % rev)
sys.stdout.flush()
def _do_resync(self, reponame, rev=None):
self._sync(reponame, rev, clean=True)
def _do_sync(self, reponame, rev=None):
self._sync(reponame, rev, clean=False)
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""Index the repositories."""
for repos in RepositoryManager(self.env).get_real_repositories():
pretty_name = repos.reponame or '(default)'
printout(_(" Indexing '%(name)s' repository", name=pretty_name))
try:
repos.sync(self._sync_feedback)
except TracError:
printerr(_("""
---------------------------------------------------------------------
Warning: couldn't index '%(pretty_name)s' repository.
This can happen for a variety of reasons: wrong repository type,
no appropriate third party library for this repository type,
no repository at the specified repository path...
You can nevertheless start using your Trac environment, but you'll
need to check your `%(name)s.type` and `%(name)s.dir` option values
in the [repositories] section of your trac.ini file.
""", pretty_name=pretty_name, name=repos.reponame or ''))
else:
# Erase to end of line.
sys.stdout.write('\033[K')
sys.stdout.flush()
def environment_needs_upgrade(self):
pass
def upgrade_environment(self):
pass
# IPermissionRequestor methods
def get_permission_actions(self):
return [('VERSIONCONTROL_ADMIN', ['BROWSER_VIEW', 'CHANGESET_VIEW',
'FILE_VIEW', 'LOG_VIEW'])]
class RepositoryAdminPanel(Component):
"""Web admin panel for repository administration."""
implements(IAdminPanelProvider)
allowed_repository_dir_prefixes = ListOption('versioncontrol',
'allowed_repository_dir_prefixes', '',
doc="""Comma-separated list of allowed prefixes for repository
directories when adding and editing repositories in the repository
admin panel. If the list is empty, all repository directories are
allowed.
""")
# IAdminPanelProvider methods
def get_admin_panels(self, req):
types = RepositoryManager(self.env).get_supported_types()
if types and 'VERSIONCONTROL_ADMIN' \
in req.perm('admin', 'versioncontrol/repository'):
yield ('versioncontrol', _('Version Control'), 'repository',
_('Repositories'))
def render_admin_panel(self, req, category, page, path_info):
# Retrieve info for all repositories
rm = RepositoryManager(self.env)
all_repos = rm.get_all_repositories()
db_provider = self.env[DbRepositoryProvider]
if path_info:
# Detail view
reponame = path_info if not is_default(path_info) else ''
info = all_repos.get(reponame)
if info is None:
raise TracError(_("Repository '%(repo)s' not found",
repo=path_info))
if req.method == 'POST':
if req.args.get('cancel'):
req.redirect(req.href.admin(category, page))
elif db_provider and req.args.get('save'):
# Modify repository
changes = {}
valid = True
for field in db_provider.repository_attrs:
value = normalize_whitespace(req.args.get(field))
if (value is not None
or field in ('hidden', 'sync_per_request')) \
and value != info.get(field):
changes[field] = value
if 'dir' in changes and not \
self._check_dir(req, changes['dir']):
valid = False
if valid and changes:
db_provider.modify_repository(reponame, changes)
add_notice(req, _('Your changes have been saved.'))
name = req.args.get('name')
pretty_name = name or '(default)'
resync = tag.code('trac-admin "%s" repository resync '
'"%s"' % (self.env.path, pretty_name))
if 'dir' in changes:
msg = tag_('You should now run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
elif 'type' in changes:
msg = tag_('You may have to run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
if name and name != path_info and 'alias' not in info:
cset_added = tag.code('trac-admin "%s" changeset '
'added "%s" $REV'
% (self.env.path,
pretty_name))
msg = tag_('You will need to update your '
'post-commit hook to call '
'%(cset_added)s with the new '
'repository name.',
cset_added=cset_added)
add_notice(req, msg)
if valid:
req.redirect(req.href.admin(category, page))
chrome = Chrome(self.env)
chrome.add_wiki_toolbars(req)
chrome.add_auto_preview(req)
data = {'view': 'detail', 'reponame': reponame}
else:
# List view
if req.method == 'POST':
# Add a repository
if db_provider and req.args.get('add_repos'):
name = req.args.get('name')
pretty_name = name or '(default)'
if name in all_repos:
raise TracError(_('The repository "%(name)s" already '
'exists.', name=pretty_name))
type_ = req.args.get('type')
# Avoid errors when copy/pasting paths
dir = normalize_whitespace(req.args.get('dir', ''))
if name is None or type_ is None or not dir:
add_warning(req, _('Missing arguments to add a '
'repository.'))
elif self._check_dir(req, dir):
db_provider.add_repository(name, dir, type_)
add_notice(req, _('The repository "%(name)s" has been '
'added.', name=pretty_name))
resync = tag.code('trac-admin "%s" repository resync '
'"%s"' % (self.env.path, pretty_name))
msg = tag_('You should now run %(resync)s to '
'synchronize Trac with the repository.',
resync=resync)
add_notice(req, msg)
cset_added = tag.code('trac-admin "%s" changeset '
'added "%s" $REV'
% (self.env.path, pretty_name))
doc = tag.a(_("documentation"),
href=req.href.wiki('TracRepositoryAdmin')
+ '#Synchronization')
msg = tag_('You should also set up a post-commit hook '
'on the repository to call %(cset_added)s '
'for each committed changeset. See the '
'%(doc)s for more information.',
cset_added=cset_added, doc=doc)
add_notice(req, msg)
# Add a repository alias
elif db_provider and req.args.get('add_alias'):
name = req.args.get('name')
pretty_name = name or '(default)'
alias = req.args.get('alias')
if name is not None and alias is not None:
try:
db_provider.add_alias(name, alias)
except self.env.db_exc.IntegrityError:
raise TracError(_('The alias "%(name)s" already '
'exists.', name=pretty_name))
add_notice(req, _('The alias "%(name)s" has been '
'added.', name=pretty_name))
else:
add_warning(req, _('Missing arguments to add an '
'alias.'))
# Refresh the list of repositories
elif req.args.get('refresh'):
pass
# Remove repositories
elif db_provider and req.args.get('remove'):
sel = req.args.getlist('sel')
if sel:
for name in sel:
db_provider.remove_repository(name)
add_notice(req, _('The selected repositories have '
'been removed.'))
else:
add_warning(req, _('No repositories were selected.'))
req.redirect(req.href.admin(category, page))
data = {'view': 'list'}
# Find repositories that are editable
db_repos = {}
if db_provider is not None:
db_repos = dict(db_provider.get_repositories())
# Prepare common rendering data
repositories = {reponame: self._extend_info(reponame, info.copy(),
reponame in db_repos)
for (reponame, info) in all_repos.iteritems()}
types = sorted([''] + rm.get_supported_types())
data.update(
{'types': types,
'default_type': rm.default_repository_type,
'repositories': repositories,
'can_add_alias': any('alias' not in info
for info in repositories.itervalues())})
return 'admin_repositories.html', data
def _extend_info(self, reponame, info, editable):
"""Extend repository info for rendering."""
info['name'] = reponame
info['hidden'] = as_bool(info.get('hidden'))
info['sync_per_request'] = as_bool(info.get('sync_per_request'))
info['editable'] = editable
if 'alias' not in info:
if info.get('dir') is not None:
info['prettydir'] = breakable_path(info['dir']) or ''
try:
repos = RepositoryManager(self.env).get_repository(reponame)
except InvalidRepository as e:
info['error'] = e
except TracError:
pass # Probably "unsupported connector"
else:
youngest_rev = repos.get_youngest_rev()
info['rev'] = youngest_rev
try:
info['display_rev'] = repos.display_rev(youngest_rev)
except NoSuchChangeset:
pass
return info
def _check_dir(self, req, dir):
"""Check that a repository directory is valid, and add a warning
message if not.
"""
if not os.path.isabs(dir):
add_warning(req, _('The repository directory must be an absolute '
'path.'))
return False
prefixes = [os.path.join(self.env.path, prefix)
for prefix in self.allowed_repository_dir_prefixes]
if prefixes and not any(is_path_below(dir, prefix)
for prefix in prefixes):
add_warning(req, _('The repository directory must be located '
'below one of the following directories: '
'%(dirs)s', dirs=', '.join(prefixes)))
return False
return True
| rbaumg/trac | trac/versioncontrol/admin.py | Python | bsd-3-clause | 19,753 |
"""SCons.Tool.mslink
Tool-specific initialization for the Microsoft linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvc
import SCons.Tool.msvs
import SCons.Util
from .MSCommon import msvc_setup_env_once, msvc_exists
def pdbGenerator(env, target, source, for_signature):
try:
return ['/PDB:%s' % target[0].attributes.pdb, '/DEBUG']
except (AttributeError, IndexError):
return None
def _dllTargets(target, source, env, for_signature, paramtp):
listCmd = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
if dll: listCmd.append("/out:%s"%dll.get_string(for_signature))
implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX')
if implib: listCmd.append("/implib:%s"%implib.get_string(for_signature))
return listCmd
def _dllSources(target, source, env, for_signature, paramtp):
listCmd = []
deffile = env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX")
for src in source:
# Check explicitly for a non-None deffile so that the __cmp__
# method of the base SCons.Util.Proxy class used for some Node
# proxies doesn't try to use a non-existent __dict__ attribute.
if deffile and src == deffile:
# Treat this source as a .def file.
listCmd.append("/def:%s" % src.get_string(for_signature))
else:
# Just treat it as a generic source file.
listCmd.append(src)
return listCmd
def windowsShlinkTargets(target, source, env, for_signature):
return _dllTargets(target, source, env, for_signature, 'SHLIB')
def windowsShlinkSources(target, source, env, for_signature):
return _dllSources(target, source, env, for_signature, 'SHLIB')
def _windowsLdmodTargets(target, source, env, for_signature):
"""Get targets for loadable modules."""
return _dllTargets(target, source, env, for_signature, 'LDMODULE')
def _windowsLdmodSources(target, source, env, for_signature):
"""Get sources for loadable modules."""
return _dllSources(target, source, env, for_signature, 'LDMODULE')
def _dllEmitter(target, source, env, paramtp):
"""Common implementation of dll emitter."""
SCons.Tool.msvc.validate_vars(env)
extratargets = []
extrasources = []
dll = env.FindIxes(target, '%sPREFIX' % paramtp, '%sSUFFIX' % paramtp)
no_import_lib = env.get('no_import_lib', 0)
if not dll:
raise SCons.Errors.UserError('A shared library should have exactly one target with the suffix: %s' % env.subst('$%sSUFFIX' % paramtp))
insert_def = env.subst("$WINDOWS_INSERT_DEF")
if insert_def not in ['', '0', 0] and \
not env.FindIxes(source, "WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"):
# append a def file to the list of sources
extrasources.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSDEFPREFIX", "WINDOWSDEFSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and \
(env.get('WINDOWS_INSERT_MANIFEST', 0) or env.get('WINDOWS_EMBED_MANIFEST', 0)):
# MSVC 8 and above automatically generate .manifest files that must be installed
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSSHLIBMANIFESTPREFIX", "WINDOWSSHLIBMANIFESTSUFFIX"))
if 'PDB' in env and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
if version_num >= 11.0 and env.get('PCH', 0):
# MSVC 11 and above need the PCH object file to be added to the link line,
# otherwise you get link error LNK2011.
pchobj = SCons.Util.splitext(str(env['PCH']))[0] + '.obj'
# print "prog_emitter, version %s, appending pchobj %s"%(version_num, pchobj)
if pchobj not in extrasources:
extrasources.append(pchobj)
if not no_import_lib and \
not env.FindIxes(target, "LIBPREFIX", "LIBSUFFIX"):
# Append an import library to the list of targets.
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"LIBPREFIX", "LIBSUFFIX"))
        # An .exp file is also created if there are exports from a DLL.
extratargets.append(
env.ReplaceIxes(dll,
'%sPREFIX' % paramtp, '%sSUFFIX' % paramtp,
"WINDOWSEXPPREFIX", "WINDOWSEXPSUFFIX"))
return (target+extratargets, source+extrasources)
def windowsLibEmitter(target, source, env):
return _dllEmitter(target, source, env, 'SHLIB')
def ldmodEmitter(target, source, env):
"""Emitter for loadable modules.
Loadable modules are identical to shared libraries on Windows, but building
them is subject to different parameters (LDMODULE*).
"""
return _dllEmitter(target, source, env, 'LDMODULE')
def prog_emitter(target, source, env):
SCons.Tool.msvc.validate_vars(env)
extratargets = []
extrasources = []
exe = env.FindIxes(target, "PROGPREFIX", "PROGSUFFIX")
if not exe:
raise SCons.Errors.UserError("An executable should have exactly one target with the suffix: %s" % env.subst("$PROGSUFFIX"))
version_num, suite = SCons.Tool.msvs.msvs_parse_version(env.get('MSVS_VERSION', '6.0'))
if version_num >= 8.0 and \
(env.get('WINDOWS_INSERT_MANIFEST', 0) or env.get('WINDOWS_EMBED_MANIFEST', 0)):
# MSVC 8 and above automatically generate .manifest files that have to be installed
extratargets.append(
env.ReplaceIxes(exe,
"PROGPREFIX", "PROGSUFFIX",
"WINDOWSPROGMANIFESTPREFIX", "WINDOWSPROGMANIFESTSUFFIX"))
if 'PDB' in env and env['PDB']:
pdb = env.arg2nodes('$PDB', target=target, source=source)[0]
extratargets.append(pdb)
target[0].attributes.pdb = pdb
if version_num >= 11.0 and env.get('PCH', 0):
# MSVC 11 and above need the PCH object file to be added to the link line,
# otherwise you get link error LNK2011.
pchobj = SCons.Util.splitext(str(env['PCH']))[0] + '.obj'
# print("prog_emitter, version %s, appending pchobj %s"%(version_num, pchobj))
if pchobj not in extrasources:
extrasources.append(pchobj)
return (target+extratargets,source+extrasources)
def RegServerFunc(target, source, env):
if 'register' in env and env['register']:
ret = regServerAction([target[0]], [source[0]], env)
if ret:
raise SCons.Errors.UserError("Unable to register %s" % target[0])
else:
print("Registered %s sucessfully" % target[0])
return ret
return 0
# These are the actual actions run to embed the manifest.
# They are only called from the Check versions below.
embedManifestExeAction = SCons.Action.Action('$MTEXECOM')
embedManifestDllAction = SCons.Action.Action('$MTSHLIBCOM')
def embedManifestDllCheck(target, source, env):
"""Function run by embedManifestDllCheckAction to check for existence of manifest
and other conditions, and embed the manifest by calling embedManifestDllAction if so."""
if env.get('WINDOWS_EMBED_MANIFEST', 0):
manifestSrc = target[0].get_abspath() + '.manifest'
if os.path.exists(manifestSrc):
ret = embedManifestDllAction([target[0]], None, env)
if ret:
raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
return ret
else:
print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
return 0
def embedManifestExeCheck(target, source, env):
"""Function run by embedManifestExeCheckAction to check for existence of manifest
and other conditions, and embed the manifest by calling embedManifestExeAction if so."""
if env.get('WINDOWS_EMBED_MANIFEST', 0):
manifestSrc = target[0].get_abspath() + '.manifest'
if os.path.exists(manifestSrc):
ret = embedManifestExeAction([target[0]], None, env)
if ret:
raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
return ret
else:
print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
return 0
embedManifestDllCheckAction = SCons.Action.Action(embedManifestDllCheck, None)
embedManifestExeCheckAction = SCons.Action.Action(embedManifestExeCheck, None)
regServerAction = SCons.Action.Action("$REGSVRCOM", "$REGSVRCOMSTR")
regServerCheck = SCons.Action.Action(RegServerFunc, None)
shlibLinkAction = SCons.Action.Action('${TEMPFILE("$SHLINK $SHLINKFLAGS $_SHLINK_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_SHLINK_SOURCES", "$SHLINKCOMSTR")}', '$SHLINKCOMSTR')
compositeShLinkAction = shlibLinkAction + regServerCheck + embedManifestDllCheckAction
ldmodLinkAction = SCons.Action.Action('${TEMPFILE("$LDMODULE $LDMODULEFLAGS $_LDMODULE_TARGETS $_LIBDIRFLAGS $_LIBFLAGS $_PDB $_LDMODULE_SOURCES", "$LDMODULECOMSTR")}', '$LDMODULECOMSTR')
compositeLdmodAction = ldmodLinkAction + regServerCheck + embedManifestDllCheckAction
exeLinkAction = SCons.Action.Action('${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows $_LIBDIRFLAGS $_LIBFLAGS $_PDB $SOURCES.windows", "$LINKCOMSTR")}', '$LINKCOMSTR')
compositeLinkAction = exeLinkAction + embedManifestExeCheckAction
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
SCons.Tool.createSharedLibBuilder(env, shlib_suffix='$SHLIBSUFFIX')
SCons.Tool.createProgBuilder(env)
env['SHLINK'] = '$LINK'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS /dll')
env['_SHLINK_TARGETS'] = windowsShlinkTargets
env['_SHLINK_SOURCES'] = windowsShlinkSources
env['SHLINKCOM'] = compositeShLinkAction
env.Append(SHLIBEMITTER = [windowsLibEmitter])
env.Append(LDMODULEEMITTER = [windowsLibEmitter])
env['LINK'] = 'link'
env['LINKFLAGS'] = SCons.Util.CLVar('/nologo')
env['_PDB'] = pdbGenerator
env['LINKCOM'] = compositeLinkAction
env.Append(PROGEMITTER = [prog_emitter])
env['LIBDIRPREFIX']='/LIBPATH:'
env['LIBDIRSUFFIX']=''
env['LIBLINKPREFIX']=''
env['LIBLINKSUFFIX']='$LIBSUFFIX'
env['WIN32DEFPREFIX'] = ''
env['WIN32DEFSUFFIX'] = '.def'
env['WIN32_INSERT_DEF'] = 0
env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}'
env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}'
env['WINDOWS_INSERT_DEF'] = '${WIN32_INSERT_DEF}'
env['WIN32EXPPREFIX'] = ''
env['WIN32EXPSUFFIX'] = '.exp'
env['WINDOWSEXPPREFIX'] = '${WIN32EXPPREFIX}'
env['WINDOWSEXPSUFFIX'] = '${WIN32EXPSUFFIX}'
env['WINDOWSSHLIBMANIFESTPREFIX'] = ''
env['WINDOWSSHLIBMANIFESTSUFFIX'] = '${SHLIBSUFFIX}.manifest'
env['WINDOWSPROGMANIFESTPREFIX'] = ''
env['WINDOWSPROGMANIFESTSUFFIX'] = '${PROGSUFFIX}.manifest'
env['REGSVRACTION'] = regServerCheck
env['REGSVR'] = os.path.join(SCons.Platform.win32.get_system_root(),'System32','regsvr32')
env['REGSVRFLAGS'] = '/s '
env['REGSVRCOM'] = '$REGSVR $REGSVRFLAGS ${TARGET.windows}'
env['WINDOWS_EMBED_MANIFEST'] = 0
env['MT'] = 'mt'
#env['MTFLAGS'] = ['-hashupdate']
env['MTFLAGS'] = SCons.Util.CLVar('/nologo')
# Note: use - here to prevent build failure if no manifest produced.
# This seems much simpler than a fancy system using a function action to see
# if the manifest actually exists before trying to run mt with it.
env['MTEXECOM'] = '-$MT $MTFLAGS -manifest ${TARGET}.manifest $_MANIFEST_SOURCES -outputresource:$TARGET;1'
env['MTSHLIBCOM'] = '-$MT $MTFLAGS -manifest ${TARGET}.manifest $_MANIFEST_SOURCES -outputresource:$TARGET;2'
# TODO Future work garyo 27-Feb-11
env['_MANIFEST_SOURCES'] = None # _windowsManifestSources
# Set-up ms tools paths
msvc_setup_env_once(env)
# Loadable modules are on Windows the same as shared libraries, but they
# are subject to different build parameters (LDMODULE* variables).
# Therefore LDMODULE* variables correspond as much as possible to
# SHLINK*/SHLIB* ones.
SCons.Tool.createLoadableModuleBuilder(env, loadable_module_suffix='$LDMODULESUFFIX')
env['LDMODULE'] = '$SHLINK'
env['LDMODULEPREFIX'] = '$SHLIBPREFIX'
env['LDMODULESUFFIX'] = '$SHLIBSUFFIX'
env['LDMODULEFLAGS'] = '$SHLINKFLAGS'
env['_LDMODULE_TARGETS'] = _windowsLdmodTargets
env['_LDMODULE_SOURCES'] = _windowsLdmodSources
env['LDMODULEEMITTER'] = [ldmodEmitter]
env['LDMODULECOM'] = compositeLdmodAction
# Issue #3350
# Change tempfile argument joining character from a space to a newline
# mslink will fail if any single line is too long, but is fine with many lines
# in a tempfile
env['TEMPFILEARGJOIN'] = os.linesep
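# Illustrative sketch (editor's addition): how a project's SConstruct might use
# the variables wired up above. Target and source names are assumptions, not
# part of this tool module.
#
#   env = Environment(tools=['msvc', 'mslink'])
#   env['WINDOWS_EMBED_MANIFEST'] = 1      # embedManifest*CheckAction will run mt
#   env['PDB'] = '${TARGET.base}.pdb'      # pdbGenerator adds /PDB:... /DEBUG
#   env.SharedLibrary('example', ['example.c'])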
def exists(env):
return msvc_exists(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lightmare/mapnik | scons/scons-local-4.1.0/SCons/Tool/mslink.py | Python | lgpl-2.1 | 14,612 |
import setuptools
if __name__ == '__main__':
setuptools.setup(
name="groupmestats",
version="0.0.1",
author="Kyle Teske",
author_email="[email protected]",
description="Calculating and displaying groupme statistics",
packages=setuptools.find_packages(),
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
],
install_requires=[
'GroupyAPI',
'jinja2',
'Pillow',
'plotly',
'PyYAML',
],
entry_points={
'console_scripts': [
'gstat_fetch_data = groupmestats.groupserializer:gstat_fetch_data',
'gstat_gen_groups = groupmestats.grouplookup:gstat_gen_groups',
'gstat_gen_members = groupmestats.memberlookup:gstat_gen_members',
'gstat_help = groupmestats.help:gstat_help',
'gstat_stats = groupmestats.generatestats:gstat_stats',
'gstat_timeline = groupmestats.statistics:messagetimelineplot.gstat_timeline_main',
],
},
)
| kjteske/groupmestats | setup.py | Python | mit | 1,136 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-06 17:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('anagrafica', '0040_refactor_nome_cognome'),
]
operations = [
migrations.AlterModelOptions(
name='appartenenza',
options={'permissions': (('view_appartenenza', 'Can view appartenenza'),), 'verbose_name_plural': 'Appartenenze'},
),
migrations.AlterModelOptions(
name='delega',
options={'permissions': (('view_delega', 'Can view delega'),), 'verbose_name_plural': 'Deleghe'},
),
migrations.AlterModelOptions(
name='dimissione',
options={'permissions': (('view_dimissione', 'Can view Documento di Dimissione'),), 'verbose_name': 'Documento di Dimissione', 'verbose_name_plural': 'Documenti di Dimissione'},
),
migrations.AlterModelOptions(
name='documento',
options={'permissions': (('view_documento', 'Can view documento'),), 'verbose_name_plural': 'Documenti'},
),
migrations.AlterModelOptions(
name='estensione',
options={'permissions': (('view_estensions', 'Can view Richiesta di estensione'),), 'verbose_name': 'Richiesta di estensione', 'verbose_name_plural': 'Richieste di estensione'},
),
migrations.AlterModelOptions(
name='fototessera',
options={'permissions': (('view_fototessera', 'Can view fototessera'),), 'verbose_name_plural': 'Fototessere'},
),
migrations.AlterModelOptions(
name='persona',
options={'permissions': (('view_persona', 'Can view persona'),), 'verbose_name_plural': 'Persone'},
),
migrations.AlterModelOptions(
name='riserva',
options={'permissions': (('view_riserva', 'Can view Richiesta di riserva'),), 'verbose_name': 'Richiesta di riserva', 'verbose_name_plural': 'Richieste di riserva'},
),
migrations.AlterModelOptions(
name='sede',
options={'permissions': (('view_sede', 'Can view Sede CRI'),), 'verbose_name': 'Sede CRI', 'verbose_name_plural': 'Sedi CRI'},
),
migrations.AlterModelOptions(
name='telefono',
options={'permissions': (('view_telefono', 'Can view Numero di telefono'),), 'verbose_name': 'Numero di telefono', 'verbose_name_plural': 'Numeri di telefono'},
),
migrations.AlterModelOptions(
name='trasferimento',
options={'permissions': (('view_trasferimento', 'Can view Richiesta di trasferimento'),), 'verbose_name': 'Richiesta di trasferimento', 'verbose_name_plural': 'Richieste di trasferimento'},
),
]
| CroceRossaItaliana/jorvik | anagrafica/migrations/0041_auto_20160906_1702.py | Python | gpl-3.0 | 2,825 |
import json
from django.views.generic import TemplateView
from resource_daily_scheduler.booking_req_views import AjaxableBookingRequestCreateView, ColorSchema
from resource_daily_scheduler.models import BookableResource
from resource_daily_scheduler.views import ResourceScheduleTemplateView
__author__ = 'weijia'
class ResourceTableScheduler(ResourceScheduleTemplateView):
template_name = "resource_daily_scheduler/table_scheduler.html"
# bookable_resource_class = BookableResource
# get_schedule_url_name = 'get_schedule'
# resource_permission_id = "resource_daily_scheduler.change_bookableresource"
# # resource_detail = "update_resource/"
# request_create_view = AjaxableBookingRequestCreateView
#
# def get_context_data(self, **kwargs):
# default_context = super(ResourceTableScheduler, self).get_context_data(**kwargs)
# resource_list_query = self.bookable_resource_class.objects.all()
# resource_list = []
# for resource in resource_list_query:
# resource_list.append({"id": str(resource.pk), "title": resource.get_title()})
# default_context["resource_list"] = json.dumps(resource_list)
# if self.request.user.has_perm(self.resource_permission_id):
# default_context["is_admin"] = "true"
# default_context["event_colors"] = json.dumps(self.get_colors())
# return default_context
| weijia/resource-daily-scheduler | resource_daily_scheduler/table_scheduler.py | Python | bsd-3-clause | 1,410 |
import socket
import pickle
import time
class GameSocket(object):
def __init__(self, socket):
self.__socket = socket
self.set_buffer_size(4096)
self.__head_size = 64
# self.set_buffer_size(65536)
# self.set_buffer_size(131072)
# self.set_buffer_size(262144)
def set_buffer_size(self, buffer_size):
self.__buffer_size = buffer_size
def get_buffer_size(self):
return self.__buffer_size
def setsockopt(self, level, optname, value):
self.__socket.setsockopt(level, optname, value)
def setblocking(self, flag):
self.__socket.setblocking(flag)
def settimeout(self, value):
self.__socket.settimeout(value)
def gettimeout(self):
return self.__socket.gettimeout()
def bind(self, address):
self.__socket.bind(address)
def listen(self, backlog):
self.__socket.listen(backlog)
def accept(self):
client_socket, (client_host, client_port) = self.__socket.accept()
client_socket.setblocking(1)
client_socket.settimeout(1)
return GameSocket(client_socket), (client_host, client_port)
def connect(self, address):
self.__socket.connect(address)
def close(self):
self.__socket.close()
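    # Wire format used by send()/recv(): a pickled header dict padded with
    # spaces to __head_size (64) bytes and carrying the payload length,
    # followed by the pickled payload itself.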
def __recv_raw(self, data_size):
data = ""
buffer_size = self.get_buffer_size()
while data_size:
try:
chunk = self.__socket.recv(min(buffer_size, data_size))
except socket.timeout:
chunk = ""
if not chunk:
break
data += chunk
data_size -= len(chunk)
return data
def recv(self):
head_raw = self.__recv_raw(self.__head_size).rstrip()
# return None if header is empty
if not head_raw:
return None
head = pickle.loads(head_raw)
return pickle.loads(self.__recv_raw(head['data_size']))
def __compose_head(self, data_size):
return pickle.dumps({
'data_size': data_size
}).ljust(self.__head_size)
def send(self, data):
data = pickle.dumps(data)
head = self.__compose_head(len(data))
data = head + data
total_size = len(data)
sent = 0
while sent < total_size:
chunk_size = self.__socket.send(data[sent:])
sent = sent + chunk_size
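# Illustrative sketch (editor's addition, not part of the original project):
# a loopback echo round-trip driving GameSocket on both ends. Host and port
# are arbitrary assumptions; like the module above, it targets Python 2.
def _example_round_trip(host="127.0.0.1", port=34567):
    """Start a one-shot echo listener, connect a client, send one message."""
    import threading
    listener = GameSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind((host, port))
    listener.listen(1)
    def _serve():
        conn, _addr = listener.accept()
        conn.send(conn.recv())  # echo the first framed message back
        conn.close()
    thread = threading.Thread(target=_serve)
    thread.start()
    client = GameSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    client.connect((host, port))
    client.send({"hello": "world"})
    reply = client.recv()  # -> {'hello': 'world'}
    client.close()
    thread.join()
    listener.close()
    return reply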
| mimi1vx/openmoo2 | oldmess/networking/game_socket.py | Python | gpl-2.0 | 2,415 |
# -*- coding: utf-8 -*-
# (c) 2012-2013, Timothy Appnel <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
from ansible import constants as C
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import MutableSequence
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.plugins.loader import connection_loader
class ActionModule(ActionBase):
def _get_absolute_path(self, path):
original_path = path
if path.startswith('rsync://'):
return path
if self._task._role is not None:
path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
else:
path = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', path)
if original_path and original_path[-1] == '/' and path[-1] != '/':
# make sure the dwim'd path ends in a trailing "/"
# if the original path did
path += '/'
return path
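    # For example (paths assumed): when the task runs inside a role, a relative
    # src of 'htdocs/' is resolved against the role's files/ directory, and the
    # trailing slash is preserved so rsync copies the directory contents.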
def _host_is_ipv6_address(self, host):
return ':' in host
def _format_rsync_rsh_target(self, host, path, user):
''' formats rsync rsh target, escaping ipv6 addresses if needed '''
user_prefix = ''
if path.startswith('rsync://'):
return path
# If using docker or buildah, do not add user information
if self._remote_transport not in ['docker', 'buildah'] and user:
user_prefix = '%s@' % (user, )
if self._host_is_ipv6_address(host):
return '[%s%s]:%s' % (user_prefix, host, path)
else:
return '%s%s:%s' % (user_prefix, host, path)
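    # For example (values assumed): user='deploy', host='198.51.100.7',
    # path='/srv/www' gives 'deploy@198.51.100.7:/srv/www', while an IPv6
    # host such as 'fe80::1' is bracketed: '[deploy@fe80::1]:/srv/www'.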
def _process_origin(self, host, path, user):
if host not in C.LOCALHOST:
return self._format_rsync_rsh_target(host, path, user)
if ':' not in path and not path.startswith('/'):
path = self._get_absolute_path(path=path)
return path
def _process_remote(self, task_args, host, path, user, port_matches_localhost_port):
"""
:arg host: hostname for the path
:arg path: file path
:arg user: username for the transfer
:arg port_matches_localhost_port: boolean whether the remote port
matches the port used by localhost's sshd. This is used in
conjunction with seeing whether the host is localhost to know
if we need to have the module substitute the pathname or if it
is a different host (for instance, an ssh tunnelled port or an
alternative ssh port to a vagrant host.)
"""
transport = self._connection.transport
# If we're connecting to a remote host or we're delegating to another
# host or we're connecting to a different ssh instance on the
# localhost then we have to format the path as a remote rsync path
if host not in C.LOCALHOST or transport != "local" or \
(host in C.LOCALHOST and not port_matches_localhost_port):
# If we're delegating to non-localhost and but the
# inventory_hostname host is localhost then we need the module to
# fix up the rsync path to use the controller's public DNS/IP
# instead of "localhost"
if port_matches_localhost_port and host in C.LOCALHOST:
task_args['_substitute_controller'] = True
return self._format_rsync_rsh_target(host, path, user)
if ':' not in path and not path.startswith('/'):
path = self._get_absolute_path(path=path)
return path
def _override_module_replaced_vars(self, task_vars):
""" Some vars are substituted into the modules. Have to make sure
that those are correct for localhost when synchronize creates its own
connection to localhost."""
# Clear the current definition of these variables as they came from the
# connection to the remote host
if 'ansible_syslog_facility' in task_vars:
del task_vars['ansible_syslog_facility']
for key in list(task_vars.keys()):
if key.startswith("ansible_") and key.endswith("_interpreter"):
del task_vars[key]
# Add the definitions from localhost
for host in C.LOCALHOST:
if host in task_vars['hostvars']:
localhost = task_vars['hostvars'][host]
break
if 'ansible_syslog_facility' in localhost:
task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility']
for key in localhost:
if key.startswith("ansible_") and key.endswith("_interpreter"):
task_vars[key] = localhost[key]
def run(self, tmp=None, task_vars=None):
''' generates params and passes them on to the rsync module '''
# When modifying this function be aware of the tricky convolutions
# your thoughts have to go through:
#
# In normal ansible, we connect from controller to inventory_hostname
# (playbook's hosts: field) or controller to delegate_to host and run
# a module on one of those hosts.
#
# So things that are directly related to the core of ansible are in
# terms of that sort of connection that always originate on the
# controller.
#
# In synchronize we use ansible to connect to either the controller or
# to the delegate_to host and then run rsync which makes its own
# connection from controller to inventory_hostname or delegate_to to
# inventory_hostname.
#
# That means synchronize needs to have some knowledge of the
# controller to inventory_host/delegate host that ansible typically
# establishes and use those to construct a command line for rsync to
# connect from the inventory_host to the controller/delegate. The
# challenge for coders is remembering which leg of the trip is
# associated with the conditions that you're checking at any one time.
if task_vars is None:
task_vars = dict()
# We make a copy of the args here because we may fail and be asked to
# retry. If that happens we don't want to pass the munged args through
# to our next invocation. Munged args are single use only.
_tmp_args = self._task.args.copy()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# Store remote connection type
self._remote_transport = self._connection.transport
# Handle docker connection options
if self._remote_transport == 'docker':
self._docker_cmd = self._connection.docker_cmd
if self._play_context.docker_extra_args:
self._docker_cmd = "%s %s" % (self._docker_cmd, self._play_context.docker_extra_args)
# self._connection accounts for delegate_to so
# remote_transport is the transport ansible thought it would need
# between the controller and the delegate_to host or the controller
# and the remote_host if delegate_to isn't set.
remote_transport = False
if self._connection.transport != 'local':
remote_transport = True
try:
delegate_to = self._task.delegate_to
except (AttributeError, KeyError):
delegate_to = None
# ssh paramiko docker buildah and local are fully supported transports. Anything
# else only works with delegate_to
if delegate_to is None and self._connection.transport not in \
('ssh', 'paramiko', 'local', 'docker', 'buildah'):
result['failed'] = True
result['msg'] = (
"synchronize uses rsync to function. rsync needs to connect to the remote "
"host via ssh, docker client or a direct filesystem "
"copy. This remote host is being accessed via %s instead "
"so it cannot work." % self._connection.transport)
return result
use_ssh_args = _tmp_args.pop('use_ssh_args', None)
# Parameter name needed by the ansible module
_tmp_args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync'
_tmp_args['_local_rsync_password'] = task_vars.get('ansible_ssh_pass') or task_vars.get('ansible_password')
# rsync thinks that one end of the connection is localhost and the
# other is the host we're running the task for (Note: We use
# ansible's delegate_to mechanism to determine which host rsync is
# running on so localhost could be a non-controller machine if
# delegate_to is used)
src_host = '127.0.0.1'
inventory_hostname = task_vars.get('inventory_hostname')
dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname)
try:
dest_host = dest_host_inventory_vars['ansible_host']
except KeyError:
dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname)
dest_host_ids = [hostid for hostid in (dest_host_inventory_vars.get('inventory_hostname'),
dest_host_inventory_vars.get('ansible_host'),
dest_host_inventory_vars.get('ansible_ssh_host'))
if hostid is not None]
localhost_ports = set()
for host in C.LOCALHOST:
localhost_vars = task_vars['hostvars'].get(host, {})
for port_var in C.MAGIC_VARIABLE_MAPPING['port']:
port = localhost_vars.get(port_var, None)
if port:
break
else:
port = C.DEFAULT_REMOTE_PORT
localhost_ports.add(port)
# dest_is_local tells us if the host rsync runs on is the same as the
# host rsync puts the files on. This is about *rsync's connection*,
# not about the ansible connection to run the module.
dest_is_local = False
if delegate_to is None and remote_transport is False:
dest_is_local = True
elif delegate_to is not None and delegate_to in dest_host_ids:
dest_is_local = True
# CHECK FOR NON-DEFAULT SSH PORT
inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT
if _tmp_args.get('dest_port', None) is None:
if inv_port is not None:
_tmp_args['dest_port'] = inv_port
# Set use_delegate if we are going to run rsync on a delegated host
# instead of localhost
use_delegate = False
if delegate_to is not None and delegate_to in dest_host_ids:
# edge case: explicit delegate and dest_host are the same
# so we run rsync on the remote machine targeting its localhost
# (itself)
dest_host = '127.0.0.1'
use_delegate = True
elif delegate_to is not None and remote_transport:
# If we're delegating to a remote host then we need to use the
# delegate_to settings
use_delegate = True
# Delegate to localhost as the source of the rsync unless we've been
# told (via delegate_to) that a different host is the source of the
# rsync
if not use_delegate and remote_transport:
# Create a connection to localhost to run rsync on
new_stdin = self._connection._new_stdin
            # Unlike port, there can be only one shell
localhost_shell = None
for host in C.LOCALHOST:
localhost_vars = task_vars['hostvars'].get(host, {})
for shell_var in C.MAGIC_VARIABLE_MAPPING['shell']:
localhost_shell = localhost_vars.get(shell_var, None)
if localhost_shell:
break
if localhost_shell:
break
else:
localhost_shell = os.path.basename(C.DEFAULT_EXECUTABLE)
self._play_context.shell = localhost_shell
            # Unlike port, there can be only one executable
localhost_executable = None
for host in C.LOCALHOST:
localhost_vars = task_vars['hostvars'].get(host, {})
for executable_var in C.MAGIC_VARIABLE_MAPPING['executable']:
localhost_executable = localhost_vars.get(executable_var, None)
if localhost_executable:
break
if localhost_executable:
break
else:
localhost_executable = C.DEFAULT_EXECUTABLE
self._play_context.executable = localhost_executable
new_connection = connection_loader.get('local', self._play_context, new_stdin)
self._connection = new_connection
# Override _remote_is_local as an instance attribute specifically for the synchronize use case
# ensuring we set local tmpdir correctly
self._connection._remote_is_local = True
self._override_module_replaced_vars(task_vars)
# SWITCH SRC AND DEST HOST PER MODE
if _tmp_args.get('mode', 'push') == 'pull':
(dest_host, src_host) = (src_host, dest_host)
# MUNGE SRC AND DEST PER REMOTE_HOST INFO
src = _tmp_args.get('src', None)
dest = _tmp_args.get('dest', None)
if src is None or dest is None:
return dict(failed=True, msg="synchronize requires both src and dest parameters are set")
# Determine if we need a user@
user = None
if not dest_is_local:
# Src and dest rsync "path" handling
if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False):
if use_delegate:
user = task_vars.get('ansible_delegated_vars', dict()).get('ansible_ssh_user', None)
if not user:
user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
if not user:
user = C.DEFAULT_REMOTE_USER
else:
user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
# Private key handling
private_key = self._play_context.private_key_file
if private_key is not None:
_tmp_args['private_key'] = private_key
# use the mode to define src and dest's url
if _tmp_args.get('mode', 'push') == 'pull':
# src is a remote path: <user>@<host>, dest is a local path
src = self._process_remote(_tmp_args, src_host, src, user, inv_port in localhost_ports)
dest = self._process_origin(dest_host, dest, user)
else:
# src is a local path, dest is a remote path: <user>@<host>
src = self._process_origin(src_host, src, user)
dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports)
else:
# Still need to munge paths (to account for roles) even if we aren't
# copying files between hosts
if not src.startswith('/'):
src = self._get_absolute_path(path=src)
if not dest.startswith('/'):
dest = self._get_absolute_path(path=dest)
_tmp_args['src'] = src
_tmp_args['dest'] = dest
# Allow custom rsync path argument
rsync_path = _tmp_args.get('rsync_path', None)
# backup original become as we are probably about to unset it
become = self._play_context.become
if not dest_is_local:
# don't escalate for docker. doing --rsync-path with docker exec fails
# and we can switch directly to the user via docker arguments
if self._play_context.become and not rsync_path and self._remote_transport != 'docker':
# If no rsync_path is set, become was originally set, and dest is
# remote then add privilege escalation here.
if self._play_context.become_method == 'sudo':
rsync_path = 'sudo rsync'
# TODO: have to add in the rest of the become methods here
# We cannot use privilege escalation on the machine running the
# module. Instead we run it on the machine rsync is connecting
# to.
self._play_context.become = False
_tmp_args['rsync_path'] = rsync_path
if use_ssh_args:
ssh_args = [
getattr(self._play_context, 'ssh_args', ''),
getattr(self._play_context, 'ssh_common_args', ''),
getattr(self._play_context, 'ssh_extra_args', ''),
]
_tmp_args['ssh_args'] = ' '.join([a for a in ssh_args if a])
# If launching synchronize against docker container
# use rsync_opts to support container to override rsh options
if self._remote_transport in ['docker', 'buildah']:
# Replicate what we do in the module argumentspec handling for lists
if not isinstance(_tmp_args.get('rsync_opts'), MutableSequence):
tmp_rsync_opts = _tmp_args.get('rsync_opts', [])
if isinstance(tmp_rsync_opts, string_types):
tmp_rsync_opts = tmp_rsync_opts.split(',')
elif isinstance(tmp_rsync_opts, (int, float)):
tmp_rsync_opts = [to_text(tmp_rsync_opts)]
_tmp_args['rsync_opts'] = tmp_rsync_opts
if '--blocking-io' not in _tmp_args['rsync_opts']:
_tmp_args['rsync_opts'].append('--blocking-io')
if self._remote_transport in ['docker']:
if become and self._play_context.become_user:
_tmp_args['rsync_opts'].append("--rsh=%s exec -u %s -i" % (self._docker_cmd, self._play_context.become_user))
elif user is not None:
_tmp_args['rsync_opts'].append("--rsh=%s exec -u %s -i" % (self._docker_cmd, user))
else:
_tmp_args['rsync_opts'].append("--rsh=%s exec -i" % self._docker_cmd)
elif self._remote_transport in ['buildah']:
_tmp_args['rsync_opts'].append("--rsh=buildah run --")
# run the module and store the result
result.update(self._execute_module('synchronize', module_args=_tmp_args, task_vars=task_vars))
if 'SyntaxError' in result.get('exception', result.get('msg', '')):
# Emit a warning about using python3 because synchronize is
# somewhat unique in running on localhost
result['exception'] = result['msg']
result['msg'] = ('SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. '
'You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this')
return result
| shepdelacreme/ansible | lib/ansible/plugins/action/synchronize.py | Python | gpl-3.0 | 19,981 |
#!/usr/bin/env python
"""
Undistort image.
(C) 2016-2018 1024jp
"""
import math
import os
import sys
import cv2
import numpy as np
from modules import argsparser
from modules.datafile import Data
from modules.undistortion import Undistorter
from modules.projection import Projector
# constants
SUFFIX = "_calib"
class ArgsParser(argsparser.Parser):
description = 'Undistort image based on a location file.'
datafile_name = 'image'
def init_arguments(self):
super(ArgsParser, self).init_arguments()
script = self.add_argument_group('script options')
script.add_argument('--save',
action='store_true',
default=False,
help="save result in a file instead displaying it"
" (default: %(default)s)"
)
script.add_argument('--perspective',
action='store_true',
default=False,
help="also remove perspective"
" (default: %(default)s)"
)
script.add_argument('--stats',
action='store_true',
default=False,
help="display stats"
" (default: %(default)s)"
)
def add_suffix_to_path(path, suffix):
"""Append suffix to file name before file extension.
Arguments:
path (str) -- File path.
suffix (str) -- Suffix string to append.
"""
root, extension = os.path.splitext(path)
return root + suffix + extension
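# For example (paths assumed): add_suffix_to_path('shots/img01.png', '_calib')
# returns 'shots/img01_calib.png'.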
def show_image(image, scale=1.0, window_title='Image'):
"""Display given image in a window.
Arguments:
image () -- Image to display.
scale (float) -- Magnification of image.
window_title (str) -- Title of window.
"""
scaled_image = scale_image(image, scale)
cv2.imshow(window_title, scaled_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
def scale_image(image, scale=1.0):
"""Scale up/down given image.
Arguments:
image () -- Image to process.
scale (float) -- Magnification of image.
"""
height, width = [int(scale * length) for length in image.shape[:2]]
return cv2.resize(image, (width, height))
def plot_points(image, points, color=(0, 0, 255)):
"""Draw circles at given locations on image.
Arguments:
image -- Image to draw on.
        points -- x,y pairs of points to plot.
        color -- BGR color of the circles (red by default).
    """
# find best radius for image
image_width = image.shape[1]
radius = int(image_width / 400)
# draw
for point in points:
point = tuple(map(int, point))
cv2.circle(image, point, color=color, radius=radius,
                   thickness=radius // 2)  # OpenCV expects an integer thickness
def estimate_clipping_rect(projector, size):
"""
Return:
rect -- NSRect style 2d-tuple.
flipped (bool) -- Whether y-axis is flipped.
"""
# lt -> rt -> lb -> rb
image_corners = [(0, 0), (size[0], 0), (0, size[1]), size]
x_points = []
y_points = []
for corner in image_corners:
x, y = map(int, projector.project_point(*corner))
x_points.append(x)
y_points.append(y)
min_x = min(x_points)
min_y = min(y_points)
max_x = max(x_points)
max_y = max(y_points)
rect = ((min_x, min_y), (max_x - min_x, max_y - min_y))
flipped = y_points[3] < 0
return rect, flipped
def main(data, saves_file=False, removes_perspective=True, shows_stats=False):
imgpath = data.datafile.name
image = cv2.imread(imgpath)
size = image.shape[::-1][1:3]
undistorter = Undistorter.init(data.image_points, data.dest_points, size)
image = undistorter.undistort_image(image)
undistorted_points = undistorter.calibrate_points(data.image_points)
plot_points(image, undistorted_points)
if shows_stats:
print('[stats]')
print('number of points: {}'.format(len(undistorted_points)))
if removes_perspective:
projector = Projector(undistorted_points, data.dest_points)
# show stats if needed
if shows_stats:
diffs = []
for point, (dest_x, dest_y, dest_z) in zip(undistorted_points,
data.dest_points):
x, y = projector.project_point(*point)
diffs.append([x - dest_x, y - dest_y])
abs_diffs = [(abs(x), abs(y)) for x, y in diffs]
print('mean: {:.2f}, {:.2f}'.format(*np.mean(abs_diffs, axis=0)))
print(' std: {:.2f}, {:.2f}'.format(*np.std(abs_diffs, axis=0)))
print(' max: {:.2f}, {:.2f}'.format(*np.max(abs_diffs, axis=0)))
print('diff:')
for x, y in diffs:
print(' {:6.1f},{:6.1f}'.format(x, y))
# transform image by removing perspective
rect, is_flipped = estimate_clipping_rect(projector, size)
image = projector.project_image(image, rect[1], rect[0])
scale = float(size[0]) / image.shape[1]
image = scale_image(image, scale)
for point in data.dest_points:
point = point[0:2]
point = [scale * (l - origin) for l, origin in zip(point, rect[0])]
plot_points(image, [point], color=(255, 128, 0))
# flip image if needed
if is_flipped:
image = cv2.flip(image, 0)
if saves_file:
outpath = add_suffix_to_path(imgpath, SUFFIX)
cv2.imwrite(outpath, image)
else:
show_image(image, scale=1.0/2, window_title='Undistorted Image')
if __name__ == "__main__":
parser = ArgsParser()
args = parser.parse_args()
if args.test:
print("This script doesn't have test.")
sys.exit()
data = Data(args.file, in_cols=args.in_cols)
main(data, saves_file=args.save,
removes_perspective=args.perspective, shows_stats=args.stats)
| 1024jp/LensCalibrator | createimage.py | Python | mit | 6,006 |
# -*- coding: utf-8 -*-
from utils.db import SqliteDB
from utils.rwlogging import log
from indicator import ma
def calc_macd(prices, fast = 12, slow = 26, sign = 9):
ps = [p['close'] for p in prices]
macds = {}
macds['fast'] = ma.calc_ema(ps, fast)
macds['slow'] = ma.calc_ema(ps, slow)
macds['macd'] = map(lambda f,s: round(f - s, 5), macds['fast'], macds['slow'])
macds['sign'] = ma.calc_ma(macds['macd'], sign)
#macds['macd'] = map(lambda f,s: round(f - s, 5), macds['dif'], macds['sign'])
return macds
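# Illustrative sketch (editor's addition): calc_macd on a small in-memory price
# series, without the SqliteDB plumbing used below. The numbers are made up.
#
#   prices = [{'close': c} for c in (10.0, 10.5, 10.2, 10.8, 11.0, 10.9)]
#   macds = calc_macd(prices, fast=3, slow=5, sign=3)
#   macds['macd'][-1]   # latest MACD value (fast EMA minus slow EMA)
#   macds['sign'][-1]   # its signal line (ma.calc_ma over the MACD series)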
def calc_all_macd(table, fast = 12, slow = 26, sign = 9):
log.info('MACD generating for ' + table)
db = SqliteDB().createIndicator(table, 'MACD', 'A', fast, slow, sign)
prices = SqliteDB().getAllPrices(table)
macds = calc_macd(prices, fast, slow, sign)
for i in range(len(prices)):
db.addIndicate(prices[i]['dtlong'], macds['macd'][i], macds['sign'][i], macds['fast'][i], macds['slow'][i])
db.commit()
log.info('MACD done')
| rolandwz/pymisc | utrader/indicator/macd.py | Python | mit | 964 |
# Copyright 2020 Google Sans Authors
# Copyright 2021 Simon Cozens
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import textwrap
from difflib import ndiff
from pathlib import Path
from fontbakery.callable import check, condition
from fontbakery.checkrunner import FAIL, PASS, SKIP
from fontbakery.fonts_profile import profile_factory
from fontbakery.message import Message
from fontbakery.section import Section
from vharfbuzz import Vharfbuzz, FakeBuffer
from os.path import basename, relpath
from stringbrewer import StringBrewer
from collidoscope import Collidoscope
shaping_basedir = Path("qa", "shaping_tests")
profile_imports = ()
profile = profile_factory(default_section=Section("Shaping Checks"))
SHAPING_PROFILE_CHECKS = [
"com.google.fonts/check/shaping/regression",
"com.google.fonts/check/shaping/forbidden",
"com.google.fonts/check/shaping/collides",
]
STYLESHEET = """
<style type="text/css">
@font-face {font-family: "TestFont"; src: url(%s);}
.tf { font-family: "TestFont"; }
.shaping pre { font-size: 1.2rem; }
.shaping li { font-size: 1.2rem; border-top: 1px solid #ddd; padding: 12px; margin-top: 12px; }
.shaping-svg { height: 100px; margin:10px; transform: matrix(1, 0, 0, -1, 0, 0); }
</style>
"""
def get_stylesheet(vharfbuzz):
filename = Path(vharfbuzz.filename)
return STYLESHEET % relpath(filename, shaping_basedir)
def fix_svg(svg):
return svg.replace('<svg', '<svg class="shaping-svg"')
def create_report_item(vharfbuzz,
message,
text=None,
buf1=None,
buf2=None,
type="item",
note=None,
extra_data=None):
if text:
message += f': <span class="tf">{text}</span>'
if type == "item":
message = f"<li>{message}"
if note:
message += f" ({note})"
message += "</li>\n"
elif type == "header":
message = get_stylesheet(vharfbuzz) + f"\n<h4>{message}</h4>\n"
if extra_data:
message += f"\n\n<pre>{extra_data}</pre>\n\n"
serialized_buf1 = None
serialized_buf2 = None
if buf2:
if isinstance(buf2, FakeBuffer):
try:
serialized_buf2 = vharfbuzz.serialize_buf(buf2)
except Exception:
# This may fail if the glyphs are not found in the font
serialized_buf2 = None
buf2 = None # Don't try to draw it either
else:
serialized_buf2 = buf2
message += f"\n\n<pre>Expected: {serialized_buf2}</pre>\n\n"
if buf1:
serialized_buf1 = vharfbuzz.serialize_buf(buf1,
glyphsonly=(buf2 and isinstance(buf2, str)))
message += f"\n\n<pre>Got : {serialized_buf1}</pre>\n\n"
# Report a diff table
if serialized_buf1 and serialized_buf2:
diff = list(ndiff([serialized_buf1], [serialized_buf2]))
if diff and diff[-1][0] == "?":
message += f"\n\n<pre> {diff[-1][1:]}</pre>\n\n"
# Now draw it as SVG
if buf1:
message += "\nGot: " + fix_svg(vharfbuzz.buf_to_svg(buf1))
if buf2 and isinstance(buf2, FakeBuffer):
try:
message += " Expected: " + fix_svg(vharfbuzz.buf_to_svg(buf2))
except KeyError:
pass
return f'<div class="shaping">\n\n{message}\n\n</div>'
def get_from_test_with_default(test, configuration, el, default=None):
defaults = configuration.get("defaults", {})
return test.get(el, defaults.get(el, default))
def get_shaping_parameters(test, configuration):
params = {}
for el in ["script", "language", "direction", "features", "shaper"]:
params[el] = get_from_test_with_default(test, configuration, el)
return params
# This is a very generic "do something with shaping" test runner.
# It'll be given concrete meaning later.
def run_a_set_of_shaping_tests(config,
ttFont,
run_a_test,
test_filter,
generate_report,
preparation=None):
filename = Path(ttFont.reader.file.name)
vharfbuzz = Vharfbuzz(filename)
shaping_file_found = False
ran_a_test = False
extra_data = None
if "com.google.fonts/check/shaping" not in config:
yield SKIP, "Shaping test directory not defined in configuration file"
return
shaping_basedir = config["com.google.fonts/check/shaping"].get("test_directory")
if not shaping_basedir:
yield SKIP, "Shaping test directory not defined in configuration file"
return
for shaping_file in Path(shaping_basedir).glob("*.json"):
shaping_file_found = True
try:
shaping_input_doc = json.loads(shaping_file.read_text())
except Exception as e:
yield FAIL,\
Message("shaping-invalid-json",
f"{shaping_file}: Invalid JSON: {e}.")
return
configuration = shaping_input_doc.get("configuration", {})
try:
shaping_tests = shaping_input_doc["tests"]
except KeyError:
yield FAIL,\
Message("shaping-missing-tests",
f"{shaping_file}: JSON file must have a 'tests' key.")
return
if preparation:
extra_data = preparation(ttFont, configuration)
failed_shaping_tests = []
for test in shaping_tests:
if not test_filter(test, configuration):
continue
if not "input" in test:
yield FAIL,\
Message("shaping-missing-input",
f"{shaping_file}: test is missing an input key.")
return
exclude_fonts = test.get("exclude", [])
if basename(filename) in exclude_fonts:
continue
only_fonts = test.get("only")
if only_fonts and basename(filename) not in only_fonts:
continue
run_a_test(filename,
vharfbuzz,
test,
configuration,
failed_shaping_tests,
extra_data)
ran_a_test = True
if ran_a_test:
if not failed_shaping_tests:
yield PASS, f"{shaping_file}: No regression detected"
else:
yield from generate_report(vharfbuzz,
shaping_file,
failed_shaping_tests)
if not shaping_file_found:
yield SKIP, "No test files found."
if not ran_a_test:
yield SKIP, "No applicable tests ran."
@check(
id = "com.google.fonts/check/shaping/regression",
rationale = """
    Fonts with complex layout rules can benefit from regression tests to ensure that the rules are behaving as designed. This check runs a shaping test suite and compares expected shaping against actual shaping, reporting any differences.
    Shaping test suites should be written by the font engineer and referenced in the fontbakery configuration file. For more information about writing shaping test files and how to configure fontbakery to read the shaping test suites, see https://simoncozens.github.io/tdd-for-otl/
""",
proposal = "https://github.com/googlefonts/fontbakery/pull/3223"
)
def com_google_fonts_check_shaping_regression(config, ttFont):
"""Check that texts shape as per expectation"""
yield from run_a_set_of_shaping_tests(
config,
ttFont,
run_shaping_regression,
lambda test, configuration: "expectation" in test,
        generate_shaping_regression_report,
)
def run_shaping_regression(filename,
vharfbuzz,
test,
configuration,
failed_shaping_tests,
extra_data):
shaping_text = test["input"]
parameters = get_shaping_parameters(test, configuration)
output_buf = vharfbuzz.shape(shaping_text, parameters)
expectation = test["expectation"]
if isinstance(expectation, dict):
expectation = expectation.get(filename.name, expectation["default"])
output_serialized = vharfbuzz.serialize_buf(output_buf,
glyphsonly="+" not in expectation)
if output_serialized != expectation:
failed_shaping_tests.append((test, expectation, output_buf, output_serialized))
def generate_shaping_regression_report(vharfbuzz, shaping_file, failed_shaping_tests):
report_items = []
header = f"{shaping_file}: Expected and actual shaping not matching"
report_items.append(create_report_item(vharfbuzz, header, type="header"))
for test, expected, output_buf, output_serialized in failed_shaping_tests:
extra_data = {
k: test[k]
for k in ["script", "language", "direction", "features"]
if k in test
}
# Make HTML report here.
if "=" in expected:
buf2 = vharfbuzz.buf_from_string(expected)
else:
buf2 = expected
report_item = create_report_item(vharfbuzz,
"Shaping did not match",
text=test["input"],
buf1=output_buf,
buf2=buf2,
note=test.get("note"),
extra_data=extra_data)
report_items.append(report_item)
yield FAIL,\
Message("shaping-regression",
header + "\n" + "\n".join(report_items))
@check(
id="com.google.fonts/check/shaping/forbidden",
rationale="""
    Fonts with complex layout rules can benefit from regression tests to ensure that the rules are behaving as designed. This check runs a shaping test suite and reports if any glyphs are generated in the shaping which should not be produced. (For example, .notdef glyphs, visible viramas, etc.)
    Shaping test suites should be written by the font engineer and referenced in the fontbakery configuration file. For more information about writing shaping test files and how to configure fontbakery to read the shaping test suites, see https://simoncozens.github.io/tdd-for-otl/
""",
proposal = "https://github.com/googlefonts/fontbakery/pull/3223"
)
def com_google_fonts_check_shaping_forbidden(config, ttFont):
"""Check that no forbidden glyphs are found while shaping"""
yield from run_a_set_of_shaping_tests(
config,
ttFont,
run_forbidden_glyph_test,
lambda test, configuration: "forbidden_glyphs" in configuration,
forbidden_glyph_test_results,
)
def run_forbidden_glyph_test(
filename, vharfbuzz, test, configuration, failed_shaping_tests, extra_data
):
is_stringbrewer = (
get_from_test_with_default(test, configuration, "input_type", "string")
== "pattern"
)
parameters = get_shaping_parameters(test, configuration)
forbidden_glyphs = configuration["forbidden_glyphs"]
if is_stringbrewer:
sb = StringBrewer(
recipe=test["input"], ingredients=configuration["ingredients"]
)
strings = sb.generate_all()
else:
strings = [test["input"]]
for shaping_text in strings:
output_buf = vharfbuzz.shape(shaping_text, parameters)
output_serialized = vharfbuzz.serialize_buf(output_buf, glyphsonly=True)
glyph_names = output_serialized.split("|")
for forbidden in forbidden_glyphs:
if forbidden in glyph_names:
failed_shaping_tests.append((shaping_text, output_buf, forbidden))
def forbidden_glyph_test_results(vharfbuzz, shaping_file, failed_shaping_tests):
report_items = []
msg = f"{shaping_file}: Forbidden glyphs found while shaping"
report_items.append(create_report_item(vharfbuzz, msg, type="header"))
for shaping_text, buf, forbidden in failed_shaping_tests:
msg = f"{shaping_text} produced '{forbidden}'"
report_items.append(create_report_item(vharfbuzz,
msg,
text=shaping_text,
buf1=buf))
yield FAIL,\
Message("shaping-forbidden",
msg + ".\n" + "\n".join(report_items))
@check(
id = "com.google.fonts/check/shaping/collides",
rationale = """
    Fonts with complex layout rules can benefit from regression tests to ensure that the rules are behaving as designed. This check runs a shaping test suite and reports instances where the glyphs collide in unexpected ways.
    Shaping test suites should be written by the font engineer and referenced in the fontbakery configuration file. For more information about writing shaping test files and how to configure fontbakery to read the shaping test suites, see https://simoncozens.github.io/tdd-for-otl/
""",
proposal = "https://github.com/googlefonts/fontbakery/pull/3223"
)
def com_google_fonts_check_shaping_collides(config, ttFont):
"""Check that no collisions are found while shaping"""
yield from run_a_set_of_shaping_tests(
config,
ttFont,
run_collides_glyph_test,
lambda test, configuration: "collidoscope" in test
or "collidoscope" in configuration,
collides_glyph_test_results,
setup_glyph_collides,
)
def setup_glyph_collides(ttFont, configuration):
filename = Path(ttFont.reader.file.name)
collidoscope_configuration = configuration.get("collidoscope")
if not collidoscope_configuration:
return {
"bases": True,
"marks": True,
"faraway": True,
"adjacent_clusters": True,
}
col = Collidoscope(filename,
collidoscope_configuration,
direction=configuration.get("direction", "LTR"))
return {"collidoscope": col}
def run_collides_glyph_test(filename,
vharfbuzz,
test,
configuration,
failed_shaping_tests,
extra_data):
col = extra_data["collidoscope"]
is_stringbrewer = get_from_test_with_default(test,
configuration,
"input_type",
"string") == "pattern"
parameters = get_shaping_parameters(test, configuration)
allowed_collisions = get_from_test_with_default(test,
configuration,
"allowedcollisions",
[])
if is_stringbrewer:
sb = StringBrewer(recipe=test["input"],
ingredients=configuration["ingredients"])
strings = sb.generate_all()
else:
strings = [test["input"]]
for shaping_text in strings:
output_buf = vharfbuzz.shape(shaping_text, parameters)
glyphs = col.get_glyphs(shaping_text, buf=output_buf)
collisions = col.has_collisions(glyphs)
bumps = [f"{c.glyph1}/{c.glyph2}" for c in collisions]
bumps = [b for b in bumps if b not in allowed_collisions]
if bumps:
draw = fix_svg(col.draw_overlaps(glyphs, collisions))
failed_shaping_tests.append((shaping_text, bumps, draw, output_buf))
def collides_glyph_test_results(vharfbuzz, shaping_file, failed_shaping_tests):
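    """Yield a single FAIL message listing each unique set of colliding glyphs."""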
report_items = []
seen_bumps = {}
msg = f"{shaping_file}: {len(failed_shaping_tests)} collisions found while shaping"
report_items.append(create_report_item(vharfbuzz, msg, type="header"))
for shaping_text, bumps, draw, buf in failed_shaping_tests:
        # Report each unique combination of colliding glyphs only once.
if tuple(bumps) in seen_bumps:
continue
seen_bumps[tuple(bumps)] = True
report_item = create_report_item(vharfbuzz,
f"{',' .join(bumps)} collision found in"
f" e.g. <span class='tf'>{shaping_text}</span>"
f" <div>{draw}</div>",
buf1=buf)
report_items.append(report_item)
yield FAIL,\
Message("shaping-collides",
msg + ".\n" + "\n".join(report_items))
profile.auto_register(globals())
profile.test_expected_checks(SHAPING_PROFILE_CHECKS, exclusive=True)
| googlefonts/fontbakery | Lib/fontbakery/profiles/shaping.py | Python | apache-2.0 | 17,535 |
# -*- coding: utf-8 -*-
"""h2lily.__main__: executed when h2lily directory is called as script."""
from h2lily.h2lily import main, __doc__
from docopt import docopt
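# docopt builds the command-line interface from the usage string in
# h2lily.h2lily's module docstring, imported above as __doc__.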
options = docopt(__doc__, version='0.1')
main(options)
| thomasfillon/h2lily | h2lily/__main__.py | Python | agpl-3.0 | 224 |
from simple_block_tag import simple_block_tag
| piha/django-simple-block-tag | simpleblocktag/__init__.py | Python | mit | 46 |
# Copyright 2017 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.auditors.gcp.gce.firewall
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Tom Melendez <[email protected]> @supertom
"""
from security_monkey.auditor import Auditor
from security_monkey.auditors.gcp.util import make_audit_issue, process_issues
from security_monkey.common.gcp.config import AuditorConfig
from security_monkey.common.gcp.error import AuditIssue
from security_monkey.watchers.gcp.gce.firewall import GCEFirewallRule
# NOTE: issue scores and messages are defined in
# security_monkey/common/gcp/config.py
class GCEFirewallRuleAuditor(Auditor):
index = GCEFirewallRule.index
i_am_singular = GCEFirewallRule.i_am_singular
i_am_plural = GCEFirewallRule.i_am_plural
gcp_config = AuditorConfig.GCEFirewallRule
def __init__(self, accounts=None, debug=True):
        super(GCEFirewallRuleAuditor, self).__init__(
            accounts=accounts, debug=debug)
def _port_range_exists(self, allowed_list, error_cat='ALLOWED'):
"""
Check to see if a port range exists in the allowed field.
"""
errors = []
for allowed in allowed_list:
ports = allowed.get('ports', None)
if ports:
for port in ports:
if str(port).find('-') > -1:
ae = make_audit_issue(
error_cat, 'EXISTS', 'PORTRANGE')
ae.notes = '%s:%s' % (allowed['IPProtocol'], port)
errors.append(ae)
return errors
def _target_tags_valid(self, target_tags, error_cat='TARGET_TAGS'):
"""
Check to see if target tags are present.
"""
errors = []
if not target_tags:
ae = make_audit_issue(
error_cat, 'FOUND', 'NOT')
errors.append(ae)
return errors
def _source_ranges_open(self, source_ranges, error_cat='SOURCE_RANGES'):
"""
Check to see if the source range field is set to allow all traffic
"""
errors = []
open_range = '0.0.0.0/0'
for source_range in source_ranges:
if source_range == open_range:
ae = make_audit_issue(
error_cat, 'OPEN', 'TRAFFIC')
errors.append(ae)
return errors
def inspect_target_tags(self, item):
"""
Driver for Target Tags. Calls helpers as needed.
return: (bool, [list of AuditIssues])
"""
errors = []
target_tags = item.config.get('TargetTags', None)
err = self._target_tags_valid(target_tags)
        if err:
            errors.extend(err)
if errors:
return (False, errors)
return (True, None)
def inspect_source_ranges(self, item):
"""
Driver for Source Ranges. Calls helpers as needed.
return: (bool, [list of AuditIssues])
"""
errors = []
source_ranges = item.config.get('SourceRanges', None)
if source_ranges:
err = self._source_ranges_open(source_ranges)
            if err:
                errors.extend(err)
if errors:
return (False, errors)
return (True, None)
def inspect_allowed(self, item):
"""
Driver for Allowed field (protocol/ports list). Calls helpers as needed.
return: (bool, [list of AuditIssues])
"""
errors = []
err = self._port_range_exists(item.config.get('Allowed'))
        if err:
            errors.extend(err)
if errors:
return (False, errors)
return (True, None)
def check_allowed(self, item):
(ok, errors) = self.inspect_allowed(item)
process_issues(self, ok, errors, item)
def check_target_tags(self, item):
(ok, errors) = self.inspect_target_tags(item)
process_issues(self, ok, errors, item)
def check_source_ranges(self, item):
(ok, errors) = self.inspect_source_ranges(item)
process_issues(self, ok, errors, item)
| stackArmor/security_monkey | security_monkey/auditors/gcp/gce/firewall.py | Python | apache-2.0 | 4,717 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Database Anonymization',
'version': '1.0',
'category': 'Extra Tools',
'description': """
This module allows you to anonymize a database.
===============================================
This module allows you to keep your data confidential for a given database.
This process is useful if you want to go through the migration process while
protecting your own or your customer’s confidential data. The principle is that
you run an anonymization tool which hides your confidential data (it is replaced
by ‘XXX’ characters). Then you can send the anonymized database to the migration
team. Once you get your migrated database back, you restore it and reverse the
anonymization process to recover your previous data.
""",
'depends': ['base'],
'demo': ['data/anonymization_demo.xml'],
'data': [
'data/ir.model.fields.anonymization.csv',
'security/ir.model.access.csv',
'views/anonymization_views.xml',
'wizard/anonymize_wizard_views.xml'
],
'installable': True,
'auto_install': False,
}
| chienlieu2017/it_management | odoo/addons/anonymization/__manifest__.py | Python | gpl-3.0 | 1,171 |
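# Interactive helper: prompts for a spell category and a list of spells, then
# appends them to spellList.yaml as a nested list. An empty spell name ends
# the loop.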
with open("spellList.yaml", "a") as output:
category = input("category: ")
output.write('- "'+category+'":\n')
while True:
spell = input("spell: ")
if spell:
output.write(' - "'+spell+'.SPELL."\n')
else:
break
| Tosh007/DnD5_CharCreator | data/character/spellCreator.py | Python | gpl-3.0 | 273 |
"""
WSGI config for fishscore project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fishscore.settings")
application = get_wsgi_application()
| superdachs/fishscore | fishscore/fishscore/wsgi.py | Python | gpl-3.0 | 395 |
from .generic_plugin import GenericPlugin
from nose.plugins import Plugin as NosePluginBase
from nose.failure import Failure
class NosePlugin(NosePluginBase, GenericPlugin):
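    """Adapt nose's plugin hooks to GenericPlugin's run/failure/error callbacks."""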
name = 'snort'
def begin(self):
self._begin_run()
def addFailure(self, test, exception_tuple, *_):
self._add_failure(self._get_test_name(test), exception_tuple)
def addError(self, test, exception_tuple, *_):
self._add_error(self._get_test_name(test), exception_tuple)
def _get_test_name(self, test):
if isinstance(test, Failure):
return "?"
return str(test)
def finalize(self, result=None):
self._end_run(result)
| vmalloc/snort | snort/nose_plugin.py | Python | bsd-3-clause | 675 |